// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if the total number
 * of insns is less than 4K but there are too many branches that change
 * stack/regs. Number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
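
/* For illustration, a hypothetical instruction sequence exercising the
 * reference tracking described above (argument setup for bpf_sk_lookup_tcp()
 * is omitted; this is a sketch, not a complete program):
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *    // R0 is now PTR_TO_SOCKET_OR_NULL with a fresh reference id
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),  // if NULL, skip to exit; the
 *                                            // verifier releases the id there
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),    // non-NULL branch: PTR_TO_SOCKET
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *    BPF_EXIT_INSN(),
 *
 * Dropping the bpf_sk_release() call would leave the reference unreleased at
 * BPF_EXIT and the program would be rejected.
 */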

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
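
/* For illustration: map_ptr_state packs an 'unpriv' flag into bit 0 of the
 * stored map pointer (struct bpf_map is aligned far beyond 2 bytes, so bit 0
 * is always free). A hypothetical walk-through with a made-up address:
 *
 *	bpf_map_ptr_store(aux, map, true);	// map == (void *)0xffff888012345670
 *	// aux->map_ptr_state == 0xffff888012345671
 *	bpf_map_ptr_unpriv(aux);		// true: bit 0 is set
 *	BPF_MAP_PTR(aux->map_ptr_state);	// 0xffff888012345670 again
 *
 * bpf_map_key_store() plays the same trick on the tracked constant key,
 * reserving bits 62 (BPF_MAP_KEY_SEEN) and 63 (BPF_MAP_KEY_POISON).
 */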

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int func_id;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}
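
/* For illustration: linfo[] is sorted by insn_off and find_linfo() returns
 * the entry covering insn_off. Assuming hypothetical entries at insn_off
 * 0, 5 and 12 for a 20-insn program:
 *
 *	find_linfo(env, 3);	// &linfo[0] (covers insns 0..4)
 *	find_linfo(env, 7);	// &linfo[1] (covers insns 5..11)
 *	find_linfo(env, 12);	// &linfo[2] (covers insns 12..19)
 *	find_linfo(env, 25);	// NULL, insn_off >= prog->len
 */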

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (log->level == BPF_LOG_KERNEL) {
		pr_err("BPF:%s\n", log->kbuf);
		return;
	}
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
{
	char zero = 0;

	if (!bpf_verifier_log_needed(log))
		return;

	log->len_used = new_pos;
	if (put_user(zero, log->ubuf + new_pos))
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_XDP_SOCK;
}

static bool reg_type_not_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_MAP_VALUE ||
		type == PTR_TO_SOCK_COMMON;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
	       type == PTR_TO_TCP_SOCK_OR_NULL ||
	       type == PTR_TO_BTF_ID_OR_NULL ||
	       type == PTR_TO_MEM_OR_NULL ||
	       type == PTR_TO_RDONLY_BUF_OR_NULL ||
	       type == PTR_TO_RDWR_BUF_OR_NULL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
		map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCKET_OR_NULL ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_TCP_SOCK_OR_NULL ||
		type == PTR_TO_MEM ||
		type == PTR_TO_MEM_OR_NULL;
}

static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCK_COMMON;
}

static bool arg_type_may_be_null(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_MAP_VALUE_OR_NULL ||
	       type == ARG_PTR_TO_MEM_OR_NULL ||
	       type == ARG_PTR_TO_CTX_OR_NULL ||
	       type == ARG_PTR_TO_SOCKET_OR_NULL ||
	       type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release ||
	       func_id == BPF_FUNC_ringbuf_submit ||
	       func_id == BPF_FUNC_ringbuf_discard;
}

static bool may_be_acquire_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_lookup_tcp ||
		func_id == BPF_FUNC_sk_lookup_udp ||
		func_id == BPF_FUNC_skc_lookup_tcp ||
		func_id == BPF_FUNC_map_lookup_elem ||
		func_id == BPF_FUNC_ringbuf_reserve;
}

static bool is_acquire_function(enum bpf_func_id func_id,
				const struct bpf_map *map)
{
	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;

	if (func_id == BPF_FUNC_sk_lookup_tcp ||
	    func_id == BPF_FUNC_sk_lookup_udp ||
	    func_id == BPF_FUNC_skc_lookup_tcp ||
	    func_id == BPF_FUNC_ringbuf_reserve)
		return true;

	if (func_id == BPF_FUNC_map_lookup_elem &&
	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
	     map_type == BPF_MAP_TYPE_SOCKHASH))
		return true;

	return false;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
		func_id == BPF_FUNC_sk_fullsock ||
		func_id == BPF_FUNC_skc_to_tcp_sock ||
		func_id == BPF_FUNC_skc_to_tcp6_sock ||
		func_id == BPF_FUNC_skc_to_udp6_sock ||
		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
		func_id == BPF_FUNC_skc_to_tcp_request_sock;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
	[PTR_TO_SOCK_COMMON]	= "sock_common",
	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
	[PTR_TO_TCP_SOCK]	= "tcp_sock",
	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
	[PTR_TO_TP_BUFFER]	= "tp_buffer",
	[PTR_TO_XDP_SOCK]	= "xdp_sock",
	[PTR_TO_BTF_ID]		= "ptr_",
	[PTR_TO_BTF_ID_OR_NULL]	= "ptr_or_null_",
	[PTR_TO_MEM]		= "mem",
	[PTR_TO_MEM_OR_NULL]	= "mem_or_null",
	[PTR_TO_RDONLY_BUF]	= "rdonly_buf",
	[PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null",
	[PTR_TO_RDWR_BUF]	= "rdwr_buf",
	[PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null",
};

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

const char *kernel_type_name(u32 id)
{
	return btf_name_by_offset(btf_vmlinux,
				  btf_type_by_id(btf_vmlinux, id)->name_off);
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			if (t == PTR_TO_BTF_ID || t == PTR_TO_BTF_ID_OR_NULL)
				verbose(env, "%s", kernel_type_name(reg->btf_id));
			verbose(env, "(id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t))
				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
				if (reg->s32_min_value != reg->smin_value &&
				    reg->s32_min_value != S32_MIN)
					verbose(env, ",s32_min_value=%d",
						(int)(reg->s32_min_value));
				if (reg->s32_max_value != reg->smax_value &&
				    reg->s32_max_value != S32_MAX)
					verbose(env, ",s32_max_value=%d",
						(int)(reg->s32_max_value));
				if (reg->u32_min_value != reg->umin_value &&
				    reg->u32_min_value != U32_MIN)
					verbose(env, ",u32_min_value=%d",
						(int)(reg->u32_min_value));
				if (reg->u32_max_value != reg->umax_value &&
				    reg->u32_max_value != U32_MAX)
					verbose(env, ",u32_max_value=%d",
						(int)(reg->u32_max_value));
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (state->stack[i].slot_type[0] == STACK_SPILL) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", reg_type_str[t]);
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "=%s", types_buf);
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	verbose(env, "\n");
}
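
/* For illustration, a hypothetical line of output as the functions above
 * would produce it (exact content depends on the program being verified):
 *
 *	R0_w=inv(id=0) R1=ctx(id=0,off=0,imm=0) R2_w=inv42 R10=fp0 fp-8_w=mmmm????
 *
 * "_w" is the REG_LIVE_WRITTEN mark from print_liveness(), "inv" is
 * reg_type_str[SCALAR_VALUE], "inv42" is a scalar known to be 42, "fp0" is
 * the frame pointer, and the "fp-8" suffix is eight slot_type_char values,
 * one per byte of the stack slot (m = STACK_MISC, ? = STACK_INVALID).
 */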

#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
static int copy_##NAME##_state(struct bpf_func_state *dst,		\
			       const struct bpf_func_state *src)	\
{									\
	if (!src->FIELD)						\
		return 0;						\
	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
		/* internal bug, make state invalid to reject the program */ \
		memset(dst, 0, sizeof(*dst));				\
		return -EFAULT;						\
	}								\
	memcpy(dst->FIELD, src->FIELD,					\
	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
	return 0;							\
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN

#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
				  bool copy_old)			\
{									\
	u32 old_size = state->COUNT;					\
	struct bpf_##NAME##_state *new_##FIELD;				\
	int slot = size / SIZE;						\
									\
	if (size <= old_size || !size) {				\
		if (copy_old)						\
			return 0;					\
		state->COUNT = slot * SIZE;				\
		if (!size && old_size) {				\
			kfree(state->FIELD);				\
			state->FIELD = NULL;				\
		}							\
		return 0;						\
	}								\
	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
				    GFP_KERNEL);			\
	if (!new_##FIELD)						\
		return -ENOMEM;						\
	if (copy_old) {							\
		if (state->FIELD)					\
			memcpy(new_##FIELD, state->FIELD,		\
			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
		memset(new_##FIELD + old_size / SIZE, 0,		\
		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
	}								\
	state->COUNT = slot * SIZE;					\
	kfree(state->FIELD);						\
	state->FIELD = new_##FIELD;					\
	return 0;							\
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN
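
/* For illustration, COPY_STATE_FN(reference, acquired_refs, refs, 1) above
 * expands to roughly:
 *
 *	static int copy_reference_state(struct bpf_func_state *dst,
 *					const struct bpf_func_state *src)
 *	{
 *		if (!src->refs)
 *			return 0;
 *		if (WARN_ON_ONCE(dst->acquired_refs < src->acquired_refs)) {
 *			memset(dst, 0, sizeof(*dst));
 *			return -EFAULT;
 *		}
 *		memcpy(dst->refs, src->refs,
 *		       sizeof(*src->refs) * (src->acquired_refs / 1));
 *		return 0;
 *	}
 *
 * The stack variant divides by BPF_REG_SIZE instead, since allocated_stack
 * counts bytes while stack[] holds one element per 8-byte slot.
 */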

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume a minimal amount of memory. check_stack_write() access from
 * the program calls into realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which realloc_stack_state() copies over. It points to the previous
 * bpf_verifier_state, which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
			      int refs_size, bool copy_old)
{
	int err = realloc_reference_state(state, refs_size, copy_old);
	if (err)
		return err;
	return realloc_stack_state(state, stack_size, copy_old);
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = realloc_reference_state(state, state->acquired_refs + 1, true);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}

static int transfer_reference_state(struct bpf_func_state *dst,
				    struct bpf_func_state *src)
{
	int err = realloc_reference_state(dst, src->acquired_refs, false);
	if (err)
		return err;
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return 0;
}
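
/* For illustration, the lifecycle these helpers implement, as a hypothetical
 * walk-through for an acquiring helper call seen at insn_idx 7:
 *
 *	id = acquire_reference_state(env, 7);	// e.g. id == 3; refs[] now
 *						// records {id=3, insn_idx=7}
 *	// ...the call-checking code stores id in R0's ref_obj_id...
 *	err = release_reference_state(cur_func(env), 3); // 0, entry removed
 *	err = release_reference_state(cur_func(env), 3); // -EINVAL, id unknown
 */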

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
				 false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
	int i, err;

	if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
		kfree(dst_state->jmp_history);
		dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
		if (!dst_state->jmp_history)
			return -ENOMEM;
	}
	memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src frame, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx, bool pop_log)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (pop_log)
		bpf_vlog_reset(&env->log, head->log_pos);
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	elem->log_pos = env->log.len_used;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 *    instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 *    a new state for a sequence of branches and all such current
		 *    and cloned states will be pointing to a single parent state
		 *    which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL, false));
	return NULL;
}
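
/* For illustration, a hedged sketch of how the branch-checking code uses this
 * pair when it reaches a conditional jump such as "if r1 == 0 goto +5" at
 * insn 10:
 *
 *	// queue the taken path (insn 10 + 5 + 1 == 16) for later:
 *	other_branch = push_stack(env, 16, 10, false);
 *	// ...keep walking the fall-through path at insn 11 until BPF_EXIT...
 *	// then do_check() resumes the queued path:
 *	pop_stack(env, &prev_insn_idx, &env->insn_idx, pop_log);
 */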

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;

	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}

static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const_subreg(reg->var_off, imm);
	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;

	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
{
	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void __update_reg32_bounds(struct bpf_reg_state *reg)
{
	struct tnum var32_off = tnum_subreg(reg->var_off);

	/* min signed is max(sign bit) | min(other bits) */
	reg->s32_min_value = max_t(s32, reg->s32_min_value,
			var32_off.value | (var32_off.mask & S32_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->s32_max_value = min_t(s32, reg->s32_max_value,
			var32_off.value | (var32_off.mask & S32_MAX));
	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
	reg->u32_max_value = min(reg->u32_max_value,
				 (u32)(var32_off.value | var32_off.mask));
}

static void __update_reg64_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	__update_reg32_bounds(reg);
	__update_reg64_bounds(reg);
}

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
		reg->s32_min_value = reg->u32_min_value =
			max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = reg->u32_max_value =
			min_t(u32, reg->s32_max_value, reg->u32_max_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s32)reg->u32_max_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->s32_min_value = reg->u32_min_value;
		reg->s32_max_value = reg->u32_max_value =
			min_t(u32, reg->s32_max_value, reg->u32_max_value);
	} else if ((s32)reg->u32_min_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->s32_min_value = reg->u32_min_value =
			max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = reg->u32_max_value;
	}
}

static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	__reg32_deduce_bounds(reg);
	__reg64_deduce_bounds(reg);
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	struct tnum var64_off = tnum_intersect(reg->var_off,
					       tnum_range(reg->umin_value,
							  reg->umax_value));
	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
					       tnum_range(reg->u32_min_value,
							  reg->u32_max_value));

	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}
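
/* For illustration, a hypothetical worked example of the bounds machinery:
 * suppose var_off = (value=0x0; mask=0xff), i.e. the low byte is unknown, and
 * a conditional jump has established umin_value=0, umax_value=15. Then
 * tnum_range(0, 15) is (0x0; 0xf), so tnum_intersect() in __reg_bound_offset()
 * narrows var_off to (0x0; 0xf): bits 4-7 are now known to be zero. A
 * subsequent __update_reg_bounds() keeps umax_value at min(15, 0x0 | 0xf) == 15.
 */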

static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
	reg->umin_value = reg->u32_min_value;
	reg->umax_value = reg->u32_max_value;
	/* Attempt to pull 32-bit signed bounds into 64-bit bounds
	 * but must be positive otherwise set to worst-case bounds
	 * and refine later from tnum.
	 */
	if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
		reg->smax_value = reg->s32_max_value;
	else
		reg->smax_value = U32_MAX;
	if (reg->s32_min_value >= 0)
		reg->smin_value = reg->s32_min_value;
	else
		reg->smin_value = 0;
}

static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
{
	/* special case when 64-bit register has upper 32-bit register
	 * zeroed. Typically happens after zext or <<32, >>32 sequence
	 * allowing us to use 32-bit bounds directly,
	 */
	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
		__reg_assign_32_into_64(reg);
	} else {
		/* Otherwise the best we can do is push lower 32bit known and
		 * unknown bits into register (var_off set from jmp logic)
		 * then learn as much as possible from the 64-bit tnum
		 * known and unknown bits. The previous smin/smax bounds are
		 * invalid here because of jmp32 compare so mark them unknown
		 * so they do not impact tnum bounds calculation.
		 */
		__mark_reg64_unbounded(reg);
		__update_reg_bounds(reg);
	}

	/* Intersecting with the old var_off might have improved our bounds
	 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__reg_deduce_bounds(reg);
	__reg_bound_offset(reg);
	__update_reg_bounds(reg);
}

static bool __reg64_bound_s32(s64 a)
{
	if (a > S32_MIN && a < S32_MAX)
		return true;
	return false;
}

static bool __reg64_bound_u32(u64 a)
{
	if (a > U32_MIN && a < U32_MAX)
		return true;
	return false;
}

static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
{
	__mark_reg32_unbounded(reg);

	if (__reg64_bound_s32(reg->smin_value))
		reg->s32_min_value = (s32)reg->smin_value;
	if (__reg64_bound_s32(reg->smax_value))
		reg->s32_max_value = (s32)reg->smax_value;
	if (__reg64_bound_u32(reg->umin_value))
		reg->u32_min_value = (u32)reg->umin_value;
	if (__reg64_bound_u32(reg->umax_value))
		reg->u32_max_value = (u32)reg->umax_value;

	/* Intersecting with the old var_off might have improved our bounds
	 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__reg_deduce_bounds(reg);
	__reg_bound_offset(reg);
	__update_reg_bounds(reg);
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg)
{
	/*
	 * Clear type, id, off, and union(map_ptr, range) and
	 * padding between 'type' and union
	 */
	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
	reg->type = SCALAR_VALUE;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_unknown(env, regs + regno);
}

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg)
{
	__mark_reg_unknown(env, reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_not_init(env, regs + regno);
}

static void mark_btf_ld_reg(struct bpf_verifier_env *env,
			    struct bpf_reg_state *regs, u32 regno,
			    enum bpf_reg_type reg_type, u32 btf_id)
{
	if (reg_type == SCALAR_VALUE) {
		mark_reg_unknown(env, regs, regno);
		return;
	}
	mark_reg_known_zero(env, regs, regno);
	regs[regno].type = PTR_TO_BTF_ID;
	regs[regno].btf_id = btf_id;
}

#define DEF_NOT_SUBREG	(0)
static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
		regs[i].subreg_def = DEF_NOT_SUBREG;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return 0;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}

static int check_subprogs(struct bpf_verifier_env *env)
{
	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret < 0)
		return ret;

	/* determine subprog starts. The end is one before the next starts */
	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		if (!env->bpf_capable) {
			verbose(env,
				"function calls to other bpf functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
			return -EPERM;
		}
		ret = add_subprog(env, i + insn[i].imm + 1);
		if (ret < 0)
			return ret;
	}

	/* Add a fake 'exit' subprog which could simplify subprog iteration
	 * logic. 'subprog_cnt' should not be increased.
	 */
	subprog[env->subprog_cnt].start = insn_cnt;

	if (env->log.level & BPF_LOG_LEVEL2)
		for (i = 0; i < env->subprog_cnt; i++)
			verbose(env, "func#%d @%d\n", i, subprog[i].start);

	/* now check that all jumps are within the same subprog */
	subprog_start = subprog[cur_subprog].start;
	subprog_end = subprog[cur_subprog + 1].start;
	for (i = 0; i < insn_cnt; i++) {
		u8 code = insn[i].code;

		if (code == (BPF_JMP | BPF_CALL) &&
		    insn[i].imm == BPF_FUNC_tail_call &&
		    insn[i].src_reg != BPF_PSEUDO_CALL)
			subprog[cur_subprog].has_tail_call = true;
		if (BPF_CLASS(code) == BPF_LD &&
		    (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
			subprog[cur_subprog].has_ld_abs = true;
		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
			goto next;
		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
			goto next;
		off = i + insn[i].off + 1;
		if (off < subprog_start || off >= subprog_end) {
			verbose(env, "jump out of range from insn %d to %d\n", i, off);
			return -EINVAL;
		}
next:
		if (i == subprog_end - 1) {
			/* to avoid fall-through from one subprog into another
			 * the last insn of the subprog should be either exit
			 * or unconditional jump back
			 */
			if (code != (BPF_JMP | BPF_EXIT) &&
			    code != (BPF_JMP | BPF_JA)) {
				verbose(env, "last insn is not an exit or jmp\n");
				return -EINVAL;
			}
			subprog_start = subprog_end;
			cur_subprog++;
			if (cur_subprog < env->subprog_cnt)
				subprog_end = subprog[cur_subprog + 1].start;
		}
	}
	return 0;
}
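
/* For illustration, a hypothetical 12-insn program with one bpf-to-bpf call
 * (BPF_PSEUDO_CALL, imm=3) at insn 4, targeting insn 4 + 3 + 1 == 8. After
 * check_subprogs():
 *
 *	subprog[0].start == 0;	// entry function, insns 0..7
 *	subprog[1].start == 8;	// callee, insns 8..11
 *	subprog[2].start == 12;	// fake 'exit' subprog (== insn_cnt)
 *
 * Any jump whose target crosses the 8 or 12 boundary is then rejected with
 * "jump out of range".
 */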
1541
679c782d
EC
1542/* Parentage chain of this register (or stack slot) should take care of all
1543 * issues like callee-saved registers, stack slot allocation time, etc.
1544 */
f4d7e40a 1545static int mark_reg_read(struct bpf_verifier_env *env,
679c782d 1546 const struct bpf_reg_state *state,
5327ed3d 1547 struct bpf_reg_state *parent, u8 flag)
f4d7e40a
AS
1548{
1549 bool writes = parent == state->parent; /* Observe write marks */
06ee7115 1550 int cnt = 0;
dc503a8a
EC
1551
1552 while (parent) {
1553 /* if read wasn't screened by an earlier write ... */
679c782d 1554 if (writes && state->live & REG_LIVE_WRITTEN)
dc503a8a 1555 break;
9242b5f5
AS
1556 if (parent->live & REG_LIVE_DONE) {
1557 verbose(env, "verifier BUG type %s var_off %lld off %d\n",
1558 reg_type_str[parent->type],
1559 parent->var_off.value, parent->off);
1560 return -EFAULT;
1561 }
5327ed3d
JW
1562 /* The first condition is more likely to be true than the
1563 * second, checked it first.
1564 */
1565 if ((parent->live & REG_LIVE_READ) == flag ||
1566 parent->live & REG_LIVE_READ64)
25af32da
AS
1567 /* The parentage chain never changes and
1568 * this parent was already marked as LIVE_READ.
1569 * There is no need to keep walking the chain again and
1570 * keep re-marking all parents as LIVE_READ.
1571 * This case happens when the same register is read
1572 * multiple times without writes into it in-between.
5327ed3d
JW
1573 * Also, if parent has the stronger REG_LIVE_READ64 set,
1574 * then no need to set the weak REG_LIVE_READ32.
25af32da
AS
1575 */
1576 break;
dc503a8a 1577 /* ... then we depend on parent's value */
5327ed3d
JW
1578 parent->live |= flag;
1579 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
1580 if (flag == REG_LIVE_READ64)
1581 parent->live &= ~REG_LIVE_READ32;
dc503a8a
EC
1582 state = parent;
1583 parent = state->parent;
f4d7e40a 1584 writes = true;
06ee7115 1585 cnt++;
dc503a8a 1586 }
06ee7115
AS
1587
1588 if (env->longest_mark_read_walk < cnt)
1589 env->longest_mark_read_walk = cnt;
f4d7e40a 1590 return 0;
dc503a8a
EC
1591}
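/* Worked example (sketch, not from the original file): with a parentage
 * chain S0 -> S1 -> S2 (S0 oldest) and a 64-bit read of r6 in S2, the
 * loop above sets REG_LIVE_READ64 on r6 in S1 and then in S0. It stops
 * early if some state in between wrote r6 (REG_LIVE_WRITTEN screens the
 * read) or already carries the mark from a previous walk.
 */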
1592
5327ed3d
JW
1593/* This function is supposed to be used by the following 32-bit optimization
1594 * code only. It returns TRUE if the source or destination register operates
1595 * on 64 bits, otherwise it returns FALSE.
1596 */
1597static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
1598 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
1599{
1600 u8 code, class, op;
1601
1602 code = insn->code;
1603 class = BPF_CLASS(code);
1604 op = BPF_OP(code);
1605 if (class == BPF_JMP) {
1606 /* BPF_EXIT for "main" will reach here. Return TRUE
1607 * conservatively.
1608 */
1609 if (op == BPF_EXIT)
1610 return true;
1611 if (op == BPF_CALL) {
1612 /* BPF to BPF call will reach here because of marking
1613 * caller-saved clobbers with DST_OP_NO_MARK, for which we
1614 * don't care about the register def because they are
1615 * already marked as NOT_INIT.
1616 */
1617 if (insn->src_reg == BPF_PSEUDO_CALL)
1618 return false;
1619 /* Helper call will reach here because of arg type
1620 * check, conservatively return TRUE.
1621 */
1622 if (t == SRC_OP)
1623 return true;
1624
1625 return false;
1626 }
1627 }
1628
1629 if (class == BPF_ALU64 || class == BPF_JMP ||
1630 /* BPF_END always uses the BPF_ALU class. */
1631 (class == BPF_ALU && op == BPF_END && insn->imm == 64))
1632 return true;
1633
1634 if (class == BPF_ALU || class == BPF_JMP32)
1635 return false;
1636
1637 if (class == BPF_LDX) {
1638 if (t != SRC_OP)
1639 return BPF_SIZE(code) == BPF_DW;
1640 /* LDX source must be ptr. */
1641 return true;
1642 }
1643
1644 if (class == BPF_STX) {
1645 if (reg->type != SCALAR_VALUE)
1646 return true;
1647 return BPF_SIZE(code) == BPF_DW;
1648 }
1649
1650 if (class == BPF_LD) {
1651 u8 mode = BPF_MODE(code);
1652
1653 /* LD_IMM64 */
1654 if (mode == BPF_IMM)
1655 return true;
1656
1657 /* Both LD_IND and LD_ABS return 32-bit data. */
1658 if (t != SRC_OP)
1659 return false;
1660
1661 /* Implicit ctx ptr. */
1662 if (regno == BPF_REG_6)
1663 return true;
1664
1665 /* Explicit source could be any width. */
1666 return true;
1667 }
1668
1669 if (class == BPF_ST)
1670 /* The only source register for BPF_ST is a ptr. */
1671 return true;
1672
1673 /* Conservatively return true at default. */
1674 return true;
1675}
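/* A few illustrative classifications under the rules above (sketch,
 * not part of the original file; macros are from <linux/filter.h>):
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1)		64-bit (BPF_ALU64)
 *	BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1)		32-bit (BPF_ALU)
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0)	dst def: 32-bit,
 *							src use: 64-bit (ptr)
 *	BPF_LD_IMM64(BPF_REG_1, 42)			64-bit (BPF_IMM mode)
 */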
1676
b325fbca
JW
1677/* Return TRUE if INSN doesn't define an explicit value. */
1678static bool insn_no_def(struct bpf_insn *insn)
1679{
1680 u8 class = BPF_CLASS(insn->code);
1681
1682 return (class == BPF_JMP || class == BPF_JMP32 ||
1683 class == BPF_STX || class == BPF_ST);
1684}
1685
1686/* Return TRUE if INSN explicitly defines a 32-bit value. */
1687static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
1688{
1689 if (insn_no_def(insn))
1690 return false;
1691
1692 return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
1693}
1694
5327ed3d
JW
1695static void mark_insn_zext(struct bpf_verifier_env *env,
1696 struct bpf_reg_state *reg)
1697{
1698 s32 def_idx = reg->subreg_def;
1699
1700 if (def_idx == DEF_NOT_SUBREG)
1701 return;
1702
1703 env->insn_aux_data[def_idx - 1].zext_dst = true;
1704 /* The dst will be zero extended, so won't be sub-register anymore. */
1705 reg->subreg_def = DEF_NOT_SUBREG;
1706}
1707
dc503a8a 1708static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
17a52670
AS
1709 enum reg_arg_type t)
1710{
f4d7e40a
AS
1711 struct bpf_verifier_state *vstate = env->cur_state;
1712 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5327ed3d 1713 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
c342dc10 1714 struct bpf_reg_state *reg, *regs = state->regs;
5327ed3d 1715 bool rw64;
dc503a8a 1716
17a52670 1717 if (regno >= MAX_BPF_REG) {
61bd5218 1718 verbose(env, "R%d is invalid\n", regno);
17a52670
AS
1719 return -EINVAL;
1720 }
1721
c342dc10 1722 reg = &regs[regno];
5327ed3d 1723 rw64 = is_reg64(env, insn, regno, reg, t);
17a52670
AS
1724 if (t == SRC_OP) {
1725 /* check whether register used as source operand can be read */
c342dc10 1726 if (reg->type == NOT_INIT) {
61bd5218 1727 verbose(env, "R%d !read_ok\n", regno);
17a52670
AS
1728 return -EACCES;
1729 }
679c782d 1730 /* We don't need to worry about FP liveness because it's read-only */
c342dc10
JW
1731 if (regno == BPF_REG_FP)
1732 return 0;
1733
5327ed3d
JW
1734 if (rw64)
1735 mark_insn_zext(env, reg);
1736
1737 return mark_reg_read(env, reg, reg->parent,
1738 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
17a52670
AS
1739 } else {
1740 /* check whether register used as dest operand can be written to */
1741 if (regno == BPF_REG_FP) {
61bd5218 1742 verbose(env, "frame pointer is read only\n");
17a52670
AS
1743 return -EACCES;
1744 }
c342dc10 1745 reg->live |= REG_LIVE_WRITTEN;
5327ed3d 1746 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
17a52670 1747 if (t == DST_OP)
61bd5218 1748 mark_reg_unknown(env, regs, regno);
17a52670
AS
1749 }
1750 return 0;
1751}
1752
b5dc0163
AS
1753/* for any branch, call, exit record the history of jmps in the given state */
1754static int push_jmp_history(struct bpf_verifier_env *env,
1755 struct bpf_verifier_state *cur)
1756{
1757 u32 cnt = cur->jmp_history_cnt;
1758 struct bpf_idx_pair *p;
1759
1760 cnt++;
1761 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
1762 if (!p)
1763 return -ENOMEM;
1764 p[cnt - 1].idx = env->insn_idx;
1765 p[cnt - 1].prev_idx = env->prev_insn_idx;
1766 cur->jmp_history = p;
1767 cur->jmp_history_cnt = cnt;
1768 return 0;
1769}
1770
1771/* Backtrack one insn at a time. If idx is not at the top of recorded
1772 * history then the previous instruction came from straight-line execution.
1773 */
1774static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
1775 u32 *history)
1776{
1777 u32 cnt = *history;
1778
1779 if (cnt && st->jmp_history[cnt - 1].idx == i) {
1780 i = st->jmp_history[cnt - 1].prev_idx;
1781 (*history)--;
1782 } else {
1783 i--;
1784 }
1785 return i;
1786}
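/* Worked example (sketch, not from the original file): with
 * jmp_history = [{idx = 5, prev_idx = 2}, {idx = 9, prev_idx = 7}] and
 * a walk starting at i = 9, cnt = 2: the top entry matches, so we jump
 * back to 7 and pop it; 7 and 6 came from straight-line execution, so
 * i just decrements (7 -> 6 -> 5); at i = 5 the remaining entry matches
 * and the walk jumps back to 2.
 */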
1787
1788/* For given verifier state backtrack_insn() is called from the last insn to
1789 * the first insn. Its purpose is to compute a bitmask of registers and
1790 * stack slots that need precision in the parent verifier state.
1791 */
1792static int backtrack_insn(struct bpf_verifier_env *env, int idx,
1793 u32 *reg_mask, u64 *stack_mask)
1794{
1795 const struct bpf_insn_cbs cbs = {
1796 .cb_print = verbose,
1797 .private_data = env,
1798 };
1799 struct bpf_insn *insn = env->prog->insnsi + idx;
1800 u8 class = BPF_CLASS(insn->code);
1801 u8 opcode = BPF_OP(insn->code);
1802 u8 mode = BPF_MODE(insn->code);
1803 u32 dreg = 1u << insn->dst_reg;
1804 u32 sreg = 1u << insn->src_reg;
1805 u32 spi;
1806
1807 if (insn->code == 0)
1808 return 0;
1809 if (env->log.level & BPF_LOG_LEVEL) {
1810 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
1811 verbose(env, "%d: ", idx);
1812 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
1813 }
1814
1815 if (class == BPF_ALU || class == BPF_ALU64) {
1816 if (!(*reg_mask & dreg))
1817 return 0;
1818 if (opcode == BPF_MOV) {
1819 if (BPF_SRC(insn->code) == BPF_X) {
1820 /* dreg = sreg
1821 * dreg needs precision after this insn
1822 * sreg needs precision before this insn
1823 */
1824 *reg_mask &= ~dreg;
1825 *reg_mask |= sreg;
1826 } else {
1827 /* dreg = K
1828 * dreg needs precision after this insn.
1829 * Corresponding register is already marked
1830 * as precise=true in this verifier state.
1831 * No further markings in parent are necessary
1832 */
1833 *reg_mask &= ~dreg;
1834 }
1835 } else {
1836 if (BPF_SRC(insn->code) == BPF_X) {
1837 /* dreg += sreg
1838 * both dreg and sreg need precision
1839 * before this insn
1840 */
1841 *reg_mask |= sreg;
1842 } /* else dreg += K
1843 * dreg still needs precision before this insn
1844 */
1845 }
1846 } else if (class == BPF_LDX) {
1847 if (!(*reg_mask & dreg))
1848 return 0;
1849 *reg_mask &= ~dreg;
1850
1851 /* scalars can only be spilled into stack w/o losing precision.
1852 * Load from any other memory can be zero extended.
1853 * The desire to keep that precision is already indicated
1854 * by 'precise' mark in corresponding register of this state.
1855 * No further tracking necessary.
1856 */
1857 if (insn->src_reg != BPF_REG_FP)
1858 return 0;
1859 if (BPF_SIZE(insn->code) != BPF_DW)
1860 return 0;
1861
1862 /* dreg = *(u64 *)[fp - off] was a fill from the stack.
1863 * that [fp - off] slot contains scalar that needs to be
1864 * tracked with precision
1865 */
1866 spi = (-insn->off - 1) / BPF_REG_SIZE;
1867 if (spi >= 64) {
1868 verbose(env, "BUG spi %d\n", spi);
1869 WARN_ONCE(1, "verifier backtracking bug");
1870 return -EFAULT;
1871 }
1872 *stack_mask |= 1ull << spi;
b3b50f05 1873 } else if (class == BPF_STX || class == BPF_ST) {
b5dc0163 1874 if (*reg_mask & dreg)
b3b50f05 1875 /* stx & st shouldn't be using _scalar_ dst_reg
b5dc0163
AS
1876 * to access memory. It means backtracking
1877 * encountered a case of pointer subtraction.
1878 */
1879 return -ENOTSUPP;
1880 /* scalars can only be spilled into stack */
1881 if (insn->dst_reg != BPF_REG_FP)
1882 return 0;
1883 if (BPF_SIZE(insn->code) != BPF_DW)
1884 return 0;
1885 spi = (-insn->off - 1) / BPF_REG_SIZE;
1886 if (spi >= 64) {
1887 verbose(env, "BUG spi %d\n", spi);
1888 WARN_ONCE(1, "verifier backtracking bug");
1889 return -EFAULT;
1890 }
1891 if (!(*stack_mask & (1ull << spi)))
1892 return 0;
1893 *stack_mask &= ~(1ull << spi);
b3b50f05
AN
1894 if (class == BPF_STX)
1895 *reg_mask |= sreg;
b5dc0163
AS
1896 } else if (class == BPF_JMP || class == BPF_JMP32) {
1897 if (opcode == BPF_CALL) {
1898 if (insn->src_reg == BPF_PSEUDO_CALL)
1899 return -ENOTSUPP;
1900 /* regular helper call sets R0 */
1901 *reg_mask &= ~1;
1902 if (*reg_mask & 0x3f) {
1903 /* if backtracking was looking for registers R1-R5
1904 * they should have been found already.
1905 */
1906 verbose(env, "BUG regs %x\n", *reg_mask);
1907 WARN_ONCE(1, "verifier backtracking bug");
1908 return -EFAULT;
1909 }
1910 } else if (opcode == BPF_EXIT) {
1911 return -ENOTSUPP;
1912 }
1913 } else if (class == BPF_LD) {
1914 if (!(*reg_mask & dreg))
1915 return 0;
1916 *reg_mask &= ~dreg;
1917 /* It's ld_imm64 or ld_abs or ld_ind.
1918 * For ld_imm64 no further tracking of precision
1919 * into parent is necessary
1920 */
1921 if (mode == BPF_IND || mode == BPF_ABS)
1922 /* to be analyzed */
1923 return -ENOTSUPP;
b5dc0163
AS
1924 }
1925 return 0;
1926}
1927
1928/* the scalar precision tracking algorithm:
1929 * . at the start all registers have precise=false.
1930 * . scalar ranges are tracked as normal through alu and jmp insns.
1931 * . once precise value of the scalar register is used in:
1932 * . ptr + scalar alu
1933 * . if (scalar cond K|scalar)
1934 * . helper_call(.., scalar, ...) where ARG_CONST is expected
1935 * backtrack through the verifier states and mark all registers and
1936 * stack slots with spilled constants that these scalar registers
1937 * should be precise.
1938 * . during state pruning two registers (or spilled stack slots)
1939 * are equivalent if both are not precise.
1940 *
1941 * Note the verifier cannot simply walk register parentage chain,
1942 * since many different registers and stack slots could have been
1943 * used to compute single precise scalar.
1944 *
1945 * The approach of starting with precise=true for all registers and then
1946 * backtrack to mark a register as not precise when the verifier detects
1947 * that the program doesn't care about the specific value (e.g., when a
1948 * helper takes a register as an ARG_ANYTHING parameter) is not safe.
1949 *
1950 * It's ok to walk a single parentage chain of the verifier states.
1951 * It's possible that this backtracking will go all the way till 1st insn.
1952 * All other branches will be explored for needing precision later.
1953 *
1954 * The backtracking needs to deal with cases like:
1955 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
1956 * r9 -= r8
1957 * r5 = r9
1958 * if r5 > 0x79f goto pc+7
1959 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
1960 * r5 += 1
1961 * ...
1962 * call bpf_perf_event_output#25
1963 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
1964 *
1965 * and this case:
1966 * r6 = 1
1967 * call foo // uses callee's r6 inside to compute r0
1968 * r0 += r6
1969 * if r0 == 0 goto
1970 *
1971 * to track above reg_mask/stack_mask needs to be independent for each frame.
1972 *
1973 * Also if parent's curframe > frame where backtracking started,
1974 * the verifier needs to mark registers in both frames, otherwise callees
1975 * may incorrectly prune callers. This is similar to
1976 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
1977 *
1978 * For now backtracking falls back into conservative marking.
1979 */
1980static void mark_all_scalars_precise(struct bpf_verifier_env *env,
1981 struct bpf_verifier_state *st)
1982{
1983 struct bpf_func_state *func;
1984 struct bpf_reg_state *reg;
1985 int i, j;
1986
1987 /* big hammer: mark all scalars precise in this path.
1988 * pop_stack may still get !precise scalars.
1989 */
1990 for (; st; st = st->parent)
1991 for (i = 0; i <= st->curframe; i++) {
1992 func = st->frame[i];
1993 for (j = 0; j < BPF_REG_FP; j++) {
1994 reg = &func->regs[j];
1995 if (reg->type != SCALAR_VALUE)
1996 continue;
1997 reg->precise = true;
1998 }
1999 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
2000 if (func->stack[j].slot_type[0] != STACK_SPILL)
2001 continue;
2002 reg = &func->stack[j].spilled_ptr;
2003 if (reg->type != SCALAR_VALUE)
2004 continue;
2005 reg->precise = true;
2006 }
2007 }
2008}
2009
a3ce685d
AS
2010static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
2011 int spi)
b5dc0163
AS
2012{
2013 struct bpf_verifier_state *st = env->cur_state;
2014 int first_idx = st->first_insn_idx;
2015 int last_idx = env->insn_idx;
2016 struct bpf_func_state *func;
2017 struct bpf_reg_state *reg;
a3ce685d
AS
2018 u32 reg_mask = regno >= 0 ? 1u << regno : 0;
2019 u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
b5dc0163 2020 bool skip_first = true;
a3ce685d 2021 bool new_marks = false;
b5dc0163
AS
2022 int i, err;
2023
2c78ee89 2024 if (!env->bpf_capable)
b5dc0163
AS
2025 return 0;
2026
2027 func = st->frame[st->curframe];
a3ce685d
AS
2028 if (regno >= 0) {
2029 reg = &func->regs[regno];
2030 if (reg->type != SCALAR_VALUE) {
2031 WARN_ONCE(1, "backtracing misuse");
2032 return -EFAULT;
2033 }
2034 if (!reg->precise)
2035 new_marks = true;
2036 else
2037 reg_mask = 0;
2038 reg->precise = true;
b5dc0163 2039 }
b5dc0163 2040
a3ce685d
AS
2041 while (spi >= 0) {
2042 if (func->stack[spi].slot_type[0] != STACK_SPILL) {
2043 stack_mask = 0;
2044 break;
2045 }
2046 reg = &func->stack[spi].spilled_ptr;
2047 if (reg->type != SCALAR_VALUE) {
2048 stack_mask = 0;
2049 break;
2050 }
2051 if (!reg->precise)
2052 new_marks = true;
2053 else
2054 stack_mask = 0;
2055 reg->precise = true;
2056 break;
2057 }
2058
2059 if (!new_marks)
2060 return 0;
2061 if (!reg_mask && !stack_mask)
2062 return 0;
b5dc0163
AS
2063 for (;;) {
2064 DECLARE_BITMAP(mask, 64);
b5dc0163
AS
2065 u32 history = st->jmp_history_cnt;
2066
2067 if (env->log.level & BPF_LOG_LEVEL)
2068 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
2069 for (i = last_idx;;) {
2070 if (skip_first) {
2071 err = 0;
2072 skip_first = false;
2073 } else {
2074 err = backtrack_insn(env, i, &reg_mask, &stack_mask);
2075 }
2076 if (err == -ENOTSUPP) {
2077 mark_all_scalars_precise(env, st);
2078 return 0;
2079 } else if (err) {
2080 return err;
2081 }
2082 if (!reg_mask && !stack_mask)
2083 /* Found assignment(s) into tracked register in this state.
2084 * Since this state is already marked, just return.
2085 * Nothing to be tracked further in the parent state.
2086 */
2087 return 0;
2088 if (i == first_idx)
2089 break;
2090 i = get_prev_insn_idx(st, i, &history);
2091 if (i >= env->prog->len) {
2092 /* This can happen if backtracking reached insn 0
2093 * and there are still reg_mask or stack_mask
2094 * to backtrack.
2095 * It means the backtracking missed the spot where
2096 * particular register was initialized with a constant.
2097 */
2098 verbose(env, "BUG backtracking idx %d\n", i);
2099 WARN_ONCE(1, "verifier backtracking bug");
2100 return -EFAULT;
2101 }
2102 }
2103 st = st->parent;
2104 if (!st)
2105 break;
2106
a3ce685d 2107 new_marks = false;
b5dc0163
AS
2108 func = st->frame[st->curframe];
2109 bitmap_from_u64(mask, reg_mask);
2110 for_each_set_bit(i, mask, 32) {
2111 reg = &func->regs[i];
a3ce685d
AS
2112 if (reg->type != SCALAR_VALUE) {
2113 reg_mask &= ~(1u << i);
b5dc0163 2114 continue;
a3ce685d 2115 }
b5dc0163
AS
2116 if (!reg->precise)
2117 new_marks = true;
2118 reg->precise = true;
2119 }
2120
2121 bitmap_from_u64(mask, stack_mask);
2122 for_each_set_bit(i, mask, 64) {
2123 if (i >= func->allocated_stack / BPF_REG_SIZE) {
2339cd6c
AS
2124 /* the sequence of instructions:
2125 * 2: (bf) r3 = r10
2126 * 3: (7b) *(u64 *)(r3 -8) = r0
2127 * 4: (79) r4 = *(u64 *)(r10 -8)
2128 * doesn't contain jmps. It's backtracked
2129 * as a single block.
2130 * During backtracking insn 3 is not recognized as
2131 * stack access, so at the end of backtracking
2132 * stack slot fp-8 is still marked in stack_mask.
2133 * However the parent state may not have accessed
2134 * fp-8 and it's "unallocated" stack space.
2135 * In such a case, fall back to conservative marking.
b5dc0163 2136 */
2339cd6c
AS
2137 mark_all_scalars_precise(env, st);
2138 return 0;
b5dc0163
AS
2139 }
2140
a3ce685d
AS
2141 if (func->stack[i].slot_type[0] != STACK_SPILL) {
2142 stack_mask &= ~(1ull << i);
b5dc0163 2143 continue;
a3ce685d 2144 }
b5dc0163 2145 reg = &func->stack[i].spilled_ptr;
a3ce685d
AS
2146 if (reg->type != SCALAR_VALUE) {
2147 stack_mask &= ~(1ull << i);
b5dc0163 2148 continue;
a3ce685d 2149 }
b5dc0163
AS
2150 if (!reg->precise)
2151 new_marks = true;
2152 reg->precise = true;
2153 }
2154 if (env->log.level & BPF_LOG_LEVEL) {
2155 print_verifier_state(env, func);
2156 verbose(env, "parent %s regs=%x stack=%llx marks\n",
2157 new_marks ? "didn't have" : "already had",
2158 reg_mask, stack_mask);
2159 }
2160
a3ce685d
AS
2161 if (!reg_mask && !stack_mask)
2162 break;
b5dc0163
AS
2163 if (!new_marks)
2164 break;
2165
2166 last_idx = st->last_insn_idx;
2167 first_idx = st->first_insn_idx;
2168 }
2169 return 0;
2170}
2171
a3ce685d
AS
2172static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
2173{
2174 return __mark_chain_precision(env, regno, -1);
2175}
2176
2177static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
2178{
2179 return __mark_chain_precision(env, -1, spi);
2180}
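/* Usage sketch (mirroring how these wrappers are invoked elsewhere in
 * this file, e.g. when a constant scalar is spilled or flows into
 * pointer arithmetic; the surrounding context here is hypothetical):
 *
 *	err = mark_chain_precision(env, value_regno);
 *	if (err)
 *		return err;
 */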
b5dc0163 2181
1be7f75d
AS
2182static bool is_spillable_regtype(enum bpf_reg_type type)
2183{
2184 switch (type) {
2185 case PTR_TO_MAP_VALUE:
2186 case PTR_TO_MAP_VALUE_OR_NULL:
2187 case PTR_TO_STACK:
2188 case PTR_TO_CTX:
969bf05e 2189 case PTR_TO_PACKET:
de8f3a83 2190 case PTR_TO_PACKET_META:
969bf05e 2191 case PTR_TO_PACKET_END:
d58e468b 2192 case PTR_TO_FLOW_KEYS:
1be7f75d 2193 case CONST_PTR_TO_MAP:
c64b7983
JS
2194 case PTR_TO_SOCKET:
2195 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
2196 case PTR_TO_SOCK_COMMON:
2197 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
2198 case PTR_TO_TCP_SOCK:
2199 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 2200 case PTR_TO_XDP_SOCK:
65726b5b 2201 case PTR_TO_BTF_ID:
b121b341 2202 case PTR_TO_BTF_ID_OR_NULL:
afbf21dc
YS
2203 case PTR_TO_RDONLY_BUF:
2204 case PTR_TO_RDONLY_BUF_OR_NULL:
2205 case PTR_TO_RDWR_BUF:
2206 case PTR_TO_RDWR_BUF_OR_NULL:
1be7f75d
AS
2207 return true;
2208 default:
2209 return false;
2210 }
2211}
2212
cc2b14d5
AS
2213/* Does this register contain a constant zero? */
2214static bool register_is_null(struct bpf_reg_state *reg)
2215{
2216 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
2217}
2218
f7cf25b2
AS
2219static bool register_is_const(struct bpf_reg_state *reg)
2220{
2221 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
2222}
2223
6e7e63cb
JH
2224static bool __is_pointer_value(bool allow_ptr_leaks,
2225 const struct bpf_reg_state *reg)
2226{
2227 if (allow_ptr_leaks)
2228 return false;
2229
2230 return reg->type != SCALAR_VALUE;
2231}
2232
f7cf25b2
AS
2233static void save_register_state(struct bpf_func_state *state,
2234 int spi, struct bpf_reg_state *reg)
2235{
2236 int i;
2237
2238 state->stack[spi].spilled_ptr = *reg;
2239 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2240
2241 for (i = 0; i < BPF_REG_SIZE; i++)
2242 state->stack[spi].slot_type[i] = STACK_SPILL;
2243}
2244
17a52670
AS
2245/* check_stack_read/write functions track spill/fill of registers,
2246 * stack boundary and alignment are checked in check_mem_access()
2247 */
61bd5218 2248static int check_stack_write(struct bpf_verifier_env *env,
f4d7e40a 2249 struct bpf_func_state *state, /* func where register points to */
af86ca4e 2250 int off, int size, int value_regno, int insn_idx)
17a52670 2251{
f4d7e40a 2252 struct bpf_func_state *cur; /* state of the current function */
638f5b90 2253 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
b5dc0163 2254 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
f7cf25b2 2255 struct bpf_reg_state *reg = NULL;
638f5b90 2256
f4d7e40a 2257 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
fd978bf7 2258 state->acquired_refs, true);
638f5b90
AS
2259 if (err)
2260 return err;
9c399760
AS
2261 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
2262 * so it's aligned access and [off, off + size) are within stack limits
2263 */
638f5b90
AS
2264 if (!env->allow_ptr_leaks &&
2265 state->stack[spi].slot_type[0] == STACK_SPILL &&
2266 size != BPF_REG_SIZE) {
2267 verbose(env, "attempt to corrupt spilled pointer on stack\n");
2268 return -EACCES;
2269 }
17a52670 2270
f4d7e40a 2271 cur = env->cur_state->frame[env->cur_state->curframe];
f7cf25b2
AS
2272 if (value_regno >= 0)
2273 reg = &cur->regs[value_regno];
17a52670 2274
f7cf25b2 2275 if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
2c78ee89 2276 !register_is_null(reg) && env->bpf_capable) {
b5dc0163
AS
2277 if (dst_reg != BPF_REG_FP) {
2278 /* The backtracking logic can only recognize explicit
2279 * stack slot addresses like [fp - 8]. Other spills of
2280 * a scalar via a different register have to be conservative.
2281 * Backtrack from here and mark as precise all registers
2282 * that contributed to 'reg' being a constant.
2283 */
2284 err = mark_chain_precision(env, value_regno);
2285 if (err)
2286 return err;
2287 }
f7cf25b2
AS
2288 save_register_state(state, spi, reg);
2289 } else if (reg && is_spillable_regtype(reg->type)) {
17a52670 2290 /* register containing pointer is being spilled into stack */
9c399760 2291 if (size != BPF_REG_SIZE) {
f7cf25b2 2292 verbose_linfo(env, insn_idx, "; ");
61bd5218 2293 verbose(env, "invalid size of register spill\n");
17a52670
AS
2294 return -EACCES;
2295 }
2296
f7cf25b2 2297 if (state != cur && reg->type == PTR_TO_STACK) {
f4d7e40a
AS
2298 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
2299 return -EINVAL;
2300 }
2301
2c78ee89 2302 if (!env->bypass_spec_v4) {
f7cf25b2 2303 bool sanitize = false;
17a52670 2304
f7cf25b2
AS
2305 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
2306 register_is_const(&state->stack[spi].spilled_ptr))
2307 sanitize = true;
2308 for (i = 0; i < BPF_REG_SIZE; i++)
2309 if (state->stack[spi].slot_type[i] == STACK_MISC) {
2310 sanitize = true;
2311 break;
2312 }
2313 if (sanitize) {
af86ca4e
AS
2314 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
2315 int soff = (-spi - 1) * BPF_REG_SIZE;
2316
2317 /* detected reuse of integer stack slot with a pointer
2318 * which means either llvm is reusing stack slot or
2319 * an attacker is trying to exploit CVE-2018-3639
2320 * (speculative store bypass)
2321 * Have to sanitize that slot with preemptive
2322 * store of zero.
2323 */
2324 if (*poff && *poff != soff) {
2325 /* disallow programs where a single insn stores
2326 * into two different stack slots, since the verifier
2327 * cannot sanitize them
2328 */
2329 verbose(env,
2330 "insn %d cannot access two stack slots fp%d and fp%d",
2331 insn_idx, *poff, soff);
2332 return -EINVAL;
2333 }
2334 *poff = soff;
2335 }
af86ca4e 2336 }
f7cf25b2 2337 save_register_state(state, spi, reg);
9c399760 2338 } else {
cc2b14d5
AS
2339 u8 type = STACK_MISC;
2340
679c782d
EC
2341 /* regular write of data into stack destroys any spilled ptr */
2342 state->stack[spi].spilled_ptr.type = NOT_INIT;
0bae2d4d
JW
2343 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
2344 if (state->stack[spi].slot_type[0] == STACK_SPILL)
2345 for (i = 0; i < BPF_REG_SIZE; i++)
2346 state->stack[spi].slot_type[i] = STACK_MISC;
9c399760 2347
cc2b14d5
AS
2348 /* only mark the slot as written if all 8 bytes were written
2349 * otherwise read propagation may incorrectly stop too soon
2350 * when stack slots are partially written.
2351 * This heuristic means that read propagation will be
2352 * conservative, since it will add reg_live_read marks
2353 * to stack slots all the way to the first state when a program
2354 * writes+reads less than 8 bytes.
2355 */
2356 if (size == BPF_REG_SIZE)
2357 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2358
2359 /* when we zero initialize stack slots mark them as such */
b5dc0163
AS
2360 if (reg && register_is_null(reg)) {
2361 /* backtracking doesn't work for STACK_ZERO yet. */
2362 err = mark_chain_precision(env, value_regno);
2363 if (err)
2364 return err;
cc2b14d5 2365 type = STACK_ZERO;
b5dc0163 2366 }
cc2b14d5 2367
0bae2d4d 2368 /* Mark slots affected by this stack write. */
9c399760 2369 for (i = 0; i < size; i++)
638f5b90 2370 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
cc2b14d5 2371 type;
17a52670
AS
2372 }
2373 return 0;
2374}
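/* Illustrative sketch, not part of the original file: spilling a
 * pointer with a narrow store trips the "invalid size of register
 * spill" check above, since pointer spills must be BPF_REG_SIZE wide.
 * Assuming R1 still holds the ctx pointer:
 *
 *	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -8)	rejected
 *	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8)	accepted and
 *							tracked as STACK_SPILL
 */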
2375
61bd5218 2376static int check_stack_read(struct bpf_verifier_env *env,
f4d7e40a
AS
2377 struct bpf_func_state *reg_state /* func where register points to */,
2378 int off, int size, int value_regno)
17a52670 2379{
f4d7e40a
AS
2380 struct bpf_verifier_state *vstate = env->cur_state;
2381 struct bpf_func_state *state = vstate->frame[vstate->curframe];
638f5b90 2382 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
f7cf25b2 2383 struct bpf_reg_state *reg;
638f5b90 2384 u8 *stype;
17a52670 2385
f4d7e40a 2386 if (reg_state->allocated_stack <= slot) {
638f5b90
AS
2387 verbose(env, "invalid read from stack off %d+0 size %d\n",
2388 off, size);
2389 return -EACCES;
2390 }
f4d7e40a 2391 stype = reg_state->stack[spi].slot_type;
f7cf25b2 2392 reg = &reg_state->stack[spi].spilled_ptr;
17a52670 2393
638f5b90 2394 if (stype[0] == STACK_SPILL) {
9c399760 2395 if (size != BPF_REG_SIZE) {
f7cf25b2
AS
2396 if (reg->type != SCALAR_VALUE) {
2397 verbose_linfo(env, env->insn_idx, "; ");
2398 verbose(env, "invalid size of register fill\n");
2399 return -EACCES;
2400 }
2401 if (value_regno >= 0) {
2402 mark_reg_unknown(env, state->regs, value_regno);
2403 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2404 }
2405 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2406 return 0;
17a52670 2407 }
9c399760 2408 for (i = 1; i < BPF_REG_SIZE; i++) {
638f5b90 2409 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
61bd5218 2410 verbose(env, "corrupted spill memory\n");
17a52670
AS
2411 return -EACCES;
2412 }
2413 }
2414
dc503a8a 2415 if (value_regno >= 0) {
17a52670 2416 /* restore register state from stack */
f7cf25b2 2417 state->regs[value_regno] = *reg;
2f18f62e
AS
2418 /* mark reg as written since spilled pointer state likely
2419 * has its liveness marks cleared by is_state_visited()
2420 * which resets stack/reg liveness for state transitions
2421 */
2422 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
6e7e63cb
JH
2423 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
2424 /* If value_regno==-1, the caller is asking us whether
2425 * it is acceptable to use this value as a SCALAR_VALUE
2426 * (e.g. for XADD).
2427 * We must not allow unprivileged callers to do that
2428 * with spilled pointers.
2429 */
2430 verbose(env, "leaking pointer from stack off %d\n",
2431 off);
2432 return -EACCES;
dc503a8a 2433 }
f7cf25b2 2434 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
17a52670 2435 } else {
cc2b14d5
AS
2436 int zeros = 0;
2437
17a52670 2438 for (i = 0; i < size; i++) {
cc2b14d5
AS
2439 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
2440 continue;
2441 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
2442 zeros++;
2443 continue;
17a52670 2444 }
cc2b14d5
AS
2445 verbose(env, "invalid read from stack off %d+%d size %d\n",
2446 off, i, size);
2447 return -EACCES;
2448 }
f7cf25b2 2449 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
cc2b14d5
AS
2450 if (value_regno >= 0) {
2451 if (zeros == size) {
2452 /* any size read into register is zero extended,
2453 * so the whole register == const_zero
2454 */
2455 __mark_reg_const_zero(&state->regs[value_regno]);
b5dc0163
AS
2456 /* backtracking doesn't support STACK_ZERO yet,
2457 * so mark it precise here, so that later
2458 * backtracking can stop here.
2459 * Backtracking may not need this if this register
2460 * doesn't participate in pointer adjustment.
2461 * Forward propagation of precise flag is not
2462 * necessary either. This mark is only to stop
2463 * backtracking. Any register that contributed
2464 * to const 0 was marked precise before spill.
2465 */
2466 state->regs[value_regno].precise = true;
cc2b14d5
AS
2467 } else {
2468 /* have read misc data from the stack */
2469 mark_reg_unknown(env, state->regs, value_regno);
2470 }
2471 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
17a52670 2472 }
17a52670 2473 }
f7cf25b2 2474 return 0;
17a52670
AS
2475}
2476
e4298d25
DB
2477static int check_stack_access(struct bpf_verifier_env *env,
2478 const struct bpf_reg_state *reg,
2479 int off, int size)
2480{
2481 /* Stack accesses must be at a fixed offset, so that we
2482 * can determine what type of data was returned. See
2483 * check_stack_read().
2484 */
2485 if (!tnum_is_const(reg->var_off)) {
2486 char tn_buf[48];
2487
2488 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1fbd20f8 2489 verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
e4298d25
DB
2490 tn_buf, off, size);
2491 return -EACCES;
2492 }
2493
2494 if (off >= 0 || off < -MAX_BPF_STACK) {
2495 verbose(env, "invalid stack off=%d size=%d\n", off, size);
2496 return -EACCES;
2497 }
2498
2499 return 0;
2500}
2501
591fe988
DB
2502static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
2503 int off, int size, enum bpf_access_type type)
2504{
2505 struct bpf_reg_state *regs = cur_regs(env);
2506 struct bpf_map *map = regs[regno].map_ptr;
2507 u32 cap = bpf_map_flags_to_cap(map);
2508
2509 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
2510 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
2511 map->value_size, off, size);
2512 return -EACCES;
2513 }
2514
2515 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
2516 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
2517 map->value_size, off, size);
2518 return -EACCES;
2519 }
2520
2521 return 0;
2522}
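/* Example (sketch, not from the original file): a map created with
 * map_flags including BPF_F_RDONLY_PROG gets BPF_MAP_CAN_READ but not
 * BPF_MAP_CAN_WRITE from bpf_map_flags_to_cap(), so any program store
 * into its value lands in the first branch above and fails with
 * "write into map forbidden".
 */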
2523
457f4436
AN
2524/* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
2525static int __check_mem_access(struct bpf_verifier_env *env, int regno,
2526 int off, int size, u32 mem_size,
2527 bool zero_size_allowed)
17a52670 2528{
457f4436
AN
2529 bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
2530 struct bpf_reg_state *reg;
2531
2532 if (off >= 0 && size_ok && (u64)off + size <= mem_size)
2533 return 0;
17a52670 2534
457f4436
AN
2535 reg = &cur_regs(env)[regno];
2536 switch (reg->type) {
2537 case PTR_TO_MAP_VALUE:
61bd5218 2538 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
457f4436
AN
2539 mem_size, off, size);
2540 break;
2541 case PTR_TO_PACKET:
2542 case PTR_TO_PACKET_META:
2543 case PTR_TO_PACKET_END:
2544 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
2545 off, size, regno, reg->id, off, mem_size);
2546 break;
2547 case PTR_TO_MEM:
2548 default:
2549 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
2550 mem_size, off, size);
17a52670 2551 }
457f4436
AN
2552
2553 return -EACCES;
17a52670
AS
2554}
2555
457f4436
AN
2556/* check read/write into a memory region with possible variable offset */
2557static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
2558 int off, int size, u32 mem_size,
2559 bool zero_size_allowed)
dbcfe5f7 2560{
f4d7e40a
AS
2561 struct bpf_verifier_state *vstate = env->cur_state;
2562 struct bpf_func_state *state = vstate->frame[vstate->curframe];
dbcfe5f7
GB
2563 struct bpf_reg_state *reg = &state->regs[regno];
2564 int err;
2565
457f4436 2566 /* We may have adjusted the register pointing to memory region, so we
f1174f77
EC
2567 * need to try adding each of min_value and max_value to off
2568 * to make sure our theoretical access will be safe.
dbcfe5f7 2569 */
06ee7115 2570 if (env->log.level & BPF_LOG_LEVEL)
61bd5218 2571 print_verifier_state(env, state);
b7137c4e 2572
dbcfe5f7
GB
2573 /* The minimum value is only important with signed
2574 * comparisons where we can't assume the floor of a
2575 * value is 0. If we are using signed variables for our
2576 * indexes we need to make sure that whatever we use
2577 * will have a set floor within our range.
2578 */
b7137c4e
DB
2579 if (reg->smin_value < 0 &&
2580 (reg->smin_value == S64_MIN ||
2581 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
2582 reg->smin_value + off < 0)) {
61bd5218 2583 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
dbcfe5f7
GB
2584 regno);
2585 return -EACCES;
2586 }
457f4436
AN
2587 err = __check_mem_access(env, regno, reg->smin_value + off, size,
2588 mem_size, zero_size_allowed);
dbcfe5f7 2589 if (err) {
457f4436 2590 verbose(env, "R%d min value is outside of the allowed memory range\n",
61bd5218 2591 regno);
dbcfe5f7
GB
2592 return err;
2593 }
2594
b03c9f9f
EC
2595 /* If we haven't set a max value then we need to bail since we can't be
2596 * sure we won't do bad things.
2597 * If reg->umax_value + off could overflow, treat that as unbounded too.
dbcfe5f7 2598 */
b03c9f9f 2599 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
457f4436 2600 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
dbcfe5f7
GB
2601 regno);
2602 return -EACCES;
2603 }
457f4436
AN
2604 err = __check_mem_access(env, regno, reg->umax_value + off, size,
2605 mem_size, zero_size_allowed);
2606 if (err) {
2607 verbose(env, "R%d max value is outside of the allowed memory range\n",
61bd5218 2608 regno);
457f4436
AN
2609 return err;
2610 }
2611
2612 return 0;
2613}
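/* Worked example (sketch, not from the original file): for a region of
 * mem_size = 16, a register with smin_value = umax_value = 12 and an
 * 8-byte access at off = 0 fails the first __check_mem_access() call
 * (12 + 8 > 16, "min value is outside of the allowed memory range");
 * with smin_value = 0 and umax_value = 12 the min check passes and the
 * same access fails the umax check instead.
 */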
d83525ca 2614
457f4436
AN
2615/* check read/write into a map element with possible variable offset */
2616static int check_map_access(struct bpf_verifier_env *env, u32 regno,
2617 int off, int size, bool zero_size_allowed)
2618{
2619 struct bpf_verifier_state *vstate = env->cur_state;
2620 struct bpf_func_state *state = vstate->frame[vstate->curframe];
2621 struct bpf_reg_state *reg = &state->regs[regno];
2622 struct bpf_map *map = reg->map_ptr;
2623 int err;
2624
2625 err = check_mem_region_access(env, regno, off, size, map->value_size,
2626 zero_size_allowed);
2627 if (err)
2628 return err;
2629
2630 if (map_value_has_spin_lock(map)) {
2631 u32 lock = map->spin_lock_off;
d83525ca
AS
2632
2633 /* if any part of struct bpf_spin_lock can be touched by
2634 * load/store reject this program.
2635 * To check that [x1, x2) overlaps with [y1, y2)
2636 * it is sufficient to check x1 < y2 && y1 < x2.
2637 */
2638 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
2639 lock < reg->umax_value + off + size) {
2640 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
2641 return -EACCES;
2642 }
2643 }
f1174f77 2644 return err;
dbcfe5f7
GB
2645}
2646
969bf05e
AS
2647#define MAX_PACKET_OFF 0xffff
2648
7e40781c
UP
2649static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
2650{
2651 return prog->aux->linked_prog ? prog->aux->linked_prog->type
2652 : prog->type;
2653}
2654
58e2af8b 2655static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
3a0af8fd
TG
2656 const struct bpf_call_arg_meta *meta,
2657 enum bpf_access_type t)
4acf6c0b 2658{
7e40781c
UP
2659 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
2660
2661 switch (prog_type) {
5d66fa7d 2662 /* Program types only with direct read access go here! */
3a0af8fd
TG
2663 case BPF_PROG_TYPE_LWT_IN:
2664 case BPF_PROG_TYPE_LWT_OUT:
004d4b27 2665 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2dbb9b9e 2666 case BPF_PROG_TYPE_SK_REUSEPORT:
5d66fa7d 2667 case BPF_PROG_TYPE_FLOW_DISSECTOR:
d5563d36 2668 case BPF_PROG_TYPE_CGROUP_SKB:
3a0af8fd
TG
2669 if (t == BPF_WRITE)
2670 return false;
7e57fbb2 2671 /* fallthrough */
5d66fa7d
DB
2672
2673 /* Program types with direct read + write access go here! */
36bbef52
DB
2674 case BPF_PROG_TYPE_SCHED_CLS:
2675 case BPF_PROG_TYPE_SCHED_ACT:
4acf6c0b 2676 case BPF_PROG_TYPE_XDP:
3a0af8fd 2677 case BPF_PROG_TYPE_LWT_XMIT:
8a31db56 2678 case BPF_PROG_TYPE_SK_SKB:
4f738adb 2679 case BPF_PROG_TYPE_SK_MSG:
36bbef52
DB
2680 if (meta)
2681 return meta->pkt_access;
2682
2683 env->seen_direct_write = true;
4acf6c0b 2684 return true;
0d01da6a
SF
2685
2686 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2687 if (t == BPF_WRITE)
2688 env->seen_direct_write = true;
2689
2690 return true;
2691
4acf6c0b
BB
2692 default:
2693 return false;
2694 }
2695}
2696
f1174f77 2697static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 2698 int size, bool zero_size_allowed)
f1174f77 2699{
638f5b90 2700 struct bpf_reg_state *regs = cur_regs(env);
f1174f77
EC
2701 struct bpf_reg_state *reg = &regs[regno];
2702 int err;
2703
2704 /* We may have added a variable offset to the packet pointer; but any
2705 * reg->range we have comes after that. We are only checking the fixed
2706 * offset.
2707 */
2708
2709 /* We don't allow negative numbers, because we aren't tracking enough
2710 * detail to prove they're safe.
2711 */
b03c9f9f 2712 if (reg->smin_value < 0) {
61bd5218 2713 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
f1174f77
EC
2714 regno);
2715 return -EACCES;
2716 }
457f4436
AN
2717 err = __check_mem_access(env, regno, off, size, reg->range,
2718 zero_size_allowed);
f1174f77 2719 if (err) {
61bd5218 2720 verbose(env, "R%d offset is outside of the packet\n", regno);
f1174f77
EC
2721 return err;
2722 }
e647815a 2723
457f4436 2724 /* __check_mem_access has made sure "off + size - 1" is within u16.
e647815a
JW
2725 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
2726 * otherwise find_good_pkt_pointers would have refused to set range info
457f4436 2727 * so that __check_mem_access would have rejected this pkt access.
e647815a
JW
2728 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
2729 */
2730 env->prog->aux->max_pkt_offset =
2731 max_t(u32, env->prog->aux->max_pkt_offset,
2732 off + reg->umax_value + size - 1);
2733
f1174f77
EC
2734 return err;
2735}
2736
2737/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
31fd8581 2738static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
9e15db66
AS
2739 enum bpf_access_type t, enum bpf_reg_type *reg_type,
2740 u32 *btf_id)
17a52670 2741{
f96da094
DB
2742 struct bpf_insn_access_aux info = {
2743 .reg_type = *reg_type,
9e15db66 2744 .log = &env->log,
f96da094 2745 };
31fd8581 2746
4f9218aa 2747 if (env->ops->is_valid_access &&
5e43f899 2748 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
f96da094
DB
2749 /* A non-zero info.ctx_field_size indicates that this field is a
2750 * candidate for later verifier transformation to load the whole
2751 * field and then apply a mask when accessed with a narrower
2752 * access than actual ctx access size. A zero info.ctx_field_size
2753 * will only allow for whole field access and rejects any other
2754 * type of narrower access.
31fd8581 2755 */
23994631 2756 *reg_type = info.reg_type;
31fd8581 2757
b121b341 2758 if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL)
9e15db66
AS
2759 *btf_id = info.btf_id;
2760 else
2761 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
32bbe007
AS
2762 /* remember the offset of last byte accessed in ctx */
2763 if (env->prog->aux->max_ctx_offset < off + size)
2764 env->prog->aux->max_ctx_offset = off + size;
17a52670 2765 return 0;
32bbe007 2766 }
17a52670 2767
61bd5218 2768 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
17a52670
AS
2769 return -EACCES;
2770}
2771
d58e468b
PP
2772static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
2773 int size)
2774{
2775 if (size < 0 || off < 0 ||
2776 (u64)off + size > sizeof(struct bpf_flow_keys)) {
2777 verbose(env, "invalid access to flow keys off=%d size=%d\n",
2778 off, size);
2779 return -EACCES;
2780 }
2781 return 0;
2782}
2783
5f456649
MKL
2784static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
2785 u32 regno, int off, int size,
2786 enum bpf_access_type t)
c64b7983
JS
2787{
2788 struct bpf_reg_state *regs = cur_regs(env);
2789 struct bpf_reg_state *reg = &regs[regno];
5f456649 2790 struct bpf_insn_access_aux info = {};
46f8bc92 2791 bool valid;
c64b7983
JS
2792
2793 if (reg->smin_value < 0) {
2794 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2795 regno);
2796 return -EACCES;
2797 }
2798
46f8bc92
MKL
2799 switch (reg->type) {
2800 case PTR_TO_SOCK_COMMON:
2801 valid = bpf_sock_common_is_valid_access(off, size, t, &info);
2802 break;
2803 case PTR_TO_SOCKET:
2804 valid = bpf_sock_is_valid_access(off, size, t, &info);
2805 break;
655a51e5
MKL
2806 case PTR_TO_TCP_SOCK:
2807 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
2808 break;
fada7fdc
JL
2809 case PTR_TO_XDP_SOCK:
2810 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
2811 break;
46f8bc92
MKL
2812 default:
2813 valid = false;
c64b7983
JS
2814 }
2815
5f456649 2816
46f8bc92
MKL
2817 if (valid) {
2818 env->insn_aux_data[insn_idx].ctx_field_size =
2819 info.ctx_field_size;
2820 return 0;
2821 }
2822
2823 verbose(env, "R%d invalid %s access off=%d size=%d\n",
2824 regno, reg_type_str[reg->type], off, size);
2825
2826 return -EACCES;
c64b7983
JS
2827}
2828
2a159c6f
DB
2829static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
2830{
2831 return cur_regs(env) + regno;
2832}
2833
4cabc5b1
DB
2834static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
2835{
2a159c6f 2836 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4cabc5b1
DB
2837}
2838
f37a8cb8
DB
2839static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
2840{
2a159c6f 2841 const struct bpf_reg_state *reg = reg_state(env, regno);
f37a8cb8 2842
46f8bc92
MKL
2843 return reg->type == PTR_TO_CTX;
2844}
2845
2846static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
2847{
2848 const struct bpf_reg_state *reg = reg_state(env, regno);
2849
2850 return type_is_sk_pointer(reg->type);
f37a8cb8
DB
2851}
2852
ca369602
DB
2853static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
2854{
2a159c6f 2855 const struct bpf_reg_state *reg = reg_state(env, regno);
ca369602
DB
2856
2857 return type_is_pkt_pointer(reg->type);
2858}
2859
4b5defde
DB
2860static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
2861{
2862 const struct bpf_reg_state *reg = reg_state(env, regno);
2863
2864 /* Separate from is_ctx_reg() since we still want to allow BPF_ST here. */
2865 return reg->type == PTR_TO_FLOW_KEYS;
2866}
2867
61bd5218
JK
2868static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
2869 const struct bpf_reg_state *reg,
d1174416 2870 int off, int size, bool strict)
969bf05e 2871{
f1174f77 2872 struct tnum reg_off;
e07b98d9 2873 int ip_align;
d1174416
DM
2874
2875 /* Byte size accesses are always allowed. */
2876 if (!strict || size == 1)
2877 return 0;
2878
e4eda884
DM
2879 /* For platforms that do not have a Kconfig enabling
2880 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
2881 * NET_IP_ALIGN is universally set to '2'. And on platforms
2882 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
2883 * to this code only in strict mode where we want to emulate
2884 * the NET_IP_ALIGN==2 checking. Therefore use an
2885 * unconditional IP align value of '2'.
e07b98d9 2886 */
e4eda884 2887 ip_align = 2;
f1174f77
EC
2888
2889 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
2890 if (!tnum_is_aligned(reg_off, size)) {
2891 char tn_buf[48];
2892
2893 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218
JK
2894 verbose(env,
2895 "misaligned packet access off %d+%s+%d+%d size %d\n",
f1174f77 2896 ip_align, tn_buf, reg->off, off, size);
969bf05e
AS
2897 return -EACCES;
2898 }
79adffcd 2899
969bf05e
AS
2900 return 0;
2901}
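/* Worked example (sketch, not from the original file): under strict
 * alignment, a 4-byte load at pkt + 14 (the usual IP header offset
 * behind a 14-byte Ethernet header, with var_off and reg->off zero)
 * checks that 2 + 14 = 16 is 4-byte aligned and passes, while the same
 * load at pkt + 13 computes 15 and is rejected as "misaligned packet
 * access".
 */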
2902
61bd5218
JK
2903static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
2904 const struct bpf_reg_state *reg,
f1174f77
EC
2905 const char *pointer_desc,
2906 int off, int size, bool strict)
79adffcd 2907{
f1174f77
EC
2908 struct tnum reg_off;
2909
2910 /* Byte size accesses are always allowed. */
2911 if (!strict || size == 1)
2912 return 0;
2913
2914 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
2915 if (!tnum_is_aligned(reg_off, size)) {
2916 char tn_buf[48];
2917
2918 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 2919 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
f1174f77 2920 pointer_desc, tn_buf, reg->off, off, size);
79adffcd
DB
2921 return -EACCES;
2922 }
2923
969bf05e
AS
2924 return 0;
2925}
2926
e07b98d9 2927static int check_ptr_alignment(struct bpf_verifier_env *env,
ca369602
DB
2928 const struct bpf_reg_state *reg, int off,
2929 int size, bool strict_alignment_once)
79adffcd 2930{
ca369602 2931 bool strict = env->strict_alignment || strict_alignment_once;
f1174f77 2932 const char *pointer_desc = "";
d1174416 2933
79adffcd
DB
2934 switch (reg->type) {
2935 case PTR_TO_PACKET:
de8f3a83
DB
2936 case PTR_TO_PACKET_META:
2937 /* Special case, because of NET_IP_ALIGN. Given metadata sits
2938 * right in front, treat it the very same way.
2939 */
61bd5218 2940 return check_pkt_ptr_alignment(env, reg, off, size, strict);
d58e468b
PP
2941 case PTR_TO_FLOW_KEYS:
2942 pointer_desc = "flow keys ";
2943 break;
f1174f77
EC
2944 case PTR_TO_MAP_VALUE:
2945 pointer_desc = "value ";
2946 break;
2947 case PTR_TO_CTX:
2948 pointer_desc = "context ";
2949 break;
2950 case PTR_TO_STACK:
2951 pointer_desc = "stack ";
a5ec6ae1
JH
2952 /* The stack spill tracking logic in check_stack_write()
2953 * and check_stack_read() relies on stack accesses being
2954 * aligned.
2955 */
2956 strict = true;
f1174f77 2957 break;
c64b7983
JS
2958 case PTR_TO_SOCKET:
2959 pointer_desc = "sock ";
2960 break;
46f8bc92
MKL
2961 case PTR_TO_SOCK_COMMON:
2962 pointer_desc = "sock_common ";
2963 break;
655a51e5
MKL
2964 case PTR_TO_TCP_SOCK:
2965 pointer_desc = "tcp_sock ";
2966 break;
fada7fdc
JL
2967 case PTR_TO_XDP_SOCK:
2968 pointer_desc = "xdp_sock ";
2969 break;
79adffcd 2970 default:
f1174f77 2971 break;
79adffcd 2972 }
61bd5218
JK
2973 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
2974 strict);
79adffcd
DB
2975}
2976
f4d7e40a
AS
2977static int update_stack_depth(struct bpf_verifier_env *env,
2978 const struct bpf_func_state *func,
2979 int off)
2980{
9c8105bd 2981 u16 stack = env->subprog_info[func->subprogno].stack_depth;
f4d7e40a
AS
2982
2983 if (stack >= -off)
2984 return 0;
2985
2986 /* update known max for given subprogram */
9c8105bd 2987 env->subprog_info[func->subprogno].stack_depth = -off;
70a87ffe
AS
2988 return 0;
2989}
f4d7e40a 2990
70a87ffe
AS
2991/* starting from main bpf function walk all instructions of the function
2992 * and recursively walk all callees that the given function can call.
2993 * Ignore jump and exit insns.
2994 * Since recursion is prevented by check_cfg() this algorithm
2995 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
2996 */
2997static int check_max_stack_depth(struct bpf_verifier_env *env)
2998{
9c8105bd
JW
2999 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
3000 struct bpf_subprog_info *subprog = env->subprog_info;
70a87ffe 3001 struct bpf_insn *insn = env->prog->insnsi;
ebf7d1f5 3002 bool tail_call_reachable = false;
70a87ffe
AS
3003 int ret_insn[MAX_CALL_FRAMES];
3004 int ret_prog[MAX_CALL_FRAMES];
ebf7d1f5 3005 int j;
f4d7e40a 3006
70a87ffe 3007process_func:
7f6e4312
MF
3008 /* protect against potential stack overflow that might happen when
3009 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
3010 * depth in such a case down to 256 so that the worst-case scenario
3011 * would result in an 8k stack size (32, the tailcall limit, * 256 =
3012 * 8k).
3013 *
3014 * To get an idea of what might happen, see this example:
3015 * func1 -> sub rsp, 128
3016 * subfunc1 -> sub rsp, 256
3017 * tailcall1 -> add rsp, 256
3018 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
3019 * subfunc2 -> sub rsp, 64
3020 * subfunc22 -> sub rsp, 128
3021 * tailcall2 -> add rsp, 128
3022 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
3023 *
3024 * tailcall will unwind the current stack frame but it will not get rid
3025 * of the caller's stack, as shown in the example above.
3026 */
3027 if (idx && subprog[idx].has_tail_call && depth >= 256) {
3028 verbose(env,
3029 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
3030 depth);
3031 return -EACCES;
3032 }
70a87ffe
AS
3033 /* round up to 32 bytes, since this is the granularity
3034 * of the interpreter stack size
3035 */
9c8105bd 3036 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe 3037 if (depth > MAX_BPF_STACK) {
f4d7e40a 3038 verbose(env, "combined stack size of %d calls is %d. Too large\n",
70a87ffe 3039 frame + 1, depth);
f4d7e40a
AS
3040 return -EACCES;
3041 }
70a87ffe 3042continue_func:
4cb3d99c 3043 subprog_end = subprog[idx + 1].start;
70a87ffe
AS
3044 for (; i < subprog_end; i++) {
3045 if (insn[i].code != (BPF_JMP | BPF_CALL))
3046 continue;
3047 if (insn[i].src_reg != BPF_PSEUDO_CALL)
3048 continue;
3049 /* remember insn and function to return to */
3050 ret_insn[frame] = i + 1;
9c8105bd 3051 ret_prog[frame] = idx;
70a87ffe
AS
3052
3053 /* find the callee */
3054 i = i + insn[i].imm + 1;
9c8105bd
JW
3055 idx = find_subprog(env, i);
3056 if (idx < 0) {
70a87ffe
AS
3057 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
3058 i);
3059 return -EFAULT;
3060 }
ebf7d1f5
MF
3061
3062 if (subprog[idx].has_tail_call)
3063 tail_call_reachable = true;
3064
70a87ffe
AS
3065 frame++;
3066 if (frame >= MAX_CALL_FRAMES) {
927cb781
PC
3067 verbose(env, "the call stack of %d frames is too deep !\n",
3068 frame);
3069 return -E2BIG;
70a87ffe
AS
3070 }
3071 goto process_func;
3072 }
ebf7d1f5
MF
3073 /* if a tail call was detected across bpf2bpf calls then mark each of the
3074 * currently present subprog frames as tail-call-reachable subprogs;
3075 * this info will be utilized by the JIT so that the tail call counter
3076 * is preserved throughout bpf2bpf calls combined with tailcalls
3077 */
3078 if (tail_call_reachable)
3079 for (j = 0; j < frame; j++)
3080 subprog[ret_prog[j]].tail_call_reachable = true;
3081
70a87ffe
AS
3082 /* end of for() loop means the last insn of the 'subprog'
3083 * was reached. Doesn't matter whether it was JA or EXIT
3084 */
3085 if (frame == 0)
3086 return 0;
9c8105bd 3087 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe
AS
3088 frame--;
3089 i = ret_insn[frame];
9c8105bd 3090 idx = ret_prog[frame];
70a87ffe 3091 goto continue_func;
f4d7e40a
AS
3092}
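/* Worked example (sketch, not from the original file): main with
 * stack_depth 100 calling one subprog with stack_depth 200 accumulates
 * round_up(100, 32) + round_up(200, 32) = 128 + 224 = 352, which fits
 * in MAX_BPF_STACK (512); a further callee needing another 224 rounded
 * bytes would push the total to 576 and fail with "combined stack size
 * ... Too large".
 */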
3093
19d28fbd 3094#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
3095static int get_callee_stack_depth(struct bpf_verifier_env *env,
3096 const struct bpf_insn *insn, int idx)
3097{
3098 int start = idx + insn->imm + 1, subprog;
3099
3100 subprog = find_subprog(env, start);
3101 if (subprog < 0) {
3102 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
3103 start);
3104 return -EFAULT;
3105 }
9c8105bd 3106 return env->subprog_info[subprog].stack_depth;
1ea47e01 3107}
19d28fbd 3108#endif
1ea47e01 3109
51c39bb1
AS
3110int check_ctx_reg(struct bpf_verifier_env *env,
3111 const struct bpf_reg_state *reg, int regno)
58990d1f
DB
3112{
3113 /* Access to ctx or passing it to a helper is only allowed in
3114 * its original, unmodified form.
3115 */
3116
3117 if (reg->off) {
3118 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
3119 regno, reg->off);
3120 return -EACCES;
3121 }
3122
3123 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3124 char tn_buf[48];
3125
3126 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3127 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
3128 return -EACCES;
3129 }
3130
3131 return 0;
3132}
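/* Illustrative sketch, not part of the original file: any arithmetic on
 * the ctx pointer before use trips the reg->off check above:
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
 *
 * fails with "dereference of modified ctx ptr R1 off=8 disallowed",
 * whereas BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 8) on an unmodified
 * R1 keeps reg->off == 0 and is handled by check_ctx_access() instead.
 */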
3133
afbf21dc
YS
3134static int __check_buffer_access(struct bpf_verifier_env *env,
3135 const char *buf_info,
3136 const struct bpf_reg_state *reg,
3137 int regno, int off, int size)
9df1c28b
MM
3138{
3139 if (off < 0) {
3140 verbose(env,
4fc00b79 3141 "R%d invalid %s buffer access: off=%d, size=%d\n",
afbf21dc 3142 regno, buf_info, off, size);
9df1c28b
MM
3143 return -EACCES;
3144 }
3145 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3146 char tn_buf[48];
3147
3148 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3149 verbose(env,
4fc00b79 3150 "R%d invalid variable buffer offset: off=%d, var_off=%s\n",
9df1c28b
MM
3151 regno, off, tn_buf);
3152 return -EACCES;
3153 }
afbf21dc
YS
3154
3155 return 0;
3156}
3157
3158static int check_tp_buffer_access(struct bpf_verifier_env *env,
3159 const struct bpf_reg_state *reg,
3160 int regno, int off, int size)
3161{
3162 int err;
3163
3164 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
3165 if (err)
3166 return err;
3167
9df1c28b
MM
3168 if (off + size > env->prog->aux->max_tp_access)
3169 env->prog->aux->max_tp_access = off + size;
3170
3171 return 0;
3172}
3173
afbf21dc
YS
3174static int check_buffer_access(struct bpf_verifier_env *env,
3175 const struct bpf_reg_state *reg,
3176 int regno, int off, int size,
3177 bool zero_size_allowed,
3178 const char *buf_info,
3179 u32 *max_access)
3180{
3181 int err;
3182
3183 err = __check_buffer_access(env, buf_info, reg, regno, off, size);
3184 if (err)
3185 return err;
3186
3187 if (off + size > *max_access)
3188 *max_access = off + size;
3189
3190 return 0;
3191}
3192
3f50f132
JF
3193/* The BPF architecture zero extends alu32 ops into 64-bit registers */
3194static void zext_32_to_64(struct bpf_reg_state *reg)
3195{
3196 reg->var_off = tnum_subreg(reg->var_off);
3197 __reg_assign_32_into_64(reg);
3198}
9df1c28b 3199
0c17d1d2
JH
3200/* truncate register to smaller size (in bytes)
3201 * must be called with size < BPF_REG_SIZE
3202 */
3203static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
3204{
3205 u64 mask;
3206
3207 /* clear high bits in bit representation */
3208 reg->var_off = tnum_cast(reg->var_off, size);
3209
3210 /* fix arithmetic bounds */
3211 mask = ((u64)1 << (size * 8)) - 1;
3212 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
3213 reg->umin_value &= mask;
3214 reg->umax_value &= mask;
3215 } else {
3216 reg->umin_value = 0;
3217 reg->umax_value = mask;
3218 }
3219 reg->smin_value = reg->umin_value;
3220 reg->smax_value = reg->umax_value;
3f50f132
JF
3221
3222 /* If size is smaller than the 32-bit register width, the 32-bit
3223 * values are also truncated, so we push the 64-bit bounds into
3224 * the 32-bit bounds. Sizes < 32 bits were truncated above already.
3225 */
3226 if (size >= 4)
3227 return;
3228 __reg_combine_64_into_32(reg);
0c17d1d2
JH
3229}
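/* Stand-alone sketch of the bounds-truncation arithmetic above, for
 * illustration only (hypothetical helper, not called anywhere):
 */
static inline void coerce_bounds_demo(u64 *umin, u64 *umax, int size)
{
	u64 mask = ((u64)1 << (size * 8)) - 1;

	if ((*umin & ~mask) == (*umax & ~mask)) {
		/* high bits agree: truncation keeps the range exact */
		*umin &= mask;
		*umax &= mask;
	} else {
		/* range straddles a truncation boundary: widen to [0, mask] */
		*umin = 0;
		*umax = mask;
	}
}
/* e.g. size = 2: [0x10001, 0x1ffff] -> [1, 0xffff] (exact), while
 * [0xfffe, 0x10001] -> [0, 0xffff] (high bits differ).
 */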
3230
a23740ec
AN
3231static bool bpf_map_is_rdonly(const struct bpf_map *map)
3232{
3233 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
3234}
3235
3236static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
3237{
3238 void *ptr;
3239 u64 addr;
3240 int err;
3241
3242 err = map->ops->map_direct_value_addr(map, &addr, off);
3243 if (err)
3244 return err;
2dedd7d2 3245 ptr = (void *)(long)addr + off;
a23740ec
AN
3246
3247 switch (size) {
3248 case sizeof(u8):
3249 *val = (u64)*(u8 *)ptr;
3250 break;
3251 case sizeof(u16):
3252 *val = (u64)*(u16 *)ptr;
3253 break;
3254 case sizeof(u32):
3255 *val = (u64)*(u32 *)ptr;
3256 break;
3257 case sizeof(u64):
3258 *val = *(u64 *)ptr;
3259 break;
3260 default:
3261 return -EINVAL;
3262 }
3263 return 0;
3264}
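/* Illustrative flow (a sketch of the userspace side, not verifier code):
 * an array map created with BPF_F_RDONLY_PROG, populated, and then frozen
 * via the BPF_MAP_FREEZE command satisfies bpf_map_is_rdonly(). A load
 * from such a map at a constant offset can then be resolved through
 * bpf_map_direct_read() into a known scalar at verification time.
 */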
3265
9e15db66
AS
3266static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
3267 struct bpf_reg_state *regs,
3268 int regno, int off, int size,
3269 enum bpf_access_type atype,
3270 int value_regno)
3271{
3272 struct bpf_reg_state *reg = regs + regno;
3273 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
3274 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
3275 u32 btf_id;
3276 int ret;
3277
9e15db66
AS
3278 if (off < 0) {
3279 verbose(env,
3280 "R%d is ptr_%s invalid negative access: off=%d\n",
3281 regno, tname, off);
3282 return -EACCES;
3283 }
3284 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3285 char tn_buf[48];
3286
3287 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3288 verbose(env,
3289 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
3290 regno, tname, off, tn_buf);
3291 return -EACCES;
3292 }
3293
27ae7997
MKL
3294 if (env->ops->btf_struct_access) {
3295 ret = env->ops->btf_struct_access(&env->log, t, off, size,
3296 atype, &btf_id);
3297 } else {
3298 if (atype != BPF_READ) {
3299 verbose(env, "only read is supported\n");
3300 return -EACCES;
3301 }
3302
3303 ret = btf_struct_access(&env->log, t, off, size, atype,
3304 &btf_id);
3305 }
3306
9e15db66
AS
3307 if (ret < 0)
3308 return ret;
3309
41c48f3a
AI
3310 if (atype == BPF_READ && value_regno >= 0)
3311 mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
3312
3313 return 0;
3314}
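/* Example (illustrative BPF-side C, assuming a tracing program with
 * vmlinux BTF available): given 'struct task_struct *task' of type
 * PTR_TO_BTF_ID, a read such as
 *   pid = task->pid;
 * is checked here against the BTF type layout. Writes are rejected
 * unless the program type supplies its own btf_struct_access() callback.
 */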
3315
3316static int check_ptr_to_map_access(struct bpf_verifier_env *env,
3317 struct bpf_reg_state *regs,
3318 int regno, int off, int size,
3319 enum bpf_access_type atype,
3320 int value_regno)
3321{
3322 struct bpf_reg_state *reg = regs + regno;
3323 struct bpf_map *map = reg->map_ptr;
3324 const struct btf_type *t;
3325 const char *tname;
3326 u32 btf_id;
3327 int ret;
3328
3329 if (!btf_vmlinux) {
3330 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
3331 return -ENOTSUPP;
3332 }
3333
3334 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
3335 verbose(env, "map_ptr access not supported for map type %d\n",
3336 map->map_type);
3337 return -ENOTSUPP;
3338 }
3339
3340 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
3341 tname = btf_name_by_offset(btf_vmlinux, t->name_off);
3342
3343 if (!env->allow_ptr_to_map_access) {
3344 verbose(env,
3345 "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
3346 tname);
3347 return -EPERM;
9e15db66 3348 }
27ae7997 3349
41c48f3a
AI
3350 if (off < 0) {
3351 verbose(env, "R%d is %s invalid negative access: off=%d\n",
3352 regno, tname, off);
3353 return -EACCES;
3354 }
3355
3356 if (atype != BPF_READ) {
3357 verbose(env, "only read from %s is supported\n", tname);
3358 return -EACCES;
3359 }
3360
3361 ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
3362 if (ret < 0)
3363 return ret;
3364
3365 if (value_regno >= 0)
3366 mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
3367
9e15db66
AS
3368 return 0;
3369}
3370
41c48f3a 3371
17a52670
AS
3372/* check whether memory at (regno + off) is accessible for t = (read | write)
 3373 * if t==write, value_regno is the register whose value is stored into memory
 3374 * if t==read, value_regno is the register that will receive the value from memory
3375 * if t==write && value_regno==-1, some unknown value is stored into memory
3376 * if t==read && value_regno==-1, don't care what we read from memory
3377 */
ca369602
DB
3378static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
3379 int off, int bpf_size, enum bpf_access_type t,
3380 int value_regno, bool strict_alignment_once)
17a52670 3381{
638f5b90
AS
3382 struct bpf_reg_state *regs = cur_regs(env);
3383 struct bpf_reg_state *reg = regs + regno;
f4d7e40a 3384 struct bpf_func_state *state;
17a52670
AS
3385 int size, err = 0;
3386
3387 size = bpf_size_to_bytes(bpf_size);
3388 if (size < 0)
3389 return size;
3390
f1174f77 3391 /* alignment checks will add in reg->off themselves */
ca369602 3392 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
969bf05e
AS
3393 if (err)
3394 return err;
17a52670 3395
f1174f77
EC
3396 /* for access checks, reg->off is just part of off */
3397 off += reg->off;
3398
3399 if (reg->type == PTR_TO_MAP_VALUE) {
1be7f75d
AS
3400 if (t == BPF_WRITE && value_regno >= 0 &&
3401 is_pointer_value(env, value_regno)) {
61bd5218 3402 verbose(env, "R%d leaks addr into map\n", value_regno);
1be7f75d
AS
3403 return -EACCES;
3404 }
591fe988
DB
3405 err = check_map_access_type(env, regno, off, size, t);
3406 if (err)
3407 return err;
9fd29c08 3408 err = check_map_access(env, regno, off, size, false);
a23740ec
AN
3409 if (!err && t == BPF_READ && value_regno >= 0) {
3410 struct bpf_map *map = reg->map_ptr;
3411
3412 /* if map is read-only, track its contents as scalars */
3413 if (tnum_is_const(reg->var_off) &&
3414 bpf_map_is_rdonly(map) &&
3415 map->ops->map_direct_value_addr) {
3416 int map_off = off + reg->var_off.value;
3417 u64 val = 0;
3418
3419 err = bpf_map_direct_read(map, map_off, size,
3420 &val);
3421 if (err)
3422 return err;
3423
3424 regs[value_regno].type = SCALAR_VALUE;
3425 __mark_reg_known(&regs[value_regno], val);
3426 } else {
3427 mark_reg_unknown(env, regs, value_regno);
3428 }
3429 }
457f4436
AN
3430 } else if (reg->type == PTR_TO_MEM) {
3431 if (t == BPF_WRITE && value_regno >= 0 &&
3432 is_pointer_value(env, value_regno)) {
3433 verbose(env, "R%d leaks addr into mem\n", value_regno);
3434 return -EACCES;
3435 }
3436 err = check_mem_region_access(env, regno, off, size,
3437 reg->mem_size, false);
3438 if (!err && t == BPF_READ && value_regno >= 0)
3439 mark_reg_unknown(env, regs, value_regno);
1a0dc1ac 3440 } else if (reg->type == PTR_TO_CTX) {
f1174f77 3441 enum bpf_reg_type reg_type = SCALAR_VALUE;
9e15db66 3442 u32 btf_id = 0;
19de99f7 3443
1be7f75d
AS
3444 if (t == BPF_WRITE && value_regno >= 0 &&
3445 is_pointer_value(env, value_regno)) {
61bd5218 3446 verbose(env, "R%d leaks addr into ctx\n", value_regno);
1be7f75d
AS
3447 return -EACCES;
3448 }
f1174f77 3449
58990d1f
DB
3450 err = check_ctx_reg(env, reg, regno);
3451 if (err < 0)
3452 return err;
3453
9e15db66
AS
3454 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
3455 if (err)
3456 verbose_linfo(env, insn_idx, "; ");
969bf05e 3457 if (!err && t == BPF_READ && value_regno >= 0) {
f1174f77 3458 /* ctx access returns either a scalar, or a
de8f3a83
DB
3459 * PTR_TO_PACKET[_META,_END]. In the latter
3460 * case, we know the offset is zero.
f1174f77 3461 */
46f8bc92 3462 if (reg_type == SCALAR_VALUE) {
638f5b90 3463 mark_reg_unknown(env, regs, value_regno);
46f8bc92 3464 } else {
638f5b90 3465 mark_reg_known_zero(env, regs,
61bd5218 3466 value_regno);
46f8bc92
MKL
3467 if (reg_type_may_be_null(reg_type))
3468 regs[value_regno].id = ++env->id_gen;
5327ed3d
JW
 3469 /* A load of a ctx field could have a different
 3470 * actual load size from the one encoded in the
 3471 * insn. When the dst is a PTR, it is for sure not
 3472 * a sub-register.
3473 */
3474 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
b121b341
YS
3475 if (reg_type == PTR_TO_BTF_ID ||
3476 reg_type == PTR_TO_BTF_ID_OR_NULL)
9e15db66 3477 regs[value_regno].btf_id = btf_id;
46f8bc92 3478 }
638f5b90 3479 regs[value_regno].type = reg_type;
969bf05e 3480 }
17a52670 3481
f1174f77 3482 } else if (reg->type == PTR_TO_STACK) {
f1174f77 3483 off += reg->var_off.value;
e4298d25
DB
3484 err = check_stack_access(env, reg, off, size);
3485 if (err)
3486 return err;
8726679a 3487
f4d7e40a
AS
3488 state = func(env, reg);
3489 err = update_stack_depth(env, state, off);
3490 if (err)
3491 return err;
8726679a 3492
638f5b90 3493 if (t == BPF_WRITE)
61bd5218 3494 err = check_stack_write(env, state, off, size,
af86ca4e 3495 value_regno, insn_idx);
638f5b90 3496 else
61bd5218
JK
3497 err = check_stack_read(env, state, off, size,
3498 value_regno);
de8f3a83 3499 } else if (reg_is_pkt_pointer(reg)) {
3a0af8fd 3500 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
61bd5218 3501 verbose(env, "cannot write into packet\n");
969bf05e
AS
3502 return -EACCES;
3503 }
4acf6c0b
BB
3504 if (t == BPF_WRITE && value_regno >= 0 &&
3505 is_pointer_value(env, value_regno)) {
61bd5218
JK
3506 verbose(env, "R%d leaks addr into packet\n",
3507 value_regno);
4acf6c0b
BB
3508 return -EACCES;
3509 }
9fd29c08 3510 err = check_packet_access(env, regno, off, size, false);
969bf05e 3511 if (!err && t == BPF_READ && value_regno >= 0)
638f5b90 3512 mark_reg_unknown(env, regs, value_regno);
d58e468b
PP
3513 } else if (reg->type == PTR_TO_FLOW_KEYS) {
3514 if (t == BPF_WRITE && value_regno >= 0 &&
3515 is_pointer_value(env, value_regno)) {
3516 verbose(env, "R%d leaks addr into flow keys\n",
3517 value_regno);
3518 return -EACCES;
3519 }
3520
3521 err = check_flow_keys_access(env, off, size);
3522 if (!err && t == BPF_READ && value_regno >= 0)
3523 mark_reg_unknown(env, regs, value_regno);
46f8bc92 3524 } else if (type_is_sk_pointer(reg->type)) {
c64b7983 3525 if (t == BPF_WRITE) {
46f8bc92
MKL
3526 verbose(env, "R%d cannot write into %s\n",
3527 regno, reg_type_str[reg->type]);
c64b7983
JS
3528 return -EACCES;
3529 }
5f456649 3530 err = check_sock_access(env, insn_idx, regno, off, size, t);
c64b7983
JS
3531 if (!err && value_regno >= 0)
3532 mark_reg_unknown(env, regs, value_regno);
9df1c28b
MM
3533 } else if (reg->type == PTR_TO_TP_BUFFER) {
3534 err = check_tp_buffer_access(env, reg, regno, off, size);
3535 if (!err && t == BPF_READ && value_regno >= 0)
3536 mark_reg_unknown(env, regs, value_regno);
9e15db66
AS
3537 } else if (reg->type == PTR_TO_BTF_ID) {
3538 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
3539 value_regno);
41c48f3a
AI
3540 } else if (reg->type == CONST_PTR_TO_MAP) {
3541 err = check_ptr_to_map_access(env, regs, regno, off, size, t,
3542 value_regno);
afbf21dc
YS
3543 } else if (reg->type == PTR_TO_RDONLY_BUF) {
3544 if (t == BPF_WRITE) {
3545 verbose(env, "R%d cannot write into %s\n",
3546 regno, reg_type_str[reg->type]);
3547 return -EACCES;
3548 }
f6dfbe31
CIK
3549 err = check_buffer_access(env, reg, regno, off, size, false,
3550 "rdonly",
afbf21dc
YS
3551 &env->prog->aux->max_rdonly_access);
3552 if (!err && value_regno >= 0)
3553 mark_reg_unknown(env, regs, value_regno);
3554 } else if (reg->type == PTR_TO_RDWR_BUF) {
f6dfbe31
CIK
3555 err = check_buffer_access(env, reg, regno, off, size, false,
3556 "rdwr",
afbf21dc
YS
3557 &env->prog->aux->max_rdwr_access);
3558 if (!err && t == BPF_READ && value_regno >= 0)
3559 mark_reg_unknown(env, regs, value_regno);
17a52670 3560 } else {
61bd5218
JK
3561 verbose(env, "R%d invalid mem access '%s'\n", regno,
3562 reg_type_str[reg->type]);
17a52670
AS
3563 return -EACCES;
3564 }
969bf05e 3565
f1174f77 3566 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
638f5b90 3567 regs[value_regno].type == SCALAR_VALUE) {
f1174f77 3568 /* b/h/w load zero-extends, mark upper bits as known 0 */
0c17d1d2 3569 coerce_reg_to_size(&regs[value_regno], size);
969bf05e 3570 }
17a52670
AS
3571 return err;
3572}
3573
31fd8581 3574static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
17a52670 3575{
17a52670
AS
3576 int err;
3577
3578 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
3579 insn->imm != 0) {
61bd5218 3580 verbose(env, "BPF_XADD uses reserved fields\n");
17a52670
AS
3581 return -EINVAL;
3582 }
3583
3584 /* check src1 operand */
dc503a8a 3585 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
3586 if (err)
3587 return err;
3588
3589 /* check src2 operand */
dc503a8a 3590 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
3591 if (err)
3592 return err;
3593
6bdf6abc 3594 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 3595 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
6bdf6abc
DB
3596 return -EACCES;
3597 }
3598
ca369602 3599 if (is_ctx_reg(env, insn->dst_reg) ||
4b5defde 3600 is_pkt_reg(env, insn->dst_reg) ||
46f8bc92
MKL
3601 is_flow_key_reg(env, insn->dst_reg) ||
3602 is_sk_reg(env, insn->dst_reg)) {
ca369602 3603 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
2a159c6f
DB
3604 insn->dst_reg,
3605 reg_type_str[reg_state(env, insn->dst_reg)->type]);
f37a8cb8
DB
3606 return -EACCES;
3607 }
3608
17a52670 3609 /* check whether atomic_add can read the memory */
31fd8581 3610 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3611 BPF_SIZE(insn->code), BPF_READ, -1, true);
17a52670
AS
3612 if (err)
3613 return err;
3614
3615 /* check whether atomic_add can write into the same memory */
31fd8581 3616 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3617 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
17a52670
AS
3618}
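/* Example of an instruction sequence accepted here (illustrative,
 * assuming fp-8 was initialized first):
 *   BPF_MOV64_IMM(BPF_REG_1, 1),
 *   BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
 * which atomically adds R1 to the 8 bytes at fp-8; both the read and
 * the write leg are validated via check_mem_access() above.
 */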
3619
2011fccf
AI
3620static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
3621 int off, int access_size,
3622 bool zero_size_allowed)
3623{
3624 struct bpf_reg_state *reg = reg_state(env, regno);
3625
3626 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
3627 access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
3628 if (tnum_is_const(reg->var_off)) {
3629 verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
3630 regno, off, access_size);
3631 } else {
3632 char tn_buf[48];
3633
3634 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3635 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
3636 regno, tn_buf, access_size);
3637 }
3638 return -EACCES;
3639 }
3640 return 0;
3641}
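/* Worked example (illustrative): stack offsets are negative, so for
 * off = -16 and access_size = 8 the access covers [fp-16, fp-8) and
 * passes (off < 0, off >= -MAX_BPF_STACK, off + access_size = -8 <= 0).
 * off = -4 with access_size = 8 would end at fp+4 and is rejected
 * because off + access_size > 0.
 */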
3642
17a52670
AS
3643/* when register 'regno' is passed into a function that will read 'access_size'
 3644 * bytes from that pointer, make sure that it's within the stack boundary
f1174f77
EC
 3645 * and all elements of the stack are initialized.
3646 * Unlike most pointer bounds-checking functions, this one doesn't take an
3647 * 'off' argument, so it has to add in reg->off itself.
17a52670 3648 */
58e2af8b 3649static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
435faee1
DB
3650 int access_size, bool zero_size_allowed,
3651 struct bpf_call_arg_meta *meta)
17a52670 3652{
2a159c6f 3653 struct bpf_reg_state *reg = reg_state(env, regno);
f4d7e40a 3654 struct bpf_func_state *state = func(env, reg);
f7cf25b2 3655 int err, min_off, max_off, i, j, slot, spi;
17a52670 3656
2011fccf
AI
3657 if (tnum_is_const(reg->var_off)) {
3658 min_off = max_off = reg->var_off.value + reg->off;
3659 err = __check_stack_boundary(env, regno, min_off, access_size,
3660 zero_size_allowed);
3661 if (err)
3662 return err;
3663 } else {
088ec26d
AI
3664 /* Variable offset is prohibited for unprivileged mode for
3665 * simplicity since it requires corresponding support in
3666 * Spectre masking for stack ALU.
3667 * See also retrieve_ptr_limit().
3668 */
2c78ee89 3669 if (!env->bypass_spec_v1) {
088ec26d 3670 char tn_buf[48];
f1174f77 3671
088ec26d
AI
3672 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3673 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
3674 regno, tn_buf);
3675 return -EACCES;
3676 }
f2bcd05e
AI
 3677 /* Only an initialized buffer on the stack may be accessed with a
 3678 * variable offset. With an uninitialized buffer it's hard to
 3679 * guarantee that the whole memory is marked as initialized on
 3680 * helper return, since the specific bounds are unknown, which may
 3681 * cause uninitialized stack leaking.
3682 */
3683 if (meta && meta->raw_mode)
3684 meta = NULL;
3685
107c26a7
AI
3686 if (reg->smax_value >= BPF_MAX_VAR_OFF ||
3687 reg->smax_value <= -BPF_MAX_VAR_OFF) {
3688 verbose(env, "R%d unbounded indirect variable offset stack access\n",
3689 regno);
3690 return -EACCES;
3691 }
2011fccf 3692 min_off = reg->smin_value + reg->off;
107c26a7 3693 max_off = reg->smax_value + reg->off;
2011fccf
AI
3694 err = __check_stack_boundary(env, regno, min_off, access_size,
3695 zero_size_allowed);
107c26a7
AI
3696 if (err) {
3697 verbose(env, "R%d min value is outside of stack bound\n",
3698 regno);
2011fccf 3699 return err;
107c26a7 3700 }
2011fccf
AI
3701 err = __check_stack_boundary(env, regno, max_off, access_size,
3702 zero_size_allowed);
107c26a7
AI
3703 if (err) {
3704 verbose(env, "R%d max value is outside of stack bound\n",
3705 regno);
2011fccf 3706 return err;
107c26a7 3707 }
17a52670
AS
3708 }
3709
435faee1
DB
3710 if (meta && meta->raw_mode) {
3711 meta->access_size = access_size;
3712 meta->regno = regno;
3713 return 0;
3714 }
3715
2011fccf 3716 for (i = min_off; i < max_off + access_size; i++) {
cc2b14d5
AS
3717 u8 *stype;
3718
2011fccf 3719 slot = -i - 1;
638f5b90 3720 spi = slot / BPF_REG_SIZE;
cc2b14d5
AS
3721 if (state->allocated_stack <= slot)
3722 goto err;
3723 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3724 if (*stype == STACK_MISC)
3725 goto mark;
3726 if (*stype == STACK_ZERO) {
3727 /* helper can write anything into the stack */
3728 *stype = STACK_MISC;
3729 goto mark;
17a52670 3730 }
1d68f22b
YS
3731
3732 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
3733 state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
3734 goto mark;
3735
f7cf25b2
AS
3736 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
3737 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
f54c7898 3738 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
f7cf25b2
AS
3739 for (j = 0; j < BPF_REG_SIZE; j++)
3740 state->stack[spi].slot_type[j] = STACK_MISC;
3741 goto mark;
3742 }
3743
cc2b14d5 3744err:
2011fccf
AI
3745 if (tnum_is_const(reg->var_off)) {
3746 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
3747 min_off, i - min_off, access_size);
3748 } else {
3749 char tn_buf[48];
3750
3751 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3752 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
3753 tn_buf, i - min_off, access_size);
3754 }
cc2b14d5
AS
3755 return -EACCES;
3756mark:
3757 /* reading any byte out of 8-byte 'spill_slot' will cause
3758 * the whole slot to be marked as 'read'
3759 */
679c782d 3760 mark_reg_read(env, &state->stack[spi].spilled_ptr,
5327ed3d
JW
3761 state->stack[spi].spilled_ptr.parent,
3762 REG_LIVE_READ64);
17a52670 3763 }
2011fccf 3764 return update_stack_depth(env, state, min_off);
17a52670
AS
3765}
3766
06c1c049
GB
3767static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
3768 int access_size, bool zero_size_allowed,
3769 struct bpf_call_arg_meta *meta)
3770{
638f5b90 3771 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
06c1c049 3772
f1174f77 3773 switch (reg->type) {
06c1c049 3774 case PTR_TO_PACKET:
de8f3a83 3775 case PTR_TO_PACKET_META:
9fd29c08
YS
3776 return check_packet_access(env, regno, reg->off, access_size,
3777 zero_size_allowed);
06c1c049 3778 case PTR_TO_MAP_VALUE:
591fe988
DB
3779 if (check_map_access_type(env, regno, reg->off, access_size,
3780 meta && meta->raw_mode ? BPF_WRITE :
3781 BPF_READ))
3782 return -EACCES;
9fd29c08
YS
3783 return check_map_access(env, regno, reg->off, access_size,
3784 zero_size_allowed);
457f4436
AN
3785 case PTR_TO_MEM:
3786 return check_mem_region_access(env, regno, reg->off,
3787 access_size, reg->mem_size,
3788 zero_size_allowed);
afbf21dc
YS
3789 case PTR_TO_RDONLY_BUF:
3790 if (meta && meta->raw_mode)
3791 return -EACCES;
3792 return check_buffer_access(env, reg, regno, reg->off,
3793 access_size, zero_size_allowed,
3794 "rdonly",
3795 &env->prog->aux->max_rdonly_access);
3796 case PTR_TO_RDWR_BUF:
3797 return check_buffer_access(env, reg, regno, reg->off,
3798 access_size, zero_size_allowed,
3799 "rdwr",
3800 &env->prog->aux->max_rdwr_access);
0d004c02 3801 case PTR_TO_STACK:
06c1c049
GB
3802 return check_stack_boundary(env, regno, access_size,
3803 zero_size_allowed, meta);
0d004c02
LB
3804 default: /* scalar_value or invalid ptr */
3805 /* Allow zero-byte read from NULL, regardless of pointer type */
3806 if (zero_size_allowed && access_size == 0 &&
3807 register_is_null(reg))
3808 return 0;
3809
3810 verbose(env, "R%d type=%s expected=%s\n", regno,
3811 reg_type_str[reg->type],
3812 reg_type_str[PTR_TO_STACK]);
3813 return -EACCES;
06c1c049
GB
3814 }
3815}
3816
d83525ca
AS
3817/* Implementation details:
3818 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
3819 * Two bpf_map_lookups (even with the same key) will have different reg->id.
3820 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
3821 * value_or_null->value transition, since the verifier only cares about
3822 * the range of access to valid map value pointer and doesn't care about actual
3823 * address of the map element.
3824 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
3825 * reg->id > 0 after value_or_null->value transition. By doing so
3826 * two bpf_map_lookups will be considered two different pointers that
3827 * point to different bpf_spin_locks.
 3828 * The verifier allows taking only one bpf_spin_lock at a time to avoid
 3829 * deadlocks.
 3830 * Since only one bpf_spin_lock is allowed, the checks are simpler than
 3831 * the reg_is_refcounted() logic. The verifier needs to remember only
 3832 * one spin_lock instead of an array of acquired_refs.
3833 * cur_state->active_spin_lock remembers which map value element got locked
3834 * and clears it after bpf_spin_unlock.
3835 */
3836static int process_spin_lock(struct bpf_verifier_env *env, int regno,
3837 bool is_lock)
3838{
3839 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3840 struct bpf_verifier_state *cur = env->cur_state;
3841 bool is_const = tnum_is_const(reg->var_off);
3842 struct bpf_map *map = reg->map_ptr;
3843 u64 val = reg->var_off.value;
3844
d83525ca
AS
3845 if (!is_const) {
3846 verbose(env,
3847 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
3848 regno);
3849 return -EINVAL;
3850 }
3851 if (!map->btf) {
3852 verbose(env,
3853 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
3854 map->name);
3855 return -EINVAL;
3856 }
3857 if (!map_value_has_spin_lock(map)) {
3858 if (map->spin_lock_off == -E2BIG)
3859 verbose(env,
3860 "map '%s' has more than one 'struct bpf_spin_lock'\n",
3861 map->name);
3862 else if (map->spin_lock_off == -ENOENT)
3863 verbose(env,
3864 "map '%s' doesn't have 'struct bpf_spin_lock'\n",
3865 map->name);
3866 else
3867 verbose(env,
3868 "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
3869 map->name);
3870 return -EINVAL;
3871 }
3872 if (map->spin_lock_off != val + reg->off) {
3873 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
3874 val + reg->off);
3875 return -EINVAL;
3876 }
3877 if (is_lock) {
3878 if (cur->active_spin_lock) {
3879 verbose(env,
3880 "Locking two bpf_spin_locks are not allowed\n");
3881 return -EINVAL;
3882 }
3883 cur->active_spin_lock = reg->id;
3884 } else {
3885 if (!cur->active_spin_lock) {
3886 verbose(env, "bpf_spin_unlock without taking a lock\n");
3887 return -EINVAL;
3888 }
3889 if (cur->active_spin_lock != reg->id) {
3890 verbose(env, "bpf_spin_unlock of different lock\n");
3891 return -EINVAL;
3892 }
3893 cur->active_spin_lock = 0;
3894 }
3895 return 0;
3896}
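/* Typical usage from a BPF program (illustrative C sketch; assumes a
 * map value type with an embedded 'struct bpf_spin_lock'):
 *   struct val { int cnt; struct bpf_spin_lock lock; };
 *   ...
 *   struct val *v = bpf_map_lookup_elem(&map, &key);
 *   if (v) {
 *           bpf_spin_lock(&v->lock);
 *           v->cnt++;
 *           bpf_spin_unlock(&v->lock);
 *   }
 * Lock and unlock must operate on the same reg->id, which is what
 * cur->active_spin_lock records above.
 */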
3897
90133415
DB
3898static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
3899{
3900 return type == ARG_PTR_TO_MEM ||
3901 type == ARG_PTR_TO_MEM_OR_NULL ||
3902 type == ARG_PTR_TO_UNINIT_MEM;
3903}
3904
3905static bool arg_type_is_mem_size(enum bpf_arg_type type)
3906{
3907 return type == ARG_CONST_SIZE ||
3908 type == ARG_CONST_SIZE_OR_ZERO;
3909}
3910
457f4436
AN
3911static bool arg_type_is_alloc_size(enum bpf_arg_type type)
3912{
3913 return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
3914}
3915
57c3bb72
AI
3916static bool arg_type_is_int_ptr(enum bpf_arg_type type)
3917{
3918 return type == ARG_PTR_TO_INT ||
3919 type == ARG_PTR_TO_LONG;
3920}
3921
3922static int int_ptr_type_to_size(enum bpf_arg_type type)
3923{
3924 if (type == ARG_PTR_TO_INT)
3925 return sizeof(u32);
3926 else if (type == ARG_PTR_TO_LONG)
3927 return sizeof(u64);
3928
3929 return -EINVAL;
3930}
3931
912f442c
LB
3932static int resolve_map_arg_type(struct bpf_verifier_env *env,
3933 const struct bpf_call_arg_meta *meta,
3934 enum bpf_arg_type *arg_type)
3935{
3936 if (!meta->map_ptr) {
3937 /* kernel subsystem misconfigured verifier */
3938 verbose(env, "invalid map_ptr to access map->type\n");
3939 return -EACCES;
3940 }
3941
3942 switch (meta->map_ptr->map_type) {
3943 case BPF_MAP_TYPE_SOCKMAP:
3944 case BPF_MAP_TYPE_SOCKHASH:
3945 if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
6550f2dd 3946 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
912f442c
LB
3947 } else {
3948 verbose(env, "invalid arg_type for sockmap/sockhash\n");
3949 return -EINVAL;
3950 }
3951 break;
3952
3953 default:
3954 break;
3955 }
3956 return 0;
3957}
3958
f79e7ea5
LB
3959struct bpf_reg_types {
3960 const enum bpf_reg_type types[10];
1df8f55a 3961 u32 *btf_id;
f79e7ea5
LB
3962};
3963
3964static const struct bpf_reg_types map_key_value_types = {
3965 .types = {
3966 PTR_TO_STACK,
3967 PTR_TO_PACKET,
3968 PTR_TO_PACKET_META,
3969 PTR_TO_MAP_VALUE,
3970 },
3971};
3972
3973static const struct bpf_reg_types sock_types = {
3974 .types = {
3975 PTR_TO_SOCK_COMMON,
3976 PTR_TO_SOCKET,
3977 PTR_TO_TCP_SOCK,
3978 PTR_TO_XDP_SOCK,
3979 },
3980};
3981
1df8f55a
MKL
3982static const struct bpf_reg_types btf_id_sock_common_types = {
3983 .types = {
3984 PTR_TO_SOCK_COMMON,
3985 PTR_TO_SOCKET,
3986 PTR_TO_TCP_SOCK,
3987 PTR_TO_XDP_SOCK,
3988 PTR_TO_BTF_ID,
3989 },
3990 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
3991};
3992
f79e7ea5
LB
3993static const struct bpf_reg_types mem_types = {
3994 .types = {
3995 PTR_TO_STACK,
3996 PTR_TO_PACKET,
3997 PTR_TO_PACKET_META,
3998 PTR_TO_MAP_VALUE,
3999 PTR_TO_MEM,
4000 PTR_TO_RDONLY_BUF,
4001 PTR_TO_RDWR_BUF,
4002 },
4003};
4004
4005static const struct bpf_reg_types int_ptr_types = {
4006 .types = {
4007 PTR_TO_STACK,
4008 PTR_TO_PACKET,
4009 PTR_TO_PACKET_META,
4010 PTR_TO_MAP_VALUE,
4011 },
4012};
4013
4014static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
4015static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
4016static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
4017static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } };
4018static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
4019static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
4020static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
4021
0789e13b 4022static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
f79e7ea5
LB
4023 [ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
4024 [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types,
4025 [ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types,
4026 [ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types,
4027 [ARG_CONST_SIZE] = &scalar_types,
4028 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
4029 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
4030 [ARG_CONST_MAP_PTR] = &const_map_ptr_types,
4031 [ARG_PTR_TO_CTX] = &context_types,
4032 [ARG_PTR_TO_CTX_OR_NULL] = &context_types,
4033 [ARG_PTR_TO_SOCK_COMMON] = &sock_types,
1df8f55a 4034 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types,
f79e7ea5
LB
4035 [ARG_PTR_TO_SOCKET] = &fullsock_types,
4036 [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types,
4037 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
4038 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
4039 [ARG_PTR_TO_MEM] = &mem_types,
4040 [ARG_PTR_TO_MEM_OR_NULL] = &mem_types,
4041 [ARG_PTR_TO_UNINIT_MEM] = &mem_types,
4042 [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types,
4043 [ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types,
4044 [ARG_PTR_TO_INT] = &int_ptr_types,
4045 [ARG_PTR_TO_LONG] = &int_ptr_types,
f79e7ea5
LB
4046};
4047
4048static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
a968d5e2
MKL
4049 enum bpf_arg_type arg_type,
4050 const u32 *arg_btf_id)
f79e7ea5
LB
4051{
4052 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4053 enum bpf_reg_type expected, type = reg->type;
a968d5e2 4054 const struct bpf_reg_types *compatible;
f79e7ea5
LB
4055 int i, j;
4056
a968d5e2
MKL
4057 compatible = compatible_reg_types[arg_type];
4058 if (!compatible) {
4059 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
4060 return -EFAULT;
4061 }
4062
f79e7ea5
LB
4063 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
4064 expected = compatible->types[i];
4065 if (expected == NOT_INIT)
4066 break;
4067
4068 if (type == expected)
a968d5e2 4069 goto found;
f79e7ea5
LB
4070 }
4071
4072 verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
4073 for (j = 0; j + 1 < i; j++)
4074 verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
4075 verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
4076 return -EACCES;
a968d5e2
MKL
4077
4078found:
4079 if (type == PTR_TO_BTF_ID) {
1df8f55a
MKL
4080 if (!arg_btf_id) {
4081 if (!compatible->btf_id) {
4082 verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
4083 return -EFAULT;
4084 }
4085 arg_btf_id = compatible->btf_id;
4086 }
4087
a968d5e2
MKL
4088 if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id,
4089 *arg_btf_id)) {
4090 verbose(env, "R%d is of type %s but %s is expected\n",
4091 regno, kernel_type_name(reg->btf_id),
4092 kernel_type_name(*arg_btf_id));
4093 return -EACCES;
4094 }
4095
4096 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4097 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
4098 regno);
4099 return -EACCES;
4100 }
4101 }
4102
4103 return 0;
f79e7ea5
LB
4104}
4105
af7ec138
YS
4106static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
4107 struct bpf_call_arg_meta *meta,
4108 const struct bpf_func_proto *fn)
17a52670 4109{
af7ec138 4110 u32 regno = BPF_REG_1 + arg;
638f5b90 4111 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
af7ec138 4112 enum bpf_arg_type arg_type = fn->arg_type[arg];
f79e7ea5 4113 enum bpf_reg_type type = reg->type;
17a52670
AS
4114 int err = 0;
4115
80f1d68c 4116 if (arg_type == ARG_DONTCARE)
17a52670
AS
4117 return 0;
4118
dc503a8a
EC
4119 err = check_reg_arg(env, regno, SRC_OP);
4120 if (err)
4121 return err;
17a52670 4122
1be7f75d
AS
4123 if (arg_type == ARG_ANYTHING) {
4124 if (is_pointer_value(env, regno)) {
61bd5218
JK
4125 verbose(env, "R%d leaks addr into helper function\n",
4126 regno);
1be7f75d
AS
4127 return -EACCES;
4128 }
80f1d68c 4129 return 0;
1be7f75d 4130 }
80f1d68c 4131
de8f3a83 4132 if (type_is_pkt_pointer(type) &&
3a0af8fd 4133 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
61bd5218 4134 verbose(env, "helper access to the packet is not allowed\n");
6841de8b
AS
4135 return -EACCES;
4136 }
4137
912f442c
LB
4138 if (arg_type == ARG_PTR_TO_MAP_VALUE ||
4139 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
4140 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
4141 err = resolve_map_arg_type(env, meta, &arg_type);
4142 if (err)
4143 return err;
4144 }
4145
fd1b0d60
LB
4146 if (register_is_null(reg) && arg_type_may_be_null(arg_type))
4147 /* A NULL register has a SCALAR_VALUE type, so skip
4148 * type checking.
4149 */
4150 goto skip_type_check;
4151
a968d5e2 4152 err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]);
f79e7ea5
LB
4153 if (err)
4154 return err;
4155
a968d5e2 4156 if (type == PTR_TO_CTX) {
feec7040
LB
4157 err = check_ctx_reg(env, reg, regno);
4158 if (err < 0)
4159 return err;
d7b9454a
LB
4160 }
4161
fd1b0d60 4162skip_type_check:
02f7c958 4163 if (reg->ref_obj_id) {
457f4436
AN
4164 if (meta->ref_obj_id) {
4165 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
4166 regno, reg->ref_obj_id,
4167 meta->ref_obj_id);
4168 return -EFAULT;
4169 }
4170 meta->ref_obj_id = reg->ref_obj_id;
17a52670
AS
4171 }
4172
17a52670
AS
4173 if (arg_type == ARG_CONST_MAP_PTR) {
4174 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
33ff9823 4175 meta->map_ptr = reg->map_ptr;
17a52670
AS
4176 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
4177 /* bpf_map_xxx(..., map_ptr, ..., key) call:
4178 * check that [key, key + map->key_size) are within
4179 * stack limits and initialized
4180 */
33ff9823 4181 if (!meta->map_ptr) {
17a52670
AS
4182 /* in function declaration map_ptr must come before
4183 * map_key, so that it's verified and known before
4184 * we have to check map_key here. Otherwise it means
4185 * that kernel subsystem misconfigured verifier
4186 */
61bd5218 4187 verbose(env, "invalid map_ptr to access map->key\n");
17a52670
AS
4188 return -EACCES;
4189 }
d71962f3
PC
4190 err = check_helper_mem_access(env, regno,
4191 meta->map_ptr->key_size, false,
4192 NULL);
2ea864c5 4193 } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
6ac99e8f
MKL
4194 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
4195 !register_is_null(reg)) ||
2ea864c5 4196 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
17a52670
AS
4197 /* bpf_map_xxx(..., map_ptr, ..., value) call:
4198 * check [value, value + map->value_size) validity
4199 */
33ff9823 4200 if (!meta->map_ptr) {
17a52670 4201 /* kernel subsystem misconfigured verifier */
61bd5218 4202 verbose(env, "invalid map_ptr to access map->value\n");
17a52670
AS
4203 return -EACCES;
4204 }
2ea864c5 4205 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
d71962f3
PC
4206 err = check_helper_mem_access(env, regno,
4207 meta->map_ptr->value_size, false,
2ea864c5 4208 meta);
c18f0b6a
LB
4209 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
4210 if (meta->func_id == BPF_FUNC_spin_lock) {
4211 if (process_spin_lock(env, regno, true))
4212 return -EACCES;
4213 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
4214 if (process_spin_lock(env, regno, false))
4215 return -EACCES;
4216 } else {
4217 verbose(env, "verifier internal error\n");
4218 return -EFAULT;
4219 }
a2bbe7cc
LB
4220 } else if (arg_type_is_mem_ptr(arg_type)) {
4221 /* The access to this pointer is only checked when we hit the
4222 * next is_mem_size argument below.
4223 */
4224 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM);
90133415 4225 } else if (arg_type_is_mem_size(arg_type)) {
39f19ebb 4226 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
17a52670 4227
10060503
JF
4228 /* This is used to refine r0 return value bounds for helpers
4229 * that enforce this value as an upper bound on return values.
4230 * See do_refine_retval_range() for helpers that can refine
 4231 * the return value. The C type of the helper's size argument is u32,
 4232 * so we pull the register bound from umax_value; if it is negative,
 4233 * the verifier errors out. Only upper bounds can be learned because
 4234 * the retval is an int type and negative retvals are allowed.
849fa506 4235 */
10060503 4236 meta->msize_max_value = reg->umax_value;
849fa506 4237
f1174f77
EC
4238 /* The register is SCALAR_VALUE; the access check
4239 * happens using its boundaries.
06c1c049 4240 */
f1174f77 4241 if (!tnum_is_const(reg->var_off))
06c1c049
GB
4242 /* For unprivileged variable accesses, disable raw
4243 * mode so that the program is required to
4244 * initialize all the memory that the helper could
4245 * just partially fill up.
4246 */
4247 meta = NULL;
4248
b03c9f9f 4249 if (reg->smin_value < 0) {
61bd5218 4250 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
f1174f77
EC
4251 regno);
4252 return -EACCES;
4253 }
06c1c049 4254
b03c9f9f 4255 if (reg->umin_value == 0) {
f1174f77
EC
4256 err = check_helper_mem_access(env, regno - 1, 0,
4257 zero_size_allowed,
4258 meta);
06c1c049
GB
4259 if (err)
4260 return err;
06c1c049 4261 }
f1174f77 4262
b03c9f9f 4263 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
61bd5218 4264 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
f1174f77
EC
4265 regno);
4266 return -EACCES;
4267 }
4268 err = check_helper_mem_access(env, regno - 1,
b03c9f9f 4269 reg->umax_value,
f1174f77 4270 zero_size_allowed, meta);
b5dc0163
AS
4271 if (!err)
4272 err = mark_chain_precision(env, regno);
457f4436
AN
4273 } else if (arg_type_is_alloc_size(arg_type)) {
4274 if (!tnum_is_const(reg->var_off)) {
4275 verbose(env, "R%d unbounded size, use 'var &= const' or 'if (var < const)'\n",
4276 regno);
4277 return -EACCES;
4278 }
4279 meta->mem_size = reg->var_off.value;
57c3bb72
AI
4280 } else if (arg_type_is_int_ptr(arg_type)) {
4281 int size = int_ptr_type_to_size(arg_type);
4282
4283 err = check_helper_mem_access(env, regno, size, false, meta);
4284 if (err)
4285 return err;
4286 err = check_ptr_alignment(env, reg, 0, size, true);
17a52670
AS
4287 }
4288
4289 return err;
4290}
4291
0126240f
LB
4292static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
4293{
4294 enum bpf_attach_type eatype = env->prog->expected_attach_type;
7e40781c 4295 enum bpf_prog_type type = resolve_prog_type(env->prog);
0126240f
LB
4296
4297 if (func_id != BPF_FUNC_map_update_elem)
4298 return false;
4299
4300 /* It's not possible to get access to a locked struct sock in these
4301 * contexts, so updating is safe.
4302 */
4303 switch (type) {
4304 case BPF_PROG_TYPE_TRACING:
4305 if (eatype == BPF_TRACE_ITER)
4306 return true;
4307 break;
4308 case BPF_PROG_TYPE_SOCKET_FILTER:
4309 case BPF_PROG_TYPE_SCHED_CLS:
4310 case BPF_PROG_TYPE_SCHED_ACT:
4311 case BPF_PROG_TYPE_XDP:
4312 case BPF_PROG_TYPE_SK_REUSEPORT:
4313 case BPF_PROG_TYPE_FLOW_DISSECTOR:
4314 case BPF_PROG_TYPE_SK_LOOKUP:
4315 return true;
4316 default:
4317 break;
4318 }
4319
4320 verbose(env, "cannot update sockmap in this context\n");
4321 return false;
4322}
4323
e411901c
MF
4324static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
4325{
4326 return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
4327}
4328
61bd5218
JK
4329static int check_map_func_compatibility(struct bpf_verifier_env *env,
4330 struct bpf_map *map, int func_id)
35578d79 4331{
35578d79
KX
4332 if (!map)
4333 return 0;
4334
6aff67c8
AS
4335 /* We need a two way check, first is from map perspective ... */
4336 switch (map->map_type) {
4337 case BPF_MAP_TYPE_PROG_ARRAY:
4338 if (func_id != BPF_FUNC_tail_call)
4339 goto error;
4340 break;
4341 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
4342 if (func_id != BPF_FUNC_perf_event_read &&
908432ca 4343 func_id != BPF_FUNC_perf_event_output &&
a7658e1a 4344 func_id != BPF_FUNC_skb_output &&
d831ee84
EC
4345 func_id != BPF_FUNC_perf_event_read_value &&
4346 func_id != BPF_FUNC_xdp_output)
6aff67c8
AS
4347 goto error;
4348 break;
457f4436
AN
4349 case BPF_MAP_TYPE_RINGBUF:
4350 if (func_id != BPF_FUNC_ringbuf_output &&
4351 func_id != BPF_FUNC_ringbuf_reserve &&
4352 func_id != BPF_FUNC_ringbuf_submit &&
4353 func_id != BPF_FUNC_ringbuf_discard &&
4354 func_id != BPF_FUNC_ringbuf_query)
4355 goto error;
4356 break;
6aff67c8
AS
4357 case BPF_MAP_TYPE_STACK_TRACE:
4358 if (func_id != BPF_FUNC_get_stackid)
4359 goto error;
4360 break;
4ed8ec52 4361 case BPF_MAP_TYPE_CGROUP_ARRAY:
60747ef4 4362 if (func_id != BPF_FUNC_skb_under_cgroup &&
60d20f91 4363 func_id != BPF_FUNC_current_task_under_cgroup)
4a482f34
MKL
4364 goto error;
4365 break;
cd339431 4366 case BPF_MAP_TYPE_CGROUP_STORAGE:
b741f163 4367 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
cd339431
RG
4368 if (func_id != BPF_FUNC_get_local_storage)
4369 goto error;
4370 break;
546ac1ff 4371 case BPF_MAP_TYPE_DEVMAP:
6f9d451a 4372 case BPF_MAP_TYPE_DEVMAP_HASH:
0cdbb4b0
THJ
4373 if (func_id != BPF_FUNC_redirect_map &&
4374 func_id != BPF_FUNC_map_lookup_elem)
546ac1ff
JF
4375 goto error;
4376 break;
fbfc504a
BT
4377 /* Restrict bpf side of cpumap and xskmap, open when use-cases
4378 * appear.
4379 */
6710e112
JDB
4380 case BPF_MAP_TYPE_CPUMAP:
4381 if (func_id != BPF_FUNC_redirect_map)
4382 goto error;
4383 break;
fada7fdc
JL
4384 case BPF_MAP_TYPE_XSKMAP:
4385 if (func_id != BPF_FUNC_redirect_map &&
4386 func_id != BPF_FUNC_map_lookup_elem)
4387 goto error;
4388 break;
56f668df 4389 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
bcc6b1b7 4390 case BPF_MAP_TYPE_HASH_OF_MAPS:
56f668df
MKL
4391 if (func_id != BPF_FUNC_map_lookup_elem)
4392 goto error;
16a43625 4393 break;
174a79ff
JF
4394 case BPF_MAP_TYPE_SOCKMAP:
4395 if (func_id != BPF_FUNC_sk_redirect_map &&
4396 func_id != BPF_FUNC_sock_map_update &&
4f738adb 4397 func_id != BPF_FUNC_map_delete_elem &&
9fed9000 4398 func_id != BPF_FUNC_msg_redirect_map &&
64d85290 4399 func_id != BPF_FUNC_sk_select_reuseport &&
0126240f
LB
4400 func_id != BPF_FUNC_map_lookup_elem &&
4401 !may_update_sockmap(env, func_id))
174a79ff
JF
4402 goto error;
4403 break;
81110384
JF
4404 case BPF_MAP_TYPE_SOCKHASH:
4405 if (func_id != BPF_FUNC_sk_redirect_hash &&
4406 func_id != BPF_FUNC_sock_hash_update &&
4407 func_id != BPF_FUNC_map_delete_elem &&
9fed9000 4408 func_id != BPF_FUNC_msg_redirect_hash &&
64d85290 4409 func_id != BPF_FUNC_sk_select_reuseport &&
0126240f
LB
4410 func_id != BPF_FUNC_map_lookup_elem &&
4411 !may_update_sockmap(env, func_id))
81110384
JF
4412 goto error;
4413 break;
2dbb9b9e
MKL
4414 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
4415 if (func_id != BPF_FUNC_sk_select_reuseport)
4416 goto error;
4417 break;
f1a2e44a
MV
4418 case BPF_MAP_TYPE_QUEUE:
4419 case BPF_MAP_TYPE_STACK:
4420 if (func_id != BPF_FUNC_map_peek_elem &&
4421 func_id != BPF_FUNC_map_pop_elem &&
4422 func_id != BPF_FUNC_map_push_elem)
4423 goto error;
4424 break;
6ac99e8f
MKL
4425 case BPF_MAP_TYPE_SK_STORAGE:
4426 if (func_id != BPF_FUNC_sk_storage_get &&
4427 func_id != BPF_FUNC_sk_storage_delete)
4428 goto error;
4429 break;
8ea63684
KS
4430 case BPF_MAP_TYPE_INODE_STORAGE:
4431 if (func_id != BPF_FUNC_inode_storage_get &&
4432 func_id != BPF_FUNC_inode_storage_delete)
4433 goto error;
4434 break;
6aff67c8
AS
4435 default:
4436 break;
4437 }
4438
4439 /* ... and second from the function itself. */
4440 switch (func_id) {
4441 case BPF_FUNC_tail_call:
4442 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
4443 goto error;
e411901c
MF
4444 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
4445 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
f4d7e40a
AS
4446 return -EINVAL;
4447 }
6aff67c8
AS
4448 break;
4449 case BPF_FUNC_perf_event_read:
4450 case BPF_FUNC_perf_event_output:
908432ca 4451 case BPF_FUNC_perf_event_read_value:
a7658e1a 4452 case BPF_FUNC_skb_output:
d831ee84 4453 case BPF_FUNC_xdp_output:
6aff67c8
AS
4454 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
4455 goto error;
4456 break;
4457 case BPF_FUNC_get_stackid:
4458 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
4459 goto error;
4460 break;
60d20f91 4461 case BPF_FUNC_current_task_under_cgroup:
747ea55e 4462 case BPF_FUNC_skb_under_cgroup:
4a482f34
MKL
4463 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
4464 goto error;
4465 break;
97f91a7c 4466 case BPF_FUNC_redirect_map:
9c270af3 4467 if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
6f9d451a 4468 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
fbfc504a
BT
4469 map->map_type != BPF_MAP_TYPE_CPUMAP &&
4470 map->map_type != BPF_MAP_TYPE_XSKMAP)
97f91a7c
JF
4471 goto error;
4472 break;
174a79ff 4473 case BPF_FUNC_sk_redirect_map:
4f738adb 4474 case BPF_FUNC_msg_redirect_map:
81110384 4475 case BPF_FUNC_sock_map_update:
174a79ff
JF
4476 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
4477 goto error;
4478 break;
81110384
JF
4479 case BPF_FUNC_sk_redirect_hash:
4480 case BPF_FUNC_msg_redirect_hash:
4481 case BPF_FUNC_sock_hash_update:
4482 if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
174a79ff
JF
4483 goto error;
4484 break;
cd339431 4485 case BPF_FUNC_get_local_storage:
b741f163
RG
4486 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
4487 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
cd339431
RG
4488 goto error;
4489 break;
2dbb9b9e 4490 case BPF_FUNC_sk_select_reuseport:
9fed9000
JS
4491 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
4492 map->map_type != BPF_MAP_TYPE_SOCKMAP &&
4493 map->map_type != BPF_MAP_TYPE_SOCKHASH)
2dbb9b9e
MKL
4494 goto error;
4495 break;
f1a2e44a
MV
4496 case BPF_FUNC_map_peek_elem:
4497 case BPF_FUNC_map_pop_elem:
4498 case BPF_FUNC_map_push_elem:
4499 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
4500 map->map_type != BPF_MAP_TYPE_STACK)
4501 goto error;
4502 break;
6ac99e8f
MKL
4503 case BPF_FUNC_sk_storage_get:
4504 case BPF_FUNC_sk_storage_delete:
4505 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
4506 goto error;
4507 break;
8ea63684
KS
4508 case BPF_FUNC_inode_storage_get:
4509 case BPF_FUNC_inode_storage_delete:
4510 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
4511 goto error;
4512 break;
6aff67c8
AS
4513 default:
4514 break;
35578d79
KX
4515 }
4516
4517 return 0;
6aff67c8 4518error:
61bd5218 4519 verbose(env, "cannot pass map_type %d into func %s#%d\n",
ebb676da 4520 map->map_type, func_id_name(func_id), func_id);
6aff67c8 4521 return -EINVAL;
35578d79
KX
4522}
4523
90133415 4524static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
435faee1
DB
4525{
4526 int count = 0;
4527
39f19ebb 4528 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 4529 count++;
39f19ebb 4530 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 4531 count++;
39f19ebb 4532 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 4533 count++;
39f19ebb 4534 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 4535 count++;
39f19ebb 4536 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
435faee1
DB
4537 count++;
4538
90133415
DB
4539 /* We only support one arg being in raw mode at the moment,
4540 * which is sufficient for the helper functions we have
4541 * right now.
4542 */
4543 return count <= 1;
4544}
4545
4546static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
4547 enum bpf_arg_type arg_next)
4548{
4549 return (arg_type_is_mem_ptr(arg_curr) &&
4550 !arg_type_is_mem_size(arg_next)) ||
4551 (!arg_type_is_mem_ptr(arg_curr) &&
4552 arg_type_is_mem_size(arg_next));
4553}
4554
4555static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
4556{
4557 /* bpf_xxx(..., buf, len) call will access 'len'
4558 * bytes from memory 'buf'. Both arg types need
4559 * to be paired, so make sure there's no buggy
4560 * helper function specification.
4561 */
4562 if (arg_type_is_mem_size(fn->arg1_type) ||
4563 arg_type_is_mem_ptr(fn->arg5_type) ||
4564 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
4565 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
4566 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
4567 check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
4568 return false;
4569
4570 return true;
4571}
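/* Example of a correctly paired proto (illustrative, modeled on
 * existing helpers):
 *   .arg2_type = ARG_PTR_TO_MEM,
 *   .arg3_type = ARG_CONST_SIZE_OR_ZERO,
 * arg2 supplies the buffer and the immediately following arg3 its
 * length; a size argument without a preceding mem pointer (or vice
 * versa) fails this check.
 */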
4572
1b986589 4573static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
fd978bf7
JS
4574{
4575 int count = 0;
4576
1b986589 4577 if (arg_type_may_be_refcounted(fn->arg1_type))
fd978bf7 4578 count++;
1b986589 4579 if (arg_type_may_be_refcounted(fn->arg2_type))
fd978bf7 4580 count++;
1b986589 4581 if (arg_type_may_be_refcounted(fn->arg3_type))
fd978bf7 4582 count++;
1b986589 4583 if (arg_type_may_be_refcounted(fn->arg4_type))
fd978bf7 4584 count++;
1b986589 4585 if (arg_type_may_be_refcounted(fn->arg5_type))
fd978bf7
JS
4586 count++;
4587
1b986589
MKL
4588 /* A reference acquiring function cannot acquire
4589 * another refcounted ptr.
4590 */
64d85290 4591 if (may_be_acquire_function(func_id) && count)
1b986589
MKL
4592 return false;
4593
fd978bf7
JS
4594 /* We only support one arg being unreferenced at the moment,
4595 * which is sufficient for the helper functions we have right now.
4596 */
4597 return count <= 1;
4598}
4599
9436ef6e
LB
4600static bool check_btf_id_ok(const struct bpf_func_proto *fn)
4601{
4602 int i;
4603
1df8f55a 4604 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
9436ef6e
LB
4605 if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
4606 return false;
4607
1df8f55a
MKL
4608 if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
4609 return false;
4610 }
4611
9436ef6e
LB
4612 return true;
4613}
4614
1b986589 4615static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
90133415
DB
4616{
4617 return check_raw_mode_ok(fn) &&
fd978bf7 4618 check_arg_pair_ok(fn) &&
9436ef6e 4619 check_btf_id_ok(fn) &&
1b986589 4620 check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
435faee1
DB
4621}
4622
de8f3a83
DB
4623/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
4624 * are now invalid, so turn them into unknown SCALAR_VALUE.
f1174f77 4625 */
f4d7e40a
AS
4626static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
4627 struct bpf_func_state *state)
969bf05e 4628{
58e2af8b 4629 struct bpf_reg_state *regs = state->regs, *reg;
969bf05e
AS
4630 int i;
4631
4632 for (i = 0; i < MAX_BPF_REG; i++)
de8f3a83 4633 if (reg_is_pkt_pointer_any(&regs[i]))
61bd5218 4634 mark_reg_unknown(env, regs, i);
969bf05e 4635
f3709f69
JS
4636 bpf_for_each_spilled_reg(i, state, reg) {
4637 if (!reg)
969bf05e 4638 continue;
de8f3a83 4639 if (reg_is_pkt_pointer_any(reg))
f54c7898 4640 __mark_reg_unknown(env, reg);
969bf05e
AS
4641 }
4642}
4643
f4d7e40a
AS
4644static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
4645{
4646 struct bpf_verifier_state *vstate = env->cur_state;
4647 int i;
4648
4649 for (i = 0; i <= vstate->curframe; i++)
4650 __clear_all_pkt_pointers(env, vstate->frame[i]);
4651}
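/* Illustrative trigger (BPF-side C sketch): after a helper that may
 * reallocate packet memory, e.g.
 *   bpf_skb_store_bytes(skb, off, &buf, sizeof(buf), 0);
 * every previously derived data/data_end pointer in every frame is
 * downgraded to an unknown scalar by the loops above and must be
 * re-derived from the context.
 */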
4652
fd978bf7 4653static void release_reg_references(struct bpf_verifier_env *env,
1b986589
MKL
4654 struct bpf_func_state *state,
4655 int ref_obj_id)
fd978bf7
JS
4656{
4657 struct bpf_reg_state *regs = state->regs, *reg;
4658 int i;
4659
4660 for (i = 0; i < MAX_BPF_REG; i++)
1b986589 4661 if (regs[i].ref_obj_id == ref_obj_id)
fd978bf7
JS
4662 mark_reg_unknown(env, regs, i);
4663
4664 bpf_for_each_spilled_reg(i, state, reg) {
4665 if (!reg)
4666 continue;
1b986589 4667 if (reg->ref_obj_id == ref_obj_id)
f54c7898 4668 __mark_reg_unknown(env, reg);
fd978bf7
JS
4669 }
4670}
4671
4672/* The pointer with the specified id has released its reference to kernel
4673 * resources. Identify all copies of the same pointer and clear the reference.
4674 */
4675static int release_reference(struct bpf_verifier_env *env,
1b986589 4676 int ref_obj_id)
fd978bf7
JS
4677{
4678 struct bpf_verifier_state *vstate = env->cur_state;
1b986589 4679 int err;
fd978bf7
JS
4680 int i;
4681
1b986589
MKL
4682 err = release_reference_state(cur_func(env), ref_obj_id);
4683 if (err)
4684 return err;
4685
fd978bf7 4686 for (i = 0; i <= vstate->curframe; i++)
1b986589 4687 release_reg_references(env, vstate->frame[i], ref_obj_id);
fd978bf7 4688
1b986589 4689 return 0;
fd978bf7
JS
4690}
4691
51c39bb1
AS
4692static void clear_caller_saved_regs(struct bpf_verifier_env *env,
4693 struct bpf_reg_state *regs)
4694{
4695 int i;
4696
4697 /* after the call registers r0 - r5 were scratched */
4698 for (i = 0; i < CALLER_SAVED_REGS; i++) {
4699 mark_reg_not_init(env, regs, caller_saved[i]);
4700 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4701 }
4702}
4703
f4d7e40a
AS
4704static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
4705 int *insn_idx)
4706{
4707 struct bpf_verifier_state *state = env->cur_state;
51c39bb1 4708 struct bpf_func_info_aux *func_info_aux;
f4d7e40a 4709 struct bpf_func_state *caller, *callee;
fd978bf7 4710 int i, err, subprog, target_insn;
51c39bb1 4711 bool is_global = false;
f4d7e40a 4712
aada9ce6 4713 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
f4d7e40a 4714 verbose(env, "the call stack of %d frames is too deep\n",
aada9ce6 4715 state->curframe + 2);
f4d7e40a
AS
4716 return -E2BIG;
4717 }
4718
4719 target_insn = *insn_idx + insn->imm;
4720 subprog = find_subprog(env, target_insn + 1);
4721 if (subprog < 0) {
4722 verbose(env, "verifier bug. No program starts at insn %d\n",
4723 target_insn + 1);
4724 return -EFAULT;
4725 }
4726
4727 caller = state->frame[state->curframe];
4728 if (state->frame[state->curframe + 1]) {
4729 verbose(env, "verifier bug. Frame %d already allocated\n",
4730 state->curframe + 1);
4731 return -EFAULT;
4732 }
4733
51c39bb1
AS
4734 func_info_aux = env->prog->aux->func_info_aux;
4735 if (func_info_aux)
4736 is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
4737 err = btf_check_func_arg_match(env, subprog, caller->regs);
4738 if (err == -EFAULT)
4739 return err;
4740 if (is_global) {
4741 if (err) {
4742 verbose(env, "Caller passes invalid args into func#%d\n",
4743 subprog);
4744 return err;
4745 } else {
4746 if (env->log.level & BPF_LOG_LEVEL)
4747 verbose(env,
4748 "Func#%d is global and valid. Skipping.\n",
4749 subprog);
4750 clear_caller_saved_regs(env, caller->regs);
4751
4752 /* All global functions return SCALAR_VALUE */
4753 mark_reg_unknown(env, caller->regs, BPF_REG_0);
4754
4755 /* continue with next insn after call */
4756 return 0;
4757 }
4758 }
4759
f4d7e40a
AS
4760 callee = kzalloc(sizeof(*callee), GFP_KERNEL);
4761 if (!callee)
4762 return -ENOMEM;
4763 state->frame[state->curframe + 1] = callee;
4764
4765 /* callee cannot access r0, r6 - r9 for reading and has to write
4766 * into its own stack before reading from it.
4767 * callee can read/write into caller's stack
4768 */
4769 init_func_state(env, callee,
4770 /* remember the callsite, it will be used by bpf_exit */
4771 *insn_idx /* callsite */,
4772 state->curframe + 1 /* frameno within this callchain */,
f910cefa 4773 subprog /* subprog number within this prog */);
f4d7e40a 4774
fd978bf7
JS
4775 /* Transfer references to the callee */
4776 err = transfer_reference_state(callee, caller);
4777 if (err)
4778 return err;
4779
679c782d
EC
4780 /* copy r1 - r5 args that callee can access. The copy includes parent
4781 * pointers, which connects us up to the liveness chain
4782 */
f4d7e40a
AS
4783 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
4784 callee->regs[i] = caller->regs[i];
4785
51c39bb1 4786 clear_caller_saved_regs(env, caller->regs);
f4d7e40a
AS
4787
4788 /* only increment it after check_reg_arg() finished */
4789 state->curframe++;
4790
4791 /* and go analyze first insn of the callee */
4792 *insn_idx = target_insn;
4793
06ee7115 4794 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4795 verbose(env, "caller:\n");
4796 print_verifier_state(env, caller);
4797 verbose(env, "callee:\n");
4798 print_verifier_state(env, callee);
4799 }
4800 return 0;
4801}
4802
4803static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
4804{
4805 struct bpf_verifier_state *state = env->cur_state;
4806 struct bpf_func_state *caller, *callee;
4807 struct bpf_reg_state *r0;
fd978bf7 4808 int err;
f4d7e40a
AS
4809
4810 callee = state->frame[state->curframe];
4811 r0 = &callee->regs[BPF_REG_0];
4812 if (r0->type == PTR_TO_STACK) {
4813 /* technically it's ok to return caller's stack pointer
4814 * (or caller's caller's pointer) back to the caller,
4815 * since these pointers are valid. Only current stack
4816 * pointer will be invalid as soon as function exits,
4817 * but let's be conservative
4818 */
4819 verbose(env, "cannot return stack pointer to the caller\n");
4820 return -EINVAL;
4821 }
4822
4823 state->curframe--;
4824 caller = state->frame[state->curframe];
4825 /* return to the caller whatever r0 had in the callee */
4826 caller->regs[BPF_REG_0] = *r0;
4827
fd978bf7
JS
4828 /* Transfer references to the caller */
4829 err = transfer_reference_state(caller, callee);
4830 if (err)
4831 return err;
4832
f4d7e40a 4833 *insn_idx = callee->callsite + 1;
06ee7115 4834 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4835 verbose(env, "returning from callee:\n");
4836 print_verifier_state(env, callee);
4837 verbose(env, "to caller at %d:\n", *insn_idx);
4838 print_verifier_state(env, caller);
4839 }
4840 /* clear everything in the callee */
4841 free_func_state(callee);
4842 state->frame[state->curframe + 1] = NULL;
4843 return 0;
4844}
4845
849fa506
YS
4846static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
4847 int func_id,
4848 struct bpf_call_arg_meta *meta)
4849{
4850 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
4851
4852 if (ret_type != RET_INTEGER ||
4853 (func_id != BPF_FUNC_get_stack &&
47cc0ed5
DB
4854 func_id != BPF_FUNC_probe_read_str &&
4855 func_id != BPF_FUNC_probe_read_kernel_str &&
4856 func_id != BPF_FUNC_probe_read_user_str))
849fa506
YS
4857 return;
4858
10060503 4859 ret_reg->smax_value = meta->msize_max_value;
fa123ac0 4860 ret_reg->s32_max_value = meta->msize_max_value;
849fa506
YS
4861 __reg_deduce_bounds(ret_reg);
4862 __reg_bound_offset(ret_reg);
10060503 4863 __update_reg_bounds(ret_reg);
849fa506
YS
4864}
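/* Example (illustrative): for
 *   ret = bpf_probe_read_kernel_str(buf, sizeof(buf), ptr);
 * the size argument's umax was recorded as msize_max_value, so r0 is
 * refined to smax <= sizeof(buf); negative error returns remain
 * possible, hence only the upper bound is learned.
 */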
4865
c93552c4
DB
4866static int
4867record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4868 int func_id, int insn_idx)
4869{
4870 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
591fe988 4871 struct bpf_map *map = meta->map_ptr;
c93552c4
DB
4872
4873 if (func_id != BPF_FUNC_tail_call &&
09772d92
DB
4874 func_id != BPF_FUNC_map_lookup_elem &&
4875 func_id != BPF_FUNC_map_update_elem &&
f1a2e44a
MV
4876 func_id != BPF_FUNC_map_delete_elem &&
4877 func_id != BPF_FUNC_map_push_elem &&
4878 func_id != BPF_FUNC_map_pop_elem &&
4879 func_id != BPF_FUNC_map_peek_elem)
c93552c4 4880 return 0;
09772d92 4881
591fe988 4882 if (map == NULL) {
c93552c4
DB
4883 verbose(env, "kernel subsystem misconfigured verifier\n");
4884 return -EINVAL;
4885 }
4886
591fe988
DB
4887 /* In case of read-only, some additional restrictions
4888 * need to be applied in order to prevent altering the
4889 * state of the map from program side.
4890 */
4891 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
4892 (func_id == BPF_FUNC_map_delete_elem ||
4893 func_id == BPF_FUNC_map_update_elem ||
4894 func_id == BPF_FUNC_map_push_elem ||
4895 func_id == BPF_FUNC_map_pop_elem)) {
4896 verbose(env, "write into map forbidden\n");
4897 return -EACCES;
4898 }
4899
d2e4c1e6 4900 if (!BPF_MAP_PTR(aux->map_ptr_state))
c93552c4 4901 bpf_map_ptr_store(aux, meta->map_ptr,
2c78ee89 4902 !meta->map_ptr->bypass_spec_v1);
d2e4c1e6 4903 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
c93552c4 4904 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2c78ee89 4905 !meta->map_ptr->bypass_spec_v1);
c93552c4
DB
4906 return 0;
4907}
4908
d2e4c1e6
DB
4909static int
4910record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4911 int func_id, int insn_idx)
4912{
4913 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
4914 struct bpf_reg_state *regs = cur_regs(env), *reg;
4915 struct bpf_map *map = meta->map_ptr;
4916 struct tnum range;
4917 u64 val;
cc52d914 4918 int err;
d2e4c1e6
DB
4919
4920 if (func_id != BPF_FUNC_tail_call)
4921 return 0;
4922 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
4923 verbose(env, "kernel subsystem misconfigured verifier\n");
4924 return -EINVAL;
4925 }
4926
4927 range = tnum_range(0, map->max_entries - 1);
4928 reg = &regs[BPF_REG_3];
4929
4930 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
4931 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4932 return 0;
4933 }
4934
cc52d914
DB
4935 err = mark_chain_precision(env, BPF_REG_3);
4936 if (err)
4937 return err;
4938
d2e4c1e6
DB
4939 val = reg->var_off.value;
4940 if (bpf_map_key_unseen(aux))
4941 bpf_map_key_store(aux, val);
4942 else if (!bpf_map_key_poisoned(aux) &&
4943 bpf_map_key_immediate(aux) != val)
4944 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4945 return 0;
4946}
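/* Standalone sketch of the key-tracking state machine above, with
 * hypothetical sentinel values: a tail_call index that is seen with
 * exactly one constant value stays "immediate" (and can later be patched
 * into a direct jump); a second, different constant or a non-constant
 * key poisons the slot.
 */
#include <stdint.h>

#define DEMO_KEY_UNSEEN	UINT64_MAX
#define DEMO_KEY_POISON	(UINT64_MAX - 1)

static void demo_record_key(uint64_t *state, uint64_t key)
{
	if (*state == DEMO_KEY_UNSEEN)
		*state = key;			/* first sighting */
	else if (*state != DEMO_KEY_POISON && *state != key)
		*state = DEMO_KEY_POISON;	/* conflicting constants */
}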
4947
fd978bf7
JS
4948static int check_reference_leak(struct bpf_verifier_env *env)
4949{
4950 struct bpf_func_state *state = cur_func(env);
4951 int i;
4952
4953 for (i = 0; i < state->acquired_refs; i++) {
4954 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
4955 state->refs[i].id, state->refs[i].insn_idx);
4956 }
4957 return state->acquired_refs ? -EINVAL : 0;
4958}
4959
f4d7e40a 4960static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
17a52670 4961{
17a52670 4962 const struct bpf_func_proto *fn = NULL;
638f5b90 4963 struct bpf_reg_state *regs;
33ff9823 4964 struct bpf_call_arg_meta meta;
969bf05e 4965 bool changes_data;
17a52670
AS
4966 int i, err;
4967
4968 /* find function prototype */
4969 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
61bd5218
JK
4970 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
4971 func_id);
17a52670
AS
4972 return -EINVAL;
4973 }
4974
00176a34 4975 if (env->ops->get_func_proto)
5e43f899 4976 fn = env->ops->get_func_proto(func_id, env->prog);
17a52670 4977 if (!fn) {
61bd5218
JK
4978 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
4979 func_id);
17a52670
AS
4980 return -EINVAL;
4981 }
4982
4983 /* eBPF programs must be GPL compatible to use GPL-ed functions */
24701ece 4984 if (!env->prog->gpl_compatible && fn->gpl_only) {
3fe2867c 4985 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
17a52670
AS
4986 return -EINVAL;
4987 }
4988
eae2e83e
JO
4989 if (fn->allowed && !fn->allowed(env->prog)) {
4990 verbose(env, "helper call is not allowed in probe\n");
4991 return -EINVAL;
4992 }
4993
04514d13 4994 /* With LD_ABS/IND some JITs save/restore skb from r1. */
17bedab2 4995 changes_data = bpf_helper_changes_pkt_data(fn->func);
04514d13
DB
4996 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
4997 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
4998 func_id_name(func_id), func_id);
4999 return -EINVAL;
5000 }
969bf05e 5001
33ff9823 5002 memset(&meta, 0, sizeof(meta));
36bbef52 5003 meta.pkt_access = fn->pkt_access;
33ff9823 5004
1b986589 5005 err = check_func_proto(fn, func_id);
435faee1 5006 if (err) {
61bd5218 5007 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
ebb676da 5008 func_id_name(func_id), func_id);
435faee1
DB
5009 return err;
5010 }
5011
d83525ca 5012 meta.func_id = func_id;
17a52670 5013 /* check args */
a7658e1a 5014 for (i = 0; i < 5; i++) {
af7ec138 5015 err = check_func_arg(env, i, &meta, fn);
a7658e1a
AS
5016 if (err)
5017 return err;
5018 }
17a52670 5019
c93552c4
DB
5020 err = record_func_map(env, &meta, func_id, insn_idx);
5021 if (err)
5022 return err;
5023
d2e4c1e6
DB
5024 err = record_func_key(env, &meta, func_id, insn_idx);
5025 if (err)
5026 return err;
5027
435faee1
DB
5028 /* Mark slots with STACK_MISC in case of raw mode, stack offset
5029 * is inferred from register state.
5030 */
5031 for (i = 0; i < meta.access_size; i++) {
ca369602
DB
5032 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
5033 BPF_WRITE, -1, false);
435faee1
DB
5034 if (err)
5035 return err;
5036 }
5037
fd978bf7
JS
5038 if (func_id == BPF_FUNC_tail_call) {
5039 err = check_reference_leak(env);
5040 if (err) {
5041 verbose(env, "tail_call would lead to reference leak\n");
5042 return err;
5043 }
5044 } else if (is_release_function(func_id)) {
1b986589 5045 err = release_reference(env, meta.ref_obj_id);
46f8bc92
MKL
5046 if (err) {
5047 verbose(env, "func %s#%d reference has not been acquired before\n",
5048 func_id_name(func_id), func_id);
fd978bf7 5049 return err;
46f8bc92 5050 }
fd978bf7
JS
5051 }
5052
638f5b90 5053 regs = cur_regs(env);
cd339431
RG
5054
5055 /* check that flags argument in get_local_storage(map, flags) is 0;
5056 * this is required because get_local_storage() can't return an error.
5057 */
5058 if (func_id == BPF_FUNC_get_local_storage &&
5059 !register_is_null(&regs[BPF_REG_2])) {
5060 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
5061 return -EINVAL;
5062 }
5063
17a52670 5064 /* reset caller saved regs */
dc503a8a 5065 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 5066 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
5067 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
5068 }
17a52670 5069
5327ed3d
JW
5070 /* helper call returns 64-bit value. */
5071 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
5072
dc503a8a 5073 /* update return register (already marked as written above) */
17a52670 5074 if (fn->ret_type == RET_INTEGER) {
f1174f77 5075 /* sets type to SCALAR_VALUE */
61bd5218 5076 mark_reg_unknown(env, regs, BPF_REG_0);
17a52670
AS
5077 } else if (fn->ret_type == RET_VOID) {
5078 regs[BPF_REG_0].type = NOT_INIT;
3e6a4b3e
RG
5079 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
5080 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
f1174f77 5081 /* There is no offset yet applied, variable or fixed */
61bd5218 5082 mark_reg_known_zero(env, regs, BPF_REG_0);
17a52670
AS
5083 /* remember map_ptr, so that check_map_access()
5084 * can check 'value_size' boundary of memory access
5085 * to map element returned from bpf_map_lookup_elem()
5086 */
33ff9823 5087 if (meta.map_ptr == NULL) {
61bd5218
JK
5088 verbose(env,
5089 "kernel subsystem misconfigured verifier\n");
17a52670
AS
5090 return -EINVAL;
5091 }
33ff9823 5092 regs[BPF_REG_0].map_ptr = meta.map_ptr;
4d31f301
DB
5093 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
5094 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
e16d2f1a
AS
5095 if (map_value_has_spin_lock(meta.map_ptr))
5096 regs[BPF_REG_0].id = ++env->id_gen;
4d31f301
DB
5097 } else {
5098 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
5099 regs[BPF_REG_0].id = ++env->id_gen;
5100 }
c64b7983
JS
5101 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
5102 mark_reg_known_zero(env, regs, BPF_REG_0);
5103 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
0f3adc28 5104 regs[BPF_REG_0].id = ++env->id_gen;
85a51f8c
LB
5105 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
5106 mark_reg_known_zero(env, regs, BPF_REG_0);
5107 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
5108 regs[BPF_REG_0].id = ++env->id_gen;
655a51e5
MKL
5109 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
5110 mark_reg_known_zero(env, regs, BPF_REG_0);
5111 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
5112 regs[BPF_REG_0].id = ++env->id_gen;
457f4436
AN
5113 } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) {
5114 mark_reg_known_zero(env, regs, BPF_REG_0);
5115 regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
5116 regs[BPF_REG_0].id = ++env->id_gen;
5117 regs[BPF_REG_0].mem_size = meta.mem_size;
af7ec138
YS
5118 } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) {
5119 int ret_btf_id;
5120
5121 mark_reg_known_zero(env, regs, BPF_REG_0);
5122 regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL;
5123 ret_btf_id = *fn->ret_btf_id;
5124 if (ret_btf_id == 0) {
5125 verbose(env, "invalid return type %d of func %s#%d\n",
5126 fn->ret_type, func_id_name(func_id), func_id);
5127 return -EINVAL;
5128 }
5129 regs[BPF_REG_0].btf_id = ret_btf_id;
17a52670 5130 } else {
61bd5218 5131 verbose(env, "unknown return type %d of func %s#%d\n",
ebb676da 5132 fn->ret_type, func_id_name(func_id), func_id);
17a52670
AS
5133 return -EINVAL;
5134 }
04fd61ab 5135
0f3adc28 5136 if (is_ptr_cast_function(func_id)) {
1b986589
MKL
5137 /* For release_reference() */
5138 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
64d85290 5139 } else if (is_acquire_function(func_id, meta.map_ptr)) {
0f3adc28
LB
5140 int id = acquire_reference_state(env, insn_idx);
5141
5142 if (id < 0)
5143 return id;
5144 /* For mark_ptr_or_null_reg() */
5145 regs[BPF_REG_0].id = id;
5146 /* For release_reference() */
5147 regs[BPF_REG_0].ref_obj_id = id;
5148 }
1b986589 5149
849fa506
YS
5150 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
5151
61bd5218 5152 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
35578d79
KX
5153 if (err)
5154 return err;
04fd61ab 5155
fa28dcb8
SL
5156 if ((func_id == BPF_FUNC_get_stack ||
5157 func_id == BPF_FUNC_get_task_stack) &&
5158 !env->prog->has_callchain_buf) {
c195651e
YS
5159 const char *err_str;
5160
5161#ifdef CONFIG_PERF_EVENTS
5162 err = get_callchain_buffers(sysctl_perf_event_max_stack);
5163 err_str = "cannot get callchain buffer for func %s#%d\n";
5164#else
5165 err = -ENOTSUPP;
5166 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
5167#endif
5168 if (err) {
5169 verbose(env, err_str, func_id_name(func_id), func_id);
5170 return err;
5171 }
5172
5173 env->prog->has_callchain_buf = true;
5174 }
5175
5d99cb2c
SL
5176 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
5177 env->prog->call_get_stack = true;
5178
969bf05e
AS
5179 if (changes_data)
5180 clear_all_pkt_pointers(env);
5181 return 0;
5182}
5183
b03c9f9f
EC
5184static bool signed_add_overflows(s64 a, s64 b)
5185{
5186 /* Do the add in u64, where overflow is well-defined */
5187 s64 res = (s64)((u64)a + (u64)b);
5188
5189 if (b < 0)
5190 return res > a;
5191 return res < a;
5192}
5193
3f50f132
JF
5194static bool signed_add32_overflows(s32 a, s32 b)
5195{
5196 /* Do the add in u32, where overflow is well-defined */
5197 s32 res = (s32)((u32)a + (u32)b);
5198
5199 if (b < 0)
5200 return res > a;
5201 return res < a;
5202}
5203
5204static bool signed_sub_overflows(s64 a, s64 b)
b03c9f9f
EC
5205{
5206 /* Do the sub in u64, where overflow is well-defined */
5207 s64 res = (s64)((u64)a - (u64)b);
5208
5209 if (b < 0)
5210 return res < a;
5211 return res > a;
969bf05e
AS
5212}
5213
3f50f132
JF
5214static bool signed_sub32_overflows(s32 a, s32 b)
5215{
5216 /* Do the sub in u32, where overflow is well-defined */
5217 s32 res = (s32)((u32)a - (u32)b);
5218
5219 if (b < 0)
5220 return res < a;
5221 return res > a;
5222}
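/* Quick standalone check of the four predicates above: the arithmetic is
 * done in the unsigned domain, where wraparound is well-defined, and
 * overflow is detected by comparing the wrapped result against the first
 * operand. (The conversion back to signed is strictly implementation-
 * defined in ISO C, but wraps on all compilers the kernel supports.)
 */
#include <assert.h>
#include <stdint.h>

static void demo_overflow_checks(void)
{
	/* INT64_MAX + 1 wraps to INT64_MIN: res < a with b > 0 => overflow */
	int64_t res = (int64_t)((uint64_t)INT64_MAX + (uint64_t)1);
	assert(res < INT64_MAX);

	/* INT32_MIN - 1 wraps to INT32_MAX: res > a with b > 0 => overflow */
	int32_t res32 = (int32_t)((uint32_t)INT32_MIN - (uint32_t)1);
	assert(res32 > INT32_MIN);
}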
5223
bb7f0f98
AS
5224static bool check_reg_sane_offset(struct bpf_verifier_env *env,
5225 const struct bpf_reg_state *reg,
5226 enum bpf_reg_type type)
5227{
5228 bool known = tnum_is_const(reg->var_off);
5229 s64 val = reg->var_off.value;
5230 s64 smin = reg->smin_value;
5231
5232 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
5233 verbose(env, "math between %s pointer and %lld is not allowed\n",
5234 reg_type_str[type], val);
5235 return false;
5236 }
5237
5238 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
5239 verbose(env, "%s pointer offset %d is not allowed\n",
5240 reg_type_str[type], reg->off);
5241 return false;
5242 }
5243
5244 if (smin == S64_MIN) {
5245 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
5246 reg_type_str[type]);
5247 return false;
5248 }
5249
5250 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
5251 verbose(env, "value %lld makes %s pointer be out of bounds\n",
5252 smin, reg_type_str[type]);
5253 return false;
5254 }
5255
5256 return true;
5257}
5258
979d63d5
DB
5259static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
5260{
5261 return &env->insn_aux_data[env->insn_idx];
5262}
5263
5264static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
5265 u32 *ptr_limit, u8 opcode, bool off_is_neg)
5266{
5267 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
5268 (opcode == BPF_SUB && !off_is_neg);
5269 u32 off;
5270
5271 switch (ptr_reg->type) {
5272 case PTR_TO_STACK:
088ec26d
AI
5273 /* Indirect variable offset stack access is prohibited in
5274 * unprivileged mode so it's not handled here.
5275 */
979d63d5
DB
5276 off = ptr_reg->off + ptr_reg->var_off.value;
5277 if (mask_to_left)
5278 *ptr_limit = MAX_BPF_STACK + off;
5279 else
5280 *ptr_limit = -off;
5281 return 0;
5282 case PTR_TO_MAP_VALUE:
5283 if (mask_to_left) {
5284 *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
5285 } else {
5286 off = ptr_reg->smin_value + ptr_reg->off;
5287 *ptr_limit = ptr_reg->map_ptr->value_size - off;
5288 }
5289 return 0;
5290 default:
5291 return -EINVAL;
5292 }
5293}
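/* Worked example of the limit computation above for PTR_TO_STACK,
 * assuming MAX_BPF_STACK == 512 and a pointer at fp-16: when the masked
 * ALU op moves the pointer up (mask_to_left == false) the limit is 16,
 * the distance back to fp; when it moves down it is 512 - 16 = 496, the
 * distance to the end of the stack. Standalone, hypothetical names.
 */
#include <stdint.h>

#define DEMO_MAX_BPF_STACK 512

static uint32_t demo_stack_limit(int32_t off, int mask_to_left)
{
	return mask_to_left ? DEMO_MAX_BPF_STACK + off : -off;
}
/* demo_stack_limit(-16, 0) == 16, demo_stack_limit(-16, 1) == 496 */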
5294
d3bd7413
DB
5295static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
5296 const struct bpf_insn *insn)
5297{
2c78ee89 5298 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
d3bd7413
DB
5299}
5300
5301static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
5302 u32 alu_state, u32 alu_limit)
5303{
5304 /* If we arrived here from different branches with different
5305 * state or limits to sanitize, then this won't work.
5306 */
5307 if (aux->alu_state &&
5308 (aux->alu_state != alu_state ||
5309 aux->alu_limit != alu_limit))
5310 return -EACCES;
5311
5312 /* Corresponding fixup done in fixup_bpf_calls(). */
5313 aux->alu_state = alu_state;
5314 aux->alu_limit = alu_limit;
5315 return 0;
5316}
5317
5318static int sanitize_val_alu(struct bpf_verifier_env *env,
5319 struct bpf_insn *insn)
5320{
5321 struct bpf_insn_aux_data *aux = cur_aux(env);
5322
5323 if (can_skip_alu_sanitation(env, insn))
5324 return 0;
5325
5326 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
5327}
5328
979d63d5
DB
5329static int sanitize_ptr_alu(struct bpf_verifier_env *env,
5330 struct bpf_insn *insn,
5331 const struct bpf_reg_state *ptr_reg,
5332 struct bpf_reg_state *dst_reg,
5333 bool off_is_neg)
5334{
5335 struct bpf_verifier_state *vstate = env->cur_state;
5336 struct bpf_insn_aux_data *aux = cur_aux(env);
5337 bool ptr_is_dst_reg = ptr_reg == dst_reg;
5338 u8 opcode = BPF_OP(insn->code);
5339 u32 alu_state, alu_limit;
5340 struct bpf_reg_state tmp;
5341 bool ret;
5342
d3bd7413 5343 if (can_skip_alu_sanitation(env, insn))
979d63d5
DB
5344 return 0;
5345
5346 /* We already marked aux for masking from non-speculative
5347 * paths, thus we got here in the first place. We only care
5348 * to explore bad access from here.
5349 */
5350 if (vstate->speculative)
5351 goto do_sim;
5352
5353 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
5354 alu_state |= ptr_is_dst_reg ?
5355 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
5356
5357 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
5358 return 0;
d3bd7413 5359 if (update_alu_sanitation_state(aux, alu_state, alu_limit))
979d63d5 5360 return -EACCES;
979d63d5
DB
5361do_sim:
5362 /* Simulate and find potential out-of-bounds access under
5363 * speculative execution from truncation as a result of
5364 * masking when off was not within expected range. If off
5365 * sits in dst, then we temporarily need to move ptr there
5366 * to simulate dst (== 0) +/-= ptr. Needed, for example,
5367 * for cases where we use K-based arithmetic in one direction
5368 * and truncated reg-based in the other in order to explore
5369 * bad access.
5370 */
5371 if (!ptr_is_dst_reg) {
5372 tmp = *dst_reg;
5373 *dst_reg = *ptr_reg;
5374 }
5375 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
0803278b 5376 if (!ptr_is_dst_reg && ret)
979d63d5
DB
5377 *dst_reg = tmp;
5378 return !ret ? -EFAULT : 0;
5379}
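/* The alu_limit recorded here feeds the masking sequence later patched in
 * by fixup_bpf_calls(). Roughly, in standalone C, the patched pointer ALU
 * behaves like the sketch below: an offset outside the sanctioned range
 * is forced to zero instead of being allowed to overshoot under
 * speculation.
 */
#include <stdint.h>

static uint64_t demo_mask_offset(uint64_t off, uint32_t alu_limit)
{
	uint64_t ax = alu_limit;

	ax -= off;		/* wraps to "negative" if off > limit */
	ax |= off;		/* also catches a negative offset */
	ax = -ax;
	ax = (uint64_t)((int64_t)ax >> 63);	/* all-ones or zero */
	return off & ax;	/* out-of-range offset becomes 0 */
}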
5380
f1174f77 5381/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
f1174f77
EC
5382 * Caller should also handle BPF_MOV case separately.
5383 * If we return -EACCES, caller may want to try again treating pointer as a
5384 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
5385 */
5386static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
5387 struct bpf_insn *insn,
5388 const struct bpf_reg_state *ptr_reg,
5389 const struct bpf_reg_state *off_reg)
969bf05e 5390{
f4d7e40a
AS
5391 struct bpf_verifier_state *vstate = env->cur_state;
5392 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5393 struct bpf_reg_state *regs = state->regs, *dst_reg;
f1174f77 5394 bool known = tnum_is_const(off_reg->var_off);
b03c9f9f
EC
5395 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
5396 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
5397 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
5398 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
9d7eceed 5399 u32 dst = insn->dst_reg, src = insn->src_reg;
969bf05e 5400 u8 opcode = BPF_OP(insn->code);
979d63d5 5401 int ret;
969bf05e 5402
f1174f77 5403 dst_reg = &regs[dst];
969bf05e 5404
6f16101e
DB
5405 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
5406 smin_val > smax_val || umin_val > umax_val) {
5407 /* Taint dst register if offset had invalid bounds derived from
5408 * e.g. dead branches.
5409 */
f54c7898 5410 __mark_reg_unknown(env, dst_reg);
6f16101e 5411 return 0;
f1174f77
EC
5412 }
5413
5414 if (BPF_CLASS(insn->code) != BPF_ALU64) {
5415 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
6c693541
YS
5416 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
5417 __mark_reg_unknown(env, dst_reg);
5418 return 0;
5419 }
5420
82abbf8d
AS
5421 verbose(env,
5422 "R%d 32-bit pointer arithmetic prohibited\n",
5423 dst);
f1174f77 5424 return -EACCES;
969bf05e
AS
5425 }
5426
aad2eeaf
JS
5427 switch (ptr_reg->type) {
5428 case PTR_TO_MAP_VALUE_OR_NULL:
5429 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
5430 dst, reg_type_str[ptr_reg->type]);
f1174f77 5431 return -EACCES;
aad2eeaf 5432 case CONST_PTR_TO_MAP:
7c696732
YS
5433 /* smin_val represents the known value */
5434 if (known && smin_val == 0 && opcode == BPF_ADD)
5435 break;
5436 /* fall-through */
aad2eeaf 5437 case PTR_TO_PACKET_END:
c64b7983
JS
5438 case PTR_TO_SOCKET:
5439 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
5440 case PTR_TO_SOCK_COMMON:
5441 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
5442 case PTR_TO_TCP_SOCK:
5443 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 5444 case PTR_TO_XDP_SOCK:
aad2eeaf
JS
5445 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
5446 dst, reg_type_str[ptr_reg->type]);
f1174f77 5447 return -EACCES;
9d7eceed
DB
5448 case PTR_TO_MAP_VALUE:
5449 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
5450 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
5451 off_reg == dst_reg ? dst : src);
5452 return -EACCES;
5453 }
df561f66 5454 fallthrough;
aad2eeaf
JS
5455 default:
5456 break;
f1174f77
EC
5457 }
5458
5459 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
5460 * The id may be overwritten later if we create a new variable offset.
969bf05e 5461 */
f1174f77
EC
5462 dst_reg->type = ptr_reg->type;
5463 dst_reg->id = ptr_reg->id;
969bf05e 5464
bb7f0f98
AS
5465 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
5466 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
5467 return -EINVAL;
5468
3f50f132
JF
5469 /* pointer types do not carry 32-bit bounds at the moment. */
5470 __mark_reg32_unbounded(dst_reg);
5471
f1174f77
EC
5472 switch (opcode) {
5473 case BPF_ADD:
979d63d5
DB
5474 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
5475 if (ret < 0) {
5476 verbose(env, "R%d tried to add from different maps or paths\n", dst);
5477 return ret;
5478 }
f1174f77
EC
5479 /* We can take a fixed offset as long as it doesn't overflow
5480 * the s32 'off' field
969bf05e 5481 */
b03c9f9f
EC
5482 if (known && (ptr_reg->off + smin_val ==
5483 (s64)(s32)(ptr_reg->off + smin_val))) {
f1174f77 5484 /* pointer += K. Accumulate it into fixed offset */
b03c9f9f
EC
5485 dst_reg->smin_value = smin_ptr;
5486 dst_reg->smax_value = smax_ptr;
5487 dst_reg->umin_value = umin_ptr;
5488 dst_reg->umax_value = umax_ptr;
f1174f77 5489 dst_reg->var_off = ptr_reg->var_off;
b03c9f9f 5490 dst_reg->off = ptr_reg->off + smin_val;
0962590e 5491 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
5492 break;
5493 }
f1174f77
EC
5494 /* A new variable offset is created. Note that off_reg->off
5495 * == 0, since it's a scalar.
5496 * dst_reg gets the pointer type and since some positive
5497 * integer value was added to the pointer, give it a new 'id'
5498 * if it's a PTR_TO_PACKET.
5499 * This creates a new 'base' pointer, off_reg (variable) gets
5500 * added into the variable offset, and we copy the fixed offset
5501 * from ptr_reg.
969bf05e 5502 */
b03c9f9f
EC
5503 if (signed_add_overflows(smin_ptr, smin_val) ||
5504 signed_add_overflows(smax_ptr, smax_val)) {
5505 dst_reg->smin_value = S64_MIN;
5506 dst_reg->smax_value = S64_MAX;
5507 } else {
5508 dst_reg->smin_value = smin_ptr + smin_val;
5509 dst_reg->smax_value = smax_ptr + smax_val;
5510 }
5511 if (umin_ptr + umin_val < umin_ptr ||
5512 umax_ptr + umax_val < umax_ptr) {
5513 dst_reg->umin_value = 0;
5514 dst_reg->umax_value = U64_MAX;
5515 } else {
5516 dst_reg->umin_value = umin_ptr + umin_val;
5517 dst_reg->umax_value = umax_ptr + umax_val;
5518 }
f1174f77
EC
5519 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
5520 dst_reg->off = ptr_reg->off;
0962590e 5521 dst_reg->raw = ptr_reg->raw;
de8f3a83 5522 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
5523 dst_reg->id = ++env->id_gen;
5524 /* something was added to pkt_ptr, set range to zero */
0962590e 5525 dst_reg->raw = 0;
f1174f77
EC
5526 }
5527 break;
5528 case BPF_SUB:
979d63d5
DB
5529 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
5530 if (ret < 0) {
5531 verbose(env, "R%d tried to sub from different maps or paths\n", dst);
5532 return ret;
5533 }
f1174f77
EC
5534 if (dst_reg == off_reg) {
5535 /* scalar -= pointer. Creates an unknown scalar */
82abbf8d
AS
5536 verbose(env, "R%d tried to subtract pointer from scalar\n",
5537 dst);
f1174f77
EC
5538 return -EACCES;
5539 }
5540 /* We don't allow subtraction from FP, because (according to
5541 * test_verifier.c test "invalid fp arithmetic", JITs might not
5542 * be able to deal with it.
969bf05e 5543 */
f1174f77 5544 if (ptr_reg->type == PTR_TO_STACK) {
82abbf8d
AS
5545 verbose(env, "R%d subtraction from stack pointer prohibited\n",
5546 dst);
f1174f77
EC
5547 return -EACCES;
5548 }
b03c9f9f
EC
5549 if (known && (ptr_reg->off - smin_val ==
5550 (s64)(s32)(ptr_reg->off - smin_val))) {
f1174f77 5551 /* pointer -= K. Subtract it from fixed offset */
b03c9f9f
EC
5552 dst_reg->smin_value = smin_ptr;
5553 dst_reg->smax_value = smax_ptr;
5554 dst_reg->umin_value = umin_ptr;
5555 dst_reg->umax_value = umax_ptr;
f1174f77
EC
5556 dst_reg->var_off = ptr_reg->var_off;
5557 dst_reg->id = ptr_reg->id;
b03c9f9f 5558 dst_reg->off = ptr_reg->off - smin_val;
0962590e 5559 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
5560 break;
5561 }
f1174f77
EC
5562 /* A new variable offset is created. If the subtrahend is known
5563 * nonnegative, then any reg->range we had before is still good.
969bf05e 5564 */
b03c9f9f
EC
5565 if (signed_sub_overflows(smin_ptr, smax_val) ||
5566 signed_sub_overflows(smax_ptr, smin_val)) {
5567 /* Overflow possible, we know nothing */
5568 dst_reg->smin_value = S64_MIN;
5569 dst_reg->smax_value = S64_MAX;
5570 } else {
5571 dst_reg->smin_value = smin_ptr - smax_val;
5572 dst_reg->smax_value = smax_ptr - smin_val;
5573 }
5574 if (umin_ptr < umax_val) {
5575 /* Overflow possible, we know nothing */
5576 dst_reg->umin_value = 0;
5577 dst_reg->umax_value = U64_MAX;
5578 } else {
5579 /* Cannot overflow (as long as bounds are consistent) */
5580 dst_reg->umin_value = umin_ptr - umax_val;
5581 dst_reg->umax_value = umax_ptr - umin_val;
5582 }
f1174f77
EC
5583 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
5584 dst_reg->off = ptr_reg->off;
0962590e 5585 dst_reg->raw = ptr_reg->raw;
de8f3a83 5586 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
5587 dst_reg->id = ++env->id_gen;
5588 /* something was added to pkt_ptr, set range to zero */
b03c9f9f 5589 if (smin_val < 0)
0962590e 5590 dst_reg->raw = 0;
43188702 5591 }
f1174f77
EC
5592 break;
5593 case BPF_AND:
5594 case BPF_OR:
5595 case BPF_XOR:
82abbf8d
AS
5596 /* bitwise ops on pointers are troublesome, prohibit. */
5597 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
5598 dst, bpf_alu_string[opcode >> 4]);
f1174f77
EC
5599 return -EACCES;
5600 default:
5601 /* other operators (e.g. MUL,LSH) produce non-pointer results */
82abbf8d
AS
5602 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
5603 dst, bpf_alu_string[opcode >> 4]);
f1174f77 5604 return -EACCES;
43188702
JF
5605 }
5606
bb7f0f98
AS
5607 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
5608 return -EINVAL;
5609
b03c9f9f
EC
5610 __update_reg_bounds(dst_reg);
5611 __reg_deduce_bounds(dst_reg);
5612 __reg_bound_offset(dst_reg);
0d6303db
DB
5613
5614 /* For unprivileged we require that resulting offset must be in bounds
5615 * in order to be able to sanitize access later on.
5616 */
2c78ee89 5617 if (!env->bypass_spec_v1) {
e4298d25
DB
5618 if (dst_reg->type == PTR_TO_MAP_VALUE &&
5619 check_map_access(env, dst, dst_reg->off, 1, false)) {
5620 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
5621 "prohibited for !root\n", dst);
5622 return -EACCES;
5623 } else if (dst_reg->type == PTR_TO_STACK &&
5624 check_stack_access(env, dst_reg, dst_reg->off +
5625 dst_reg->var_off.value, 1)) {
5626 verbose(env, "R%d stack pointer arithmetic goes out of range, "
5627 "prohibited for !root\n", dst);
5628 return -EACCES;
5629 }
0d6303db
DB
5630 }
5631
43188702
JF
5632 return 0;
5633}
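/* Minimal sketch of the "pointer += K" fast path above: a known constant
 * may be folded into the fixed 32-bit 'off' field only while the sum
 * still fits in s32; otherwise it has to be tracked as a variable
 * offset. Hypothetical demo names, standalone.
 */
#include <stdbool.h>
#include <stdint.h>

static bool demo_fold_const_off(int32_t *off, int64_t k)
{
	int64_t sum = (int64_t)*off + k;

	if (sum != (int64_t)(int32_t)sum)
		return false;	/* would overflow s32: keep variable off */
	*off = (int32_t)sum;
	return true;
}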
5634
3f50f132
JF
5635static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
5636 struct bpf_reg_state *src_reg)
5637{
5638 s32 smin_val = src_reg->s32_min_value;
5639 s32 smax_val = src_reg->s32_max_value;
5640 u32 umin_val = src_reg->u32_min_value;
5641 u32 umax_val = src_reg->u32_max_value;
5642
5643 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
5644 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
5645 dst_reg->s32_min_value = S32_MIN;
5646 dst_reg->s32_max_value = S32_MAX;
5647 } else {
5648 dst_reg->s32_min_value += smin_val;
5649 dst_reg->s32_max_value += smax_val;
5650 }
5651 if (dst_reg->u32_min_value + umin_val < umin_val ||
5652 dst_reg->u32_max_value + umax_val < umax_val) {
5653 dst_reg->u32_min_value = 0;
5654 dst_reg->u32_max_value = U32_MAX;
5655 } else {
5656 dst_reg->u32_min_value += umin_val;
5657 dst_reg->u32_max_value += umax_val;
5658 }
5659}
5660
07cd2631
JF
5661static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
5662 struct bpf_reg_state *src_reg)
5663{
5664 s64 smin_val = src_reg->smin_value;
5665 s64 smax_val = src_reg->smax_value;
5666 u64 umin_val = src_reg->umin_value;
5667 u64 umax_val = src_reg->umax_value;
5668
5669 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
5670 signed_add_overflows(dst_reg->smax_value, smax_val)) {
5671 dst_reg->smin_value = S64_MIN;
5672 dst_reg->smax_value = S64_MAX;
5673 } else {
5674 dst_reg->smin_value += smin_val;
5675 dst_reg->smax_value += smax_val;
5676 }
5677 if (dst_reg->umin_value + umin_val < umin_val ||
5678 dst_reg->umax_value + umax_val < umax_val) {
5679 dst_reg->umin_value = 0;
5680 dst_reg->umax_value = U64_MAX;
5681 } else {
5682 dst_reg->umin_value += umin_val;
5683 dst_reg->umax_value += umax_val;
5684 }
3f50f132
JF
5685}
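/* Standalone demo of the unsigned half of the rule above: interval
 * arithmetic adds the two minima and the two maxima, and if either sum
 * can wrap, the whole range collapses to the conservative [0, U64_MAX].
 */
#include <stdint.h>

struct demo_urange { uint64_t umin, umax; };

static void demo_range_add(struct demo_urange *dst, struct demo_urange src)
{
	if (dst->umin + src.umin < src.umin ||
	    dst->umax + src.umax < src.umax) {
		dst->umin = 0;		/* possible wraparound */
		dst->umax = UINT64_MAX;
	} else {
		dst->umin += src.umin;
		dst->umax += src.umax;
	}
}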
5686
5687static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
5688 struct bpf_reg_state *src_reg)
5689{
5690 s32 smin_val = src_reg->s32_min_value;
5691 s32 smax_val = src_reg->s32_max_value;
5692 u32 umin_val = src_reg->u32_min_value;
5693 u32 umax_val = src_reg->u32_max_value;
5694
5695 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
5696 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
5697 /* Overflow possible, we know nothing */
5698 dst_reg->s32_min_value = S32_MIN;
5699 dst_reg->s32_max_value = S32_MAX;
5700 } else {
5701 dst_reg->s32_min_value -= smax_val;
5702 dst_reg->s32_max_value -= smin_val;
5703 }
5704 if (dst_reg->u32_min_value < umax_val) {
5705 /* Overflow possible, we know nothing */
5706 dst_reg->u32_min_value = 0;
5707 dst_reg->u32_max_value = U32_MAX;
5708 } else {
5709 /* Cannot overflow (as long as bounds are consistent) */
5710 dst_reg->u32_min_value -= umax_val;
5711 dst_reg->u32_max_value -= umin_val;
5712 }
07cd2631
JF
5713}
5714
5715static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
5716 struct bpf_reg_state *src_reg)
5717{
5718 s64 smin_val = src_reg->smin_value;
5719 s64 smax_val = src_reg->smax_value;
5720 u64 umin_val = src_reg->umin_value;
5721 u64 umax_val = src_reg->umax_value;
5722
5723 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
5724 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
5725 /* Overflow possible, we know nothing */
5726 dst_reg->smin_value = S64_MIN;
5727 dst_reg->smax_value = S64_MAX;
5728 } else {
5729 dst_reg->smin_value -= smax_val;
5730 dst_reg->smax_value -= smin_val;
5731 }
5732 if (dst_reg->umin_value < umax_val) {
5733 /* Overflow possible, we know nothing */
5734 dst_reg->umin_value = 0;
5735 dst_reg->umax_value = U64_MAX;
5736 } else {
5737 /* Cannot overflow (as long as bounds are consistent) */
5738 dst_reg->umin_value -= umax_val;
5739 dst_reg->umax_value -= umin_val;
5740 }
3f50f132
JF
5741}
5742
5743static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
5744 struct bpf_reg_state *src_reg)
5745{
5746 s32 smin_val = src_reg->s32_min_value;
5747 u32 umin_val = src_reg->u32_min_value;
5748 u32 umax_val = src_reg->u32_max_value;
5749
5750 if (smin_val < 0 || dst_reg->s32_min_value < 0) {
5751 /* Ain't nobody got time to multiply that sign */
5752 __mark_reg32_unbounded(dst_reg);
5753 return;
5754 }
5755 /* Both values are positive, so we can work with unsigned and
5756 * copy the result to signed (unless it exceeds S32_MAX).
5757 */
5758 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
5759 /* Potential overflow, we know nothing */
5760 __mark_reg32_unbounded(dst_reg);
5761 return;
5762 }
5763 dst_reg->u32_min_value *= umin_val;
5764 dst_reg->u32_max_value *= umax_val;
5765 if (dst_reg->u32_max_value > S32_MAX) {
5766 /* Overflow possible, we know nothing */
5767 dst_reg->s32_min_value = S32_MIN;
5768 dst_reg->s32_max_value = S32_MAX;
5769 } else {
5770 dst_reg->s32_min_value = dst_reg->u32_min_value;
5771 dst_reg->s32_max_value = dst_reg->u32_max_value;
5772 }
07cd2631
JF
5773}
5774
5775static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
5776 struct bpf_reg_state *src_reg)
5777{
5778 s64 smin_val = src_reg->smin_value;
5779 u64 umin_val = src_reg->umin_value;
5780 u64 umax_val = src_reg->umax_value;
5781
07cd2631
JF
5782 if (smin_val < 0 || dst_reg->smin_value < 0) {
5783 /* Ain't nobody got time to multiply that sign */
3f50f132 5784 __mark_reg64_unbounded(dst_reg);
07cd2631
JF
5785 return;
5786 }
5787 /* Both values are positive, so we can work with unsigned and
5788 * copy the result to signed (unless it exceeds S64_MAX).
5789 */
5790 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
5791 /* Potential overflow, we know nothing */
3f50f132 5792 __mark_reg64_unbounded(dst_reg);
07cd2631
JF
5793 return;
5794 }
5795 dst_reg->umin_value *= umin_val;
5796 dst_reg->umax_value *= umax_val;
5797 if (dst_reg->umax_value > S64_MAX) {
5798 /* Overflow possible, we know nothing */
5799 dst_reg->smin_value = S64_MIN;
5800 dst_reg->smax_value = S64_MAX;
5801 } else {
5802 dst_reg->smin_value = dst_reg->umin_value;
5803 dst_reg->smax_value = dst_reg->umax_value;
5804 }
5805}
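/* Why the U32_MAX guard above is sufficient, as a standalone check: with
 * both factors at most 2^32 - 1 the product is at most
 * (2^32 - 1)^2 = 2^64 - 2^33 + 1, which still fits in a u64, so the
 * multiplications of the bounds cannot wrap.
 */
#include <assert.h>
#include <stdint.h>

static void demo_mul_no_wrap(uint64_t a, uint64_t b)
{
	assert(a <= UINT32_MAX && b <= UINT32_MAX);
	assert(a == 0 || (a * b) / a == b);	/* no wraparound occurred */
}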
5806
3f50f132
JF
5807static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
5808 struct bpf_reg_state *src_reg)
5809{
5810 bool src_known = tnum_subreg_is_const(src_reg->var_off);
5811 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
5812 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
5813 s32 smin_val = src_reg->s32_min_value;
5814 u32 umax_val = src_reg->u32_max_value;
5815
5816 /* Assuming scalar64_min_max_and will be called so it's safe
5817 * to skip updating register for known 32-bit case.
5818 */
5819 if (src_known && dst_known)
5820 return;
5821
5822 /* We get our minimum from the var_off, since that's inherently
5823 * bitwise. Our maximum is the minimum of the operands' maxima.
5824 */
5825 dst_reg->u32_min_value = var32_off.value;
5826 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
5827 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
5828 /* Lose signed bounds when ANDing negative numbers,
5829 * ain't nobody got time for that.
5830 */
5831 dst_reg->s32_min_value = S32_MIN;
5832 dst_reg->s32_max_value = S32_MAX;
5833 } else {
5834 /* ANDing two positives gives a positive, so safe to
5835 * cast result into s32.
5836 */
5837 dst_reg->s32_min_value = dst_reg->u32_min_value;
5838 dst_reg->s32_max_value = dst_reg->u32_max_value;
5839 }
5840
5841}
5842
07cd2631
JF
5843static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
5844 struct bpf_reg_state *src_reg)
5845{
3f50f132
JF
5846 bool src_known = tnum_is_const(src_reg->var_off);
5847 bool dst_known = tnum_is_const(dst_reg->var_off);
07cd2631
JF
5848 s64 smin_val = src_reg->smin_value;
5849 u64 umax_val = src_reg->umax_value;
5850
3f50f132 5851 if (src_known && dst_known) {
4fbb38a3 5852 __mark_reg_known(dst_reg, dst_reg->var_off.value);
3f50f132
JF
5853 return;
5854 }
5855
07cd2631
JF
5856 /* We get our minimum from the var_off, since that's inherently
5857 * bitwise. Our maximum is the minimum of the operands' maxima.
5858 */
07cd2631
JF
5859 dst_reg->umin_value = dst_reg->var_off.value;
5860 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
5861 if (dst_reg->smin_value < 0 || smin_val < 0) {
5862 /* Lose signed bounds when ANDing negative numbers,
5863 * ain't nobody got time for that.
5864 */
5865 dst_reg->smin_value = S64_MIN;
5866 dst_reg->smax_value = S64_MAX;
5867 } else {
5868 /* ANDing two positives gives a positive, so safe to
5869 * cast result into s64.
5870 */
5871 dst_reg->smin_value = dst_reg->umin_value;
5872 dst_reg->smax_value = dst_reg->umax_value;
5873 }
5874 /* We may learn something more from the var_off */
5875 __update_reg_bounds(dst_reg);
5876}
5877
3f50f132
JF
5878static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
5879 struct bpf_reg_state *src_reg)
5880{
5881 bool src_known = tnum_subreg_is_const(src_reg->var_off);
5882 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
5883 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
5884 s32 smin_val = src_reg->s32_min_value;
5885 u32 umin_val = src_reg->u32_min_value;
5886
5887 /* Assuming scalar64_min_max_or will be called so it is safe
5888 * to skip updating register for known case.
5889 */
5890 if (src_known && dst_known)
5891 return;
5892
5893 /* We get our maximum from the var_off, and our minimum is the
5894 * maximum of the operands' minima
5895 */
5896 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
5897 dst_reg->u32_max_value = var32_off.value | var32_off.mask;
5898 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
5899 /* Lose signed bounds when ORing negative numbers,
5900 * ain't nobody got time for that.
5901 */
5902 dst_reg->s32_min_value = S32_MIN;
5903 dst_reg->s32_max_value = S32_MAX;
5904 } else {
5905 /* ORing two positives gives a positive, so safe to
5906 * cast result into s32.
5907 */
5908 dst_reg->s32_min_value = dst_reg->u32_min_value;
5909 dst_reg->s32_max_value = dst_reg->u32_max_value;
5910 }
5911}
5912
07cd2631
JF
5913static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
5914 struct bpf_reg_state *src_reg)
5915{
3f50f132
JF
5916 bool src_known = tnum_is_const(src_reg->var_off);
5917 bool dst_known = tnum_is_const(dst_reg->var_off);
07cd2631
JF
5918 s64 smin_val = src_reg->smin_value;
5919 u64 umin_val = src_reg->umin_value;
5920
3f50f132 5921 if (src_known && dst_known) {
4fbb38a3 5922 __mark_reg_known(dst_reg, dst_reg->var_off.value);
3f50f132
JF
5923 return;
5924 }
5925
07cd2631
JF
5926 /* We get our maximum from the var_off, and our minimum is the
5927 * maximum of the operands' minima
5928 */
07cd2631
JF
5929 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
5930 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
5931 if (dst_reg->smin_value < 0 || smin_val < 0) {
5932 /* Lose signed bounds when ORing negative numbers,
5933 * ain't nobody got time for that.
5934 */
5935 dst_reg->smin_value = S64_MIN;
5936 dst_reg->smax_value = S64_MAX;
5937 } else {
5938 /* ORing two positives gives a positive, so safe to
5939 * cast result into s64.
5940 */
5941 dst_reg->smin_value = dst_reg->umin_value;
5942 dst_reg->smax_value = dst_reg->umax_value;
5943 }
5944 /* We may learn something more from the var_off */
5945 __update_reg_bounds(dst_reg);
5946}
5947
2921c90d
YS
5948static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
5949 struct bpf_reg_state *src_reg)
5950{
5951 bool src_known = tnum_subreg_is_const(src_reg->var_off);
5952 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
5953 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
5954 s32 smin_val = src_reg->s32_min_value;
5955
5956 /* Assuming scalar64_min_max_xor will be called so it is safe
5957 * to skip updating register for known case.
5958 */
5959 if (src_known && dst_known)
5960 return;
5961
5962 /* We get both minimum and maximum from the var32_off. */
5963 dst_reg->u32_min_value = var32_off.value;
5964 dst_reg->u32_max_value = var32_off.value | var32_off.mask;
5965
5966 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
5967 /* XORing two positive sign numbers gives a positive,
5968 * so safe to cast u32 result into s32.
5969 */
5970 dst_reg->s32_min_value = dst_reg->u32_min_value;
5971 dst_reg->s32_max_value = dst_reg->u32_max_value;
5972 } else {
5973 dst_reg->s32_min_value = S32_MIN;
5974 dst_reg->s32_max_value = S32_MAX;
5975 }
5976}
5977
5978static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
5979 struct bpf_reg_state *src_reg)
5980{
5981 bool src_known = tnum_is_const(src_reg->var_off);
5982 bool dst_known = tnum_is_const(dst_reg->var_off);
5983 s64 smin_val = src_reg->smin_value;
5984
5985 if (src_known && dst_known) {
5986 /* dst_reg->var_off.value has been updated earlier */
5987 __mark_reg_known(dst_reg, dst_reg->var_off.value);
5988 return;
5989 }
5990
5991 /* We get both minimum and maximum from the var_off. */
5992 dst_reg->umin_value = dst_reg->var_off.value;
5993 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
5994
5995 if (dst_reg->smin_value >= 0 && smin_val >= 0) {
5996 /* XORing two positive sign numbers gives a positive,
5997 * so safe to cast u64 result into s64.
5998 */
5999 dst_reg->smin_value = dst_reg->umin_value;
6000 dst_reg->smax_value = dst_reg->umax_value;
6001 } else {
6002 dst_reg->smin_value = S64_MIN;
6003 dst_reg->smax_value = S64_MAX;
6004 }
6005
6006 __update_reg_bounds(dst_reg);
6007}
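/* Standalone sketch of where the bitwise bounds above come from, using a
 * toy (value, mask) pair like the verifier's tnum: 'value' holds bits
 * known to be one, 'mask' the unknown bits. The smallest possible result
 * sets none of the unknown bits and the largest sets them all, which is
 * exactly how scalar_min_max_xor() derives both unsigned bounds (AND
 * takes only its minimum, OR only its maximum, from this).
 */
#include <stdint.h>

struct demo_tnum { uint64_t value, mask; };

static void demo_tnum_urange(struct demo_tnum t, uint64_t *umin, uint64_t *umax)
{
	*umin = t.value;		/* unknown bits all zero */
	*umax = t.value | t.mask;	/* unknown bits all one */
}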
6008
3f50f132
JF
6009static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
6010 u64 umin_val, u64 umax_val)
07cd2631 6011{
07cd2631
JF
6012 /* We lose all sign bit information (except what we can pick
6013 * up from var_off)
6014 */
3f50f132
JF
6015 dst_reg->s32_min_value = S32_MIN;
6016 dst_reg->s32_max_value = S32_MAX;
6017 /* If we might shift our top bit out, then we know nothing */
6018 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
6019 dst_reg->u32_min_value = 0;
6020 dst_reg->u32_max_value = U32_MAX;
6021 } else {
6022 dst_reg->u32_min_value <<= umin_val;
6023 dst_reg->u32_max_value <<= umax_val;
6024 }
6025}
6026
6027static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
6028 struct bpf_reg_state *src_reg)
6029{
6030 u32 umax_val = src_reg->u32_max_value;
6031 u32 umin_val = src_reg->u32_min_value;
6032 /* u32 alu operation will zext upper bits */
6033 struct tnum subreg = tnum_subreg(dst_reg->var_off);
6034
6035 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
6036 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
6037 /* Not required, but being careful: mark reg64 bounds as unknown so
6038 * that we are forced to pick them up from tnum and zext later and
6039 * if some path skips this step we are still safe.
6040 */
6041 __mark_reg64_unbounded(dst_reg);
6042 __update_reg32_bounds(dst_reg);
6043}
6044
6045static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
6046 u64 umin_val, u64 umax_val)
6047{
6048 /* Special case <<32 because it is a common compiler pattern to sign
6049 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
6050 * positive we know this shift will also be positive so we can track
6051 * bounds correctly. Otherwise we lose all sign bit information except
6052 * what we can pick up from var_off. Perhaps we can generalize this
6053 * later to shifts of any length.
6054 */
6055 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
6056 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
6057 else
6058 dst_reg->smax_value = S64_MAX;
6059
6060 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
6061 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
6062 else
6063 dst_reg->smin_value = S64_MIN;
6064
07cd2631
JF
6065 /* If we might shift our top bit out, then we know nothing */
6066 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
6067 dst_reg->umin_value = 0;
6068 dst_reg->umax_value = U64_MAX;
6069 } else {
6070 dst_reg->umin_value <<= umin_val;
6071 dst_reg->umax_value <<= umax_val;
6072 }
3f50f132
JF
6073}
6074
6075static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
6076 struct bpf_reg_state *src_reg)
6077{
6078 u64 umax_val = src_reg->umax_value;
6079 u64 umin_val = src_reg->umin_value;
6080
6081 /* scalar64 calc uses 32bit unshifted bounds so must be called first */
6082 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
6083 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
6084
07cd2631
JF
6085 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
6086 /* We may learn something more from the var_off */
6087 __update_reg_bounds(dst_reg);
6088}
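/* The <<32 special case above targets the sign-extension idiom compilers
 * commonly emit for 32-bit subregisters, sketched standalone below. The
 * arithmetic right shift of a negative value is implementation-defined
 * in ISO C but sign-extends on all relevant compilers.
 */
#include <stdint.h>

static int64_t demo_sext32(uint64_t r)
{
	r <<= 32;			/* move the subreg into the upper half */
	return (int64_t)r >> 32;	/* arithmetic shift sign-extends */
}
/* demo_sext32(0xffffffff) == -1, demo_sext32(1) == 1 */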
6089
3f50f132
JF
6090static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
6091 struct bpf_reg_state *src_reg)
6092{
6093 struct tnum subreg = tnum_subreg(dst_reg->var_off);
6094 u32 umax_val = src_reg->u32_max_value;
6095 u32 umin_val = src_reg->u32_min_value;
6096
6097 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
6098 * be negative, then either:
6099 * 1) src_reg might be zero, so the sign bit of the result is
6100 * unknown, so we lose our signed bounds
6101 * 2) it's known negative, thus the unsigned bounds capture the
6102 * signed bounds
6103 * 3) the signed bounds cross zero, so they tell us nothing
6104 * about the result
6105 * If the value in dst_reg is known nonnegative, then again the
6106 * unsigned bounds capture the signed bounds.
6107 * Thus, in all cases it suffices to blow away our signed bounds
6108 * and rely on inferring new ones from the unsigned bounds and
6109 * var_off of the result.
6110 */
6111 dst_reg->s32_min_value = S32_MIN;
6112 dst_reg->s32_max_value = S32_MAX;
6113
6114 dst_reg->var_off = tnum_rshift(subreg, umin_val);
6115 dst_reg->u32_min_value >>= umax_val;
6116 dst_reg->u32_max_value >>= umin_val;
6117
6118 __mark_reg64_unbounded(dst_reg);
6119 __update_reg32_bounds(dst_reg);
6120}
6121
07cd2631
JF
6122static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
6123 struct bpf_reg_state *src_reg)
6124{
6125 u64 umax_val = src_reg->umax_value;
6126 u64 umin_val = src_reg->umin_value;
6127
6128 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
6129 * be negative, then either:
6130 * 1) src_reg might be zero, so the sign bit of the result is
6131 * unknown, so we lose our signed bounds
6132 * 2) it's known negative, thus the unsigned bounds capture the
6133 * signed bounds
6134 * 3) the signed bounds cross zero, so they tell us nothing
6135 * about the result
6136 * If the value in dst_reg is known nonnegative, then again the
6137 * unsigned bounds capture the signed bounds.
6138 * Thus, in all cases it suffices to blow away our signed bounds
6139 * and rely on inferring new ones from the unsigned bounds and
6140 * var_off of the result.
6141 */
6142 dst_reg->smin_value = S64_MIN;
6143 dst_reg->smax_value = S64_MAX;
6144 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
6145 dst_reg->umin_value >>= umax_val;
6146 dst_reg->umax_value >>= umin_val;
3f50f132
JF
6147
6148 /* It's not easy to operate on alu32 bounds here because it depends
6149 * on bits being shifted in. Take easy way out and mark unbounded
6150 * so we can recalculate later from tnum.
6151 */
6152 __mark_reg32_unbounded(dst_reg);
07cd2631
JF
6153 __update_reg_bounds(dst_reg);
6154}
6155
3f50f132
JF
6156static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
6157 struct bpf_reg_state *src_reg)
07cd2631 6158{
3f50f132 6159 u64 umin_val = src_reg->u32_min_value;
07cd2631
JF
6160
6161 /* Upon reaching here, src_known is true and
6162 * umax_val is equal to umin_val.
6163 */
3f50f132
JF
6164 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
6165 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
07cd2631 6166
3f50f132
JF
6167 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
6168
6169 /* blow away the dst_reg umin_value/umax_value and rely on
6170 * dst_reg var_off to refine the result.
6171 */
6172 dst_reg->u32_min_value = 0;
6173 dst_reg->u32_max_value = U32_MAX;
6174
6175 __mark_reg64_unbounded(dst_reg);
6176 __update_reg32_bounds(dst_reg);
6177}
6178
6179static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
6180 struct bpf_reg_state *src_reg)
6181{
6182 u64 umin_val = src_reg->umin_value;
6183
6184 /* Upon reaching here, src_known is true and umax_val is equal
6185 * to umin_val.
6186 */
6187 dst_reg->smin_value >>= umin_val;
6188 dst_reg->smax_value >>= umin_val;
6189
6190 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
07cd2631
JF
6191
6192 /* blow away the dst_reg umin_value/umax_value and rely on
6193 * dst_reg var_off to refine the result.
6194 */
6195 dst_reg->umin_value = 0;
6196 dst_reg->umax_value = U64_MAX;
3f50f132
JF
6197
6198 /* It's not easy to operate on alu32 bounds here because it depends
6199 * on bits being shifted in from upper 32-bits. Take easy way out
6200 * and mark unbounded so we can recalculate later from tnum.
6201 */
6202 __mark_reg32_unbounded(dst_reg);
07cd2631
JF
6203 __update_reg_bounds(dst_reg);
6204}
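/* Standalone illustration of the signed-shift rule above: for a known
 * constant shift amount, arithmetic right shift is monotonic, so both
 * signed bounds can simply be shifted and their order is preserved (the
 * unsigned bounds are then recomputed from var_off). As before, s>> of a
 * negative value is implementation-defined in ISO C but arithmetic on
 * all relevant compilers.
 */
#include <assert.h>
#include <stdint.h>

static void demo_arsh_bounds(int64_t smin, int64_t smax, unsigned int k)
{
	assert(smin <= smax && k < 64);
	assert((smin >> k) <= (smax >> k));	/* order is preserved */
}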
6205
468f6eaf
JH
6206/* WARNING: This function does calculations on 64-bit values, but the actual
6207 * execution may occur on 32-bit values. Therefore, things like bitshifts
6208 * need extra checks in the 32-bit case.
6209 */
f1174f77
EC
6210static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
6211 struct bpf_insn *insn,
6212 struct bpf_reg_state *dst_reg,
6213 struct bpf_reg_state src_reg)
969bf05e 6214{
638f5b90 6215 struct bpf_reg_state *regs = cur_regs(env);
48461135 6216 u8 opcode = BPF_OP(insn->code);
b0b3fb67 6217 bool src_known;
b03c9f9f
EC
6218 s64 smin_val, smax_val;
6219 u64 umin_val, umax_val;
3f50f132
JF
6220 s32 s32_min_val, s32_max_val;
6221 u32 u32_min_val, u32_max_val;
468f6eaf 6222 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
d3bd7413
DB
6223 u32 dst = insn->dst_reg;
6224 int ret;
3f50f132 6225 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
b799207e 6226
b03c9f9f
EC
6227 smin_val = src_reg.smin_value;
6228 smax_val = src_reg.smax_value;
6229 umin_val = src_reg.umin_value;
6230 umax_val = src_reg.umax_value;
f23cc643 6231
3f50f132
JF
6232 s32_min_val = src_reg.s32_min_value;
6233 s32_max_val = src_reg.s32_max_value;
6234 u32_min_val = src_reg.u32_min_value;
6235 u32_max_val = src_reg.u32_max_value;
6236
6237 if (alu32) {
6238 src_known = tnum_subreg_is_const(src_reg.var_off);
3f50f132
JF
6239 if ((src_known &&
6240 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
6241 s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
6242 /* Taint dst register if offset had invalid bounds
6243 * derived from e.g. dead branches.
6244 */
6245 __mark_reg_unknown(env, dst_reg);
6246 return 0;
6247 }
6248 } else {
6249 src_known = tnum_is_const(src_reg.var_off);
3f50f132
JF
6250 if ((src_known &&
6251 (smin_val != smax_val || umin_val != umax_val)) ||
6252 smin_val > smax_val || umin_val > umax_val) {
6253 /* Taint dst register if offset had invalid bounds
6254 * derived from e.g. dead branches.
6255 */
6256 __mark_reg_unknown(env, dst_reg);
6257 return 0;
6258 }
6f16101e
DB
6259 }
6260
bb7f0f98
AS
6261 if (!src_known &&
6262 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
f54c7898 6263 __mark_reg_unknown(env, dst_reg);
bb7f0f98
AS
6264 return 0;
6265 }
6266
3f50f132
JF
6267 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
6268 * There are two classes of instructions: for the first class we track
6269 * both alu32 and alu64 sign/unsigned bounds independently; this provides
6270 * the greatest amount of precision when alu operations are mixed with
6271 * jmp32 operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_AND,
6272 * and BPF_OR. This is possible because these ops have fairly easy to
6273 * understand and calculate behavior in both 32-bit and 64-bit alu ops.
6274 * See alu32 verifier tests for examples. The second class of
6275 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
6276 * with regards to tracking sign/unsigned bounds because the bits may
6277 * cross subreg boundaries in the alu64 case. When this happens we mark
6278 * the reg unbounded in the subreg bound space and use the resulting
6279 * tnum to calculate an approximation of the sign/unsigned bounds.
6280 */
48461135
JB
6281 switch (opcode) {
6282 case BPF_ADD:
d3bd7413
DB
6283 ret = sanitize_val_alu(env, insn);
6284 if (ret < 0) {
6285 verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
6286 return ret;
6287 }
3f50f132 6288 scalar32_min_max_add(dst_reg, &src_reg);
07cd2631 6289 scalar_min_max_add(dst_reg, &src_reg);
3f50f132 6290 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
48461135
JB
6291 break;
6292 case BPF_SUB:
d3bd7413
DB
6293 ret = sanitize_val_alu(env, insn);
6294 if (ret < 0) {
6295 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
6296 return ret;
6297 }
3f50f132 6298 scalar32_min_max_sub(dst_reg, &src_reg);
07cd2631 6299 scalar_min_max_sub(dst_reg, &src_reg);
3f50f132 6300 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
48461135
JB
6301 break;
6302 case BPF_MUL:
3f50f132
JF
6303 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
6304 scalar32_min_max_mul(dst_reg, &src_reg);
07cd2631 6305 scalar_min_max_mul(dst_reg, &src_reg);
48461135
JB
6306 break;
6307 case BPF_AND:
3f50f132
JF
6308 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
6309 scalar32_min_max_and(dst_reg, &src_reg);
07cd2631 6310 scalar_min_max_and(dst_reg, &src_reg);
f1174f77
EC
6311 break;
6312 case BPF_OR:
3f50f132
JF
6313 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
6314 scalar32_min_max_or(dst_reg, &src_reg);
07cd2631 6315 scalar_min_max_or(dst_reg, &src_reg);
48461135 6316 break;
2921c90d
YS
6317 case BPF_XOR:
6318 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
6319 scalar32_min_max_xor(dst_reg, &src_reg);
6320 scalar_min_max_xor(dst_reg, &src_reg);
6321 break;
48461135 6322 case BPF_LSH:
468f6eaf
JH
6323 if (umax_val >= insn_bitness) {
6324 /* Shifts greater than 31 or 63 are undefined.
6325 * This includes shifts by a negative number.
b03c9f9f 6326 */
61bd5218 6327 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
6328 break;
6329 }
3f50f132
JF
6330 if (alu32)
6331 scalar32_min_max_lsh(dst_reg, &src_reg);
6332 else
6333 scalar_min_max_lsh(dst_reg, &src_reg);
48461135
JB
6334 break;
6335 case BPF_RSH:
468f6eaf
JH
6336 if (umax_val >= insn_bitness) {
6337 /* Shifts greater than 31 or 63 are undefined.
6338 * This includes shifts by a negative number.
b03c9f9f 6339 */
61bd5218 6340 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
6341 break;
6342 }
3f50f132
JF
6343 if (alu32)
6344 scalar32_min_max_rsh(dst_reg, &src_reg);
6345 else
6346 scalar_min_max_rsh(dst_reg, &src_reg);
48461135 6347 break;
9cbe1f5a
YS
6348 case BPF_ARSH:
6349 if (umax_val >= insn_bitness) {
6350 /* Shifts greater than 31 or 63 are undefined.
6351 * This includes shifts by a negative number.
6352 */
6353 mark_reg_unknown(env, regs, insn->dst_reg);
6354 break;
6355 }
3f50f132
JF
6356 if (alu32)
6357 scalar32_min_max_arsh(dst_reg, &src_reg);
6358 else
6359 scalar_min_max_arsh(dst_reg, &src_reg);
9cbe1f5a 6360 break;
48461135 6361 default:
61bd5218 6362 mark_reg_unknown(env, regs, insn->dst_reg);
48461135
JB
6363 break;
6364 }
6365
3f50f132
JF
6366 /* ALU32 ops are zero extended into 64bit register */
6367 if (alu32)
6368 zext_32_to_64(dst_reg);
468f6eaf 6369
294f2fc6 6370 __update_reg_bounds(dst_reg);
b03c9f9f
EC
6371 __reg_deduce_bounds(dst_reg);
6372 __reg_bound_offset(dst_reg);
f1174f77
EC
6373 return 0;
6374}
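/* Standalone sketch of the alu32 zero-extension noted above: a 32-bit BPF
 * ALU op computes in the low subregister and clears the upper 32 bits,
 * which is why zext_32_to_64() rebuilds the 64-bit bounds from the
 * 32-bit ones.
 */
#include <stdint.h>

static uint64_t demo_alu32_add(uint64_t dst, uint64_t src)
{
	/* upper 32 bits of the result are always zero */
	return (uint32_t)((uint32_t)dst + (uint32_t)src);
}
/* demo_alu32_add(UINT64_MAX, 1) == 0 */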
6375
6376/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
6377 * and var_off.
6378 */
6379static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
6380 struct bpf_insn *insn)
6381{
f4d7e40a
AS
6382 struct bpf_verifier_state *vstate = env->cur_state;
6383 struct bpf_func_state *state = vstate->frame[vstate->curframe];
6384 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
f1174f77
EC
6385 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
6386 u8 opcode = BPF_OP(insn->code);
b5dc0163 6387 int err;
f1174f77
EC
6388
6389 dst_reg = &regs[insn->dst_reg];
f1174f77
EC
6390 src_reg = NULL;
6391 if (dst_reg->type != SCALAR_VALUE)
6392 ptr_reg = dst_reg;
6393 if (BPF_SRC(insn->code) == BPF_X) {
6394 src_reg = &regs[insn->src_reg];
f1174f77
EC
6395 if (src_reg->type != SCALAR_VALUE) {
6396 if (dst_reg->type != SCALAR_VALUE) {
6397 /* Combining two pointers by any ALU op yields
82abbf8d
AS
6398 * an arbitrary scalar. Disallow all math except
6399 * pointer subtraction
f1174f77 6400 */
dd066823 6401 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
82abbf8d
AS
6402 mark_reg_unknown(env, regs, insn->dst_reg);
6403 return 0;
f1174f77 6404 }
82abbf8d
AS
6405 verbose(env, "R%d pointer %s pointer prohibited\n",
6406 insn->dst_reg,
6407 bpf_alu_string[opcode >> 4]);
6408 return -EACCES;
f1174f77
EC
6409 } else {
6410 /* scalar += pointer
6411 * This is legal, but we have to reverse our
6412 * src/dest handling in computing the range
6413 */
6414 err = mark_chain_precision(env, insn->dst_reg);
6415 if (err)
6416 return err;
6417 return adjust_ptr_min_max_vals(env, insn,
6418 src_reg, dst_reg);
6419 }
6420 } else if (ptr_reg) {
6421 /* pointer += scalar */
6422 err = mark_chain_precision(env, insn->src_reg);
6423 if (err)
6424 return err;
6425 return adjust_ptr_min_max_vals(env, insn,
6426 dst_reg, src_reg);
6427 }
6428 } else {
6429 /* Pretend the src is a reg with a known value, since we only
6430 * need to be able to read from this state.
6431 */
6432 off_reg.type = SCALAR_VALUE;
b03c9f9f 6433 __mark_reg_known(&off_reg, insn->imm);
f1174f77 6434 src_reg = &off_reg;
6435 if (ptr_reg) /* pointer += K */
6436 return adjust_ptr_min_max_vals(env, insn,
6437 ptr_reg, src_reg);
6438 }
6439
6440 /* Got here implies adding two SCALAR_VALUEs */
6441 if (WARN_ON_ONCE(ptr_reg)) {
f4d7e40a 6442 print_verifier_state(env, state);
61bd5218 6443 verbose(env, "verifier internal error: unexpected ptr_reg\n");
6444 return -EINVAL;
6445 }
6446 if (WARN_ON(!src_reg)) {
f4d7e40a 6447 print_verifier_state(env, state);
61bd5218 6448 verbose(env, "verifier internal error: no src_reg\n");
6449 return -EINVAL;
6450 }
6451 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
6452}
6453
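/* Editor's sketch (illustrative, not from the kernel tree): instruction
 * sequences that reach the two adjust_ptr_min_max_vals() call sites
 * above, built with the usual <linux/filter.h> macros. For the
 * "scalar += pointer" form the verifier swaps src/dst before range
 * tracking, as the comment above explains.
 */
struct bpf_insn ptr_arith_example[] = {
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),		/* R1 = frame pointer (PTR_TO_STACK) */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),		/* pointer += K */
	BPF_MOV64_IMM(BPF_REG_2, 4),			/* R2 = scalar 4 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),	/* scalar += pointer: R2 becomes a stack pointer */
	BPF_EXIT_INSN(),				/* sketch only; R0 is left unset */
};
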
17a52670 6454/* check validity of 32-bit and 64-bit arithmetic operations */
58e2af8b 6455static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 6456{
638f5b90 6457 struct bpf_reg_state *regs = cur_regs(env);
6458 u8 opcode = BPF_OP(insn->code);
6459 int err;
6460
6461 if (opcode == BPF_END || opcode == BPF_NEG) {
6462 if (opcode == BPF_NEG) {
6463 if (BPF_SRC(insn->code) != 0 ||
6464 insn->src_reg != BPF_REG_0 ||
6465 insn->off != 0 || insn->imm != 0) {
61bd5218 6466 verbose(env, "BPF_NEG uses reserved fields\n");
6467 return -EINVAL;
6468 }
6469 } else {
6470 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
6471 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
6472 BPF_CLASS(insn->code) == BPF_ALU64) {
61bd5218 6473 verbose(env, "BPF_END uses reserved fields\n");
6474 return -EINVAL;
6475 }
6476 }
6477
6478 /* check src operand */
dc503a8a 6479 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6480 if (err)
6481 return err;
6482
1be7f75d 6483 if (is_pointer_value(env, insn->dst_reg)) {
61bd5218 6484 verbose(env, "R%d pointer arithmetic prohibited\n",
6485 insn->dst_reg);
6486 return -EACCES;
6487 }
6488
17a52670 6489 /* check dest operand */
dc503a8a 6490 err = check_reg_arg(env, insn->dst_reg, DST_OP);
6491 if (err)
6492 return err;
6493
6494 } else if (opcode == BPF_MOV) {
6495
6496 if (BPF_SRC(insn->code) == BPF_X) {
6497 if (insn->imm != 0 || insn->off != 0) {
61bd5218 6498 verbose(env, "BPF_MOV uses reserved fields\n");
6499 return -EINVAL;
6500 }
6501
6502 /* check src operand */
dc503a8a 6503 err = check_reg_arg(env, insn->src_reg, SRC_OP);
6504 if (err)
6505 return err;
6506 } else {
6507 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 6508 verbose(env, "BPF_MOV uses reserved fields\n");
6509 return -EINVAL;
6510 }
6511 }
6512
6513 /* check dest operand, mark as required later */
6514 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
6515 if (err)
6516 return err;
6517
6518 if (BPF_SRC(insn->code) == BPF_X) {
6519 struct bpf_reg_state *src_reg = regs + insn->src_reg;
6520 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
6521
6522 if (BPF_CLASS(insn->code) == BPF_ALU64) {
6523 /* case: R1 = R2
6524 * copy register state to dest reg
6525 */
6526 *dst_reg = *src_reg;
6527 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 6528 dst_reg->subreg_def = DEF_NOT_SUBREG;
17a52670 6529 } else {
f1174f77 6530 /* R1 = (u32) R2 */
1be7f75d 6531 if (is_pointer_value(env, insn->src_reg)) {
6532 verbose(env,
6533 "R%d partial copy of pointer\n",
6534 insn->src_reg);
6535 return -EACCES;
6536 } else if (src_reg->type == SCALAR_VALUE) {
6537 *dst_reg = *src_reg;
6538 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 6539 dst_reg->subreg_def = env->insn_idx + 1;
6540 } else {
6541 mark_reg_unknown(env, regs,
6542 insn->dst_reg);
1be7f75d 6543 }
3f50f132 6544 zext_32_to_64(dst_reg);
6545 }
6546 } else {
6547 /* case: R = imm
6548 * remember the value we stored into this reg
6549 */
6550 /* clear any state __mark_reg_known doesn't set */
6551 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77 6552 regs[insn->dst_reg].type = SCALAR_VALUE;
6553 if (BPF_CLASS(insn->code) == BPF_ALU64) {
6554 __mark_reg_known(regs + insn->dst_reg,
6555 insn->imm);
6556 } else {
6557 __mark_reg_known(regs + insn->dst_reg,
6558 (u32)insn->imm);
6559 }
6560 }
6561
6562 } else if (opcode > BPF_END) {
61bd5218 6563 verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
6564 return -EINVAL;
6565
6566 } else { /* all other ALU ops: and, sub, xor, add, ... */
6567
6568 if (BPF_SRC(insn->code) == BPF_X) {
6569 if (insn->imm != 0 || insn->off != 0) {
61bd5218 6570 verbose(env, "BPF_ALU uses reserved fields\n");
6571 return -EINVAL;
6572 }
6573 /* check src1 operand */
dc503a8a 6574 err = check_reg_arg(env, insn->src_reg, SRC_OP);
6575 if (err)
6576 return err;
6577 } else {
6578 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 6579 verbose(env, "BPF_ALU uses reserved fields\n");
6580 return -EINVAL;
6581 }
6582 }
6583
6584 /* check src2 operand */
dc503a8a 6585 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6586 if (err)
6587 return err;
6588
6589 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
6590 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
61bd5218 6591 verbose(env, "div by zero\n");
6592 return -EINVAL;
6593 }
6594
6595 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
6596 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
6597 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
6598
6599 if (insn->imm < 0 || insn->imm >= size) {
61bd5218 6600 verbose(env, "invalid shift %d\n", insn->imm);
6601 return -EINVAL;
6602 }
6603 }
6604
1a0dc1ac 6605 /* check dest operand */
dc503a8a 6606 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
6607 if (err)
6608 return err;
6609
f1174f77 6610 return adjust_reg_min_max_vals(env, insn);
6611 }
6612
6613 return 0;
6614}
6615
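/* Editor's sketch of what "reserved fields" means for check_alu_op()
 * (illustrative encodings, not from the kernel tree). BPF_NEG must
 * carry src_reg == 0, off == 0 and imm == 0; anything else trips the
 * "BPF_NEG uses reserved fields" error above.
 */
struct bpf_insn neg_ok = BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0);
struct bpf_insn neg_bad = {
	.code    = BPF_ALU64 | BPF_NEG | BPF_K,
	.dst_reg = BPF_REG_0,
	.src_reg = 0,
	.off     = 0,
	.imm     = 1,	/* reserved: must be 0, so the verifier returns -EINVAL */
};
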
6616static void __find_good_pkt_pointers(struct bpf_func_state *state,
6617 struct bpf_reg_state *dst_reg,
6618 enum bpf_reg_type type, u16 new_range)
6619{
6620 struct bpf_reg_state *reg;
6621 int i;
6622
6623 for (i = 0; i < MAX_BPF_REG; i++) {
6624 reg = &state->regs[i];
6625 if (reg->type == type && reg->id == dst_reg->id)
6626 /* keep the maximum range already checked */
6627 reg->range = max(reg->range, new_range);
6628 }
6629
6630 bpf_for_each_spilled_reg(i, state, reg) {
6631 if (!reg)
6632 continue;
6633 if (reg->type == type && reg->id == dst_reg->id)
6634 reg->range = max(reg->range, new_range);
6635 }
6636}
6637
f4d7e40a 6638static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
de8f3a83 6639 struct bpf_reg_state *dst_reg,
f8ddadc4 6640 enum bpf_reg_type type,
fb2a311a 6641 bool range_right_open)
969bf05e 6642{
fb2a311a 6643 u16 new_range;
c6a9efa1 6644 int i;
2d2be8ca 6645
6646 if (dst_reg->off < 0 ||
6647 (dst_reg->off == 0 && range_right_open))
6648 /* This doesn't give us any range */
6649 return;
6650
6651 if (dst_reg->umax_value > MAX_PACKET_OFF ||
6652 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
6653 /* Risk of overflow. For instance, ptr + (1<<63) may be less
6654 * than pkt_end, but that's because it's also less than pkt.
6655 */
6656 return;
6657
6658 new_range = dst_reg->off;
6659 if (range_right_open)
6660 new_range--;
6661
6662 /* Examples for register markings:
2d2be8ca 6663 *
fb2a311a 6664 * pkt_data in dst register:
6665 *
6666 * r2 = r3;
6667 * r2 += 8;
6668 * if (r2 > pkt_end) goto <handle exception>
6669 * <access okay>
6670 *
6671 * r2 = r3;
6672 * r2 += 8;
6673 * if (r2 < pkt_end) goto <access okay>
6674 * <handle exception>
6675 *
6676 * Where:
6677 * r2 == dst_reg, pkt_end == src_reg
6678 * r2=pkt(id=n,off=8,r=0)
6679 * r3=pkt(id=n,off=0,r=0)
6680 *
fb2a311a 6681 * pkt_data in src register:
6682 *
6683 * r2 = r3;
6684 * r2 += 8;
6685 * if (pkt_end >= r2) goto <access okay>
6686 * <handle exception>
6687 *
6688 * r2 = r3;
6689 * r2 += 8;
6690 * if (pkt_end <= r2) goto <handle exception>
6691 * <access okay>
6692 *
6693 * Where:
6694 * pkt_end == dst_reg, r2 == src_reg
6695 * r2=pkt(id=n,off=8,r=0)
6696 * r3=pkt(id=n,off=0,r=0)
6697 *
6698 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
6699 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
6700 * and [r3, r3 + 8-1) respectively is safe to access depending on
6701 * the check.
969bf05e 6702 */
2d2be8ca 6703
6704 /* If our ids match, then we must have the same max_value. And we
6705 * don't care about the other reg's fixed offset, since if it's too big
6706 * the range won't allow anything.
6707 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
6708 */
6709 for (i = 0; i <= vstate->curframe; i++)
6710 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
6711 new_range);
6712}
6713
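/* Editor's worked example for the new_range computation above (values
 * illustrative): with r2 = r3 + 8, i.e. dst_reg->off == 8, a branch
 * that proves "r2 <= pkt_end" is a closed check, so new_range = 8 and
 * [r3, r3 + 8) becomes accessible; a branch that proves the strict
 * "r2 < pkt_end" is range_right_open, so new_range = 8 - 1 per the
 * decrement above.
 */
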
3f50f132 6714static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
4f7b3e82 6715{
6716 struct tnum subreg = tnum_subreg(reg->var_off);
6717 s32 sval = (s32)val;
a72dafaf 6718
6719 switch (opcode) {
6720 case BPF_JEQ:
6721 if (tnum_is_const(subreg))
6722 return !!tnum_equals_const(subreg, val);
6723 break;
6724 case BPF_JNE:
6725 if (tnum_is_const(subreg))
6726 return !tnum_equals_const(subreg, val);
6727 break;
6728 case BPF_JSET:
6729 if ((~subreg.mask & subreg.value) & val)
6730 return 1;
6731 if (!((subreg.mask | subreg.value) & val))
6732 return 0;
6733 break;
6734 case BPF_JGT:
6735 if (reg->u32_min_value > val)
6736 return 1;
6737 else if (reg->u32_max_value <= val)
6738 return 0;
6739 break;
6740 case BPF_JSGT:
6741 if (reg->s32_min_value > sval)
6742 return 1;
6743 else if (reg->s32_max_value < sval)
6744 return 0;
6745 break;
6746 case BPF_JLT:
6747 if (reg->u32_max_value < val)
6748 return 1;
6749 else if (reg->u32_min_value >= val)
6750 return 0;
6751 break;
6752 case BPF_JSLT:
6753 if (reg->s32_max_value < sval)
6754 return 1;
6755 else if (reg->s32_min_value >= sval)
6756 return 0;
6757 break;
6758 case BPF_JGE:
6759 if (reg->u32_min_value >= val)
6760 return 1;
6761 else if (reg->u32_max_value < val)
6762 return 0;
6763 break;
6764 case BPF_JSGE:
6765 if (reg->s32_min_value >= sval)
6766 return 1;
6767 else if (reg->s32_max_value < sval)
6768 return 0;
6769 break;
6770 case BPF_JLE:
6771 if (reg->u32_max_value <= val)
6772 return 1;
6773 else if (reg->u32_min_value > val)
6774 return 0;
6775 break;
6776 case BPF_JSLE:
6777 if (reg->s32_max_value <= sval)
6778 return 1;
6779 else if (reg->s32_min_value > sval)
6780 return 0;
6781 break;
6782 }
4f7b3e82 6783
6784 return -1;
6785}
092ed096 6786
6787
6788static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
6789{
6790 s64 sval = (s64)val;
a72dafaf 6791
6792 switch (opcode) {
6793 case BPF_JEQ:
6794 if (tnum_is_const(reg->var_off))
6795 return !!tnum_equals_const(reg->var_off, val);
6796 break;
6797 case BPF_JNE:
6798 if (tnum_is_const(reg->var_off))
6799 return !tnum_equals_const(reg->var_off, val);
6800 break;
6801 case BPF_JSET:
6802 if ((~reg->var_off.mask & reg->var_off.value) & val)
6803 return 1;
6804 if (!((reg->var_off.mask | reg->var_off.value) & val))
6805 return 0;
6806 break;
6807 case BPF_JGT:
6808 if (reg->umin_value > val)
6809 return 1;
6810 else if (reg->umax_value <= val)
6811 return 0;
6812 break;
6813 case BPF_JSGT:
a72dafaf 6814 if (reg->smin_value > sval)
4f7b3e82 6815 return 1;
a72dafaf 6816 else if (reg->smax_value < sval)
6817 return 0;
6818 break;
6819 case BPF_JLT:
6820 if (reg->umax_value < val)
6821 return 1;
6822 else if (reg->umin_value >= val)
6823 return 0;
6824 break;
6825 case BPF_JSLT:
a72dafaf 6826 if (reg->smax_value < sval)
4f7b3e82 6827 return 1;
a72dafaf 6828 else if (reg->smin_value >= sval)
6829 return 0;
6830 break;
6831 case BPF_JGE:
6832 if (reg->umin_value >= val)
6833 return 1;
6834 else if (reg->umax_value < val)
6835 return 0;
6836 break;
6837 case BPF_JSGE:
a72dafaf 6838 if (reg->smin_value >= sval)
4f7b3e82 6839 return 1;
a72dafaf 6840 else if (reg->smax_value < sval)
6841 return 0;
6842 break;
6843 case BPF_JLE:
6844 if (reg->umax_value <= val)
6845 return 1;
6846 else if (reg->umin_value > val)
6847 return 0;
6848 break;
6849 case BPF_JSLE:
a72dafaf 6850 if (reg->smax_value <= sval)
4f7b3e82 6851 return 1;
a72dafaf 6852 else if (reg->smin_value > sval)
6853 return 0;
6854 break;
6855 }
6856
6857 return -1;
6858}
6859
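/* Editor's sketch: the tri-state contract of the two helpers above,
 * reduced to the BPF_JLT arm as a standalone userspace function
 * (illustrative; umin/umax stand for the unsigned bounds tracked by
 * the verifier).
 */
#include <stdint.h>

static int branch_jlt_taken(uint64_t umin, uint64_t umax, uint64_t val)
{
	if (umax < val)
		return 1;	/* every possible value is < val: always taken */
	if (umin >= val)
		return 0;	/* no possible value is < val: never taken */
	return -1;		/* e.g. [0, 10] vs. val == 5: unknown */
}
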
6860/* compute branch direction of the expression "if (reg opcode val) goto target;"
6861 * and return:
6862 * 1 - branch will be taken and "goto target" will be executed
6863 * 0 - branch will not be taken and fall-through to next insn
6864 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value
6865 * range [0,10]
604dca5e 6866 */
6867static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
6868 bool is_jmp32)
604dca5e 6869{
6870 if (__is_pointer_value(false, reg)) {
6871 if (!reg_type_not_null(reg->type))
6872 return -1;
6873
6874 /* If the pointer is valid, tests against zero will fail, so we
6875 * can use this to decide the branch direction.
6876 */
6877 if (val != 0)
6878 return -1;
6879
6880 switch (opcode) {
6881 case BPF_JEQ:
6882 return 0;
6883 case BPF_JNE:
6884 return 1;
6885 default:
6886 return -1;
6887 }
6888 }
604dca5e 6889
6890 if (is_jmp32)
6891 return is_branch32_taken(reg, val, opcode);
6892 return is_branch64_taken(reg, val, opcode);
6893}
6894
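/* Editor's sketch of the null-check special case above (pseudo-asm,
 * register types illustrative): with r1 a pointer whose type passes
 * reg_type_not_null(), e.g. PTR_TO_SOCKET,
 *
 *	if r1 == 0 goto drop	// pred == 0: branch never taken
 *	if r1 != 0 goto use	// pred == 1: branch always taken
 *
 * while a comparison against any nonzero constant stays pred == -1.
 */
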
6895/* Adjusts the register min/max values in the case that the dst_reg is the
6896 * variable register that we are working on, and src_reg is a constant or we're
6897 * simply doing a BPF_K check.
f1174f77 6898 * In JEQ/JNE cases we also adjust the var_off values.
6899 */
6900static void reg_set_min_max(struct bpf_reg_state *true_reg,
6901 struct bpf_reg_state *false_reg,
6902 u64 val, u32 val32,
092ed096 6903 u8 opcode, bool is_jmp32)
48461135 6904{
6905 struct tnum false_32off = tnum_subreg(false_reg->var_off);
6906 struct tnum false_64off = false_reg->var_off;
6907 struct tnum true_32off = tnum_subreg(true_reg->var_off);
6908 struct tnum true_64off = true_reg->var_off;
6909 s64 sval = (s64)val;
6910 s32 sval32 = (s32)val32;
a72dafaf 6911
6912 /* If the dst_reg is a pointer, we can't learn anything about its
6913 * variable offset from the compare (unless src_reg were a pointer into
6914 * the same object, but we don't bother with that).
6915 * Since false_reg and true_reg have the same type by construction, we
6916 * only need to check one of them for pointerness.
6917 */
6918 if (__is_pointer_value(false, false_reg))
6919 return;
4cabc5b1 6920
6921 switch (opcode) {
6922 case BPF_JEQ:
48461135 6923 case BPF_JNE:
6924 {
6925 struct bpf_reg_state *reg =
6926 opcode == BPF_JEQ ? true_reg : false_reg;
6927
6928 /* For BPF_JEQ, if this is false we know nothing Jon Snow, but
6929 * if it is true we know the value for sure. Likewise for
6930 * BPF_JNE.
48461135 6931 */
6932 if (is_jmp32)
6933 __mark_reg32_known(reg, val32);
6934 else
092ed096 6935 __mark_reg_known(reg, val);
48461135 6936 break;
a72dafaf 6937 }
960ea056 6938 case BPF_JSET:
6939 if (is_jmp32) {
6940 false_32off = tnum_and(false_32off, tnum_const(~val32));
6941 if (is_power_of_2(val32))
6942 true_32off = tnum_or(true_32off,
6943 tnum_const(val32));
6944 } else {
6945 false_64off = tnum_and(false_64off, tnum_const(~val));
6946 if (is_power_of_2(val))
6947 true_64off = tnum_or(true_64off,
6948 tnum_const(val));
6949 }
960ea056 6950 break;
48461135 6951 case BPF_JGE:
6952 case BPF_JGT:
6953 {
6954 if (is_jmp32) {
6955 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1;
6956 u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
6957
6958 false_reg->u32_max_value = min(false_reg->u32_max_value,
6959 false_umax);
6960 true_reg->u32_min_value = max(true_reg->u32_min_value,
6961 true_umin);
6962 } else {
6963 u64 false_umax = opcode == BPF_JGT ? val : val - 1;
6964 u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
6965
6966 false_reg->umax_value = min(false_reg->umax_value, false_umax);
6967 true_reg->umin_value = max(true_reg->umin_value, true_umin);
6968 }
b03c9f9f 6969 break;
a72dafaf 6970 }
48461135 6971 case BPF_JSGE:
6972 case BPF_JSGT:
6973 {
6974 if (is_jmp32) {
6975 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1;
6976 s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
a72dafaf 6977
6978 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
6979 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
6980 } else {
6981 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
6982 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
6983
6984 false_reg->smax_value = min(false_reg->smax_value, false_smax);
6985 true_reg->smin_value = max(true_reg->smin_value, true_smin);
6986 }
48461135 6987 break;
a72dafaf 6988 }
b4e432f1 6989 case BPF_JLE:
6990 case BPF_JLT:
6991 {
6992 if (is_jmp32) {
6993 u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1;
6994 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
6995
6996 false_reg->u32_min_value = max(false_reg->u32_min_value,
6997 false_umin);
6998 true_reg->u32_max_value = min(true_reg->u32_max_value,
6999 true_umax);
7000 } else {
7001 u64 false_umin = opcode == BPF_JLT ? val : val + 1;
7002 u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
7003
7004 false_reg->umin_value = max(false_reg->umin_value, false_umin);
7005 true_reg->umax_value = min(true_reg->umax_value, true_umax);
7006 }
b4e432f1 7007 break;
a72dafaf 7008 }
b4e432f1 7009 case BPF_JSLE:
7010 case BPF_JSLT:
7011 {
7012 if (is_jmp32) {
7013 s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1;
7014 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
a72dafaf 7015
7016 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
7017 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
7018 } else {
7019 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
7020 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
7021
7022 false_reg->smin_value = max(false_reg->smin_value, false_smin);
7023 true_reg->smax_value = min(true_reg->smax_value, true_smax);
7024 }
b4e432f1 7025 break;
a72dafaf 7026 }
48461135 7027 default:
0fc31b10 7028 return;
7029 }
7030
7031 if (is_jmp32) {
7032 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
7033 tnum_subreg(false_32off));
7034 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
7035 tnum_subreg(true_32off));
7036 __reg_combine_32_into_64(false_reg);
7037 __reg_combine_32_into_64(true_reg);
7038 } else {
7039 false_reg->var_off = false_64off;
7040 true_reg->var_off = true_64off;
7041 __reg_combine_64_into_32(false_reg);
7042 __reg_combine_64_into_32(true_reg);
7043 }
7044}
7045
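/* Editor's sketch: the BPF_JGT arm above as a standalone userspace
 * helper (illustrative). With dst in [0, 100] and val == 42 it leaves
 * the false branch with [0, 42] and the true branch with [43, 100].
 */
#include <stdint.h>

#define min_u64(a, b) ((a) < (b) ? (a) : (b))
#define max_u64(a, b) ((a) > (b) ? (a) : (b))

static void set_bounds_jgt(uint64_t *false_umax, uint64_t *true_umin,
			   uint64_t val)
{
	*false_umax = min_u64(*false_umax, val);	/* dst <= val on the false branch */
	*true_umin = max_u64(*true_umin, val + 1);	/* dst >  val on the true branch  */
}
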
7046/* Same as above, but for the case that dst_reg holds a constant and src_reg is
7047 * the variable reg.
7048 */
7049static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
7050 struct bpf_reg_state *false_reg,
7051 u64 val, u32 val32,
092ed096 7052 u8 opcode, bool is_jmp32)
48461135 7053{
7054 /* How can we transform "a <op> b" into "b <op> a"? */
7055 static const u8 opcode_flip[16] = {
7056 /* these stay the same */
7057 [BPF_JEQ >> 4] = BPF_JEQ,
7058 [BPF_JNE >> 4] = BPF_JNE,
7059 [BPF_JSET >> 4] = BPF_JSET,
7060 /* these swap "lesser" and "greater" (L and G in the opcodes) */
7061 [BPF_JGE >> 4] = BPF_JLE,
7062 [BPF_JGT >> 4] = BPF_JLT,
7063 [BPF_JLE >> 4] = BPF_JGE,
7064 [BPF_JLT >> 4] = BPF_JGT,
7065 [BPF_JSGE >> 4] = BPF_JSLE,
7066 [BPF_JSGT >> 4] = BPF_JSLT,
7067 [BPF_JSLE >> 4] = BPF_JSGE,
7068 [BPF_JSLT >> 4] = BPF_JSGT
7069 };
7070 opcode = opcode_flip[opcode >> 4];
7071 /* This uses zero as "not present in table"; luckily the zero opcode,
7072 * BPF_JA, can't get here.
b03c9f9f 7073 */
0fc31b10 7074 if (opcode)
3f50f132 7075 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
7076}
7077
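/* Editor's note with a worked case: the flip table above rewrites
 * "K <op> reg" as "reg <op'> K", e.g. BPF_JLT becomes BPF_JGT, so
 * "if (10 < reg)" is handled as "if (reg > 10)" and tightens reg to
 * [11, U64_MAX] on the true branch by reusing reg_set_min_max().
 */
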
7078/* Regs are known to be equal, so intersect their min/max/var_off */
7079static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
7080 struct bpf_reg_state *dst_reg)
7081{
7082 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
7083 dst_reg->umin_value);
7084 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
7085 dst_reg->umax_value);
7086 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
7087 dst_reg->smin_value);
7088 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
7089 dst_reg->smax_value);
7090 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
7091 dst_reg->var_off);
7092 /* We might have learned new bounds from the var_off. */
7093 __update_reg_bounds(src_reg);
7094 __update_reg_bounds(dst_reg);
7095 /* We might have learned something about the sign bit. */
7096 __reg_deduce_bounds(src_reg);
7097 __reg_deduce_bounds(dst_reg);
7098 /* We might have learned some bits from the bounds. */
7099 __reg_bound_offset(src_reg);
7100 __reg_bound_offset(dst_reg);
7101 /* Intersecting with the old var_off might have improved our bounds
7102 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
7103 * then new var_off is (0; 0x7f...fc) which improves our umax.
7104 */
7105 __update_reg_bounds(src_reg);
7106 __update_reg_bounds(dst_reg);
7107}
7108
7109static void reg_combine_min_max(struct bpf_reg_state *true_src,
7110 struct bpf_reg_state *true_dst,
7111 struct bpf_reg_state *false_src,
7112 struct bpf_reg_state *false_dst,
7113 u8 opcode)
7114{
7115 switch (opcode) {
7116 case BPF_JEQ:
7117 __reg_combine_min_max(true_src, true_dst);
7118 break;
7119 case BPF_JNE:
7120 __reg_combine_min_max(false_src, false_dst);
b03c9f9f 7121 break;
4cabc5b1 7122 }
7123}
7124
7125static void mark_ptr_or_null_reg(struct bpf_func_state *state,
7126 struct bpf_reg_state *reg, u32 id,
840b9615 7127 bool is_null)
57a09bf0 7128{
840b9615 7129 if (reg_type_may_be_null(reg->type) && reg->id == id) {
7130 /* Old offset (both fixed and variable parts) should
7131 * have been known-zero, because we don't allow pointer
7132 * arithmetic on pointers that might be NULL.
7133 */
7134 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
7135 !tnum_equals_const(reg->var_off, 0) ||
f1174f77 7136 reg->off)) {
7137 __mark_reg_known_zero(reg);
7138 reg->off = 0;
7139 }
7140 if (is_null) {
7141 reg->type = SCALAR_VALUE;
840b9615 7142 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
7143 const struct bpf_map *map = reg->map_ptr;
7144
7145 if (map->inner_map_meta) {
840b9615 7146 reg->type = CONST_PTR_TO_MAP;
7147 reg->map_ptr = map->inner_map_meta;
7148 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
fada7fdc 7149 reg->type = PTR_TO_XDP_SOCK;
7150 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
7151 map->map_type == BPF_MAP_TYPE_SOCKHASH) {
7152 reg->type = PTR_TO_SOCKET;
7153 } else {
7154 reg->type = PTR_TO_MAP_VALUE;
7155 }
7156 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
7157 reg->type = PTR_TO_SOCKET;
7158 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
7159 reg->type = PTR_TO_SOCK_COMMON;
7160 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
7161 reg->type = PTR_TO_TCP_SOCK;
7162 } else if (reg->type == PTR_TO_BTF_ID_OR_NULL) {
7163 reg->type = PTR_TO_BTF_ID;
7164 } else if (reg->type == PTR_TO_MEM_OR_NULL) {
7165 reg->type = PTR_TO_MEM;
7166 } else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) {
7167 reg->type = PTR_TO_RDONLY_BUF;
7168 } else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) {
7169 reg->type = PTR_TO_RDWR_BUF;
56f668df 7170 }
7171 if (is_null) {
7172 /* We don't need id and ref_obj_id from this point
7173 * onwards anymore, thus we should better reset it,
7174 * so that state pruning has chances to take effect.
7175 */
7176 reg->id = 0;
7177 reg->ref_obj_id = 0;
7178 } else if (!reg_may_point_to_spin_lock(reg)) {
7179 /* For not-NULL ptr, reg->ref_obj_id will be reset
7180 * in release_reg_references().
7181 *
7182 * reg->id is still used by spin_lock ptr. Other
7183 * than spin_lock ptr type, reg->id can be reset.
7184 */
7185 reg->id = 0;
56f668df 7186 }
7187 }
7188}
7189
7190static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
7191 bool is_null)
7192{
7193 struct bpf_reg_state *reg;
7194 int i;
7195
7196 for (i = 0; i < MAX_BPF_REG; i++)
7197 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
7198
7199 bpf_for_each_spilled_reg(i, state, reg) {
7200 if (!reg)
7201 continue;
7202 mark_ptr_or_null_reg(state, reg, id, is_null);
7203 }
7204}
7205
7206/* The logic is similar to find_good_pkt_pointers(), both could eventually
7207 * be folded together at some point.
7208 */
7209static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
7210 bool is_null)
57a09bf0 7211{
f4d7e40a 7212 struct bpf_func_state *state = vstate->frame[vstate->curframe];
c6a9efa1 7213 struct bpf_reg_state *regs = state->regs;
1b986589 7214 u32 ref_obj_id = regs[regno].ref_obj_id;
a08dd0da 7215 u32 id = regs[regno].id;
c6a9efa1 7216 int i;
57a09bf0 7217
7218 if (ref_obj_id && ref_obj_id == id && is_null)
7219 /* regs[regno] is in the " == NULL" branch.
7220 * No one could have freed the reference state before
7221 * doing the NULL check.
7222 */
7223 WARN_ON_ONCE(release_reference_state(state, id));
fd978bf7 7224
7225 for (i = 0; i <= vstate->curframe; i++)
7226 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
7227}
7228
7229static bool try_match_pkt_pointers(const struct bpf_insn *insn,
7230 struct bpf_reg_state *dst_reg,
7231 struct bpf_reg_state *src_reg,
7232 struct bpf_verifier_state *this_branch,
7233 struct bpf_verifier_state *other_branch)
7234{
7235 if (BPF_SRC(insn->code) != BPF_X)
7236 return false;
7237
7238 /* Pointers are always 64-bit. */
7239 if (BPF_CLASS(insn->code) == BPF_JMP32)
7240 return false;
7241
7242 switch (BPF_OP(insn->code)) {
7243 case BPF_JGT:
7244 if ((dst_reg->type == PTR_TO_PACKET &&
7245 src_reg->type == PTR_TO_PACKET_END) ||
7246 (dst_reg->type == PTR_TO_PACKET_META &&
7247 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
7248 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
7249 find_good_pkt_pointers(this_branch, dst_reg,
7250 dst_reg->type, false);
7251 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
7252 src_reg->type == PTR_TO_PACKET) ||
7253 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
7254 src_reg->type == PTR_TO_PACKET_META)) {
7255 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
7256 find_good_pkt_pointers(other_branch, src_reg,
7257 src_reg->type, true);
7258 } else {
7259 return false;
7260 }
7261 break;
7262 case BPF_JLT:
7263 if ((dst_reg->type == PTR_TO_PACKET &&
7264 src_reg->type == PTR_TO_PACKET_END) ||
7265 (dst_reg->type == PTR_TO_PACKET_META &&
7266 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
7267 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
7268 find_good_pkt_pointers(other_branch, dst_reg,
7269 dst_reg->type, true);
7270 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
7271 src_reg->type == PTR_TO_PACKET) ||
7272 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
7273 src_reg->type == PTR_TO_PACKET_META)) {
7274 /* pkt_end < pkt_data', pkt_data > pkt_meta' */
7275 find_good_pkt_pointers(this_branch, src_reg,
7276 src_reg->type, false);
7277 } else {
7278 return false;
7279 }
7280 break;
7281 case BPF_JGE:
7282 if ((dst_reg->type == PTR_TO_PACKET &&
7283 src_reg->type == PTR_TO_PACKET_END) ||
7284 (dst_reg->type == PTR_TO_PACKET_META &&
7285 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
7286 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
7287 find_good_pkt_pointers(this_branch, dst_reg,
7288 dst_reg->type, true);
7289 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
7290 src_reg->type == PTR_TO_PACKET) ||
7291 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
7292 src_reg->type == PTR_TO_PACKET_META)) {
7293 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
7294 find_good_pkt_pointers(other_branch, src_reg,
7295 src_reg->type, false);
7296 } else {
7297 return false;
7298 }
7299 break;
7300 case BPF_JLE:
7301 if ((dst_reg->type == PTR_TO_PACKET &&
7302 src_reg->type == PTR_TO_PACKET_END) ||
7303 (dst_reg->type == PTR_TO_PACKET_META &&
7304 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
7305 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
7306 find_good_pkt_pointers(other_branch, dst_reg,
7307 dst_reg->type, false);
7308 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
7309 src_reg->type == PTR_TO_PACKET) ||
7310 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
7311 src_reg->type == PTR_TO_PACKET_META)) {
7312 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
7313 find_good_pkt_pointers(this_branch, src_reg,
7314 src_reg->type, true);
7315 } else {
7316 return false;
7317 }
7318 break;
7319 default:
7320 return false;
7321 }
7322
7323 return true;
7324}
7325
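/* Editor's sketch (pseudo-asm, mirroring the comment style of
 * find_good_pkt_pointers() above) of a sequence the BPF_JGT arm
 * matches:
 *
 *	r2 = r3			// r3 = pkt_data (PTR_TO_PACKET)
 *	r2 += 8
 *	if r2 > r4 goto drop	// r4 = pkt_end (PTR_TO_PACKET_END)
 *	// fall-through: bytes [r3, r3 + 8) are provably in the packet
 */
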
58e2af8b 7326static int check_cond_jmp_op(struct bpf_verifier_env *env,
7327 struct bpf_insn *insn, int *insn_idx)
7328{
7329 struct bpf_verifier_state *this_branch = env->cur_state;
7330 struct bpf_verifier_state *other_branch;
7331 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
fb8d251e 7332 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
17a52670 7333 u8 opcode = BPF_OP(insn->code);
092ed096 7334 bool is_jmp32;
fb8d251e 7335 int pred = -1;
7336 int err;
7337
7338 /* Only conditional jumps are expected to reach here. */
7339 if (opcode == BPF_JA || opcode > BPF_JSLE) {
7340 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
7341 return -EINVAL;
7342 }
7343
7344 if (BPF_SRC(insn->code) == BPF_X) {
7345 if (insn->imm != 0) {
092ed096 7346 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
7347 return -EINVAL;
7348 }
7349
7350 /* check src1 operand */
dc503a8a 7351 err = check_reg_arg(env, insn->src_reg, SRC_OP);
7352 if (err)
7353 return err;
7354
7355 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 7356 verbose(env, "R%d pointer comparison prohibited\n",
7357 insn->src_reg);
7358 return -EACCES;
7359 }
fb8d251e 7360 src_reg = &regs[insn->src_reg];
7361 } else {
7362 if (insn->src_reg != BPF_REG_0) {
092ed096 7363 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
7364 return -EINVAL;
7365 }
7366 }
7367
7368 /* check src2 operand */
dc503a8a 7369 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
7370 if (err)
7371 return err;
7372
1a0dc1ac 7373 dst_reg = &regs[insn->dst_reg];
092ed096 7374 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1a0dc1ac 7375
7376 if (BPF_SRC(insn->code) == BPF_K) {
7377 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
7378 } else if (src_reg->type == SCALAR_VALUE &&
7379 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
7380 pred = is_branch_taken(dst_reg,
7381 tnum_subreg(src_reg->var_off).value,
7382 opcode,
7383 is_jmp32);
7384 } else if (src_reg->type == SCALAR_VALUE &&
7385 !is_jmp32 && tnum_is_const(src_reg->var_off)) {
7386 pred = is_branch_taken(dst_reg,
7387 src_reg->var_off.value,
7388 opcode,
7389 is_jmp32);
7390 }
7391
b5dc0163 7392 if (pred >= 0) {
7393 /* If we get here with a dst_reg pointer type it is because
7394 * above is_branch_taken() special cased the 0 comparison.
7395 */
7396 if (!__is_pointer_value(false, dst_reg))
7397 err = mark_chain_precision(env, insn->dst_reg);
7398 if (BPF_SRC(insn->code) == BPF_X && !err)
7399 err = mark_chain_precision(env, insn->src_reg);
7400 if (err)
7401 return err;
7402 }
7403 if (pred == 1) {
7404 /* only follow the goto, ignore fall-through */
7405 *insn_idx += insn->off;
7406 return 0;
7407 } else if (pred == 0) {
7408 /* only follow fall-through branch, since
7409 * that's where the program will go
7410 */
7411 return 0;
7412 }
7413
7414 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
7415 false);
7416 if (!other_branch)
7417 return -EFAULT;
f4d7e40a 7418 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
17a52670 7419
7420 /* detect if we are comparing against a constant value so we can adjust
7421 * our min/max values for our dst register.
7422 * this is only legit if both are scalars (or pointers to the same
7423 * object, I suppose, but we don't support that right now), because
7424 * otherwise the different base pointers mean the offsets aren't
7425 * comparable.
7426 */
7427 if (BPF_SRC(insn->code) == BPF_X) {
092ed096 7428 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
092ed096 7429
f1174f77 7430 if (dst_reg->type == SCALAR_VALUE &&
7431 src_reg->type == SCALAR_VALUE) {
7432 if (tnum_is_const(src_reg->var_off) ||
7433 (is_jmp32 &&
7434 tnum_is_const(tnum_subreg(src_reg->var_off))))
f4d7e40a 7435 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096 7436 dst_reg,
7437 src_reg->var_off.value,
7438 tnum_subreg(src_reg->var_off).value,
7439 opcode, is_jmp32);
7440 else if (tnum_is_const(dst_reg->var_off) ||
7441 (is_jmp32 &&
7442 tnum_is_const(tnum_subreg(dst_reg->var_off))))
f4d7e40a 7443 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
092ed096 7444 src_reg,
7445 dst_reg->var_off.value,
7446 tnum_subreg(dst_reg->var_off).value,
7447 opcode, is_jmp32);
7448 else if (!is_jmp32 &&
7449 (opcode == BPF_JEQ || opcode == BPF_JNE))
f1174f77 7450 /* Comparing for equality, we can combine knowledge */
7451 reg_combine_min_max(&other_branch_regs[insn->src_reg],
7452 &other_branch_regs[insn->dst_reg],
092ed096 7453 src_reg, dst_reg, opcode);
7454 }
7455 } else if (dst_reg->type == SCALAR_VALUE) {
f4d7e40a 7456 reg_set_min_max(&other_branch_regs[insn->dst_reg],
7457 dst_reg, insn->imm, (u32)insn->imm,
7458 opcode, is_jmp32);
7459 }
7460
7461 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
7462 * NOTE: these optimizations below are related with pointer comparison
7463 * which will never be JMP32.
7464 */
7465 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
1a0dc1ac 7466 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
7467 reg_type_may_be_null(dst_reg->type)) {
7468 /* Mark all identical registers in each branch as either
7469 * safe or unknown depending on the R == 0 or R != 0 conditional.
7470 */
7471 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
7472 opcode == BPF_JNE);
7473 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
7474 opcode == BPF_JEQ);
7475 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
7476 this_branch, other_branch) &&
7477 is_pointer_value(env, insn->dst_reg)) {
7478 verbose(env, "R%d pointer comparison prohibited\n",
7479 insn->dst_reg);
1be7f75d 7480 return -EACCES;
17a52670 7481 }
06ee7115 7482 if (env->log.level & BPF_LOG_LEVEL)
f4d7e40a 7483 print_verifier_state(env, this_branch->frame[this_branch->curframe]);
7484 return 0;
7485}
7486
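/* Editor's sketch (illustrative <linux/filter.h> encoding): a branch
 * whose outcome is known at verification time. is_branch_taken()
 * returns pred == 1 here, so only the goto path is explored and the
 * fall-through is never pushed.
 */
struct bpf_insn known_branch[] = {
	BPF_MOV64_IMM(BPF_REG_1, 7),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 5, 2),	/* 7 > 5: always taken, skips 2 insns */
};
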
17a52670 7487/* verify BPF_LD_IMM64 instruction */
58e2af8b 7488static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 7489{
d8eca5bb 7490 struct bpf_insn_aux_data *aux = cur_aux(env);
638f5b90 7491 struct bpf_reg_state *regs = cur_regs(env);
d8eca5bb 7492 struct bpf_map *map;
7493 int err;
7494
7495 if (BPF_SIZE(insn->code) != BPF_DW) {
61bd5218 7496 verbose(env, "invalid BPF_LD_IMM insn\n");
7497 return -EINVAL;
7498 }
7499 if (insn->off != 0) {
61bd5218 7500 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
7501 return -EINVAL;
7502 }
7503
dc503a8a 7504 err = check_reg_arg(env, insn->dst_reg, DST_OP);
7505 if (err)
7506 return err;
7507
6b173873 7508 if (insn->src_reg == 0) {
7509 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
7510
f1174f77 7511 regs[insn->dst_reg].type = SCALAR_VALUE;
b03c9f9f 7512 __mark_reg_known(&regs[insn->dst_reg], imm);
17a52670 7513 return 0;
6b173873 7514 }
17a52670 7515
7516 map = env->used_maps[aux->map_index];
7517 mark_reg_known_zero(env, regs, insn->dst_reg);
7518 regs[insn->dst_reg].map_ptr = map;
7519
7520 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
7521 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
7522 regs[insn->dst_reg].off = aux->map_off;
7523 if (map_value_has_spin_lock(map))
7524 regs[insn->dst_reg].id = ++env->id_gen;
7525 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
7526 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
7527 } else {
7528 verbose(env, "bpf verifier is misconfigured\n");
7529 return -EINVAL;
7530 }
17a52670 7531
7532 return 0;
7533}
7534
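/* Editor's sketch: BPF_LD_IMM64 is the only 16-byte instruction; with
 * src_reg == 0 the two 32-bit imm halves are combined exactly as in
 * the "(u64)(insn + 1)->imm << 32" line above (macro from
 * <linux/filter.h>, value illustrative).
 */
struct bpf_insn ld64_example[] = {
	BPF_LD_IMM64(BPF_REG_1, 0x0123456789abcdefULL),	/* expands to two insns */
	BPF_EXIT_INSN(),
};
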
7535static bool may_access_skb(enum bpf_prog_type type)
7536{
7537 switch (type) {
7538 case BPF_PROG_TYPE_SOCKET_FILTER:
7539 case BPF_PROG_TYPE_SCHED_CLS:
94caee8c 7540 case BPF_PROG_TYPE_SCHED_ACT:
7541 return true;
7542 default:
7543 return false;
7544 }
7545}
7546
7547/* verify safety of LD_ABS|LD_IND instructions:
7548 * - they can only appear in the programs where ctx == skb
7549 * - since they are wrappers of function calls, they scratch R1-R5 registers,
7550 * preserve R6-R9, and store return value into R0
7551 *
7552 * Implicit input:
7553 * ctx == skb == R6 == CTX
7554 *
7555 * Explicit input:
7556 * SRC == any register
7557 * IMM == 32-bit immediate
7558 *
7559 * Output:
7560 * R0 - 8/16/32-bit skb data converted to cpu endianness
7561 */
58e2af8b 7562static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
ddd872bc 7563{
638f5b90 7564 struct bpf_reg_state *regs = cur_regs(env);
6d4f151a 7565 static const int ctx_reg = BPF_REG_6;
ddd872bc 7566 u8 mode = BPF_MODE(insn->code);
7567 int i, err;
7568
7e40781c 7569 if (!may_access_skb(resolve_prog_type(env->prog))) {
61bd5218 7570 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
7571 return -EINVAL;
7572 }
7573
7574 if (!env->ops->gen_ld_abs) {
7575 verbose(env, "bpf verifier is misconfigured\n");
7576 return -EINVAL;
7577 }
7578
ddd872bc 7579 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
d82bccc6 7580 BPF_SIZE(insn->code) == BPF_DW ||
ddd872bc 7581 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
61bd5218 7582 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
7583 return -EINVAL;
7584 }
7585
7586 /* check whether implicit source operand (register R6) is readable */
6d4f151a 7587 err = check_reg_arg(env, ctx_reg, SRC_OP);
7588 if (err)
7589 return err;
7590
7591 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
7592 * gen_ld_abs() may terminate the program at runtime, leading to
7593 * reference leak.
7594 */
7595 err = check_reference_leak(env);
7596 if (err) {
7597 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
7598 return err;
7599 }
7600
7601 if (env->cur_state->active_spin_lock) {
7602 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
7603 return -EINVAL;
7604 }
7605
6d4f151a 7606 if (regs[ctx_reg].type != PTR_TO_CTX) {
7607 verbose(env,
7608 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
7609 return -EINVAL;
7610 }
7611
7612 if (mode == BPF_IND) {
7613 /* check explicit source operand */
dc503a8a 7614 err = check_reg_arg(env, insn->src_reg, SRC_OP);
7615 if (err)
7616 return err;
7617 }
7618
7619 err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
7620 if (err < 0)
7621 return err;
7622
ddd872bc 7623 /* reset caller saved regs to unreadable */
dc503a8a 7624 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 7625 mark_reg_not_init(env, regs, caller_saved[i]);
7626 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
7627 }
7628
7629 /* mark destination R0 register as readable, since it contains
7630 * the value fetched from the packet.
7631 * Already marked as written above.
ddd872bc 7632 */
61bd5218 7633 mark_reg_unknown(env, regs, BPF_REG_0);
7634 /* ld_abs loads up to 32-bit skb data. */
7635 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
7636 return 0;
7637}
7638
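/* Editor's sketch of the implicit-R6 contract checked above
 * (illustrative socket-filter fragment, <linux/filter.h> macros):
 */
struct bpf_insn ld_abs_example[] = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),	/* R6 = ctx (skb), as required */
	BPF_LD_ABS(BPF_B, 14),			/* R0 = one byte of skb data at offset 14 */
	BPF_EXIT_INSN(),
};
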
7639static int check_return_code(struct bpf_verifier_env *env)
7640{
5cf1e914 7641 struct tnum enforce_attach_type_range = tnum_unknown;
27ae7997 7642 const struct bpf_prog *prog = env->prog;
7643 struct bpf_reg_state *reg;
7644 struct tnum range = tnum_range(0, 1);
7e40781c 7645 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
7646 int err;
7647
9e4e01df 7648 /* LSM and struct_ops func-ptr's return type could be "void" */
7649 if ((prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
7650 prog_type == BPF_PROG_TYPE_LSM) &&
7651 !prog->aux->attach_func_proto->type)
7652 return 0;
7653
7654 /* eBPF calling convention is such that R0 is used
7655 * to return the value from the eBPF program.
7656 * Make sure that it's readable at this time
7657 * of bpf_exit, which means that the program wrote
7658 * something into it earlier
7659 */
7660 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
7661 if (err)
7662 return err;
7663
7664 if (is_pointer_value(env, BPF_REG_0)) {
7665 verbose(env, "R0 leaks addr as return value\n");
7666 return -EACCES;
7667 }
390ee7e2 7668
7e40781c 7669 switch (prog_type) {
7670 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
7671 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
7672 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
7673 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
7674 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
7675 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
7676 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
983695fa 7677 range = tnum_range(1, 1);
ed4ed404 7678 break;
390ee7e2 7679 case BPF_PROG_TYPE_CGROUP_SKB:
5cf1e914 7680 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
7681 range = tnum_range(0, 3);
7682 enforce_attach_type_range = tnum_range(2, 3);
7683 }
ed4ed404 7684 break;
7685 case BPF_PROG_TYPE_CGROUP_SOCK:
7686 case BPF_PROG_TYPE_SOCK_OPS:
ebc614f6 7687 case BPF_PROG_TYPE_CGROUP_DEVICE:
7b146ceb 7688 case BPF_PROG_TYPE_CGROUP_SYSCTL:
0d01da6a 7689 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
390ee7e2 7690 break;
7691 case BPF_PROG_TYPE_RAW_TRACEPOINT:
7692 if (!env->prog->aux->attach_btf_id)
7693 return 0;
7694 range = tnum_const(0);
7695 break;
15d83c4d 7696 case BPF_PROG_TYPE_TRACING:
7697 switch (env->prog->expected_attach_type) {
7698 case BPF_TRACE_FENTRY:
7699 case BPF_TRACE_FEXIT:
7700 range = tnum_const(0);
7701 break;
7702 case BPF_TRACE_RAW_TP:
7703 case BPF_MODIFY_RETURN:
15d83c4d 7704 return 0;
7705 case BPF_TRACE_ITER:
7706 break;
7707 default:
7708 return -ENOTSUPP;
7709 }
15d83c4d 7710 break;
7711 case BPF_PROG_TYPE_SK_LOOKUP:
7712 range = tnum_range(SK_DROP, SK_PASS);
7713 break;
7714 case BPF_PROG_TYPE_EXT:
7715 /* freplace program can return anything as its return value
7716 * depends on the to-be-replaced kernel func or bpf program.
7717 */
7718 default:
7719 return 0;
7720 }
7721
638f5b90 7722 reg = cur_regs(env) + BPF_REG_0;
390ee7e2 7723 if (reg->type != SCALAR_VALUE) {
61bd5218 7724 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
7725 reg_type_str[reg->type]);
7726 return -EINVAL;
7727 }
7728
7729 if (!tnum_in(range, reg->var_off)) {
5cf1e914 7730 char tn_buf[48];
7731
61bd5218 7732 verbose(env, "At program exit the register R0 ");
390ee7e2 7733 if (!tnum_is_unknown(reg->var_off)) {
390ee7e2 7734 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 7735 verbose(env, "has value %s", tn_buf);
390ee7e2 7736 } else {
61bd5218 7737 verbose(env, "has unknown scalar value");
390ee7e2 7738 }
5cf1e914 7739 tnum_strn(tn_buf, sizeof(tn_buf), range);
983695fa 7740 verbose(env, " should have been in %s\n", tn_buf);
7741 return -EINVAL;
7742 }
5cf1e914 7743
7744 if (!tnum_is_unknown(enforce_attach_type_range) &&
7745 tnum_in(enforce_attach_type_range, reg->var_off))
7746 env->prog->enforce_expected_attach_type = 1;
7747 return 0;
7748}
7749
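/* Editor's sketch: a return value the range check above rejects. For
 * a CGROUP_SKB program the default allowed range is [0, 1], so
 * (exact message shape illustrative):
 *
 *	BPF_MOV64_IMM(BPF_REG_0, 2),
 *	BPF_EXIT_INSN(),
 *
 * fails with "At program exit the register R0 has value 2 should have
 * been in (0x0; 0x1)".
 */
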
7750/* non-recursive DFS pseudo code
7751 * 1 procedure DFS-iterative(G,v):
7752 * 2 label v as discovered
7753 * 3 let S be a stack
7754 * 4 S.push(v)
7755 * 5 while S is not empty
7756 * 6 t <- S.pop()
7757 * 7 if t is what we're looking for:
7758 * 8 return t
7759 * 9 for all edges e in G.adjacentEdges(t) do
7760 * 10 if edge e is already labelled
7761 * 11 continue with the next edge
7762 * 12 w <- G.adjacentVertex(t,e)
7763 * 13 if vertex w is not discovered and not explored
7764 * 14 label e as tree-edge
7765 * 15 label w as discovered
7766 * 16 S.push(w)
7767 * 17 continue at 5
7768 * 18 else if vertex w is discovered
7769 * 19 label e as back-edge
7770 * 20 else
7771 * 21 // vertex w is explored
7772 * 22 label e as forward- or cross-edge
7773 * 23 label t as explored
7774 * 24 S.pop()
7775 *
7776 * convention:
7777 * 0x10 - discovered
7778 * 0x11 - discovered and fall-through edge labelled
7779 * 0x12 - discovered and fall-through and branch edges labelled
7780 * 0x20 - explored
7781 */
7782
7783enum {
7784 DISCOVERED = 0x10,
7785 EXPLORED = 0x20,
7786 FALLTHROUGH = 1,
7787 BRANCH = 2,
7788};
7789
7790static u32 state_htab_size(struct bpf_verifier_env *env)
7791{
7792 return env->prog->len;
7793}
7794
7795static struct bpf_verifier_state_list **explored_state(
7796 struct bpf_verifier_env *env,
7797 int idx)
7798{
7799 struct bpf_verifier_state *cur = env->cur_state;
7800 struct bpf_func_state *state = cur->frame[cur->curframe];
7801
7802 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
7803}
7804
7805static void init_explored_state(struct bpf_verifier_env *env, int idx)
7806{
a8f500af 7807 env->insn_aux_data[idx].prune_point = true;
5d839021 7808}
f1bca824 7809
7810/* t, w, e - match pseudo-code above:
7811 * t - index of current instruction
7812 * w - next instruction
7813 * e - edge
7814 */
7815static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
7816 bool loop_ok)
475fb78f 7817{
7818 int *insn_stack = env->cfg.insn_stack;
7819 int *insn_state = env->cfg.insn_state;
7820
7821 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
7822 return 0;
7823
7824 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
7825 return 0;
7826
7827 if (w < 0 || w >= env->prog->len) {
d9762e84 7828 verbose_linfo(env, t, "%d: ", t);
61bd5218 7829 verbose(env, "jump out of range from insn %d to %d\n", t, w);
7830 return -EINVAL;
7831 }
7832
7833 if (e == BRANCH)
7834 /* mark branch target for state pruning */
5d839021 7835 init_explored_state(env, w);
f1bca824 7836
7837 if (insn_state[w] == 0) {
7838 /* tree-edge */
7839 insn_state[t] = DISCOVERED | e;
7840 insn_state[w] = DISCOVERED;
7df737e9 7841 if (env->cfg.cur_stack >= env->prog->len)
475fb78f 7842 return -E2BIG;
7df737e9 7843 insn_stack[env->cfg.cur_stack++] = w;
7844 return 1;
7845 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
2c78ee89 7846 if (loop_ok && env->bpf_capable)
2589726d 7847 return 0;
7848 verbose_linfo(env, t, "%d: ", t);
7849 verbose_linfo(env, w, "%d: ", w);
61bd5218 7850 verbose(env, "back-edge from insn %d to %d\n", t, w);
7851 return -EINVAL;
7852 } else if (insn_state[w] == EXPLORED) {
7853 /* forward- or cross-edge */
7854 insn_state[t] = DISCOVERED | e;
7855 } else {
61bd5218 7856 verbose(env, "insn state internal bug\n");
7857 return -EFAULT;
7858 }
7859 return 0;
7860}
7861
7862/* non-recursive depth-first-search to detect loops in BPF program
7863 * loop == back-edge in directed graph
7864 */
58e2af8b 7865static int check_cfg(struct bpf_verifier_env *env)
7866{
7867 struct bpf_insn *insns = env->prog->insnsi;
7868 int insn_cnt = env->prog->len;
7df737e9 7869 int *insn_stack, *insn_state;
7870 int ret = 0;
7871 int i, t;
7872
7df737e9 7873 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
7874 if (!insn_state)
7875 return -ENOMEM;
7876
7df737e9 7877 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f 7878 if (!insn_stack) {
71dde681 7879 kvfree(insn_state);
7880 return -ENOMEM;
7881 }
7882
7883 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
7884 insn_stack[0] = 0; /* 0 is the first instruction */
7df737e9 7885 env->cfg.cur_stack = 1;
7886
7887peek_stack:
7df737e9 7888 if (env->cfg.cur_stack == 0)
475fb78f 7889 goto check_state;
7df737e9 7890 t = insn_stack[env->cfg.cur_stack - 1];
475fb78f 7891
7892 if (BPF_CLASS(insns[t].code) == BPF_JMP ||
7893 BPF_CLASS(insns[t].code) == BPF_JMP32) {
7894 u8 opcode = BPF_OP(insns[t].code);
7895
7896 if (opcode == BPF_EXIT) {
7897 goto mark_explored;
7898 } else if (opcode == BPF_CALL) {
2589726d 7899 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
7900 if (ret == 1)
7901 goto peek_stack;
7902 else if (ret < 0)
7903 goto err_free;
07016151 7904 if (t + 1 < insn_cnt)
5d839021 7905 init_explored_state(env, t + 1);
cc8b0b92 7906 if (insns[t].src_reg == BPF_PSEUDO_CALL) {
5d839021 7907 init_explored_state(env, t);
7908 ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
7909 env, false);
7910 if (ret == 1)
7911 goto peek_stack;
7912 else if (ret < 0)
7913 goto err_free;
7914 }
7915 } else if (opcode == BPF_JA) {
7916 if (BPF_SRC(insns[t].code) != BPF_K) {
7917 ret = -EINVAL;
7918 goto err_free;
7919 }
7920 /* unconditional jump with single edge */
7921 ret = push_insn(t, t + insns[t].off + 1,
2589726d 7922 FALLTHROUGH, env, true);
7923 if (ret == 1)
7924 goto peek_stack;
7925 else if (ret < 0)
7926 goto err_free;
7927 /* unconditional jmp is not a good pruning point,
7928 * but it's marked, since backtracking needs
7929 * to record jmp history in is_state_visited().
7930 */
7931 init_explored_state(env, t + insns[t].off + 1);
7932 /* tell verifier to check for equivalent states
7933 * after every call and jump
7934 */
c3de6317 7935 if (t + 1 < insn_cnt)
5d839021 7936 init_explored_state(env, t + 1);
7937 } else {
7938 /* conditional jump with two edges */
5d839021 7939 init_explored_state(env, t);
2589726d 7940 ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
7941 if (ret == 1)
7942 goto peek_stack;
7943 else if (ret < 0)
7944 goto err_free;
7945
2589726d 7946 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
7947 if (ret == 1)
7948 goto peek_stack;
7949 else if (ret < 0)
7950 goto err_free;
7951 }
7952 } else {
7953 /* all other non-branch instructions with single
7954 * fall-through edge
7955 */
2589726d 7956 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
7957 if (ret == 1)
7958 goto peek_stack;
7959 else if (ret < 0)
7960 goto err_free;
7961 }
7962
7963mark_explored:
7964 insn_state[t] = EXPLORED;
7df737e9 7965 if (env->cfg.cur_stack-- <= 0) {
61bd5218 7966 verbose(env, "pop stack internal bug\n");
7967 ret = -EFAULT;
7968 goto err_free;
7969 }
7970 goto peek_stack;
7971
7972check_state:
7973 for (i = 0; i < insn_cnt; i++) {
7974 if (insn_state[i] != EXPLORED) {
61bd5218 7975 verbose(env, "unreachable insn %d\n", i);
7976 ret = -EINVAL;
7977 goto err_free;
7978 }
7979 }
7980 ret = 0; /* cfg looks good */
7981
7982err_free:
7983 kvfree(insn_state);
7984 kvfree(insn_stack);
7df737e9 7985 env->cfg.insn_state = env->cfg.insn_stack = NULL;
7986 return ret;
7987}
7988
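/* Editor's sketch: the smallest CFG check_cfg() flags as a back-edge,
 * a jump to itself (BPF_JMP_A is from <linux/filter.h>; off == -1
 * makes t + insns[t].off + 1 == t). Note that per push_insn() above,
 * bpf_capable loaders instead fall through to the bounded-loop logic.
 */
struct bpf_insn self_loop[] = {
	BPF_JMP_A(-1),		/* insn 0 jumps to insn 0 */
	BPF_EXIT_INSN(),	/* never reached */
};
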
7989static int check_abnormal_return(struct bpf_verifier_env *env)
7990{
7991 int i;
7992
7993 for (i = 1; i < env->subprog_cnt; i++) {
7994 if (env->subprog_info[i].has_ld_abs) {
7995 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
7996 return -EINVAL;
7997 }
7998 if (env->subprog_info[i].has_tail_call) {
7999 verbose(env, "tail_call is not allowed in subprogs without BTF\n");
8000 return -EINVAL;
8001 }
8002 }
8003 return 0;
8004}
8005
8006/* The minimum supported BTF func info size */
8007#define MIN_BPF_FUNCINFO_SIZE 8
8008#define MAX_FUNCINFO_REC_SIZE 252
8009
8010static int check_btf_func(struct bpf_verifier_env *env,
8011 const union bpf_attr *attr,
8012 union bpf_attr __user *uattr)
838e9690 8013{
09b28d76 8014 const struct btf_type *type, *func_proto, *ret_type;
d0b2818e 8015 u32 i, nfuncs, urec_size, min_size;
838e9690 8016 u32 krec_size = sizeof(struct bpf_func_info);
c454a46b 8017 struct bpf_func_info *krecord;
8c1b6e69 8018 struct bpf_func_info_aux *info_aux = NULL;
8019 struct bpf_prog *prog;
8020 const struct btf *btf;
838e9690 8021 void __user *urecord;
d0b2818e 8022 u32 prev_offset = 0;
09b28d76 8023 bool scalar_return;
e7ed83d6 8024 int ret = -ENOMEM;
8025
8026 nfuncs = attr->func_info_cnt;
8027 if (!nfuncs) {
8028 if (check_abnormal_return(env))
8029 return -EINVAL;
838e9690 8030 return 0;
09b28d76 8031 }
8032
8033 if (nfuncs != env->subprog_cnt) {
8034 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
8035 return -EINVAL;
8036 }
8037
8038 urec_size = attr->func_info_rec_size;
8039 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
8040 urec_size > MAX_FUNCINFO_REC_SIZE ||
8041 urec_size % sizeof(u32)) {
8042 verbose(env, "invalid func info rec size %u\n", urec_size);
8043 return -EINVAL;
8044 }
8045
8046 prog = env->prog;
8047 btf = prog->aux->btf;
8048
8049 urecord = u64_to_user_ptr(attr->func_info);
8050 min_size = min_t(u32, krec_size, urec_size);
8051
ba64e7d8 8052 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
8053 if (!krecord)
8054 return -ENOMEM;
8055 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
8056 if (!info_aux)
8057 goto err_free;
ba64e7d8 8058
8059 for (i = 0; i < nfuncs; i++) {
8060 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
8061 if (ret) {
8062 if (ret == -E2BIG) {
8063 verbose(env, "nonzero trailing record in func info");
8064 /* set the size the kernel expects so the loader can
8065 * zero out the rest of the record.
8066 */
8067 if (put_user(min_size, &uattr->func_info_rec_size))
8068 ret = -EFAULT;
8069 }
c454a46b 8070 goto err_free;
8071 }
8072
ba64e7d8 8073 if (copy_from_user(&krecord[i], urecord, min_size)) {
838e9690 8074 ret = -EFAULT;
c454a46b 8075 goto err_free;
8076 }
8077
d30d42e0 8078 /* check insn_off */
09b28d76 8079 ret = -EINVAL;
838e9690 8080 if (i == 0) {
d30d42e0 8081 if (krecord[i].insn_off) {
838e9690 8082 verbose(env,
8083 "nonzero insn_off %u for the first func info record",
8084 krecord[i].insn_off);
c454a46b 8085 goto err_free;
838e9690 8086 }
d30d42e0 8087 } else if (krecord[i].insn_off <= prev_offset) {
8088 verbose(env,
8089 "same or smaller insn offset (%u) than previous func info record (%u)",
d30d42e0 8090 krecord[i].insn_off, prev_offset);
c454a46b 8091 goto err_free;
8092 }
8093
d30d42e0 8094 if (env->subprog_info[i].start != krecord[i].insn_off) {
838e9690 8095 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
c454a46b 8096 goto err_free;
8097 }
8098
8099 /* check type_id */
ba64e7d8 8100 type = btf_type_by_id(btf, krecord[i].type_id);
51c39bb1 8101 if (!type || !btf_type_is_func(type)) {
838e9690 8102 verbose(env, "invalid type id %d in func info",
ba64e7d8 8103 krecord[i].type_id);
c454a46b 8104 goto err_free;
838e9690 8105 }
51c39bb1 8106 info_aux[i].linkage = BTF_INFO_VLEN(type->info);
8107
8108 func_proto = btf_type_by_id(btf, type->type);
8109 if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
8110 /* btf_func_check() already verified it during BTF load */
8111 goto err_free;
8112 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
8113 scalar_return =
8114 btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
8115 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
8116 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
8117 goto err_free;
8118 }
8119 if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
8120 verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
8121 goto err_free;
8122 }
8123
d30d42e0 8124 prev_offset = krecord[i].insn_off;
8125 urecord += urec_size;
8126 }
8127
8128 prog->aux->func_info = krecord;
8129 prog->aux->func_info_cnt = nfuncs;
8c1b6e69 8130 prog->aux->func_info_aux = info_aux;
8131 return 0;
8132
c454a46b 8133err_free:
ba64e7d8 8134 kvfree(krecord);
8c1b6e69 8135 kfree(info_aux);
8136 return ret;
8137}
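
/* Editorial sketch, not part of the original file: the record-size rule
 * enforced above, isolated as a predicate. A loader may pass func_info
 * records larger than the kernel's struct bpf_func_info as long as the
 * trailing bytes are zero (otherwise the kernel reports -E2BIG and writes
 * back the size it expects); the size itself must only be a u32-aligned
 * value within the supported bounds. The helper name is hypothetical.
 */
static bool __maybe_unused funcinfo_rec_size_ok(u32 urec_size)
{
	return urec_size >= MIN_BPF_FUNCINFO_SIZE &&
	       urec_size <= MAX_FUNCINFO_REC_SIZE &&
	       urec_size % sizeof(u32) == 0;
}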
8138
8139static void adjust_btf_func(struct bpf_verifier_env *env)
8140{
8c1b6e69 8141 struct bpf_prog_aux *aux = env->prog->aux;
8142 int i;
8143
8c1b6e69 8144 if (!aux->func_info)
8145 return;
8146
8147 for (i = 0; i < env->subprog_cnt; i++)
8c1b6e69 8148 aux->func_info[i].insn_off = env->subprog_info[i].start;
8149}
8150
8151#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
8152 sizeof(((struct bpf_line_info *)(0))->line_col))
8153#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
8154
8155static int check_btf_line(struct bpf_verifier_env *env,
8156 const union bpf_attr *attr,
8157 union bpf_attr __user *uattr)
8158{
8159 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
8160 struct bpf_subprog_info *sub;
8161 struct bpf_line_info *linfo;
8162 struct bpf_prog *prog;
8163 const struct btf *btf;
8164 void __user *ulinfo;
8165 int err;
8166
8167 nr_linfo = attr->line_info_cnt;
8168 if (!nr_linfo)
8169 return 0;
8170
8171 rec_size = attr->line_info_rec_size;
8172 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
8173 rec_size > MAX_LINEINFO_REC_SIZE ||
8174 rec_size & (sizeof(u32) - 1))
8175 return -EINVAL;
8176
 8177 /* Need to zero it in case userspace passes
 8178 * in a smaller bpf_line_info object.
8179 */
8180 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
8181 GFP_KERNEL | __GFP_NOWARN);
8182 if (!linfo)
8183 return -ENOMEM;
8184
8185 prog = env->prog;
8186 btf = prog->aux->btf;
8187
8188 s = 0;
8189 sub = env->subprog_info;
8190 ulinfo = u64_to_user_ptr(attr->line_info);
8191 expected_size = sizeof(struct bpf_line_info);
8192 ncopy = min_t(u32, expected_size, rec_size);
8193 for (i = 0; i < nr_linfo; i++) {
8194 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
8195 if (err) {
8196 if (err == -E2BIG) {
 8197 verbose(env, "nonzero trailing record in line_info");
8198 if (put_user(expected_size,
8199 &uattr->line_info_rec_size))
8200 err = -EFAULT;
8201 }
8202 goto err_free;
8203 }
8204
8205 if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
8206 err = -EFAULT;
8207 goto err_free;
8208 }
8209
8210 /*
8211 * Check insn_off to ensure
8212 * 1) strictly increasing AND
8213 * 2) bounded by prog->len
8214 *
8215 * The linfo[0].insn_off == 0 check logically falls into
8216 * the later "missing bpf_line_info for func..." case
 8217 * because the first linfo[0].insn_off must also be the
 8218 * start of the first subprog, and the first subprog must
 8219 * have subprog_info[0].start == 0.
8220 */
8221 if ((i && linfo[i].insn_off <= prev_offset) ||
8222 linfo[i].insn_off >= prog->len) {
8223 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
8224 i, linfo[i].insn_off, prev_offset,
8225 prog->len);
8226 err = -EINVAL;
8227 goto err_free;
8228 }
8229
8230 if (!prog->insnsi[linfo[i].insn_off].code) {
8231 verbose(env,
8232 "Invalid insn code at line_info[%u].insn_off\n",
8233 i);
8234 err = -EINVAL;
8235 goto err_free;
8236 }
8237
8238 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
8239 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
8240 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
8241 err = -EINVAL;
8242 goto err_free;
8243 }
8244
8245 if (s != env->subprog_cnt) {
8246 if (linfo[i].insn_off == sub[s].start) {
8247 sub[s].linfo_idx = i;
8248 s++;
8249 } else if (sub[s].start < linfo[i].insn_off) {
8250 verbose(env, "missing bpf_line_info for func#%u\n", s);
8251 err = -EINVAL;
8252 goto err_free;
8253 }
8254 }
8255
8256 prev_offset = linfo[i].insn_off;
8257 ulinfo += rec_size;
8258 }
8259
8260 if (s != env->subprog_cnt) {
8261 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
8262 env->subprog_cnt - s, s);
8263 err = -EINVAL;
8264 goto err_free;
8265 }
8266
8267 prog->aux->linfo = linfo;
8268 prog->aux->nr_linfo = nr_linfo;
8269
8270 return 0;
8271
8272err_free:
8273 kvfree(linfo);
8274 return err;
8275}
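
/* Editorial sketch, not part of the original file: the ordering rules
 * that check_btf_line() enforces, restated over plain arrays. insn_offs
 * must start at 0, be strictly increasing, stay below prog_len, and
 * cover the start of every subprog. E.g. offs {0, 2, 5} satisfy subprog
 * starts {0, 5}, while offs {0, 2, 6} would trip the "missing
 * bpf_line_info for func#1" check. The helper name is hypothetical.
 */
static bool __maybe_unused linfo_offs_well_formed(const u32 *offs, u32 n,
						  const u32 *sub_starts,
						  u32 nsub, u32 prog_len)
{
	u32 i, s = 0;

	for (i = 0; i < n; i++) {
		/* strictly increasing and bounded by prog_len */
		if ((i && offs[i] <= offs[i - 1]) || offs[i] >= prog_len)
			return false;
		if (s != nsub) {
			if (offs[i] == sub_starts[s])
				s++;		/* subprog s is covered */
			else if (sub_starts[s] < offs[i])
				return false;	/* a subprog start was skipped */
		}
	}
	return s == nsub;	/* every subprog start must be covered */
}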
8276
8277static int check_btf_info(struct bpf_verifier_env *env,
8278 const union bpf_attr *attr,
8279 union bpf_attr __user *uattr)
8280{
8281 struct btf *btf;
8282 int err;
8283
8284 if (!attr->func_info_cnt && !attr->line_info_cnt) {
8285 if (check_abnormal_return(env))
8286 return -EINVAL;
c454a46b 8287 return 0;
09b28d76 8288 }
8289
8290 btf = btf_get_by_fd(attr->prog_btf_fd);
8291 if (IS_ERR(btf))
8292 return PTR_ERR(btf);
8293 env->prog->aux->btf = btf;
8294
8295 err = check_btf_func(env, attr, uattr);
8296 if (err)
8297 return err;
8298
8299 err = check_btf_line(env, attr, uattr);
8300 if (err)
8301 return err;
8302
8303 return 0;
8304}
8305
8306/* check %cur's range satisfies %old's */
8307static bool range_within(struct bpf_reg_state *old,
8308 struct bpf_reg_state *cur)
8309{
8310 return old->umin_value <= cur->umin_value &&
8311 old->umax_value >= cur->umax_value &&
8312 old->smin_value <= cur->smin_value &&
8313 old->smax_value >= cur->smax_value;
8314}
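
/* Editorial sketch, not part of the original file: a worked instance of
 * the containment rule above. An old state that proved [0, 10] safe
 * admits a current state known to be in [2, 8], since every value the
 * current state can take was already explored. The helper name is
 * hypothetical.
 */
static bool __maybe_unused range_within_example(void)
{
	struct bpf_reg_state old = {}, cur = {};

	old.umin_value = 0;	old.umax_value = 10;
	old.smin_value = 0;	old.smax_value = 10;
	cur.umin_value = 2;	cur.umax_value = 8;
	cur.smin_value = 2;	cur.smax_value = 8;

	return range_within(&old, &cur);	/* true: [2,8] lies inside [0,10] */
}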
8315
8316/* Maximum number of register states that can exist at once */
8317#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
8318struct idpair {
8319 u32 old;
8320 u32 cur;
8321};
8322
8323/* If in the old state two registers had the same id, then they need to have
8324 * the same id in the new state as well. But that id could be different from
8325 * the old state, so we need to track the mapping from old to new ids.
8326 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
8327 * regs with old id 5 must also have new id 9 for the new state to be safe. But
8328 * regs with a different old id could still have new id 9, we don't care about
8329 * that.
8330 * So we look through our idmap to see if this old id has been seen before. If
8331 * so, we require the new id to match; otherwise, we add the id pair to the map.
969bf05e 8332 */
f1174f77 8333static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
969bf05e 8334{
f1174f77 8335 unsigned int i;
969bf05e 8336
8337 for (i = 0; i < ID_MAP_SIZE; i++) {
8338 if (!idmap[i].old) {
8339 /* Reached an empty slot; haven't seen this id before */
8340 idmap[i].old = old_id;
8341 idmap[i].cur = cur_id;
8342 return true;
8343 }
8344 if (idmap[i].old == old_id)
8345 return idmap[i].cur == cur_id;
8346 }
8347 /* We ran out of idmap slots, which should be impossible */
8348 WARN_ON_ONCE(1);
8349 return false;
8350}
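
/* Editorial sketch, not part of the original file: the idmap rule above,
 * made concrete. Once old id 5 has been paired with new id 9, every
 * later occurrence of old id 5 must map to 9 again; mapping it to 10
 * makes the states incomparable. The helper name is hypothetical.
 */
static bool __maybe_unused check_ids_example(void)
{
	struct idpair map[ID_MAP_SIZE] = {};

	if (!check_ids(5, 9, map))	/* first sighting: records 5 -> 9 */
		return false;
	if (!check_ids(5, 9, map))	/* consistent mapping: accepted */
		return false;
	return !check_ids(5, 10, map);	/* conflicting mapping: rejected */
}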
8351
8352static void clean_func_state(struct bpf_verifier_env *env,
8353 struct bpf_func_state *st)
8354{
8355 enum bpf_reg_liveness live;
8356 int i, j;
8357
8358 for (i = 0; i < BPF_REG_FP; i++) {
8359 live = st->regs[i].live;
8360 /* liveness must not touch this register anymore */
8361 st->regs[i].live |= REG_LIVE_DONE;
8362 if (!(live & REG_LIVE_READ))
8363 /* since the register is unused, clear its state
8364 * to make further comparison simpler
8365 */
f54c7898 8366 __mark_reg_not_init(env, &st->regs[i]);
8367 }
8368
8369 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
8370 live = st->stack[i].spilled_ptr.live;
8371 /* liveness must not touch this stack slot anymore */
8372 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
8373 if (!(live & REG_LIVE_READ)) {
f54c7898 8374 __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
8375 for (j = 0; j < BPF_REG_SIZE; j++)
8376 st->stack[i].slot_type[j] = STACK_INVALID;
8377 }
8378 }
8379}
8380
8381static void clean_verifier_state(struct bpf_verifier_env *env,
8382 struct bpf_verifier_state *st)
8383{
8384 int i;
8385
8386 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
8387 /* all regs in this state in all frames were already marked */
8388 return;
8389
8390 for (i = 0; i <= st->curframe; i++)
8391 clean_func_state(env, st->frame[i]);
8392}
8393
8394/* the parentage chains form a tree.
8395 * the verifier states are added to state lists at given insn and
8396 * pushed into state stack for future exploration.
 8397 * when the verifier reaches bpf_exit insn some of the verifier states
8398 * stored in the state lists have their final liveness state already,
8399 * but a lot of states will get revised from liveness point of view when
8400 * the verifier explores other branches.
8401 * Example:
8402 * 1: r0 = 1
8403 * 2: if r1 == 100 goto pc+1
8404 * 3: r0 = 2
8405 * 4: exit
8406 * when the verifier reaches exit insn the register r0 in the state list of
8407 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
8408 * of insn 2 and goes exploring further. At the insn 4 it will walk the
8409 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
8410 *
8411 * Since the verifier pushes the branch states as it sees them while exploring
 8412 * the program, the condition of walking the branch instruction for the second
 8413 * time means that all states below this branch were already explored and
 8414 * their final liveness marks are already propagated.
 8415 * Hence when the verifier completes the search of the state list in is_state_visited()
8416 * we can call this clean_live_states() function to mark all liveness states
8417 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
8418 * will not be used.
 8419 * This function also clears the registers and stack for states that were not READ
8420 * to simplify state merging.
8421 *
 8422 * An important note here is that walking the same branch instruction in the callee
 8423 * doesn't mean that the states are DONE. The verifier has to compare
 8424 * the callsites.
8425 */
8426static void clean_live_states(struct bpf_verifier_env *env, int insn,
8427 struct bpf_verifier_state *cur)
8428{
8429 struct bpf_verifier_state_list *sl;
8430 int i;
8431
5d839021 8432 sl = *explored_state(env, insn);
a8f500af 8433 while (sl) {
8434 if (sl->state.branches)
8435 goto next;
8436 if (sl->state.insn_idx != insn ||
8437 sl->state.curframe != cur->curframe)
8438 goto next;
8439 for (i = 0; i <= cur->curframe; i++)
8440 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
8441 goto next;
8442 clean_verifier_state(env, &sl->state);
8443next:
8444 sl = sl->next;
8445 }
8446}
8447
f1174f77 8448/* Returns true if (rold safe implies rcur safe) */
8449static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
8450 struct idpair *idmap)
f1174f77 8451{
8452 bool equal;
8453
8454 if (!(rold->live & REG_LIVE_READ))
8455 /* explored state didn't use this */
8456 return true;
8457
679c782d 8458 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
8459
8460 if (rold->type == PTR_TO_STACK)
8461 /* two stack pointers are equal only if they're pointing to
8462 * the same stack frame, since fp-8 in foo != fp-8 in bar
8463 */
8464 return equal && rold->frameno == rcur->frameno;
8465
8466 if (equal)
8467 return true;
8468
8469 if (rold->type == NOT_INIT)
8470 /* explored state can't have used this */
969bf05e 8471 return true;
8472 if (rcur->type == NOT_INIT)
8473 return false;
8474 switch (rold->type) {
8475 case SCALAR_VALUE:
8476 if (rcur->type == SCALAR_VALUE) {
8477 if (!rold->precise && !rcur->precise)
8478 return true;
8479 /* new val must satisfy old val knowledge */
8480 return range_within(rold, rcur) &&
8481 tnum_in(rold->var_off, rcur->var_off);
8482 } else {
8483 /* We're trying to use a pointer in place of a scalar.
8484 * Even if the scalar was unbounded, this could lead to
8485 * pointer leaks because scalars are allowed to leak
8486 * while pointers are not. We could make this safe in
8487 * special cases if root is calling us, but it's
8488 * probably not worth the hassle.
f1174f77 8489 */
179d1c56 8490 return false;
8491 }
8492 case PTR_TO_MAP_VALUE:
8493 /* If the new min/max/var_off satisfy the old ones and
8494 * everything else matches, we are OK.
8495 * 'id' is not compared, since it's only used for maps with
8496 * bpf_spin_lock inside map element and in such cases if
8497 * the rest of the prog is valid for one map element then
8498 * it's valid for all map elements regardless of the key
8499 * used in bpf_map_lookup()
8500 */
8501 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
8502 range_within(rold, rcur) &&
8503 tnum_in(rold->var_off, rcur->var_off);
8504 case PTR_TO_MAP_VALUE_OR_NULL:
8505 /* a PTR_TO_MAP_VALUE could be safe to use as a
8506 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
8507 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
8508 * checked, doing so could have affected others with the same
8509 * id, and we can't check for that because we lost the id when
8510 * we converted to a PTR_TO_MAP_VALUE.
8511 */
8512 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
8513 return false;
8514 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
8515 return false;
8516 /* Check our ids match any regs they're supposed to */
8517 return check_ids(rold->id, rcur->id, idmap);
de8f3a83 8518 case PTR_TO_PACKET_META:
f1174f77 8519 case PTR_TO_PACKET:
de8f3a83 8520 if (rcur->type != rold->type)
8521 return false;
8522 /* We must have at least as much range as the old ptr
8523 * did, so that any accesses which were safe before are
8524 * still safe. This is true even if old range < old off,
8525 * since someone could have accessed through (ptr - k), or
8526 * even done ptr -= k in a register, to get a safe access.
8527 */
8528 if (rold->range > rcur->range)
8529 return false;
8530 /* If the offsets don't match, we can't trust our alignment;
8531 * nor can we be sure that we won't fall out of range.
8532 */
8533 if (rold->off != rcur->off)
8534 return false;
8535 /* id relations must be preserved */
8536 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
8537 return false;
8538 /* new val must satisfy old val knowledge */
8539 return range_within(rold, rcur) &&
8540 tnum_in(rold->var_off, rcur->var_off);
8541 case PTR_TO_CTX:
8542 case CONST_PTR_TO_MAP:
f1174f77 8543 case PTR_TO_PACKET_END:
d58e468b 8544 case PTR_TO_FLOW_KEYS:
8545 case PTR_TO_SOCKET:
8546 case PTR_TO_SOCKET_OR_NULL:
8547 case PTR_TO_SOCK_COMMON:
8548 case PTR_TO_SOCK_COMMON_OR_NULL:
8549 case PTR_TO_TCP_SOCK:
8550 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 8551 case PTR_TO_XDP_SOCK:
8552 /* Only valid matches are exact, which memcmp() above
8553 * would have accepted
8554 */
8555 default:
8556 /* Don't know what's going on, just say it's not safe */
8557 return false;
8558 }
969bf05e 8559
8560 /* Shouldn't get here; if we do, say it's not safe */
8561 WARN_ON_ONCE(1);
8562 return false;
8563}
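
/* Editorial sketch, not part of the original file: the SCALAR_VALUE
 * branch of regsafe() in action. A previously explored precise range
 * [0, 255] subsumes a current constant 7, so this path can be pruned;
 * a current constant 300 would not be subsumed. The helper name is
 * hypothetical.
 */
static bool __maybe_unused scalar_regsafe_example(void)
{
	struct idpair map[ID_MAP_SIZE] = {};
	struct bpf_reg_state old = {}, cur = {};

	old.type = SCALAR_VALUE;
	old.live = REG_LIVE_READ;	/* old state actually used this reg */
	old.precise = true;
	old.umin_value = 0;	old.umax_value = 255;
	old.smin_value = 0;	old.smax_value = 255;
	old.var_off = tnum_unknown;

	cur.type = SCALAR_VALUE;
	cur.precise = true;
	cur.umin_value = 7;	cur.umax_value = 7;
	cur.smin_value = 7;	cur.smax_value = 7;
	cur.var_off = tnum_const(7);

	return regsafe(&old, &cur, map);	/* true: 7 is within [0, 255] */
}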
8564
8565static bool stacksafe(struct bpf_func_state *old,
8566 struct bpf_func_state *cur,
8567 struct idpair *idmap)
8568{
8569 int i, spi;
8570
8571 /* walk slots of the explored stack and ignore any additional
8572 * slots in the current stack, since explored(safe) state
8573 * didn't use them
8574 */
8575 for (i = 0; i < old->allocated_stack; i++) {
8576 spi = i / BPF_REG_SIZE;
8577
8578 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
8579 i += BPF_REG_SIZE - 1;
cc2b14d5 8580 /* explored state didn't use this */
fd05e57b 8581 continue;
b233920c 8582 }
cc2b14d5 8583
8584 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
8585 continue;
8586
8587 /* explored stack has more populated slots than current stack
8588 * and these slots were used
8589 */
8590 if (i >= cur->allocated_stack)
8591 return false;
8592
8593 /* if old state was safe with misc data in the stack
8594 * it will be safe with zero-initialized stack.
8595 * The opposite is not true
8596 */
8597 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
8598 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
8599 continue;
8600 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
8601 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
8602 /* Ex: old explored (safe) state has STACK_SPILL in
b8c1a309 8603 * this stack slot, but current has STACK_MISC ->
 8604 * these verifier states are not equivalent,
8605 * return false to continue verification of this path
8606 */
8607 return false;
8608 if (i % BPF_REG_SIZE)
8609 continue;
8610 if (old->stack[spi].slot_type[0] != STACK_SPILL)
8611 continue;
8612 if (!regsafe(&old->stack[spi].spilled_ptr,
8613 &cur->stack[spi].spilled_ptr,
8614 idmap))
8615 /* when explored and current stack slot are both storing
8616 * spilled registers, check that stored pointers types
8617 * are the same as well.
8618 * Ex: explored safe path could have stored
8619 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
8620 * but current path has stored:
8621 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
8622 * such verifier states are not equivalent.
8623 * return false to continue verification of this path
8624 */
8625 return false;
8626 }
8627 return true;
8628}
8629
8630static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
8631{
8632 if (old->acquired_refs != cur->acquired_refs)
8633 return false;
8634 return !memcmp(old->refs, cur->refs,
8635 sizeof(*old->refs) * old->acquired_refs);
8636}
8637
8638/* compare two verifier states
8639 *
8640 * all states stored in state_list are known to be valid, since
8641 * verifier reached 'bpf_exit' instruction through them
8642 *
8643 * this function is called when verifier exploring different branches of
8644 * execution popped from the state stack. If it sees an old state that has
8645 * more strict register state and more strict stack state then this execution
8646 * branch doesn't need to be explored further, since verifier already
8647 * concluded that more strict state leads to valid finish.
8648 *
8649 * Therefore two states are equivalent if register state is more conservative
8650 * and explored stack state is more conservative than the current one.
8651 * Example:
8652 * explored current
8653 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
8654 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
8655 *
 8656 * In other words, if the current stack state (the one being explored) has more
 8657 * valid slots than the old one that already passed validation,
8658 * the verifier can stop exploring and conclude that current state is valid too
8659 *
 8660 * Similarly with registers. If the explored state marks a register type as invalid
 8661 * whereas the register type in the current state is meaningful, it means that
8662 * the current state will reach 'bpf_exit' instruction safely
8663 */
8664static bool func_states_equal(struct bpf_func_state *old,
8665 struct bpf_func_state *cur)
f1bca824 8666{
8667 struct idpair *idmap;
8668 bool ret = false;
8669 int i;
8670
8671 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
8672 /* If we failed to allocate the idmap, just say it's not safe */
8673 if (!idmap)
1a0dc1ac 8674 return false;
8675
8676 for (i = 0; i < MAX_BPF_REG; i++) {
1b688a19 8677 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
f1174f77 8678 goto out_free;
8679 }
8680
8681 if (!stacksafe(old, cur, idmap))
8682 goto out_free;
8683
8684 if (!refsafe(old, cur))
8685 goto out_free;
8686 ret = true;
8687out_free:
8688 kfree(idmap);
8689 return ret;
8690}
8691
8692static bool states_equal(struct bpf_verifier_env *env,
8693 struct bpf_verifier_state *old,
8694 struct bpf_verifier_state *cur)
8695{
8696 int i;
8697
8698 if (old->curframe != cur->curframe)
8699 return false;
8700
8701 /* Verification state from speculative execution simulation
8702 * must never prune a non-speculative execution one.
8703 */
8704 if (old->speculative && !cur->speculative)
8705 return false;
8706
8707 if (old->active_spin_lock != cur->active_spin_lock)
8708 return false;
8709
8710 /* for states to be equal callsites have to be the same
8711 * and all frame states need to be equivalent
8712 */
8713 for (i = 0; i <= old->curframe; i++) {
8714 if (old->frame[i]->callsite != cur->frame[i]->callsite)
8715 return false;
8716 if (!func_states_equal(old->frame[i], cur->frame[i]))
8717 return false;
8718 }
8719 return true;
8720}
8721
8722/* Return 0 if no propagation happened. Return negative error code if error
8723 * happened. Otherwise, return the propagated bit.
8724 */
8725static int propagate_liveness_reg(struct bpf_verifier_env *env,
8726 struct bpf_reg_state *reg,
8727 struct bpf_reg_state *parent_reg)
8728{
8729 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
8730 u8 flag = reg->live & REG_LIVE_READ;
8731 int err;
8732
 8733 /* When we get here, the read flags of PARENT_REG or REG could be any of
8734 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
8735 * of propagation if PARENT_REG has strongest REG_LIVE_READ64.
8736 */
8737 if (parent_flag == REG_LIVE_READ64 ||
8738 /* Or if there is no read flag from REG. */
8739 !flag ||
8740 /* Or if the read flag from REG is the same as PARENT_REG. */
8741 parent_flag == flag)
8742 return 0;
8743
5327ed3d 8744 err = mark_reg_read(env, reg, parent_reg, flag);
8745 if (err)
8746 return err;
8747
5327ed3d 8748 return flag;
8749}
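
/* Editorial sketch, not part of the original file: the propagation
 * decision above as a pure predicate over the masked read flags. The
 * interesting case is the upgrade from REG_LIVE_READ32 in the parent to
 * REG_LIVE_READ64 seen in the child. The helper name is hypothetical.
 */
static bool __maybe_unused liveness_needs_propagation(u8 parent_flag, u8 flag)
{
	if (parent_flag == REG_LIVE_READ64)	/* parent already strongest */
		return false;
	if (!flag)				/* child saw no read at all */
		return false;
	return parent_flag != flag;		/* e.g. READ32 parent, READ64 child */
}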
8750
8e9cd9ce 8751/* A write screens off any subsequent reads; but write marks come from the
8752 * straight-line code between a state and its parent. When we arrive at an
8753 * equivalent state (jump target or such) we didn't arrive by the straight-line
8754 * code, so read marks in the state must propagate to the parent regardless
8755 * of the state's write marks. That's what 'parent == state->parent' comparison
679c782d 8756 * in mark_reg_read() is for.
8e9cd9ce 8757 */
8758static int propagate_liveness(struct bpf_verifier_env *env,
8759 const struct bpf_verifier_state *vstate,
8760 struct bpf_verifier_state *vparent)
dc503a8a 8761{
3f8cafa4 8762 struct bpf_reg_state *state_reg, *parent_reg;
f4d7e40a 8763 struct bpf_func_state *state, *parent;
3f8cafa4 8764 int i, frame, err = 0;
dc503a8a 8765
8766 if (vparent->curframe != vstate->curframe) {
8767 WARN(1, "propagate_live: parent frame %d current frame %d\n",
8768 vparent->curframe, vstate->curframe);
8769 return -EFAULT;
8770 }
8771 /* Propagate read liveness of registers... */
8772 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
83d16312 8773 for (frame = 0; frame <= vstate->curframe; frame++) {
8774 parent = vparent->frame[frame];
8775 state = vstate->frame[frame];
8776 parent_reg = parent->regs;
8777 state_reg = state->regs;
8778 /* We don't need to worry about FP liveness, it's read-only */
8779 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
8780 err = propagate_liveness_reg(env, &state_reg[i],
8781 &parent_reg[i]);
5327ed3d 8782 if (err < 0)
3f8cafa4 8783 return err;
8784 if (err == REG_LIVE_READ64)
8785 mark_insn_zext(env, &parent_reg[i]);
dc503a8a 8786 }
f4d7e40a 8787
1b04aee7 8788 /* Propagate stack slots. */
8789 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
8790 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
8791 parent_reg = &parent->stack[i].spilled_ptr;
8792 state_reg = &state->stack[i].spilled_ptr;
8793 err = propagate_liveness_reg(env, state_reg,
8794 parent_reg);
5327ed3d 8795 if (err < 0)
3f8cafa4 8796 return err;
8797 }
8798 }
5327ed3d 8799 return 0;
8800}
8801
8802/* find precise scalars in the previous equivalent state and
8803 * propagate them into the current state
8804 */
8805static int propagate_precision(struct bpf_verifier_env *env,
8806 const struct bpf_verifier_state *old)
8807{
8808 struct bpf_reg_state *state_reg;
8809 struct bpf_func_state *state;
8810 int i, err = 0;
8811
8812 state = old->frame[old->curframe];
8813 state_reg = state->regs;
8814 for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
8815 if (state_reg->type != SCALAR_VALUE ||
8816 !state_reg->precise)
8817 continue;
8818 if (env->log.level & BPF_LOG_LEVEL2)
8819 verbose(env, "propagating r%d\n", i);
8820 err = mark_chain_precision(env, i);
8821 if (err < 0)
8822 return err;
8823 }
8824
8825 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
8826 if (state->stack[i].slot_type[0] != STACK_SPILL)
8827 continue;
8828 state_reg = &state->stack[i].spilled_ptr;
8829 if (state_reg->type != SCALAR_VALUE ||
8830 !state_reg->precise)
8831 continue;
8832 if (env->log.level & BPF_LOG_LEVEL2)
8833 verbose(env, "propagating fp%d\n",
8834 (-i - 1) * BPF_REG_SIZE);
8835 err = mark_chain_precision_stack(env, i);
8836 if (err < 0)
8837 return err;
8838 }
8839 return 0;
8840}
8841
8842static bool states_maybe_looping(struct bpf_verifier_state *old,
8843 struct bpf_verifier_state *cur)
8844{
8845 struct bpf_func_state *fold, *fcur;
8846 int i, fr = cur->curframe;
8847
8848 if (old->curframe != fr)
8849 return false;
8850
8851 fold = old->frame[fr];
8852 fcur = cur->frame[fr];
8853 for (i = 0; i < MAX_BPF_REG; i++)
8854 if (memcmp(&fold->regs[i], &fcur->regs[i],
8855 offsetof(struct bpf_reg_state, parent)))
8856 return false;
8857 return true;
8858}
8859
8860
58e2af8b 8861static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
f1bca824 8862{
58e2af8b 8863 struct bpf_verifier_state_list *new_sl;
9f4686c4 8864 struct bpf_verifier_state_list *sl, **pprev;
679c782d 8865 struct bpf_verifier_state *cur = env->cur_state, *new;
ceefbc96 8866 int i, j, err, states_cnt = 0;
10d274e8 8867 bool add_new_state = env->test_state_freq ? true : false;
f1bca824 8868
b5dc0163 8869 cur->last_insn_idx = env->prev_insn_idx;
a8f500af 8870 if (!env->insn_aux_data[insn_idx].prune_point)
8871 /* this 'insn_idx' instruction wasn't marked, so we will not
8872 * be doing state search here
8873 */
8874 return 0;
8875
2589726d
AS
8876 /* bpf progs typically have pruning point every 4 instructions
8877 * http://vger.kernel.org/bpfconf2019.html#session-1
8878 * Do not add new state for future pruning if the verifier hasn't seen
8879 * at least 2 jumps and at least 8 instructions.
 8881 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
 8882 * In tests that amounts to a reduction of up to 50% in total verifier
 8883 * memory consumption and a 20% verifier time speedup.
8883 */
8884 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
8885 env->insn_processed - env->prev_insn_processed >= 8)
8886 add_new_state = true;
8887
8888 pprev = explored_state(env, insn_idx);
8889 sl = *pprev;
8890
8891 clean_live_states(env, insn_idx, cur);
8892
a8f500af 8893 while (sl) {
8894 states_cnt++;
8895 if (sl->state.insn_idx != insn_idx)
8896 goto next;
8897 if (sl->state.branches) {
8898 if (states_maybe_looping(&sl->state, cur) &&
8899 states_equal(env, &sl->state, cur)) {
8900 verbose_linfo(env, insn_idx, "; ");
8901 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
8902 return -EINVAL;
8903 }
8904 /* if the verifier is processing a loop, avoid adding new state
8905 * too often, since different loop iterations have distinct
8906 * states and may not help future pruning.
 8907 * This threshold shouldn't be too low, to make sure that
 8908 * a loop with a large bound is rejected quickly.
 8909 * The most abusive loop will be:
 8910 * r1 += 1
 8911 * if r1 < 1000000 goto pc-2
 8912 * 1M insn_processed limit / 100 == 10k peak states.
8913 * This threshold shouldn't be too high either, since states
8914 * at the end of the loop are likely to be useful in pruning.
8915 */
8916 if (env->jmps_processed - env->prev_jmps_processed < 20 &&
8917 env->insn_processed - env->prev_insn_processed < 100)
8918 add_new_state = false;
8919 goto miss;
8920 }
638f5b90 8921 if (states_equal(env, &sl->state, cur)) {
9f4686c4 8922 sl->hit_cnt++;
f1bca824 8923 /* reached equivalent register/stack state,
8924 * prune the search.
8925 * Registers read by the continuation are read by us.
8926 * If we have any write marks in env->cur_state, they
8927 * will prevent corresponding reads in the continuation
8928 * from reaching our parent (an explored_state). Our
8929 * own state will get the read marks recorded, but
8930 * they'll be immediately forgotten as we're pruning
8931 * this state and will pop a new one.
f1bca824 8932 */
f4d7e40a 8933 err = propagate_liveness(env, &sl->state, cur);
8934
8935 /* if previous state reached the exit with precision and
 8936 * current state is equivalent to it (except precision marks)
8937 * the precision needs to be propagated back in
8938 * the current state.
8939 */
8940 err = err ? : push_jmp_history(env, cur);
8941 err = err ? : propagate_precision(env, &sl->state);
8942 if (err)
8943 return err;
f1bca824 8944 return 1;
dc503a8a 8945 }
8946miss:
8947 /* when new state is not going to be added do not increase miss count.
8948 * Otherwise several loop iterations will remove the state
8949 * recorded earlier. The goal of these heuristics is to have
8950 * states from some iterations of the loop (some in the beginning
8951 * and some at the end) to help pruning.
8952 */
8953 if (add_new_state)
8954 sl->miss_cnt++;
8955 /* heuristic to determine whether this state is beneficial
8956 * to keep checking from state equivalence point of view.
8957 * Higher numbers increase max_states_per_insn and verification time,
8958 * but do not meaningfully decrease insn_processed.
8959 */
8960 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
8961 /* the state is unlikely to be useful. Remove it to
8962 * speed up verification
8963 */
8964 *pprev = sl->next;
8965 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
8966 u32 br = sl->state.branches;
8967
8968 WARN_ONCE(br,
8969 "BUG live_done but branches_to_explore %d\n",
8970 br);
8971 free_verifier_state(&sl->state, false);
8972 kfree(sl);
8973 env->peak_states--;
8974 } else {
8975 /* cannot free this state, since parentage chain may
 8976 * walk it later. Add it to the free_list instead to
8977 * be freed at the end of verification
8978 */
8979 sl->next = env->free_list;
8980 env->free_list = sl;
8981 }
8982 sl = *pprev;
8983 continue;
8984 }
dc2a4ebc 8985next:
8986 pprev = &sl->next;
8987 sl = *pprev;
8988 }
8989
8990 if (env->max_states_per_insn < states_cnt)
8991 env->max_states_per_insn = states_cnt;
8992
2c78ee89 8993 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
b5dc0163 8994 return push_jmp_history(env, cur);
ceefbc96 8995
2589726d 8996 if (!add_new_state)
b5dc0163 8997 return push_jmp_history(env, cur);
ceefbc96 8998
8999 /* There were no equivalent states, remember the current one.
9000 * Technically the current state is not proven to be safe yet,
f4d7e40a 9001 * but it will either reach the outermost bpf_exit (which means it's safe)
2589726d 9002 * or it will be rejected. When there are no loops the verifier won't be
f4d7e40a 9003 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
9004 * again on the way to bpf_exit.
9005 * When looping the sl->state.branches will be > 0 and this state
9006 * will not be considered for equivalence until branches == 0.
f1bca824 9007 */
638f5b90 9008 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
9009 if (!new_sl)
9010 return -ENOMEM;
9011 env->total_states++;
9012 env->peak_states++;
9013 env->prev_jmps_processed = env->jmps_processed;
9014 env->prev_insn_processed = env->insn_processed;
9015
9016 /* add new state to the head of linked list */
9017 new = &new_sl->state;
9018 err = copy_verifier_state(new, cur);
1969db47 9019 if (err) {
679c782d 9020 free_verifier_state(new, false);
9021 kfree(new_sl);
9022 return err;
9023 }
dc2a4ebc 9024 new->insn_idx = insn_idx;
9025 WARN_ONCE(new->branches != 1,
9026 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
b5dc0163 9027
2589726d 9028 cur->parent = new;
9029 cur->first_insn_idx = insn_idx;
9030 clear_jmp_history(cur);
9031 new_sl->next = *explored_state(env, insn_idx);
9032 *explored_state(env, insn_idx) = new_sl;
9033 /* connect new state to parentage chain. Current frame needs all
9034 * registers connected. Only r6 - r9 of the callers are alive (pushed
9035 * to the stack implicitly by JITs) so in callers' frames connect just
9036 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
9037 * the state of the call instruction (with WRITTEN set), and r0 comes
9038 * from callee with its full parentage chain, anyway.
9039 */
9040 /* clear write marks in current state: the writes we did are not writes
9041 * our child did, so they don't screen off its reads from us.
9042 * (There are no read marks in current state, because reads always mark
9043 * their parent and current state never has children yet. Only
9044 * explored_states can get read marks.)
9045 */
9046 for (j = 0; j <= cur->curframe; j++) {
9047 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
9048 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
9049 for (i = 0; i < BPF_REG_FP; i++)
9050 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
9051 }
9052
9053 /* all stack frames are accessible from callee, clear them all */
9054 for (j = 0; j <= cur->curframe; j++) {
9055 struct bpf_func_state *frame = cur->frame[j];
679c782d 9056 struct bpf_func_state *newframe = new->frame[j];
f4d7e40a 9057
679c782d 9058 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
cc2b14d5 9059 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
9060 frame->stack[i].spilled_ptr.parent =
9061 &newframe->stack[i].spilled_ptr;
9062 }
f4d7e40a 9063 }
9064 return 0;
9065}
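
/* Editorial sketch, not part of the original file: the eviction
 * heuristic used in the loop above, isolated. A stored state whose miss
 * count exceeds three times its hit count (plus a small grace of 3) is
 * judged unlikely to ever prune a future path and is dropped. The
 * helper name is hypothetical.
 */
static bool __maybe_unused state_worth_keeping(u32 hit_cnt, u32 miss_cnt)
{
	return miss_cnt <= hit_cnt * 3 + 3;
}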
9066
9067/* Return true if it's OK to have the same insn return a different type. */
9068static bool reg_type_mismatch_ok(enum bpf_reg_type type)
9069{
9070 switch (type) {
9071 case PTR_TO_CTX:
9072 case PTR_TO_SOCKET:
9073 case PTR_TO_SOCKET_OR_NULL:
9074 case PTR_TO_SOCK_COMMON:
9075 case PTR_TO_SOCK_COMMON_OR_NULL:
9076 case PTR_TO_TCP_SOCK:
9077 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 9078 case PTR_TO_XDP_SOCK:
2a02759e 9079 case PTR_TO_BTF_ID:
b121b341 9080 case PTR_TO_BTF_ID_OR_NULL:
9081 return false;
9082 default:
9083 return true;
9084 }
9085}
9086
9087/* If an instruction was previously used with particular pointer types, then we
9088 * need to be careful to avoid cases such as the below, where it may be ok
9089 * for one branch accessing the pointer, but not ok for the other branch:
9090 *
9091 * R1 = sock_ptr
9092 * goto X;
9093 * ...
9094 * R1 = some_other_valid_ptr;
9095 * goto X;
9096 * ...
9097 * R2 = *(u32 *)(R1 + 0);
9098 */
9099static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
9100{
9101 return src != prev && (!reg_type_mismatch_ok(src) ||
9102 !reg_type_mismatch_ok(prev));
9103}
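
/* Editorial sketch, not part of the original file: two concrete
 * outcomes of the check above. Stack and map-value pointers may share a
 * load insn because their accesses need no rewriting, while a ctx
 * pointer may not alternate with anything else. The helper name is
 * hypothetical.
 */
static bool __maybe_unused reg_type_mismatch_example(void)
{
	/* stack vs map value on the same insn: allowed */
	if (reg_type_mismatch(PTR_TO_STACK, PTR_TO_MAP_VALUE))
		return false;
	/* ctx vs stack on the same insn: rejected */
	return reg_type_mismatch(PTR_TO_CTX, PTR_TO_STACK);
}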
9104
58e2af8b 9105static int do_check(struct bpf_verifier_env *env)
17a52670 9106{
6f8a57cc 9107 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
51c39bb1 9108 struct bpf_verifier_state *state = env->cur_state;
17a52670 9109 struct bpf_insn *insns = env->prog->insnsi;
638f5b90 9110 struct bpf_reg_state *regs;
06ee7115 9111 int insn_cnt = env->prog->len;
17a52670 9112 bool do_print_state = false;
b5dc0163 9113 int prev_insn_idx = -1;
17a52670 9114
9115 for (;;) {
9116 struct bpf_insn *insn;
9117 u8 class;
9118 int err;
9119
b5dc0163 9120 env->prev_insn_idx = prev_insn_idx;
c08435ec 9121 if (env->insn_idx >= insn_cnt) {
61bd5218 9122 verbose(env, "invalid insn idx %d insn_cnt %d\n",
c08435ec 9123 env->insn_idx, insn_cnt);
9124 return -EFAULT;
9125 }
9126
c08435ec 9127 insn = &insns[env->insn_idx];
9128 class = BPF_CLASS(insn->code);
9129
06ee7115 9130 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
9131 verbose(env,
9132 "BPF program is too large. Processed %d insn\n",
06ee7115 9133 env->insn_processed);
9134 return -E2BIG;
9135 }
9136
c08435ec 9137 err = is_state_visited(env, env->insn_idx);
9138 if (err < 0)
9139 return err;
9140 if (err == 1) {
9141 /* found equivalent state, can prune the search */
06ee7115 9142 if (env->log.level & BPF_LOG_LEVEL) {
f1bca824 9143 if (do_print_state)
9144 verbose(env, "\nfrom %d to %d%s: safe\n",
9145 env->prev_insn_idx, env->insn_idx,
9146 env->cur_state->speculative ?
9147 " (speculative execution)" : "");
f1bca824 9148 else
c08435ec 9149 verbose(env, "%d: safe\n", env->insn_idx);
9150 }
9151 goto process_bpf_exit;
9152 }
9153
9154 if (signal_pending(current))
9155 return -EAGAIN;
9156
9157 if (need_resched())
9158 cond_resched();
9159
9160 if (env->log.level & BPF_LOG_LEVEL2 ||
9161 (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
9162 if (env->log.level & BPF_LOG_LEVEL2)
c08435ec 9163 verbose(env, "%d:", env->insn_idx);
c5fc9692 9164 else
9165 verbose(env, "\nfrom %d to %d%s:",
9166 env->prev_insn_idx, env->insn_idx,
9167 env->cur_state->speculative ?
9168 " (speculative execution)" : "");
f4d7e40a 9169 print_verifier_state(env, state->frame[state->curframe]);
9170 do_print_state = false;
9171 }
9172
06ee7115 9173 if (env->log.level & BPF_LOG_LEVEL) {
9174 const struct bpf_insn_cbs cbs = {
9175 .cb_print = verbose,
abe08840 9176 .private_data = env,
9177 };
9178
9179 verbose_linfo(env, env->insn_idx, "; ");
9180 verbose(env, "%d: ", env->insn_idx);
abe08840 9181 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
9182 }
9183
cae1927c 9184 if (bpf_prog_is_dev_bound(env->prog->aux)) {
9185 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
9186 env->prev_insn_idx);
9187 if (err)
9188 return err;
9189 }
13a27dfc 9190
638f5b90 9191 regs = cur_regs(env);
51c39bb1 9192 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
b5dc0163 9193 prev_insn_idx = env->insn_idx;
fd978bf7 9194
17a52670 9195 if (class == BPF_ALU || class == BPF_ALU64) {
1be7f75d 9196 err = check_alu_op(env, insn);
9197 if (err)
9198 return err;
9199
9200 } else if (class == BPF_LDX) {
3df126f3 9201 enum bpf_reg_type *prev_src_type, src_reg_type;
9202
 9203 /* the check for reserved fields was already done */
9204
17a52670 9205 /* check src operand */
dc503a8a 9206 err = check_reg_arg(env, insn->src_reg, SRC_OP);
9207 if (err)
9208 return err;
9209
dc503a8a 9210 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
9211 if (err)
9212 return err;
9213
9214 src_reg_type = regs[insn->src_reg].type;
9215
9216 /* check that memory (src_reg + off) is readable,
9217 * the state of dst_reg will be updated by this func
9218 */
9219 err = check_mem_access(env, env->insn_idx, insn->src_reg,
9220 insn->off, BPF_SIZE(insn->code),
9221 BPF_READ, insn->dst_reg, false);
9222 if (err)
9223 return err;
9224
c08435ec 9225 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
9226
9227 if (*prev_src_type == NOT_INIT) {
9228 /* saw a valid insn
9229 * dst_reg = *(u32 *)(src_reg + off)
3df126f3 9230 * save type to validate intersecting paths
9bac3d6d 9231 */
3df126f3 9232 *prev_src_type = src_reg_type;
9bac3d6d 9233
c64b7983 9234 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
 9235 /* Abuser program is trying to use the same insn
9236 * dst_reg = *(u32*) (src_reg + off)
9237 * with different pointer types:
9238 * src_reg == ctx in one branch and
9239 * src_reg == stack|map in some other branch.
9240 * Reject it.
9241 */
61bd5218 9242 verbose(env, "same insn cannot be used with different pointers\n");
9243 return -EINVAL;
9244 }
9245
17a52670 9246 } else if (class == BPF_STX) {
3df126f3 9247 enum bpf_reg_type *prev_dst_type, dst_reg_type;
d691f9e8 9248
17a52670 9249 if (BPF_MODE(insn->code) == BPF_XADD) {
c08435ec 9250 err = check_xadd(env, env->insn_idx, insn);
17a52670
AS
9251 if (err)
9252 return err;
c08435ec 9253 env->insn_idx++;
9254 continue;
9255 }
9256
17a52670 9257 /* check src1 operand */
dc503a8a 9258 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
9259 if (err)
9260 return err;
9261 /* check src2 operand */
dc503a8a 9262 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
9263 if (err)
9264 return err;
9265
d691f9e8
AS
9266 dst_reg_type = regs[insn->dst_reg].type;
9267
17a52670 9268 /* check that memory (dst_reg + off) is writeable */
9269 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
9270 insn->off, BPF_SIZE(insn->code),
9271 BPF_WRITE, insn->src_reg, false);
9272 if (err)
9273 return err;
9274
c08435ec 9275 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
9276
9277 if (*prev_dst_type == NOT_INIT) {
9278 *prev_dst_type = dst_reg_type;
c64b7983 9279 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
61bd5218 9280 verbose(env, "same insn cannot be used with different pointers\n");
9281 return -EINVAL;
9282 }
9283
9284 } else if (class == BPF_ST) {
9285 if (BPF_MODE(insn->code) != BPF_MEM ||
9286 insn->src_reg != BPF_REG_0) {
61bd5218 9287 verbose(env, "BPF_ST uses reserved fields\n");
9288 return -EINVAL;
9289 }
9290 /* check src operand */
dc503a8a 9291 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
9292 if (err)
9293 return err;
9294
f37a8cb8 9295 if (is_ctx_reg(env, insn->dst_reg)) {
9d2be44a 9296 verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
9297 insn->dst_reg,
9298 reg_type_str[reg_state(env, insn->dst_reg)->type]);
9299 return -EACCES;
9300 }
9301
17a52670 9302 /* check that memory (dst_reg + off) is writeable */
9303 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
9304 insn->off, BPF_SIZE(insn->code),
9305 BPF_WRITE, -1, false);
9306 if (err)
9307 return err;
9308
092ed096 9309 } else if (class == BPF_JMP || class == BPF_JMP32) {
9310 u8 opcode = BPF_OP(insn->code);
9311
2589726d 9312 env->jmps_processed++;
9313 if (opcode == BPF_CALL) {
9314 if (BPF_SRC(insn->code) != BPF_K ||
9315 insn->off != 0 ||
9316 (insn->src_reg != BPF_REG_0 &&
9317 insn->src_reg != BPF_PSEUDO_CALL) ||
9318 insn->dst_reg != BPF_REG_0 ||
9319 class == BPF_JMP32) {
61bd5218 9320 verbose(env, "BPF_CALL uses reserved fields\n");
9321 return -EINVAL;
9322 }
9323
9324 if (env->cur_state->active_spin_lock &&
9325 (insn->src_reg == BPF_PSEUDO_CALL ||
9326 insn->imm != BPF_FUNC_spin_unlock)) {
9327 verbose(env, "function calls are not allowed while holding a lock\n");
9328 return -EINVAL;
9329 }
f4d7e40a 9330 if (insn->src_reg == BPF_PSEUDO_CALL)
c08435ec 9331 err = check_func_call(env, insn, &env->insn_idx);
f4d7e40a 9332 else
c08435ec 9333 err = check_helper_call(env, insn->imm, env->insn_idx);
9334 if (err)
9335 return err;
9336
9337 } else if (opcode == BPF_JA) {
9338 if (BPF_SRC(insn->code) != BPF_K ||
9339 insn->imm != 0 ||
9340 insn->src_reg != BPF_REG_0 ||
9341 insn->dst_reg != BPF_REG_0 ||
9342 class == BPF_JMP32) {
61bd5218 9343 verbose(env, "BPF_JA uses reserved fields\n");
9344 return -EINVAL;
9345 }
9346
c08435ec 9347 env->insn_idx += insn->off + 1;
9348 continue;
9349
9350 } else if (opcode == BPF_EXIT) {
9351 if (BPF_SRC(insn->code) != BPF_K ||
9352 insn->imm != 0 ||
9353 insn->src_reg != BPF_REG_0 ||
9354 insn->dst_reg != BPF_REG_0 ||
9355 class == BPF_JMP32) {
61bd5218 9356 verbose(env, "BPF_EXIT uses reserved fields\n");
9357 return -EINVAL;
9358 }
9359
9360 if (env->cur_state->active_spin_lock) {
9361 verbose(env, "bpf_spin_unlock is missing\n");
9362 return -EINVAL;
9363 }
9364
9365 if (state->curframe) {
9366 /* exit from nested function */
c08435ec 9367 err = prepare_func_exit(env, &env->insn_idx);
9368 if (err)
9369 return err;
9370 do_print_state = true;
9371 continue;
9372 }
9373
9374 err = check_reference_leak(env);
9375 if (err)
9376 return err;
9377
9378 err = check_return_code(env);
9379 if (err)
9380 return err;
f1bca824 9381process_bpf_exit:
2589726d 9382 update_branch_counts(env, env->cur_state);
b5dc0163 9383 err = pop_stack(env, &prev_insn_idx,
6f8a57cc 9384 &env->insn_idx, pop_log);
9385 if (err < 0) {
9386 if (err != -ENOENT)
9387 return err;
9388 break;
9389 } else {
9390 do_print_state = true;
9391 continue;
9392 }
9393 } else {
c08435ec 9394 err = check_cond_jmp_op(env, insn, &env->insn_idx);
9395 if (err)
9396 return err;
9397 }
9398 } else if (class == BPF_LD) {
9399 u8 mode = BPF_MODE(insn->code);
9400
9401 if (mode == BPF_ABS || mode == BPF_IND) {
9402 err = check_ld_abs(env, insn);
9403 if (err)
9404 return err;
9405
9406 } else if (mode == BPF_IMM) {
9407 err = check_ld_imm(env, insn);
9408 if (err)
9409 return err;
9410
c08435ec 9411 env->insn_idx++;
51c39bb1 9412 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
17a52670 9413 } else {
61bd5218 9414 verbose(env, "invalid BPF_LD mode\n");
9415 return -EINVAL;
9416 }
9417 } else {
61bd5218 9418 verbose(env, "unknown insn class %d\n", class);
9419 return -EINVAL;
9420 }
9421
c08435ec 9422 env->insn_idx++;
9423 }
9424
9425 return 0;
9426}
9427
9428static int check_map_prealloc(struct bpf_map *map)
9429{
9430 return (map->map_type != BPF_MAP_TYPE_HASH &&
9431 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
9432 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
9433 !(map->map_flags & BPF_F_NO_PREALLOC);
9434}
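
/* Editorial sketch, not part of the original file: what the predicate
 * above accepts. A plain BPF_MAP_TYPE_HASH defaults to preallocated
 * storage; the same map created with BPF_F_NO_PREALLOC allocates at
 * runtime and fails the check. The helper name is hypothetical.
 */
static bool __maybe_unused check_map_prealloc_example(void)
{
	struct bpf_map m = {};

	m.map_type = BPF_MAP_TYPE_HASH;
	m.map_flags = 0;			/* default: preallocated */
	if (!check_map_prealloc(&m))
		return false;

	m.map_flags = BPF_F_NO_PREALLOC;	/* run-time allocation */
	return !check_map_prealloc(&m);
}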
9435
9436static bool is_tracing_prog_type(enum bpf_prog_type type)
9437{
9438 switch (type) {
9439 case BPF_PROG_TYPE_KPROBE:
9440 case BPF_PROG_TYPE_TRACEPOINT:
9441 case BPF_PROG_TYPE_PERF_EVENT:
9442 case BPF_PROG_TYPE_RAW_TRACEPOINT:
9443 return true;
9444 default:
9445 return false;
9446 }
9447}
9448
9449static bool is_preallocated_map(struct bpf_map *map)
9450{
9451 if (!check_map_prealloc(map))
9452 return false;
9453 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
9454 return false;
9455 return true;
9456}
9457
9458static int check_map_prog_compatibility(struct bpf_verifier_env *env,
9459 struct bpf_map *map,
9460 struct bpf_prog *prog)
9461
9462{
7e40781c 9463 enum bpf_prog_type prog_type = resolve_prog_type(prog);
9464 /*
9465 * Validate that trace type programs use preallocated hash maps.
9466 *
9467 * For programs attached to PERF events this is mandatory as the
9468 * perf NMI can hit any arbitrary code sequence.
9469 *
 9470 * All other trace types using non-preallocated hash maps are unsafe as
9471 * well because tracepoint or kprobes can be inside locked regions
9472 * of the memory allocator or at a place where a recursion into the
9473 * memory allocator would see inconsistent state.
9474 *
 9475 * On RT enabled kernels, run-time allocation for all trace type
9476 * programs is strictly prohibited due to lock type constraints. On
9477 * !RT kernels it is allowed for backwards compatibility reasons for
9478 * now, but warnings are emitted so developers are made aware of
9479 * the unsafety and can fix their programs before this is enforced.
56f668df 9480 */
9481 if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
9482 if (prog_type == BPF_PROG_TYPE_PERF_EVENT) {
61bd5218 9483 verbose(env, "perf_event programs can only use preallocated hash map\n");
9484 return -EINVAL;
9485 }
9486 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
9487 verbose(env, "trace type programs can only use preallocated hash map\n");
9488 return -EINVAL;
9489 }
9490 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
9491 verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
fdc15d38 9492 }
a3884572 9493
9494 if ((is_tracing_prog_type(prog_type) ||
9495 prog_type == BPF_PROG_TYPE_SOCKET_FILTER) &&
9496 map_value_has_spin_lock(map)) {
9497 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
9498 return -EINVAL;
9499 }
9500
a3884572 9501 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
09728266 9502 !bpf_offload_prog_map_match(prog, map)) {
9503 verbose(env, "offload device mismatch between prog and map\n");
9504 return -EINVAL;
9505 }
9506
9507 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
9508 verbose(env, "bpf_struct_ops map cannot be used in prog\n");
9509 return -EINVAL;
9510 }
9511
9512 if (prog->aux->sleepable)
9513 switch (map->map_type) {
9514 case BPF_MAP_TYPE_HASH:
9515 case BPF_MAP_TYPE_LRU_HASH:
9516 case BPF_MAP_TYPE_ARRAY:
9517 if (!is_preallocated_map(map)) {
9518 verbose(env,
9519 "Sleepable programs can only use preallocated hash maps\n");
9520 return -EINVAL;
9521 }
9522 break;
9523 default:
9524 verbose(env,
9525 "Sleepable programs can only use array and hash maps\n");
9526 return -EINVAL;
9527 }
9528
9529 return 0;
9530}
9531
9532static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
9533{
9534 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
9535 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
9536}
9537
9538/* look for pseudo eBPF instructions that access map FDs and
9539 * replace them with actual map pointers
9540 */
58e2af8b 9541static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
9542{
9543 struct bpf_insn *insn = env->prog->insnsi;
9544 int insn_cnt = env->prog->len;
fdc15d38 9545 int i, j, err;
0246e64d 9546
f1f7714e 9547 err = bpf_prog_calc_tag(env->prog);
9548 if (err)
9549 return err;
9550
0246e64d 9551 for (i = 0; i < insn_cnt; i++, insn++) {
9bac3d6d 9552 if (BPF_CLASS(insn->code) == BPF_LDX &&
d691f9e8 9553 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
61bd5218 9554 verbose(env, "BPF_LDX uses reserved fields\n");
9555 return -EINVAL;
9556 }
9557
9558 if (BPF_CLASS(insn->code) == BPF_STX &&
9559 ((BPF_MODE(insn->code) != BPF_MEM &&
9560 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
61bd5218 9561 verbose(env, "BPF_STX uses reserved fields\n");
9562 return -EINVAL;
9563 }
9564
0246e64d 9565 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
d8eca5bb 9566 struct bpf_insn_aux_data *aux;
9567 struct bpf_map *map;
9568 struct fd f;
d8eca5bb 9569 u64 addr;
9570
9571 if (i == insn_cnt - 1 || insn[1].code != 0 ||
9572 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
9573 insn[1].off != 0) {
61bd5218 9574 verbose(env, "invalid bpf_ld_imm64 insn\n");
9575 return -EINVAL;
9576 }
9577
d8eca5bb 9578 if (insn[0].src_reg == 0)
9579 /* valid generic load 64-bit imm */
9580 goto next_insn;
9581
9582 /* In final convert_pseudo_ld_imm64() step, this is
9583 * converted into regular 64-bit imm load insn.
9584 */
9585 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
9586 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
9587 (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
9588 insn[1].imm != 0)) {
9589 verbose(env,
9590 "unrecognized bpf_ld_imm64 insn\n");
9591 return -EINVAL;
9592 }
9593
20182390 9594 f = fdget(insn[0].imm);
c2101297 9595 map = __bpf_map_get(f);
0246e64d 9596 if (IS_ERR(map)) {
61bd5218 9597 verbose(env, "fd %d is not pointing to valid bpf_map\n",
20182390 9598 insn[0].imm);
9599 return PTR_ERR(map);
9600 }
9601
61bd5218 9602 err = check_map_prog_compatibility(env, map, env->prog);
9603 if (err) {
9604 fdput(f);
9605 return err;
9606 }
9607
9608 aux = &env->insn_aux_data[i];
9609 if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
9610 addr = (unsigned long)map;
9611 } else {
9612 u32 off = insn[1].imm;
9613
9614 if (off >= BPF_MAX_VAR_OFF) {
9615 verbose(env, "direct value offset of %u is not allowed\n", off);
9616 fdput(f);
9617 return -EINVAL;
9618 }
9619
9620 if (!map->ops->map_direct_value_addr) {
9621 verbose(env, "no direct value access support for this map type\n");
9622 fdput(f);
9623 return -EINVAL;
9624 }
9625
9626 err = map->ops->map_direct_value_addr(map, &addr, off);
9627 if (err) {
9628 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
9629 map->value_size, off);
9630 fdput(f);
9631 return err;
9632 }
9633
9634 aux->map_off = off;
9635 addr += off;
9636 }
9637
9638 insn[0].imm = (u32)addr;
9639 insn[1].imm = addr >> 32;
9640
9641 /* check whether we recorded this map already */
d8eca5bb 9642 for (j = 0; j < env->used_map_cnt; j++) {
0246e64d 9643 if (env->used_maps[j] == map) {
d8eca5bb 9644 aux->map_index = j;
9645 fdput(f);
9646 goto next_insn;
9647 }
d8eca5bb 9648 }
9649
9650 if (env->used_map_cnt >= MAX_USED_MAPS) {
9651 fdput(f);
9652 return -E2BIG;
9653 }
9654
 9655 /* hold the map. If the program is rejected by the verifier,
9656 * the map will be released by release_maps() or it
9657 * will be used by the valid program until it's unloaded
ab7f5bf0 9658 * and all maps are released in free_used_maps()
0246e64d 9659 */
1e0bd5a0 9660 bpf_map_inc(map);
9661
9662 aux->map_index = env->used_map_cnt;
9663 env->used_maps[env->used_map_cnt++] = map;
9664
b741f163 9665 if (bpf_map_is_cgroup_storage(map) &&
e4730423 9666 bpf_cgroup_storage_assign(env->prog->aux, map)) {
b741f163 9667 verbose(env, "only one cgroup storage of each type is allowed\n");
9668 fdput(f);
9669 return -EBUSY;
9670 }
9671
9672 fdput(f);
9673next_insn:
9674 insn++;
9675 i++;
9676 continue;
9677 }
9678
9679 /* Basic sanity check before we invest more work here. */
9680 if (!bpf_opcode_in_insntable(insn->code)) {
9681 verbose(env, "unknown opcode %02x\n", insn->code);
9682 return -EINVAL;
0246e64d
AS
9683 }
9684 }
9685
9686 /* now all pseudo BPF_LD_IMM64 instructions load valid
9687 * 'struct bpf_map *' into a register instead of user map_fd.
 9688 * These pointers will be used later by the verifier to validate map access.
9689 */
9690 return 0;
9691}
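
/* Illustrative sketch (editorial addition, not kernel source): how the pass
 * above rewrites a pseudo map-fd load. A bpf_ld_imm64 spans two insn slots;
 * as emitted by a loader for a map whose fd is 3 (fd value made up):
 *
 *   insn[0]: code = BPF_LD | BPF_IMM | BPF_DW,
 *            src_reg = BPF_PSEUDO_MAP_FD, imm = 3
 *   insn[1]: second half of the wide insn, imm = 0
 *
 * After the pass above, assuming the fd resolved to a map at the
 * hypothetical kernel address 0xffff888012345678:
 *
 *   insn[0].imm = 0x12345678;   lower 32 bits of the map pointer
 *   insn[1].imm = 0xffff8880;   upper 32 bits of the map pointer
 */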
9692
9693/* drop refcnt of maps used by the rejected program */
58e2af8b 9694static void release_maps(struct bpf_verifier_env *env)
0246e64d 9695{
a2ea0746
DB
9696 __bpf_free_used_maps(env->prog->aux, env->used_maps,
9697 env->used_map_cnt);
0246e64d
AS
9698}
9699
9700/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
58e2af8b 9701static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
0246e64d
AS
9702{
9703 struct bpf_insn *insn = env->prog->insnsi;
9704 int insn_cnt = env->prog->len;
9705 int i;
9706
9707 for (i = 0; i < insn_cnt; i++, insn++)
9708 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
9709 insn->src_reg = 0;
9710}
9711
8041902d
AS
 9712/* single env->prog->insnsi[off] instruction was replaced with the range
 9713 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
9714 * [0, off) and [off, end) to new locations, so the patched range stays zero
9715 */
b325fbca
JW
9716static int adjust_insn_aux_data(struct bpf_verifier_env *env,
9717 struct bpf_prog *new_prog, u32 off, u32 cnt)
8041902d
AS
9718{
9719 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
b325fbca
JW
9720 struct bpf_insn *insn = new_prog->insnsi;
9721 u32 prog_len;
c131187d 9722 int i;
8041902d 9723
b325fbca
JW
 9724 /* aux info at OFF always needs adjustment, no matter whether the fast
 9725 * path (cnt == 1) is taken or not. There is no guarantee the insn at OFF
 9726 * is the original insn from the old prog.
9727 */
9728 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
9729
8041902d
AS
9730 if (cnt == 1)
9731 return 0;
b325fbca 9732 prog_len = new_prog->len;
fad953ce
KC
9733 new_data = vzalloc(array_size(prog_len,
9734 sizeof(struct bpf_insn_aux_data)));
8041902d
AS
9735 if (!new_data)
9736 return -ENOMEM;
9737 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
9738 memcpy(new_data + off + cnt - 1, old_data + off,
9739 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
b325fbca 9740 for (i = off; i < off + cnt - 1; i++) {
51c39bb1 9741 new_data[i].seen = env->pass_cnt;
b325fbca
JW
9742 new_data[i].zext_dst = insn_has_def32(env, insn + i);
9743 }
8041902d
AS
9744 env->insn_aux_data = new_data;
9745 vfree(old_data);
9746 return 0;
9747}
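
/* Worked example (editorial addition): patching insn 5 with a 3-insn
 * sequence, i.e. off = 5, cnt = 3. aux data for insns [0, 5) is copied
 * unchanged, the old aux data for insns [5, old_len) lands at index
 * 5 + 3 - 1 = 7 onwards (so the original insn's aux stays associated with
 * the last insn of the patch), and the two freshly inserted slots 5 and 6
 * get seen = env->pass_cnt plus a recomputed zext_dst.
 */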
9748
cc8b0b92
AS
9749static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
9750{
9751 int i;
9752
9753 if (len == 1)
9754 return;
4cb3d99c
JW
9755 /* NOTE: fake 'exit' subprog should be updated as well. */
9756 for (i = 0; i <= env->subprog_cnt; i++) {
afd59424 9757 if (env->subprog_info[i].start <= off)
cc8b0b92 9758 continue;
9c8105bd 9759 env->subprog_info[i].start += len - 1;
cc8b0b92
AS
9760 }
9761}
9762
a748c697
MF
9763static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
9764{
9765 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
9766 int i, sz = prog->aux->size_poke_tab;
9767 struct bpf_jit_poke_descriptor *desc;
9768
9769 for (i = 0; i < sz; i++) {
9770 desc = &tab[i];
9771 desc->insn_idx += len - 1;
9772 }
9773}
9774
8041902d
AS
9775static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
9776 const struct bpf_insn *patch, u32 len)
9777{
9778 struct bpf_prog *new_prog;
9779
9780 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4f73379e
AS
9781 if (IS_ERR(new_prog)) {
9782 if (PTR_ERR(new_prog) == -ERANGE)
9783 verbose(env,
9784 "insn %d cannot be patched due to 16-bit range\n",
9785 env->insn_aux_data[off].orig_idx);
8041902d 9786 return NULL;
4f73379e 9787 }
b325fbca 9788 if (adjust_insn_aux_data(env, new_prog, off, len))
8041902d 9789 return NULL;
cc8b0b92 9790 adjust_subprog_starts(env, off, len);
a748c697 9791 adjust_poke_descs(new_prog, len);
8041902d
AS
9792 return new_prog;
9793}
9794
52875a04
JK
9795static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
9796 u32 off, u32 cnt)
9797{
9798 int i, j;
9799
9800 /* find first prog starting at or after off (first to remove) */
9801 for (i = 0; i < env->subprog_cnt; i++)
9802 if (env->subprog_info[i].start >= off)
9803 break;
9804 /* find first prog starting at or after off + cnt (first to stay) */
9805 for (j = i; j < env->subprog_cnt; j++)
9806 if (env->subprog_info[j].start >= off + cnt)
9807 break;
9808 /* if j doesn't start exactly at off + cnt, we are just removing
9809 * the front of previous prog
9810 */
9811 if (env->subprog_info[j].start != off + cnt)
9812 j--;
9813
9814 if (j > i) {
9815 struct bpf_prog_aux *aux = env->prog->aux;
9816 int move;
9817
9818 /* move fake 'exit' subprog as well */
9819 move = env->subprog_cnt + 1 - j;
9820
9821 memmove(env->subprog_info + i,
9822 env->subprog_info + j,
9823 sizeof(*env->subprog_info) * move);
9824 env->subprog_cnt -= j - i;
9825
9826 /* remove func_info */
9827 if (aux->func_info) {
9828 move = aux->func_info_cnt - j;
9829
9830 memmove(aux->func_info + i,
9831 aux->func_info + j,
9832 sizeof(*aux->func_info) * move);
9833 aux->func_info_cnt -= j - i;
9834 /* func_info->insn_off is set after all code rewrites,
9835 * in adjust_btf_func() - no need to adjust
9836 */
9837 }
9838 } else {
9839 /* convert i from "first prog to remove" to "first to adjust" */
9840 if (env->subprog_info[i].start == off)
9841 i++;
9842 }
9843
9844 /* update fake 'exit' subprog as well */
9845 for (; i <= env->subprog_cnt; i++)
9846 env->subprog_info[i].start -= cnt;
9847
9848 return 0;
9849}
9850
9851static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
9852 u32 cnt)
9853{
9854 struct bpf_prog *prog = env->prog;
9855 u32 i, l_off, l_cnt, nr_linfo;
9856 struct bpf_line_info *linfo;
9857
9858 nr_linfo = prog->aux->nr_linfo;
9859 if (!nr_linfo)
9860 return 0;
9861
9862 linfo = prog->aux->linfo;
9863
9864 /* find first line info to remove, count lines to be removed */
9865 for (i = 0; i < nr_linfo; i++)
9866 if (linfo[i].insn_off >= off)
9867 break;
9868
9869 l_off = i;
9870 l_cnt = 0;
9871 for (; i < nr_linfo; i++)
9872 if (linfo[i].insn_off < off + cnt)
9873 l_cnt++;
9874 else
9875 break;
9876
 9877 /* If the first live insn doesn't match the first live linfo, it needs to
 9878 * "inherit" the last removed linfo. prog is already modified, so prog->len == off
 9879 * means there are no live instructions after it (the tail of the program was removed).
9880 */
9881 if (prog->len != off && l_cnt &&
9882 (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
9883 l_cnt--;
9884 linfo[--i].insn_off = off + cnt;
9885 }
9886
 9887 /* remove the line info entries which refer to the removed instructions */
9888 if (l_cnt) {
9889 memmove(linfo + l_off, linfo + i,
9890 sizeof(*linfo) * (nr_linfo - i));
9891
9892 prog->aux->nr_linfo -= l_cnt;
9893 nr_linfo = prog->aux->nr_linfo;
9894 }
9895
9896 /* pull all linfo[i].insn_off >= off + cnt in by cnt */
9897 for (i = l_off; i < nr_linfo; i++)
9898 linfo[i].insn_off -= cnt;
9899
9900 /* fix up all subprogs (incl. 'exit') which start >= off */
9901 for (i = 0; i <= env->subprog_cnt; i++)
9902 if (env->subprog_info[i].linfo_idx > l_off) {
9903 /* program may have started in the removed region but
9904 * may not be fully removed
9905 */
9906 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
9907 env->subprog_info[i].linfo_idx -= l_cnt;
9908 else
9909 env->subprog_info[i].linfo_idx = l_off;
9910 }
9911
9912 return 0;
9913}
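
/* Worked example (editorial addition): removing insns [4, 6), i.e.
 * off = 4, cnt = 2, with line infos at insn_off {0, 4, 5, 8}. The entries
 * at 4 and 5 fall inside the removed range (l_off = 1, l_cnt = 2). The
 * next entry starts at 8 != off + cnt, so the first live insn after the
 * hole has no linfo of its own: the last removed entry is kept and
 * re-pointed to insn_off = 6, the other one is dropped, and the final
 * pull-in by cnt yields insn_off {0, 4, 6}.
 */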
9914
9915static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
9916{
9917 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
9918 unsigned int orig_prog_len = env->prog->len;
9919 int err;
9920
08ca90af
JK
9921 if (bpf_prog_is_dev_bound(env->prog->aux))
9922 bpf_prog_offload_remove_insns(env, off, cnt);
9923
52875a04
JK
9924 err = bpf_remove_insns(env->prog, off, cnt);
9925 if (err)
9926 return err;
9927
9928 err = adjust_subprog_starts_after_remove(env, off, cnt);
9929 if (err)
9930 return err;
9931
9932 err = bpf_adj_linfo_after_remove(env, off, cnt);
9933 if (err)
9934 return err;
9935
9936 memmove(aux_data + off, aux_data + off + cnt,
9937 sizeof(*aux_data) * (orig_prog_len - off - cnt));
9938
9939 return 0;
9940}
9941
2a5418a1
DB
9942/* The verifier does more data flow analysis than llvm and will not
9943 * explore branches that are dead at run time. Malicious programs can
9944 * have dead code too. Therefore replace all dead at-run-time code
9945 * with 'ja -1'.
9946 *
 9947 * Plain nops are not optimal, e.g. if they sat at the end of the
 9948 * program and, through another bug, we managed to jump there, we
 9949 * would execute beyond program memory. Returning an exception
 9950 * code also wouldn't work, since the dead code could be located
 9951 * inside a subprog.
c131187d
AS
9952 */
9953static void sanitize_dead_code(struct bpf_verifier_env *env)
9954{
9955 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
2a5418a1 9956 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
c131187d
AS
9957 struct bpf_insn *insn = env->prog->insnsi;
9958 const int insn_cnt = env->prog->len;
9959 int i;
9960
9961 for (i = 0; i < insn_cnt; i++) {
9962 if (aux_data[i].seen)
9963 continue;
2a5418a1 9964 memcpy(insn + i, &trap, sizeof(trap));
c131187d
AS
9965 }
9966}
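
/* Illustrative sketch (editorial addition): every insn never marked as
 * seen is overwritten with BPF_JMP_IMM(BPF_JA, 0, 0, -1), an
 * unconditional jump with offset -1 that branches back onto itself:
 *
 *   dead: goto dead;
 *
 * so even if a bug ever transferred control there, execution would spin
 * in place rather than run off the end of the program.
 */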
9967
e2ae4ca2
JK
9968static bool insn_is_cond_jump(u8 code)
9969{
9970 u8 op;
9971
092ed096
JW
9972 if (BPF_CLASS(code) == BPF_JMP32)
9973 return true;
9974
e2ae4ca2
JK
9975 if (BPF_CLASS(code) != BPF_JMP)
9976 return false;
9977
9978 op = BPF_OP(code);
9979 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
9980}
9981
9982static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
9983{
9984 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
9985 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
9986 struct bpf_insn *insn = env->prog->insnsi;
9987 const int insn_cnt = env->prog->len;
9988 int i;
9989
9990 for (i = 0; i < insn_cnt; i++, insn++) {
9991 if (!insn_is_cond_jump(insn->code))
9992 continue;
9993
9994 if (!aux_data[i + 1].seen)
9995 ja.off = insn->off;
9996 else if (!aux_data[i + 1 + insn->off].seen)
9997 ja.off = 0;
9998 else
9999 continue;
10000
08ca90af
JK
10001 if (bpf_prog_is_dev_bound(env->prog->aux))
10002 bpf_prog_offload_replace_insn(env, i, &ja);
10003
e2ae4ca2
JK
10004 memcpy(insn, &ja, sizeof(ja));
10005 }
10006}
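
/* Worked example (editorial addition): for a conditional jump whose
 * fall-through successor was never seen, e.g.
 *
 *   if r1 == 0 goto +4      with the insn at pc + 1 dead
 *
 * the branch must always be taken, so it is hard-wired to 'goto +4'.
 * Conversely, if the jump target is the dead side, it becomes 'goto +0',
 * a fall-through nop that opt_remove_nops() below can then delete.
 */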
10007
52875a04
JK
10008static int opt_remove_dead_code(struct bpf_verifier_env *env)
10009{
10010 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
10011 int insn_cnt = env->prog->len;
10012 int i, err;
10013
10014 for (i = 0; i < insn_cnt; i++) {
10015 int j;
10016
10017 j = 0;
10018 while (i + j < insn_cnt && !aux_data[i + j].seen)
10019 j++;
10020 if (!j)
10021 continue;
10022
10023 err = verifier_remove_insns(env, i, j);
10024 if (err)
10025 return err;
10026 insn_cnt = env->prog->len;
10027 }
10028
10029 return 0;
10030}
10031
a1b14abc
JK
10032static int opt_remove_nops(struct bpf_verifier_env *env)
10033{
10034 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
10035 struct bpf_insn *insn = env->prog->insnsi;
10036 int insn_cnt = env->prog->len;
10037 int i, err;
10038
10039 for (i = 0; i < insn_cnt; i++) {
10040 if (memcmp(&insn[i], &ja, sizeof(ja)))
10041 continue;
10042
10043 err = verifier_remove_insns(env, i, 1);
10044 if (err)
10045 return err;
10046 insn_cnt--;
10047 i--;
10048 }
10049
10050 return 0;
10051}
10052
d6c2308c
JW
10053static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
10054 const union bpf_attr *attr)
a4b1d3c1 10055{
d6c2308c 10056 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
a4b1d3c1 10057 struct bpf_insn_aux_data *aux = env->insn_aux_data;
d6c2308c 10058 int i, patch_len, delta = 0, len = env->prog->len;
a4b1d3c1 10059 struct bpf_insn *insns = env->prog->insnsi;
a4b1d3c1 10060 struct bpf_prog *new_prog;
d6c2308c 10061 bool rnd_hi32;
a4b1d3c1 10062
d6c2308c 10063 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
a4b1d3c1 10064 zext_patch[1] = BPF_ZEXT_REG(0);
d6c2308c
JW
10065 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
10066 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
10067 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
a4b1d3c1
JW
10068 for (i = 0; i < len; i++) {
10069 int adj_idx = i + delta;
10070 struct bpf_insn insn;
10071
d6c2308c
JW
10072 insn = insns[adj_idx];
10073 if (!aux[adj_idx].zext_dst) {
10074 u8 code, class;
10075 u32 imm_rnd;
10076
10077 if (!rnd_hi32)
10078 continue;
10079
10080 code = insn.code;
10081 class = BPF_CLASS(code);
10082 if (insn_no_def(&insn))
10083 continue;
10084
 10085 /* NOTE: arg "reg" (the fourth one) is only used for
 10086 * BPF_STX, which has been ruled out by the check
 10087 * above, so it is safe to pass NULL here.
10088 */
10089 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
10090 if (class == BPF_LD &&
10091 BPF_MODE(code) == BPF_IMM)
10092 i++;
10093 continue;
10094 }
10095
 10096 /* a ctx load could be transformed into a wider load. */
10097 if (class == BPF_LDX &&
10098 aux[adj_idx].ptr_type == PTR_TO_CTX)
10099 continue;
10100
10101 imm_rnd = get_random_int();
10102 rnd_hi32_patch[0] = insn;
10103 rnd_hi32_patch[1].imm = imm_rnd;
10104 rnd_hi32_patch[3].dst_reg = insn.dst_reg;
10105 patch = rnd_hi32_patch;
10106 patch_len = 4;
10107 goto apply_patch_buffer;
10108 }
10109
10110 if (!bpf_jit_needs_zext())
a4b1d3c1
JW
10111 continue;
10112
a4b1d3c1
JW
10113 zext_patch[0] = insn;
10114 zext_patch[1].dst_reg = insn.dst_reg;
10115 zext_patch[1].src_reg = insn.dst_reg;
d6c2308c
JW
10116 patch = zext_patch;
10117 patch_len = 2;
10118apply_patch_buffer:
10119 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
a4b1d3c1
JW
10120 if (!new_prog)
10121 return -ENOMEM;
10122 env->prog = new_prog;
10123 insns = new_prog->insnsi;
10124 aux = env->insn_aux_data;
d6c2308c 10125 delta += patch_len - 1;
a4b1d3c1
JW
10126 }
10127
10128 return 0;
10129}
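
/* Illustrative sketch (editorial addition) of the two patch shapes used
 * above. A 32-bit def that needs an explicit zero-extension gets
 * zext_patch appended:
 *
 *   w2 = w1                     original 32-bit insn
 *   r2 = (u32)r2                BPF_ZEXT_REG(2)
 *
 * With BPF_F_TEST_RND_HI32, insns whose upper 32 bits are semantically
 * dead instead get the high half poisoned with a random constant
 * (rnd_hi32_patch), so a JIT that wrongly relied on those bits fails
 * the test run:
 *
 *   w2 = w1
 *   AX = imm_rnd
 *   AX <<= 32
 *   r2 |= AX
 */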
10130
c64b7983
JS
10131/* convert load instructions that access fields of a context type into a
10132 * sequence of instructions that access fields of the underlying structure:
10133 * struct __sk_buff -> struct sk_buff
10134 * struct bpf_sock_ops -> struct sock
9bac3d6d 10135 */
58e2af8b 10136static int convert_ctx_accesses(struct bpf_verifier_env *env)
9bac3d6d 10137{
00176a34 10138 const struct bpf_verifier_ops *ops = env->ops;
f96da094 10139 int i, cnt, size, ctx_field_size, delta = 0;
3df126f3 10140 const int insn_cnt = env->prog->len;
36bbef52 10141 struct bpf_insn insn_buf[16], *insn;
46f53a65 10142 u32 target_size, size_default, off;
9bac3d6d 10143 struct bpf_prog *new_prog;
d691f9e8 10144 enum bpf_access_type type;
f96da094 10145 bool is_narrower_load;
9bac3d6d 10146
b09928b9
DB
10147 if (ops->gen_prologue || env->seen_direct_write) {
10148 if (!ops->gen_prologue) {
10149 verbose(env, "bpf verifier is misconfigured\n");
10150 return -EINVAL;
10151 }
36bbef52
DB
10152 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
10153 env->prog);
10154 if (cnt >= ARRAY_SIZE(insn_buf)) {
61bd5218 10155 verbose(env, "bpf verifier is misconfigured\n");
36bbef52
DB
10156 return -EINVAL;
10157 } else if (cnt) {
8041902d 10158 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
36bbef52
DB
10159 if (!new_prog)
10160 return -ENOMEM;
8041902d 10161
36bbef52 10162 env->prog = new_prog;
3df126f3 10163 delta += cnt - 1;
36bbef52
DB
10164 }
10165 }
10166
c64b7983 10167 if (bpf_prog_is_dev_bound(env->prog->aux))
9bac3d6d
AS
10168 return 0;
10169
3df126f3 10170 insn = env->prog->insnsi + delta;
36bbef52 10171
9bac3d6d 10172 for (i = 0; i < insn_cnt; i++, insn++) {
c64b7983
JS
10173 bpf_convert_ctx_access_t convert_ctx_access;
10174
62c7989b
DB
10175 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
10176 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
10177 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
ea2e7ce5 10178 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
d691f9e8 10179 type = BPF_READ;
62c7989b
DB
10180 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
10181 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
10182 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
ea2e7ce5 10183 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
d691f9e8
AS
10184 type = BPF_WRITE;
10185 else
9bac3d6d
AS
10186 continue;
10187
af86ca4e
AS
10188 if (type == BPF_WRITE &&
10189 env->insn_aux_data[i + delta].sanitize_stack_off) {
10190 struct bpf_insn patch[] = {
10191 /* Sanitize suspicious stack slot with zero.
10192 * There are no memory dependencies for this store,
10193 * since it's only using frame pointer and immediate
10194 * constant of zero
10195 */
10196 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
10197 env->insn_aux_data[i + delta].sanitize_stack_off,
10198 0),
10199 /* the original STX instruction will immediately
10200 * overwrite the same stack slot with appropriate value
10201 */
10202 *insn,
10203 };
10204
10205 cnt = ARRAY_SIZE(patch);
10206 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
10207 if (!new_prog)
10208 return -ENOMEM;
10209
10210 delta += cnt - 1;
10211 env->prog = new_prog;
10212 insn = new_prog->insnsi + i + delta;
10213 continue;
10214 }
10215
c64b7983
JS
10216 switch (env->insn_aux_data[i + delta].ptr_type) {
10217 case PTR_TO_CTX:
10218 if (!ops->convert_ctx_access)
10219 continue;
10220 convert_ctx_access = ops->convert_ctx_access;
10221 break;
10222 case PTR_TO_SOCKET:
46f8bc92 10223 case PTR_TO_SOCK_COMMON:
c64b7983
JS
10224 convert_ctx_access = bpf_sock_convert_ctx_access;
10225 break;
655a51e5
MKL
10226 case PTR_TO_TCP_SOCK:
10227 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
10228 break;
fada7fdc
JL
10229 case PTR_TO_XDP_SOCK:
10230 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
10231 break;
2a02759e 10232 case PTR_TO_BTF_ID:
27ae7997
MKL
10233 if (type == BPF_READ) {
10234 insn->code = BPF_LDX | BPF_PROBE_MEM |
10235 BPF_SIZE((insn)->code);
10236 env->prog->aux->num_exentries++;
7e40781c 10237 } else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
2a02759e
AS
10238 verbose(env, "Writes through BTF pointers are not allowed\n");
10239 return -EINVAL;
10240 }
2a02759e 10241 continue;
c64b7983 10242 default:
9bac3d6d 10243 continue;
c64b7983 10244 }
9bac3d6d 10245
31fd8581 10246 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
f96da094 10247 size = BPF_LDST_BYTES(insn);
31fd8581
YS
10248
10249 /* If the read access is a narrower load of the field,
 10250 * convert it to a 4/8-byte load, to minimize the program type
 10251 * specific convert_ctx_access changes. If the conversion is
 10252 * successful, we will apply the proper mask to the result.
10253 */
f96da094 10254 is_narrower_load = size < ctx_field_size;
46f53a65
AI
10255 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
10256 off = insn->off;
31fd8581 10257 if (is_narrower_load) {
f96da094
DB
10258 u8 size_code;
10259
10260 if (type == BPF_WRITE) {
61bd5218 10261 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
f96da094
DB
10262 return -EINVAL;
10263 }
31fd8581 10264
f96da094 10265 size_code = BPF_H;
31fd8581
YS
10266 if (ctx_field_size == 4)
10267 size_code = BPF_W;
10268 else if (ctx_field_size == 8)
10269 size_code = BPF_DW;
f96da094 10270
bc23105c 10271 insn->off = off & ~(size_default - 1);
31fd8581
YS
10272 insn->code = BPF_LDX | BPF_MEM | size_code;
10273 }
f96da094
DB
10274
10275 target_size = 0;
c64b7983
JS
10276 cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
10277 &target_size);
f96da094
DB
10278 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
10279 (ctx_field_size && !target_size)) {
61bd5218 10280 verbose(env, "bpf verifier is misconfigured\n");
9bac3d6d
AS
10281 return -EINVAL;
10282 }
f96da094
DB
10283
10284 if (is_narrower_load && size < target_size) {
d895a0f1
IL
10285 u8 shift = bpf_ctx_narrow_access_offset(
10286 off, size, size_default) * 8;
46f53a65
AI
10287 if (ctx_field_size <= 4) {
10288 if (shift)
10289 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
10290 insn->dst_reg,
10291 shift);
31fd8581 10292 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
f96da094 10293 (1 << size * 8) - 1);
46f53a65
AI
10294 } else {
10295 if (shift)
10296 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
10297 insn->dst_reg,
10298 shift);
31fd8581 10299 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
e2f7fc0a 10300 (1ULL << size * 8) - 1);
46f53a65 10301 }
31fd8581 10302 }
9bac3d6d 10303
8041902d 10304 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9bac3d6d
AS
10305 if (!new_prog)
10306 return -ENOMEM;
10307
3df126f3 10308 delta += cnt - 1;
9bac3d6d
AS
10309
10310 /* keep walking new program and skip insns we just inserted */
10311 env->prog = new_prog;
3df126f3 10312 insn = new_prog->insnsi + i + delta;
9bac3d6d
AS
10313 }
10314
10315 return 0;
10316}
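
/* Worked example (editorial addition): a 1-byte narrow read of a 4-byte
 * ctx field,
 *
 *   r0 = *(u8 *)(r1 + off)
 *
 * is first widened to a load of the full field at the aligned offset,
 * rewritten by the program type's convert_ctx_access(), and the wanted
 * byte is then carved out by shift + mask:
 *
 *   r0 = *(u32 *)(r1 + aligned_off)   rewritten field access
 *   w0 >>= shift                      only when the byte is not the lowest
 *   w0 &= 0xff                        keep the low size * 8 = 8 bits
 */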
10317
1c2a088a
AS
10318static int jit_subprogs(struct bpf_verifier_env *env)
10319{
10320 struct bpf_prog *prog = env->prog, **func, *tmp;
10321 int i, j, subprog_start, subprog_end = 0, len, subprog;
a748c697 10322 struct bpf_map *map_ptr;
7105e828 10323 struct bpf_insn *insn;
1c2a088a 10324 void *old_bpf_func;
c4c0bdc0 10325 int err, num_exentries;
1c2a088a 10326
f910cefa 10327 if (env->subprog_cnt <= 1)
1c2a088a
AS
10328 return 0;
10329
7105e828 10330 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
1c2a088a
AS
10331 if (insn->code != (BPF_JMP | BPF_CALL) ||
10332 insn->src_reg != BPF_PSEUDO_CALL)
10333 continue;
c7a89784
DB
10334 /* Upon error here we cannot fall back to interpreter but
10335 * need a hard reject of the program. Thus -EFAULT is
10336 * propagated in any case.
10337 */
1c2a088a
AS
10338 subprog = find_subprog(env, i + insn->imm + 1);
10339 if (subprog < 0) {
10340 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
10341 i + insn->imm + 1);
10342 return -EFAULT;
10343 }
10344 /* temporarily remember subprog id inside insn instead of
10345 * aux_data, since next loop will split up all insns into funcs
10346 */
f910cefa 10347 insn->off = subprog;
1c2a088a
AS
10348 /* remember original imm in case JIT fails and fallback
10349 * to interpreter will be needed
10350 */
10351 env->insn_aux_data[i].call_imm = insn->imm;
10352 /* point imm to __bpf_call_base+1 from JITs point of view */
10353 insn->imm = 1;
10354 }
10355
c454a46b
MKL
10356 err = bpf_prog_alloc_jited_linfo(prog);
10357 if (err)
10358 goto out_undo_insn;
10359
10360 err = -ENOMEM;
6396bb22 10361 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
1c2a088a 10362 if (!func)
c7a89784 10363 goto out_undo_insn;
1c2a088a 10364
f910cefa 10365 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a 10366 subprog_start = subprog_end;
4cb3d99c 10367 subprog_end = env->subprog_info[i + 1].start;
1c2a088a
AS
10368
10369 len = subprog_end - subprog_start;
492ecee8
AS
10370 /* BPF_PROG_RUN doesn't call subprogs directly,
10371 * hence main prog stats include the runtime of subprogs.
 10372 * subprogs don't have IDs and are not reachable via prog_get_next_id,
 10373 * so func[i]->aux->stats will never be accessed and stays NULL
10374 */
10375 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
1c2a088a
AS
10376 if (!func[i])
10377 goto out_free;
10378 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
10379 len * sizeof(struct bpf_insn));
4f74d809 10380 func[i]->type = prog->type;
1c2a088a 10381 func[i]->len = len;
4f74d809
DB
10382 if (bpf_prog_calc_tag(func[i]))
10383 goto out_free;
1c2a088a 10384 func[i]->is_func = 1;
ba64e7d8
YS
10385 func[i]->aux->func_idx = i;
10386 /* the btf and func_info will be freed only at prog->aux */
10387 func[i]->aux->btf = prog->aux->btf;
10388 func[i]->aux->func_info = prog->aux->func_info;
10389
a748c697
MF
10390 for (j = 0; j < prog->aux->size_poke_tab; j++) {
10391 u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
10392 int ret;
10393
10394 if (!(insn_idx >= subprog_start &&
10395 insn_idx <= subprog_end))
10396 continue;
10397
10398 ret = bpf_jit_add_poke_descriptor(func[i],
10399 &prog->aux->poke_tab[j]);
10400 if (ret < 0) {
10401 verbose(env, "adding tail call poke descriptor failed\n");
10402 goto out_free;
10403 }
10404
10405 func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;
10406
10407 map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
10408 ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
10409 if (ret < 0) {
10410 verbose(env, "tracking tail call prog failed\n");
10411 goto out_free;
10412 }
10413 }
10414
1c2a088a
AS
10415 /* Use bpf_prog_F_tag to indicate functions in stack traces.
 10416 * Long term we would need debug info to populate names
10417 */
10418 func[i]->aux->name[0] = 'F';
9c8105bd 10419 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
1c2a088a 10420 func[i]->jit_requested = 1;
c454a46b
MKL
10421 func[i]->aux->linfo = prog->aux->linfo;
10422 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
10423 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
10424 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
c4c0bdc0
YS
10425 num_exentries = 0;
10426 insn = func[i]->insnsi;
10427 for (j = 0; j < func[i]->len; j++, insn++) {
10428 if (BPF_CLASS(insn->code) == BPF_LDX &&
10429 BPF_MODE(insn->code) == BPF_PROBE_MEM)
10430 num_exentries++;
10431 }
10432 func[i]->aux->num_exentries = num_exentries;
ebf7d1f5 10433 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
1c2a088a
AS
10434 func[i] = bpf_int_jit_compile(func[i]);
10435 if (!func[i]->jited) {
10436 err = -ENOTSUPP;
10437 goto out_free;
10438 }
10439 cond_resched();
10440 }
a748c697
MF
10441
 10442 /* Untrack the main program's aux structs so that during map_poke_run()
 10443 * we will not stumble upon the unfilled poke descriptors; each
 10444 * of the main program's poke descs was distributed across subprogs
 10445 * and tracked onto its map, so we are sure that none of them will
 10446 * be missed after the operation below
10447 */
10448 for (i = 0; i < prog->aux->size_poke_tab; i++) {
10449 map_ptr = prog->aux->poke_tab[i].tail_call.map;
10450
10451 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
10452 }
10453
1c2a088a
AS
10454 /* at this point all bpf functions were successfully JITed
10455 * now populate all bpf_calls with correct addresses and
10456 * run last pass of JIT
10457 */
f910cefa 10458 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
10459 insn = func[i]->insnsi;
10460 for (j = 0; j < func[i]->len; j++, insn++) {
10461 if (insn->code != (BPF_JMP | BPF_CALL) ||
10462 insn->src_reg != BPF_PSEUDO_CALL)
10463 continue;
10464 subprog = insn->off;
0d306c31
PB
10465 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
10466 __bpf_call_base;
1c2a088a 10467 }
2162fed4
SD
10468
10469 /* we use the aux data to keep a list of the start addresses
10470 * of the JITed images for each function in the program
10471 *
10472 * for some architectures, such as powerpc64, the imm field
10473 * might not be large enough to hold the offset of the start
10474 * address of the callee's JITed image from __bpf_call_base
10475 *
10476 * in such cases, we can lookup the start address of a callee
10477 * by using its subprog id, available from the off field of
10478 * the call instruction, as an index for this list
10479 */
10480 func[i]->aux->func = func;
10481 func[i]->aux->func_cnt = env->subprog_cnt;
1c2a088a 10482 }
f910cefa 10483 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
10484 old_bpf_func = func[i]->bpf_func;
10485 tmp = bpf_int_jit_compile(func[i]);
10486 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
10487 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
c7a89784 10488 err = -ENOTSUPP;
1c2a088a
AS
10489 goto out_free;
10490 }
10491 cond_resched();
10492 }
10493
10494 /* finally lock prog and jit images for all functions and
 10495 * populate kallsyms
10496 */
f910cefa 10497 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
10498 bpf_prog_lock_ro(func[i]);
10499 bpf_prog_kallsyms_add(func[i]);
10500 }
7105e828
DB
10501
 10502 /* Last step: make the now-unused interpreter insns from the main
 10503 * prog consistent for later dump requests, so they
 10504 * look the same as if they had only been interpreted.
10505 */
10506 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
7105e828
DB
10507 if (insn->code != (BPF_JMP | BPF_CALL) ||
10508 insn->src_reg != BPF_PSEUDO_CALL)
10509 continue;
10510 insn->off = env->insn_aux_data[i].call_imm;
10511 subprog = find_subprog(env, i + insn->off + 1);
dbecd738 10512 insn->imm = subprog;
7105e828
DB
10513 }
10514
1c2a088a
AS
10515 prog->jited = 1;
10516 prog->bpf_func = func[0]->bpf_func;
10517 prog->aux->func = func;
f910cefa 10518 prog->aux->func_cnt = env->subprog_cnt;
c454a46b 10519 bpf_prog_free_unused_jited_linfo(prog);
1c2a088a
AS
10520 return 0;
10521out_free:
a748c697
MF
10522 for (i = 0; i < env->subprog_cnt; i++) {
10523 if (!func[i])
10524 continue;
10525
10526 for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
10527 map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
10528 map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
10529 }
10530 bpf_jit_free(func[i]);
10531 }
1c2a088a 10532 kfree(func);
c7a89784 10533out_undo_insn:
1c2a088a
AS
10534 /* cleanup main prog to be interpreted */
10535 prog->jit_requested = 0;
10536 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
10537 if (insn->code != (BPF_JMP | BPF_CALL) ||
10538 insn->src_reg != BPF_PSEUDO_CALL)
10539 continue;
10540 insn->off = 0;
10541 insn->imm = env->insn_aux_data[i].call_imm;
10542 }
c454a46b 10543 bpf_prog_free_jited_linfo(prog);
1c2a088a
AS
10544 return err;
10545}
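
/* Illustrative recap (editorial addition) of the bpf-to-bpf call fixup in
 * jit_subprogs(), for a call insn targeting subprog 2 (number made up):
 *
 *   pass 1: insn->off = 2; insn->imm = 1;    remember callee, mark call
 *   pass 2, after all subprogs are JITed:
 *           insn->imm = BPF_CAST_CALL(func[2]->bpf_func) - __bpf_call_base;
 *
 * On failure, insn->off and insn->imm are restored so the whole prog can
 * fall back to the interpreter.
 */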
10546
1ea47e01
AS
10547static int fixup_call_args(struct bpf_verifier_env *env)
10548{
19d28fbd 10549#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
10550 struct bpf_prog *prog = env->prog;
10551 struct bpf_insn *insn = prog->insnsi;
10552 int i, depth;
19d28fbd 10553#endif
e4052d06 10554 int err = 0;
1ea47e01 10555
e4052d06
QM
10556 if (env->prog->jit_requested &&
10557 !bpf_prog_is_dev_bound(env->prog->aux)) {
19d28fbd
DM
10558 err = jit_subprogs(env);
10559 if (err == 0)
1c2a088a 10560 return 0;
c7a89784
DB
10561 if (err == -EFAULT)
10562 return err;
19d28fbd
DM
10563 }
10564#ifndef CONFIG_BPF_JIT_ALWAYS_ON
e411901c
MF
10565 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
10566 /* When JIT fails the progs with bpf2bpf calls and tail_calls
10567 * have to be rejected, since interpreter doesn't support them yet.
10568 */
10569 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
10570 return -EINVAL;
10571 }
1ea47e01
AS
10572 for (i = 0; i < prog->len; i++, insn++) {
10573 if (insn->code != (BPF_JMP | BPF_CALL) ||
10574 insn->src_reg != BPF_PSEUDO_CALL)
10575 continue;
10576 depth = get_callee_stack_depth(env, insn, i);
10577 if (depth < 0)
10578 return depth;
10579 bpf_patch_call_args(insn, depth);
10580 }
19d28fbd
DM
10581 err = 0;
10582#endif
10583 return err;
1ea47e01
AS
10584}
10585
79741b3b 10586/* fixup insn->imm field of bpf_call instructions
81ed18ab 10587 * and inline eligible helpers as explicit sequence of BPF instructions
e245c5c6
AS
10588 *
10589 * this function is called after eBPF program passed verification
10590 */
79741b3b 10591static int fixup_bpf_calls(struct bpf_verifier_env *env)
e245c5c6 10592{
79741b3b 10593 struct bpf_prog *prog = env->prog;
d2e4c1e6 10594 bool expect_blinding = bpf_jit_blinding_enabled(prog);
79741b3b 10595 struct bpf_insn *insn = prog->insnsi;
e245c5c6 10596 const struct bpf_func_proto *fn;
79741b3b 10597 const int insn_cnt = prog->len;
09772d92 10598 const struct bpf_map_ops *ops;
c93552c4 10599 struct bpf_insn_aux_data *aux;
81ed18ab
AS
10600 struct bpf_insn insn_buf[16];
10601 struct bpf_prog *new_prog;
10602 struct bpf_map *map_ptr;
d2e4c1e6 10603 int i, ret, cnt, delta = 0;
e245c5c6 10604
79741b3b 10605 for (i = 0; i < insn_cnt; i++, insn++) {
f6b1b3bf
DB
10606 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
10607 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
10608 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
68fda450 10609 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
f6b1b3bf
DB
10610 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
10611 struct bpf_insn mask_and_div[] = {
10612 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
10613 /* Rx div 0 -> 0 */
10614 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
10615 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
10616 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10617 *insn,
10618 };
10619 struct bpf_insn mask_and_mod[] = {
10620 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
10621 /* Rx mod 0 -> Rx */
10622 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
10623 *insn,
10624 };
10625 struct bpf_insn *patchlet;
10626
10627 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
10628 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
10629 patchlet = mask_and_div + (is64 ? 1 : 0);
10630 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
10631 } else {
10632 patchlet = mask_and_mod + (is64 ? 1 : 0);
10633 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
10634 }
10635
10636 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
68fda450
AS
10637 if (!new_prog)
10638 return -ENOMEM;
10639
10640 delta += cnt - 1;
10641 env->prog = prog = new_prog;
10642 insn = new_prog->insnsi + i + delta;
10643 continue;
10644 }
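
		/* Illustrative sketch (editorial addition) of the patchlet
		 * above for a 32-bit division w0 /= w1 (is64 == false, so the
		 * leading MOV32 that truncates the divisor is kept):
		 *
		 *   w1 = w1                 force src to 32 bits
		 *   if r1 != 0 goto +2      non-zero divisor: do the div
		 *   w0 ^= w0                BPF defines div-by-zero as 0
		 *   goto +1                 skip over the division
		 *   w0 /= w1                original insn
		 */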
10645
e0cea7ce
DB
10646 if (BPF_CLASS(insn->code) == BPF_LD &&
10647 (BPF_MODE(insn->code) == BPF_ABS ||
10648 BPF_MODE(insn->code) == BPF_IND)) {
10649 cnt = env->ops->gen_ld_abs(insn, insn_buf);
10650 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
10651 verbose(env, "bpf verifier is misconfigured\n");
10652 return -EINVAL;
10653 }
10654
10655 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
10656 if (!new_prog)
10657 return -ENOMEM;
10658
10659 delta += cnt - 1;
10660 env->prog = prog = new_prog;
10661 insn = new_prog->insnsi + i + delta;
10662 continue;
10663 }
10664
979d63d5
DB
10665 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
10666 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
10667 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
10668 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
10669 struct bpf_insn insn_buf[16];
10670 struct bpf_insn *patch = &insn_buf[0];
10671 bool issrc, isneg;
10672 u32 off_reg;
10673
10674 aux = &env->insn_aux_data[i + delta];
3612af78
DB
10675 if (!aux->alu_state ||
10676 aux->alu_state == BPF_ALU_NON_POINTER)
979d63d5
DB
10677 continue;
10678
10679 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
10680 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
10681 BPF_ALU_SANITIZE_SRC;
10682
10683 off_reg = issrc ? insn->src_reg : insn->dst_reg;
10684 if (isneg)
10685 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
10686 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
10687 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
10688 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
10689 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
10690 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
10691 if (issrc) {
10692 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
10693 off_reg);
10694 insn->src_reg = BPF_REG_AX;
10695 } else {
10696 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
10697 BPF_REG_AX);
10698 }
10699 if (isneg)
10700 insn->code = insn->code == code_add ?
10701 code_sub : code_add;
10702 *patch++ = *insn;
10703 if (issrc && isneg)
10704 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
10705 cnt = patch - insn_buf;
10706
10707 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
10708 if (!new_prog)
10709 return -ENOMEM;
10710
10711 delta += cnt - 1;
10712 env->prog = prog = new_prog;
10713 insn = new_prog->insnsi + i + delta;
10714 continue;
10715 }
10716
79741b3b
AS
10717 if (insn->code != (BPF_JMP | BPF_CALL))
10718 continue;
cc8b0b92
AS
10719 if (insn->src_reg == BPF_PSEUDO_CALL)
10720 continue;
e245c5c6 10721
79741b3b
AS
10722 if (insn->imm == BPF_FUNC_get_route_realm)
10723 prog->dst_needed = 1;
10724 if (insn->imm == BPF_FUNC_get_prandom_u32)
10725 bpf_user_rnd_init_once();
9802d865
JB
10726 if (insn->imm == BPF_FUNC_override_return)
10727 prog->kprobe_override = 1;
79741b3b 10728 if (insn->imm == BPF_FUNC_tail_call) {
7b9f6da1
DM
10729 /* If we tail call into other programs, we
10730 * cannot make any assumptions since they can
10731 * be replaced dynamically during runtime in
10732 * the program array.
10733 */
10734 prog->cb_access = 1;
e411901c
MF
10735 if (!allow_tail_call_in_subprogs(env))
10736 prog->aux->stack_depth = MAX_BPF_STACK;
10737 prog->aux->max_pkt_offset = MAX_PACKET_OFF;
7b9f6da1 10738
79741b3b
AS
10739 /* mark bpf_tail_call as different opcode to avoid
 10740 * conditional branch in the interpreter for every normal
 10741 * call and to prevent accidental JITing by a JIT compiler
 10742 * that doesn't support bpf_tail_call yet
e245c5c6 10743 */
79741b3b 10744 insn->imm = 0;
71189fa9 10745 insn->code = BPF_JMP | BPF_TAIL_CALL;
b2157399 10746
c93552c4 10747 aux = &env->insn_aux_data[i + delta];
2c78ee89 10748 if (env->bpf_capable && !expect_blinding &&
cc52d914 10749 prog->jit_requested &&
d2e4c1e6
DB
10750 !bpf_map_key_poisoned(aux) &&
10751 !bpf_map_ptr_poisoned(aux) &&
10752 !bpf_map_ptr_unpriv(aux)) {
10753 struct bpf_jit_poke_descriptor desc = {
10754 .reason = BPF_POKE_REASON_TAIL_CALL,
10755 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
10756 .tail_call.key = bpf_map_key_immediate(aux),
a748c697 10757 .insn_idx = i + delta,
d2e4c1e6
DB
10758 };
10759
10760 ret = bpf_jit_add_poke_descriptor(prog, &desc);
10761 if (ret < 0) {
10762 verbose(env, "adding tail call poke descriptor failed\n");
10763 return ret;
10764 }
10765
10766 insn->imm = ret + 1;
10767 continue;
10768 }
10769
c93552c4
DB
10770 if (!bpf_map_ptr_unpriv(aux))
10771 continue;
10772
b2157399
AS
10773 /* instead of changing every JIT dealing with tail_call
10774 * emit two extra insns:
10775 * if (index >= max_entries) goto out;
10776 * index &= array->index_mask;
10777 * to avoid out-of-bounds cpu speculation
10778 */
c93552c4 10779 if (bpf_map_ptr_poisoned(aux)) {
40950343 10780 verbose(env, "tail_call abusing map_ptr\n");
b2157399
AS
10781 return -EINVAL;
10782 }
c93552c4 10783
d2e4c1e6 10784 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
b2157399
AS
10785 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
10786 map_ptr->max_entries, 2);
10787 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
10788 container_of(map_ptr,
10789 struct bpf_array,
10790 map)->index_mask);
10791 insn_buf[2] = *insn;
10792 cnt = 3;
10793 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
10794 if (!new_prog)
10795 return -ENOMEM;
10796
10797 delta += cnt - 1;
10798 env->prog = prog = new_prog;
10799 insn = new_prog->insnsi + i + delta;
79741b3b
AS
10800 continue;
10801 }
e245c5c6 10802
89c63074 10803 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
09772d92
DB
10804 * and other inlining handlers are currently limited to 64 bit
10805 * only.
89c63074 10806 */
60b58afc 10807 if (prog->jit_requested && BITS_PER_LONG == 64 &&
09772d92
DB
10808 (insn->imm == BPF_FUNC_map_lookup_elem ||
10809 insn->imm == BPF_FUNC_map_update_elem ||
84430d42
DB
10810 insn->imm == BPF_FUNC_map_delete_elem ||
10811 insn->imm == BPF_FUNC_map_push_elem ||
10812 insn->imm == BPF_FUNC_map_pop_elem ||
10813 insn->imm == BPF_FUNC_map_peek_elem)) {
c93552c4
DB
10814 aux = &env->insn_aux_data[i + delta];
10815 if (bpf_map_ptr_poisoned(aux))
10816 goto patch_call_imm;
10817
d2e4c1e6 10818 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
09772d92
DB
10819 ops = map_ptr->ops;
10820 if (insn->imm == BPF_FUNC_map_lookup_elem &&
10821 ops->map_gen_lookup) {
10822 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
10823 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
10824 verbose(env, "bpf verifier is misconfigured\n");
10825 return -EINVAL;
10826 }
81ed18ab 10827
09772d92
DB
10828 new_prog = bpf_patch_insn_data(env, i + delta,
10829 insn_buf, cnt);
10830 if (!new_prog)
10831 return -ENOMEM;
81ed18ab 10832
09772d92
DB
10833 delta += cnt - 1;
10834 env->prog = prog = new_prog;
10835 insn = new_prog->insnsi + i + delta;
10836 continue;
10837 }
81ed18ab 10838
09772d92
DB
10839 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
10840 (void *(*)(struct bpf_map *map, void *key))NULL));
10841 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
10842 (int (*)(struct bpf_map *map, void *key))NULL));
10843 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
10844 (int (*)(struct bpf_map *map, void *key, void *value,
10845 u64 flags))NULL));
84430d42
DB
10846 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
10847 (int (*)(struct bpf_map *map, void *value,
10848 u64 flags))NULL));
10849 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
10850 (int (*)(struct bpf_map *map, void *value))NULL));
10851 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
10852 (int (*)(struct bpf_map *map, void *value))NULL));
10853
09772d92
DB
10854 switch (insn->imm) {
10855 case BPF_FUNC_map_lookup_elem:
10856 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
10857 __bpf_call_base;
10858 continue;
10859 case BPF_FUNC_map_update_elem:
10860 insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
10861 __bpf_call_base;
10862 continue;
10863 case BPF_FUNC_map_delete_elem:
10864 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
10865 __bpf_call_base;
10866 continue;
84430d42
DB
10867 case BPF_FUNC_map_push_elem:
10868 insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
10869 __bpf_call_base;
10870 continue;
10871 case BPF_FUNC_map_pop_elem:
10872 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
10873 __bpf_call_base;
10874 continue;
10875 case BPF_FUNC_map_peek_elem:
10876 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
10877 __bpf_call_base;
10878 continue;
09772d92 10879 }
81ed18ab 10880
09772d92 10881 goto patch_call_imm;
81ed18ab
AS
10882 }
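
		/* Illustrative note (editorial addition): for maps that
		 * provide map_gen_lookup(), the generic
		 * 'call bpf_map_lookup_elem' is replaced inline with the
		 * map's own bounds check + pointer math; for the remaining
		 * helpers the switch above simply repoints imm from the
		 * generic helper to the map's op, e.g.:
		 *
		 *   insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
		 *               __bpf_call_base;
		 *
		 * which turns the indirect helper dispatch into a direct call.
		 */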
10883
5576b991
MKL
10884 if (prog->jit_requested && BITS_PER_LONG == 64 &&
10885 insn->imm == BPF_FUNC_jiffies64) {
10886 struct bpf_insn ld_jiffies_addr[2] = {
10887 BPF_LD_IMM64(BPF_REG_0,
10888 (unsigned long)&jiffies),
10889 };
10890
10891 insn_buf[0] = ld_jiffies_addr[0];
10892 insn_buf[1] = ld_jiffies_addr[1];
10893 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
10894 BPF_REG_0, 0);
10895 cnt = 3;
10896
10897 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
10898 cnt);
10899 if (!new_prog)
10900 return -ENOMEM;
10901
10902 delta += cnt - 1;
10903 env->prog = prog = new_prog;
10904 insn = new_prog->insnsi + i + delta;
10905 continue;
10906 }
10907
81ed18ab 10908patch_call_imm:
5e43f899 10909 fn = env->ops->get_func_proto(insn->imm, env->prog);
79741b3b
AS
10910 /* all functions that have prototype and verifier allowed
10911 * programs to call them, must be real in-kernel functions
10912 */
10913 if (!fn->func) {
61bd5218
JK
10914 verbose(env,
10915 "kernel subsystem misconfigured func %s#%d\n",
79741b3b
AS
10916 func_id_name(insn->imm), insn->imm);
10917 return -EFAULT;
e245c5c6 10918 }
79741b3b 10919 insn->imm = fn->func - __bpf_call_base;
e245c5c6 10920 }
e245c5c6 10921
d2e4c1e6
DB
10922 /* Since poke tab is now finalized, publish aux to tracker. */
10923 for (i = 0; i < prog->aux->size_poke_tab; i++) {
10924 map_ptr = prog->aux->poke_tab[i].tail_call.map;
10925 if (!map_ptr->ops->map_poke_track ||
10926 !map_ptr->ops->map_poke_untrack ||
10927 !map_ptr->ops->map_poke_run) {
10928 verbose(env, "bpf verifier is misconfigured\n");
10929 return -EINVAL;
10930 }
10931
10932 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
10933 if (ret < 0) {
10934 verbose(env, "tracking tail call prog failed\n");
10935 return ret;
10936 }
10937 }
10938
79741b3b
AS
10939 return 0;
10940}
e245c5c6 10941
58e2af8b 10942static void free_states(struct bpf_verifier_env *env)
f1bca824 10943{
58e2af8b 10944 struct bpf_verifier_state_list *sl, *sln;
f1bca824
AS
10945 int i;
10946
9f4686c4
AS
10947 sl = env->free_list;
10948 while (sl) {
10949 sln = sl->next;
10950 free_verifier_state(&sl->state, false);
10951 kfree(sl);
10952 sl = sln;
10953 }
51c39bb1 10954 env->free_list = NULL;
9f4686c4 10955
f1bca824
AS
10956 if (!env->explored_states)
10957 return;
10958
dc2a4ebc 10959 for (i = 0; i < state_htab_size(env); i++) {
f1bca824
AS
10960 sl = env->explored_states[i];
10961
a8f500af
AS
10962 while (sl) {
10963 sln = sl->next;
10964 free_verifier_state(&sl->state, false);
10965 kfree(sl);
10966 sl = sln;
10967 }
51c39bb1 10968 env->explored_states[i] = NULL;
f1bca824 10969 }
51c39bb1 10970}
f1bca824 10971
51c39bb1
AS
10972/* The verifier is using insn_aux_data[] to store temporary data during
10973 * verification and to store information for passes that run after the
10974 * verification like dead code sanitization. do_check_common() for subprogram N
10975 * may analyze many other subprograms. sanitize_insn_aux_data() clears all
10976 * temporary data after do_check_common() finds that subprogram N cannot be
10977 * verified independently. pass_cnt counts the number of times
10978 * do_check_common() was run and insn->aux->seen tells the pass number
10979 * insn_aux_data was touched. These variables are compared to clear temporary
10980 * data from failed pass. For testing and experiments do_check_common() can be
10981 * run multiple times even when prior attempt to verify is unsuccessful.
10982 */
10983static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
10984{
10985 struct bpf_insn *insn = env->prog->insnsi;
10986 struct bpf_insn_aux_data *aux;
10987 int i, class;
10988
10989 for (i = 0; i < env->prog->len; i++) {
10990 class = BPF_CLASS(insn[i].code);
10991 if (class != BPF_LDX && class != BPF_STX)
10992 continue;
10993 aux = &env->insn_aux_data[i];
10994 if (aux->seen != env->pass_cnt)
10995 continue;
10996 memset(aux, 0, offsetof(typeof(*aux), orig_idx));
10997 }
f1bca824
AS
10998}
10999
51c39bb1
AS
11000static int do_check_common(struct bpf_verifier_env *env, int subprog)
11001{
6f8a57cc 11002 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
51c39bb1
AS
11003 struct bpf_verifier_state *state;
11004 struct bpf_reg_state *regs;
11005 int ret, i;
11006
11007 env->prev_linfo = NULL;
11008 env->pass_cnt++;
11009
11010 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
11011 if (!state)
11012 return -ENOMEM;
11013 state->curframe = 0;
11014 state->speculative = false;
11015 state->branches = 1;
11016 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
11017 if (!state->frame[0]) {
11018 kfree(state);
11019 return -ENOMEM;
11020 }
11021 env->cur_state = state;
11022 init_func_state(env, state->frame[0],
11023 BPF_MAIN_FUNC /* callsite */,
11024 0 /* frameno */,
11025 subprog);
11026
11027 regs = state->frame[state->curframe]->regs;
be8704ff 11028 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
51c39bb1
AS
11029 ret = btf_prepare_func_args(env, subprog, regs);
11030 if (ret)
11031 goto out;
11032 for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
11033 if (regs[i].type == PTR_TO_CTX)
11034 mark_reg_known_zero(env, regs, i);
11035 else if (regs[i].type == SCALAR_VALUE)
11036 mark_reg_unknown(env, regs, i);
11037 }
11038 } else {
11039 /* 1st arg to a function */
11040 regs[BPF_REG_1].type = PTR_TO_CTX;
11041 mark_reg_known_zero(env, regs, BPF_REG_1);
11042 ret = btf_check_func_arg_match(env, subprog, regs);
11043 if (ret == -EFAULT)
11044 /* unlikely verifier bug. abort.
11045 * ret == 0 and ret < 0 are sadly acceptable for
 11046 * the main() function due to backward compatibility.
 11047 * E.g. a socket filter program may be written as:
11048 * int bpf_prog(struct pt_regs *ctx)
11049 * and never dereference that ctx in the program.
11050 * 'struct pt_regs' is a type mismatch for socket
11051 * filter that should be using 'struct __sk_buff'.
11052 */
11053 goto out;
11054 }
11055
11056 ret = do_check(env);
11057out:
f59bbfc2
AS
11058 /* check for NULL is necessary, since cur_state can be freed inside
11059 * do_check() under memory pressure.
11060 */
11061 if (env->cur_state) {
11062 free_verifier_state(env->cur_state, true);
11063 env->cur_state = NULL;
11064 }
6f8a57cc
AN
11065 while (!pop_stack(env, NULL, NULL, false));
11066 if (!ret && pop_log)
11067 bpf_vlog_reset(&env->log, 0);
51c39bb1
AS
11068 free_states(env);
11069 if (ret)
11070 /* clean aux data in case subprog was rejected */
11071 sanitize_insn_aux_data(env);
11072 return ret;
11073}
11074
11075/* Verify all global functions in a BPF program one by one based on their BTF.
11076 * All global functions must pass verification. Otherwise the whole program is rejected.
11077 * Consider:
11078 * int bar(int);
11079 * int foo(int f)
11080 * {
11081 * return bar(f);
11082 * }
11083 * int bar(int b)
11084 * {
11085 * ...
11086 * }
11087 * foo() will be verified first for R1=any_scalar_value. During verification it
11088 * will be assumed that bar() already verified successfully and call to bar()
11089 * from foo() will be checked for type match only. Later bar() will be verified
11090 * independently to check that it's safe for R1=any_scalar_value.
11091 */
11092static int do_check_subprogs(struct bpf_verifier_env *env)
11093{
11094 struct bpf_prog_aux *aux = env->prog->aux;
11095 int i, ret;
11096
11097 if (!aux->func_info)
11098 return 0;
11099
11100 for (i = 1; i < env->subprog_cnt; i++) {
11101 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
11102 continue;
11103 env->insn_idx = env->subprog_info[i].start;
11104 WARN_ON_ONCE(env->insn_idx == 0);
11105 ret = do_check_common(env, i);
11106 if (ret) {
11107 return ret;
11108 } else if (env->log.level & BPF_LOG_LEVEL) {
11109 verbose(env,
11110 "Func#%d is safe for any args that match its prototype\n",
11111 i);
11112 }
11113 }
11114 return 0;
11115}
11116
11117static int do_check_main(struct bpf_verifier_env *env)
11118{
11119 int ret;
11120
11121 env->insn_idx = 0;
11122 ret = do_check_common(env, 0);
11123 if (!ret)
11124 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
11125 return ret;
11126}
11127
11128
06ee7115
AS
11129static void print_verification_stats(struct bpf_verifier_env *env)
11130{
11131 int i;
11132
11133 if (env->log.level & BPF_LOG_STATS) {
11134 verbose(env, "verification time %lld usec\n",
11135 div_u64(env->verification_time, 1000));
11136 verbose(env, "stack depth ");
11137 for (i = 0; i < env->subprog_cnt; i++) {
11138 u32 depth = env->subprog_info[i].stack_depth;
11139
11140 verbose(env, "%d", depth);
11141 if (i + 1 < env->subprog_cnt)
11142 verbose(env, "+");
11143 }
11144 verbose(env, "\n");
11145 }
11146 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
11147 "total_states %d peak_states %d mark_read %d\n",
11148 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
11149 env->max_states_per_insn, env->total_states,
11150 env->peak_states, env->longest_mark_read_walk);
f1bca824
AS
11151}
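
/* Example output shape (editorial addition, values made up): the
 * "processed ..." line is always printed, the first two only with
 * BPF_LOG_STATS set, e.g. for a prog with three subprogs:
 *
 *   verification time 1234 usec
 *   stack depth 64+0+32
 *   processed 4378 insns (limit 1000000) max_states_per_insn 4 total_states 83 peak_states 83 mark_read 31
 */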
11152
27ae7997
MKL
11153static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
11154{
11155 const struct btf_type *t, *func_proto;
11156 const struct bpf_struct_ops *st_ops;
11157 const struct btf_member *member;
11158 struct bpf_prog *prog = env->prog;
11159 u32 btf_id, member_idx;
11160 const char *mname;
11161
11162 btf_id = prog->aux->attach_btf_id;
11163 st_ops = bpf_struct_ops_find(btf_id);
11164 if (!st_ops) {
11165 verbose(env, "attach_btf_id %u is not a supported struct\n",
11166 btf_id);
11167 return -ENOTSUPP;
11168 }
11169
11170 t = st_ops->type;
11171 member_idx = prog->expected_attach_type;
11172 if (member_idx >= btf_type_vlen(t)) {
11173 verbose(env, "attach to invalid member idx %u of struct %s\n",
11174 member_idx, st_ops->name);
11175 return -EINVAL;
11176 }
11177
11178 member = &btf_type_member(t)[member_idx];
11179 mname = btf_name_by_offset(btf_vmlinux, member->name_off);
11180 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
11181 NULL);
11182 if (!func_proto) {
11183 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
11184 mname, member_idx, st_ops->name);
11185 return -EINVAL;
11186 }
11187
11188 if (st_ops->check_member) {
11189 int err = st_ops->check_member(t, member);
11190
11191 if (err) {
11192 verbose(env, "attach to unsupported member %s of struct %s\n",
11193 mname, st_ops->name);
11194 return err;
11195 }
11196 }
11197
11198 prog->aux->attach_func_proto = func_proto;
11199 prog->aux->attach_func_name = mname;
11200 env->ops = st_ops->verifier_ops;
11201
11202 return 0;
11203}
6ba43b76
KS
11204#define SECURITY_PREFIX "security_"
11205
f7b12b6f 11206static int check_attach_modify_return(unsigned long addr, const char *func_name)
6ba43b76 11207{
69191754 11208 if (within_error_injection_list(addr) ||
f7b12b6f 11209 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
6ba43b76 11210 return 0;
6ba43b76 11211
6ba43b76
KS
11212 return -EINVAL;
11213}
27ae7997 11214
1e6c62a8
AS
 11215/* non-exhaustive list of sleepable bpf_lsm_*() functions */
11216BTF_SET_START(btf_sleepable_lsm_hooks)
11217#ifdef CONFIG_BPF_LSM
1e6c62a8 11218BTF_ID(func, bpf_lsm_bprm_committed_creds)
29523c5e
AS
11219#else
11220BTF_ID_UNUSED
1e6c62a8
AS
11221#endif
11222BTF_SET_END(btf_sleepable_lsm_hooks)
11223
11224static int check_sleepable_lsm_hook(u32 btf_id)
11225{
11226 return btf_id_set_contains(&btf_sleepable_lsm_hooks, btf_id);
11227}
11228
11229/* list of non-sleepable functions that are otherwise on
11230 * ALLOW_ERROR_INJECTION list
11231 */
11232BTF_SET_START(btf_non_sleepable_error_inject)
11233/* Three functions below can be called from sleepable and non-sleepable context.
11234 * Assume non-sleepable from bpf safety point of view.
11235 */
11236BTF_ID(func, __add_to_page_cache_locked)
11237BTF_ID(func, should_fail_alloc_page)
11238BTF_ID(func, should_failslab)
11239BTF_SET_END(btf_non_sleepable_error_inject)
11240
11241static int check_non_sleepable_error_inject(u32 btf_id)
11242{
11243 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
11244}
11245
f7b12b6f
THJ
11246int bpf_check_attach_target(struct bpf_verifier_log *log,
11247 const struct bpf_prog *prog,
11248 const struct bpf_prog *tgt_prog,
11249 u32 btf_id,
11250 struct bpf_attach_target_info *tgt_info)
38207291 11251{
be8704ff 11252 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
f1b9509c 11253 const char prefix[] = "btf_trace_";
5b92a28a 11254 int ret = 0, subprog = -1, i;
38207291 11255 const struct btf_type *t;
5b92a28a 11256 bool conservative = true;
38207291 11257 const char *tname;
5b92a28a 11258 struct btf *btf;
f7b12b6f 11259 long addr = 0;
38207291 11260
f1b9509c 11261 if (!btf_id) {
efc68158 11262 bpf_log(log, "Tracing programs must provide btf_id\n");
f1b9509c
AS
11263 return -EINVAL;
11264 }
f7b12b6f 11265 btf = tgt_prog ? tgt_prog->aux->btf : btf_vmlinux;
5b92a28a 11266 if (!btf) {
efc68158 11267 bpf_log(log,
5b92a28a
AS
11268 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
11269 return -EINVAL;
11270 }
11271 t = btf_type_by_id(btf, btf_id);
f1b9509c 11272 if (!t) {
efc68158 11273 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
f1b9509c
AS
11274 return -EINVAL;
11275 }
5b92a28a 11276 tname = btf_name_by_offset(btf, t->name_off);
f1b9509c 11277 if (!tname) {
efc68158 11278 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
f1b9509c
AS
11279 return -EINVAL;
11280 }
5b92a28a
AS
11281 if (tgt_prog) {
11282 struct bpf_prog_aux *aux = tgt_prog->aux;
11283
11284 for (i = 0; i < aux->func_info_cnt; i++)
11285 if (aux->func_info[i].type_id == btf_id) {
11286 subprog = i;
11287 break;
11288 }
11289 if (subprog == -1) {
efc68158 11290 bpf_log(log, "Subprog %s doesn't exist\n", tname);
5b92a28a
AS
11291 return -EINVAL;
11292 }
11293 conservative = aux->func_info_aux[subprog].unreliable;
be8704ff
AS
11294 if (prog_extension) {
11295 if (conservative) {
efc68158 11296 bpf_log(log,
be8704ff
AS
11297 "Cannot replace static functions\n");
11298 return -EINVAL;
11299 }
11300 if (!prog->jit_requested) {
efc68158 11301 bpf_log(log,
be8704ff
AS
11302 "Extension programs should be JITed\n");
11303 return -EINVAL;
11304 }
be8704ff
AS
11305 }
11306 if (!tgt_prog->jited) {
efc68158 11307 bpf_log(log, "Can attach to only JITed progs\n");
be8704ff
AS
11308 return -EINVAL;
11309 }
11310 if (tgt_prog->type == prog->type) {
11311 /* Cannot fentry/fexit another fentry/fexit program.
11312 * Cannot attach program extension to another extension.
11313 * It's ok to attach fentry/fexit to extension program.
11314 */
efc68158 11315 bpf_log(log, "Cannot recursively attach\n");
be8704ff
AS
11316 return -EINVAL;
11317 }
11318 if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
11319 prog_extension &&
11320 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
11321 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
11322 * Program extensions can extend all program types
11323 * except fentry/fexit. The reason is the following.
11324 * The fentry/fexit programs are used for performance
11325 * analysis and stats, and can be attached to any program
11326 * type except themselves. When an extension program
11327 * replaces an XDP function, it is necessary to allow
11328 * performance analysis of all functions: both the
11329 * original XDP program and its program extension. Hence
11330 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
11331 * allowed. If extending fentry/fexit were allowed, it
11332 * would be possible to create a long call chain
11333 * fentry->extension->fentry->extension beyond any
11334 * reasonable stack size. Hence extending fentry is not
11335 * allowed.
11336 */
efc68158 11337 bpf_log(log, "Cannot extend fentry/fexit\n");
be8704ff
AS
11338 return -EINVAL;
11339 }
5b92a28a 11340 } else {
be8704ff 11341 if (prog_extension) {
efc68158 11342 bpf_log(log, "Cannot replace kernel functions\n");
be8704ff
AS
11343 return -EINVAL;
11344 }
5b92a28a 11345 }
f1b9509c
AS
11346
11347 switch (prog->expected_attach_type) {
11348 case BPF_TRACE_RAW_TP:
5b92a28a 11349 if (tgt_prog) {
efc68158 11350 bpf_log(log,
5b92a28a
AS
11351 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
11352 return -EINVAL;
11353 }
38207291 11354 if (!btf_type_is_typedef(t)) {
efc68158 11355 bpf_log(log, "attach_btf_id %u is not a typedef\n",
38207291
MKL
11356 btf_id);
11357 return -EINVAL;
11358 }
f1b9509c 11359 if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
efc68158 11360 bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
38207291
MKL
11361 btf_id, tname);
11362 return -EINVAL;
11363 }
11364 tname += sizeof(prefix) - 1;
5b92a28a 11365 t = btf_type_by_id(btf, t->type);
38207291
MKL
11366 if (!btf_type_is_ptr(t))
11367 /* should never happen in a valid vmlinux build */
11368 return -EINVAL;
5b92a28a 11369 t = btf_type_by_id(btf, t->type);
38207291
MKL
11370 if (!btf_type_is_func_proto(t))
11371 /* should never happen in a valid vmlinux build */
11372 return -EINVAL;
11373
f7b12b6f 11374 break;
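
	/* Example type chain for a raw tracepoint target, assuming the
	 * tracepoint "xdp_exception": attach_btf_id names the typedef
	 * btf_trace_xdp_exception, which is a pointer to the func_proto
	 * describing the tracepoint arguments; the walk above peels
	 * typedef -> ptr -> func_proto.
	 */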
15d83c4d
YS
11375 case BPF_TRACE_ITER:
11376 if (!btf_type_is_func(t)) {
efc68158 11377 bpf_log(log, "attach_btf_id %u is not a function\n",
15d83c4d
YS
11378 btf_id);
11379 return -EINVAL;
11380 }
11381 t = btf_type_by_id(btf, t->type);
11382 if (!btf_type_is_func_proto(t))
11383 return -EINVAL;
f7b12b6f
THJ
11384 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
11385 if (ret)
11386 return ret;
11387 break;
be8704ff
AS
11388 default:
11389 if (!prog_extension)
11390 return -EINVAL;
df561f66 11391 fallthrough;
ae240823 11392 case BPF_MODIFY_RETURN:
9e4e01df 11393 case BPF_LSM_MAC:
fec56f58
AS
11394 case BPF_TRACE_FENTRY:
11395 case BPF_TRACE_FEXIT:
11396 if (!btf_type_is_func(t)) {
efc68158 11397 bpf_log(log, "attach_btf_id %u is not a function\n",
fec56f58
AS
11398 btf_id);
11399 return -EINVAL;
11400 }
be8704ff 11401 if (prog_extension &&
efc68158 11402 btf_check_type_match(log, prog, btf, t))
be8704ff 11403 return -EINVAL;
5b92a28a 11404 t = btf_type_by_id(btf, t->type);
fec56f58
AS
11405 if (!btf_type_is_func_proto(t))
11406 return -EINVAL;
f7b12b6f
THJ
11407
11408 if (tgt_prog && conservative)
5b92a28a 11409 t = NULL;
f7b12b6f
THJ
11410
11411 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
fec56f58 11412 if (ret < 0)
f7b12b6f
THJ
11413 return ret;
11414
5b92a28a 11415 if (tgt_prog) {
e9eeec58
YS
11416 if (subprog == 0)
11417 addr = (long) tgt_prog->bpf_func;
11418 else
11419 addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
5b92a28a
AS
11420 } else {
11421 addr = kallsyms_lookup_name(tname);
11422 if (!addr) {
efc68158 11423 bpf_log(log,
5b92a28a
AS
11424 "The address of function %s cannot be found\n",
11425 tname);
f7b12b6f 11426 return -ENOENT;
5b92a28a 11427 }
fec56f58 11428 }
18644cec 11429
1e6c62a8
AS
11430 if (prog->aux->sleepable) {
11431 ret = -EINVAL;
11432 switch (prog->type) {
11433 case BPF_PROG_TYPE_TRACING:
11434 /* fentry/fexit/fmod_ret progs can be sleepable only if they are
11435 * attached to ALLOW_ERROR_INJECTION functions and are not in the denylist.
11436 */
11437 if (!check_non_sleepable_error_inject(btf_id) &&
11438 within_error_injection_list(addr))
11439 ret = 0;
11440 break;
11441 case BPF_PROG_TYPE_LSM:
11442 /* LSM progs check that they are attached to bpf_lsm_*() funcs.
11443 * Only some of them are sleepable.
11444 */
11445 if (check_sleepable_lsm_hook(btf_id))
11446 ret = 0;
11447 break;
11448 default:
11449 break;
11450 }
f7b12b6f
THJ
11451 if (ret) {
11452 bpf_log(log, "%s is not sleepable\n", tname);
11453 return ret;
11454 }
1e6c62a8 11455 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
1af9270e 11456 if (tgt_prog) {
efc68158 11457 bpf_log(log, "can't modify return codes of BPF programs\n");
f7b12b6f
THJ
11458 return -EINVAL;
11459 }
11460 ret = check_attach_modify_return(addr, tname);
11461 if (ret) {
11462 bpf_log(log, "%s() is not modifiable\n", tname);
11463 return ret;
1af9270e 11464 }
18644cec 11465 }
f7b12b6f
THJ
11466
11467 break;
11468 }
11469 tgt_info->tgt_addr = addr;
11470 tgt_info->tgt_name = tname;
11471 tgt_info->tgt_type = t;
11472 return 0;
11473}
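
/* Userspace view of the contract enforced by bpf_check_attach_target():
 * a hedged sketch (not kernel code) of loading a BPF_PROG_TYPE_TRACING
 * program that names its target via attach_btf_id. The instruction body
 * and the BTF ID value are placeholders the caller must supply.
 */
#include <unistd.h>
#include <linux/bpf.h>
#include <sys/syscall.h>

static int load_fentry_prog(const struct bpf_insn *insns, __u32 insn_cnt,
			    __u32 target_btf_id)
{
	union bpf_attr attr = {};

	attr.prog_type = BPF_PROG_TYPE_TRACING;
	attr.expected_attach_type = BPF_TRACE_FENTRY;
	/* BTF ID of the kernel function to attach to; validated above */
	attr.attach_btf_id = target_btf_id;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}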
11474
11475static int check_attach_btf_id(struct bpf_verifier_env *env)
11476{
11477 struct bpf_prog *prog = env->prog;
11478 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
11479 struct bpf_attach_target_info tgt_info = {};
11480 u32 btf_id = prog->aux->attach_btf_id;
11481 struct bpf_trampoline *tr;
11482 int ret;
11483 u64 key;
11484
11485 if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
11486 prog->type != BPF_PROG_TYPE_LSM) {
11487 verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
11488 return -EINVAL;
11489 }
11490
11491 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
11492 return check_struct_ops_btf_id(env);
11493
11494 if (prog->type != BPF_PROG_TYPE_TRACING &&
11495 prog->type != BPF_PROG_TYPE_LSM &&
11496 prog->type != BPF_PROG_TYPE_EXT)
11497 return 0;
11498
11499 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
11500 if (ret)
fec56f58 11501 return ret;
f7b12b6f
THJ
11502
11503 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
11504 env->ops = bpf_verifier_ops[tgt_prog->type];
11505 prog->expected_attach_type = tgt_prog->expected_attach_type;
11506 }
11507
11508 /* store info about the attachment target that will be used later */
11509 prog->aux->attach_func_proto = tgt_info.tgt_type;
11510 prog->aux->attach_func_name = tgt_info.tgt_name;
11511
11512 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
11513 prog->aux->attach_btf_trace = true;
11514 return 0;
11515 } else if (prog->expected_attach_type == BPF_TRACE_ITER) {
11516 if (!bpf_iter_prog_supported(prog))
11517 return -EINVAL;
11518 return 0;
11519 }
11520
11521 if (prog->type == BPF_PROG_TYPE_LSM) {
11522 ret = bpf_lsm_verify_prog(&env->log, prog);
11523 if (ret < 0)
11524 return ret;
38207291 11525 }
f7b12b6f
THJ
11526
11527 key = bpf_trampoline_compute_key(tgt_prog, btf_id);
11528 tr = bpf_trampoline_get(key, &tgt_info);
11529 if (!tr)
11530 return -ENOMEM;
11531
11532 prog->aux->trampoline = tr;
11533 return 0;
38207291
MKL
11534}
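
/* A sketch of the trampoline key computed above; it mirrors the
 * bpf_trampoline_compute_key() helper from bpf.h: the upper 32 bits
 * carry the target prog ID when attaching to another BPF program, the
 * lower 32 bits carry the BTF type ID, and a kernel-function target
 * (tgt_prog == NULL) uses the bare BTF ID.
 */
static inline u64 example_trampoline_key(u32 tgt_prog_id, u32 btf_id)
{
	/* tgt_prog_id of 0 stands in for "no target prog" here */
	return ((u64)tgt_prog_id << 32) | btf_id;
}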
11535
838e9690
YS
11536int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
11537 union bpf_attr __user *uattr)
51580e79 11538{
06ee7115 11539 u64 start_time = ktime_get_ns();
58e2af8b 11540 struct bpf_verifier_env *env;
b9193c1b 11541 struct bpf_verifier_log *log;
9e4c24e7 11542 int i, len, ret = -EINVAL;
e2ae4ca2 11543 bool is_priv;
51580e79 11544
eba0c929
AB
11545 /* no program is valid if there are no verifier ops */
11546 if (ARRAY_SIZE(bpf_verifier_ops) == 0)
11547 return -EINVAL;
11548
58e2af8b 11549 /* 'struct bpf_verifier_env' can be global, but since it's not small,
cbd35700
AS
11550 * allocate/free it every time bpf_check() is called
11551 */
58e2af8b 11552 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
cbd35700
AS
11553 if (!env)
11554 return -ENOMEM;
61bd5218 11555 log = &env->log;
cbd35700 11556
9e4c24e7 11557 len = (*prog)->len;
fad953ce 11558 env->insn_aux_data =
9e4c24e7 11559 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
3df126f3
JK
11560 ret = -ENOMEM;
11561 if (!env->insn_aux_data)
11562 goto err_free_env;
9e4c24e7
JK
11563 for (i = 0; i < len; i++)
11564 env->insn_aux_data[i].orig_idx = i;
9bac3d6d 11565 env->prog = *prog;
00176a34 11566 env->ops = bpf_verifier_ops[env->prog->type];
2c78ee89 11567 is_priv = bpf_capable();
0246e64d 11568
8580ac94
AS
11569 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
11570 mutex_lock(&bpf_verifier_lock);
11571 if (!btf_vmlinux)
11572 btf_vmlinux = btf_parse_vmlinux();
11573 mutex_unlock(&bpf_verifier_lock);
11574 }
11575
cbd35700 11576 /* grab the mutex to protect a few globals used by the verifier */
45a73c17
AS
11577 if (!is_priv)
11578 mutex_lock(&bpf_verifier_lock);
cbd35700
AS
11579
11580 if (attr->log_level || attr->log_buf || attr->log_size) {
11581 /* user requested verbose verifier output
11582 * and supplied a buffer to store the verification trace
11583 */
e7bf8249
JK
11584 log->level = attr->log_level;
11585 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
11586 log->len_total = attr->log_size;
cbd35700
AS
11587
11588 ret = -EINVAL;
e7bf8249 11589 /* log attributes have to be sane */
7a9f5c65 11590 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
06ee7115 11591 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
3df126f3 11592 goto err_unlock;
cbd35700 11593 }
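
	/* Illustrative attr values that satisfy the sanity check above
	 * (an example, not additional requirements):
	 *   log_level = 1, log_size = 1 << 20, log_buf = user buffer
	 * i.e. a non-zero level within BPF_LOG_MASK, a size between
	 * 128 bytes and UINT_MAX >> 2, and a non-NULL buffer pointer.
	 */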
1ad2f583 11594
8580ac94
AS
11595 if (IS_ERR(btf_vmlinux)) {
11596 /* Either gcc, pahole, or the kernel is broken. */
11597 verbose(env, "in-kernel BTF is malformed\n");
11598 ret = PTR_ERR(btf_vmlinux);
38207291 11599 goto skip_full_check;
8580ac94
AS
11600 }
11601
1ad2f583
DB
11602 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
11603 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
e07b98d9 11604 env->strict_alignment = true;
e9ee9efc
DM
11605 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
11606 env->strict_alignment = false;
cbd35700 11607
2c78ee89 11608 env->allow_ptr_leaks = bpf_allow_ptr_leaks();
41c48f3a 11609 env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
2c78ee89
AS
11610 env->bypass_spec_v1 = bpf_bypass_spec_v1();
11611 env->bypass_spec_v4 = bpf_bypass_spec_v4();
11612 env->bpf_capable = bpf_capable();
e2ae4ca2 11613
10d274e8
AS
11614 if (is_priv)
11615 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
11616
f4e3ec0d
JK
11617 ret = replace_map_fd_with_map_ptr(env);
11618 if (ret < 0)
11619 goto skip_full_check;
11620
cae1927c 11621 if (bpf_prog_is_dev_bound(env->prog->aux)) {
a40a2632 11622 ret = bpf_prog_offload_verifier_prep(env->prog);
ab3f0063 11623 if (ret)
f4e3ec0d 11624 goto skip_full_check;
ab3f0063
JK
11625 }
11626
dc2a4ebc 11627 env->explored_states = kvcalloc(state_htab_size(env),
58e2af8b 11628 sizeof(struct bpf_verifier_state_list *),
f1bca824
AS
11629 GFP_USER);
11630 ret = -ENOMEM;
11631 if (!env->explored_states)
11632 goto skip_full_check;
11633
d9762e84 11634 ret = check_subprogs(env);
475fb78f
AS
11635 if (ret < 0)
11636 goto skip_full_check;
11637
c454a46b 11638 ret = check_btf_info(env, attr, uattr);
838e9690
YS
11639 if (ret < 0)
11640 goto skip_full_check;
11641
be8704ff
AS
11642 ret = check_attach_btf_id(env);
11643 if (ret)
11644 goto skip_full_check;
11645
d9762e84
MKL
11646 ret = check_cfg(env);
11647 if (ret < 0)
11648 goto skip_full_check;
11649
51c39bb1
AS
11650 ret = do_check_subprogs(env);
11651 ret = ret ?: do_check_main(env);
cbd35700 11652
c941ce9c
QM
11653 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
11654 ret = bpf_prog_offload_finalize(env);
11655
0246e64d 11656skip_full_check:
51c39bb1 11657 kvfree(env->explored_states);
0246e64d 11658
c131187d 11659 if (ret == 0)
9b38c405 11660 ret = check_max_stack_depth(env);
c131187d 11661
9b38c405 11662 /* instruction rewrites happen after this point */
e2ae4ca2
JK
11663 if (is_priv) {
11664 if (ret == 0)
11665 opt_hard_wire_dead_code_branches(env);
52875a04
JK
11666 if (ret == 0)
11667 ret = opt_remove_dead_code(env);
a1b14abc
JK
11668 if (ret == 0)
11669 ret = opt_remove_nops(env);
52875a04
JK
11670 } else {
11671 if (ret == 0)
11672 sanitize_dead_code(env);
e2ae4ca2
JK
11673 }
11674
9bac3d6d
AS
11675 if (ret == 0)
11676 /* program is valid, convert *(u32*)(ctx + off) accesses */
11677 ret = convert_ctx_accesses(env);
11678
e245c5c6 11679 if (ret == 0)
79741b3b 11680 ret = fixup_bpf_calls(env);
e245c5c6 11681
a4b1d3c1
JW
11682 /* do 32-bit optimization after insn patching is done so the patched
11683 * insns can be handled correctly.
11684 */
d6c2308c
JW
11685 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
11686 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
11687 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
11688 : false;
a4b1d3c1
JW
11689 }
11690
1ea47e01
AS
11691 if (ret == 0)
11692 ret = fixup_call_args(env);
11693
06ee7115
AS
11694 env->verification_time = ktime_get_ns() - start_time;
11695 print_verification_stats(env);
11696
a2a7d570 11697 if (log->level && bpf_verifier_log_full(log))
cbd35700 11698 ret = -ENOSPC;
a2a7d570 11699 if (log->level && !log->ubuf) {
cbd35700 11700 ret = -EFAULT;
a2a7d570 11701 goto err_release_maps;
cbd35700
AS
11702 }
11703
0246e64d
AS
11704 if (ret == 0 && env->used_map_cnt) {
11705 /* if program passed verifier, update used_maps in bpf_prog_info */
9bac3d6d
AS
11706 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
11707 sizeof(env->used_maps[0]),
11708 GFP_KERNEL);
0246e64d 11709
9bac3d6d 11710 if (!env->prog->aux->used_maps) {
0246e64d 11711 ret = -ENOMEM;
a2a7d570 11712 goto err_release_maps;
0246e64d
AS
11713 }
11714
9bac3d6d 11715 memcpy(env->prog->aux->used_maps, env->used_maps,
0246e64d 11716 sizeof(env->used_maps[0]) * env->used_map_cnt);
9bac3d6d 11717 env->prog->aux->used_map_cnt = env->used_map_cnt;
0246e64d
AS
11718
11719 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
11720 * bpf_ld_imm64 instructions
11721 */
11722 convert_pseudo_ld_imm64(env);
11723 }
cbd35700 11724
ba64e7d8
YS
11725 if (ret == 0)
11726 adjust_btf_func(env);
11727
a2a7d570 11728err_release_maps:
9bac3d6d 11729 if (!env->prog->aux->used_maps)
0246e64d 11730 /* if we didn't copy map pointers into bpf_prog_info, release
ab7f5bf0 11731 * them now. Otherwise free_used_maps() will release them.
0246e64d
AS
11732 */
11733 release_maps(env);
03f87c0b
THJ
11734
11735 /* extension progs temporarily inherit the attach_type of their targets
11736 * for verification purposes, so set it back to zero before returning
11737 */
11738 if (env->prog->type == BPF_PROG_TYPE_EXT)
11739 env->prog->expected_attach_type = 0;
11740
9bac3d6d 11741 *prog = env->prog;
3df126f3 11742err_unlock:
45a73c17
AS
11743 if (!is_priv)
11744 mutex_unlock(&bpf_verifier_lock);
3df126f3
JK
11745 vfree(env->insn_aux_data);
11746err_free_env:
11747 kfree(env);
51580e79
AS
11748 return ret;
11749}
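
/* A condensed sketch (not the actual syscall.c code) of how the loader
 * path hands a program to bpf_check(); error handling is trimmed and
 * the function name is illustrative.
 */
static struct bpf_prog *example_verify(struct bpf_prog *prog,
				       union bpf_attr *attr,
				       union bpf_attr __user *uattr)
{
	int err;

	/* bpf_check() may patch insns and replace the prog pointer */
	err = bpf_check(&prog, attr, uattr);
	if (err < 0)
		return ERR_PTR(err);
	return prog;
}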