// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
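
/* Editor's illustrative sketch (not part of the original kernel comment):
 * the reference acquire/release discipline described above, as a minimal
 * eBPF fragment built from the BPF_* insn macros. Argument setup for
 * bpf_sk_lookup_tcp() is elided; only the reference flow matters here:
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *    // R0 is now PTR_TO_SOCKET_OR_NULL, tracked by a fresh reference id
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),   // NULL check
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),     // R0/R1 are PTR_TO_SOCKET here
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *    // reference released; reaching BPF_EXIT with it still held would
 *    // make the verifier reject the program
 */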

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
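
/* Editor's note (not in the original source): map_key_state packs three
 * things into a single u64:
 *
 *   bit 63     BPF_MAP_KEY_POISON - key was not a usable constant
 *   bit 62     BPF_MAP_KEY_SEEN   - at least one key has been recorded
 *   bits 0-61  the constant key value last stored, if any
 *
 * which is why bpf_map_key_immediate() just masks off the two flag bits.
 */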

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	s64 msize_smax_value;
	u64 msize_umax_value;
	int ref_obj_id;
	int func_id;
	u32 btf_id;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (log->level == BPF_LOG_KERNEL) {
		pr_err("BPF:%s\n", log->kbuf);
		return;
	}
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_XDP_SOCK;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
	       type == PTR_TO_TCP_SOCK_OR_NULL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
		map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCKET_OR_NULL ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_TCP_SOCK_OR_NULL;
}

static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCK_COMMON;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release;
}

static bool is_acquire_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_lookup_tcp ||
		func_id == BPF_FUNC_sk_lookup_udp ||
		func_id == BPF_FUNC_skc_lookup_tcp;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
		func_id == BPF_FUNC_sk_fullsock;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
	[PTR_TO_SOCK_COMMON]	= "sock_common",
	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
	[PTR_TO_TCP_SOCK]	= "tcp_sock",
	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
	[PTR_TO_TP_BUFFER]	= "tp_buffer",
	[PTR_TO_XDP_SOCK]	= "xdp_sock",
	[PTR_TO_BTF_ID]		= "ptr_",
};

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}
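
/* Editor's note (not in the original source): in verifier log output these
 * liveness marks render as a suffix on the register or slot name, e.g. a
 * register that was written and later read prints as "R1_rw=..." and a
 * write-only one as "R1_w=...".
 */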

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

const char *kernel_type_name(u32 id)
{
	return btf_name_by_offset(btf_vmlinux,
				  btf_type_by_id(btf_vmlinux, id)->name_off);
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			if (t == PTR_TO_BTF_ID)
				verbose(env, "%s", kernel_type_name(reg->btf_id));
			verbose(env, "(id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t))
				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (state->stack[i].slot_type[0] == STACK_SPILL) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", reg_type_str[t]);
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "=%s", types_buf);
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	verbose(env, "\n");
}

#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
static int copy_##NAME##_state(struct bpf_func_state *dst,		\
			       const struct bpf_func_state *src)	\
{									\
	if (!src->FIELD)						\
		return 0;						\
	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
		/* internal bug, make state invalid to reject the program */ \
		memset(dst, 0, sizeof(*dst));				\
		return -EFAULT;						\
	}								\
	memcpy(dst->FIELD, src->FIELD,					\
	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
	return 0;							\
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN

#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
				  bool copy_old)			\
{									\
	u32 old_size = state->COUNT;					\
	struct bpf_##NAME##_state *new_##FIELD;				\
	int slot = size / SIZE;						\
									\
	if (size <= old_size || !size) {				\
		if (copy_old)						\
			return 0;					\
		state->COUNT = slot * SIZE;				\
		if (!size && old_size) {				\
			kfree(state->FIELD);				\
			state->FIELD = NULL;				\
		}							\
		return 0;						\
	}								\
	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
				    GFP_KERNEL);			\
	if (!new_##FIELD)						\
		return -ENOMEM;						\
	if (copy_old) {							\
		if (state->FIELD)					\
			memcpy(new_##FIELD, state->FIELD,		\
			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
		memset(new_##FIELD + old_size / SIZE, 0,		\
		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
	}								\
	state->COUNT = slot * SIZE;					\
	kfree(state->FIELD);						\
	state->FIELD = new_##FIELD;					\
	return 0;							\
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN
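
/* Editor's aid (not in the original source): for readability, the
 * COPY_STATE_FN(reference, acquired_refs, refs, 1) instantiation above
 * expands to roughly:
 *
 *	static int copy_reference_state(struct bpf_func_state *dst,
 *					const struct bpf_func_state *src)
 *	{
 *		if (!src->refs)
 *			return 0;
 *		if (WARN_ON_ONCE(dst->acquired_refs < src->acquired_refs)) {
 *			memset(dst, 0, sizeof(*dst));
 *			return -EFAULT;
 *		}
 *		memcpy(dst->refs, src->refs,
 *		       sizeof(*src->refs) * src->acquired_refs);
 *		return 0;
 *	}
 */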

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. check_stack_write() access from
 * the program calls into realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which realloc_stack_state() copies over. It points to previous
 * bpf_verifier_state which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
			      int refs_size, bool copy_old)
{
	int err = realloc_reference_state(state, refs_size, copy_old);
	if (err)
		return err;
	return realloc_stack_state(state, stack_size, copy_old);
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = realloc_reference_state(state, state->acquired_refs + 1, true);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}
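
/* Editor's sketch (an assumption about call sites outside this excerpt):
 * when a helper for which is_acquire_function() holds returns a new object,
 * the helper-call handler is expected to do roughly
 *
 *	int id = acquire_reference_state(env, insn_idx);
 *
 *	if (id < 0)
 *		return id;
 *	regs[BPF_REG_0].id = id;
 *	regs[BPF_REG_0].ref_obj_id = id;
 *
 * so the id travels with the register until the matching release helper
 * funnels it back into release_reference_state() below.
 */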

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}

static int transfer_reference_state(struct bpf_func_state *dst,
				    struct bpf_func_state *src)
{
	int err = realloc_reference_state(dst, src->acquired_refs, false);
	if (err)
		return err;
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return 0;
}

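/* Editor's note (an assumption; the call sites are outside this excerpt):
 * transfer_reference_state() is the piece that lets acquired references
 * follow the active frame across bpf-to-bpf call boundaries, rather than
 * being lost when a new bpf_func_state is set up or torn down.
 */
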
static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
				 false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
	int i, err;

	if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
		kfree(dst_state->jmp_history);
		dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
		if (!dst_state->jmp_history)
			return -ENOMEM;
	}
	memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 *    instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 *    a new state for a sequence of branches and all such current
		 *    and cloned states will be pointing to a single parent state
		 *    which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;
}

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine.  This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive.  We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative.  We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_intersect(reg->var_off,
				      tnum_range(reg->umin_value,
						 reg->umax_value));
}

static void __reg_bound_offset32(struct bpf_reg_state *reg)
{
	u64 mask = 0xffffFFFF;
	struct tnum range = tnum_range(reg->umin_value & mask,
				       reg->umax_value & mask);
	struct tnum lo32 = tnum_cast(reg->var_off, 4);
	struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);

	reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
}
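
/* Editor's worked example for __reg_bound_offset32() (not in the original
 * source): suppose var_off is fully unknown but a 32-bit comparison has
 * established umin_value = 0 and umax_value = 15. Then:
 *
 *   range = tnum_range(0, 15)          -> (value 0x0, mask 0xf)
 *   lo32  = low 32 bits, still unknown -> (value 0x0, mask 0xffffffff)
 *   hi32  = var_off with the low 32 bits cleared
 *
 * tnum_intersect(lo32, range) leaves only mask 0xf in the low word, so the
 * result learns that bits 4-31 are zero while the high word is untouched.
 */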

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg)
{
	/*
	 * Clear type, id, off, and union(map_ptr, range) and
	 * padding between 'type' and union
	 */
	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
	reg->type = SCALAR_VALUE;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
		       true : false;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_unknown(env, regs + regno);
}

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg)
{
	__mark_reg_unknown(env, reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_not_init(env, regs + regno);
}

#define DEF_NOT_SUBREG	(0)
static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
		regs[i].subreg_def = DEF_NOT_SUBREG;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return 0;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}

static int check_subprogs(struct bpf_verifier_env *env)
{
	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret < 0)
		return ret;

	/* determine subprog starts. The end is one before the next starts */
	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		if (!env->allow_ptr_leaks) {
			verbose(env, "function calls to other bpf functions are allowed for root only\n");
			return -EPERM;
		}
		ret = add_subprog(env, i + insn[i].imm + 1);
		if (ret < 0)
			return ret;
	}

	/* Add a fake 'exit' subprog which could simplify subprog iteration
	 * logic. 'subprog_cnt' should not be increased.
	 */
	subprog[env->subprog_cnt].start = insn_cnt;

	if (env->log.level & BPF_LOG_LEVEL2)
		for (i = 0; i < env->subprog_cnt; i++)
			verbose(env, "func#%d @%d\n", i, subprog[i].start);

	/* now check that all jumps are within the same subprog */
	subprog_start = subprog[cur_subprog].start;
	subprog_end = subprog[cur_subprog + 1].start;
	for (i = 0; i < insn_cnt; i++) {
		u8 code = insn[i].code;

		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
			goto next;
		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
			goto next;
		off = i + insn[i].off + 1;
		if (off < subprog_start || off >= subprog_end) {
			verbose(env, "jump out of range from insn %d to %d\n", i, off);
			return -EINVAL;
		}
next:
		if (i == subprog_end - 1) {
			/* to avoid fall-through from one subprog into another
			 * the last insn of the subprog should be either exit
			 * or unconditional jump back
			 */
			if (code != (BPF_JMP | BPF_EXIT) &&
			    code != (BPF_JMP | BPF_JA)) {
				verbose(env, "last insn is not an exit or jmp\n");
				return -EINVAL;
			}
			subprog_start = subprog_end;
			cur_subprog++;
			if (cur_subprog < env->subprog_cnt)
				subprog_end = subprog[cur_subprog + 1].start;
		}
	}
	return 0;
}

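/* Editor's example (not in the original source): for a program of N insns
 * with a single bpf-to-bpf call whose target is insn 7, check_subprogs()
 * ends up with subprog_info starts [0, 7] plus the fake 'exit' entry at N,
 * so subprog 0 spans insns [0, 7) and subprog 1 spans [7, N).
 */
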
/* Parentage chain of this register (or stack slot) should take care of all
 * issues like callee-saved registers, stack slot allocation time, etc.
 */
static int mark_reg_read(struct bpf_verifier_env *env,
			 const struct bpf_reg_state *state,
			 struct bpf_reg_state *parent, u8 flag)
{
	bool writes = parent == state->parent; /* Observe write marks */
	int cnt = 0;

	while (parent) {
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->live & REG_LIVE_WRITTEN)
			break;
		if (parent->live & REG_LIVE_DONE) {
			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
				reg_type_str[parent->type],
				parent->var_off.value, parent->off);
			return -EFAULT;
		}
		/* The first condition is more likely to be true than the
		 * second, so check it first.
		 */
		if ((parent->live & REG_LIVE_READ) == flag ||
		    parent->live & REG_LIVE_READ64)
			/* The parentage chain never changes and
			 * this parent was already marked as LIVE_READ.
			 * There is no need to keep walking the chain again and
			 * keep re-marking all parents as LIVE_READ.
			 * This case happens when the same register is read
			 * multiple times without writes into it in-between.
			 * Also, if parent has the stronger REG_LIVE_READ64 set,
			 * then no need to set the weak REG_LIVE_READ32.
			 */
			break;
		/* ... then we depend on parent's value */
		parent->live |= flag;
		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
		if (flag == REG_LIVE_READ64)
			parent->live &= ~REG_LIVE_READ32;
		state = parent;
		parent = state->parent;
		writes = true;
		cnt++;
	}

	if (env->longest_mark_read_walk < cnt)
		env->longest_mark_read_walk = cnt;
	return 0;
}

/* This function is supposed to be used by the following 32-bit optimization
 * code only. It returns TRUE if the source or destination register operates
 * on 64-bit, otherwise return FALSE.
 */
static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
{
	u8 code, class, op;

	code = insn->code;
	class = BPF_CLASS(code);
	op = BPF_OP(code);
	if (class == BPF_JMP) {
		/* BPF_EXIT for "main" will reach here. Return TRUE
		 * conservatively.
		 */
		if (op == BPF_EXIT)
			return true;
		if (op == BPF_CALL) {
			/* BPF to BPF call will reach here because of marking
			 * caller saved clobber with DST_OP_NO_MARK for which we
			 * don't care about the register def because they are
			 * anyway marked as NOT_INIT already.
			 */
			if (insn->src_reg == BPF_PSEUDO_CALL)
				return false;
			/* Helper call will reach here because of arg type
			 * check, conservatively return TRUE.
			 */
			if (t == SRC_OP)
				return true;

			return false;
		}
	}

	if (class == BPF_ALU64 || class == BPF_JMP ||
	    /* BPF_END always use BPF_ALU class. */
	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
		return true;

	if (class == BPF_ALU || class == BPF_JMP32)
		return false;

	if (class == BPF_LDX) {
		if (t != SRC_OP)
			return BPF_SIZE(code) == BPF_DW;
		/* LDX source must be ptr. */
		return true;
	}

	if (class == BPF_STX) {
		if (reg->type != SCALAR_VALUE)
			return true;
		return BPF_SIZE(code) == BPF_DW;
	}

	if (class == BPF_LD) {
		u8 mode = BPF_MODE(code);

		/* LD_IMM64 */
		if (mode == BPF_IMM)
			return true;

		/* Both LD_IND and LD_ABS return 32-bit data. */
		if (t != SRC_OP)
			return false;

		/* Implicit ctx ptr. */
		if (regno == BPF_REG_6)
			return true;

		/* Explicit source could be any width. */
		return true;
	}

	if (class == BPF_ST)
		/* The only source register for BPF_ST is a ptr. */
		return true;

	/* Conservatively return true at default. */
	return true;
}

/* Return TRUE if INSN doesn't have explicit value define. */
static bool insn_no_def(struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

	return (class == BPF_JMP || class == BPF_JMP32 ||
		class == BPF_STX || class == BPF_ST);
}

/* Return TRUE if INSN has defined any 32-bit value explicitly. */
static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	if (insn_no_def(insn))
		return false;

	return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
}

static void mark_insn_zext(struct bpf_verifier_env *env,
			   struct bpf_reg_state *reg)
{
	s32 def_idx = reg->subreg_def;

	if (def_idx == DEF_NOT_SUBREG)
		return;

	env->insn_aux_data[def_idx - 1].zext_dst = true;
	/* The dst will be zero extended, so won't be sub-register anymore. */
	reg->subreg_def = DEF_NOT_SUBREG;
}

static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
	struct bpf_reg_state *reg, *regs = state->regs;
	bool rw64;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	reg = &regs[regno];
	rw64 = is_reg64(env, insn, regno, reg, t);
	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (reg->type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		/* We don't need to worry about FP liveness because it's read-only */
		if (regno == BPF_REG_FP)
			return 0;

		if (rw64)
			mark_insn_zext(env, reg);

		return mark_reg_read(env, reg, reg->parent,
				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		reg->live |= REG_LIVE_WRITTEN;
		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}

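/* Editor's sketch (an assumption about callers outside this excerpt): for an
 * insn like BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), the ALU checking
 * code would invoke this roughly as
 *
 *	err = check_reg_arg(env, insn->src_reg, SRC_OP);      // R2 readable?
 *	err = check_reg_arg(env, insn->dst_reg, SRC_OP);      // R1 is read too
 *	err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); // then written
 *
 * which is what drives the liveness and zero-extension bookkeeping above.
 */
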
/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env,
			    struct bpf_verifier_state *cur)
{
	u32 cnt = cur->jmp_history_cnt;
	struct bpf_idx_pair *p;

	cnt++;
	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
	if (!p)
		return -ENOMEM;
	p[cnt - 1].idx = env->insn_idx;
	p[cnt - 1].prev_idx = env->prev_insn_idx;
	cur->jmp_history = p;
	cur->jmp_history_cnt = cnt;
	return 0;
}

/* Backtrack one insn at a time. If idx is not at the top of recorded
 * history then previous instruction came from straight line execution.
 */
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
			     u32 *history)
{
	u32 cnt = *history;

	if (cnt && st->jmp_history[cnt - 1].idx == i) {
		i = st->jmp_history[cnt - 1].prev_idx;
		(*history)--;
	} else {
		i--;
	}
	return i;
}

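/* Editor's example (not in the original source): if the recorded history is
 * [(idx = 12, prev_idx = 5)] and the walk is at insn 12, get_prev_insn_idx()
 * returns 5 and pops that pair; from any other insn i it simply returns
 * i - 1, i.e. straight-line execution.
 */
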
1502/* For given verifier state backtrack_insn() is called from the last insn to
1503 * the first insn. Its purpose is to compute a bitmask of registers and
1504 * stack slots that needs precision in the parent verifier state.
1505 */
1506static int backtrack_insn(struct bpf_verifier_env *env, int idx,
1507 u32 *reg_mask, u64 *stack_mask)
1508{
1509 const struct bpf_insn_cbs cbs = {
1510 .cb_print = verbose,
1511 .private_data = env,
1512 };
1513 struct bpf_insn *insn = env->prog->insnsi + idx;
1514 u8 class = BPF_CLASS(insn->code);
1515 u8 opcode = BPF_OP(insn->code);
1516 u8 mode = BPF_MODE(insn->code);
1517 u32 dreg = 1u << insn->dst_reg;
1518 u32 sreg = 1u << insn->src_reg;
1519 u32 spi;
1520
1521 if (insn->code == 0)
1522 return 0;
1523 if (env->log.level & BPF_LOG_LEVEL) {
1524 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
1525 verbose(env, "%d: ", idx);
1526 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
1527 }
1528
1529 if (class == BPF_ALU || class == BPF_ALU64) {
1530 if (!(*reg_mask & dreg))
1531 return 0;
1532 if (opcode == BPF_MOV) {
1533 if (BPF_SRC(insn->code) == BPF_X) {
1534 /* dreg = sreg
1535 * dreg needs precision after this insn
1536 * sreg needs precision before this insn
1537 */
1538 *reg_mask &= ~dreg;
1539 *reg_mask |= sreg;
1540 } else {
1541 /* dreg = K
1542 * dreg needs precision after this insn.
1543 * Corresponding register is already marked
1544 * as precise=true in this verifier state.
1545 * No further markings in parent are necessary
1546 */
1547 *reg_mask &= ~dreg;
1548 }
1549 } else {
1550 if (BPF_SRC(insn->code) == BPF_X) {
1551 /* dreg += sreg
1552 * both dreg and sreg need precision
1553 * before this insn
1554 */
1555 *reg_mask |= sreg;
1556 } /* else dreg += K
1557 * dreg still needs precision before this insn
1558 */
1559 }
1560 } else if (class == BPF_LDX) {
1561 if (!(*reg_mask & dreg))
1562 return 0;
1563 *reg_mask &= ~dreg;
1564
1565 /* scalars can only be spilled into stack w/o losing precision.
1566 * Load from any other memory can be zero extended.
1567 * The desire to keep that precision is already indicated
1568 * by 'precise' mark in corresponding register of this state.
1569 * No further tracking necessary.
1570 */
1571 if (insn->src_reg != BPF_REG_FP)
1572 return 0;
1573 if (BPF_SIZE(insn->code) != BPF_DW)
1574 return 0;
1575
1576 /* dreg = *(u64 *)[fp - off] was a fill from the stack.
1577 * that [fp - off] slot contains scalar that needs to be
1578 * tracked with precision
1579 */
1580 spi = (-insn->off - 1) / BPF_REG_SIZE;
1581 if (spi >= 64) {
1582 verbose(env, "BUG spi %d\n", spi);
1583 WARN_ONCE(1, "verifier backtracking bug");
1584 return -EFAULT;
1585 }
1586 *stack_mask |= 1ull << spi;
b3b50f05 1587 } else if (class == BPF_STX || class == BPF_ST) {
b5dc0163 1588 if (*reg_mask & dreg)
b3b50f05 1589 /* stx & st shouldn't be using _scalar_ dst_reg
b5dc0163
AS
1590 * to access memory. It means backtracking
1591 * encountered a case of pointer subtraction.
1592 */
1593 return -ENOTSUPP;
1594 /* scalars can only be spilled into stack */
1595 if (insn->dst_reg != BPF_REG_FP)
1596 return 0;
1597 if (BPF_SIZE(insn->code) != BPF_DW)
1598 return 0;
1599 spi = (-insn->off - 1) / BPF_REG_SIZE;
1600 if (spi >= 64) {
1601 verbose(env, "BUG spi %d\n", spi);
1602 WARN_ONCE(1, "verifier backtracking bug");
1603 return -EFAULT;
1604 }
1605 if (!(*stack_mask & (1ull << spi)))
1606 return 0;
1607 *stack_mask &= ~(1ull << spi);
b3b50f05
AN
1608 if (class == BPF_STX)
1609 *reg_mask |= sreg;
b5dc0163
AS
1610 } else if (class == BPF_JMP || class == BPF_JMP32) {
1611 if (opcode == BPF_CALL) {
1612 if (insn->src_reg == BPF_PSEUDO_CALL)
1613 return -ENOTSUPP;
1614 /* regular helper call sets R0 */
1615 *reg_mask &= ~1;
1616 if (*reg_mask & 0x3f) {
1617 /* if backtracing was looking for registers R1-R5
1618 * they should have been found already.
1619 */
1620 verbose(env, "BUG regs %x\n", *reg_mask);
1621 WARN_ONCE(1, "verifier backtracking bug");
1622 return -EFAULT;
1623 }
1624 } else if (opcode == BPF_EXIT) {
1625 return -ENOTSUPP;
1626 }
1627 } else if (class == BPF_LD) {
1628 if (!(*reg_mask & dreg))
1629 return 0;
1630 *reg_mask &= ~dreg;
1631 /* It's ld_imm64 or ld_abs or ld_ind.
1632 * For ld_imm64 no further tracking of precision
1633 * into parent is necessary
1634 */
1635 if (mode == BPF_IND || mode == BPF_ABS)
1636 /* to be analyzed */
1637 return -ENOTSUPP;
b5dc0163
AS
1638 }
1639 return 0;
1640}
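
/* Illustrative walk-through (an editor's sketch, not part of the verifier):
 * suppose backtracking starts with r6 in reg_mask and walks this block
 * in reverse:
 *	3: (bf) r6 = r0			// dreg = sreg: clear r6, set r0 in reg_mask
 *	2: (79) r0 = *(u64 *)(r10 -8)	// fill from fp-8: clear r0, set spi 0
 *					// in stack_mask
 *	1: (7b) *(u64 *)(r10 -8) = r1	// spill: clear spi 0, set r1 in reg_mask
 *	0: (b7) r1 = 42			// dreg = K: clear r1; both masks are now
 *					// empty, so backtracking can stop
 */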
1641
1642/* the scalar precision tracking algorithm:
1643 * . at the start all registers have precise=false.
1644 * . scalar ranges are tracked as normal through alu and jmp insns.
1645 * . once the precise value of a scalar register is used in:
1646 * . ptr + scalar alu
1647 * . if (scalar cond K|scalar)
1648 * . helper_call(.., scalar, ...) where ARG_CONST is expected,
1649 * backtrack through the verifier states and mark as precise
1650 * all registers and stack slots with spilled constants that
1651 * contributed to these scalar registers.
1652 * . during state pruning two registers (or spilled stack slots)
1653 * are equivalent if both are not precise.
1654 *
1655 * Note the verifier cannot simply walk the register parentage chain,
1656 * since many different registers and stack slots could have been
1657 * used to compute a single precise scalar.
1658 *
1659 * The approach of starting with precise=true for all registers and then
1660 * backtracking to mark a register as not precise when the verifier detects
1661 * that the program doesn't care about its specific value (e.g., when a
1662 * helper takes a register as an ARG_ANYTHING parameter) is not safe.
1663 *
1664 * It's ok to walk a single parentage chain of the verifier states.
1665 * It's possible that this backtracking will go all the way to the 1st insn.
1666 * All other branches will be explored for needing precision later.
1667 *
1668 * The backtracking needs to deal with cases like:
1669 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
1670 * r9 -= r8
1671 * r5 = r9
1672 * if r5 > 0x79f goto pc+7
1673 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
1674 * r5 += 1
1675 * ...
1676 * call bpf_perf_event_output#25
1677 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
1678 *
1679 * and this case:
1680 * r6 = 1
1681 * call foo // uses callee's r6 inside to compute r0
1682 * r0 += r6
1683 * if r0 == 0 goto
1684 *
1685 * to track above reg_mask/stack_mask needs to be independent for each frame.
1686 *
1687 * Also if parent's curframe > frame where backtracking started,
1688 * the verifier needs to mark registers in both frames, otherwise callees
1689 * may incorrectly prune callers. This is similar to
1690 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
1691 *
1692 * For now backtracking falls back to conservative marking.
1693 */
1694static void mark_all_scalars_precise(struct bpf_verifier_env *env,
1695 struct bpf_verifier_state *st)
1696{
1697 struct bpf_func_state *func;
1698 struct bpf_reg_state *reg;
1699 int i, j;
1700
1701 /* big hammer: mark all scalars precise in this path.
1702 * pop_stack may still get !precise scalars.
1703 */
1704 for (; st; st = st->parent)
1705 for (i = 0; i <= st->curframe; i++) {
1706 func = st->frame[i];
1707 for (j = 0; j < BPF_REG_FP; j++) {
1708 reg = &func->regs[j];
1709 if (reg->type != SCALAR_VALUE)
1710 continue;
1711 reg->precise = true;
1712 }
1713 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
1714 if (func->stack[j].slot_type[0] != STACK_SPILL)
1715 continue;
1716 reg = &func->stack[j].spilled_ptr;
1717 if (reg->type != SCALAR_VALUE)
1718 continue;
1719 reg->precise = true;
1720 }
1721 }
1722}
1723
a3ce685d
AS
1724static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
1725 int spi)
b5dc0163
AS
1726{
1727 struct bpf_verifier_state *st = env->cur_state;
1728 int first_idx = st->first_insn_idx;
1729 int last_idx = env->insn_idx;
1730 struct bpf_func_state *func;
1731 struct bpf_reg_state *reg;
a3ce685d
AS
1732 u32 reg_mask = regno >= 0 ? 1u << regno : 0;
1733 u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
b5dc0163 1734 bool skip_first = true;
a3ce685d 1735 bool new_marks = false;
b5dc0163
AS
1736 int i, err;
1737
1738 if (!env->allow_ptr_leaks)
1739 /* backtracking is root only for now */
1740 return 0;
1741
1742 func = st->frame[st->curframe];
a3ce685d
AS
1743 if (regno >= 0) {
1744 reg = &func->regs[regno];
1745 if (reg->type != SCALAR_VALUE) {
1746			WARN_ONCE(1, "backtracking misuse");
1747 return -EFAULT;
1748 }
1749 if (!reg->precise)
1750 new_marks = true;
1751 else
1752 reg_mask = 0;
1753 reg->precise = true;
b5dc0163 1754 }
b5dc0163 1755
a3ce685d
AS
1756 while (spi >= 0) {
1757 if (func->stack[spi].slot_type[0] != STACK_SPILL) {
1758 stack_mask = 0;
1759 break;
1760 }
1761 reg = &func->stack[spi].spilled_ptr;
1762 if (reg->type != SCALAR_VALUE) {
1763 stack_mask = 0;
1764 break;
1765 }
1766 if (!reg->precise)
1767 new_marks = true;
1768 else
1769 stack_mask = 0;
1770 reg->precise = true;
1771 break;
1772 }
1773
1774 if (!new_marks)
1775 return 0;
1776 if (!reg_mask && !stack_mask)
1777 return 0;
b5dc0163
AS
1778 for (;;) {
1779 DECLARE_BITMAP(mask, 64);
b5dc0163
AS
1780 u32 history = st->jmp_history_cnt;
1781
1782 if (env->log.level & BPF_LOG_LEVEL)
1783 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
1784 for (i = last_idx;;) {
1785 if (skip_first) {
1786 err = 0;
1787 skip_first = false;
1788 } else {
1789 err = backtrack_insn(env, i, &reg_mask, &stack_mask);
1790 }
1791 if (err == -ENOTSUPP) {
1792 mark_all_scalars_precise(env, st);
1793 return 0;
1794 } else if (err) {
1795 return err;
1796 }
1797 if (!reg_mask && !stack_mask)
1798 /* Found assignment(s) into tracked register in this state.
1799 * Since this state is already marked, just return.
1800 * Nothing to be tracked further in the parent state.
1801 */
1802 return 0;
1803 if (i == first_idx)
1804 break;
1805 i = get_prev_insn_idx(st, i, &history);
1806 if (i >= env->prog->len) {
1807 /* This can happen if backtracking reached insn 0
1808 * and there are still reg_mask or stack_mask
1809 * to backtrack.
1810 * It means the backtracking missed the spot where
1811				 * a particular register was initialized with a constant.
1812 */
1813 verbose(env, "BUG backtracking idx %d\n", i);
1814 WARN_ONCE(1, "verifier backtracking bug");
1815 return -EFAULT;
1816 }
1817 }
1818 st = st->parent;
1819 if (!st)
1820 break;
1821
a3ce685d 1822 new_marks = false;
b5dc0163
AS
1823 func = st->frame[st->curframe];
1824 bitmap_from_u64(mask, reg_mask);
1825 for_each_set_bit(i, mask, 32) {
1826 reg = &func->regs[i];
a3ce685d
AS
1827 if (reg->type != SCALAR_VALUE) {
1828 reg_mask &= ~(1u << i);
b5dc0163 1829 continue;
a3ce685d 1830 }
b5dc0163
AS
1831 if (!reg->precise)
1832 new_marks = true;
1833 reg->precise = true;
1834 }
1835
1836 bitmap_from_u64(mask, stack_mask);
1837 for_each_set_bit(i, mask, 64) {
1838 if (i >= func->allocated_stack / BPF_REG_SIZE) {
2339cd6c
AS
1839 /* the sequence of instructions:
1840 * 2: (bf) r3 = r10
1841 * 3: (7b) *(u64 *)(r3 -8) = r0
1842 * 4: (79) r4 = *(u64 *)(r10 -8)
1843 * doesn't contain jmps. It's backtracked
1844 * as a single block.
1845 * During backtracking insn 3 is not recognized as
1846 * stack access, so at the end of backtracking
1847 * stack slot fp-8 is still marked in stack_mask.
1848 * However the parent state may not have accessed
1849 * fp-8 and it's "unallocated" stack space.
1850				 * In such a case, fall back to conservative marking.
b5dc0163 1851 */
2339cd6c
AS
1852 mark_all_scalars_precise(env, st);
1853 return 0;
b5dc0163
AS
1854 }
1855
a3ce685d
AS
1856 if (func->stack[i].slot_type[0] != STACK_SPILL) {
1857 stack_mask &= ~(1ull << i);
b5dc0163 1858 continue;
a3ce685d 1859 }
b5dc0163 1860 reg = &func->stack[i].spilled_ptr;
a3ce685d
AS
1861 if (reg->type != SCALAR_VALUE) {
1862 stack_mask &= ~(1ull << i);
b5dc0163 1863 continue;
a3ce685d 1864 }
b5dc0163
AS
1865 if (!reg->precise)
1866 new_marks = true;
1867 reg->precise = true;
1868 }
1869 if (env->log.level & BPF_LOG_LEVEL) {
1870 print_verifier_state(env, func);
1871 verbose(env, "parent %s regs=%x stack=%llx marks\n",
1872 new_marks ? "didn't have" : "already had",
1873 reg_mask, stack_mask);
1874 }
1875
a3ce685d
AS
1876 if (!reg_mask && !stack_mask)
1877 break;
b5dc0163
AS
1878 if (!new_marks)
1879 break;
1880
1881 last_idx = st->last_insn_idx;
1882 first_idx = st->first_insn_idx;
1883 }
1884 return 0;
1885}
1886
a3ce685d
AS
1887static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
1888{
1889 return __mark_chain_precision(env, regno, -1);
1890}
1891
1892static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
1893{
1894 return __mark_chain_precision(env, -1, spi);
1895}
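
/* Editor's usage sketch (illustrative only): a caller that has just learned
 * that a scalar's exact value matters asks for precision on the register:
 *
 *	err = mark_chain_precision(env, value_regno);
 *	if (err)
 *		return err;
 *
 * while a caller tracking a constant spilled at stack slot 'spi' would call
 * mark_chain_precision_stack(env, spi) instead.
 */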
b5dc0163 1896
1be7f75d
AS
1897static bool is_spillable_regtype(enum bpf_reg_type type)
1898{
1899 switch (type) {
1900 case PTR_TO_MAP_VALUE:
1901 case PTR_TO_MAP_VALUE_OR_NULL:
1902 case PTR_TO_STACK:
1903 case PTR_TO_CTX:
969bf05e 1904 case PTR_TO_PACKET:
de8f3a83 1905 case PTR_TO_PACKET_META:
969bf05e 1906 case PTR_TO_PACKET_END:
d58e468b 1907 case PTR_TO_FLOW_KEYS:
1be7f75d 1908 case CONST_PTR_TO_MAP:
c64b7983
JS
1909 case PTR_TO_SOCKET:
1910 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
1911 case PTR_TO_SOCK_COMMON:
1912 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
1913 case PTR_TO_TCP_SOCK:
1914 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 1915 case PTR_TO_XDP_SOCK:
65726b5b 1916 case PTR_TO_BTF_ID:
1be7f75d
AS
1917 return true;
1918 default:
1919 return false;
1920 }
1921}
1922
cc2b14d5
AS
1923/* Does this register contain a constant zero? */
1924static bool register_is_null(struct bpf_reg_state *reg)
1925{
1926 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
1927}
1928
f7cf25b2
AS
1929static bool register_is_const(struct bpf_reg_state *reg)
1930{
1931 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
1932}
1933
1934static void save_register_state(struct bpf_func_state *state,
1935 int spi, struct bpf_reg_state *reg)
1936{
1937 int i;
1938
1939 state->stack[spi].spilled_ptr = *reg;
1940 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1941
1942 for (i = 0; i < BPF_REG_SIZE; i++)
1943 state->stack[spi].slot_type[i] = STACK_SPILL;
1944}
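
/* For illustration: after an 8-byte spill of R1 to fp-8 (so spi == 0, the
 * slot closest to the frame pointer), save_register_state() leaves:
 *	state->stack[0].spilled_ptr == copy of R1, with REG_LIVE_WRITTEN set
 *	state->stack[0].slot_type[0..7] == STACK_SPILL
 */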
1945
17a52670
AS
1946/* check_stack_read/write functions track spill/fill of registers;
1947 * stack boundary and alignment are checked in check_mem_access()
1948 */
61bd5218 1949static int check_stack_write(struct bpf_verifier_env *env,
f4d7e40a 1950 struct bpf_func_state *state, /* func where register points to */
af86ca4e 1951 int off, int size, int value_regno, int insn_idx)
17a52670 1952{
f4d7e40a 1953 struct bpf_func_state *cur; /* state of the current function */
638f5b90 1954 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
b5dc0163 1955 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
f7cf25b2 1956 struct bpf_reg_state *reg = NULL;
638f5b90 1957
f4d7e40a 1958 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
fd978bf7 1959 state->acquired_refs, true);
638f5b90
AS
1960 if (err)
1961 return err;
9c399760
AS
1962 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1963	 * so it's an aligned access and [off, off + size) is within stack limits
1964 */
638f5b90
AS
1965 if (!env->allow_ptr_leaks &&
1966 state->stack[spi].slot_type[0] == STACK_SPILL &&
1967 size != BPF_REG_SIZE) {
1968 verbose(env, "attempt to corrupt spilled pointer on stack\n");
1969 return -EACCES;
1970 }
17a52670 1971
f4d7e40a 1972 cur = env->cur_state->frame[env->cur_state->curframe];
f7cf25b2
AS
1973 if (value_regno >= 0)
1974 reg = &cur->regs[value_regno];
17a52670 1975
f7cf25b2
AS
1976 if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
1977 !register_is_null(reg) && env->allow_ptr_leaks) {
b5dc0163
AS
1978 if (dst_reg != BPF_REG_FP) {
1979			/* The backtracking logic can only recognize explicit
1980			 * stack slot addresses like [fp - 8]. Any other spill of a
1981			 * scalar via a different register has to be conservative.
1982			 * Backtrack from here and mark as precise all registers
1983			 * that contributed to 'reg' being a constant.
1984 */
1985 err = mark_chain_precision(env, value_regno);
1986 if (err)
1987 return err;
1988 }
f7cf25b2
AS
1989 save_register_state(state, spi, reg);
1990 } else if (reg && is_spillable_regtype(reg->type)) {
17a52670 1991 /* register containing pointer is being spilled into stack */
9c399760 1992 if (size != BPF_REG_SIZE) {
f7cf25b2 1993 verbose_linfo(env, insn_idx, "; ");
61bd5218 1994 verbose(env, "invalid size of register spill\n");
17a52670
AS
1995 return -EACCES;
1996 }
1997
f7cf25b2 1998 if (state != cur && reg->type == PTR_TO_STACK) {
f4d7e40a
AS
1999 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
2000 return -EINVAL;
2001 }
2002
f7cf25b2
AS
2003 if (!env->allow_ptr_leaks) {
2004 bool sanitize = false;
17a52670 2005
f7cf25b2
AS
2006 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
2007 register_is_const(&state->stack[spi].spilled_ptr))
2008 sanitize = true;
2009 for (i = 0; i < BPF_REG_SIZE; i++)
2010 if (state->stack[spi].slot_type[i] == STACK_MISC) {
2011 sanitize = true;
2012 break;
2013 }
2014 if (sanitize) {
af86ca4e
AS
2015 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
2016 int soff = (-spi - 1) * BPF_REG_SIZE;
2017
2018			/* detected reuse of an integer stack slot with a pointer,
2019			 * which means either llvm is reusing the stack slot or
2020			 * an attacker is trying to exploit CVE-2018-3639
2021			 * (speculative store bypass).
2022			 * Have to sanitize that slot with a preemptive
2023			 * store of zero.
2024 */
2025 if (*poff && *poff != soff) {
2026 /* disallow programs where single insn stores
2027 * into two different stack slots, since verifier
2028 * cannot sanitize them
2029 */
2030 verbose(env,
2031 "insn %d cannot access two stack slots fp%d and fp%d",
2032 insn_idx, *poff, soff);
2033 return -EINVAL;
2034 }
2035 *poff = soff;
2036 }
af86ca4e 2037 }
f7cf25b2 2038 save_register_state(state, spi, reg);
9c399760 2039 } else {
cc2b14d5
AS
2040 u8 type = STACK_MISC;
2041
679c782d
EC
2042 /* regular write of data into stack destroys any spilled ptr */
2043 state->stack[spi].spilled_ptr.type = NOT_INIT;
0bae2d4d
JW
2044 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
2045 if (state->stack[spi].slot_type[0] == STACK_SPILL)
2046 for (i = 0; i < BPF_REG_SIZE; i++)
2047 state->stack[spi].slot_type[i] = STACK_MISC;
9c399760 2048
cc2b14d5
AS
2049		/* only mark the slot as written if all 8 bytes were written;
2050		 * otherwise read propagation may incorrectly stop too soon
2051		 * when stack slots are partially written.
2052		 * This heuristic means that read propagation will be
2053		 * conservative, since it will add reg_live_read marks
2054		 * to stack slots all the way to the first state when a program
2055		 * writes+reads less than 8 bytes
2056 */
2057 if (size == BPF_REG_SIZE)
2058 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2059
2060 /* when we zero initialize stack slots mark them as such */
b5dc0163
AS
2061 if (reg && register_is_null(reg)) {
2062 /* backtracking doesn't work for STACK_ZERO yet. */
2063 err = mark_chain_precision(env, value_regno);
2064 if (err)
2065 return err;
cc2b14d5 2066 type = STACK_ZERO;
b5dc0163 2067 }
cc2b14d5 2068
0bae2d4d 2069 /* Mark slots affected by this stack write. */
9c399760 2070 for (i = 0; i < size; i++)
638f5b90 2071 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
cc2b14d5 2072 type;
17a52670
AS
2073 }
2074 return 0;
2075}
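
/* Worked example (editor's sketch; exact markings depend on the
 * allow_ptr_leaks and register_is_const() checks above):
 *	(b7) r1 = 0
 *	(7b) *(u64 *)(r10 -8) = r1	// full-width write of const zero:
 *					// all 8 bytes of fp-8 become STACK_ZERO
 *	(63) *(u32 *)(r10 -16) = r2	// r2 unknown: only the 4 written bytes
 *					// of that slot become STACK_MISC, and
 *					// REG_LIVE_WRITTEN is not set since the
 *					// slot is only partially written
 */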
2076
61bd5218 2077static int check_stack_read(struct bpf_verifier_env *env,
f4d7e40a
AS
2078 struct bpf_func_state *reg_state /* func where register points to */,
2079 int off, int size, int value_regno)
17a52670 2080{
f4d7e40a
AS
2081 struct bpf_verifier_state *vstate = env->cur_state;
2082 struct bpf_func_state *state = vstate->frame[vstate->curframe];
638f5b90 2083 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
f7cf25b2 2084 struct bpf_reg_state *reg;
638f5b90 2085 u8 *stype;
17a52670 2086
f4d7e40a 2087 if (reg_state->allocated_stack <= slot) {
638f5b90
AS
2088 verbose(env, "invalid read from stack off %d+0 size %d\n",
2089 off, size);
2090 return -EACCES;
2091 }
f4d7e40a 2092 stype = reg_state->stack[spi].slot_type;
f7cf25b2 2093 reg = &reg_state->stack[spi].spilled_ptr;
17a52670 2094
638f5b90 2095 if (stype[0] == STACK_SPILL) {
9c399760 2096 if (size != BPF_REG_SIZE) {
f7cf25b2
AS
2097 if (reg->type != SCALAR_VALUE) {
2098 verbose_linfo(env, env->insn_idx, "; ");
2099 verbose(env, "invalid size of register fill\n");
2100 return -EACCES;
2101 }
2102 if (value_regno >= 0) {
2103 mark_reg_unknown(env, state->regs, value_regno);
2104 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2105 }
2106 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2107 return 0;
17a52670 2108 }
9c399760 2109 for (i = 1; i < BPF_REG_SIZE; i++) {
638f5b90 2110 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
61bd5218 2111 verbose(env, "corrupted spill memory\n");
17a52670
AS
2112 return -EACCES;
2113 }
2114 }
2115
dc503a8a 2116 if (value_regno >= 0) {
17a52670 2117 /* restore register state from stack */
f7cf25b2 2118 state->regs[value_regno] = *reg;
2f18f62e
AS
2119 /* mark reg as written since spilled pointer state likely
2120 * has its liveness marks cleared by is_state_visited()
2121 * which resets stack/reg liveness for state transitions
2122 */
2123 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
dc503a8a 2124 }
f7cf25b2 2125 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
17a52670 2126 } else {
cc2b14d5
AS
2127 int zeros = 0;
2128
17a52670 2129 for (i = 0; i < size; i++) {
cc2b14d5
AS
2130 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
2131 continue;
2132 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
2133 zeros++;
2134 continue;
17a52670 2135 }
cc2b14d5
AS
2136 verbose(env, "invalid read from stack off %d+%d size %d\n",
2137 off, i, size);
2138 return -EACCES;
2139 }
f7cf25b2 2140 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
cc2b14d5
AS
2141 if (value_regno >= 0) {
2142 if (zeros == size) {
2143 /* any size read into register is zero extended,
2144 * so the whole register == const_zero
2145 */
2146 __mark_reg_const_zero(&state->regs[value_regno]);
b5dc0163
AS
2147 /* backtracking doesn't support STACK_ZERO yet,
2148 * so mark it precise here, so that later
2149 * backtracking can stop here.
2150 * Backtracking may not need this if this register
2151 * doesn't participate in pointer adjustment.
2152 * Forward propagation of precise flag is not
2153 * necessary either. This mark is only to stop
2154 * backtracking. Any register that contributed
2155 * to const 0 was marked precise before spill.
2156 */
2157 state->regs[value_regno].precise = true;
cc2b14d5
AS
2158 } else {
2159 /* have read misc data from the stack */
2160 mark_reg_unknown(env, state->regs, value_regno);
2161 }
2162 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
17a52670 2163 }
17a52670 2164 }
f7cf25b2 2165 return 0;
17a52670
AS
2166}
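
/* Illustrative fills (editor's sketch):
 *	(79) r2 = *(u64 *)(r10 -8)	// slot is STACK_SPILL: r2 gets the
 *					// saved register state back
 *	(61) r2 = *(u32 *)(r10 -16)	// slot bytes are all STACK_ZERO:
 *					// r2 becomes const zero; if they were
 *					// STACK_MISC, r2 would become unknown
 */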
2167
e4298d25
DB
2168static int check_stack_access(struct bpf_verifier_env *env,
2169 const struct bpf_reg_state *reg,
2170 int off, int size)
2171{
2172 /* Stack accesses must be at a fixed offset, so that we
2173 * can determine what type of data were returned. See
2174 * check_stack_read().
2175 */
2176 if (!tnum_is_const(reg->var_off)) {
2177 char tn_buf[48];
2178
2179 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1fbd20f8 2180 verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
e4298d25
DB
2181 tn_buf, off, size);
2182 return -EACCES;
2183 }
2184
2185 if (off >= 0 || off < -MAX_BPF_STACK) {
2186 verbose(env, "invalid stack off=%d size=%d\n", off, size);
2187 return -EACCES;
2188 }
2189
2190 return 0;
2191}
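
/* Example of an access this rejects (hypothetical program fragment):
 *	r2 = r10
 *	r2 += r1		// r1 is an unknown scalar, so var_off of r2
 *				// is no longer constant
 *	r3 = *(u64 *)(r2 +0)	// -> "variable stack access var_off=..."
 */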
2192
591fe988
DB
2193static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
2194 int off, int size, enum bpf_access_type type)
2195{
2196 struct bpf_reg_state *regs = cur_regs(env);
2197 struct bpf_map *map = regs[regno].map_ptr;
2198 u32 cap = bpf_map_flags_to_cap(map);
2199
2200 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
2201 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
2202 map->value_size, off, size);
2203 return -EACCES;
2204 }
2205
2206 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
2207 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
2208 map->value_size, off, size);
2209 return -EACCES;
2210 }
2211
2212 return 0;
2213}
2214
17a52670 2215/* check read/write into map element returned by bpf_map_lookup_elem() */
f1174f77 2216static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 2217 int size, bool zero_size_allowed)
17a52670 2218{
638f5b90
AS
2219 struct bpf_reg_state *regs = cur_regs(env);
2220 struct bpf_map *map = regs[regno].map_ptr;
17a52670 2221
9fd29c08
YS
2222 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2223 off + size > map->value_size) {
61bd5218 2224 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
17a52670
AS
2225 map->value_size, off, size);
2226 return -EACCES;
2227 }
2228 return 0;
2229}
2230
f1174f77
EC
2231/* check read/write into a map element with possible variable offset */
2232static int check_map_access(struct bpf_verifier_env *env, u32 regno,
9fd29c08 2233 int off, int size, bool zero_size_allowed)
dbcfe5f7 2234{
f4d7e40a
AS
2235 struct bpf_verifier_state *vstate = env->cur_state;
2236 struct bpf_func_state *state = vstate->frame[vstate->curframe];
dbcfe5f7
GB
2237 struct bpf_reg_state *reg = &state->regs[regno];
2238 int err;
2239
f1174f77
EC
2240 /* We may have adjusted the register to this map value, so we
2241 * need to try adding each of min_value and max_value to off
2242 * to make sure our theoretical access will be safe.
dbcfe5f7 2243 */
06ee7115 2244 if (env->log.level & BPF_LOG_LEVEL)
61bd5218 2245 print_verifier_state(env, state);
b7137c4e 2246
dbcfe5f7
GB
2247 /* The minimum value is only important with signed
2248 * comparisons where we can't assume the floor of a
2249 * value is 0. If we are using signed variables for our
2250	 * indexes, we need to make sure that whatever we use
2251 * will have a set floor within our range.
2252 */
b7137c4e
DB
2253 if (reg->smin_value < 0 &&
2254 (reg->smin_value == S64_MIN ||
2255 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
2256 reg->smin_value + off < 0)) {
61bd5218 2257 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
dbcfe5f7
GB
2258 regno);
2259 return -EACCES;
2260 }
9fd29c08
YS
2261 err = __check_map_access(env, regno, reg->smin_value + off, size,
2262 zero_size_allowed);
dbcfe5f7 2263 if (err) {
61bd5218
JK
2264 verbose(env, "R%d min value is outside of the array range\n",
2265 regno);
dbcfe5f7
GB
2266 return err;
2267 }
2268
b03c9f9f
EC
2269 /* If we haven't set a max value then we need to bail since we can't be
2270 * sure we won't do bad things.
2271 * If reg->umax_value + off could overflow, treat that as unbounded too.
dbcfe5f7 2272 */
b03c9f9f 2273 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
61bd5218 2274 verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
dbcfe5f7
GB
2275 regno);
2276 return -EACCES;
2277 }
9fd29c08
YS
2278 err = __check_map_access(env, regno, reg->umax_value + off, size,
2279 zero_size_allowed);
f1174f77 2280 if (err)
61bd5218
JK
2281 verbose(env, "R%d max value is outside of the array range\n",
2282 regno);
d83525ca
AS
2283
2284 if (map_value_has_spin_lock(reg->map_ptr)) {
2285 u32 lock = reg->map_ptr->spin_lock_off;
2286
2287 /* if any part of struct bpf_spin_lock can be touched by
2288 * load/store reject this program.
2289 * To check that [x1, x2) overlaps with [y1, y2)
2290 * it is sufficient to check x1 < y2 && y1 < x2.
2291 */
2292 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
2293 lock < reg->umax_value + off + size) {
2294 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
2295 return -EACCES;
2296 }
2297 }
f1174f77 2298 return err;
dbcfe5f7
GB
2299}
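
/* Worked overlap example (editor's illustration): with spin_lock_off == 40
 * and sizeof(struct bpf_spin_lock) == 4, an 8-byte access at [36, 44)
 * satisfies 36 < 40 + 4 && 40 < 36 + 8 and is rejected, while an access
 * at [44, 52) fails 44 < 44 and is allowed.
 */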
2300
969bf05e
AS
2301#define MAX_PACKET_OFF 0xffff
2302
58e2af8b 2303static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
3a0af8fd
TG
2304 const struct bpf_call_arg_meta *meta,
2305 enum bpf_access_type t)
4acf6c0b 2306{
36bbef52 2307 switch (env->prog->type) {
5d66fa7d 2308 /* Program types only with direct read access go here! */
3a0af8fd
TG
2309 case BPF_PROG_TYPE_LWT_IN:
2310 case BPF_PROG_TYPE_LWT_OUT:
004d4b27 2311 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2dbb9b9e 2312 case BPF_PROG_TYPE_SK_REUSEPORT:
5d66fa7d 2313 case BPF_PROG_TYPE_FLOW_DISSECTOR:
d5563d36 2314 case BPF_PROG_TYPE_CGROUP_SKB:
3a0af8fd
TG
2315 if (t == BPF_WRITE)
2316 return false;
7e57fbb2 2317 /* fallthrough */
5d66fa7d
DB
2318
2319 /* Program types with direct read + write access go here! */
36bbef52
DB
2320 case BPF_PROG_TYPE_SCHED_CLS:
2321 case BPF_PROG_TYPE_SCHED_ACT:
4acf6c0b 2322 case BPF_PROG_TYPE_XDP:
3a0af8fd 2323 case BPF_PROG_TYPE_LWT_XMIT:
8a31db56 2324 case BPF_PROG_TYPE_SK_SKB:
4f738adb 2325 case BPF_PROG_TYPE_SK_MSG:
36bbef52
DB
2326 if (meta)
2327 return meta->pkt_access;
2328
2329 env->seen_direct_write = true;
4acf6c0b 2330 return true;
0d01da6a
SF
2331
2332 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2333 if (t == BPF_WRITE)
2334 env->seen_direct_write = true;
2335
2336 return true;
2337
4acf6c0b
BB
2338 default:
2339 return false;
2340 }
2341}
2342
f1174f77 2343static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
9fd29c08 2344 int off, int size, bool zero_size_allowed)
969bf05e 2345{
638f5b90 2346 struct bpf_reg_state *regs = cur_regs(env);
58e2af8b 2347 struct bpf_reg_state *reg = &regs[regno];
969bf05e 2348
9fd29c08
YS
2349 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2350 (u64)off + size > reg->range) {
61bd5218 2351 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
d91b28ed 2352 off, size, regno, reg->id, reg->off, reg->range);
969bf05e
AS
2353 return -EACCES;
2354 }
2355 return 0;
2356}
2357
f1174f77 2358static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 2359 int size, bool zero_size_allowed)
f1174f77 2360{
638f5b90 2361 struct bpf_reg_state *regs = cur_regs(env);
f1174f77
EC
2362 struct bpf_reg_state *reg = &regs[regno];
2363 int err;
2364
2365 /* We may have added a variable offset to the packet pointer; but any
2366 * reg->range we have comes after that. We are only checking the fixed
2367 * offset.
2368 */
2369
2370 /* We don't allow negative numbers, because we aren't tracking enough
2371 * detail to prove they're safe.
2372 */
b03c9f9f 2373 if (reg->smin_value < 0) {
61bd5218 2374 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
f1174f77
EC
2375 regno);
2376 return -EACCES;
2377 }
9fd29c08 2378 err = __check_packet_access(env, regno, off, size, zero_size_allowed);
f1174f77 2379 if (err) {
61bd5218 2380 verbose(env, "R%d offset is outside of the packet\n", regno);
f1174f77
EC
2381 return err;
2382 }
e647815a
JW
2383
2384 /* __check_packet_access has made sure "off + size - 1" is within u16.
2385 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
2386	 * otherwise find_good_pkt_pointers would have refused to set the range
2387	 * info, and __check_packet_access would have rejected this pkt access.
2388 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
2389 */
2390 env->prog->aux->max_pkt_offset =
2391 max_t(u32, env->prog->aux->max_pkt_offset,
2392 off + reg->umax_value + size - 1);
2393
f1174f77
EC
2394 return err;
2395}
2396
2397/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
31fd8581 2398static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
9e15db66
AS
2399 enum bpf_access_type t, enum bpf_reg_type *reg_type,
2400 u32 *btf_id)
17a52670 2401{
f96da094
DB
2402 struct bpf_insn_access_aux info = {
2403 .reg_type = *reg_type,
9e15db66 2404 .log = &env->log,
f96da094 2405 };
31fd8581 2406
4f9218aa 2407 if (env->ops->is_valid_access &&
5e43f899 2408 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
f96da094
DB
2409		/* A non-zero info.ctx_field_size indicates that this field is a
2410 * candidate for later verifier transformation to load the whole
2411 * field and then apply a mask when accessed with a narrower
2412 * access than actual ctx access size. A zero info.ctx_field_size
2413 * will only allow for whole field access and rejects any other
2414 * type of narrower access.
31fd8581 2415 */
23994631 2416 *reg_type = info.reg_type;
31fd8581 2417
9e15db66
AS
2418 if (*reg_type == PTR_TO_BTF_ID)
2419 *btf_id = info.btf_id;
2420 else
2421 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
32bbe007
AS
2422 /* remember the offset of last byte accessed in ctx */
2423 if (env->prog->aux->max_ctx_offset < off + size)
2424 env->prog->aux->max_ctx_offset = off + size;
17a52670 2425 return 0;
32bbe007 2426 }
17a52670 2427
61bd5218 2428 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
17a52670
AS
2429 return -EACCES;
2430}
2431
d58e468b
PP
2432static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
2433 int size)
2434{
2435 if (size < 0 || off < 0 ||
2436 (u64)off + size > sizeof(struct bpf_flow_keys)) {
2437 verbose(env, "invalid access to flow keys off=%d size=%d\n",
2438 off, size);
2439 return -EACCES;
2440 }
2441 return 0;
2442}
2443
5f456649
MKL
2444static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
2445 u32 regno, int off, int size,
2446 enum bpf_access_type t)
c64b7983
JS
2447{
2448 struct bpf_reg_state *regs = cur_regs(env);
2449 struct bpf_reg_state *reg = &regs[regno];
5f456649 2450 struct bpf_insn_access_aux info = {};
46f8bc92 2451 bool valid;
c64b7983
JS
2452
2453 if (reg->smin_value < 0) {
2454 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2455 regno);
2456 return -EACCES;
2457 }
2458
46f8bc92
MKL
2459 switch (reg->type) {
2460 case PTR_TO_SOCK_COMMON:
2461 valid = bpf_sock_common_is_valid_access(off, size, t, &info);
2462 break;
2463 case PTR_TO_SOCKET:
2464 valid = bpf_sock_is_valid_access(off, size, t, &info);
2465 break;
655a51e5
MKL
2466 case PTR_TO_TCP_SOCK:
2467 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
2468 break;
fada7fdc
JL
2469 case PTR_TO_XDP_SOCK:
2470 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
2471 break;
46f8bc92
MKL
2472 default:
2473 valid = false;
c64b7983
JS
2474 }
2475
5f456649 2476
46f8bc92
MKL
2477 if (valid) {
2478 env->insn_aux_data[insn_idx].ctx_field_size =
2479 info.ctx_field_size;
2480 return 0;
2481 }
2482
2483 verbose(env, "R%d invalid %s access off=%d size=%d\n",
2484 regno, reg_type_str[reg->type], off, size);
2485
2486 return -EACCES;
c64b7983
JS
2487}
2488
4cabc5b1
DB
2489static bool __is_pointer_value(bool allow_ptr_leaks,
2490 const struct bpf_reg_state *reg)
1be7f75d 2491{
4cabc5b1 2492 if (allow_ptr_leaks)
1be7f75d
AS
2493 return false;
2494
f1174f77 2495 return reg->type != SCALAR_VALUE;
1be7f75d
AS
2496}
2497
2a159c6f
DB
2498static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
2499{
2500 return cur_regs(env) + regno;
2501}
2502
4cabc5b1
DB
2503static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
2504{
2a159c6f 2505 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4cabc5b1
DB
2506}
2507
f37a8cb8
DB
2508static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
2509{
2a159c6f 2510 const struct bpf_reg_state *reg = reg_state(env, regno);
f37a8cb8 2511
46f8bc92
MKL
2512 return reg->type == PTR_TO_CTX;
2513}
2514
2515static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
2516{
2517 const struct bpf_reg_state *reg = reg_state(env, regno);
2518
2519 return type_is_sk_pointer(reg->type);
f37a8cb8
DB
2520}
2521
ca369602
DB
2522static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
2523{
2a159c6f 2524 const struct bpf_reg_state *reg = reg_state(env, regno);
ca369602
DB
2525
2526 return type_is_pkt_pointer(reg->type);
2527}
2528
4b5defde
DB
2529static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
2530{
2531 const struct bpf_reg_state *reg = reg_state(env, regno);
2532
2533 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
2534 return reg->type == PTR_TO_FLOW_KEYS;
2535}
2536
61bd5218
JK
2537static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
2538 const struct bpf_reg_state *reg,
d1174416 2539 int off, int size, bool strict)
969bf05e 2540{
f1174f77 2541 struct tnum reg_off;
e07b98d9 2542 int ip_align;
d1174416
DM
2543
2544 /* Byte size accesses are always allowed. */
2545 if (!strict || size == 1)
2546 return 0;
2547
e4eda884
DM
2548 /* For platforms that do not have a Kconfig enabling
2549 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
2550 * NET_IP_ALIGN is universally set to '2'. And on platforms
2551 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
2552 * to this code only in strict mode where we want to emulate
2553 * the NET_IP_ALIGN==2 checking. Therefore use an
2554 * unconditional IP align value of '2'.
e07b98d9 2555 */
e4eda884 2556 ip_align = 2;
f1174f77
EC
2557
2558 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
2559 if (!tnum_is_aligned(reg_off, size)) {
2560 char tn_buf[48];
2561
2562 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218
JK
2563 verbose(env,
2564 "misaligned packet access off %d+%s+%d+%d size %d\n",
f1174f77 2565 ip_align, tn_buf, reg->off, off, size);
969bf05e
AS
2566 return -EACCES;
2567 }
79adffcd 2568
969bf05e
AS
2569 return 0;
2570}
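
/* Strict-mode example (illustrative, assuming a packet pointer with zero
 * fixed and variable offset): a 4-byte load at pkt + 14 (past an Ethernet
 * header) checks 2 + 14 = 16, which is 4-byte aligned and passes; a load
 * at pkt + 13 checks 15 and fails with "misaligned packet access".
 */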
2571
61bd5218
JK
2572static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
2573 const struct bpf_reg_state *reg,
f1174f77
EC
2574 const char *pointer_desc,
2575 int off, int size, bool strict)
79adffcd 2576{
f1174f77
EC
2577 struct tnum reg_off;
2578
2579 /* Byte size accesses are always allowed. */
2580 if (!strict || size == 1)
2581 return 0;
2582
2583 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
2584 if (!tnum_is_aligned(reg_off, size)) {
2585 char tn_buf[48];
2586
2587 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 2588 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
f1174f77 2589 pointer_desc, tn_buf, reg->off, off, size);
79adffcd
DB
2590 return -EACCES;
2591 }
2592
969bf05e
AS
2593 return 0;
2594}
2595
e07b98d9 2596static int check_ptr_alignment(struct bpf_verifier_env *env,
ca369602
DB
2597 const struct bpf_reg_state *reg, int off,
2598 int size, bool strict_alignment_once)
79adffcd 2599{
ca369602 2600 bool strict = env->strict_alignment || strict_alignment_once;
f1174f77 2601 const char *pointer_desc = "";
d1174416 2602
79adffcd
DB
2603 switch (reg->type) {
2604 case PTR_TO_PACKET:
de8f3a83
DB
2605 case PTR_TO_PACKET_META:
2606 /* Special case, because of NET_IP_ALIGN. Given metadata sits
2607 * right in front, treat it the very same way.
2608 */
61bd5218 2609 return check_pkt_ptr_alignment(env, reg, off, size, strict);
d58e468b
PP
2610 case PTR_TO_FLOW_KEYS:
2611 pointer_desc = "flow keys ";
2612 break;
f1174f77
EC
2613 case PTR_TO_MAP_VALUE:
2614 pointer_desc = "value ";
2615 break;
2616 case PTR_TO_CTX:
2617 pointer_desc = "context ";
2618 break;
2619 case PTR_TO_STACK:
2620 pointer_desc = "stack ";
a5ec6ae1
JH
2621 /* The stack spill tracking logic in check_stack_write()
2622 * and check_stack_read() relies on stack accesses being
2623 * aligned.
2624 */
2625 strict = true;
f1174f77 2626 break;
c64b7983
JS
2627 case PTR_TO_SOCKET:
2628 pointer_desc = "sock ";
2629 break;
46f8bc92
MKL
2630 case PTR_TO_SOCK_COMMON:
2631 pointer_desc = "sock_common ";
2632 break;
655a51e5
MKL
2633 case PTR_TO_TCP_SOCK:
2634 pointer_desc = "tcp_sock ";
2635 break;
fada7fdc
JL
2636 case PTR_TO_XDP_SOCK:
2637 pointer_desc = "xdp_sock ";
2638 break;
79adffcd 2639 default:
f1174f77 2640 break;
79adffcd 2641 }
61bd5218
JK
2642 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
2643 strict);
79adffcd
DB
2644}
2645
f4d7e40a
AS
2646static int update_stack_depth(struct bpf_verifier_env *env,
2647 const struct bpf_func_state *func,
2648 int off)
2649{
9c8105bd 2650 u16 stack = env->subprog_info[func->subprogno].stack_depth;
f4d7e40a
AS
2651
2652 if (stack >= -off)
2653 return 0;
2654
2655 /* update known max for given subprogram */
9c8105bd 2656 env->subprog_info[func->subprogno].stack_depth = -off;
70a87ffe
AS
2657 return 0;
2658}
f4d7e40a 2659
70a87ffe
AS
2660/* starting from the main bpf function, walk all instructions of the function
2661 * and recursively walk all callees that the given function can call.
2662 * Ignore jump and exit insns.
2663 * Since recursion is prevented by check_cfg(), this algorithm
2664 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
2665 */
2666static int check_max_stack_depth(struct bpf_verifier_env *env)
2667{
9c8105bd
JW
2668 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
2669 struct bpf_subprog_info *subprog = env->subprog_info;
70a87ffe 2670 struct bpf_insn *insn = env->prog->insnsi;
70a87ffe
AS
2671 int ret_insn[MAX_CALL_FRAMES];
2672 int ret_prog[MAX_CALL_FRAMES];
f4d7e40a 2673
70a87ffe
AS
2674process_func:
2675 /* round up to 32-bytes, since this is granularity
2676 * of interpreter stack size
2677 */
9c8105bd 2678 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe 2679 if (depth > MAX_BPF_STACK) {
f4d7e40a 2680 verbose(env, "combined stack size of %d calls is %d. Too large\n",
70a87ffe 2681 frame + 1, depth);
f4d7e40a
AS
2682 return -EACCES;
2683 }
70a87ffe 2684continue_func:
4cb3d99c 2685 subprog_end = subprog[idx + 1].start;
70a87ffe
AS
2686 for (; i < subprog_end; i++) {
2687 if (insn[i].code != (BPF_JMP | BPF_CALL))
2688 continue;
2689 if (insn[i].src_reg != BPF_PSEUDO_CALL)
2690 continue;
2691 /* remember insn and function to return to */
2692 ret_insn[frame] = i + 1;
9c8105bd 2693 ret_prog[frame] = idx;
70a87ffe
AS
2694
2695 /* find the callee */
2696 i = i + insn[i].imm + 1;
9c8105bd
JW
2697 idx = find_subprog(env, i);
2698 if (idx < 0) {
70a87ffe
AS
2699 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2700 i);
2701 return -EFAULT;
2702 }
70a87ffe
AS
2703 frame++;
2704 if (frame >= MAX_CALL_FRAMES) {
927cb781
PC
2705 verbose(env, "the call stack of %d frames is too deep !\n",
2706 frame);
2707 return -E2BIG;
70a87ffe
AS
2708 }
2709 goto process_func;
2710 }
2711 /* end of for() loop means the last insn of the 'subprog'
2712 * was reached. Doesn't matter whether it was JA or EXIT
2713 */
2714 if (frame == 0)
2715 return 0;
9c8105bd 2716 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe
AS
2717 frame--;
2718 i = ret_insn[frame];
9c8105bd 2719 idx = ret_prog[frame];
70a87ffe 2720 goto continue_func;
f4d7e40a
AS
2721}
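
/* Depth accounting example (editor's sketch): if main uses 64 bytes of
 * stack and calls a subprog that uses 40, the path is checked as
 * round_up(64, 32) + round_up(40, 32) == 64 + 64 == 128, well under
 * MAX_BPF_STACK (512).
 */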
2722
19d28fbd 2723#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
2724static int get_callee_stack_depth(struct bpf_verifier_env *env,
2725 const struct bpf_insn *insn, int idx)
2726{
2727 int start = idx + insn->imm + 1, subprog;
2728
2729 subprog = find_subprog(env, start);
2730 if (subprog < 0) {
2731 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2732 start);
2733 return -EFAULT;
2734 }
9c8105bd 2735 return env->subprog_info[subprog].stack_depth;
1ea47e01 2736}
19d28fbd 2737#endif
1ea47e01 2738
51c39bb1
AS
2739int check_ctx_reg(struct bpf_verifier_env *env,
2740 const struct bpf_reg_state *reg, int regno)
58990d1f
DB
2741{
2742 /* Access to ctx or passing it to a helper is only allowed in
2743 * its original, unmodified form.
2744 */
2745
2746 if (reg->off) {
2747 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
2748 regno, reg->off);
2749 return -EACCES;
2750 }
2751
2752 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2753 char tn_buf[48];
2754
2755 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2756 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
2757 return -EACCES;
2758 }
2759
2760 return 0;
2761}
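
/* Example of what this rejects (hypothetical fragment):
 *	r1 += 8			// modified ctx pointer, reg->off == 8
 *	r2 = *(u32 *)(r1 +0)	// -> "dereference of modified ctx ptr R1
 *				//     off=8 disallowed"
 */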
2762
9df1c28b
MM
2763static int check_tp_buffer_access(struct bpf_verifier_env *env,
2764 const struct bpf_reg_state *reg,
2765 int regno, int off, int size)
2766{
2767 if (off < 0) {
2768 verbose(env,
2769 "R%d invalid tracepoint buffer access: off=%d, size=%d",
2770 regno, off, size);
2771 return -EACCES;
2772 }
2773 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2774 char tn_buf[48];
2775
2776 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2777 verbose(env,
2778 "R%d invalid variable buffer offset: off=%d, var_off=%s",
2779 regno, off, tn_buf);
2780 return -EACCES;
2781 }
2782 if (off + size > env->prog->aux->max_tp_access)
2783 env->prog->aux->max_tp_access = off + size;
2784
2785 return 0;
2786}
2787
2788
0c17d1d2
JH
2789/* truncate register to smaller size (in bytes)
2790 * must be called with size < BPF_REG_SIZE
2791 */
2792static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
2793{
2794 u64 mask;
2795
2796 /* clear high bits in bit representation */
2797 reg->var_off = tnum_cast(reg->var_off, size);
2798
2799 /* fix arithmetic bounds */
2800 mask = ((u64)1 << (size * 8)) - 1;
2801 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
2802 reg->umin_value &= mask;
2803 reg->umax_value &= mask;
2804 } else {
2805 reg->umin_value = 0;
2806 reg->umax_value = mask;
2807 }
2808 reg->smin_value = reg->umin_value;
2809 reg->smax_value = reg->umax_value;
2810}
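
/* Worked example (illustrative): coercing a register known to lie in
 * [0, 0x1ffff] to size == 2 keeps only the low 16 bits. Here
 * (umin & ~mask) != (umax & ~mask) with mask == 0xffff, so the bounds
 * collapse to the conservative [0, 0xffff].
 */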
2811
a23740ec
AN
2812static bool bpf_map_is_rdonly(const struct bpf_map *map)
2813{
2814 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
2815}
2816
2817static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
2818{
2819 void *ptr;
2820 u64 addr;
2821 int err;
2822
2823 err = map->ops->map_direct_value_addr(map, &addr, off);
2824 if (err)
2825 return err;
2dedd7d2 2826 ptr = (void *)(long)addr + off;
a23740ec
AN
2827
2828 switch (size) {
2829 case sizeof(u8):
2830 *val = (u64)*(u8 *)ptr;
2831 break;
2832 case sizeof(u16):
2833 *val = (u64)*(u16 *)ptr;
2834 break;
2835 case sizeof(u32):
2836 *val = (u64)*(u32 *)ptr;
2837 break;
2838 case sizeof(u64):
2839 *val = *(u64 *)ptr;
2840 break;
2841 default:
2842 return -EINVAL;
2843 }
2844 return 0;
2845}
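
/* Sketch of the caller's pattern (see check_mem_access() below): a read
 * from a frozen read-only map at a constant offset is folded into a known
 * scalar, roughly:
 *
 *	u64 val = 0;
 *	if (bpf_map_is_rdonly(map) && map->ops->map_direct_value_addr &&
 *	    !bpf_map_direct_read(map, map_off, size, &val))
 *		__mark_reg_known(&regs[value_regno], val);
 */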
2846
9e15db66
AS
2847static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
2848 struct bpf_reg_state *regs,
2849 int regno, int off, int size,
2850 enum bpf_access_type atype,
2851 int value_regno)
2852{
2853 struct bpf_reg_state *reg = regs + regno;
2854 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
2855 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
2856 u32 btf_id;
2857 int ret;
2858
9e15db66
AS
2859 if (off < 0) {
2860 verbose(env,
2861 "R%d is ptr_%s invalid negative access: off=%d\n",
2862 regno, tname, off);
2863 return -EACCES;
2864 }
2865 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2866 char tn_buf[48];
2867
2868 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2869 verbose(env,
2870 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
2871 regno, tname, off, tn_buf);
2872 return -EACCES;
2873 }
2874
27ae7997
MKL
2875 if (env->ops->btf_struct_access) {
2876 ret = env->ops->btf_struct_access(&env->log, t, off, size,
2877 atype, &btf_id);
2878 } else {
2879 if (atype != BPF_READ) {
2880 verbose(env, "only read is supported\n");
2881 return -EACCES;
2882 }
2883
2884 ret = btf_struct_access(&env->log, t, off, size, atype,
2885 &btf_id);
2886 }
2887
9e15db66
AS
2888 if (ret < 0)
2889 return ret;
2890
27ae7997
MKL
2891 if (atype == BPF_READ) {
2892 if (ret == SCALAR_VALUE) {
2893 mark_reg_unknown(env, regs, value_regno);
2894 return 0;
2895 }
2896 mark_reg_known_zero(env, regs, value_regno);
2897 regs[value_regno].type = PTR_TO_BTF_ID;
2898 regs[value_regno].btf_id = btf_id;
9e15db66 2899 }
27ae7997 2900
9e15db66
AS
2901 return 0;
2902}
2903
17a52670
AS
2904/* check whether memory at (regno + off) is accessible for t = (read | write)
2905 * if t==write, value_regno is a register whose value is stored into memory
2906 * if t==read, value_regno is a register which will receive the value from memory
2907 * if t==write && value_regno==-1, some unknown value is stored into memory
2908 * if t==read && value_regno==-1, don't care what we read from memory
2909 */
ca369602
DB
2910static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
2911 int off, int bpf_size, enum bpf_access_type t,
2912 int value_regno, bool strict_alignment_once)
17a52670 2913{
638f5b90
AS
2914 struct bpf_reg_state *regs = cur_regs(env);
2915 struct bpf_reg_state *reg = regs + regno;
f4d7e40a 2916 struct bpf_func_state *state;
17a52670
AS
2917 int size, err = 0;
2918
2919 size = bpf_size_to_bytes(bpf_size);
2920 if (size < 0)
2921 return size;
2922
f1174f77 2923 /* alignment checks will add in reg->off themselves */
ca369602 2924 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
969bf05e
AS
2925 if (err)
2926 return err;
17a52670 2927
f1174f77
EC
2928 /* for access checks, reg->off is just part of off */
2929 off += reg->off;
2930
2931 if (reg->type == PTR_TO_MAP_VALUE) {
1be7f75d
AS
2932 if (t == BPF_WRITE && value_regno >= 0 &&
2933 is_pointer_value(env, value_regno)) {
61bd5218 2934 verbose(env, "R%d leaks addr into map\n", value_regno);
1be7f75d
AS
2935 return -EACCES;
2936 }
591fe988
DB
2937 err = check_map_access_type(env, regno, off, size, t);
2938 if (err)
2939 return err;
9fd29c08 2940 err = check_map_access(env, regno, off, size, false);
a23740ec
AN
2941 if (!err && t == BPF_READ && value_regno >= 0) {
2942 struct bpf_map *map = reg->map_ptr;
2943
2944 /* if map is read-only, track its contents as scalars */
2945 if (tnum_is_const(reg->var_off) &&
2946 bpf_map_is_rdonly(map) &&
2947 map->ops->map_direct_value_addr) {
2948 int map_off = off + reg->var_off.value;
2949 u64 val = 0;
2950
2951 err = bpf_map_direct_read(map, map_off, size,
2952 &val);
2953 if (err)
2954 return err;
2955
2956 regs[value_regno].type = SCALAR_VALUE;
2957 __mark_reg_known(&regs[value_regno], val);
2958 } else {
2959 mark_reg_unknown(env, regs, value_regno);
2960 }
2961 }
1a0dc1ac 2962 } else if (reg->type == PTR_TO_CTX) {
f1174f77 2963 enum bpf_reg_type reg_type = SCALAR_VALUE;
9e15db66 2964 u32 btf_id = 0;
19de99f7 2965
1be7f75d
AS
2966 if (t == BPF_WRITE && value_regno >= 0 &&
2967 is_pointer_value(env, value_regno)) {
61bd5218 2968 verbose(env, "R%d leaks addr into ctx\n", value_regno);
1be7f75d
AS
2969 return -EACCES;
2970 }
f1174f77 2971
58990d1f
DB
2972 err = check_ctx_reg(env, reg, regno);
2973 if (err < 0)
2974 return err;
2975
9e15db66
AS
2976 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
2977 if (err)
2978 verbose_linfo(env, insn_idx, "; ");
969bf05e 2979 if (!err && t == BPF_READ && value_regno >= 0) {
f1174f77 2980 /* ctx access returns either a scalar, or a
de8f3a83
DB
2981 * PTR_TO_PACKET[_META,_END]. In the latter
2982 * case, we know the offset is zero.
f1174f77 2983 */
46f8bc92 2984 if (reg_type == SCALAR_VALUE) {
638f5b90 2985 mark_reg_unknown(env, regs, value_regno);
46f8bc92 2986 } else {
638f5b90 2987 mark_reg_known_zero(env, regs,
61bd5218 2988 value_regno);
46f8bc92
MKL
2989 if (reg_type_may_be_null(reg_type))
2990 regs[value_regno].id = ++env->id_gen;
5327ed3d
JW
2991 /* A load of ctx field could have different
2992 * actual load size with the one encoded in the
2993 * insn. When the dst is PTR, it is for sure not
2994 * a sub-register.
2995 */
2996 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
9e15db66
AS
2997 if (reg_type == PTR_TO_BTF_ID)
2998 regs[value_regno].btf_id = btf_id;
46f8bc92 2999 }
638f5b90 3000 regs[value_regno].type = reg_type;
969bf05e 3001 }
17a52670 3002
f1174f77 3003 } else if (reg->type == PTR_TO_STACK) {
f1174f77 3004 off += reg->var_off.value;
e4298d25
DB
3005 err = check_stack_access(env, reg, off, size);
3006 if (err)
3007 return err;
8726679a 3008
f4d7e40a
AS
3009 state = func(env, reg);
3010 err = update_stack_depth(env, state, off);
3011 if (err)
3012 return err;
8726679a 3013
638f5b90 3014 if (t == BPF_WRITE)
61bd5218 3015 err = check_stack_write(env, state, off, size,
af86ca4e 3016 value_regno, insn_idx);
638f5b90 3017 else
61bd5218
JK
3018 err = check_stack_read(env, state, off, size,
3019 value_regno);
de8f3a83 3020 } else if (reg_is_pkt_pointer(reg)) {
3a0af8fd 3021 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
61bd5218 3022 verbose(env, "cannot write into packet\n");
969bf05e
AS
3023 return -EACCES;
3024 }
4acf6c0b
BB
3025 if (t == BPF_WRITE && value_regno >= 0 &&
3026 is_pointer_value(env, value_regno)) {
61bd5218
JK
3027 verbose(env, "R%d leaks addr into packet\n",
3028 value_regno);
4acf6c0b
BB
3029 return -EACCES;
3030 }
9fd29c08 3031 err = check_packet_access(env, regno, off, size, false);
969bf05e 3032 if (!err && t == BPF_READ && value_regno >= 0)
638f5b90 3033 mark_reg_unknown(env, regs, value_regno);
d58e468b
PP
3034 } else if (reg->type == PTR_TO_FLOW_KEYS) {
3035 if (t == BPF_WRITE && value_regno >= 0 &&
3036 is_pointer_value(env, value_regno)) {
3037 verbose(env, "R%d leaks addr into flow keys\n",
3038 value_regno);
3039 return -EACCES;
3040 }
3041
3042 err = check_flow_keys_access(env, off, size);
3043 if (!err && t == BPF_READ && value_regno >= 0)
3044 mark_reg_unknown(env, regs, value_regno);
46f8bc92 3045 } else if (type_is_sk_pointer(reg->type)) {
c64b7983 3046 if (t == BPF_WRITE) {
46f8bc92
MKL
3047 verbose(env, "R%d cannot write into %s\n",
3048 regno, reg_type_str[reg->type]);
c64b7983
JS
3049 return -EACCES;
3050 }
5f456649 3051 err = check_sock_access(env, insn_idx, regno, off, size, t);
c64b7983
JS
3052 if (!err && value_regno >= 0)
3053 mark_reg_unknown(env, regs, value_regno);
9df1c28b
MM
3054 } else if (reg->type == PTR_TO_TP_BUFFER) {
3055 err = check_tp_buffer_access(env, reg, regno, off, size);
3056 if (!err && t == BPF_READ && value_regno >= 0)
3057 mark_reg_unknown(env, regs, value_regno);
9e15db66
AS
3058 } else if (reg->type == PTR_TO_BTF_ID) {
3059 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
3060 value_regno);
17a52670 3061 } else {
61bd5218
JK
3062 verbose(env, "R%d invalid mem access '%s'\n", regno,
3063 reg_type_str[reg->type]);
17a52670
AS
3064 return -EACCES;
3065 }
969bf05e 3066
f1174f77 3067 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
638f5b90 3068 regs[value_regno].type == SCALAR_VALUE) {
f1174f77 3069 /* b/h/w load zero-extends, mark upper bits as known 0 */
0c17d1d2 3070 coerce_reg_to_size(&regs[value_regno], size);
969bf05e 3071 }
17a52670
AS
3072 return err;
3073}
3074
31fd8581 3075static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
17a52670 3076{
17a52670
AS
3077 int err;
3078
3079 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
3080 insn->imm != 0) {
61bd5218 3081 verbose(env, "BPF_XADD uses reserved fields\n");
17a52670
AS
3082 return -EINVAL;
3083 }
3084
3085 /* check src1 operand */
dc503a8a 3086 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
3087 if (err)
3088 return err;
3089
3090 /* check src2 operand */
dc503a8a 3091 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
3092 if (err)
3093 return err;
3094
6bdf6abc 3095 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 3096 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
6bdf6abc
DB
3097 return -EACCES;
3098 }
3099
ca369602 3100 if (is_ctx_reg(env, insn->dst_reg) ||
4b5defde 3101 is_pkt_reg(env, insn->dst_reg) ||
46f8bc92
MKL
3102 is_flow_key_reg(env, insn->dst_reg) ||
3103 is_sk_reg(env, insn->dst_reg)) {
ca369602 3104 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
2a159c6f
DB
3105 insn->dst_reg,
3106 reg_type_str[reg_state(env, insn->dst_reg)->type]);
f37a8cb8
DB
3107 return -EACCES;
3108 }
3109
17a52670 3110 /* check whether atomic_add can read the memory */
31fd8581 3111 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3112 BPF_SIZE(insn->code), BPF_READ, -1, true);
17a52670
AS
3113 if (err)
3114 return err;
3115
3116 /* check whether atomic_add can write into the same memory */
31fd8581 3117 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3118 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
17a52670
AS
3119}
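
/* Accepted vs. rejected BPF_XADD examples (editor's illustration):
 *	(db) lock *(u64 *)(r2 +0) += r3	// ok when r2 is e.g. PTR_TO_MAP_VALUE
 *	(db) lock *(u64 *)(r1 +0) += r3	// rejected when r1 is PTR_TO_CTX:
 *					// "BPF_XADD stores into R1 ctx is not allowed"
 */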
3120
2011fccf
AI
3121static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
3122 int off, int access_size,
3123 bool zero_size_allowed)
3124{
3125 struct bpf_reg_state *reg = reg_state(env, regno);
3126
3127 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
3128 access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
3129 if (tnum_is_const(reg->var_off)) {
3130 verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
3131 regno, off, access_size);
3132 } else {
3133 char tn_buf[48];
3134
3135 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3136 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
3137 regno, tn_buf, access_size);
3138 }
3139 return -EACCES;
3140 }
3141 return 0;
3142}
3143
17a52670
AS
3144/* when register 'regno' is passed into function that will read 'access_size'
3145 * bytes from that pointer, make sure that it's within stack boundary
f1174f77
EC
3146 * and all elements of stack are initialized.
3147 * Unlike most pointer bounds-checking functions, this one doesn't take an
3148 * 'off' argument, so it has to add in reg->off itself.
17a52670 3149 */
58e2af8b 3150static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
435faee1
DB
3151 int access_size, bool zero_size_allowed,
3152 struct bpf_call_arg_meta *meta)
17a52670 3153{
2a159c6f 3154 struct bpf_reg_state *reg = reg_state(env, regno);
f4d7e40a 3155 struct bpf_func_state *state = func(env, reg);
f7cf25b2 3156 int err, min_off, max_off, i, j, slot, spi;
17a52670 3157
914cb781 3158 if (reg->type != PTR_TO_STACK) {
f1174f77 3159 /* Allow zero-byte read from NULL, regardless of pointer type */
8e2fe1d9 3160 if (zero_size_allowed && access_size == 0 &&
914cb781 3161 register_is_null(reg))
8e2fe1d9
DB
3162 return 0;
3163
61bd5218 3164 verbose(env, "R%d type=%s expected=%s\n", regno,
914cb781 3165 reg_type_str[reg->type],
8e2fe1d9 3166 reg_type_str[PTR_TO_STACK]);
17a52670 3167 return -EACCES;
8e2fe1d9 3168 }
17a52670 3169
2011fccf
AI
3170 if (tnum_is_const(reg->var_off)) {
3171 min_off = max_off = reg->var_off.value + reg->off;
3172 err = __check_stack_boundary(env, regno, min_off, access_size,
3173 zero_size_allowed);
3174 if (err)
3175 return err;
3176 } else {
088ec26d
AI
3177 /* Variable offset is prohibited for unprivileged mode for
3178 * simplicity since it requires corresponding support in
3179 * Spectre masking for stack ALU.
3180 * See also retrieve_ptr_limit().
3181 */
3182 if (!env->allow_ptr_leaks) {
3183 char tn_buf[48];
f1174f77 3184
088ec26d
AI
3185 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3186 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
3187 regno, tn_buf);
3188 return -EACCES;
3189 }
f2bcd05e
AI
3190		/* Only an initialized buffer on the stack may be accessed
3191		 * with a variable offset. With an uninitialized buffer it's hard to
3192		 * guarantee that the whole memory is marked as initialized on
3193		 * helper return, since the specific bounds are unknown, which may
3194		 * cause a leak of uninitialized stack memory.
3195 */
3196 if (meta && meta->raw_mode)
3197 meta = NULL;
3198
107c26a7
AI
3199 if (reg->smax_value >= BPF_MAX_VAR_OFF ||
3200 reg->smax_value <= -BPF_MAX_VAR_OFF) {
3201 verbose(env, "R%d unbounded indirect variable offset stack access\n",
3202 regno);
3203 return -EACCES;
3204 }
2011fccf 3205 min_off = reg->smin_value + reg->off;
107c26a7 3206 max_off = reg->smax_value + reg->off;
2011fccf
AI
3207 err = __check_stack_boundary(env, regno, min_off, access_size,
3208 zero_size_allowed);
107c26a7
AI
3209 if (err) {
3210 verbose(env, "R%d min value is outside of stack bound\n",
3211 regno);
2011fccf 3212 return err;
107c26a7 3213 }
2011fccf
AI
3214 err = __check_stack_boundary(env, regno, max_off, access_size,
3215 zero_size_allowed);
107c26a7
AI
3216 if (err) {
3217 verbose(env, "R%d max value is outside of stack bound\n",
3218 regno);
2011fccf 3219 return err;
107c26a7 3220 }
17a52670
AS
3221 }
3222
435faee1
DB
3223 if (meta && meta->raw_mode) {
3224 meta->access_size = access_size;
3225 meta->regno = regno;
3226 return 0;
3227 }
3228
2011fccf 3229 for (i = min_off; i < max_off + access_size; i++) {
cc2b14d5
AS
3230 u8 *stype;
3231
2011fccf 3232 slot = -i - 1;
638f5b90 3233 spi = slot / BPF_REG_SIZE;
cc2b14d5
AS
3234 if (state->allocated_stack <= slot)
3235 goto err;
3236 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3237 if (*stype == STACK_MISC)
3238 goto mark;
3239 if (*stype == STACK_ZERO) {
3240 /* helper can write anything into the stack */
3241 *stype = STACK_MISC;
3242 goto mark;
17a52670 3243 }
f7cf25b2
AS
3244 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
3245 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
f54c7898 3246 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
f7cf25b2
AS
3247 for (j = 0; j < BPF_REG_SIZE; j++)
3248 state->stack[spi].slot_type[j] = STACK_MISC;
3249 goto mark;
3250 }
3251
cc2b14d5 3252err:
2011fccf
AI
3253 if (tnum_is_const(reg->var_off)) {
3254 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
3255 min_off, i - min_off, access_size);
3256 } else {
3257 char tn_buf[48];
3258
3259 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3260 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
3261 tn_buf, i - min_off, access_size);
3262 }
cc2b14d5
AS
3263 return -EACCES;
3264mark:
3265 /* reading any byte out of 8-byte 'spill_slot' will cause
3266 * the whole slot to be marked as 'read'
3267 */
679c782d 3268 mark_reg_read(env, &state->stack[spi].spilled_ptr,
5327ed3d
JW
3269 state->stack[spi].spilled_ptr.parent,
3270 REG_LIVE_READ64);
17a52670 3271 }
2011fccf 3272 return update_stack_depth(env, state, min_off);
17a52670
AS
3273}
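/* Illustrative sketch (not from the original source): a helper taking
 * ARG_PTR_TO_MEM + ARG_CONST_SIZE with the buffer at a known stack
 * offset, e.g.
 *
 *   BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -16),   // R1 = fp - 16
 *   BPF_MOV64_IMM(BPF_REG_2, 8),              // access_size = 8
 *
 * R1's var_off is constant, so min_off == max_off == -16 and the loop
 * above walks bytes fp-16..fp-9, requiring each to be STACK_MISC,
 * STACK_ZERO or part of a spilled scalar. In raw mode (uninit mem
 * args) the function returns before the loop and the slots are marked
 * written later from check_helper_call().
 */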
3274
06c1c049
GB
3275static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
3276 int access_size, bool zero_size_allowed,
3277 struct bpf_call_arg_meta *meta)
3278{
638f5b90 3279 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
06c1c049 3280
f1174f77 3281 switch (reg->type) {
06c1c049 3282 case PTR_TO_PACKET:
de8f3a83 3283 case PTR_TO_PACKET_META:
9fd29c08
YS
3284 return check_packet_access(env, regno, reg->off, access_size,
3285 zero_size_allowed);
06c1c049 3286 case PTR_TO_MAP_VALUE:
591fe988
DB
3287 if (check_map_access_type(env, regno, reg->off, access_size,
3288 meta && meta->raw_mode ? BPF_WRITE :
3289 BPF_READ))
3290 return -EACCES;
9fd29c08
YS
3291 return check_map_access(env, regno, reg->off, access_size,
3292 zero_size_allowed);
f1174f77 3293 default: /* scalar_value|ptr_to_stack or invalid ptr */
06c1c049
GB
3294 return check_stack_boundary(env, regno, access_size,
3295 zero_size_allowed, meta);
3296 }
3297}
3298
d83525ca
AS
3299/* Implementation details:
3300 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
3301 * Two bpf_map_lookups (even with the same key) will have different reg->id.
3302 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
3303 * value_or_null->value transition, since the verifier only cares about
3304 * the range of access to a valid map value pointer and doesn't care about the
3305 * actual address of the map element.
3306 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
3307 * reg->id > 0 after value_or_null->value transition. By doing so
3308 * two bpf_map_lookups will be considered two different pointers that
3309 * point to different bpf_spin_locks.
3310 * The verifier allows taking only one bpf_spin_lock at a time to avoid
3311 * dead-locks.
3312 * Since only one bpf_spin_lock is allowed the checks are simpler than
3313 * reg_is_refcounted() logic. The verifier needs to remember only
3314 * one spin_lock instead of array of acquired_refs.
3315 * cur_state->active_spin_lock remembers which map value element got locked
3316 * and clears it after bpf_spin_unlock.
3317 */
3318static int process_spin_lock(struct bpf_verifier_env *env, int regno,
3319 bool is_lock)
3320{
3321 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3322 struct bpf_verifier_state *cur = env->cur_state;
3323 bool is_const = tnum_is_const(reg->var_off);
3324 struct bpf_map *map = reg->map_ptr;
3325 u64 val = reg->var_off.value;
3326
3327 if (reg->type != PTR_TO_MAP_VALUE) {
3328 verbose(env, "R%d is not a pointer to map_value\n", regno);
3329 return -EINVAL;
3330 }
3331 if (!is_const) {
3332 verbose(env,
3333 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
3334 regno);
3335 return -EINVAL;
3336 }
3337 if (!map->btf) {
3338 verbose(env,
3339 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
3340 map->name);
3341 return -EINVAL;
3342 }
3343 if (!map_value_has_spin_lock(map)) {
3344 if (map->spin_lock_off == -E2BIG)
3345 verbose(env,
3346 "map '%s' has more than one 'struct bpf_spin_lock'\n",
3347 map->name);
3348 else if (map->spin_lock_off == -ENOENT)
3349 verbose(env,
3350 "map '%s' doesn't have 'struct bpf_spin_lock'\n",
3351 map->name);
3352 else
3353 verbose(env,
3354 "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
3355 map->name);
3356 return -EINVAL;
3357 }
3358 if (map->spin_lock_off != val + reg->off) {
3359 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
3360 val + reg->off);
3361 return -EINVAL;
3362 }
3363 if (is_lock) {
3364 if (cur->active_spin_lock) {
3365 verbose(env,
3366 "Locking two bpf_spin_locks are not allowed\n");
3367 return -EINVAL;
3368 }
3369 cur->active_spin_lock = reg->id;
3370 } else {
3371 if (!cur->active_spin_lock) {
3372 verbose(env, "bpf_spin_unlock without taking a lock\n");
3373 return -EINVAL;
3374 }
3375 if (cur->active_spin_lock != reg->id) {
3376 verbose(env, "bpf_spin_unlock of different lock\n");
3377 return -EINVAL;
3378 }
3379 cur->active_spin_lock = 0;
3380 }
3381 return 0;
3382}
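/* Example map value layout that satisfies the checks above:
 *
 *   struct val {
 *       int counter;
 *       struct bpf_spin_lock lock;   // exactly one, located via BTF
 *   };
 *
 * and a program fragment (sketch):
 *
 *   struct val *v = bpf_map_lookup_elem(&map, &key);
 *   if (v) {
 *       bpf_spin_lock(&v->lock);     // cur->active_spin_lock = reg->id
 *       v->counter++;
 *       bpf_spin_unlock(&v->lock);   // active_spin_lock cleared
 *   }
 *
 * A second bpf_spin_lock() before the unlock trips the "Locking two
 * bpf_spin_locks" error above, since only one lock may be held at a
 * time.
 */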
3383
90133415
DB
3384static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
3385{
3386 return type == ARG_PTR_TO_MEM ||
3387 type == ARG_PTR_TO_MEM_OR_NULL ||
3388 type == ARG_PTR_TO_UNINIT_MEM;
3389}
3390
3391static bool arg_type_is_mem_size(enum bpf_arg_type type)
3392{
3393 return type == ARG_CONST_SIZE ||
3394 type == ARG_CONST_SIZE_OR_ZERO;
3395}
3396
57c3bb72
AI
3397static bool arg_type_is_int_ptr(enum bpf_arg_type type)
3398{
3399 return type == ARG_PTR_TO_INT ||
3400 type == ARG_PTR_TO_LONG;
3401}
3402
3403static int int_ptr_type_to_size(enum bpf_arg_type type)
3404{
3405 if (type == ARG_PTR_TO_INT)
3406 return sizeof(u32);
3407 else if (type == ARG_PTR_TO_LONG)
3408 return sizeof(u64);
3409
3410 return -EINVAL;
3411}
3412
58e2af8b 3413static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
33ff9823
DB
3414 enum bpf_arg_type arg_type,
3415 struct bpf_call_arg_meta *meta)
17a52670 3416{
638f5b90 3417 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6841de8b 3418 enum bpf_reg_type expected_type, type = reg->type;
17a52670
AS
3419 int err = 0;
3420
80f1d68c 3421 if (arg_type == ARG_DONTCARE)
17a52670
AS
3422 return 0;
3423
dc503a8a
EC
3424 err = check_reg_arg(env, regno, SRC_OP);
3425 if (err)
3426 return err;
17a52670 3427
1be7f75d
AS
3428 if (arg_type == ARG_ANYTHING) {
3429 if (is_pointer_value(env, regno)) {
61bd5218
JK
3430 verbose(env, "R%d leaks addr into helper function\n",
3431 regno);
1be7f75d
AS
3432 return -EACCES;
3433 }
80f1d68c 3434 return 0;
1be7f75d 3435 }
80f1d68c 3436
de8f3a83 3437 if (type_is_pkt_pointer(type) &&
3a0af8fd 3438 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
61bd5218 3439 verbose(env, "helper access to the packet is not allowed\n");
6841de8b
AS
3440 return -EACCES;
3441 }
3442
8e2fe1d9 3443 if (arg_type == ARG_PTR_TO_MAP_KEY ||
2ea864c5 3444 arg_type == ARG_PTR_TO_MAP_VALUE ||
6ac99e8f
MKL
3445 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
3446 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
17a52670 3447 expected_type = PTR_TO_STACK;
6ac99e8f
MKL
3448 if (register_is_null(reg) &&
3449 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
3450 /* final test in check_stack_boundary() */;
3451 else if (!type_is_pkt_pointer(type) &&
3452 type != PTR_TO_MAP_VALUE &&
3453 type != expected_type)
6841de8b 3454 goto err_type;
39f19ebb
AS
3455 } else if (arg_type == ARG_CONST_SIZE ||
3456 arg_type == ARG_CONST_SIZE_OR_ZERO) {
f1174f77
EC
3457 expected_type = SCALAR_VALUE;
3458 if (type != expected_type)
6841de8b 3459 goto err_type;
17a52670
AS
3460 } else if (arg_type == ARG_CONST_MAP_PTR) {
3461 expected_type = CONST_PTR_TO_MAP;
6841de8b
AS
3462 if (type != expected_type)
3463 goto err_type;
608cd71a
AS
3464 } else if (arg_type == ARG_PTR_TO_CTX) {
3465 expected_type = PTR_TO_CTX;
6841de8b
AS
3466 if (type != expected_type)
3467 goto err_type;
58990d1f
DB
3468 err = check_ctx_reg(env, reg, regno);
3469 if (err < 0)
3470 return err;
46f8bc92
MKL
3471 } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
3472 expected_type = PTR_TO_SOCK_COMMON;
3473 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
3474 if (!type_is_sk_pointer(type))
3475 goto err_type;
1b986589
MKL
3476 if (reg->ref_obj_id) {
3477 if (meta->ref_obj_id) {
3478 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
3479 regno, reg->ref_obj_id,
3480 meta->ref_obj_id);
3481 return -EFAULT;
3482 }
3483 meta->ref_obj_id = reg->ref_obj_id;
fd978bf7 3484 }
6ac99e8f
MKL
3485 } else if (arg_type == ARG_PTR_TO_SOCKET) {
3486 expected_type = PTR_TO_SOCKET;
3487 if (type != expected_type)
3488 goto err_type;
a7658e1a
AS
3489 } else if (arg_type == ARG_PTR_TO_BTF_ID) {
3490 expected_type = PTR_TO_BTF_ID;
3491 if (type != expected_type)
3492 goto err_type;
3493 if (reg->btf_id != meta->btf_id) {
3494 verbose(env, "Helper has type %s got %s in R%d\n",
3495 kernel_type_name(meta->btf_id),
3496 kernel_type_name(reg->btf_id), regno);
3497
3498 return -EACCES;
3499 }
3500 if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) {
3501 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
3502 regno);
3503 return -EACCES;
3504 }
d83525ca
AS
3505 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
3506 if (meta->func_id == BPF_FUNC_spin_lock) {
3507 if (process_spin_lock(env, regno, true))
3508 return -EACCES;
3509 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
3510 if (process_spin_lock(env, regno, false))
3511 return -EACCES;
3512 } else {
3513 verbose(env, "verifier internal error\n");
3514 return -EFAULT;
3515 }
90133415 3516 } else if (arg_type_is_mem_ptr(arg_type)) {
8e2fe1d9
DB
3517 expected_type = PTR_TO_STACK;
3518 /* One exception here. In case the function allows for NULL to be
f1174f77 3519 * passed in as an argument, it's a SCALAR_VALUE type. The final test
8e2fe1d9
DB
3520 * happens during stack boundary checking.
3521 */
914cb781 3522 if (register_is_null(reg) &&
db1ac496 3523 arg_type == ARG_PTR_TO_MEM_OR_NULL)
6841de8b 3524 /* final test in check_stack_boundary() */;
de8f3a83
DB
3525 else if (!type_is_pkt_pointer(type) &&
3526 type != PTR_TO_MAP_VALUE &&
f1174f77 3527 type != expected_type)
6841de8b 3528 goto err_type;
39f19ebb 3529 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
57c3bb72
AI
3530 } else if (arg_type_is_int_ptr(arg_type)) {
3531 expected_type = PTR_TO_STACK;
3532 if (!type_is_pkt_pointer(type) &&
3533 type != PTR_TO_MAP_VALUE &&
3534 type != expected_type)
3535 goto err_type;
17a52670 3536 } else {
61bd5218 3537 verbose(env, "unsupported arg_type %d\n", arg_type);
17a52670
AS
3538 return -EFAULT;
3539 }
3540
17a52670
AS
3541 if (arg_type == ARG_CONST_MAP_PTR) {
3542 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
33ff9823 3543 meta->map_ptr = reg->map_ptr;
17a52670
AS
3544 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
3545 /* bpf_map_xxx(..., map_ptr, ..., key) call:
3546 * check that [key, key + map->key_size) are within
3547 * stack limits and initialized
3548 */
33ff9823 3549 if (!meta->map_ptr) {
17a52670
AS
3550 /* in function declaration map_ptr must come before
3551 * map_key, so that it's verified and known before
3552 * we have to check map_key here. Otherwise it means
3554 * that the kernel subsystem misconfigured the verifier
3554 */
61bd5218 3555 verbose(env, "invalid map_ptr to access map->key\n");
17a52670
AS
3556 return -EACCES;
3557 }
d71962f3
PC
3558 err = check_helper_mem_access(env, regno,
3559 meta->map_ptr->key_size, false,
3560 NULL);
2ea864c5 3561 } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
6ac99e8f
MKL
3562 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
3563 !register_is_null(reg)) ||
2ea864c5 3564 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
17a52670
AS
3565 /* bpf_map_xxx(..., map_ptr, ..., value) call:
3566 * check [value, value + map->value_size) validity
3567 */
33ff9823 3568 if (!meta->map_ptr) {
17a52670 3569 /* kernel subsystem misconfigured verifier */
61bd5218 3570 verbose(env, "invalid map_ptr to access map->value\n");
17a52670
AS
3571 return -EACCES;
3572 }
2ea864c5 3573 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
d71962f3
PC
3574 err = check_helper_mem_access(env, regno,
3575 meta->map_ptr->value_size, false,
2ea864c5 3576 meta);
90133415 3577 } else if (arg_type_is_mem_size(arg_type)) {
39f19ebb 3578 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
17a52670 3579
849fa506
YS
3580 /* remember the mem_size which may be used later
3581 * to refine return values.
3582 */
3583 meta->msize_smax_value = reg->smax_value;
3584 meta->msize_umax_value = reg->umax_value;
3585
f1174f77
EC
3586 /* The register is SCALAR_VALUE; the access check
3587 * happens using its boundaries.
06c1c049 3588 */
f1174f77 3589 if (!tnum_is_const(reg->var_off))
06c1c049
GB
3590 /* For unprivileged variable accesses, disable raw
3591 * mode so that the program is required to
3592 * initialize all the memory that the helper could
3593 * just partially fill up.
3594 */
3595 meta = NULL;
3596
b03c9f9f 3597 if (reg->smin_value < 0) {
61bd5218 3598 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
f1174f77
EC
3599 regno);
3600 return -EACCES;
3601 }
06c1c049 3602
b03c9f9f 3603 if (reg->umin_value == 0) {
f1174f77
EC
3604 err = check_helper_mem_access(env, regno - 1, 0,
3605 zero_size_allowed,
3606 meta);
06c1c049
GB
3607 if (err)
3608 return err;
06c1c049 3609 }
f1174f77 3610
b03c9f9f 3611 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
61bd5218 3612 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
f1174f77
EC
3613 regno);
3614 return -EACCES;
3615 }
3616 err = check_helper_mem_access(env, regno - 1,
b03c9f9f 3617 reg->umax_value,
f1174f77 3618 zero_size_allowed, meta);
b5dc0163
AS
3619 if (!err)
3620 err = mark_chain_precision(env, regno);
57c3bb72
AI
3621 } else if (arg_type_is_int_ptr(arg_type)) {
3622 int size = int_ptr_type_to_size(arg_type);
3623
3624 err = check_helper_mem_access(env, regno, size, false, meta);
3625 if (err)
3626 return err;
3627 err = check_ptr_alignment(env, reg, 0, size, true);
17a52670
AS
3628 }
3629
3630 return err;
6841de8b 3631err_type:
61bd5218 3632 verbose(env, "R%d type=%s expected=%s\n", regno,
6841de8b
AS
3633 reg_type_str[type], reg_type_str[expected_type]);
3634 return -EACCES;
17a52670
AS
3635}
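/* Worked example (sketch, selftest-style insn macros): the classic
 * bpf_map_lookup_elem(map, &key) sequence
 *
 *   BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),     // init key at fp-4
 *   BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),    // R2 = fp - 4
 *   BPF_LD_MAP_FD(BPF_REG_1, map_fd),         // R1 = CONST_PTR_TO_MAP
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *
 * check_func_arg() sees arg1 == ARG_CONST_MAP_PTR and records
 * meta->map_ptr, then arg2 == ARG_PTR_TO_MAP_KEY with R2 of type
 * PTR_TO_STACK, so [key, key + map->key_size) is verified via
 * check_helper_mem_access() against the initialized stack slot.
 */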
3636
61bd5218
JK
3637static int check_map_func_compatibility(struct bpf_verifier_env *env,
3638 struct bpf_map *map, int func_id)
35578d79 3639{
35578d79
KX
3640 if (!map)
3641 return 0;
3642
6aff67c8
AS
3643 /* We need a two way check, first is from map perspective ... */
3644 switch (map->map_type) {
3645 case BPF_MAP_TYPE_PROG_ARRAY:
3646 if (func_id != BPF_FUNC_tail_call)
3647 goto error;
3648 break;
3649 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
3650 if (func_id != BPF_FUNC_perf_event_read &&
908432ca 3651 func_id != BPF_FUNC_perf_event_output &&
a7658e1a 3652 func_id != BPF_FUNC_skb_output &&
d831ee84
EC
3653 func_id != BPF_FUNC_perf_event_read_value &&
3654 func_id != BPF_FUNC_xdp_output)
6aff67c8
AS
3655 goto error;
3656 break;
3657 case BPF_MAP_TYPE_STACK_TRACE:
3658 if (func_id != BPF_FUNC_get_stackid)
3659 goto error;
3660 break;
4ed8ec52 3661 case BPF_MAP_TYPE_CGROUP_ARRAY:
60747ef4 3662 if (func_id != BPF_FUNC_skb_under_cgroup &&
60d20f91 3663 func_id != BPF_FUNC_current_task_under_cgroup)
4a482f34
MKL
3664 goto error;
3665 break;
cd339431 3666 case BPF_MAP_TYPE_CGROUP_STORAGE:
b741f163 3667 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
cd339431
RG
3668 if (func_id != BPF_FUNC_get_local_storage)
3669 goto error;
3670 break;
546ac1ff 3671 case BPF_MAP_TYPE_DEVMAP:
6f9d451a 3672 case BPF_MAP_TYPE_DEVMAP_HASH:
0cdbb4b0
THJ
3673 if (func_id != BPF_FUNC_redirect_map &&
3674 func_id != BPF_FUNC_map_lookup_elem)
546ac1ff
JF
3675 goto error;
3676 break;
fbfc504a
BT
3677 /* Restrict bpf side of cpumap and xskmap, open when use-cases
3678 * appear.
3679 */
6710e112
JDB
3680 case BPF_MAP_TYPE_CPUMAP:
3681 if (func_id != BPF_FUNC_redirect_map)
3682 goto error;
3683 break;
fada7fdc
JL
3684 case BPF_MAP_TYPE_XSKMAP:
3685 if (func_id != BPF_FUNC_redirect_map &&
3686 func_id != BPF_FUNC_map_lookup_elem)
3687 goto error;
3688 break;
56f668df 3689 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
bcc6b1b7 3690 case BPF_MAP_TYPE_HASH_OF_MAPS:
56f668df
MKL
3691 if (func_id != BPF_FUNC_map_lookup_elem)
3692 goto error;
16a43625 3693 break;
174a79ff
JF
3694 case BPF_MAP_TYPE_SOCKMAP:
3695 if (func_id != BPF_FUNC_sk_redirect_map &&
3696 func_id != BPF_FUNC_sock_map_update &&
4f738adb 3697 func_id != BPF_FUNC_map_delete_elem &&
9fed9000
JS
3698 func_id != BPF_FUNC_msg_redirect_map &&
3699 func_id != BPF_FUNC_sk_select_reuseport)
174a79ff
JF
3700 goto error;
3701 break;
81110384
JF
3702 case BPF_MAP_TYPE_SOCKHASH:
3703 if (func_id != BPF_FUNC_sk_redirect_hash &&
3704 func_id != BPF_FUNC_sock_hash_update &&
3705 func_id != BPF_FUNC_map_delete_elem &&
9fed9000
JS
3706 func_id != BPF_FUNC_msg_redirect_hash &&
3707 func_id != BPF_FUNC_sk_select_reuseport)
81110384
JF
3708 goto error;
3709 break;
2dbb9b9e
MKL
3710 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
3711 if (func_id != BPF_FUNC_sk_select_reuseport)
3712 goto error;
3713 break;
f1a2e44a
MV
3714 case BPF_MAP_TYPE_QUEUE:
3715 case BPF_MAP_TYPE_STACK:
3716 if (func_id != BPF_FUNC_map_peek_elem &&
3717 func_id != BPF_FUNC_map_pop_elem &&
3718 func_id != BPF_FUNC_map_push_elem)
3719 goto error;
3720 break;
6ac99e8f
MKL
3721 case BPF_MAP_TYPE_SK_STORAGE:
3722 if (func_id != BPF_FUNC_sk_storage_get &&
3723 func_id != BPF_FUNC_sk_storage_delete)
3724 goto error;
3725 break;
6aff67c8
AS
3726 default:
3727 break;
3728 }
3729
3730 /* ... and second from the function itself. */
3731 switch (func_id) {
3732 case BPF_FUNC_tail_call:
3733 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
3734 goto error;
f910cefa 3735 if (env->subprog_cnt > 1) {
f4d7e40a
AS
3736 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
3737 return -EINVAL;
3738 }
6aff67c8
AS
3739 break;
3740 case BPF_FUNC_perf_event_read:
3741 case BPF_FUNC_perf_event_output:
908432ca 3742 case BPF_FUNC_perf_event_read_value:
a7658e1a 3743 case BPF_FUNC_skb_output:
d831ee84 3744 case BPF_FUNC_xdp_output:
6aff67c8
AS
3745 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
3746 goto error;
3747 break;
3748 case BPF_FUNC_get_stackid:
3749 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
3750 goto error;
3751 break;
60d20f91 3752 case BPF_FUNC_current_task_under_cgroup:
747ea55e 3753 case BPF_FUNC_skb_under_cgroup:
4a482f34
MKL
3754 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
3755 goto error;
3756 break;
97f91a7c 3757 case BPF_FUNC_redirect_map:
9c270af3 3758 if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
6f9d451a 3759 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
fbfc504a
BT
3760 map->map_type != BPF_MAP_TYPE_CPUMAP &&
3761 map->map_type != BPF_MAP_TYPE_XSKMAP)
97f91a7c
JF
3762 goto error;
3763 break;
174a79ff 3764 case BPF_FUNC_sk_redirect_map:
4f738adb 3765 case BPF_FUNC_msg_redirect_map:
81110384 3766 case BPF_FUNC_sock_map_update:
174a79ff
JF
3767 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
3768 goto error;
3769 break;
81110384
JF
3770 case BPF_FUNC_sk_redirect_hash:
3771 case BPF_FUNC_msg_redirect_hash:
3772 case BPF_FUNC_sock_hash_update:
3773 if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
174a79ff
JF
3774 goto error;
3775 break;
cd339431 3776 case BPF_FUNC_get_local_storage:
b741f163
RG
3777 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
3778 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
cd339431
RG
3779 goto error;
3780 break;
2dbb9b9e 3781 case BPF_FUNC_sk_select_reuseport:
9fed9000
JS
3782 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
3783 map->map_type != BPF_MAP_TYPE_SOCKMAP &&
3784 map->map_type != BPF_MAP_TYPE_SOCKHASH)
2dbb9b9e
MKL
3785 goto error;
3786 break;
f1a2e44a
MV
3787 case BPF_FUNC_map_peek_elem:
3788 case BPF_FUNC_map_pop_elem:
3789 case BPF_FUNC_map_push_elem:
3790 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
3791 map->map_type != BPF_MAP_TYPE_STACK)
3792 goto error;
3793 break;
6ac99e8f
MKL
3794 case BPF_FUNC_sk_storage_get:
3795 case BPF_FUNC_sk_storage_delete:
3796 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
3797 goto error;
3798 break;
6aff67c8
AS
3799 default:
3800 break;
35578d79
KX
3801 }
3802
3803 return 0;
6aff67c8 3804error:
61bd5218 3805 verbose(env, "cannot pass map_type %d into func %s#%d\n",
ebb676da 3806 map->map_type, func_id_name(func_id), func_id);
6aff67c8 3807 return -EINVAL;
35578d79
KX
3808}
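/* Both directions matter: a BPF_MAP_TYPE_PROG_ARRAY may only be used
 * with bpf_tail_call() (first switch), and bpf_tail_call() in turn
 * only accepts a PROG_ARRAY (second switch). So a sketch like
 *
 *   bpf_tail_call(ctx, &my_hash_map, idx);   // hypothetical hash map
 *
 * fails with roughly "cannot pass map_type 1 into func
 * bpf_tail_call#12".
 */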
3809
90133415 3810static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
435faee1
DB
3811{
3812 int count = 0;
3813
39f19ebb 3814 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3815 count++;
39f19ebb 3816 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3817 count++;
39f19ebb 3818 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3819 count++;
39f19ebb 3820 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3821 count++;
39f19ebb 3822 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
435faee1
DB
3823 count++;
3824
90133415
DB
3825 /* We only support one arg being in raw mode at the moment,
3826 * which is sufficient for the helper functions we have
3827 * right now.
3828 */
3829 return count <= 1;
3830}
3831
3832static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
3833 enum bpf_arg_type arg_next)
3834{
3835 return (arg_type_is_mem_ptr(arg_curr) &&
3836 !arg_type_is_mem_size(arg_next)) ||
3837 (!arg_type_is_mem_ptr(arg_curr) &&
3838 arg_type_is_mem_size(arg_next));
3839}
3840
3841static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
3842{
3843 /* bpf_xxx(..., buf, len) call will access 'len'
3844 * bytes from memory 'buf'. Both arg types need
3845 * to be paired, so make sure there's no buggy
3846 * helper function specification.
3847 */
3848 if (arg_type_is_mem_size(fn->arg1_type) ||
3849 arg_type_is_mem_ptr(fn->arg5_type) ||
3850 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
3851 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
3852 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
3853 check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
3854 return false;
3855
3856 return true;
3857}
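/* Example of a well-formed proto under the pairing rule above
 * (sketch; bpf_foo is hypothetical):
 *
 *   static const struct bpf_func_proto bpf_foo_proto = {
 *       .func      = bpf_foo,
 *       .gpl_only  = false,
 *       .ret_type  = RET_INTEGER,
 *       .arg1_type = ARG_PTR_TO_UNINIT_MEM,  // buf
 *       .arg2_type = ARG_CONST_SIZE,         // len, must follow the mem ptr
 *   };
 *
 * A mem pointer not followed by a size arg (or a size arg without a
 * preceding mem pointer) makes check_args_pair_invalid() true and the
 * proto is rejected.
 */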
3858
1b986589 3859static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
fd978bf7
JS
3860{
3861 int count = 0;
3862
1b986589 3863 if (arg_type_may_be_refcounted(fn->arg1_type))
fd978bf7 3864 count++;
1b986589 3865 if (arg_type_may_be_refcounted(fn->arg2_type))
fd978bf7 3866 count++;
1b986589 3867 if (arg_type_may_be_refcounted(fn->arg3_type))
fd978bf7 3868 count++;
1b986589 3869 if (arg_type_may_be_refcounted(fn->arg4_type))
fd978bf7 3870 count++;
1b986589 3871 if (arg_type_may_be_refcounted(fn->arg5_type))
fd978bf7
JS
3872 count++;
3873
1b986589
MKL
3874 /* A reference acquiring function cannot acquire
3875 * another refcounted ptr.
3876 */
3877 if (is_acquire_function(func_id) && count)
3878 return false;
3879
fd978bf7
JS
3880 /* We only support one arg being refcounted at the moment,
3881 * which is sufficient for the helper functions we have right now.
3882 */
3883 return count <= 1;
3884}
3885
1b986589 3886static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
90133415
DB
3887{
3888 return check_raw_mode_ok(fn) &&
fd978bf7 3889 check_arg_pair_ok(fn) &&
1b986589 3890 check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
435faee1
DB
3891}
3892
de8f3a83
DB
3893/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
3894 * are now invalid, so turn them into unknown SCALAR_VALUE.
f1174f77 3895 */
f4d7e40a
AS
3896static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
3897 struct bpf_func_state *state)
969bf05e 3898{
58e2af8b 3899 struct bpf_reg_state *regs = state->regs, *reg;
969bf05e
AS
3900 int i;
3901
3902 for (i = 0; i < MAX_BPF_REG; i++)
de8f3a83 3903 if (reg_is_pkt_pointer_any(&regs[i]))
61bd5218 3904 mark_reg_unknown(env, regs, i);
969bf05e 3905
f3709f69
JS
3906 bpf_for_each_spilled_reg(i, state, reg) {
3907 if (!reg)
969bf05e 3908 continue;
de8f3a83 3909 if (reg_is_pkt_pointer_any(reg))
f54c7898 3910 __mark_reg_unknown(env, reg);
969bf05e
AS
3911 }
3912}
3913
f4d7e40a
AS
3914static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
3915{
3916 struct bpf_verifier_state *vstate = env->cur_state;
3917 int i;
3918
3919 for (i = 0; i <= vstate->curframe; i++)
3920 __clear_all_pkt_pointers(env, vstate->frame[i]);
3921}
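/* Example: helpers for which bpf_helper_changes_pkt_data() is true
 * (e.g. bpf_skb_pull_data()) may reallocate the packet, so after
 *
 *   void *data = (void *)(long)skb->data;     // PTR_TO_PACKET
 *   bpf_skb_pull_data(skb, len);
 *
 * 'data' becomes an unknown scalar and the program must reload
 * skb->data/data_end and redo the bounds check before dereferencing.
 */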
3922
fd978bf7 3923static void release_reg_references(struct bpf_verifier_env *env,
1b986589
MKL
3924 struct bpf_func_state *state,
3925 int ref_obj_id)
fd978bf7
JS
3926{
3927 struct bpf_reg_state *regs = state->regs, *reg;
3928 int i;
3929
3930 for (i = 0; i < MAX_BPF_REG; i++)
1b986589 3931 if (regs[i].ref_obj_id == ref_obj_id)
fd978bf7
JS
3932 mark_reg_unknown(env, regs, i);
3933
3934 bpf_for_each_spilled_reg(i, state, reg) {
3935 if (!reg)
3936 continue;
1b986589 3937 if (reg->ref_obj_id == ref_obj_id)
f54c7898 3938 __mark_reg_unknown(env, reg);
fd978bf7
JS
3939 }
3940}
3941
3942/* The pointer with the specified id has released its reference to kernel
3943 * resources. Identify all copies of the same pointer and clear the reference.
3944 */
3945static int release_reference(struct bpf_verifier_env *env,
1b986589 3946 int ref_obj_id)
fd978bf7
JS
3947{
3948 struct bpf_verifier_state *vstate = env->cur_state;
1b986589 3949 int err;
fd978bf7
JS
3950 int i;
3951
1b986589
MKL
3952 err = release_reference_state(cur_func(env), ref_obj_id);
3953 if (err)
3954 return err;
3955
fd978bf7 3956 for (i = 0; i <= vstate->curframe; i++)
1b986589 3957 release_reg_references(env, vstate->frame[i], ref_obj_id);
fd978bf7 3958
1b986589 3959 return 0;
fd978bf7
JS
3960}
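/* Example lifetime of a referenced pointer (sketch):
 *
 *   struct bpf_sock *sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple),
 *                                           BPF_F_CURRENT_NETNS, 0);
 *   if (sk) {
 *       // sk and any copies of it share the same ref_obj_id
 *       bpf_sk_release(sk);   // release_reference() scrubs all copies
 *   }
 *
 * After the release, every register whose ref_obj_id matched is marked
 * unknown, so a later dereference of a stale copy is rejected.
 */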
3961
51c39bb1
AS
3962static void clear_caller_saved_regs(struct bpf_verifier_env *env,
3963 struct bpf_reg_state *regs)
3964{
3965 int i;
3966
3967 /* after the call registers r0 - r5 were scratched */
3968 for (i = 0; i < CALLER_SAVED_REGS; i++) {
3969 mark_reg_not_init(env, regs, caller_saved[i]);
3970 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
3971 }
3972}
3973
f4d7e40a
AS
3974static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
3975 int *insn_idx)
3976{
3977 struct bpf_verifier_state *state = env->cur_state;
51c39bb1 3978 struct bpf_func_info_aux *func_info_aux;
f4d7e40a 3979 struct bpf_func_state *caller, *callee;
fd978bf7 3980 int i, err, subprog, target_insn;
51c39bb1 3981 bool is_global = false;
f4d7e40a 3982
aada9ce6 3983 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
f4d7e40a 3984 verbose(env, "the call stack of %d frames is too deep\n",
aada9ce6 3985 state->curframe + 2);
f4d7e40a
AS
3986 return -E2BIG;
3987 }
3988
3989 target_insn = *insn_idx + insn->imm;
3990 subprog = find_subprog(env, target_insn + 1);
3991 if (subprog < 0) {
3992 verbose(env, "verifier bug. No program starts at insn %d\n",
3993 target_insn + 1);
3994 return -EFAULT;
3995 }
3996
3997 caller = state->frame[state->curframe];
3998 if (state->frame[state->curframe + 1]) {
3999 verbose(env, "verifier bug. Frame %d already allocated\n",
4000 state->curframe + 1);
4001 return -EFAULT;
4002 }
4003
51c39bb1
AS
4004 func_info_aux = env->prog->aux->func_info_aux;
4005 if (func_info_aux)
4006 is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
4007 err = btf_check_func_arg_match(env, subprog, caller->regs);
4008 if (err == -EFAULT)
4009 return err;
4010 if (is_global) {
4011 if (err) {
4012 verbose(env, "Caller passes invalid args into func#%d\n",
4013 subprog);
4014 return err;
4015 } else {
4016 if (env->log.level & BPF_LOG_LEVEL)
4017 verbose(env,
4018 "Func#%d is global and valid. Skipping.\n",
4019 subprog);
4020 clear_caller_saved_regs(env, caller->regs);
4021
4022 /* All global functions return SCALAR_VALUE */
4023 mark_reg_unknown(env, caller->regs, BPF_REG_0);
4024
4025 /* continue with next insn after call */
4026 return 0;
4027 }
4028 }
4029
f4d7e40a
AS
4030 callee = kzalloc(sizeof(*callee), GFP_KERNEL);
4031 if (!callee)
4032 return -ENOMEM;
4033 state->frame[state->curframe + 1] = callee;
4034
4035 /* callee cannot access r0, r6 - r9 for reading and has to write
4036 * into its own stack before reading from it.
4037 * callee can read/write into caller's stack
4038 */
4039 init_func_state(env, callee,
4040 /* remember the callsite, it will be used by bpf_exit */
4041 *insn_idx /* callsite */,
4042 state->curframe + 1 /* frameno within this callchain */,
f910cefa 4043 subprog /* subprog number within this prog */);
f4d7e40a 4044
fd978bf7
JS
4045 /* Transfer references to the callee */
4046 err = transfer_reference_state(callee, caller);
4047 if (err)
4048 return err;
4049
679c782d
EC
4050 /* copy r1 - r5 args that callee can access. The copy includes parent
4051 * pointers, which connects us up to the liveness chain
4052 */
f4d7e40a
AS
4053 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
4054 callee->regs[i] = caller->regs[i];
4055
51c39bb1 4056 clear_caller_saved_regs(env, caller->regs);
f4d7e40a
AS
4057
4058 /* only increment it after check_reg_arg() finished */
4059 state->curframe++;
4060
4061 /* and go analyze first insn of the callee */
4062 *insn_idx = target_insn;
4063
06ee7115 4064 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4065 verbose(env, "caller:\n");
4066 print_verifier_state(env, caller);
4067 verbose(env, "callee:\n");
4068 print_verifier_state(env, callee);
4069 }
4070 return 0;
4071}
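/* Sketch of a bpf-to-bpf call:
 *
 *   BPF_MOV64_IMM(BPF_REG_1, 1),
 *   BPF_CALL_REL(2),            // pseudo call into the subprog
 *   BPF_EXIT_INSN(),
 *   ...                         // subprog starts at target_insn + 1
 *
 * check_func_call() allocates a new bpf_func_state frame, copies
 * R1-R5 from the caller (R0 and R6-R9 are not readable by the
 * callee), and continues verification at the callee's first insn;
 * the frame depth is capped at MAX_CALL_FRAMES.
 */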
4072
4073static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
4074{
4075 struct bpf_verifier_state *state = env->cur_state;
4076 struct bpf_func_state *caller, *callee;
4077 struct bpf_reg_state *r0;
fd978bf7 4078 int err;
f4d7e40a
AS
4079
4080 callee = state->frame[state->curframe];
4081 r0 = &callee->regs[BPF_REG_0];
4082 if (r0->type == PTR_TO_STACK) {
4083 /* technically it's ok to return caller's stack pointer
4084 * (or caller's caller's pointer) back to the caller,
4085 * since these pointers are valid. Only current stack
4086 * pointer will be invalid as soon as function exits,
4087 * but let's be conservative
4088 */
4089 verbose(env, "cannot return stack pointer to the caller\n");
4090 return -EINVAL;
4091 }
4092
4093 state->curframe--;
4094 caller = state->frame[state->curframe];
4095 /* return to the caller whatever r0 had in the callee */
4096 caller->regs[BPF_REG_0] = *r0;
4097
fd978bf7
JS
4098 /* Transfer references to the caller */
4099 err = transfer_reference_state(caller, callee);
4100 if (err)
4101 return err;
4102
f4d7e40a 4103 *insn_idx = callee->callsite + 1;
06ee7115 4104 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4105 verbose(env, "returning from callee:\n");
4106 print_verifier_state(env, callee);
4107 verbose(env, "to caller at %d:\n", *insn_idx);
4108 print_verifier_state(env, caller);
4109 }
4110 /* clear everything in the callee */
4111 free_func_state(callee);
4112 state->frame[state->curframe + 1] = NULL;
4113 return 0;
4114}
4115
849fa506
YS
4116static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
4117 int func_id,
4118 struct bpf_call_arg_meta *meta)
4119{
4120 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
4121
4122 if (ret_type != RET_INTEGER ||
4123 (func_id != BPF_FUNC_get_stack &&
4124 func_id != BPF_FUNC_probe_read_str))
4125 return;
4126
4127 ret_reg->smax_value = meta->msize_smax_value;
4128 ret_reg->umax_value = meta->msize_umax_value;
4129 __reg_deduce_bounds(ret_reg);
4130 __reg_bound_offset(ret_reg);
4131}
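/* Example: for bpf_get_stack(ctx, buf, sizeof(buf), 0), the size arg
 * had umax_value == sizeof(buf), so here R0's max is clamped to
 * sizeof(buf) as well; a subsequent "if (ret > 0) use buf[0..ret-1]"
 * pattern then stays provably within the buffer.
 */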
4132
c93552c4
DB
4133static int
4134record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4135 int func_id, int insn_idx)
4136{
4137 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
591fe988 4138 struct bpf_map *map = meta->map_ptr;
c93552c4
DB
4139
4140 if (func_id != BPF_FUNC_tail_call &&
09772d92
DB
4141 func_id != BPF_FUNC_map_lookup_elem &&
4142 func_id != BPF_FUNC_map_update_elem &&
f1a2e44a
MV
4143 func_id != BPF_FUNC_map_delete_elem &&
4144 func_id != BPF_FUNC_map_push_elem &&
4145 func_id != BPF_FUNC_map_pop_elem &&
4146 func_id != BPF_FUNC_map_peek_elem)
c93552c4 4147 return 0;
09772d92 4148
591fe988 4149 if (map == NULL) {
c93552c4
DB
4150 verbose(env, "kernel subsystem misconfigured verifier\n");
4151 return -EINVAL;
4152 }
4153
591fe988
DB
4154 /* In case of read-only, some additional restrictions
4155 * need to be applied in order to prevent altering the
4156 * state of the map from program side.
4157 */
4158 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
4159 (func_id == BPF_FUNC_map_delete_elem ||
4160 func_id == BPF_FUNC_map_update_elem ||
4161 func_id == BPF_FUNC_map_push_elem ||
4162 func_id == BPF_FUNC_map_pop_elem)) {
4163 verbose(env, "write into map forbidden\n");
4164 return -EACCES;
4165 }
4166
d2e4c1e6 4167 if (!BPF_MAP_PTR(aux->map_ptr_state))
c93552c4
DB
4168 bpf_map_ptr_store(aux, meta->map_ptr,
4169 meta->map_ptr->unpriv_array);
d2e4c1e6 4170 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
c93552c4
DB
4171 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
4172 meta->map_ptr->unpriv_array);
4173 return 0;
4174}
4175
d2e4c1e6
DB
4176static int
4177record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4178 int func_id, int insn_idx)
4179{
4180 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
4181 struct bpf_reg_state *regs = cur_regs(env), *reg;
4182 struct bpf_map *map = meta->map_ptr;
4183 struct tnum range;
4184 u64 val;
cc52d914 4185 int err;
d2e4c1e6
DB
4186
4187 if (func_id != BPF_FUNC_tail_call)
4188 return 0;
4189 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
4190 verbose(env, "kernel subsystem misconfigured verifier\n");
4191 return -EINVAL;
4192 }
4193
4194 range = tnum_range(0, map->max_entries - 1);
4195 reg = &regs[BPF_REG_3];
4196
4197 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
4198 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4199 return 0;
4200 }
4201
cc52d914
DB
4202 err = mark_chain_precision(env, BPF_REG_3);
4203 if (err)
4204 return err;
4205
d2e4c1e6
DB
4206 val = reg->var_off.value;
4207 if (bpf_map_key_unseen(aux))
4208 bpf_map_key_store(aux, val);
4209 else if (!bpf_map_key_poisoned(aux) &&
4210 bpf_map_key_immediate(aux) != val)
4211 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4212 return 0;
4213}
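/* Example: if R3 (the tail call index) is provably constant, e.g.
 *
 *   BPF_MOV64_IMM(BPF_REG_3, 5),
 *
 * before the bpf_tail_call(), the key 5 is stored in insn_aux so a
 * later pass can turn the tail call into a direct jump; a
 * non-constant or conflicting key poisons the slot and keeps the
 * indirect path.
 */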
4214
fd978bf7
JS
4215static int check_reference_leak(struct bpf_verifier_env *env)
4216{
4217 struct bpf_func_state *state = cur_func(env);
4218 int i;
4219
4220 for (i = 0; i < state->acquired_refs; i++) {
4221 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
4222 state->refs[i].id, state->refs[i].insn_idx);
4223 }
4224 return state->acquired_refs ? -EINVAL : 0;
4225}
4226
f4d7e40a 4227static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
17a52670 4228{
17a52670 4229 const struct bpf_func_proto *fn = NULL;
638f5b90 4230 struct bpf_reg_state *regs;
33ff9823 4231 struct bpf_call_arg_meta meta;
969bf05e 4232 bool changes_data;
17a52670
AS
4233 int i, err;
4234
4235 /* find function prototype */
4236 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
61bd5218
JK
4237 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
4238 func_id);
17a52670
AS
4239 return -EINVAL;
4240 }
4241
00176a34 4242 if (env->ops->get_func_proto)
5e43f899 4243 fn = env->ops->get_func_proto(func_id, env->prog);
17a52670 4244 if (!fn) {
61bd5218
JK
4245 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
4246 func_id);
17a52670
AS
4247 return -EINVAL;
4248 }
4249
4250 /* eBPF programs must be GPL compatible to use GPL-ed functions */
24701ece 4251 if (!env->prog->gpl_compatible && fn->gpl_only) {
3fe2867c 4252 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
17a52670
AS
4253 return -EINVAL;
4254 }
4255
04514d13 4256 /* With LD_ABS/IND some JITs save/restore skb from r1. */
17bedab2 4257 changes_data = bpf_helper_changes_pkt_data(fn->func);
04514d13
DB
4258 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
4259 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
4260 func_id_name(func_id), func_id);
4261 return -EINVAL;
4262 }
969bf05e 4263
33ff9823 4264 memset(&meta, 0, sizeof(meta));
36bbef52 4265 meta.pkt_access = fn->pkt_access;
33ff9823 4266
1b986589 4267 err = check_func_proto(fn, func_id);
435faee1 4268 if (err) {
61bd5218 4269 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
ebb676da 4270 func_id_name(func_id), func_id);
435faee1
DB
4271 return err;
4272 }
4273
d83525ca 4274 meta.func_id = func_id;
17a52670 4275 /* check args */
a7658e1a 4276 for (i = 0; i < 5; i++) {
9cc31b3a
AS
4277 err = btf_resolve_helper_id(&env->log, fn, i);
4278 if (err > 0)
4279 meta.btf_id = err;
a7658e1a
AS
4280 err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);
4281 if (err)
4282 return err;
4283 }
17a52670 4284
c93552c4
DB
4285 err = record_func_map(env, &meta, func_id, insn_idx);
4286 if (err)
4287 return err;
4288
d2e4c1e6
DB
4289 err = record_func_key(env, &meta, func_id, insn_idx);
4290 if (err)
4291 return err;
4292
435faee1
DB
4293 /* Mark slots with STACK_MISC in case of raw mode, stack offset
4294 * is inferred from register state.
4295 */
4296 for (i = 0; i < meta.access_size; i++) {
ca369602
DB
4297 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
4298 BPF_WRITE, -1, false);
435faee1
DB
4299 if (err)
4300 return err;
4301 }
4302
fd978bf7
JS
4303 if (func_id == BPF_FUNC_tail_call) {
4304 err = check_reference_leak(env);
4305 if (err) {
4306 verbose(env, "tail_call would lead to reference leak\n");
4307 return err;
4308 }
4309 } else if (is_release_function(func_id)) {
1b986589 4310 err = release_reference(env, meta.ref_obj_id);
46f8bc92
MKL
4311 if (err) {
4312 verbose(env, "func %s#%d reference has not been acquired before\n",
4313 func_id_name(func_id), func_id);
fd978bf7 4314 return err;
46f8bc92 4315 }
fd978bf7
JS
4316 }
4317
638f5b90 4318 regs = cur_regs(env);
cd339431
RG
4319
4320 /* check that flags argument in get_local_storage(map, flags) is 0,
4321 * this is required because get_local_storage() can't return an error.
4322 */
4323 if (func_id == BPF_FUNC_get_local_storage &&
4324 !register_is_null(&regs[BPF_REG_2])) {
4325 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
4326 return -EINVAL;
4327 }
4328
17a52670 4329 /* reset caller saved regs */
dc503a8a 4330 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 4331 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
4332 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4333 }
17a52670 4334
5327ed3d
JW
4335 /* helper call returns 64-bit value. */
4336 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
4337
dc503a8a 4338 /* update return register (already marked as written above) */
17a52670 4339 if (fn->ret_type == RET_INTEGER) {
f1174f77 4340 /* sets type to SCALAR_VALUE */
61bd5218 4341 mark_reg_unknown(env, regs, BPF_REG_0);
17a52670
AS
4342 } else if (fn->ret_type == RET_VOID) {
4343 regs[BPF_REG_0].type = NOT_INIT;
3e6a4b3e
RG
4344 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
4345 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
f1174f77 4346 /* There is no offset yet applied, variable or fixed */
61bd5218 4347 mark_reg_known_zero(env, regs, BPF_REG_0);
17a52670
AS
4348 /* remember map_ptr, so that check_map_access()
4349 * can check 'value_size' boundary of memory access
4350 * to map element returned from bpf_map_lookup_elem()
4351 */
33ff9823 4352 if (meta.map_ptr == NULL) {
61bd5218
JK
4353 verbose(env,
4354 "kernel subsystem misconfigured verifier\n");
17a52670
AS
4355 return -EINVAL;
4356 }
33ff9823 4357 regs[BPF_REG_0].map_ptr = meta.map_ptr;
4d31f301
DB
4358 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
4359 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
e16d2f1a
AS
4360 if (map_value_has_spin_lock(meta.map_ptr))
4361 regs[BPF_REG_0].id = ++env->id_gen;
4d31f301
DB
4362 } else {
4363 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
4364 regs[BPF_REG_0].id = ++env->id_gen;
4365 }
c64b7983
JS
4366 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
4367 mark_reg_known_zero(env, regs, BPF_REG_0);
4368 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
0f3adc28 4369 regs[BPF_REG_0].id = ++env->id_gen;
85a51f8c
LB
4370 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
4371 mark_reg_known_zero(env, regs, BPF_REG_0);
4372 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
4373 regs[BPF_REG_0].id = ++env->id_gen;
655a51e5
MKL
4374 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
4375 mark_reg_known_zero(env, regs, BPF_REG_0);
4376 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
4377 regs[BPF_REG_0].id = ++env->id_gen;
17a52670 4378 } else {
61bd5218 4379 verbose(env, "unknown return type %d of func %s#%d\n",
ebb676da 4380 fn->ret_type, func_id_name(func_id), func_id);
17a52670
AS
4381 return -EINVAL;
4382 }
04fd61ab 4383
0f3adc28 4384 if (is_ptr_cast_function(func_id)) {
1b986589
MKL
4385 /* For release_reference() */
4386 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
0f3adc28
LB
4387 } else if (is_acquire_function(func_id)) {
4388 int id = acquire_reference_state(env, insn_idx);
4389
4390 if (id < 0)
4391 return id;
4392 /* For mark_ptr_or_null_reg() */
4393 regs[BPF_REG_0].id = id;
4394 /* For release_reference() */
4395 regs[BPF_REG_0].ref_obj_id = id;
4396 }
1b986589 4397
849fa506
YS
4398 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
4399
61bd5218 4400 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
35578d79
KX
4401 if (err)
4402 return err;
04fd61ab 4403
c195651e
YS
4404 if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
4405 const char *err_str;
4406
4407#ifdef CONFIG_PERF_EVENTS
4408 err = get_callchain_buffers(sysctl_perf_event_max_stack);
4409 err_str = "cannot get callchain buffer for func %s#%d\n";
4410#else
4411 err = -ENOTSUPP;
4412 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
4413#endif
4414 if (err) {
4415 verbose(env, err_str, func_id_name(func_id), func_id);
4416 return err;
4417 }
4418
4419 env->prog->has_callchain_buf = true;
4420 }
4421
969bf05e
AS
4422 if (changes_data)
4423 clear_all_pkt_pointers(env);
4424 return 0;
4425}
4426
b03c9f9f
EC
4427static bool signed_add_overflows(s64 a, s64 b)
4428{
4429 /* Do the add in u64, where overflow is well-defined */
4430 s64 res = (s64)((u64)a + (u64)b);
4431
4432 if (b < 0)
4433 return res > a;
4434 return res < a;
4435}
4436
4437static bool signed_sub_overflows(s64 a, s64 b)
4438{
4439 /* Do the sub in u64, where overflow is well-defined */
4440 s64 res = (s64)((u64)a - (u64)b);
4441
4442 if (b < 0)
4443 return res < a;
4444 return res > a;
969bf05e
AS
4445}
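/* Worked example: signed_add_overflows(S64_MAX, 1) computes
 * res = S64_MAX + 1 == S64_MIN under well-defined u64 wraparound;
 * since b > 0 and res < a, the overflow is reported. Symmetrically,
 * signed_sub_overflows(S64_MIN, 1) wraps to res == S64_MAX > a with
 * b > 0, so the underflow is caught as well.
 */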
4446
bb7f0f98
AS
4447static bool check_reg_sane_offset(struct bpf_verifier_env *env,
4448 const struct bpf_reg_state *reg,
4449 enum bpf_reg_type type)
4450{
4451 bool known = tnum_is_const(reg->var_off);
4452 s64 val = reg->var_off.value;
4453 s64 smin = reg->smin_value;
4454
4455 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
4456 verbose(env, "math between %s pointer and %lld is not allowed\n",
4457 reg_type_str[type], val);
4458 return false;
4459 }
4460
4461 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
4462 verbose(env, "%s pointer offset %d is not allowed\n",
4463 reg_type_str[type], reg->off);
4464 return false;
4465 }
4466
4467 if (smin == S64_MIN) {
4468 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
4469 reg_type_str[type]);
4470 return false;
4471 }
4472
4473 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
4474 verbose(env, "value %lld makes %s pointer be out of bounds\n",
4475 smin, reg_type_str[type]);
4476 return false;
4477 }
4478
4479 return true;
4480}
4481
979d63d5
DB
4482static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
4483{
4484 return &env->insn_aux_data[env->insn_idx];
4485}
4486
4487static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
4488 u32 *ptr_limit, u8 opcode, bool off_is_neg)
4489{
4490 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
4491 (opcode == BPF_SUB && !off_is_neg);
4492 u32 off;
4493
4494 switch (ptr_reg->type) {
4495 case PTR_TO_STACK:
088ec26d
AI
4496 /* Indirect variable offset stack access is prohibited in
4497 * unprivileged mode so it's not handled here.
4498 */
979d63d5
DB
4499 off = ptr_reg->off + ptr_reg->var_off.value;
4500 if (mask_to_left)
4501 *ptr_limit = MAX_BPF_STACK + off;
4502 else
4503 *ptr_limit = -off;
4504 return 0;
4505 case PTR_TO_MAP_VALUE:
4506 if (mask_to_left) {
4507 *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
4508 } else {
4509 off = ptr_reg->smin_value + ptr_reg->off;
4510 *ptr_limit = ptr_reg->map_ptr->value_size - off;
4511 }
4512 return 0;
4513 default:
4514 return -EINVAL;
4515 }
4516}
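/* Worked example (hypothetical values): for a PTR_TO_STACK with
 * off == -64 and a known var_off of 0, an ADD of a negative offset
 * masks to the left: *ptr_limit = MAX_BPF_STACK + (-64) == 448 bytes
 * of stack below the pointer. An ADD of a positive offset masks to
 * the right instead: *ptr_limit = -(-64) == 64 bytes up to the frame
 * pointer.
 */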
4517
d3bd7413
DB
4518static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
4519 const struct bpf_insn *insn)
4520{
4521 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
4522}
4523
4524static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
4525 u32 alu_state, u32 alu_limit)
4526{
4527 /* If we arrived here from different branches with different
4528 * state or limits to sanitize, then this won't work.
4529 */
4530 if (aux->alu_state &&
4531 (aux->alu_state != alu_state ||
4532 aux->alu_limit != alu_limit))
4533 return -EACCES;
4534
4535 /* Corresponding fixup done in fixup_bpf_calls(). */
4536 aux->alu_state = alu_state;
4537 aux->alu_limit = alu_limit;
4538 return 0;
4539}
4540
4541static int sanitize_val_alu(struct bpf_verifier_env *env,
4542 struct bpf_insn *insn)
4543{
4544 struct bpf_insn_aux_data *aux = cur_aux(env);
4545
4546 if (can_skip_alu_sanitation(env, insn))
4547 return 0;
4548
4549 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
4550}
4551
979d63d5
DB
4552static int sanitize_ptr_alu(struct bpf_verifier_env *env,
4553 struct bpf_insn *insn,
4554 const struct bpf_reg_state *ptr_reg,
4555 struct bpf_reg_state *dst_reg,
4556 bool off_is_neg)
4557{
4558 struct bpf_verifier_state *vstate = env->cur_state;
4559 struct bpf_insn_aux_data *aux = cur_aux(env);
4560 bool ptr_is_dst_reg = ptr_reg == dst_reg;
4561 u8 opcode = BPF_OP(insn->code);
4562 u32 alu_state, alu_limit;
4563 struct bpf_reg_state tmp;
4564 bool ret;
4565
d3bd7413 4566 if (can_skip_alu_sanitation(env, insn))
979d63d5
DB
4567 return 0;
4568
4569 /* We already marked aux for masking from non-speculative
4570 * paths, thus we got here in the first place. We only care
4571 * to explore bad access from here.
4572 */
4573 if (vstate->speculative)
4574 goto do_sim;
4575
4576 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
4577 alu_state |= ptr_is_dst_reg ?
4578 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
4579
4580 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
4581 return 0;
d3bd7413 4582 if (update_alu_sanitation_state(aux, alu_state, alu_limit))
979d63d5 4583 return -EACCES;
979d63d5
DB
4584do_sim:
4585 /* Simulate and find potential out-of-bounds access under
4586 * speculative execution from truncation as a result of
4587 * masking when off was not within expected range. If off
4588 * sits in dst, then we temporarily need to move ptr there
4589 * to simulate dst (== 0) +/-= ptr. Needed, for example,
4590 * for cases where we use K-based arithmetic in one direction
4591 * and truncated reg-based in the other in order to explore
4592 * bad access.
4593 */
4594 if (!ptr_is_dst_reg) {
4595 tmp = *dst_reg;
4596 *dst_reg = *ptr_reg;
4597 }
4598 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
0803278b 4599 if (!ptr_is_dst_reg && ret)
979d63d5
DB
4600 *dst_reg = tmp;
4601 return !ret ? -EFAULT : 0;
4602}
4603
f1174f77 4604/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
f1174f77
EC
4605 * Caller should also handle BPF_MOV case separately.
4606 * If we return -EACCES, caller may want to try again treating pointer as a
4607 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
4608 */
4609static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
4610 struct bpf_insn *insn,
4611 const struct bpf_reg_state *ptr_reg,
4612 const struct bpf_reg_state *off_reg)
969bf05e 4613{
f4d7e40a
AS
4614 struct bpf_verifier_state *vstate = env->cur_state;
4615 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4616 struct bpf_reg_state *regs = state->regs, *dst_reg;
f1174f77 4617 bool known = tnum_is_const(off_reg->var_off);
b03c9f9f
EC
4618 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
4619 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
4620 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
4621 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
9d7eceed 4622 u32 dst = insn->dst_reg, src = insn->src_reg;
969bf05e 4623 u8 opcode = BPF_OP(insn->code);
979d63d5 4624 int ret;
969bf05e 4625
f1174f77 4626 dst_reg = &regs[dst];
969bf05e 4627
6f16101e
DB
4628 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
4629 smin_val > smax_val || umin_val > umax_val) {
4630 /* Taint dst register if offset had invalid bounds derived from
4631 * e.g. dead branches.
4632 */
f54c7898 4633 __mark_reg_unknown(env, dst_reg);
6f16101e 4634 return 0;
f1174f77
EC
4635 }
4636
4637 if (BPF_CLASS(insn->code) != BPF_ALU64) {
4638 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
82abbf8d
AS
4639 verbose(env,
4640 "R%d 32-bit pointer arithmetic prohibited\n",
4641 dst);
f1174f77 4642 return -EACCES;
969bf05e
AS
4643 }
4644
aad2eeaf
JS
4645 switch (ptr_reg->type) {
4646 case PTR_TO_MAP_VALUE_OR_NULL:
4647 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
4648 dst, reg_type_str[ptr_reg->type]);
f1174f77 4649 return -EACCES;
aad2eeaf
JS
4650 case CONST_PTR_TO_MAP:
4651 case PTR_TO_PACKET_END:
c64b7983
JS
4652 case PTR_TO_SOCKET:
4653 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
4654 case PTR_TO_SOCK_COMMON:
4655 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
4656 case PTR_TO_TCP_SOCK:
4657 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 4658 case PTR_TO_XDP_SOCK:
aad2eeaf
JS
4659 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
4660 dst, reg_type_str[ptr_reg->type]);
f1174f77 4661 return -EACCES;
9d7eceed
DB
4662 case PTR_TO_MAP_VALUE:
4663 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
4664 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
4665 off_reg == dst_reg ? dst : src);
4666 return -EACCES;
4667 }
4668 /* fall-through */
aad2eeaf
JS
4669 default:
4670 break;
f1174f77
EC
4671 }
4672
4673 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
4674 * The id may be overwritten later if we create a new variable offset.
969bf05e 4675 */
f1174f77
EC
4676 dst_reg->type = ptr_reg->type;
4677 dst_reg->id = ptr_reg->id;
969bf05e 4678
bb7f0f98
AS
4679 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
4680 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
4681 return -EINVAL;
4682
f1174f77
EC
4683 switch (opcode) {
4684 case BPF_ADD:
979d63d5
DB
4685 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4686 if (ret < 0) {
4687 verbose(env, "R%d tried to add from different maps or paths\n", dst);
4688 return ret;
4689 }
f1174f77
EC
4690 /* We can take a fixed offset as long as it doesn't overflow
4691 * the s32 'off' field
969bf05e 4692 */
b03c9f9f
EC
4693 if (known && (ptr_reg->off + smin_val ==
4694 (s64)(s32)(ptr_reg->off + smin_val))) {
f1174f77 4695 /* pointer += K. Accumulate it into fixed offset */
b03c9f9f
EC
4696 dst_reg->smin_value = smin_ptr;
4697 dst_reg->smax_value = smax_ptr;
4698 dst_reg->umin_value = umin_ptr;
4699 dst_reg->umax_value = umax_ptr;
f1174f77 4700 dst_reg->var_off = ptr_reg->var_off;
b03c9f9f 4701 dst_reg->off = ptr_reg->off + smin_val;
0962590e 4702 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
4703 break;
4704 }
f1174f77
EC
4705 /* A new variable offset is created. Note that off_reg->off
4706 * == 0, since it's a scalar.
4707 * dst_reg gets the pointer type and since some positive
4708 * integer value was added to the pointer, give it a new 'id'
4709 * if it's a PTR_TO_PACKET.
4710 * this creates a new 'base' pointer, off_reg (variable) gets
4711 * added into the variable offset, and we copy the fixed offset
4712 * from ptr_reg.
969bf05e 4713 */
b03c9f9f
EC
4714 if (signed_add_overflows(smin_ptr, smin_val) ||
4715 signed_add_overflows(smax_ptr, smax_val)) {
4716 dst_reg->smin_value = S64_MIN;
4717 dst_reg->smax_value = S64_MAX;
4718 } else {
4719 dst_reg->smin_value = smin_ptr + smin_val;
4720 dst_reg->smax_value = smax_ptr + smax_val;
4721 }
4722 if (umin_ptr + umin_val < umin_ptr ||
4723 umax_ptr + umax_val < umax_ptr) {
4724 dst_reg->umin_value = 0;
4725 dst_reg->umax_value = U64_MAX;
4726 } else {
4727 dst_reg->umin_value = umin_ptr + umin_val;
4728 dst_reg->umax_value = umax_ptr + umax_val;
4729 }
f1174f77
EC
4730 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
4731 dst_reg->off = ptr_reg->off;
0962590e 4732 dst_reg->raw = ptr_reg->raw;
de8f3a83 4733 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
4734 dst_reg->id = ++env->id_gen;
4735 /* something was added to pkt_ptr, set range to zero */
0962590e 4736 dst_reg->raw = 0;
f1174f77
EC
4737 }
4738 break;
4739 case BPF_SUB:
979d63d5
DB
4740 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4741 if (ret < 0) {
4742 verbose(env, "R%d tried to sub from different maps or paths\n", dst);
4743 return ret;
4744 }
f1174f77
EC
4745 if (dst_reg == off_reg) {
4746 /* scalar -= pointer. Creates an unknown scalar */
82abbf8d
AS
4747 verbose(env, "R%d tried to subtract pointer from scalar\n",
4748 dst);
f1174f77
EC
4749 return -EACCES;
4750 }
4751 /* We don't allow subtraction from FP, because (according to
4752 * test_verifier.c test "invalid fp arithmetic", JITs might not
4753 * be able to deal with it.
969bf05e 4754 */
f1174f77 4755 if (ptr_reg->type == PTR_TO_STACK) {
82abbf8d
AS
4756 verbose(env, "R%d subtraction from stack pointer prohibited\n",
4757 dst);
f1174f77
EC
4758 return -EACCES;
4759 }
b03c9f9f
EC
4760 if (known && (ptr_reg->off - smin_val ==
4761 (s64)(s32)(ptr_reg->off - smin_val))) {
f1174f77 4762 /* pointer -= K. Subtract it from fixed offset */
b03c9f9f
EC
4763 dst_reg->smin_value = smin_ptr;
4764 dst_reg->smax_value = smax_ptr;
4765 dst_reg->umin_value = umin_ptr;
4766 dst_reg->umax_value = umax_ptr;
f1174f77
EC
4767 dst_reg->var_off = ptr_reg->var_off;
4768 dst_reg->id = ptr_reg->id;
b03c9f9f 4769 dst_reg->off = ptr_reg->off - smin_val;
0962590e 4770 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
4771 break;
4772 }
f1174f77
EC
4773 /* A new variable offset is created. If the subtrahend is known
4774 * nonnegative, then any reg->range we had before is still good.
969bf05e 4775 */
b03c9f9f
EC
4776 if (signed_sub_overflows(smin_ptr, smax_val) ||
4777 signed_sub_overflows(smax_ptr, smin_val)) {
4778 /* Overflow possible, we know nothing */
4779 dst_reg->smin_value = S64_MIN;
4780 dst_reg->smax_value = S64_MAX;
4781 } else {
4782 dst_reg->smin_value = smin_ptr - smax_val;
4783 dst_reg->smax_value = smax_ptr - smin_val;
4784 }
4785 if (umin_ptr < umax_val) {
4786 /* Overflow possible, we know nothing */
4787 dst_reg->umin_value = 0;
4788 dst_reg->umax_value = U64_MAX;
4789 } else {
4790 /* Cannot overflow (as long as bounds are consistent) */
4791 dst_reg->umin_value = umin_ptr - umax_val;
4792 dst_reg->umax_value = umax_ptr - umin_val;
4793 }
f1174f77
EC
4794 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
4795 dst_reg->off = ptr_reg->off;
0962590e 4796 dst_reg->raw = ptr_reg->raw;
de8f3a83 4797 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
4798 dst_reg->id = ++env->id_gen;
4799 /* pkt_ptr may have been advanced (smin_val < 0), set range to zero */
b03c9f9f 4800 if (smin_val < 0)
0962590e 4801 dst_reg->raw = 0;
43188702 4802 }
f1174f77
EC
4803 break;
4804 case BPF_AND:
4805 case BPF_OR:
4806 case BPF_XOR:
82abbf8d
AS
4807 /* bitwise ops on pointers are troublesome, prohibit. */
4808 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
4809 dst, bpf_alu_string[opcode >> 4]);
f1174f77
EC
4810 return -EACCES;
4811 default:
4812 /* other operators (e.g. MUL,LSH) produce non-pointer results */
82abbf8d
AS
4813 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
4814 dst, bpf_alu_string[opcode >> 4]);
f1174f77 4815 return -EACCES;
43188702
JF
4816 }
4817
bb7f0f98
AS
4818 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
4819 return -EINVAL;
4820
b03c9f9f
EC
4821 __update_reg_bounds(dst_reg);
4822 __reg_deduce_bounds(dst_reg);
4823 __reg_bound_offset(dst_reg);
0d6303db
DB
4824
4825 /* For unprivileged we require that resulting offset must be in bounds
4826 * in order to be able to sanitize access later on.
4827 */
e4298d25
DB
4828 if (!env->allow_ptr_leaks) {
4829 if (dst_reg->type == PTR_TO_MAP_VALUE &&
4830 check_map_access(env, dst, dst_reg->off, 1, false)) {
4831 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
4832 "prohibited for !root\n", dst);
4833 return -EACCES;
4834 } else if (dst_reg->type == PTR_TO_STACK &&
4835 check_stack_access(env, dst_reg, dst_reg->off +
4836 dst_reg->var_off.value, 1)) {
4837 verbose(env, "R%d stack pointer arithmetic goes out of range, "
4838 "prohibited for !root\n", dst);
4839 return -EACCES;
4840 }
0d6303db
DB
4841 }
4842
43188702
JF
4843 return 0;
4844}
4845
468f6eaf
JH
4846/* WARNING: This function does calculations on 64-bit values, but the actual
4847 * execution may occur on 32-bit values. Therefore, things like bitshifts
4848 * need extra checks in the 32-bit case.
4849 */
f1174f77
EC
4850static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
4851 struct bpf_insn *insn,
4852 struct bpf_reg_state *dst_reg,
4853 struct bpf_reg_state src_reg)
969bf05e 4854{
638f5b90 4855 struct bpf_reg_state *regs = cur_regs(env);
48461135 4856 u8 opcode = BPF_OP(insn->code);
f1174f77 4857 bool src_known, dst_known;
b03c9f9f
EC
4858 s64 smin_val, smax_val;
4859 u64 umin_val, umax_val;
468f6eaf 4860 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
d3bd7413
DB
4861 u32 dst = insn->dst_reg;
4862 int ret;
48461135 4863
b799207e
JH
4864 if (insn_bitness == 32) {
4865 /* Relevant for 32-bit RSH: Information can propagate towards
4866 * LSB, so it isn't sufficient to only truncate the output to
4867 * 32 bits.
4868 */
4869 coerce_reg_to_size(dst_reg, 4);
4870 coerce_reg_to_size(&src_reg, 4);
4871 }
4872
b03c9f9f
EC
4873 smin_val = src_reg.smin_value;
4874 smax_val = src_reg.smax_value;
4875 umin_val = src_reg.umin_value;
4876 umax_val = src_reg.umax_value;
f1174f77
EC
4877 src_known = tnum_is_const(src_reg.var_off);
4878 dst_known = tnum_is_const(dst_reg->var_off);
f23cc643 4879
6f16101e
DB
4880 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
4881 smin_val > smax_val || umin_val > umax_val) {
4882 /* Taint dst register if offset had invalid bounds derived from
4883 * e.g. dead branches.
4884 */
f54c7898 4885 __mark_reg_unknown(env, dst_reg);
6f16101e
DB
4886 return 0;
4887 }
4888
bb7f0f98
AS
4889 if (!src_known &&
4890 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
f54c7898 4891 __mark_reg_unknown(env, dst_reg);
bb7f0f98
AS
4892 return 0;
4893 }
4894
48461135
JB
4895 switch (opcode) {
4896 case BPF_ADD:
d3bd7413
DB
4897 ret = sanitize_val_alu(env, insn);
4898 if (ret < 0) {
4899 verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
4900 return ret;
4901 }
b03c9f9f
EC
4902 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
4903 signed_add_overflows(dst_reg->smax_value, smax_val)) {
4904 dst_reg->smin_value = S64_MIN;
4905 dst_reg->smax_value = S64_MAX;
4906 } else {
4907 dst_reg->smin_value += smin_val;
4908 dst_reg->smax_value += smax_val;
4909 }
4910 if (dst_reg->umin_value + umin_val < umin_val ||
4911 dst_reg->umax_value + umax_val < umax_val) {
4912 dst_reg->umin_value = 0;
4913 dst_reg->umax_value = U64_MAX;
4914 } else {
4915 dst_reg->umin_value += umin_val;
4916 dst_reg->umax_value += umax_val;
4917 }
f1174f77 4918 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
48461135
JB
4919 break;
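	/* e.g. (hypothetical bounds): dst umax_value == U64_MAX - 1 and
	 * umax_val == 3. The sum wraps around to 1 < 3, the check above
	 * fires, and we widen to [0, U64_MAX] instead of tracking a
	 * wrapped (and therefore meaningless) interval.
	 */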
4920 case BPF_SUB:
d3bd7413
DB
4921 ret = sanitize_val_alu(env, insn);
4922 if (ret < 0) {
4923 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
4924 return ret;
4925 }
b03c9f9f
EC
4926 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
4927 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
4928 /* Overflow possible, we know nothing */
4929 dst_reg->smin_value = S64_MIN;
4930 dst_reg->smax_value = S64_MAX;
4931 } else {
4932 dst_reg->smin_value -= smax_val;
4933 dst_reg->smax_value -= smin_val;
4934 }
4935 if (dst_reg->umin_value < umax_val) {
4936 /* Overflow possible, we know nothing */
4937 dst_reg->umin_value = 0;
4938 dst_reg->umax_value = U64_MAX;
4939 } else {
4940 /* Cannot overflow (as long as bounds are consistent) */
4941 dst_reg->umin_value -= umax_val;
4942 dst_reg->umax_value -= umin_val;
4943 }
f1174f77 4944 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
48461135
JB
4945 break;
4946 case BPF_MUL:
b03c9f9f
EC
4947 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
4948 if (smin_val < 0 || dst_reg->smin_value < 0) {
f1174f77 4949 /* Ain't nobody got time to multiply that sign */
b03c9f9f
EC
4950 __mark_reg_unbounded(dst_reg);
4951 __update_reg_bounds(dst_reg);
f1174f77
EC
4952 break;
4953 }
b03c9f9f
EC
4954 /* Both values are positive, so we can work with unsigned and
4955 * copy the result to signed (unless it exceeds S64_MAX).
f1174f77 4956 */
b03c9f9f
EC
4957 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
4958 /* Potential overflow, we know nothing */
4959 __mark_reg_unbounded(dst_reg);
4960 /* (except what we can learn from the var_off) */
4961 __update_reg_bounds(dst_reg);
4962 break;
4963 }
4964 dst_reg->umin_value *= umin_val;
4965 dst_reg->umax_value *= umax_val;
4966 if (dst_reg->umax_value > S64_MAX) {
4967 /* Overflow possible, we know nothing */
4968 dst_reg->smin_value = S64_MIN;
4969 dst_reg->smax_value = S64_MAX;
4970 } else {
4971 dst_reg->smin_value = dst_reg->umin_value;
4972 dst_reg->smax_value = dst_reg->umax_value;
4973 }
48461135
JB
4974 break;
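	/* e.g.: with both umax values <= U32_MAX, the products above are
	 * at most (2^32 - 1)^2 < 2^64 and cannot wrap; any larger
	 * operand was already given up on by the U32_MAX check.
	 */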
4975 case BPF_AND:
f1174f77 4976 if (src_known && dst_known) {
b03c9f9f
EC
4977 __mark_reg_known(dst_reg, dst_reg->var_off.value &
4978 src_reg.var_off.value);
f1174f77
EC
4979 break;
4980 }
b03c9f9f
EC
4981 /* We get our minimum from the var_off, since that's inherently
4982 * bitwise. Our maximum is the minimum of the operands' maxima.
f23cc643 4983 */
f1174f77 4984 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
b03c9f9f
EC
4985 dst_reg->umin_value = dst_reg->var_off.value;
4986 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
4987 if (dst_reg->smin_value < 0 || smin_val < 0) {
4988 /* Lose signed bounds when ANDing negative numbers,
4989 * ain't nobody got time for that.
4990 */
4991 dst_reg->smin_value = S64_MIN;
4992 dst_reg->smax_value = S64_MAX;
4993 } else {
4994 /* ANDing two positives gives a positive, so safe to
4995 * cast result into s64.
4996 */
4997 dst_reg->smin_value = dst_reg->umin_value;
4998 dst_reg->smax_value = dst_reg->umax_value;
4999 }
5000 /* We may learn something more from the var_off */
5001 __update_reg_bounds(dst_reg);
f1174f77
EC
5002 break;
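	/* e.g. (hypothetical): dst is known to be 0x10 or 0x11 (var_off
	 * value=0x10, mask=0x1) and src is the constant 0x13. The AND
	 * result is still 0x10 or 0x11, so umin_value becomes
	 * var_off.value == 0x10, while umax_value is the smaller of the
	 * operands' maxima.
	 */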
5003 case BPF_OR:
5004 if (src_known && dst_known) {
b03c9f9f
EC
5005 __mark_reg_known(dst_reg, dst_reg->var_off.value |
5006 src_reg.var_off.value);
f1174f77
EC
5007 break;
5008 }
b03c9f9f
EC
5009 /* We get our maximum from the var_off, and our minimum is the
5010 * maximum of the operands' minima
f1174f77
EC
5011 */
5012 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
b03c9f9f
EC
5013 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
5014 dst_reg->umax_value = dst_reg->var_off.value |
5015 dst_reg->var_off.mask;
5016 if (dst_reg->smin_value < 0 || smin_val < 0) {
5017 /* Lose signed bounds when ORing negative numbers,
5018 * ain't nobody got time for that.
5019 */
5020 dst_reg->smin_value = S64_MIN;
5021 dst_reg->smax_value = S64_MAX;
f1174f77 5022 } else {
b03c9f9f
EC
5023 /* ORing two positives gives a positive, so safe to
5024 * cast result into s64.
5025 */
5026 dst_reg->smin_value = dst_reg->umin_value;
5027 dst_reg->smax_value = dst_reg->umax_value;
f1174f77 5028 }
b03c9f9f
EC
5029 /* We may learn something more from the var_off */
5030 __update_reg_bounds(dst_reg);
48461135
JB
5031 break;
5032 case BPF_LSH:
468f6eaf
JH
5033 if (umax_val >= insn_bitness) {
5034 /* Shifts greater than 31 or 63 are undefined.
5035 * This includes shifts by a negative number.
b03c9f9f 5036 */
61bd5218 5037 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
5038 break;
5039 }
b03c9f9f
EC
5040 /* We lose all sign bit information (except what we can pick
5041 * up from var_off)
48461135 5042 */
b03c9f9f
EC
5043 dst_reg->smin_value = S64_MIN;
5044 dst_reg->smax_value = S64_MAX;
5045 /* If we might shift our top bit out, then we know nothing */
5046 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
5047 dst_reg->umin_value = 0;
5048 dst_reg->umax_value = U64_MAX;
d1174416 5049 } else {
b03c9f9f
EC
5050 dst_reg->umin_value <<= umin_val;
5051 dst_reg->umax_value <<= umax_val;
d1174416 5052 }
afbe1a5b 5053 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
b03c9f9f
EC
5054 /* We may learn something more from the var_off */
5055 __update_reg_bounds(dst_reg);
48461135
JB
5056 break;
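	/* e.g. (hypothetical): umax_value == 0x3 and umax_val == 62:
	 * 0x3 > (1ULL << 1), so a set bit could reach bit 63 and the
	 * check above conservatively blows the unsigned bounds to
	 * [0, U64_MAX]; only var_off (via tnum_lshift) is kept.
	 */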
5057 case BPF_RSH:
468f6eaf
JH
5058 if (umax_val >= insn_bitness) {
5059 /* Shifts greater than 31 or 63 are undefined.
5060 * This includes shifts by a negative number.
b03c9f9f 5061 */
61bd5218 5062 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
5063 break;
5064 }
4374f256
EC
5065 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
5066 * be negative, then either:
5067 * 1) src_reg might be zero, so the sign bit of the result is
5068 * unknown, so we lose our signed bounds
5069 * 2) it's known negative, thus the unsigned bounds capture the
5070 * signed bounds
5071 * 3) the signed bounds cross zero, so they tell us nothing
5072 * about the result
5073 * If the value in dst_reg is known nonnegative, then again the
5074 * unsigned bounds capture the signed bounds.
5075 * Thus, in all cases it suffices to blow away our signed bounds
5076 * and rely on inferring new ones from the unsigned bounds and
5077 * var_off of the result.
5078 */
5079 dst_reg->smin_value = S64_MIN;
5080 dst_reg->smax_value = S64_MAX;
afbe1a5b 5081 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
b03c9f9f
EC
5082 dst_reg->umin_value >>= umax_val;
5083 dst_reg->umax_value >>= umin_val;
5084 /* We may learn something more from the var_off */
5085 __update_reg_bounds(dst_reg);
48461135 5086 break;
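	/* e.g. (hypothetical): dst in [0x10, 0xf0] and shift in [1, 4]:
	 * the smallest result is umin >> umax_val == 0x10 >> 4 and the
	 * largest is umax >> umin_val == 0xf0 >> 1, hence the cross-wise
	 * pairing of min/max above.
	 */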
9cbe1f5a
YS
5087 case BPF_ARSH:
5088 if (umax_val >= insn_bitness) {
5089 /* Shifts greater than 31 or 63 are undefined.
5090 * This includes shifts by a negative number.
5091 */
5092 mark_reg_unknown(env, regs, insn->dst_reg);
5093 break;
5094 }
5095
5096 /* Upon reaching here, src_known is true and
5097 * umax_val is equal to umin_val.
5098 */
0af2ffc9
DB
5099 if (insn_bitness == 32) {
5100 dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val);
5101 dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val);
5102 } else {
5103 dst_reg->smin_value >>= umin_val;
5104 dst_reg->smax_value >>= umin_val;
5105 }
5106
5107 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val,
5108 insn_bitness);
9cbe1f5a
YS
5109
5110 /* blow away the dst_reg umin_value/umax_value and rely on
5111 * dst_reg var_off to refine the result.
5112 */
5113 dst_reg->umin_value = 0;
5114 dst_reg->umax_value = U64_MAX;
5115 __update_reg_bounds(dst_reg);
5116 break;
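	/* e.g. (hypothetical, insn_bitness == 32): smin_value ==
	 * 0xffffffff is -1 as an s32. Casting to s32 before the shift
	 * keeps the sign ((-1) >> k == -1), and the (u32) cast stores
	 * the result back zero-extended as 0xffffffff.
	 */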
48461135 5117 default:
61bd5218 5118 mark_reg_unknown(env, regs, insn->dst_reg);
48461135
JB
5119 break;
5120 }
5121
468f6eaf
JH
5122 if (BPF_CLASS(insn->code) != BPF_ALU64) {
5123 /* 32-bit ALU ops are (32,32)->32 */
5124 coerce_reg_to_size(dst_reg, 4);
468f6eaf
JH
5125 }
5126
b03c9f9f
EC
5127 __reg_deduce_bounds(dst_reg);
5128 __reg_bound_offset(dst_reg);
f1174f77
EC
5129 return 0;
5130}
5131
5132/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
5133 * and var_off.
5134 */
5135static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
5136 struct bpf_insn *insn)
5137{
f4d7e40a
AS
5138 struct bpf_verifier_state *vstate = env->cur_state;
5139 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5140 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
f1174f77
EC
5141 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
5142 u8 opcode = BPF_OP(insn->code);
b5dc0163 5143 int err;
f1174f77
EC
5144
5145 dst_reg = &regs[insn->dst_reg];
f1174f77
EC
5146 src_reg = NULL;
5147 if (dst_reg->type != SCALAR_VALUE)
5148 ptr_reg = dst_reg;
5149 if (BPF_SRC(insn->code) == BPF_X) {
5150 src_reg = &regs[insn->src_reg];
f1174f77
EC
5151 if (src_reg->type != SCALAR_VALUE) {
5152 if (dst_reg->type != SCALAR_VALUE) {
5153 /* Combining two pointers by any ALU op yields
82abbf8d
AS
5154 * an arbitrary scalar. Disallow all math except
5155 * pointer subtraction
f1174f77 5156 */
dd066823 5157 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
82abbf8d
AS
5158 mark_reg_unknown(env, regs, insn->dst_reg);
5159 return 0;
f1174f77 5160 }
82abbf8d
AS
5161 verbose(env, "R%d pointer %s pointer prohibited\n",
5162 insn->dst_reg,
5163 bpf_alu_string[opcode >> 4]);
5164 return -EACCES;
f1174f77
EC
5165 } else {
5166 /* scalar += pointer
5167 * This is legal, but we have to reverse our
5168 * src/dest handling in computing the range
5169 */
b5dc0163
AS
5170 err = mark_chain_precision(env, insn->dst_reg);
5171 if (err)
5172 return err;
82abbf8d
AS
5173 return adjust_ptr_min_max_vals(env, insn,
5174 src_reg, dst_reg);
f1174f77
EC
5175 }
5176 } else if (ptr_reg) {
5177 /* pointer += scalar */
b5dc0163
AS
5178 err = mark_chain_precision(env, insn->src_reg);
5179 if (err)
5180 return err;
82abbf8d
AS
5181 return adjust_ptr_min_max_vals(env, insn,
5182 dst_reg, src_reg);
f1174f77
EC
5183 }
5184 } else {
5185 /* Pretend the src is a reg with a known value, since we only
5186 * need to be able to read from this state.
5187 */
5188 off_reg.type = SCALAR_VALUE;
b03c9f9f 5189 __mark_reg_known(&off_reg, insn->imm);
f1174f77 5190 src_reg = &off_reg;
82abbf8d
AS
5191 if (ptr_reg) /* pointer += K */
5192 return adjust_ptr_min_max_vals(env, insn,
5193 ptr_reg, src_reg);
f1174f77
EC
5194 }
5195
5196 /* Got here implies adding two SCALAR_VALUEs */
5197 if (WARN_ON_ONCE(ptr_reg)) {
f4d7e40a 5198 print_verifier_state(env, state);
61bd5218 5199 verbose(env, "verifier internal error: unexpected ptr_reg\n");
f1174f77
EC
5200 return -EINVAL;
5201 }
5202 if (WARN_ON(!src_reg)) {
f4d7e40a 5203 print_verifier_state(env, state);
61bd5218 5204 verbose(env, "verifier internal error: no src_reg\n");
f1174f77
EC
5205 return -EINVAL;
5206 }
5207 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
48461135
JB
5208}
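/* An illustrative sketch of the dispatch above, on hypothetical insns
 * (assuming r1/r4 hold pointers and r2/r3 scalars):
 *
 *   r1 += r2;	// ptr += scalar: adjust_ptr_min_max_vals(ptr=r1, off=r2)
 *   r3 += r1;	// scalar += ptr: same call with the operands swapped
 *   r1 -= r4;	// ptr - ptr: unknown scalar, and only if allow_ptr_leaks
 *   r2 += r3;	// scalar += scalar: adjust_scalar_min_max_vals()
 */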
5209
17a52670 5210/* check validity of 32-bit and 64-bit arithmetic operations */
58e2af8b 5211static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 5212{
638f5b90 5213 struct bpf_reg_state *regs = cur_regs(env);
17a52670
AS
5214 u8 opcode = BPF_OP(insn->code);
5215 int err;
5216
5217 if (opcode == BPF_END || opcode == BPF_NEG) {
5218 if (opcode == BPF_NEG) {
5219 if (BPF_SRC(insn->code) != 0 ||
5220 insn->src_reg != BPF_REG_0 ||
5221 insn->off != 0 || insn->imm != 0) {
61bd5218 5222 verbose(env, "BPF_NEG uses reserved fields\n");
17a52670
AS
5223 return -EINVAL;
5224 }
5225 } else {
5226 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
e67b8a68
EC
5227 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
5228 BPF_CLASS(insn->code) == BPF_ALU64) {
61bd5218 5229 verbose(env, "BPF_END uses reserved fields\n");
17a52670
AS
5230 return -EINVAL;
5231 }
5232 }
5233
5234 /* check src operand */
dc503a8a 5235 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
5236 if (err)
5237 return err;
5238
1be7f75d 5239 if (is_pointer_value(env, insn->dst_reg)) {
61bd5218 5240 verbose(env, "R%d pointer arithmetic prohibited\n",
1be7f75d
AS
5241 insn->dst_reg);
5242 return -EACCES;
5243 }
5244
17a52670 5245 /* check dest operand */
dc503a8a 5246 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
5247 if (err)
5248 return err;
5249
5250 } else if (opcode == BPF_MOV) {
5251
5252 if (BPF_SRC(insn->code) == BPF_X) {
5253 if (insn->imm != 0 || insn->off != 0) {
61bd5218 5254 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
5255 return -EINVAL;
5256 }
5257
5258 /* check src operand */
dc503a8a 5259 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
5260 if (err)
5261 return err;
5262 } else {
5263 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 5264 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
5265 return -EINVAL;
5266 }
5267 }
5268
fbeb1603
AF
5269 /* check dest operand, mark as required later */
5270 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
5271 if (err)
5272 return err;
5273
5274 if (BPF_SRC(insn->code) == BPF_X) {
e434b8cd
JW
5275 struct bpf_reg_state *src_reg = regs + insn->src_reg;
5276 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
5277
17a52670
AS
5278 if (BPF_CLASS(insn->code) == BPF_ALU64) {
5279 /* case: R1 = R2
5280 * copy register state to dest reg
5281 */
e434b8cd
JW
5282 *dst_reg = *src_reg;
5283 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 5284 dst_reg->subreg_def = DEF_NOT_SUBREG;
17a52670 5285 } else {
f1174f77 5286 /* R1 = (u32) R2 */
1be7f75d 5287 if (is_pointer_value(env, insn->src_reg)) {
61bd5218
JK
5288 verbose(env,
5289 "R%d partial copy of pointer\n",
1be7f75d
AS
5290 insn->src_reg);
5291 return -EACCES;
e434b8cd
JW
5292 } else if (src_reg->type == SCALAR_VALUE) {
5293 *dst_reg = *src_reg;
5294 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 5295 dst_reg->subreg_def = env->insn_idx + 1;
e434b8cd
JW
5296 } else {
5297 mark_reg_unknown(env, regs,
5298 insn->dst_reg);
1be7f75d 5299 }
e434b8cd 5300 coerce_reg_to_size(dst_reg, 4);
17a52670
AS
5301 }
5302 } else {
5303 /* case: R = imm
5304 * remember the value we stored into this reg
5305 */
fbeb1603
AF
5306 /* clear any state __mark_reg_known doesn't set */
5307 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77 5308 regs[insn->dst_reg].type = SCALAR_VALUE;
95a762e2
JH
5309 if (BPF_CLASS(insn->code) == BPF_ALU64) {
5310 __mark_reg_known(regs + insn->dst_reg,
5311 insn->imm);
5312 } else {
5313 __mark_reg_known(regs + insn->dst_reg,
5314 (u32)insn->imm);
5315 }
17a52670
AS
5316 }
5317
5318 } else if (opcode > BPF_END) {
61bd5218 5319 verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
17a52670
AS
5320 return -EINVAL;
5321
5322 } else { /* all other ALU ops: and, sub, xor, add, ... */
5323
17a52670
AS
5324 if (BPF_SRC(insn->code) == BPF_X) {
5325 if (insn->imm != 0 || insn->off != 0) {
61bd5218 5326 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
5327 return -EINVAL;
5328 }
5329 /* check src1 operand */
dc503a8a 5330 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
5331 if (err)
5332 return err;
5333 } else {
5334 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 5335 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
5336 return -EINVAL;
5337 }
5338 }
5339
5340 /* check src2 operand */
dc503a8a 5341 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
5342 if (err)
5343 return err;
5344
5345 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
5346 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
61bd5218 5347 verbose(env, "div by zero\n");
17a52670
AS
5348 return -EINVAL;
5349 }
5350
229394e8
RV
5351 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
5352 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
5353 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
5354
5355 if (insn->imm < 0 || insn->imm >= size) {
61bd5218 5356 verbose(env, "invalid shift %d\n", insn->imm);
229394e8
RV
5357 return -EINVAL;
5358 }
5359 }
5360
1a0dc1ac 5361 /* check dest operand */
dc503a8a 5362 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
1a0dc1ac
AS
5363 if (err)
5364 return err;
5365
f1174f77 5366 return adjust_reg_min_max_vals(env, insn);
17a52670
AS
5367 }
5368
5369 return 0;
5370}
5371
c6a9efa1
PC
5372static void __find_good_pkt_pointers(struct bpf_func_state *state,
5373 struct bpf_reg_state *dst_reg,
5374 enum bpf_reg_type type, u16 new_range)
5375{
5376 struct bpf_reg_state *reg;
5377 int i;
5378
5379 for (i = 0; i < MAX_BPF_REG; i++) {
5380 reg = &state->regs[i];
5381 if (reg->type == type && reg->id == dst_reg->id)
5382 /* keep the maximum range already checked */
5383 reg->range = max(reg->range, new_range);
5384 }
5385
5386 bpf_for_each_spilled_reg(i, state, reg) {
5387 if (!reg)
5388 continue;
5389 if (reg->type == type && reg->id == dst_reg->id)
5390 reg->range = max(reg->range, new_range);
5391 }
5392}
5393
f4d7e40a 5394static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
de8f3a83 5395 struct bpf_reg_state *dst_reg,
f8ddadc4 5396 enum bpf_reg_type type,
fb2a311a 5397 bool range_right_open)
969bf05e 5398{
fb2a311a 5399 u16 new_range;
c6a9efa1 5400 int i;
2d2be8ca 5401
fb2a311a
DB
5402 if (dst_reg->off < 0 ||
5403 (dst_reg->off == 0 && range_right_open))
f1174f77
EC
5404 /* This doesn't give us any range */
5405 return;
5406
b03c9f9f
EC
5407 if (dst_reg->umax_value > MAX_PACKET_OFF ||
5408 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
f1174f77
EC
5409 /* Risk of overflow. For instance, ptr + (1<<63) may be less
5410 * than pkt_end, but that's because it's also less than pkt.
5411 */
5412 return;
5413
fb2a311a
DB
5414 new_range = dst_reg->off;
5415 if (range_right_open)
5416 new_range--;
5417
5418 /* Examples for register markings:
2d2be8ca 5419 *
fb2a311a 5420 * pkt_data in dst register:
2d2be8ca
DB
5421 *
5422 * r2 = r3;
5423 * r2 += 8;
5424 * if (r2 > pkt_end) goto <handle exception>
5425 * <access okay>
5426 *
b4e432f1
DB
5427 * r2 = r3;
5428 * r2 += 8;
5429 * if (r2 < pkt_end) goto <access okay>
5430 * <handle exception>
5431 *
2d2be8ca
DB
5432 * Where:
5433 * r2 == dst_reg, pkt_end == src_reg
5434 * r2=pkt(id=n,off=8,r=0)
5435 * r3=pkt(id=n,off=0,r=0)
5436 *
fb2a311a 5437 * pkt_data in src register:
2d2be8ca
DB
5438 *
5439 * r2 = r3;
5440 * r2 += 8;
5441 * if (pkt_end >= r2) goto <access okay>
5442 * <handle exception>
5443 *
b4e432f1
DB
5444 * r2 = r3;
5445 * r2 += 8;
5446 * if (pkt_end <= r2) goto <handle exception>
5447 * <access okay>
5448 *
2d2be8ca
DB
5449 * Where:
5450 * pkt_end == dst_reg, r2 == src_reg
5451 * r2=pkt(id=n,off=8,r=0)
5452 * r3=pkt(id=n,off=0,r=0)
5453 *
5454 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
fb2a311a
DB
5455 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
5456 * and [r3, r3 + 8-1) respectively is safe to access depending on
5457 * the check.
969bf05e 5458 */
2d2be8ca 5459
f1174f77
EC
5460 /* If our ids match, then we must have the same max_value. And we
5461 * don't care about the other reg's fixed offset, since if it's too big
5462 * the range won't allow anything.
5463 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
5464 */
c6a9efa1
PC
5465 for (i = 0; i <= vstate->curframe; i++)
5466 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
5467 new_range);
969bf05e
AS
5468}
5469
4f7b3e82
AS
5470/* compute branch direction of the expression "if (reg opcode val) goto target;"
5471 * and return:
5472 * 1 - branch will be taken and "goto target" will be executed
5473 * 0 - branch will not be taken and fall-through to next insn
5474 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
5475 */
092ed096
JW
5476static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
5477 bool is_jmp32)
4f7b3e82 5478{
092ed096 5479 struct bpf_reg_state reg_lo;
a72dafaf
JW
5480 s64 sval;
5481
4f7b3e82
AS
5482 if (__is_pointer_value(false, reg))
5483 return -1;
5484
092ed096
JW
5485 if (is_jmp32) {
5486 reg_lo = *reg;
5487 reg = &reg_lo;
5488 /* For JMP32, only low 32 bits are compared, coerce_reg_to_size
5489 * could truncate high bits and update umin/umax according to
5490 * information of low bits.
5491 */
5492 coerce_reg_to_size(reg, 4);
5493 /* smin/smax need special handling. For example, after coerce,
5494 * if smin_value is 0x00000000ffffffffLL, the value is -1 when
5495 * used as operand to JMP32. It is a negative number from s32's
5496 * point of view, while it is a positive number when seen as
5497 * s64. The smin/smax are kept as s64, therefore, when used with
5498 * JMP32, they need to be transformed into s32, then sign
5499 * extended back to s64.
5500 *
5501 * Also, smin/smax were copied from umin/umax. If umin/umax have
5502 * different sign bits, then the min/max relationship isn't
5503 * maintained after casting into s32; in this case, set smin/smax
5504 * to the safest range.
5505 */
5506 if ((reg->umax_value ^ reg->umin_value) &
5507 (1ULL << 31)) {
5508 reg->smin_value = S32_MIN;
5509 reg->smax_value = S32_MAX;
5510 }
5511 reg->smin_value = (s64)(s32)reg->smin_value;
5512 reg->smax_value = (s64)(s32)reg->smax_value;
5513
5514 val = (u32)val;
5515 sval = (s64)(s32)val;
5516 } else {
5517 sval = (s64)val;
5518 }
a72dafaf 5519
4f7b3e82
AS
5520 switch (opcode) {
5521 case BPF_JEQ:
5522 if (tnum_is_const(reg->var_off))
5523 return !!tnum_equals_const(reg->var_off, val);
5524 break;
5525 case BPF_JNE:
5526 if (tnum_is_const(reg->var_off))
5527 return !tnum_equals_const(reg->var_off, val);
5528 break;
960ea056
JK
5529 case BPF_JSET:
5530 if ((~reg->var_off.mask & reg->var_off.value) & val)
5531 return 1;
5532 if (!((reg->var_off.mask | reg->var_off.value) & val))
5533 return 0;
5534 break;
4f7b3e82
AS
5535 case BPF_JGT:
5536 if (reg->umin_value > val)
5537 return 1;
5538 else if (reg->umax_value <= val)
5539 return 0;
5540 break;
5541 case BPF_JSGT:
a72dafaf 5542 if (reg->smin_value > sval)
4f7b3e82 5543 return 1;
a72dafaf 5544 else if (reg->smax_value < sval)
4f7b3e82
AS
5545 return 0;
5546 break;
5547 case BPF_JLT:
5548 if (reg->umax_value < val)
5549 return 1;
5550 else if (reg->umin_value >= val)
5551 return 0;
5552 break;
5553 case BPF_JSLT:
a72dafaf 5554 if (reg->smax_value < sval)
4f7b3e82 5555 return 1;
a72dafaf 5556 else if (reg->smin_value >= sval)
4f7b3e82
AS
5557 return 0;
5558 break;
5559 case BPF_JGE:
5560 if (reg->umin_value >= val)
5561 return 1;
5562 else if (reg->umax_value < val)
5563 return 0;
5564 break;
5565 case BPF_JSGE:
a72dafaf 5566 if (reg->smin_value >= sval)
4f7b3e82 5567 return 1;
a72dafaf 5568 else if (reg->smax_value < sval)
4f7b3e82
AS
5569 return 0;
5570 break;
5571 case BPF_JLE:
5572 if (reg->umax_value <= val)
5573 return 1;
5574 else if (reg->umin_value > val)
5575 return 0;
5576 break;
5577 case BPF_JSLE:
a72dafaf 5578 if (reg->smax_value <= sval)
4f7b3e82 5579 return 1;
a72dafaf 5580 else if (reg->smin_value > sval)
4f7b3e82
AS
5581 return 0;
5582 break;
5583 }
5584
5585 return -1;
5586}
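/* e.g. (hypothetical): for BPF_JSET with var_off (value=0x4, mask=0x3)
 * and val == 0x4, bit 2 is known set, so (~mask & value) & val != 0 and
 * the branch is always taken (1). With val == 0x8 neither a known nor
 * an unknown bit overlaps val, so the branch is never taken (0).
 */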
5587
092ed096
JW
5588/* Generate min value of the high 32-bit from TNUM info. */
5589static u64 gen_hi_min(struct tnum var)
5590{
5591 return var.value & ~0xffffffffULL;
5592}
5593
5594/* Generate max value of the high 32-bit from TNUM info. */
5595static u64 gen_hi_max(struct tnum var)
5596{
5597 return (var.value | var.mask) & ~0xffffffffULL;
5598}
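/* e.g. (hypothetical): var_off (value=0x100000000, mask=0x200000000),
 * i.e. bit 32 known set and bit 33 unknown: gen_hi_min() yields
 * 0x100000000 and gen_hi_max() yields 0x300000000. These bound the
 * high half's contribution when a JMP32 compare only pins the low
 * 32 bits.
 */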
5599
5600/* Return true if VAL is compared with a s64 sign extended from s32, and they
5601 * are with the same signedness.
5602 */
5603static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
5604{
5605 return ((s32)sval >= 0 &&
5606 reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
5607 ((s32)sval < 0 &&
5608 reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
5609}
5610
48461135
JB
5611/* Adjusts the register min/max values in the case that the dst_reg is the
5612 * variable register that we are working on, and src_reg is a constant or we're
5613 * simply doing a BPF_K check.
f1174f77 5614 * In JEQ/JNE cases we also adjust the var_off values.
48461135
JB
5615 */
5616static void reg_set_min_max(struct bpf_reg_state *true_reg,
5617 struct bpf_reg_state *false_reg, u64 val,
092ed096 5618 u8 opcode, bool is_jmp32)
48461135 5619{
a72dafaf
JW
5620 s64 sval;
5621
f1174f77
EC
5622 /* If the dst_reg is a pointer, we can't learn anything about its
5623 * variable offset from the compare (unless src_reg were a pointer into
5624 * the same object, but we don't bother with that).
5625 * Since false_reg and true_reg have the same type by construction, we
5626 * only need to check one of them for pointerness.
5627 */
5628 if (__is_pointer_value(false, false_reg))
5629 return;
4cabc5b1 5630
092ed096
JW
5631 val = is_jmp32 ? (u32)val : val;
5632 sval = is_jmp32 ? (s64)(s32)val : (s64)val;
a72dafaf 5633
48461135
JB
5634 switch (opcode) {
5635 case BPF_JEQ:
48461135 5636 case BPF_JNE:
a72dafaf
JW
5637 {
5638 struct bpf_reg_state *reg =
5639 opcode == BPF_JEQ ? true_reg : false_reg;
5640
5641 /* For BPF_JEQ, if this is false we know nothing Jon Snow, but
5642 * if it is true we know the value for sure. Likewise for
5643 * BPF_JNE.
48461135 5644 */
092ed096
JW
5645 if (is_jmp32) {
5646 u64 old_v = reg->var_off.value;
5647 u64 hi_mask = ~0xffffffffULL;
5648
5649 reg->var_off.value = (old_v & hi_mask) | val;
5650 reg->var_off.mask &= hi_mask;
5651 } else {
5652 __mark_reg_known(reg, val);
5653 }
48461135 5654 break;
a72dafaf 5655 }
960ea056
JK
5656 case BPF_JSET:
5657 false_reg->var_off = tnum_and(false_reg->var_off,
5658 tnum_const(~val));
5659 if (is_power_of_2(val))
5660 true_reg->var_off = tnum_or(true_reg->var_off,
5661 tnum_const(val));
5662 break;
48461135 5663 case BPF_JGE:
a72dafaf
JW
5664 case BPF_JGT:
5665 {
5666 u64 false_umax = opcode == BPF_JGT ? val : val - 1;
5667 u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
5668
092ed096
JW
5669 if (is_jmp32) {
5670 false_umax += gen_hi_max(false_reg->var_off);
5671 true_umin += gen_hi_min(true_reg->var_off);
5672 }
a72dafaf
JW
5673 false_reg->umax_value = min(false_reg->umax_value, false_umax);
5674 true_reg->umin_value = max(true_reg->umin_value, true_umin);
b03c9f9f 5675 break;
a72dafaf 5676 }
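	/* e.g.: "if (r1 > 10) goto ..." (BPF_JGT, val == 10): the
	 * fall-through copy gets umax_value capped at 10 and the taken
	 * copy gets umin_value raised to 11, modulo the JMP32 high-half
	 * corrections above.
	 */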
48461135 5677 case BPF_JSGE:
a72dafaf
JW
5678 case BPF_JSGT:
5679 {
5680 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
5681 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
5682
092ed096
JW
5683 /* If the full s64 was not sign-extended from s32 then don't
5684 * deduct further info.
5685 */
5686 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5687 break;
a72dafaf
JW
5688 false_reg->smax_value = min(false_reg->smax_value, false_smax);
5689 true_reg->smin_value = max(true_reg->smin_value, true_smin);
48461135 5690 break;
a72dafaf 5691 }
b4e432f1 5692 case BPF_JLE:
a72dafaf
JW
5693 case BPF_JLT:
5694 {
5695 u64 false_umin = opcode == BPF_JLT ? val : val + 1;
5696 u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
5697
092ed096
JW
5698 if (is_jmp32) {
5699 false_umin += gen_hi_min(false_reg->var_off);
5700 true_umax += gen_hi_max(true_reg->var_off);
5701 }
a72dafaf
JW
5702 false_reg->umin_value = max(false_reg->umin_value, false_umin);
5703 true_reg->umax_value = min(true_reg->umax_value, true_umax);
b4e432f1 5704 break;
a72dafaf 5705 }
b4e432f1 5706 case BPF_JSLE:
a72dafaf
JW
5707 case BPF_JSLT:
5708 {
5709 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
5710 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
5711
092ed096
JW
5712 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5713 break;
a72dafaf
JW
5714 false_reg->smin_value = max(false_reg->smin_value, false_smin);
5715 true_reg->smax_value = min(true_reg->smax_value, true_smax);
b4e432f1 5716 break;
a72dafaf 5717 }
48461135
JB
5718 default:
5719 break;
5720 }
5721
b03c9f9f
EC
5722 __reg_deduce_bounds(false_reg);
5723 __reg_deduce_bounds(true_reg);
5724 /* We might have learned some bits from the bounds. */
5725 __reg_bound_offset(false_reg);
5726 __reg_bound_offset(true_reg);
581738a6
YS
5727 if (is_jmp32) {
5728 __reg_bound_offset32(false_reg);
5729 __reg_bound_offset32(true_reg);
5730 }
b03c9f9f
EC
5731 /* Intersecting with the old var_off might have improved our bounds
5732 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5733 * then new var_off is (0; 0x7f...fc) which improves our umax.
5734 */
5735 __update_reg_bounds(false_reg);
5736 __update_reg_bounds(true_reg);
48461135
JB
5737}
5738
f1174f77
EC
5739/* Same as above, but for the case that dst_reg holds a constant and src_reg is
5740 * the variable reg.
48461135
JB
5741 */
5742static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
5743 struct bpf_reg_state *false_reg, u64 val,
092ed096 5744 u8 opcode, bool is_jmp32)
48461135 5745{
a72dafaf
JW
5746 s64 sval;
5747
f1174f77
EC
5748 if (__is_pointer_value(false, false_reg))
5749 return;
4cabc5b1 5750
092ed096
JW
5751 val = is_jmp32 ? (u32)val : val;
5752 sval = is_jmp32 ? (s64)(s32)val : (s64)val;
a72dafaf 5753
48461135
JB
5754 switch (opcode) {
5755 case BPF_JEQ:
48461135 5756 case BPF_JNE:
a72dafaf
JW
5757 {
5758 struct bpf_reg_state *reg =
5759 opcode == BPF_JEQ ? true_reg : false_reg;
5760
092ed096
JW
5761 if (is_jmp32) {
5762 u64 old_v = reg->var_off.value;
5763 u64 hi_mask = ~0xffffffffULL;
5764
5765 reg->var_off.value = (old_v & hi_mask) | val;
5766 reg->var_off.mask &= hi_mask;
5767 } else {
5768 __mark_reg_known(reg, val);
5769 }
48461135 5770 break;
a72dafaf 5771 }
960ea056
JK
5772 case BPF_JSET:
5773 false_reg->var_off = tnum_and(false_reg->var_off,
5774 tnum_const(~val));
5775 if (is_power_of_2(val))
5776 true_reg->var_off = tnum_or(true_reg->var_off,
5777 tnum_const(val));
5778 break;
48461135 5779 case BPF_JGE:
a72dafaf
JW
5780 case BPF_JGT:
5781 {
5782 u64 false_umin = opcode == BPF_JGT ? val : val + 1;
5783 u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
5784
092ed096
JW
5785 if (is_jmp32) {
5786 false_umin += gen_hi_min(false_reg->var_off);
5787 true_umax += gen_hi_max(true_reg->var_off);
5788 }
a72dafaf
JW
5789 false_reg->umin_value = max(false_reg->umin_value, false_umin);
5790 true_reg->umax_value = min(true_reg->umax_value, true_umax);
b03c9f9f 5791 break;
a72dafaf 5792 }
48461135 5793 case BPF_JSGE:
a72dafaf
JW
5794 case BPF_JSGT:
5795 {
5796 s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1;
5797 s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
5798
092ed096
JW
5799 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5800 break;
a72dafaf
JW
5801 false_reg->smin_value = max(false_reg->smin_value, false_smin);
5802 true_reg->smax_value = min(true_reg->smax_value, true_smax);
48461135 5803 break;
a72dafaf 5804 }
b4e432f1 5805 case BPF_JLE:
a72dafaf
JW
5806 case BPF_JLT:
5807 {
5808 u64 false_umax = opcode == BPF_JLT ? val : val - 1;
5809 u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
5810
092ed096
JW
5811 if (is_jmp32) {
5812 false_umax += gen_hi_max(false_reg->var_off);
5813 true_umin += gen_hi_min(true_reg->var_off);
5814 }
a72dafaf
JW
5815 false_reg->umax_value = min(false_reg->umax_value, false_umax);
5816 true_reg->umin_value = max(true_reg->umin_value, true_umin);
b4e432f1 5817 break;
a72dafaf 5818 }
b4e432f1 5819 case BPF_JSLE:
a72dafaf
JW
5820 case BPF_JSLT:
5821 {
5822 s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1;
5823 s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
5824
092ed096
JW
5825 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5826 break;
a72dafaf
JW
5827 false_reg->smax_value = min(false_reg->smax_value, false_smax);
5828 true_reg->smin_value = max(true_reg->smin_value, true_smin);
b4e432f1 5829 break;
a72dafaf 5830 }
48461135
JB
5831 default:
5832 break;
5833 }
5834
b03c9f9f
EC
5835 __reg_deduce_bounds(false_reg);
5836 __reg_deduce_bounds(true_reg);
5837 /* We might have learned some bits from the bounds. */
5838 __reg_bound_offset(false_reg);
5839 __reg_bound_offset(true_reg);
581738a6
YS
5840 if (is_jmp32) {
5841 __reg_bound_offset32(false_reg);
5842 __reg_bound_offset32(true_reg);
5843 }
b03c9f9f
EC
5844 /* Intersecting with the old var_off might have improved our bounds
5845 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5846 * then new var_off is (0; 0x7f...fc) which improves our umax.
5847 */
5848 __update_reg_bounds(false_reg);
5849 __update_reg_bounds(true_reg);
f1174f77
EC
5850}
5851
5852/* Regs are known to be equal, so intersect their min/max/var_off */
5853static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
5854 struct bpf_reg_state *dst_reg)
5855{
b03c9f9f
EC
5856 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
5857 dst_reg->umin_value);
5858 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
5859 dst_reg->umax_value);
5860 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
5861 dst_reg->smin_value);
5862 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
5863 dst_reg->smax_value);
f1174f77
EC
5864 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
5865 dst_reg->var_off);
b03c9f9f
EC
5866 /* We might have learned new bounds from the var_off. */
5867 __update_reg_bounds(src_reg);
5868 __update_reg_bounds(dst_reg);
5869 /* We might have learned something about the sign bit. */
5870 __reg_deduce_bounds(src_reg);
5871 __reg_deduce_bounds(dst_reg);
5872 /* We might have learned some bits from the bounds. */
5873 __reg_bound_offset(src_reg);
5874 __reg_bound_offset(dst_reg);
5875 /* Intersecting with the old var_off might have improved our bounds
5876 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5877 * then new var_off is (0; 0x7f...fc) which improves our umax.
5878 */
5879 __update_reg_bounds(src_reg);
5880 __update_reg_bounds(dst_reg);
f1174f77
EC
5881}
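/* e.g. (hypothetical): src known in [0, 100], dst in [50, 200]. On the
 * branch where JEQ holds, both copies agree on the intersection
 * [50, 100], tnum_intersect() merges any known bits, and the
 * deduce/bound/update passes refine until nothing improves.
 */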
5882
5883static void reg_combine_min_max(struct bpf_reg_state *true_src,
5884 struct bpf_reg_state *true_dst,
5885 struct bpf_reg_state *false_src,
5886 struct bpf_reg_state *false_dst,
5887 u8 opcode)
5888{
5889 switch (opcode) {
5890 case BPF_JEQ:
5891 __reg_combine_min_max(true_src, true_dst);
5892 break;
5893 case BPF_JNE:
5894 __reg_combine_min_max(false_src, false_dst);
b03c9f9f 5895 break;
4cabc5b1 5896 }
48461135
JB
5897}
5898
fd978bf7
JS
5899static void mark_ptr_or_null_reg(struct bpf_func_state *state,
5900 struct bpf_reg_state *reg, u32 id,
840b9615 5901 bool is_null)
57a09bf0 5902{
840b9615 5903 if (reg_type_may_be_null(reg->type) && reg->id == id) {
f1174f77
EC
5904 /* Old offset (both fixed and variable parts) should
5905 * have been known-zero, because we don't allow pointer
5906 * arithmetic on pointers that might be NULL.
5907 */
b03c9f9f
EC
5908 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
5909 !tnum_equals_const(reg->var_off, 0) ||
f1174f77 5910 reg->off)) {
b03c9f9f
EC
5911 __mark_reg_known_zero(reg);
5912 reg->off = 0;
f1174f77
EC
5913 }
5914 if (is_null) {
5915 reg->type = SCALAR_VALUE;
840b9615
JS
5916 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
5917 if (reg->map_ptr->inner_map_meta) {
5918 reg->type = CONST_PTR_TO_MAP;
5919 reg->map_ptr = reg->map_ptr->inner_map_meta;
fada7fdc
JL
5920 } else if (reg->map_ptr->map_type ==
5921 BPF_MAP_TYPE_XSKMAP) {
5922 reg->type = PTR_TO_XDP_SOCK;
840b9615
JS
5923 } else {
5924 reg->type = PTR_TO_MAP_VALUE;
5925 }
c64b7983
JS
5926 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
5927 reg->type = PTR_TO_SOCKET;
46f8bc92
MKL
5928 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
5929 reg->type = PTR_TO_SOCK_COMMON;
655a51e5
MKL
5930 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
5931 reg->type = PTR_TO_TCP_SOCK;
56f668df 5932 }
1b986589
MKL
5933 if (is_null) {
5934 /* We don't need id and ref_obj_id from this point
5935 * onwards anymore, thus we should better reset it,
5936 * so that state pruning has chances to take effect.
5937 */
5938 reg->id = 0;
5939 reg->ref_obj_id = 0;
5940 } else if (!reg_may_point_to_spin_lock(reg)) {
5941 /* For not-NULL ptr, reg->ref_obj_id will be reset
5942 * in release_reg_references().
5943 *
5944 * reg->id is still used by spin_lock ptr. Other
5945 * than spin_lock ptr type, reg->id can be reset.
fd978bf7
JS
5946 */
5947 reg->id = 0;
56f668df 5948 }
57a09bf0
TG
5949 }
5950}
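/* The typical pattern this models, sketched in BPF C (names are
 * illustrative):
 *
 *   struct val *v = bpf_map_lookup_elem(&m, &key);	// PTR_TO_MAP_VALUE_OR_NULL
 *   if (!v)
 *           return 0;	// == NULL branch: v becomes SCALAR_VALUE
 *   v->cnt++;		// != NULL branch: v is now PTR_TO_MAP_VALUE
 */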
5951
c6a9efa1
PC
5952static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
5953 bool is_null)
5954{
5955 struct bpf_reg_state *reg;
5956 int i;
5957
5958 for (i = 0; i < MAX_BPF_REG; i++)
5959 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
5960
5961 bpf_for_each_spilled_reg(i, state, reg) {
5962 if (!reg)
5963 continue;
5964 mark_ptr_or_null_reg(state, reg, id, is_null);
5965 }
5966}
5967
57a09bf0
TG
5968/* The logic is similar to find_good_pkt_pointers(), both could eventually
5969 * be folded together at some point.
5970 */
840b9615
JS
5971static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
5972 bool is_null)
57a09bf0 5973{
f4d7e40a 5974 struct bpf_func_state *state = vstate->frame[vstate->curframe];
c6a9efa1 5975 struct bpf_reg_state *regs = state->regs;
1b986589 5976 u32 ref_obj_id = regs[regno].ref_obj_id;
a08dd0da 5977 u32 id = regs[regno].id;
c6a9efa1 5978 int i;
57a09bf0 5979
1b986589
MKL
5980 if (ref_obj_id && ref_obj_id == id && is_null)
5981 /* regs[regno] is in the " == NULL" branch.
5982 * No one could have freed the reference state before
5983 * doing the NULL check.
5984 */
5985 WARN_ON_ONCE(release_reference_state(state, id));
fd978bf7 5986
c6a9efa1
PC
5987 for (i = 0; i <= vstate->curframe; i++)
5988 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
57a09bf0
TG
5989}
5990
5beca081
DB
5991static bool try_match_pkt_pointers(const struct bpf_insn *insn,
5992 struct bpf_reg_state *dst_reg,
5993 struct bpf_reg_state *src_reg,
5994 struct bpf_verifier_state *this_branch,
5995 struct bpf_verifier_state *other_branch)
5996{
5997 if (BPF_SRC(insn->code) != BPF_X)
5998 return false;
5999
092ed096
JW
6000 /* Pointers are always 64-bit. */
6001 if (BPF_CLASS(insn->code) == BPF_JMP32)
6002 return false;
6003
5beca081
DB
6004 switch (BPF_OP(insn->code)) {
6005 case BPF_JGT:
6006 if ((dst_reg->type == PTR_TO_PACKET &&
6007 src_reg->type == PTR_TO_PACKET_END) ||
6008 (dst_reg->type == PTR_TO_PACKET_META &&
6009 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6010 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
6011 find_good_pkt_pointers(this_branch, dst_reg,
6012 dst_reg->type, false);
6013 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6014 src_reg->type == PTR_TO_PACKET) ||
6015 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6016 src_reg->type == PTR_TO_PACKET_META)) {
6017 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
6018 find_good_pkt_pointers(other_branch, src_reg,
6019 src_reg->type, true);
6020 } else {
6021 return false;
6022 }
6023 break;
6024 case BPF_JLT:
6025 if ((dst_reg->type == PTR_TO_PACKET &&
6026 src_reg->type == PTR_TO_PACKET_END) ||
6027 (dst_reg->type == PTR_TO_PACKET_META &&
6028 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6029 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
6030 find_good_pkt_pointers(other_branch, dst_reg,
6031 dst_reg->type, true);
6032 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6033 src_reg->type == PTR_TO_PACKET) ||
6034 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6035 src_reg->type == PTR_TO_PACKET_META)) {
6036 /* pkt_end < pkt_data', pkt_data > pkt_meta' */
6037 find_good_pkt_pointers(this_branch, src_reg,
6038 src_reg->type, false);
6039 } else {
6040 return false;
6041 }
6042 break;
6043 case BPF_JGE:
6044 if ((dst_reg->type == PTR_TO_PACKET &&
6045 src_reg->type == PTR_TO_PACKET_END) ||
6046 (dst_reg->type == PTR_TO_PACKET_META &&
6047 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6048 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
6049 find_good_pkt_pointers(this_branch, dst_reg,
6050 dst_reg->type, true);
6051 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6052 src_reg->type == PTR_TO_PACKET) ||
6053 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6054 src_reg->type == PTR_TO_PACKET_META)) {
6055 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
6056 find_good_pkt_pointers(other_branch, src_reg,
6057 src_reg->type, false);
6058 } else {
6059 return false;
6060 }
6061 break;
6062 case BPF_JLE:
6063 if ((dst_reg->type == PTR_TO_PACKET &&
6064 src_reg->type == PTR_TO_PACKET_END) ||
6065 (dst_reg->type == PTR_TO_PACKET_META &&
6066 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6067 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
6068 find_good_pkt_pointers(other_branch, dst_reg,
6069 dst_reg->type, false);
6070 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6071 src_reg->type == PTR_TO_PACKET) ||
6072 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6073 src_reg->type == PTR_TO_PACKET_META)) {
6074 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
6075 find_good_pkt_pointers(this_branch, src_reg,
6076 src_reg->type, true);
6077 } else {
6078 return false;
6079 }
6080 break;
6081 default:
6082 return false;
6083 }
6084
6085 return true;
6086}
6087
58e2af8b 6088static int check_cond_jmp_op(struct bpf_verifier_env *env,
17a52670
AS
6089 struct bpf_insn *insn, int *insn_idx)
6090{
f4d7e40a
AS
6091 struct bpf_verifier_state *this_branch = env->cur_state;
6092 struct bpf_verifier_state *other_branch;
6093 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
fb8d251e 6094 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
17a52670 6095 u8 opcode = BPF_OP(insn->code);
092ed096 6096 bool is_jmp32;
fb8d251e 6097 int pred = -1;
17a52670
AS
6098 int err;
6099
092ed096
JW
6100 /* Only conditional jumps are expected to reach here. */
6101 if (opcode == BPF_JA || opcode > BPF_JSLE) {
6102 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
17a52670
AS
6103 return -EINVAL;
6104 }
6105
6106 if (BPF_SRC(insn->code) == BPF_X) {
6107 if (insn->imm != 0) {
092ed096 6108 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
6109 return -EINVAL;
6110 }
6111
6112 /* check src1 operand */
dc503a8a 6113 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
6114 if (err)
6115 return err;
1be7f75d
AS
6116
6117 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 6118 verbose(env, "R%d pointer comparison prohibited\n",
1be7f75d
AS
6119 insn->src_reg);
6120 return -EACCES;
6121 }
fb8d251e 6122 src_reg = &regs[insn->src_reg];
17a52670
AS
6123 } else {
6124 if (insn->src_reg != BPF_REG_0) {
092ed096 6125 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
6126 return -EINVAL;
6127 }
6128 }
6129
6130 /* check src2 operand */
dc503a8a 6131 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
6132 if (err)
6133 return err;
6134
1a0dc1ac 6135 dst_reg = &regs[insn->dst_reg];
092ed096 6136 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1a0dc1ac 6137
fb8d251e
AS
6138 if (BPF_SRC(insn->code) == BPF_K)
6139 pred = is_branch_taken(dst_reg, insn->imm,
6140 opcode, is_jmp32);
6141 else if (src_reg->type == SCALAR_VALUE &&
6142 tnum_is_const(src_reg->var_off))
6143 pred = is_branch_taken(dst_reg, src_reg->var_off.value,
6144 opcode, is_jmp32);
b5dc0163
AS
6145 if (pred >= 0) {
6146 err = mark_chain_precision(env, insn->dst_reg);
6147 if (BPF_SRC(insn->code) == BPF_X && !err)
6148 err = mark_chain_precision(env, insn->src_reg);
6149 if (err)
6150 return err;
6151 }
fb8d251e
AS
6152 if (pred == 1) {
6153 /* only follow the goto, ignore fall-through */
6154 *insn_idx += insn->off;
6155 return 0;
6156 } else if (pred == 0) {
6157 /* only follow fall-through branch, since
6158 * that's where the program will go
6159 */
6160 return 0;
17a52670
AS
6161 }
6162
979d63d5
DB
6163 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
6164 false);
17a52670
AS
6165 if (!other_branch)
6166 return -EFAULT;
f4d7e40a 6167 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
17a52670 6168
48461135
JB
6169 /* detect if we are comparing against a constant value so we can adjust
6170 * our min/max values for our dst register.
f1174f77
EC
6171 * this is only legit if both are scalars (or pointers to the same
6172 * object, I suppose, but we don't support that right now), because
6173 * otherwise the different base pointers mean the offsets aren't
6174 * comparable.
48461135
JB
6175 */
6176 if (BPF_SRC(insn->code) == BPF_X) {
092ed096
JW
6177 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
6178 struct bpf_reg_state lo_reg0 = *dst_reg;
6179 struct bpf_reg_state lo_reg1 = *src_reg;
6180 struct bpf_reg_state *src_lo, *dst_lo;
6181
6182 dst_lo = &lo_reg0;
6183 src_lo = &lo_reg1;
6184 coerce_reg_to_size(dst_lo, 4);
6185 coerce_reg_to_size(src_lo, 4);
6186
f1174f77 6187 if (dst_reg->type == SCALAR_VALUE &&
092ed096
JW
6188 src_reg->type == SCALAR_VALUE) {
6189 if (tnum_is_const(src_reg->var_off) ||
6190 (is_jmp32 && tnum_is_const(src_lo->var_off)))
f4d7e40a 6191 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096
JW
6192 dst_reg,
6193 is_jmp32
6194 ? src_lo->var_off.value
6195 : src_reg->var_off.value,
6196 opcode, is_jmp32);
6197 else if (tnum_is_const(dst_reg->var_off) ||
6198 (is_jmp32 && tnum_is_const(dst_lo->var_off)))
f4d7e40a 6199 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
092ed096
JW
6200 src_reg,
6201 is_jmp32
6202 ? dst_lo->var_off.value
6203 : dst_reg->var_off.value,
6204 opcode, is_jmp32);
6205 else if (!is_jmp32 &&
6206 (opcode == BPF_JEQ || opcode == BPF_JNE))
f1174f77 6207 /* Comparing for equality, we can combine knowledge */
f4d7e40a
AS
6208 reg_combine_min_max(&other_branch_regs[insn->src_reg],
6209 &other_branch_regs[insn->dst_reg],
092ed096 6210 src_reg, dst_reg, opcode);
f1174f77
EC
6211 }
6212 } else if (dst_reg->type == SCALAR_VALUE) {
f4d7e40a 6213 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096 6214 dst_reg, insn->imm, opcode, is_jmp32);
48461135
JB
6215 }
6216
092ed096
JW
6217 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
6218 * NOTE: these optimizations below are related to pointer comparisons,
6219 * which will never be JMP32.
6220 */
6221 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
1a0dc1ac 6222 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
840b9615
JS
6223 reg_type_may_be_null(dst_reg->type)) {
6224 /* Mark all identical registers in each branch as either
57a09bf0
TG
6225 * safe or unknown depending R == 0 or R != 0 conditional.
6226 */
840b9615
JS
6227 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
6228 opcode == BPF_JNE);
6229 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
6230 opcode == BPF_JEQ);
5beca081
DB
6231 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
6232 this_branch, other_branch) &&
6233 is_pointer_value(env, insn->dst_reg)) {
61bd5218
JK
6234 verbose(env, "R%d pointer comparison prohibited\n",
6235 insn->dst_reg);
1be7f75d 6236 return -EACCES;
17a52670 6237 }
06ee7115 6238 if (env->log.level & BPF_LOG_LEVEL)
f4d7e40a 6239 print_verifier_state(env, this_branch->frame[this_branch->curframe]);
17a52670
AS
6240 return 0;
6241}
6242
17a52670 6243/* verify BPF_LD_IMM64 instruction */
58e2af8b 6244static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 6245{
d8eca5bb 6246 struct bpf_insn_aux_data *aux = cur_aux(env);
638f5b90 6247 struct bpf_reg_state *regs = cur_regs(env);
d8eca5bb 6248 struct bpf_map *map;
17a52670
AS
6249 int err;
6250
6251 if (BPF_SIZE(insn->code) != BPF_DW) {
61bd5218 6252 verbose(env, "invalid BPF_LD_IMM insn\n");
17a52670
AS
6253 return -EINVAL;
6254 }
6255 if (insn->off != 0) {
61bd5218 6256 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
17a52670
AS
6257 return -EINVAL;
6258 }
6259
dc503a8a 6260 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
6261 if (err)
6262 return err;
6263
6b173873 6264 if (insn->src_reg == 0) {
6b173873
JK
6265 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
6266
f1174f77 6267 regs[insn->dst_reg].type = SCALAR_VALUE;
b03c9f9f 6268 __mark_reg_known(&regs[insn->dst_reg], imm);
17a52670 6269 return 0;
6b173873 6270 }
17a52670 6271
d8eca5bb
DB
6272 map = env->used_maps[aux->map_index];
6273 mark_reg_known_zero(env, regs, insn->dst_reg);
6274 regs[insn->dst_reg].map_ptr = map;
6275
6276 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
6277 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
6278 regs[insn->dst_reg].off = aux->map_off;
6279 if (map_value_has_spin_lock(map))
6280 regs[insn->dst_reg].id = ++env->id_gen;
6281 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
6282 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
6283 } else {
6284 verbose(env, "bpf verifier is misconfigured\n");
6285 return -EINVAL;
6286 }
17a52670 6287
17a52670
AS
6288 return 0;
6289}
6290
96be4325
DB
6291static bool may_access_skb(enum bpf_prog_type type)
6292{
6293 switch (type) {
6294 case BPF_PROG_TYPE_SOCKET_FILTER:
6295 case BPF_PROG_TYPE_SCHED_CLS:
94caee8c 6296 case BPF_PROG_TYPE_SCHED_ACT:
96be4325
DB
6297 return true;
6298 default:
6299 return false;
6300 }
6301}
6302
ddd872bc
AS
6303/* verify safety of LD_ABS|LD_IND instructions:
6304 * - they can only appear in the programs where ctx == skb
6305 * - since they are wrappers of function calls, they scratch R1-R5 registers,
6306 * preserve R6-R9, and store return value into R0
6307 *
6308 * Implicit input:
6309 * ctx == skb == R6 == CTX
6310 *
6311 * Explicit input:
6312 * SRC == any register
6313 * IMM == 32-bit immediate
6314 *
6315 * Output:
6316 * R0 - 8/16/32-bit skb data converted to cpu endianness
6317 */
58e2af8b 6318static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
ddd872bc 6319{
638f5b90 6320 struct bpf_reg_state *regs = cur_regs(env);
6d4f151a 6321 static const int ctx_reg = BPF_REG_6;
ddd872bc 6322 u8 mode = BPF_MODE(insn->code);
ddd872bc
AS
6323 int i, err;
6324
24701ece 6325 if (!may_access_skb(env->prog->type)) {
61bd5218 6326 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
ddd872bc
AS
6327 return -EINVAL;
6328 }
6329
e0cea7ce
DB
6330 if (!env->ops->gen_ld_abs) {
6331 verbose(env, "bpf verifier is misconfigured\n");
6332 return -EINVAL;
6333 }
6334
f910cefa 6335 if (env->subprog_cnt > 1) {
f4d7e40a
AS
6336 /* when a program has an LD_ABS insn, JITs and the interpreter assume
6337 * that r1 == ctx == skb, which is not the case for callees
6338 * that can have arbitrary arguments. It's problematic
6339 * for the main prog as well, since JITs would need to analyze
6340 * all functions in order to make proper register save/restore
6341 * decisions in the main prog. Hence disallow LD_ABS with calls.
6342 */
6343 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
6344 return -EINVAL;
6345 }
6346
ddd872bc 6347 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
d82bccc6 6348 BPF_SIZE(insn->code) == BPF_DW ||
ddd872bc 6349 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
61bd5218 6350 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
ddd872bc
AS
6351 return -EINVAL;
6352 }
6353
6354 /* check whether implicit source operand (register R6) is readable */
6d4f151a 6355 err = check_reg_arg(env, ctx_reg, SRC_OP);
ddd872bc
AS
6356 if (err)
6357 return err;
6358
fd978bf7
JS
6359 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
6360 * gen_ld_abs() may terminate the program at runtime, leading to
6361 * reference leak.
6362 */
6363 err = check_reference_leak(env);
6364 if (err) {
6365 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
6366 return err;
6367 }
6368
d83525ca
AS
6369 if (env->cur_state->active_spin_lock) {
6370 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
6371 return -EINVAL;
6372 }
6373
6d4f151a 6374 if (regs[ctx_reg].type != PTR_TO_CTX) {
61bd5218
JK
6375 verbose(env,
6376 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
ddd872bc
AS
6377 return -EINVAL;
6378 }
6379
6380 if (mode == BPF_IND) {
6381 /* check explicit source operand */
dc503a8a 6382 err = check_reg_arg(env, insn->src_reg, SRC_OP);
ddd872bc
AS
6383 if (err)
6384 return err;
6385 }
6386
6d4f151a
DB
6387 err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
6388 if (err < 0)
6389 return err;
6390
ddd872bc 6391 /* reset caller saved regs to unreadable */
dc503a8a 6392 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 6393 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
6394 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6395 }
ddd872bc
AS
6396
6397 /* mark destination R0 register as readable, since it contains
dc503a8a
EC
6398 * the value fetched from the packet.
6399 * Already marked as written above.
ddd872bc 6400 */
61bd5218 6401 mark_reg_unknown(env, regs, BPF_REG_0);
5327ed3d
JW
6402 /* ld_abs loads up to 32 bits of skb data. */
6403 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
ddd872bc
AS
6404 return 0;
6405}
6406
390ee7e2
AS
6407static int check_return_code(struct bpf_verifier_env *env)
6408{
5cf1e914 6409 struct tnum enforce_attach_type_range = tnum_unknown;
27ae7997 6410 const struct bpf_prog *prog = env->prog;
390ee7e2
AS
6411 struct bpf_reg_state *reg;
6412 struct tnum range = tnum_range(0, 1);
27ae7997
MKL
6413 int err;
6414
6415 /* The struct_ops func-ptr's return type could be "void" */
6416 if (env->prog->type == BPF_PROG_TYPE_STRUCT_OPS &&
6417 !prog->aux->attach_func_proto->type)
6418 return 0;
6419
6420 * The eBPF calling convention is such that R0 is used
6421 * to return the value from an eBPF program.
6422 * Make sure that it's readable at this time
6423 * of bpf_exit, which means that the program wrote
6424 * something into it earlier
6425 */
6426 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
6427 if (err)
6428 return err;
6429
6430 if (is_pointer_value(env, BPF_REG_0)) {
6431 verbose(env, "R0 leaks addr as return value\n");
6432 return -EACCES;
6433 }
390ee7e2
AS
6434
6435 switch (env->prog->type) {
983695fa
DB
6436 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
6437 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
6438 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
6439 range = tnum_range(1, 1);
ed4ed404 6440 break;
390ee7e2 6441 case BPF_PROG_TYPE_CGROUP_SKB:
5cf1e914 6442 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
6443 range = tnum_range(0, 3);
6444 enforce_attach_type_range = tnum_range(2, 3);
6445 }
ed4ed404 6446 break;
390ee7e2
AS
6447 case BPF_PROG_TYPE_CGROUP_SOCK:
6448 case BPF_PROG_TYPE_SOCK_OPS:
ebc614f6 6449 case BPF_PROG_TYPE_CGROUP_DEVICE:
7b146ceb 6450 case BPF_PROG_TYPE_CGROUP_SYSCTL:
0d01da6a 6451 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
390ee7e2 6452 break;
15ab09bd
AS
6453 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6454 if (!env->prog->aux->attach_btf_id)
6455 return 0;
6456 range = tnum_const(0);
6457 break;
390ee7e2
AS
6458 default:
6459 return 0;
6460 }
6461
638f5b90 6462 reg = cur_regs(env) + BPF_REG_0;
390ee7e2 6463 if (reg->type != SCALAR_VALUE) {
61bd5218 6464 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
390ee7e2
AS
6465 reg_type_str[reg->type]);
6466 return -EINVAL;
6467 }
6468
6469 if (!tnum_in(range, reg->var_off)) {
5cf1e914 6470 char tn_buf[48];
6471
61bd5218 6472 verbose(env, "At program exit the register R0 ");
390ee7e2 6473 if (!tnum_is_unknown(reg->var_off)) {
390ee7e2 6474 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 6475 verbose(env, "has value %s", tn_buf);
390ee7e2 6476 } else {
61bd5218 6477 verbose(env, "has unknown scalar value");
390ee7e2 6478 }
5cf1e914 6479 tnum_strn(tn_buf, sizeof(tn_buf), range);
983695fa 6480 verbose(env, " should have been in %s\n", tn_buf);
390ee7e2
AS
6481 return -EINVAL;
6482 }
5cf1e914 6483
6484 if (!tnum_is_unknown(enforce_attach_type_range) &&
6485 tnum_in(enforce_attach_type_range, reg->var_off))
6486 env->prog->enforce_expected_attach_type = 1;
390ee7e2
AS
6487 return 0;
6488}
6489
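The acceptable-range checks above are tnum (tracked known-bits) based. Below is a standalone userspace sketch of the two helpers involved, mirroring the logic of kernel/bpf/tnum.c; this simplified version only models the value/mask pair and is an editor's illustration, not the kernel implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tnum { uint64_t value; uint64_t mask; };	/* mask bits are unknown */

/* Smallest tnum covering every constant in [min, max]. */
static struct tnum tnum_range(uint64_t min, uint64_t max)
{
	uint64_t chi = min ^ max, delta;
	int bits;

	if (chi == 0)
		return (struct tnum){ min, 0 };		/* constant */
	bits = 64 - __builtin_clzll(chi);		/* fls64(chi) */
	if (bits > 63)
		return (struct tnum){ 0, ~0ULL };	/* fully unknown */
	delta = (1ULL << bits) - 1;
	return (struct tnum){ min & ~delta, delta };
}

/* Is every value that b may take also a value that a may take? */
static bool tnum_in(struct tnum a, struct tnum b)
{
	if (b.mask & ~a.mask)
		return false;		/* b unknown where a is known */
	b.value &= ~a.mask;
	return a.value == b.value;
}

int main(void)
{
	struct tnum range = tnum_range(0, 1);	/* {value = 0, mask = 1} */
	struct tnum r0_is_1 = { 1, 0 };		/* R0 known to be 1 */
	struct tnum r0_is_2 = { 2, 0 };		/* R0 known to be 2 */

	printf("%d\n", tnum_in(range, r0_is_1));	/* 1: accepted */
	printf("%d\n", tnum_in(range, r0_is_2));	/* 0: rejected */
	return 0;
}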
475fb78f
AS
6490/* non-recursive DFS pseudo code
6491 * 1 procedure DFS-iterative(G,v):
6492 * 2 label v as discovered
6493 * 3 let S be a stack
6494 * 4 S.push(v)
6495 * 5 while S is not empty
6496 * 6 t <- S.pop()
6497 * 7 if t is what we're looking for:
6498 * 8 return t
6499 * 9 for all edges e in G.adjacentEdges(t) do
6500 * 10 if edge e is already labelled
6501 * 11 continue with the next edge
6502 * 12 w <- G.adjacentVertex(t,e)
6503 * 13 if vertex w is not discovered and not explored
6504 * 14 label e as tree-edge
6505 * 15 label w as discovered
6506 * 16 S.push(w)
6507 * 17 continue at 5
6508 * 18 else if vertex w is discovered
6509 * 19 label e as back-edge
6510 * 20 else
6511 * 21 // vertex w is explored
6512 * 22 label e as forward- or cross-edge
6513 * 23 label t as explored
6514 * 24 S.pop()
6515 *
6516 * convention:
6517 * 0x10 - discovered
6518 * 0x11 - discovered and fall-through edge labelled
6519 * 0x12 - discovered and fall-through and branch edges labelled
6520 * 0x20 - explored
6521 */
6522
6523enum {
6524 DISCOVERED = 0x10,
6525 EXPLORED = 0x20,
6526 FALLTHROUGH = 1,
6527 BRANCH = 2,
6528};
6529
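A compile-and-run toy version of the walk described above (editor's sketch; the four-node CFG is hypothetical). It uses the same state encoding and classifies the edge 2 -> 0 as a back-edge:

#include <stdio.h>

#define DISCOVERED  0x10
#define EXPLORED    0x20
#define FALLTHROUGH 1
#define BRANCH      2

/* succ[i][0] is the fall-through successor, succ[i][1] the branch
 * successor; -1 means the edge does not exist.
 */
static const int succ[4][2] = {
	{  1, -1 },	/* 0 -> 1 */
	{  2, -1 },	/* 1 -> 2 */
	{  3,  0 },	/* 2 -> 3, or back to 0 (the back-edge) */
	{ -1, -1 },	/* 3: exit */
};

static int insn_state[4];
static int insn_stack[4];
static int cur_stack;

static int push_insn(int t, int w, int e)
{
	if (w < 0)
		return 0;				/* no such edge */
	if (insn_state[t] >= (DISCOVERED | e))
		return 0;				/* edge already labelled */
	insn_state[t] = DISCOVERED | e;
	if (insn_state[w] == 0) {
		insn_state[w] = DISCOVERED;		/* tree-edge */
		insn_stack[cur_stack++] = w;
		return 1;
	}
	if ((insn_state[w] & 0xF0) == DISCOVERED) {
		printf("back-edge from insn %d to %d\n", t, w);
		return -1;				/* loop detected */
	}
	return 0;					/* forward- or cross-edge */
}

int main(void)
{
	insn_state[0] = DISCOVERED;
	insn_stack[0] = 0;
	cur_stack = 1;

	while (cur_stack) {
		int t = insn_stack[cur_stack - 1];
		int r;

		r = push_insn(t, succ[t][0], FALLTHROUGH);
		if (r < 0)
			return 1;
		if (r == 1)
			continue;
		r = push_insn(t, succ[t][1], BRANCH);
		if (r < 0)
			return 1;
		if (r == 1)
			continue;
		insn_state[t] = EXPLORED;
		cur_stack--;
	}
	printf("no loops\n");
	return 0;
}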
dc2a4ebc
AS
6530static u32 state_htab_size(struct bpf_verifier_env *env)
6531{
6532 return env->prog->len;
6533}
6534
5d839021
AS
6535static struct bpf_verifier_state_list **explored_state(
6536 struct bpf_verifier_env *env,
6537 int idx)
6538{
dc2a4ebc
AS
6539 struct bpf_verifier_state *cur = env->cur_state;
6540 struct bpf_func_state *state = cur->frame[cur->curframe];
6541
6542 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
5d839021
AS
6543}
6544
6545static void init_explored_state(struct bpf_verifier_env *env, int idx)
6546{
a8f500af 6547 env->insn_aux_data[idx].prune_point = true;
5d839021 6548}
f1bca824 6549
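A small worked example of the bucket computation in explored_state() (all values hypothetical): folding the current frame's callsite into the hash keeps states that share an insn_idx but were reached through different call chains on separate lists, which shortens the list walks in is_state_visited().

#include <stdio.h>

int main(void)
{
	unsigned int prog_len = 1024;	/* state_htab_size(env) */
	unsigned int insn_idx = 200;
	unsigned int callsite_a = 57, callsite_b = 311;

	printf("%u\n", (insn_idx ^ callsite_a) % prog_len);	/* 241 */
	printf("%u\n", (insn_idx ^ callsite_b) % prog_len);	/* 511 */
	return 0;
}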
475fb78f
AS
6550/* t, w, e - match pseudo-code above:
6551 * t - index of current instruction
6552 * w - next instruction
6553 * e - edge
6554 */
2589726d
AS
6555static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
6556 bool loop_ok)
475fb78f 6557{
7df737e9
AS
6558 int *insn_stack = env->cfg.insn_stack;
6559 int *insn_state = env->cfg.insn_state;
6560
475fb78f
AS
6561 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
6562 return 0;
6563
6564 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
6565 return 0;
6566
6567 if (w < 0 || w >= env->prog->len) {
d9762e84 6568 verbose_linfo(env, t, "%d: ", t);
61bd5218 6569 verbose(env, "jump out of range from insn %d to %d\n", t, w);
475fb78f
AS
6570 return -EINVAL;
6571 }
6572
f1bca824
AS
6573 if (e == BRANCH)
6574 /* mark branch target for state pruning */
5d839021 6575 init_explored_state(env, w);
f1bca824 6576
475fb78f
AS
6577 if (insn_state[w] == 0) {
6578 /* tree-edge */
6579 insn_state[t] = DISCOVERED | e;
6580 insn_state[w] = DISCOVERED;
7df737e9 6581 if (env->cfg.cur_stack >= env->prog->len)
475fb78f 6582 return -E2BIG;
7df737e9 6583 insn_stack[env->cfg.cur_stack++] = w;
475fb78f
AS
6584 return 1;
6585 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
2589726d
AS
6586 if (loop_ok && env->allow_ptr_leaks)
6587 return 0;
d9762e84
MKL
6588 verbose_linfo(env, t, "%d: ", t);
6589 verbose_linfo(env, w, "%d: ", w);
61bd5218 6590 verbose(env, "back-edge from insn %d to %d\n", t, w);
475fb78f
AS
6591 return -EINVAL;
6592 } else if (insn_state[w] == EXPLORED) {
6593 /* forward- or cross-edge */
6594 insn_state[t] = DISCOVERED | e;
6595 } else {
61bd5218 6596 verbose(env, "insn state internal bug\n");
475fb78f
AS
6597 return -EFAULT;
6598 }
6599 return 0;
6600}
6601
6602/* non-recursive depth-first-search to detect loops in BPF program
6603 * loop == back-edge in directed graph
6604 */
58e2af8b 6605static int check_cfg(struct bpf_verifier_env *env)
475fb78f
AS
6606{
6607 struct bpf_insn *insns = env->prog->insnsi;
6608 int insn_cnt = env->prog->len;
7df737e9 6609 int *insn_stack, *insn_state;
475fb78f
AS
6610 int ret = 0;
6611 int i, t;
6612
7df737e9 6613 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f
AS
6614 if (!insn_state)
6615 return -ENOMEM;
6616
7df737e9 6617 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f 6618 if (!insn_stack) {
71dde681 6619 kvfree(insn_state);
475fb78f
AS
6620 return -ENOMEM;
6621 }
6622
6623 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
6624 insn_stack[0] = 0; /* 0 is the first instruction */
7df737e9 6625 env->cfg.cur_stack = 1;
475fb78f
AS
6626
6627peek_stack:
7df737e9 6628 if (env->cfg.cur_stack == 0)
475fb78f 6629 goto check_state;
7df737e9 6630 t = insn_stack[env->cfg.cur_stack - 1];
475fb78f 6631
092ed096
JW
6632 if (BPF_CLASS(insns[t].code) == BPF_JMP ||
6633 BPF_CLASS(insns[t].code) == BPF_JMP32) {
475fb78f
AS
6634 u8 opcode = BPF_OP(insns[t].code);
6635
6636 if (opcode == BPF_EXIT) {
6637 goto mark_explored;
6638 } else if (opcode == BPF_CALL) {
2589726d 6639 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
475fb78f
AS
6640 if (ret == 1)
6641 goto peek_stack;
6642 else if (ret < 0)
6643 goto err_free;
07016151 6644 if (t + 1 < insn_cnt)
5d839021 6645 init_explored_state(env, t + 1);
cc8b0b92 6646 if (insns[t].src_reg == BPF_PSEUDO_CALL) {
5d839021 6647 init_explored_state(env, t);
2589726d
AS
6648 ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
6649 env, false);
cc8b0b92
AS
6650 if (ret == 1)
6651 goto peek_stack;
6652 else if (ret < 0)
6653 goto err_free;
6654 }
475fb78f
AS
6655 } else if (opcode == BPF_JA) {
6656 if (BPF_SRC(insns[t].code) != BPF_K) {
6657 ret = -EINVAL;
6658 goto err_free;
6659 }
6660 /* unconditional jump with single edge */
6661 ret = push_insn(t, t + insns[t].off + 1,
2589726d 6662 FALLTHROUGH, env, true);
475fb78f
AS
6663 if (ret == 1)
6664 goto peek_stack;
6665 else if (ret < 0)
6666 goto err_free;
b5dc0163
AS
6667 /* unconditional jmp is not a good pruning point,
6668 * but it's marked, since backtracking needs
6669 * to record jmp history in is_state_visited().
6670 */
6671 init_explored_state(env, t + insns[t].off + 1);
f1bca824
AS
6672 /* tell verifier to check for equivalent states
6673 * after every call and jump
6674 */
c3de6317 6675 if (t + 1 < insn_cnt)
5d839021 6676 init_explored_state(env, t + 1);
475fb78f
AS
6677 } else {
6678 /* conditional jump with two edges */
5d839021 6679 init_explored_state(env, t);
2589726d 6680 ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
475fb78f
AS
6681 if (ret == 1)
6682 goto peek_stack;
6683 else if (ret < 0)
6684 goto err_free;
6685
2589726d 6686 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
475fb78f
AS
6687 if (ret == 1)
6688 goto peek_stack;
6689 else if (ret < 0)
6690 goto err_free;
6691 }
6692 } else {
6693 /* all other non-branch instructions with single
6694 * fall-through edge
6695 */
2589726d 6696 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
475fb78f
AS
6697 if (ret == 1)
6698 goto peek_stack;
6699 else if (ret < 0)
6700 goto err_free;
6701 }
6702
6703mark_explored:
6704 insn_state[t] = EXPLORED;
7df737e9 6705 if (env->cfg.cur_stack-- <= 0) {
61bd5218 6706 verbose(env, "pop stack internal bug\n");
475fb78f
AS
6707 ret = -EFAULT;
6708 goto err_free;
6709 }
6710 goto peek_stack;
6711
6712check_state:
6713 for (i = 0; i < insn_cnt; i++) {
6714 if (insn_state[i] != EXPLORED) {
61bd5218 6715 verbose(env, "unreachable insn %d\n", i);
475fb78f
AS
6716 ret = -EINVAL;
6717 goto err_free;
6718 }
6719 }
6720 ret = 0; /* cfg looks good */
6721
6722err_free:
71dde681
AS
6723 kvfree(insn_state);
6724 kvfree(insn_stack);
7df737e9 6725 env->cfg.insn_state = env->cfg.insn_stack = NULL;
475fb78f
AS
6726 return ret;
6727}
6728
838e9690
YS
6729/* The minimum supported BTF func info size */
6730#define MIN_BPF_FUNCINFO_SIZE 8
6731#define MAX_FUNCINFO_REC_SIZE 252
6732
c454a46b
MKL
6733static int check_btf_func(struct bpf_verifier_env *env,
6734 const union bpf_attr *attr,
6735 union bpf_attr __user *uattr)
838e9690 6736{
d0b2818e 6737 u32 i, nfuncs, urec_size, min_size;
838e9690 6738 u32 krec_size = sizeof(struct bpf_func_info);
c454a46b 6739 struct bpf_func_info *krecord;
8c1b6e69 6740 struct bpf_func_info_aux *info_aux = NULL;
838e9690 6741 const struct btf_type *type;
c454a46b
MKL
6742 struct bpf_prog *prog;
6743 const struct btf *btf;
838e9690 6744 void __user *urecord;
d0b2818e 6745 u32 prev_offset = 0;
838e9690
YS
6746 int ret = 0;
6747
6748 nfuncs = attr->func_info_cnt;
6749 if (!nfuncs)
6750 return 0;
6751
6752 if (nfuncs != env->subprog_cnt) {
6753 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
6754 return -EINVAL;
6755 }
6756
6757 urec_size = attr->func_info_rec_size;
6758 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
6759 urec_size > MAX_FUNCINFO_REC_SIZE ||
6760 urec_size % sizeof(u32)) {
6761 verbose(env, "invalid func info rec size %u\n", urec_size);
6762 return -EINVAL;
6763 }
6764
c454a46b
MKL
6765 prog = env->prog;
6766 btf = prog->aux->btf;
838e9690
YS
6767
6768 urecord = u64_to_user_ptr(attr->func_info);
6769 min_size = min_t(u32, krec_size, urec_size);
6770
ba64e7d8 6771 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
c454a46b
MKL
6772 if (!krecord)
6773 return -ENOMEM;
8c1b6e69
AS
6774 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
6775 if (!info_aux)
6776 goto err_free;
ba64e7d8 6777
838e9690
YS
6778 for (i = 0; i < nfuncs; i++) {
6779 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
6780 if (ret) {
6781 if (ret == -E2BIG) {
6782 verbose(env, "nonzero trailing record in func info");
6783 /* set the size kernel expects so loader can zero
6784 * out the rest of the record.
6785 */
6786 if (put_user(min_size, &uattr->func_info_rec_size))
6787 ret = -EFAULT;
6788 }
c454a46b 6789 goto err_free;
838e9690
YS
6790 }
6791
ba64e7d8 6792 if (copy_from_user(&krecord[i], urecord, min_size)) {
838e9690 6793 ret = -EFAULT;
c454a46b 6794 goto err_free;
838e9690
YS
6795 }
6796
d30d42e0 6797 /* check insn_off */
838e9690 6798 if (i == 0) {
d30d42e0 6799 if (krecord[i].insn_off) {
838e9690 6800 verbose(env,
d30d42e0
MKL
6801 "nonzero insn_off %u for the first func info record",
6802 krecord[i].insn_off);
838e9690 6803 ret = -EINVAL;
c454a46b 6804 goto err_free;
838e9690 6805 }
d30d42e0 6806 } else if (krecord[i].insn_off <= prev_offset) {
838e9690
YS
6807 verbose(env,
6808 "same or smaller insn offset (%u) than previous func info record (%u)",
d30d42e0 6809 krecord[i].insn_off, prev_offset);
838e9690 6810 ret = -EINVAL;
c454a46b 6811 goto err_free;
838e9690
YS
6812 }
6813
d30d42e0 6814 if (env->subprog_info[i].start != krecord[i].insn_off) {
838e9690
YS
6815 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
6816 ret = -EINVAL;
c454a46b 6817 goto err_free;
838e9690
YS
6818 }
6819
6820 /* check type_id */
ba64e7d8 6821 type = btf_type_by_id(btf, krecord[i].type_id);
51c39bb1 6822 if (!type || !btf_type_is_func(type)) {
838e9690 6823 verbose(env, "invalid type id %d in func info",
ba64e7d8 6824 krecord[i].type_id);
838e9690 6825 ret = -EINVAL;
c454a46b 6826 goto err_free;
838e9690 6827 }
51c39bb1 6828 info_aux[i].linkage = BTF_INFO_VLEN(type->info);
d30d42e0 6829 prev_offset = krecord[i].insn_off;
838e9690
YS
6830 urecord += urec_size;
6831 }
6832
ba64e7d8
YS
6833 prog->aux->func_info = krecord;
6834 prog->aux->func_info_cnt = nfuncs;
8c1b6e69 6835 prog->aux->func_info_aux = info_aux;
838e9690
YS
6836 return 0;
6837
c454a46b 6838err_free:
ba64e7d8 6839 kvfree(krecord);
8c1b6e69 6840 kfree(info_aux);
838e9690
YS
6841 return ret;
6842}
6843
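check_btf_func() follows the usual bpf(2) compatibility pattern for records whose size may grow over time: copy min(kernel record size, user record size) bytes and accept a larger user record only if its tail is zeroed. A standalone sketch of that pattern; both function names here are simplified stand-ins, not the kernel helpers.

#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for bpf_check_uarg_tail_zero(): a record larger than what
 * the kernel knows is acceptable only if the extra tail is zeroed.
 */
static int check_uarg_tail_zero(const uint8_t *urec, uint32_t expected_size,
				uint32_t actual_size)
{
	uint32_t i;

	for (i = expected_size; i < actual_size; i++)
		if (urec[i])
			return -E2BIG;	/* nonzero trailing bytes */
	return 0;
}

/* Copy one record coming from an older or newer userspace layout. */
static int copy_one_rec(void *krec, uint32_t krec_size,
			const uint8_t *urec, uint32_t urec_size)
{
	uint32_t min_size = krec_size < urec_size ? krec_size : urec_size;
	int err = check_uarg_tail_zero(urec, krec_size, urec_size);

	if (err)
		return err;
	memset(krec, 0, krec_size);	/* zero fields the user didn't send */
	memcpy(krec, urec, min_size);
	return 0;
}

int main(void)
{
	struct { uint32_t insn_off, type_id; } krec;	/* 8-byte kernel layout */
	uint8_t urec[12] = { 1, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0 };

	/* 12-byte user record with a zeroed tail: accepted, 8 bytes copied */
	return copy_one_rec(&krec, sizeof(krec), urec, sizeof(urec));
}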
ba64e7d8
YS
6844static void adjust_btf_func(struct bpf_verifier_env *env)
6845{
8c1b6e69 6846 struct bpf_prog_aux *aux = env->prog->aux;
ba64e7d8
YS
6847 int i;
6848
8c1b6e69 6849 if (!aux->func_info)
ba64e7d8
YS
6850 return;
6851
6852 for (i = 0; i < env->subprog_cnt; i++)
8c1b6e69 6853 aux->func_info[i].insn_off = env->subprog_info[i].start;
ba64e7d8
YS
6854}
6855
c454a46b
MKL
6856#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
6857 sizeof(((struct bpf_line_info *)(0))->line_col))
6858#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
6859
6860static int check_btf_line(struct bpf_verifier_env *env,
6861 const union bpf_attr *attr,
6862 union bpf_attr __user *uattr)
6863{
6864 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
6865 struct bpf_subprog_info *sub;
6866 struct bpf_line_info *linfo;
6867 struct bpf_prog *prog;
6868 const struct btf *btf;
6869 void __user *ulinfo;
6870 int err;
6871
6872 nr_linfo = attr->line_info_cnt;
6873 if (!nr_linfo)
6874 return 0;
6875
6876 rec_size = attr->line_info_rec_size;
6877 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
6878 rec_size > MAX_LINEINFO_REC_SIZE ||
6879 rec_size & (sizeof(u32) - 1))
6880 return -EINVAL;
6881
6882 /* Need to zero it in case userspace passes in a
6883 * smaller bpf_line_info object.
6884 */
6885 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
6886 GFP_KERNEL | __GFP_NOWARN);
6887 if (!linfo)
6888 return -ENOMEM;
6889
6890 prog = env->prog;
6891 btf = prog->aux->btf;
6892
6893 s = 0;
6894 sub = env->subprog_info;
6895 ulinfo = u64_to_user_ptr(attr->line_info);
6896 expected_size = sizeof(struct bpf_line_info);
6897 ncopy = min_t(u32, expected_size, rec_size);
6898 for (i = 0; i < nr_linfo; i++) {
6899 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
6900 if (err) {
6901 if (err == -E2BIG) {
6902 verbose(env, "nonzero trailing record in line_info");
6903 if (put_user(expected_size,
6904 &uattr->line_info_rec_size))
6905 err = -EFAULT;
6906 }
6907 goto err_free;
6908 }
6909
6910 if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
6911 err = -EFAULT;
6912 goto err_free;
6913 }
6914
6915 /*
6916 * Check insn_off to ensure
6917 * 1) strictly increasing AND
6918 * 2) bounded by prog->len
6919 *
6920 * The linfo[0].insn_off == 0 check logically falls into
6921 * the later "missing bpf_line_info for func..." case
6922 * because the first linfo[0].insn_off must also belong
6923 * to the first subprog, and the first subprog must have
6924 * subprog_info[0].start == 0.
6925 */
6926 if ((i && linfo[i].insn_off <= prev_offset) ||
6927 linfo[i].insn_off >= prog->len) {
6928 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
6929 i, linfo[i].insn_off, prev_offset,
6930 prog->len);
6931 err = -EINVAL;
6932 goto err_free;
6933 }
6934
fdbaa0be
MKL
6935 if (!prog->insnsi[linfo[i].insn_off].code) {
6936 verbose(env,
6937 "Invalid insn code at line_info[%u].insn_off\n",
6938 i);
6939 err = -EINVAL;
6940 goto err_free;
6941 }
6942
23127b33
MKL
6943 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
6944 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
c454a46b
MKL
6945 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
6946 err = -EINVAL;
6947 goto err_free;
6948 }
6949
6950 if (s != env->subprog_cnt) {
6951 if (linfo[i].insn_off == sub[s].start) {
6952 sub[s].linfo_idx = i;
6953 s++;
6954 } else if (sub[s].start < linfo[i].insn_off) {
6955 verbose(env, "missing bpf_line_info for func#%u\n", s);
6956 err = -EINVAL;
6957 goto err_free;
6958 }
6959 }
6960
6961 prev_offset = linfo[i].insn_off;
6962 ulinfo += rec_size;
6963 }
6964
6965 if (s != env->subprog_cnt) {
6966 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
6967 env->subprog_cnt - s, s);
6968 err = -EINVAL;
6969 goto err_free;
6970 }
6971
6972 prog->aux->linfo = linfo;
6973 prog->aux->nr_linfo = nr_linfo;
6974
6975 return 0;
6976
6977err_free:
6978 kvfree(linfo);
6979 return err;
6980}
6981
6982static int check_btf_info(struct bpf_verifier_env *env,
6983 const union bpf_attr *attr,
6984 union bpf_attr __user *uattr)
6985{
6986 struct btf *btf;
6987 int err;
6988
6989 if (!attr->func_info_cnt && !attr->line_info_cnt)
6990 return 0;
6991
6992 btf = btf_get_by_fd(attr->prog_btf_fd);
6993 if (IS_ERR(btf))
6994 return PTR_ERR(btf);
6995 env->prog->aux->btf = btf;
6996
6997 err = check_btf_func(env, attr, uattr);
6998 if (err)
6999 return err;
7000
7001 err = check_btf_line(env, attr, uattr);
7002 if (err)
7003 return err;
7004
7005 return 0;
ba64e7d8
YS
7006}
7007
f1174f77
EC
7008/* check %cur's range satisfies %old's */
7009static bool range_within(struct bpf_reg_state *old,
7010 struct bpf_reg_state *cur)
7011{
b03c9f9f
EC
7012 return old->umin_value <= cur->umin_value &&
7013 old->umax_value >= cur->umax_value &&
7014 old->smin_value <= cur->smin_value &&
7015 old->smax_value >= cur->smax_value;
f1174f77
EC
7016}
7017
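A standalone restatement with a worked example (bounds values hypothetical): the old state subsumes the current one only if it is at least as wide on all four bounds.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bounds { uint64_t umin, umax; int64_t smin, smax; };

static bool range_within(const struct bounds *old, const struct bounds *cur)
{
	return old->umin <= cur->umin && old->umax >= cur->umax &&
	       old->smin <= cur->smin && old->smax >= cur->smax;
}

int main(void)
{
	struct bounds old = { 2, 10, 2, 10 };
	struct bounds ok  = { 4,  8, 4,  8 };	/* inside [2, 10] */
	struct bounds bad = { 4, 12, 4, 12 };	/* umax/smax exceed old's */

	printf("%d %d\n", range_within(&old, &ok),	/* 1 */
	       range_within(&old, &bad));		/* 0 */
	return 0;
}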
7018/* Maximum number of register states that can exist at once */
7019#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
7020struct idpair {
7021 u32 old;
7022 u32 cur;
7023};
7024
7025/* If in the old state two registers had the same id, then they need to have
7026 * the same id in the new state as well. But that id could be different from
7027 * the old state, so we need to track the mapping from old to new ids.
7028 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
7029 * regs with old id 5 must also have new id 9 for the new state to be safe. But
7030 * regs with a different old id could still have new id 9, we don't care about
7031 * that.
7032 * So we look through our idmap to see if this old id has been seen before. If
7033 * so, we require the new id to match; otherwise, we add the id pair to the map.
969bf05e 7034 */
f1174f77 7035static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
969bf05e 7036{
f1174f77 7037 unsigned int i;
969bf05e 7038
f1174f77
EC
7039 for (i = 0; i < ID_MAP_SIZE; i++) {
7040 if (!idmap[i].old) {
7041 /* Reached an empty slot; haven't seen this id before */
7042 idmap[i].old = old_id;
7043 idmap[i].cur = cur_id;
7044 return true;
7045 }
7046 if (idmap[i].old == old_id)
7047 return idmap[i].cur == cur_id;
7048 }
7049 /* We ran out of idmap slots, which should be impossible */
7050 WARN_ON_ONCE(1);
7051 return false;
7052}
7053
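A standalone demonstration of the invariant described above: once old id 5 is paired with new id 9, any later occurrence of old id 5 must again map to 9, while a different old id pairing with 9 is fine.

#include <stdbool.h>
#include <stdio.h>

#define ID_MAP_SIZE 8	/* enough slots for this example */

struct idpair { unsigned int old; unsigned int cur; };

static bool check_ids(unsigned int old_id, unsigned int cur_id,
		      struct idpair *idmap)
{
	int i;

	for (i = 0; i < ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {	/* empty slot: first sighting */
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
			return true;
		}
		if (idmap[i].old == old_id)
			return idmap[i].cur == cur_id;
	}
	return false;			/* ran out of slots */
}

int main(void)
{
	struct idpair idmap[ID_MAP_SIZE] = {};

	printf("%d\n", check_ids(5, 9, idmap));	/* 1: records 5 -> 9 */
	printf("%d\n", check_ids(5, 9, idmap));	/* 1: consistent */
	printf("%d\n", check_ids(7, 9, idmap));	/* 1: different old id, fine */
	printf("%d\n", check_ids(5, 8, idmap));	/* 0: 5 already maps to 9 */
	return 0;
}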
9242b5f5
AS
7054static void clean_func_state(struct bpf_verifier_env *env,
7055 struct bpf_func_state *st)
7056{
7057 enum bpf_reg_liveness live;
7058 int i, j;
7059
7060 for (i = 0; i < BPF_REG_FP; i++) {
7061 live = st->regs[i].live;
7062 /* liveness must not touch this register anymore */
7063 st->regs[i].live |= REG_LIVE_DONE;
7064 if (!(live & REG_LIVE_READ))
7065 /* since the register is unused, clear its state
7066 * to make further comparison simpler
7067 */
f54c7898 7068 __mark_reg_not_init(env, &st->regs[i]);
9242b5f5
AS
7069 }
7070
7071 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
7072 live = st->stack[i].spilled_ptr.live;
7073 /* liveness must not touch this stack slot anymore */
7074 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
7075 if (!(live & REG_LIVE_READ)) {
f54c7898 7076 __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
9242b5f5
AS
7077 for (j = 0; j < BPF_REG_SIZE; j++)
7078 st->stack[i].slot_type[j] = STACK_INVALID;
7079 }
7080 }
7081}
7082
7083static void clean_verifier_state(struct bpf_verifier_env *env,
7084 struct bpf_verifier_state *st)
7085{
7086 int i;
7087
7088 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
7089 /* all regs in this state in all frames were already marked */
7090 return;
7091
7092 for (i = 0; i <= st->curframe; i++)
7093 clean_func_state(env, st->frame[i]);
7094}
7095
7096/* the parentage chains form a tree.
7097 * the verifier states are added to state lists at given insn and
7098 * pushed into state stack for future exploration.
7099 * when the verifier reaches bpf_exit insn some of the verifier states
7100 * stored in the state lists have their final liveness state already,
7101 * but a lot of states will get revised from liveness point of view when
7102 * the verifier explores other branches.
7103 * Example:
7104 * 1: r0 = 1
7105 * 2: if r1 == 100 goto pc+1
7106 * 3: r0 = 2
7107 * 4: exit
7108 * when the verifier reaches exit insn the register r0 in the state list of
7109 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
7110 * of insn 2 and goes exploring further. At the insn 4 it will walk the
7111 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
7112 *
7113 * Since the verifier pushes the branch states as it sees them while exploring
7114 * the program, the condition of walking the branch instruction for the second
7115 * time means that all states below this branch were already explored and
7116 * their final liveness marks are already propagated.
7117 * Hence when the verifier completes the search of state list in is_state_visited()
7118 * we can call this clean_live_states() function to mark all liveness states
7119 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
7120 * will not be used.
7121 * This function also clears the registers and stack for states that !READ
7122 * to simplify state merging.
7123 *
7124 * An important note here: walking the same branch instruction in the callee
7125 * doesn't mean that the states are DONE. The verifier has to compare
7126 * the callsites.
7127 */
7128static void clean_live_states(struct bpf_verifier_env *env, int insn,
7129 struct bpf_verifier_state *cur)
7130{
7131 struct bpf_verifier_state_list *sl;
7132 int i;
7133
5d839021 7134 sl = *explored_state(env, insn);
a8f500af 7135 while (sl) {
2589726d
AS
7136 if (sl->state.branches)
7137 goto next;
dc2a4ebc
AS
7138 if (sl->state.insn_idx != insn ||
7139 sl->state.curframe != cur->curframe)
9242b5f5
AS
7140 goto next;
7141 for (i = 0; i <= cur->curframe; i++)
7142 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
7143 goto next;
7144 clean_verifier_state(env, &sl->state);
7145next:
7146 sl = sl->next;
7147 }
7148}
7149
f1174f77 7150/* Returns true if (rold safe implies rcur safe) */
1b688a19
EC
7151static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
7152 struct idpair *idmap)
f1174f77 7153{
f4d7e40a
AS
7154 bool equal;
7155
dc503a8a
EC
7156 if (!(rold->live & REG_LIVE_READ))
7157 /* explored state didn't use this */
7158 return true;
7159
679c782d 7160 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
f4d7e40a
AS
7161
7162 if (rold->type == PTR_TO_STACK)
7163 /* two stack pointers are equal only if they're pointing to
7164 * the same stack frame, since fp-8 in foo != fp-8 in bar
7165 */
7166 return equal && rold->frameno == rcur->frameno;
7167
7168 if (equal)
969bf05e
AS
7169 return true;
7170
f1174f77
EC
7171 if (rold->type == NOT_INIT)
7172 /* explored state can't have used this */
969bf05e 7173 return true;
f1174f77
EC
7174 if (rcur->type == NOT_INIT)
7175 return false;
7176 switch (rold->type) {
7177 case SCALAR_VALUE:
7178 if (rcur->type == SCALAR_VALUE) {
b5dc0163
AS
7179 if (!rold->precise && !rcur->precise)
7180 return true;
f1174f77
EC
7181 /* new val must satisfy old val knowledge */
7182 return range_within(rold, rcur) &&
7183 tnum_in(rold->var_off, rcur->var_off);
7184 } else {
179d1c56
JH
7185 /* We're trying to use a pointer in place of a scalar.
7186 * Even if the scalar was unbounded, this could lead to
7187 * pointer leaks because scalars are allowed to leak
7188 * while pointers are not. We could make this safe in
7189 * special cases if root is calling us, but it's
7190 * probably not worth the hassle.
f1174f77 7191 */
179d1c56 7192 return false;
f1174f77
EC
7193 }
7194 case PTR_TO_MAP_VALUE:
1b688a19
EC
7195 /* If the new min/max/var_off satisfy the old ones and
7196 * everything else matches, we are OK.
d83525ca
AS
7197 * 'id' is not compared, since it's only used for maps with
7198 * bpf_spin_lock inside map element and in such cases if
7199 * the rest of the prog is valid for one map element then
7200 * it's valid for all map elements regardless of the key
7201 * used in bpf_map_lookup()
1b688a19
EC
7202 */
7203 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
7204 range_within(rold, rcur) &&
7205 tnum_in(rold->var_off, rcur->var_off);
f1174f77
EC
7206 case PTR_TO_MAP_VALUE_OR_NULL:
7207 /* a PTR_TO_MAP_VALUE could be safe to use as a
7208 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
7209 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
7210 * checked, doing so could have affected others with the same
7211 * id, and we can't check for that because we lost the id when
7212 * we converted to a PTR_TO_MAP_VALUE.
7213 */
7214 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
7215 return false;
7216 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
7217 return false;
7218 /* Check our ids match any regs they're supposed to */
7219 return check_ids(rold->id, rcur->id, idmap);
de8f3a83 7220 case PTR_TO_PACKET_META:
f1174f77 7221 case PTR_TO_PACKET:
de8f3a83 7222 if (rcur->type != rold->type)
f1174f77
EC
7223 return false;
7224 /* We must have at least as much range as the old ptr
7225 * did, so that any accesses which were safe before are
7226 * still safe. This is true even if old range < old off,
7227 * since someone could have accessed through (ptr - k), or
7228 * even done ptr -= k in a register, to get a safe access.
7229 */
7230 if (rold->range > rcur->range)
7231 return false;
7232 /* If the offsets don't match, we can't trust our alignment;
7233 * nor can we be sure that we won't fall out of range.
7234 */
7235 if (rold->off != rcur->off)
7236 return false;
7237 /* id relations must be preserved */
7238 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
7239 return false;
7240 /* new val must satisfy old val knowledge */
7241 return range_within(rold, rcur) &&
7242 tnum_in(rold->var_off, rcur->var_off);
7243 case PTR_TO_CTX:
7244 case CONST_PTR_TO_MAP:
f1174f77 7245 case PTR_TO_PACKET_END:
d58e468b 7246 case PTR_TO_FLOW_KEYS:
c64b7983
JS
7247 case PTR_TO_SOCKET:
7248 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
7249 case PTR_TO_SOCK_COMMON:
7250 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
7251 case PTR_TO_TCP_SOCK:
7252 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 7253 case PTR_TO_XDP_SOCK:
f1174f77
EC
7254 /* Only valid matches are exact, which memcmp() above
7255 * would have accepted
7256 */
7257 default:
7258 /* Don't know what's going on, just say it's not safe */
7259 return false;
7260 }
969bf05e 7261
f1174f77
EC
7262 /* Shouldn't get here; if we do, say it's not safe */
7263 WARN_ON_ONCE(1);
969bf05e
AS
7264 return false;
7265}
7266
f4d7e40a
AS
7267static bool stacksafe(struct bpf_func_state *old,
7268 struct bpf_func_state *cur,
638f5b90
AS
7269 struct idpair *idmap)
7270{
7271 int i, spi;
7272
638f5b90
AS
7273 /* walk slots of the explored stack and ignore any additional
7274 * slots in the current stack, since explored(safe) state
7275 * didn't use them
7276 */
7277 for (i = 0; i < old->allocated_stack; i++) {
7278 spi = i / BPF_REG_SIZE;
7279
b233920c
AS
7280 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
7281 i += BPF_REG_SIZE - 1;
cc2b14d5 7282 /* explored state didn't use this */
fd05e57b 7283 continue;
b233920c 7284 }
cc2b14d5 7285
638f5b90
AS
7286 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
7287 continue;
19e2dbb7
AS
7288
7289 /* explored stack has more populated slots than current stack
7290 * and these slots were used
7291 */
7292 if (i >= cur->allocated_stack)
7293 return false;
7294
cc2b14d5
AS
7295 /* if old state was safe with misc data in the stack
7296 * it will be safe with zero-initialized stack.
7297 * The opposite is not true
7298 */
7299 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
7300 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
7301 continue;
638f5b90
AS
7302 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
7303 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
7304 /* Ex: old explored (safe) state has STACK_SPILL in
7305 * this stack slot, but current has STACK_MISC ->
7306 * these verifier states are not equivalent,
7307 * return false to continue verification of this path
7308 */
7309 return false;
7310 if (i % BPF_REG_SIZE)
7311 continue;
7312 if (old->stack[spi].slot_type[0] != STACK_SPILL)
7313 continue;
7314 if (!regsafe(&old->stack[spi].spilled_ptr,
7315 &cur->stack[spi].spilled_ptr,
7316 idmap))
7317 /* when explored and current stack slot are both storing
7318 * spilled registers, check that the stored pointers' types
7319 * are the same as well.
7320 * Ex: explored safe path could have stored
7321 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
7322 * but current path has stored:
7323 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
7324 * such verifier states are not equivalent.
7325 * return false to continue verification of this path
7326 */
7327 return false;
7328 }
7329 return true;
7330}
7331
fd978bf7
JS
7332static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
7333{
7334 if (old->acquired_refs != cur->acquired_refs)
7335 return false;
7336 return !memcmp(old->refs, cur->refs,
7337 sizeof(*old->refs) * old->acquired_refs);
7338}
7339
f1bca824
AS
7340/* compare two verifier states
7341 *
7342 * all states stored in state_list are known to be valid, since
7343 * verifier reached 'bpf_exit' instruction through them
7344 *
7345 * this function is called when verifier exploring different branches of
7346 * execution popped from the state stack. If it sees an old state that has
7347 * more strict register state and more strict stack state then this execution
7348 * branch doesn't need to be explored further, since verifier already
7349 * concluded that more strict state leads to valid finish.
7350 *
7351 * Therefore two states are equivalent if register state is more conservative
7352 * and explored stack state is more conservative than the current one.
7353 * Example:
7354 * explored current
7355 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
7356 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
7357 *
7358 * In other words if current stack state (one being explored) has more
7359 * valid slots than old one that already passed validation, it means
7360 * the verifier can stop exploring and conclude that current state is valid too
7361 *
7362 * Similarly with registers. If explored state has register type as invalid
7363 * whereas register type in current state is meaningful, it means that
7364 * the current state will reach 'bpf_exit' instruction safely
7365 */
f4d7e40a
AS
7366static bool func_states_equal(struct bpf_func_state *old,
7367 struct bpf_func_state *cur)
f1bca824 7368{
f1174f77
EC
7369 struct idpair *idmap;
7370 bool ret = false;
f1bca824
AS
7371 int i;
7372
f1174f77
EC
7373 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
7374 /* If we failed to allocate the idmap, just say it's not safe */
7375 if (!idmap)
1a0dc1ac 7376 return false;
f1174f77
EC
7377
7378 for (i = 0; i < MAX_BPF_REG; i++) {
1b688a19 7379 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
f1174f77 7380 goto out_free;
f1bca824
AS
7381 }
7382
638f5b90
AS
7383 if (!stacksafe(old, cur, idmap))
7384 goto out_free;
fd978bf7
JS
7385
7386 if (!refsafe(old, cur))
7387 goto out_free;
f1174f77
EC
7388 ret = true;
7389out_free:
7390 kfree(idmap);
7391 return ret;
f1bca824
AS
7392}
7393
f4d7e40a
AS
7394static bool states_equal(struct bpf_verifier_env *env,
7395 struct bpf_verifier_state *old,
7396 struct bpf_verifier_state *cur)
7397{
7398 int i;
7399
7400 if (old->curframe != cur->curframe)
7401 return false;
7402
979d63d5
DB
7403 /* Verification state from speculative execution simulation
7404 * must never prune a non-speculative execution one.
7405 */
7406 if (old->speculative && !cur->speculative)
7407 return false;
7408
d83525ca
AS
7409 if (old->active_spin_lock != cur->active_spin_lock)
7410 return false;
7411
f4d7e40a
AS
7412 /* for states to be equal callsites have to be the same
7413 * and all frame states need to be equivalent
7414 */
7415 for (i = 0; i <= old->curframe; i++) {
7416 if (old->frame[i]->callsite != cur->frame[i]->callsite)
7417 return false;
7418 if (!func_states_equal(old->frame[i], cur->frame[i]))
7419 return false;
7420 }
7421 return true;
7422}
7423
5327ed3d
JW
7424/* Return 0 if no propagation happened. Return negative error code if error
7425 * happened. Otherwise, return the propagated bit.
7426 */
55e7f3b5
JW
7427static int propagate_liveness_reg(struct bpf_verifier_env *env,
7428 struct bpf_reg_state *reg,
7429 struct bpf_reg_state *parent_reg)
7430{
5327ed3d
JW
7431 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
7432 u8 flag = reg->live & REG_LIVE_READ;
55e7f3b5
JW
7433 int err;
7434
5327ed3d
JW
7435 /* When comes here, read flags of PARENT_REG or REG could be any of
7436 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
7437 * of propagation if PARENT_REG has strongest REG_LIVE_READ64.
7438 */
7439 if (parent_flag == REG_LIVE_READ64 ||
7440 /* Or if there is no read flag from REG. */
7441 !flag ||
7442 /* Or if the read flag from REG is the same as PARENT_REG. */
7443 parent_flag == flag)
55e7f3b5
JW
7444 return 0;
7445
5327ed3d 7446 err = mark_reg_read(env, reg, parent_reg, flag);
55e7f3b5
JW
7447 if (err)
7448 return err;
7449
5327ed3d 7450 return flag;
55e7f3b5
JW
7451}
7452
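The interesting case is the 32/64-bit read split: REG_LIVE_READ64 subsumes REG_LIVE_READ32, so nothing is propagated once the parent already carries the stronger mark. A standalone sketch of just that decision, with flag values as in include/linux/bpf_verifier.h:

#include <stdio.h>

#define REG_LIVE_READ32 0x1
#define REG_LIVE_READ64 0x2
#define REG_LIVE_READ   (REG_LIVE_READ32 | REG_LIVE_READ64)

/* Returns the read flag that would be propagated to the parent, or 0. */
static int would_propagate(int parent_live, int child_live)
{
	int parent_flag = parent_live & REG_LIVE_READ;
	int flag = child_live & REG_LIVE_READ;

	if (parent_flag == REG_LIVE_READ64 ||	/* parent already strongest */
	    !flag ||				/* child did not read */
	    parent_flag == flag)		/* nothing new to record */
		return 0;
	return flag;
}

int main(void)
{
	/* child read 64 bits, parent has only a 32-bit mark: propagate */
	printf("%d\n", would_propagate(REG_LIVE_READ32, REG_LIVE_READ64)); /* 2 */
	/* parent already has the 64-bit mark: nothing to do */
	printf("%d\n", would_propagate(REG_LIVE_READ64, REG_LIVE_READ32)); /* 0 */
	return 0;
}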
8e9cd9ce 7453/* A write screens off any subsequent reads; but write marks come from the
f4d7e40a
AS
7454 * straight-line code between a state and its parent. When we arrive at an
7455 * equivalent state (jump target or such) we didn't arrive by the straight-line
7456 * code, so read marks in the state must propagate to the parent regardless
7457 * of the state's write marks. That's what 'parent == state->parent' comparison
679c782d 7458 * in mark_reg_read() is for.
8e9cd9ce 7459 */
f4d7e40a
AS
7460static int propagate_liveness(struct bpf_verifier_env *env,
7461 const struct bpf_verifier_state *vstate,
7462 struct bpf_verifier_state *vparent)
dc503a8a 7463{
3f8cafa4 7464 struct bpf_reg_state *state_reg, *parent_reg;
f4d7e40a 7465 struct bpf_func_state *state, *parent;
3f8cafa4 7466 int i, frame, err = 0;
dc503a8a 7467
f4d7e40a
AS
7468 if (vparent->curframe != vstate->curframe) {
7469 WARN(1, "propagate_live: parent frame %d current frame %d\n",
7470 vparent->curframe, vstate->curframe);
7471 return -EFAULT;
7472 }
dc503a8a
EC
7473 /* Propagate read liveness of registers... */
7474 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
83d16312 7475 for (frame = 0; frame <= vstate->curframe; frame++) {
3f8cafa4
JW
7476 parent = vparent->frame[frame];
7477 state = vstate->frame[frame];
7478 parent_reg = parent->regs;
7479 state_reg = state->regs;
83d16312
JK
7480 /* We don't need to worry about FP liveness, it's read-only */
7481 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
55e7f3b5
JW
7482 err = propagate_liveness_reg(env, &state_reg[i],
7483 &parent_reg[i]);
5327ed3d 7484 if (err < 0)
3f8cafa4 7485 return err;
5327ed3d
JW
7486 if (err == REG_LIVE_READ64)
7487 mark_insn_zext(env, &parent_reg[i]);
dc503a8a 7488 }
f4d7e40a 7489
1b04aee7 7490 /* Propagate stack slots. */
f4d7e40a
AS
7491 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
7492 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
3f8cafa4
JW
7493 parent_reg = &parent->stack[i].spilled_ptr;
7494 state_reg = &state->stack[i].spilled_ptr;
55e7f3b5
JW
7495 err = propagate_liveness_reg(env, state_reg,
7496 parent_reg);
5327ed3d 7497 if (err < 0)
3f8cafa4 7498 return err;
dc503a8a
EC
7499 }
7500 }
5327ed3d 7501 return 0;
dc503a8a
EC
7502}
7503
a3ce685d
AS
7504/* find precise scalars in the previous equivalent state and
7505 * propagate them into the current state
7506 */
7507static int propagate_precision(struct bpf_verifier_env *env,
7508 const struct bpf_verifier_state *old)
7509{
7510 struct bpf_reg_state *state_reg;
7511 struct bpf_func_state *state;
7512 int i, err = 0;
7513
7514 state = old->frame[old->curframe];
7515 state_reg = state->regs;
7516 for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
7517 if (state_reg->type != SCALAR_VALUE ||
7518 !state_reg->precise)
7519 continue;
7520 if (env->log.level & BPF_LOG_LEVEL2)
7521 verbose(env, "propagating r%d\n", i);
7522 err = mark_chain_precision(env, i);
7523 if (err < 0)
7524 return err;
7525 }
7526
7527 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
7528 if (state->stack[i].slot_type[0] != STACK_SPILL)
7529 continue;
7530 state_reg = &state->stack[i].spilled_ptr;
7531 if (state_reg->type != SCALAR_VALUE ||
7532 !state_reg->precise)
7533 continue;
7534 if (env->log.level & BPF_LOG_LEVEL2)
7535 verbose(env, "propagating fp%d\n",
7536 (-i - 1) * BPF_REG_SIZE);
7537 err = mark_chain_precision_stack(env, i);
7538 if (err < 0)
7539 return err;
7540 }
7541 return 0;
7542}
7543
2589726d
AS
7544static bool states_maybe_looping(struct bpf_verifier_state *old,
7545 struct bpf_verifier_state *cur)
7546{
7547 struct bpf_func_state *fold, *fcur;
7548 int i, fr = cur->curframe;
7549
7550 if (old->curframe != fr)
7551 return false;
7552
7553 fold = old->frame[fr];
7554 fcur = cur->frame[fr];
7555 for (i = 0; i < MAX_BPF_REG; i++)
7556 if (memcmp(&fold->regs[i], &fcur->regs[i],
7557 offsetof(struct bpf_reg_state, parent)))
7558 return false;
7559 return true;
7560}
7561
7562
58e2af8b 7563static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
f1bca824 7564{
58e2af8b 7565 struct bpf_verifier_state_list *new_sl;
9f4686c4 7566 struct bpf_verifier_state_list *sl, **pprev;
679c782d 7567 struct bpf_verifier_state *cur = env->cur_state, *new;
ceefbc96 7568 int i, j, err, states_cnt = 0;
10d274e8 7569 bool add_new_state = env->test_state_freq ? true : false;
f1bca824 7570
b5dc0163 7571 cur->last_insn_idx = env->prev_insn_idx;
a8f500af 7572 if (!env->insn_aux_data[insn_idx].prune_point)
f1bca824
AS
7573 /* this 'insn_idx' instruction wasn't marked, so we will not
7574 * be doing state search here
7575 */
7576 return 0;
7577
2589726d
AS
7578 /* bpf progs typically have pruning point every 4 instructions
7579 * http://vger.kernel.org/bpfconf2019.html#session-1
7580 * Do not add new state for future pruning if the verifier hasn't seen
7581 * at least 2 jumps and at least 8 instructions.
7582 * This heuristics helps decrease 'total_states' and 'peak_states' metric.
7583 * In tests that amounts to up to 50% reduction into total verifier
7584 * memory consumption and 20% verifier time speedup.
7585 */
7586 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
7587 env->insn_processed - env->prev_insn_processed >= 8)
7588 add_new_state = true;
7589
a8f500af
AS
7590 pprev = explored_state(env, insn_idx);
7591 sl = *pprev;
7592
9242b5f5
AS
7593 clean_live_states(env, insn_idx, cur);
7594
a8f500af 7595 while (sl) {
dc2a4ebc
AS
7596 states_cnt++;
7597 if (sl->state.insn_idx != insn_idx)
7598 goto next;
2589726d
AS
7599 if (sl->state.branches) {
7600 if (states_maybe_looping(&sl->state, cur) &&
7601 states_equal(env, &sl->state, cur)) {
7602 verbose_linfo(env, insn_idx, "; ");
7603 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
7604 return -EINVAL;
7605 }
7606 /* if the verifier is processing a loop, avoid adding new state
7607 * too often, since different loop iterations have distinct
7608 * states and may not help future pruning.
7609 * This threshold shouldn't be too low to make sure that
7610 * a loop with large bound will be rejected quickly.
7611 * The most abusive loop will be:
7612 * r1 += 1
7613 * if r1 < 1000000 goto pc-2
7614 * 1M insn_processed limit / 100 == 10k peak states.
7615 * This threshold shouldn't be too high either, since states
7616 * at the end of the loop are likely to be useful in pruning.
7617 */
7618 if (env->jmps_processed - env->prev_jmps_processed < 20 &&
7619 env->insn_processed - env->prev_insn_processed < 100)
7620 add_new_state = false;
7621 goto miss;
7622 }
638f5b90 7623 if (states_equal(env, &sl->state, cur)) {
9f4686c4 7624 sl->hit_cnt++;
f1bca824 7625 /* reached equivalent register/stack state,
dc503a8a
EC
7626 * prune the search.
7627 * Registers read by the continuation are read by us.
8e9cd9ce
EC
7628 * If we have any write marks in env->cur_state, they
7629 * will prevent corresponding reads in the continuation
7630 * from reaching our parent (an explored_state). Our
7631 * own state will get the read marks recorded, but
7632 * they'll be immediately forgotten as we're pruning
7633 * this state and will pop a new one.
f1bca824 7634 */
f4d7e40a 7635 err = propagate_liveness(env, &sl->state, cur);
a3ce685d
AS
7636
7637 /* if previous state reached the exit with precision and
7638 * current state is equivalent to it (except precision marks)
7639 * the precision needs to be propagated back in
7640 * the current state.
7641 */
7642 err = err ? : push_jmp_history(env, cur);
7643 err = err ? : propagate_precision(env, &sl->state);
f4d7e40a
AS
7644 if (err)
7645 return err;
f1bca824 7646 return 1;
dc503a8a 7647 }
2589726d
AS
7648miss:
7649 /* when a new state is not going to be added, do not increase the miss count.
7650 * Otherwise several loop iterations will remove the state
7651 * recorded earlier. The goal of these heuristics is to have
7652 * states from some iterations of the loop (some in the beginning
7653 * and some at the end) to help pruning.
7654 */
7655 if (add_new_state)
7656 sl->miss_cnt++;
9f4686c4
AS
7657 /* heuristic to determine whether this state is beneficial
7658 * to keep checking from state equivalence point of view.
7659 * Higher numbers increase max_states_per_insn and verification time,
7660 * but do not meaningfully decrease insn_processed.
7661 */
7662 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
7663 /* the state is unlikely to be useful. Remove it to
7664 * speed up verification
7665 */
7666 *pprev = sl->next;
7667 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
2589726d
AS
7668 u32 br = sl->state.branches;
7669
7670 WARN_ONCE(br,
7671 "BUG live_done but branches_to_explore %d\n",
7672 br);
9f4686c4
AS
7673 free_verifier_state(&sl->state, false);
7674 kfree(sl);
7675 env->peak_states--;
7676 } else {
7677 /* cannot free this state, since parentage chain may
7678 * walk it later. Add it for free_list instead to
7679 * be freed at the end of verification
7680 */
7681 sl->next = env->free_list;
7682 env->free_list = sl;
7683 }
7684 sl = *pprev;
7685 continue;
7686 }
dc2a4ebc 7687next:
9f4686c4
AS
7688 pprev = &sl->next;
7689 sl = *pprev;
f1bca824
AS
7690 }
7691
06ee7115
AS
7692 if (env->max_states_per_insn < states_cnt)
7693 env->max_states_per_insn = states_cnt;
7694
ceefbc96 7695 if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
b5dc0163 7696 return push_jmp_history(env, cur);
ceefbc96 7697
2589726d 7698 if (!add_new_state)
b5dc0163 7699 return push_jmp_history(env, cur);
ceefbc96 7700
2589726d
AS
7701 /* There were no equivalent states, remember the current one.
7702 * Technically the current state is not proven to be safe yet,
f4d7e40a 7703 * but it will either reach outer most bpf_exit (which means it's safe)
2589726d 7704 * or it will be rejected. When there are no loops the verifier won't be
f4d7e40a 7705 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
2589726d
AS
7706 * again on the way to bpf_exit.
7707 * When looping the sl->state.branches will be > 0 and this state
7708 * will not be considered for equivalence until branches == 0.
f1bca824 7709 */
638f5b90 7710 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
f1bca824
AS
7711 if (!new_sl)
7712 return -ENOMEM;
06ee7115
AS
7713 env->total_states++;
7714 env->peak_states++;
2589726d
AS
7715 env->prev_jmps_processed = env->jmps_processed;
7716 env->prev_insn_processed = env->insn_processed;
f1bca824
AS
7717
7718 /* add new state to the head of linked list */
679c782d
EC
7719 new = &new_sl->state;
7720 err = copy_verifier_state(new, cur);
1969db47 7721 if (err) {
679c782d 7722 free_verifier_state(new, false);
1969db47
AS
7723 kfree(new_sl);
7724 return err;
7725 }
dc2a4ebc 7726 new->insn_idx = insn_idx;
2589726d
AS
7727 WARN_ONCE(new->branches != 1,
7728 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
b5dc0163 7729
2589726d 7730 cur->parent = new;
b5dc0163
AS
7731 cur->first_insn_idx = insn_idx;
7732 clear_jmp_history(cur);
5d839021
AS
7733 new_sl->next = *explored_state(env, insn_idx);
7734 *explored_state(env, insn_idx) = new_sl;
7640ead9
JK
7735 /* connect new state to parentage chain. Current frame needs all
7736 * registers connected. Only r6 - r9 of the callers are alive (pushed
7737 * to the stack implicitly by JITs) so in callers' frames connect just
7738 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
7739 * the state of the call instruction (with WRITTEN set), and r0 comes
7740 * from callee with its full parentage chain, anyway.
7741 */
8e9cd9ce
EC
7742 /* clear write marks in current state: the writes we did are not writes
7743 * our child did, so they don't screen off its reads from us.
7744 * (There are no read marks in current state, because reads always mark
7745 * their parent and current state never has children yet. Only
7746 * explored_states can get read marks.)
7747 */
eea1c227
AS
7748 for (j = 0; j <= cur->curframe; j++) {
7749 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
7750 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
7751 for (i = 0; i < BPF_REG_FP; i++)
7752 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
7753 }
f4d7e40a
AS
7754
7755 /* all stack frames are accessible from callee, clear them all */
7756 for (j = 0; j <= cur->curframe; j++) {
7757 struct bpf_func_state *frame = cur->frame[j];
679c782d 7758 struct bpf_func_state *newframe = new->frame[j];
f4d7e40a 7759
679c782d 7760 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
cc2b14d5 7761 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
679c782d
EC
7762 frame->stack[i].spilled_ptr.parent =
7763 &newframe->stack[i].spilled_ptr;
7764 }
f4d7e40a 7765 }
f1bca824
AS
7766 return 0;
7767}
7768
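The three heuristics buried in is_state_visited() condense to the predicates below (an editor's restatement, with the constants copied from the code above):

#include <stdbool.h>

/* A new state is worth remembering only after enough work was done
 * since the previous stored state (>= 2 jumps and >= 8 insns).
 */
static bool worth_adding_state(unsigned int jmps_delta, unsigned int insns_delta)
{
	return jmps_delta >= 2 && insns_delta >= 8;
}

/* While processing a loop, throttle state creation much harder. */
static bool throttle_in_loop(unsigned int jmps_delta, unsigned int insns_delta)
{
	return jmps_delta < 20 && insns_delta < 100;
}

/* A stored state is evicted once misses clearly outpace hits. */
static bool should_evict(unsigned int hit_cnt, unsigned int miss_cnt)
{
	return miss_cnt > hit_cnt * 3 + 3;
}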
c64b7983
JS
7769/* Return true if it's OK to have the same insn return a different type. */
7770static bool reg_type_mismatch_ok(enum bpf_reg_type type)
7771{
7772 switch (type) {
7773 case PTR_TO_CTX:
7774 case PTR_TO_SOCKET:
7775 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
7776 case PTR_TO_SOCK_COMMON:
7777 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
7778 case PTR_TO_TCP_SOCK:
7779 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 7780 case PTR_TO_XDP_SOCK:
2a02759e 7781 case PTR_TO_BTF_ID:
c64b7983
JS
7782 return false;
7783 default:
7784 return true;
7785 }
7786}
7787
7788/* If an instruction was previously used with particular pointer types, then we
7789 * need to be careful to avoid cases such as the below, where it may be ok
7790 * for one branch accessing the pointer, but not ok for the other branch:
7791 *
7792 * R1 = sock_ptr
7793 * goto X;
7794 * ...
7795 * R1 = some_other_valid_ptr;
7796 * goto X;
7797 * ...
7798 * R2 = *(u32 *)(R1 + 0);
7799 */
7800static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
7801{
7802 return src != prev && (!reg_type_mismatch_ok(src) ||
7803 !reg_type_mismatch_ok(prev));
7804}
7805
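A standalone sketch of how do_check() below applies this: the first pointer type seen at a load insn is cached in insn_aux_data, and a later visit with an incompatible type rejects the program. The enum and the mismatch rule here are simplified stand-ins for the kernel's.

#include <stdbool.h>
#include <stdio.h>

enum reg_type { NOT_INIT, PTR_TO_CTX, PTR_TO_STACK };

/* ctx accesses get rewritten per pointer type, so ctx never mixes. */
static bool type_mismatch_ok(enum reg_type t)
{
	return t != PTR_TO_CTX;
}

static bool type_mismatch(enum reg_type src, enum reg_type prev)
{
	return src != prev &&
	       (!type_mismatch_ok(src) || !type_mismatch_ok(prev));
}

int main(void)
{
	enum reg_type prev_src_type = NOT_INIT;	/* per-insn cache */
	enum reg_type seen[] = { PTR_TO_CTX, PTR_TO_CTX, PTR_TO_STACK };
	int i;

	for (i = 0; i < 3; i++) {
		if (prev_src_type == NOT_INIT) {
			prev_src_type = seen[i];
		} else if (type_mismatch(seen[i], prev_src_type)) {
			printf("same insn cannot be used with different pointers\n");
			return 1;
		}
	}
	return 0;
}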
58e2af8b 7806static int do_check(struct bpf_verifier_env *env)
17a52670 7807{
51c39bb1 7808 struct bpf_verifier_state *state = env->cur_state;
17a52670 7809 struct bpf_insn *insns = env->prog->insnsi;
638f5b90 7810 struct bpf_reg_state *regs;
06ee7115 7811 int insn_cnt = env->prog->len;
17a52670 7812 bool do_print_state = false;
b5dc0163 7813 int prev_insn_idx = -1;
17a52670 7814
17a52670
AS
7815 for (;;) {
7816 struct bpf_insn *insn;
7817 u8 class;
7818 int err;
7819
b5dc0163 7820 env->prev_insn_idx = prev_insn_idx;
c08435ec 7821 if (env->insn_idx >= insn_cnt) {
61bd5218 7822 verbose(env, "invalid insn idx %d insn_cnt %d\n",
c08435ec 7823 env->insn_idx, insn_cnt);
17a52670
AS
7824 return -EFAULT;
7825 }
7826
c08435ec 7827 insn = &insns[env->insn_idx];
17a52670
AS
7828 class = BPF_CLASS(insn->code);
7829
06ee7115 7830 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
61bd5218
JK
7831 verbose(env,
7832 "BPF program is too large. Processed %d insn\n",
06ee7115 7833 env->insn_processed);
17a52670
AS
7834 return -E2BIG;
7835 }
7836
c08435ec 7837 err = is_state_visited(env, env->insn_idx);
f1bca824
AS
7838 if (err < 0)
7839 return err;
7840 if (err == 1) {
7841 /* found equivalent state, can prune the search */
06ee7115 7842 if (env->log.level & BPF_LOG_LEVEL) {
f1bca824 7843 if (do_print_state)
979d63d5
DB
7844 verbose(env, "\nfrom %d to %d%s: safe\n",
7845 env->prev_insn_idx, env->insn_idx,
7846 env->cur_state->speculative ?
7847 " (speculative execution)" : "");
f1bca824 7848 else
c08435ec 7849 verbose(env, "%d: safe\n", env->insn_idx);
f1bca824
AS
7850 }
7851 goto process_bpf_exit;
7852 }
7853
c3494801
AS
7854 if (signal_pending(current))
7855 return -EAGAIN;
7856
3c2ce60b
DB
7857 if (need_resched())
7858 cond_resched();
7859
06ee7115
AS
7860 if (env->log.level & BPF_LOG_LEVEL2 ||
7861 (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
7862 if (env->log.level & BPF_LOG_LEVEL2)
c08435ec 7863 verbose(env, "%d:", env->insn_idx);
c5fc9692 7864 else
979d63d5
DB
7865 verbose(env, "\nfrom %d to %d%s:",
7866 env->prev_insn_idx, env->insn_idx,
7867 env->cur_state->speculative ?
7868 " (speculative execution)" : "");
f4d7e40a 7869 print_verifier_state(env, state->frame[state->curframe]);
17a52670
AS
7870 do_print_state = false;
7871 }
7872
06ee7115 7873 if (env->log.level & BPF_LOG_LEVEL) {
7105e828
DB
7874 const struct bpf_insn_cbs cbs = {
7875 .cb_print = verbose,
abe08840 7876 .private_data = env,
7105e828
DB
7877 };
7878
c08435ec
DB
7879 verbose_linfo(env, env->insn_idx, "; ");
7880 verbose(env, "%d: ", env->insn_idx);
abe08840 7881 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
17a52670
AS
7882 }
7883
cae1927c 7884 if (bpf_prog_is_dev_bound(env->prog->aux)) {
c08435ec
DB
7885 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
7886 env->prev_insn_idx);
cae1927c
JK
7887 if (err)
7888 return err;
7889 }
13a27dfc 7890
638f5b90 7891 regs = cur_regs(env);
51c39bb1 7892 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
b5dc0163 7893 prev_insn_idx = env->insn_idx;
fd978bf7 7894
17a52670 7895 if (class == BPF_ALU || class == BPF_ALU64) {
1be7f75d 7896 err = check_alu_op(env, insn);
17a52670
AS
7897 if (err)
7898 return err;
7899
7900 } else if (class == BPF_LDX) {
3df126f3 7901 enum bpf_reg_type *prev_src_type, src_reg_type;
9bac3d6d
AS
7902
7903 /* check for reserved fields is already done */
7904
17a52670 7905 /* check src operand */
dc503a8a 7906 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
7907 if (err)
7908 return err;
7909
dc503a8a 7910 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
7911 if (err)
7912 return err;
7913
725f9dcd
AS
7914 src_reg_type = regs[insn->src_reg].type;
7915
17a52670
AS
7916 /* check that memory (src_reg + off) is readable,
7917 * the state of dst_reg will be updated by this func
7918 */
c08435ec
DB
7919 err = check_mem_access(env, env->insn_idx, insn->src_reg,
7920 insn->off, BPF_SIZE(insn->code),
7921 BPF_READ, insn->dst_reg, false);
17a52670
AS
7922 if (err)
7923 return err;
7924
c08435ec 7925 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
7926
7927 if (*prev_src_type == NOT_INIT) {
9bac3d6d
AS
7928 /* saw a valid insn
7929 * dst_reg = *(u32 *)(src_reg + off)
3df126f3 7930 * save type to validate intersecting paths
9bac3d6d 7931 */
3df126f3 7932 *prev_src_type = src_reg_type;
9bac3d6d 7933
c64b7983 7934 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
9bac3d6d
AS
7935 /* Abuser program is trying to use the same insn
7936 * dst_reg = *(u32*) (src_reg + off)
7937 * with different pointer types:
7938 * src_reg == ctx in one branch and
7939 * src_reg == stack|map in some other branch.
7940 * Reject it.
7941 */
61bd5218 7942 verbose(env, "same insn cannot be used with different pointers\n");
9bac3d6d
AS
7943 return -EINVAL;
7944 }
7945
17a52670 7946 } else if (class == BPF_STX) {
3df126f3 7947 enum bpf_reg_type *prev_dst_type, dst_reg_type;
d691f9e8 7948
17a52670 7949 if (BPF_MODE(insn->code) == BPF_XADD) {
c08435ec 7950 err = check_xadd(env, env->insn_idx, insn);
17a52670
AS
7951 if (err)
7952 return err;
c08435ec 7953 env->insn_idx++;
17a52670
AS
7954 continue;
7955 }
7956
17a52670 7957 /* check src1 operand */
dc503a8a 7958 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
7959 if (err)
7960 return err;
7961 /* check src2 operand */
dc503a8a 7962 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
7963 if (err)
7964 return err;
7965
d691f9e8
AS
7966 dst_reg_type = regs[insn->dst_reg].type;
7967
17a52670 7968 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
7969 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
7970 insn->off, BPF_SIZE(insn->code),
7971 BPF_WRITE, insn->src_reg, false);
17a52670
AS
7972 if (err)
7973 return err;
7974
c08435ec 7975 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
7976
7977 if (*prev_dst_type == NOT_INIT) {
7978 *prev_dst_type = dst_reg_type;
c64b7983 7979 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
61bd5218 7980 verbose(env, "same insn cannot be used with different pointers\n");
d691f9e8
AS
7981 return -EINVAL;
7982 }
7983
17a52670
AS
7984 } else if (class == BPF_ST) {
7985 if (BPF_MODE(insn->code) != BPF_MEM ||
7986 insn->src_reg != BPF_REG_0) {
61bd5218 7987 verbose(env, "BPF_ST uses reserved fields\n");
17a52670
AS
7988 return -EINVAL;
7989 }
7990 /* check src operand */
dc503a8a 7991 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
7992 if (err)
7993 return err;
7994
f37a8cb8 7995 if (is_ctx_reg(env, insn->dst_reg)) {
9d2be44a 7996 verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
2a159c6f
DB
7997 insn->dst_reg,
7998 reg_type_str[reg_state(env, insn->dst_reg)->type]);
f37a8cb8
DB
7999 return -EACCES;
8000 }
8001
17a52670 8002 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
8003 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
8004 insn->off, BPF_SIZE(insn->code),
8005 BPF_WRITE, -1, false);
17a52670
AS
8006 if (err)
8007 return err;
8008
092ed096 8009 } else if (class == BPF_JMP || class == BPF_JMP32) {
17a52670
AS
8010 u8 opcode = BPF_OP(insn->code);
8011
2589726d 8012 env->jmps_processed++;
17a52670
AS
8013 if (opcode == BPF_CALL) {
8014 if (BPF_SRC(insn->code) != BPF_K ||
8015 insn->off != 0 ||
f4d7e40a
AS
8016 (insn->src_reg != BPF_REG_0 &&
8017 insn->src_reg != BPF_PSEUDO_CALL) ||
092ed096
JW
8018 insn->dst_reg != BPF_REG_0 ||
8019 class == BPF_JMP32) {
61bd5218 8020 verbose(env, "BPF_CALL uses reserved fields\n");
17a52670
AS
8021 return -EINVAL;
8022 }
8023
d83525ca
AS
8024 if (env->cur_state->active_spin_lock &&
8025 (insn->src_reg == BPF_PSEUDO_CALL ||
8026 insn->imm != BPF_FUNC_spin_unlock)) {
8027 verbose(env, "function calls are not allowed while holding a lock\n");
8028 return -EINVAL;
8029 }
f4d7e40a 8030 if (insn->src_reg == BPF_PSEUDO_CALL)
c08435ec 8031 err = check_func_call(env, insn, &env->insn_idx);
f4d7e40a 8032 else
c08435ec 8033 err = check_helper_call(env, insn->imm, env->insn_idx);
17a52670
AS
8034 if (err)
8035 return err;
8036
8037 } else if (opcode == BPF_JA) {
8038 if (BPF_SRC(insn->code) != BPF_K ||
8039 insn->imm != 0 ||
8040 insn->src_reg != BPF_REG_0 ||
092ed096
JW
8041 insn->dst_reg != BPF_REG_0 ||
8042 class == BPF_JMP32) {
61bd5218 8043 verbose(env, "BPF_JA uses reserved fields\n");
17a52670
AS
8044 return -EINVAL;
8045 }
8046
c08435ec 8047 env->insn_idx += insn->off + 1;
17a52670
AS
8048 continue;
8049
8050 } else if (opcode == BPF_EXIT) {
8051 if (BPF_SRC(insn->code) != BPF_K ||
8052 insn->imm != 0 ||
8053 insn->src_reg != BPF_REG_0 ||
092ed096
JW
8054 insn->dst_reg != BPF_REG_0 ||
8055 class == BPF_JMP32) {
61bd5218 8056 verbose(env, "BPF_EXIT uses reserved fields\n");
17a52670
AS
8057 return -EINVAL;
8058 }
8059
d83525ca
AS
8060 if (env->cur_state->active_spin_lock) {
8061 verbose(env, "bpf_spin_unlock is missing\n");
8062 return -EINVAL;
8063 }
8064
f4d7e40a
AS
8065 if (state->curframe) {
8066 /* exit from nested function */
c08435ec 8067 err = prepare_func_exit(env, &env->insn_idx);
f4d7e40a
AS
8068 if (err)
8069 return err;
8070 do_print_state = true;
8071 continue;
8072 }
8073
fd978bf7
JS
8074 err = check_reference_leak(env);
8075 if (err)
8076 return err;
8077
390ee7e2
AS
8078 err = check_return_code(env);
8079 if (err)
8080 return err;
f1bca824 8081process_bpf_exit:
2589726d 8082 update_branch_counts(env, env->cur_state);
b5dc0163 8083 err = pop_stack(env, &prev_insn_idx,
c08435ec 8084 &env->insn_idx);
638f5b90
AS
8085 if (err < 0) {
8086 if (err != -ENOENT)
8087 return err;
17a52670
AS
8088 break;
8089 } else {
8090 do_print_state = true;
8091 continue;
8092 }
8093 } else {
c08435ec 8094 err = check_cond_jmp_op(env, insn, &env->insn_idx);
17a52670
AS
8095 if (err)
8096 return err;
8097 }
8098 } else if (class == BPF_LD) {
8099 u8 mode = BPF_MODE(insn->code);
8100
8101 if (mode == BPF_ABS || mode == BPF_IND) {
ddd872bc
AS
8102 err = check_ld_abs(env, insn);
8103 if (err)
8104 return err;
8105
17a52670
AS
8106 } else if (mode == BPF_IMM) {
8107 err = check_ld_imm(env, insn);
8108 if (err)
8109 return err;
8110
c08435ec 8111 env->insn_idx++;
51c39bb1 8112 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
17a52670 8113 } else {
61bd5218 8114 verbose(env, "invalid BPF_LD mode\n");
17a52670
AS
8115 return -EINVAL;
8116 }
8117 } else {
61bd5218 8118 verbose(env, "unknown insn class %d\n", class);
17a52670
AS
8119 return -EINVAL;
8120 }
8121
c08435ec 8122 env->insn_idx++;
17a52670
AS
8123 }
8124
8125 return 0;
8126}
8127
56f668df
MKL
8128static int check_map_prealloc(struct bpf_map *map)
8129{
8130 return (map->map_type != BPF_MAP_TYPE_HASH &&
bcc6b1b7
MKL
8131 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
8132 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
56f668df
MKL
8133 !(map->map_flags & BPF_F_NO_PREALLOC);
8134}
8135
d83525ca
AS
8136static bool is_tracing_prog_type(enum bpf_prog_type type)
8137{
8138 switch (type) {
8139 case BPF_PROG_TYPE_KPROBE:
8140 case BPF_PROG_TYPE_TRACEPOINT:
8141 case BPF_PROG_TYPE_PERF_EVENT:
8142 case BPF_PROG_TYPE_RAW_TRACEPOINT:
8143 return true;
8144 default:
8145 return false;
8146 }
8147}
8148
94dacdbd
TG
8149static bool is_preallocated_map(struct bpf_map *map)
8150{
8151 if (!check_map_prealloc(map))
8152 return false;
8153 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
8154 return false;
8155 return true;
8156}
8157
61bd5218
JK
8158static int check_map_prog_compatibility(struct bpf_verifier_env *env,
8159 struct bpf_map *map,
fdc15d38
AS
8160 struct bpf_prog *prog)
8161
8162{
94dacdbd
TG
8163 /*
8164 * Validate that trace type programs use preallocated hash maps.
8165 *
8166 * For programs attached to PERF events this is mandatory as the
8167 * perf NMI can hit any arbitrary code sequence.
8168 *
 8169 * All other trace types using run-time allocated hash maps are unsafe as
8170 * well because tracepoint or kprobes can be inside locked regions
8171 * of the memory allocator or at a place where a recursion into the
8172 * memory allocator would see inconsistent state.
8173 *
2ed905c5
TG
 8174 * On RT enabled kernels, run-time allocated hash maps are strictly
 8175 * prohibited for all trace type programs due to lock type constraints. On
 8176 * !RT kernels this is allowed for backwards compatibility reasons for
 8177 * now, but warnings are emitted so developers are made aware of
 8178 * the risk and can fix their programs before this is enforced.
56f668df 8179 */
94dacdbd
TG
8180 if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) {
8181 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
61bd5218 8182 verbose(env, "perf_event programs can only use preallocated hash map\n");
56f668df
MKL
8183 return -EINVAL;
8184 }
2ed905c5
TG
8185 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
8186 verbose(env, "trace type programs can only use preallocated hash map\n");
8187 return -EINVAL;
8188 }
94dacdbd
TG
8189 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
8190 verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
fdc15d38 8191 }
a3884572 8192
d83525ca
AS
8193 if ((is_tracing_prog_type(prog->type) ||
8194 prog->type == BPF_PROG_TYPE_SOCKET_FILTER) &&
8195 map_value_has_spin_lock(map)) {
8196 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
8197 return -EINVAL;
8198 }
8199
a3884572 8200 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
09728266 8201 !bpf_offload_prog_map_match(prog, map)) {
a3884572
JK
8202 verbose(env, "offload device mismatch between prog and map\n");
8203 return -EINVAL;
8204 }
8205
85d33df3
MKL
8206 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
8207 verbose(env, "bpf_struct_ops map cannot be used in prog\n");
8208 return -EINVAL;
8209 }
8210
fdc15d38
AS
8211 return 0;
8212}
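/* For illustration only (a sketch from the map-creation side, with
 * hypothetical sizes): a hash map created with
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 128,
 *		.map_flags   = BPF_F_NO_PREALLOC,
 *	};
 *
 * is run-time allocated, so pairing it with a BPF_PROG_TYPE_KPROBE prog
 * is rejected on PREEMPT_RT kernels and warned about on !RT kernels by
 * check_map_prog_compatibility() above.
 */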
8213
b741f163
RG
8214static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
8215{
8216 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
8217 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
8218}
8219
0246e64d
AS
8220/* look for pseudo eBPF instructions that access map FDs and
8221 * replace them with actual map pointers
8222 */
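/* For example (sketch): to reference a map, loaders such as libbpf emit
 * the two-insn pseudo load
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd);
 *
 * i.e. BPF_LD | BPF_IMM | BPF_DW with src_reg == BPF_PSEUDO_MAP_FD and the
 * user-space fd in insn[0].imm. The loop below resolves the fd and stores
 * the low/high 32 bits of the 'struct bpf_map *' pointer in insn[0].imm
 * and insn[1].imm respectively.
 */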
58e2af8b 8223static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
0246e64d
AS
8224{
8225 struct bpf_insn *insn = env->prog->insnsi;
8226 int insn_cnt = env->prog->len;
fdc15d38 8227 int i, j, err;
0246e64d 8228
f1f7714e 8229 err = bpf_prog_calc_tag(env->prog);
aafe6ae9
DB
8230 if (err)
8231 return err;
8232
0246e64d 8233 for (i = 0; i < insn_cnt; i++, insn++) {
9bac3d6d 8234 if (BPF_CLASS(insn->code) == BPF_LDX &&
d691f9e8 8235 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
61bd5218 8236 verbose(env, "BPF_LDX uses reserved fields\n");
9bac3d6d
AS
8237 return -EINVAL;
8238 }
8239
d691f9e8
AS
8240 if (BPF_CLASS(insn->code) == BPF_STX &&
8241 ((BPF_MODE(insn->code) != BPF_MEM &&
8242 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
61bd5218 8243 verbose(env, "BPF_STX uses reserved fields\n");
d691f9e8
AS
8244 return -EINVAL;
8245 }
8246
0246e64d 8247 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
d8eca5bb 8248 struct bpf_insn_aux_data *aux;
0246e64d
AS
8249 struct bpf_map *map;
8250 struct fd f;
d8eca5bb 8251 u64 addr;
0246e64d
AS
8252
8253 if (i == insn_cnt - 1 || insn[1].code != 0 ||
8254 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
8255 insn[1].off != 0) {
61bd5218 8256 verbose(env, "invalid bpf_ld_imm64 insn\n");
0246e64d
AS
8257 return -EINVAL;
8258 }
8259
d8eca5bb 8260 if (insn[0].src_reg == 0)
0246e64d
AS
8261 /* valid generic load 64-bit imm */
8262 goto next_insn;
8263
d8eca5bb
DB
8264 /* In final convert_pseudo_ld_imm64() step, this is
8265 * converted into regular 64-bit imm load insn.
8266 */
8267 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
8268 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
8269 (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
8270 insn[1].imm != 0)) {
8271 verbose(env,
8272 "unrecognized bpf_ld_imm64 insn\n");
0246e64d
AS
8273 return -EINVAL;
8274 }
8275
20182390 8276 f = fdget(insn[0].imm);
c2101297 8277 map = __bpf_map_get(f);
0246e64d 8278 if (IS_ERR(map)) {
61bd5218 8279 verbose(env, "fd %d is not pointing to valid bpf_map\n",
20182390 8280 insn[0].imm);
0246e64d
AS
8281 return PTR_ERR(map);
8282 }
8283
61bd5218 8284 err = check_map_prog_compatibility(env, map, env->prog);
fdc15d38
AS
8285 if (err) {
8286 fdput(f);
8287 return err;
8288 }
8289
d8eca5bb
DB
8290 aux = &env->insn_aux_data[i];
8291 if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
8292 addr = (unsigned long)map;
8293 } else {
8294 u32 off = insn[1].imm;
8295
8296 if (off >= BPF_MAX_VAR_OFF) {
8297 verbose(env, "direct value offset of %u is not allowed\n", off);
8298 fdput(f);
8299 return -EINVAL;
8300 }
8301
8302 if (!map->ops->map_direct_value_addr) {
8303 verbose(env, "no direct value access support for this map type\n");
8304 fdput(f);
8305 return -EINVAL;
8306 }
8307
8308 err = map->ops->map_direct_value_addr(map, &addr, off);
8309 if (err) {
8310 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
8311 map->value_size, off);
8312 fdput(f);
8313 return err;
8314 }
8315
8316 aux->map_off = off;
8317 addr += off;
8318 }
8319
8320 insn[0].imm = (u32)addr;
8321 insn[1].imm = addr >> 32;
0246e64d
AS
8322
8323 /* check whether we recorded this map already */
d8eca5bb 8324 for (j = 0; j < env->used_map_cnt; j++) {
0246e64d 8325 if (env->used_maps[j] == map) {
d8eca5bb 8326 aux->map_index = j;
0246e64d
AS
8327 fdput(f);
8328 goto next_insn;
8329 }
d8eca5bb 8330 }
0246e64d
AS
8331
8332 if (env->used_map_cnt >= MAX_USED_MAPS) {
8333 fdput(f);
8334 return -E2BIG;
8335 }
8336
0246e64d
AS
8337 /* hold the map. If the program is rejected by verifier,
8338 * the map will be released by release_maps() or it
8339 * will be used by the valid program until it's unloaded
ab7f5bf0 8340 * and all maps are released in free_used_maps()
0246e64d 8341 */
1e0bd5a0 8342 bpf_map_inc(map);
d8eca5bb
DB
8343
8344 aux->map_index = env->used_map_cnt;
92117d84
AS
8345 env->used_maps[env->used_map_cnt++] = map;
8346
b741f163 8347 if (bpf_map_is_cgroup_storage(map) &&
e4730423 8348 bpf_cgroup_storage_assign(env->prog->aux, map)) {
b741f163 8349 verbose(env, "only one cgroup storage of each type is allowed\n");
de9cbbaa
RG
8350 fdput(f);
8351 return -EBUSY;
8352 }
8353
0246e64d
AS
8354 fdput(f);
8355next_insn:
8356 insn++;
8357 i++;
5e581dad
DB
8358 continue;
8359 }
8360
8361 /* Basic sanity check before we invest more work here. */
8362 if (!bpf_opcode_in_insntable(insn->code)) {
8363 verbose(env, "unknown opcode %02x\n", insn->code);
8364 return -EINVAL;
0246e64d
AS
8365 }
8366 }
8367
8368 /* now all pseudo BPF_LD_IMM64 instructions load valid
8369 * 'struct bpf_map *' into a register instead of user map_fd.
8370 * These pointers will be used later by verifier to validate map access.
8371 */
8372 return 0;
8373}
8374
8375/* drop refcnt of maps used by the rejected program */
58e2af8b 8376static void release_maps(struct bpf_verifier_env *env)
0246e64d 8377{
a2ea0746
DB
8378 __bpf_free_used_maps(env->prog->aux, env->used_maps,
8379 env->used_map_cnt);
0246e64d
AS
8380}
8381
8382/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
58e2af8b 8383static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
0246e64d
AS
8384{
8385 struct bpf_insn *insn = env->prog->insnsi;
8386 int insn_cnt = env->prog->len;
8387 int i;
8388
8389 for (i = 0; i < insn_cnt; i++, insn++)
8390 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
8391 insn->src_reg = 0;
8392}
8393
8041902d
AS
8394/* single env->prog->insnsi[off] instruction was replaced with the range
 8395 * insnsi[off, off + cnt). Adjust the corresponding insn_aux_data by copying
 8396 * [0, off) and [off, end) to new locations, so the patched range stays zeroed
8397 */
b325fbca
JW
8398static int adjust_insn_aux_data(struct bpf_verifier_env *env,
8399 struct bpf_prog *new_prog, u32 off, u32 cnt)
8041902d
AS
8400{
8401 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
b325fbca
JW
8402 struct bpf_insn *insn = new_prog->insnsi;
8403 u32 prog_len;
c131187d 8404 int i;
8041902d 8405
b325fbca
JW
 8406 /* aux info at OFF always needs adjustment, no matter whether the fast
 8407 * path (cnt == 1) is taken or not. There is no guarantee the insn at OFF
 8408 * is the original insn of the old prog.
8409 */
8410 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
8411
8041902d
AS
8412 if (cnt == 1)
8413 return 0;
b325fbca 8414 prog_len = new_prog->len;
fad953ce
KC
8415 new_data = vzalloc(array_size(prog_len,
8416 sizeof(struct bpf_insn_aux_data)));
8041902d
AS
8417 if (!new_data)
8418 return -ENOMEM;
8419 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
8420 memcpy(new_data + off + cnt - 1, old_data + off,
8421 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
b325fbca 8422 for (i = off; i < off + cnt - 1; i++) {
51c39bb1 8423 new_data[i].seen = env->pass_cnt;
b325fbca
JW
8424 new_data[i].zext_dst = insn_has_def32(env, insn + i);
8425 }
8041902d
AS
8426 env->insn_aux_data = new_data;
8427 vfree(old_data);
8428 return 0;
8429}
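/* Worked example (assuming off == 5, cnt == 3, i.e. one insn patched into
 * three): aux slots [0, 5) are copied unchanged, slots 5 and 6 for the two
 * newly inserted insns keep only 'seen' and a freshly computed 'zext_dst',
 * and the old aux data that started at slot 5 now lives at slot 7.
 */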
8430
cc8b0b92
AS
8431static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
8432{
8433 int i;
8434
8435 if (len == 1)
8436 return;
4cb3d99c
JW
8437 /* NOTE: fake 'exit' subprog should be updated as well. */
8438 for (i = 0; i <= env->subprog_cnt; i++) {
afd59424 8439 if (env->subprog_info[i].start <= off)
cc8b0b92 8440 continue;
9c8105bd 8441 env->subprog_info[i].start += len - 1;
cc8b0b92
AS
8442 }
8443}
8444
8041902d
AS
8445static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
8446 const struct bpf_insn *patch, u32 len)
8447{
8448 struct bpf_prog *new_prog;
8449
8450 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4f73379e
AS
8451 if (IS_ERR(new_prog)) {
8452 if (PTR_ERR(new_prog) == -ERANGE)
8453 verbose(env,
8454 "insn %d cannot be patched due to 16-bit range\n",
8455 env->insn_aux_data[off].orig_idx);
8041902d 8456 return NULL;
4f73379e 8457 }
b325fbca 8458 if (adjust_insn_aux_data(env, new_prog, off, len))
8041902d 8459 return NULL;
cc8b0b92 8460 adjust_subprog_starts(env, off, len);
8041902d
AS
8461 return new_prog;
8462}
8463
52875a04
JK
8464static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
8465 u32 off, u32 cnt)
8466{
8467 int i, j;
8468
8469 /* find first prog starting at or after off (first to remove) */
8470 for (i = 0; i < env->subprog_cnt; i++)
8471 if (env->subprog_info[i].start >= off)
8472 break;
8473 /* find first prog starting at or after off + cnt (first to stay) */
8474 for (j = i; j < env->subprog_cnt; j++)
8475 if (env->subprog_info[j].start >= off + cnt)
8476 break;
8477 /* if j doesn't start exactly at off + cnt, we are just removing
8478 * the front of previous prog
8479 */
8480 if (env->subprog_info[j].start != off + cnt)
8481 j--;
8482
8483 if (j > i) {
8484 struct bpf_prog_aux *aux = env->prog->aux;
8485 int move;
8486
8487 /* move fake 'exit' subprog as well */
8488 move = env->subprog_cnt + 1 - j;
8489
8490 memmove(env->subprog_info + i,
8491 env->subprog_info + j,
8492 sizeof(*env->subprog_info) * move);
8493 env->subprog_cnt -= j - i;
8494
8495 /* remove func_info */
8496 if (aux->func_info) {
8497 move = aux->func_info_cnt - j;
8498
8499 memmove(aux->func_info + i,
8500 aux->func_info + j,
8501 sizeof(*aux->func_info) * move);
8502 aux->func_info_cnt -= j - i;
8503 /* func_info->insn_off is set after all code rewrites,
8504 * in adjust_btf_func() - no need to adjust
8505 */
8506 }
8507 } else {
8508 /* convert i from "first prog to remove" to "first to adjust" */
8509 if (env->subprog_info[i].start == off)
8510 i++;
8511 }
8512
8513 /* update fake 'exit' subprog as well */
8514 for (; i <= env->subprog_cnt; i++)
8515 env->subprog_info[i].start -= cnt;
8516
8517 return 0;
8518}
8519
8520static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
8521 u32 cnt)
8522{
8523 struct bpf_prog *prog = env->prog;
8524 u32 i, l_off, l_cnt, nr_linfo;
8525 struct bpf_line_info *linfo;
8526
8527 nr_linfo = prog->aux->nr_linfo;
8528 if (!nr_linfo)
8529 return 0;
8530
8531 linfo = prog->aux->linfo;
8532
8533 /* find first line info to remove, count lines to be removed */
8534 for (i = 0; i < nr_linfo; i++)
8535 if (linfo[i].insn_off >= off)
8536 break;
8537
8538 l_off = i;
8539 l_cnt = 0;
8540 for (; i < nr_linfo; i++)
8541 if (linfo[i].insn_off < off + cnt)
8542 l_cnt++;
8543 else
8544 break;
8545
 8546 /* If the first live insn doesn't match the first live linfo, it needs to
 8547 * "inherit" the last removed linfo. prog is already modified, so prog->len
 8548 * == off means no live instructions remain (the tail was removed).
8549 */
8550 if (prog->len != off && l_cnt &&
8551 (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
8552 l_cnt--;
8553 linfo[--i].insn_off = off + cnt;
8554 }
8555
8556 /* remove the line info which refer to the removed instructions */
8557 if (l_cnt) {
8558 memmove(linfo + l_off, linfo + i,
8559 sizeof(*linfo) * (nr_linfo - i));
8560
8561 prog->aux->nr_linfo -= l_cnt;
8562 nr_linfo = prog->aux->nr_linfo;
8563 }
8564
8565 /* pull all linfo[i].insn_off >= off + cnt in by cnt */
8566 for (i = l_off; i < nr_linfo; i++)
8567 linfo[i].insn_off -= cnt;
8568
8569 /* fix up all subprogs (incl. 'exit') which start >= off */
8570 for (i = 0; i <= env->subprog_cnt; i++)
8571 if (env->subprog_info[i].linfo_idx > l_off) {
8572 /* program may have started in the removed region but
8573 * may not be fully removed
8574 */
8575 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
8576 env->subprog_info[i].linfo_idx -= l_cnt;
8577 else
8578 env->subprog_info[i].linfo_idx = l_off;
8579 }
8580
8581 return 0;
8582}
8583
8584static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
8585{
8586 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8587 unsigned int orig_prog_len = env->prog->len;
8588 int err;
8589
08ca90af
JK
8590 if (bpf_prog_is_dev_bound(env->prog->aux))
8591 bpf_prog_offload_remove_insns(env, off, cnt);
8592
52875a04
JK
8593 err = bpf_remove_insns(env->prog, off, cnt);
8594 if (err)
8595 return err;
8596
8597 err = adjust_subprog_starts_after_remove(env, off, cnt);
8598 if (err)
8599 return err;
8600
8601 err = bpf_adj_linfo_after_remove(env, off, cnt);
8602 if (err)
8603 return err;
8604
8605 memmove(aux_data + off, aux_data + off + cnt,
8606 sizeof(*aux_data) * (orig_prog_len - off - cnt));
8607
8608 return 0;
8609}
8610
2a5418a1
DB
8611/* The verifier does more data flow analysis than llvm and will not
8612 * explore branches that are dead at run time. Malicious programs can
8613 * have dead code too. Therefore replace all dead at-run-time code
8614 * with 'ja -1'.
8615 *
 8616 * Plain nops would not be ideal: if they sat at the end of the program
 8617 * and another bug managed to jump there, we would execute beyond program
 8618 * memory. Returning an exception code also wouldn't work, since the dead
 8619 * code could be located inside subprogs.
c131187d
AS
8621 */
8622static void sanitize_dead_code(struct bpf_verifier_env *env)
8623{
8624 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
2a5418a1 8625 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
c131187d
AS
8626 struct bpf_insn *insn = env->prog->insnsi;
8627 const int insn_cnt = env->prog->len;
8628 int i;
8629
8630 for (i = 0; i < insn_cnt; i++) {
8631 if (aux_data[i].seen)
8632 continue;
2a5418a1 8633 memcpy(insn + i, &trap, sizeof(trap));
c131187d
AS
8634 }
8635}
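/* Note: the trap BPF_JMP_IMM(BPF_JA, 0, 0, -1) at insn i branches to
 * i + insn->off + 1 == i, i.e. it spins in place rather than running off
 * the end of the image should a bug ever steer execution into dead code.
 */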
8636
e2ae4ca2
JK
8637static bool insn_is_cond_jump(u8 code)
8638{
8639 u8 op;
8640
092ed096
JW
8641 if (BPF_CLASS(code) == BPF_JMP32)
8642 return true;
8643
e2ae4ca2
JK
8644 if (BPF_CLASS(code) != BPF_JMP)
8645 return false;
8646
8647 op = BPF_OP(code);
8648 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
8649}
8650
8651static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
8652{
8653 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8654 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8655 struct bpf_insn *insn = env->prog->insnsi;
8656 const int insn_cnt = env->prog->len;
8657 int i;
8658
8659 for (i = 0; i < insn_cnt; i++, insn++) {
8660 if (!insn_is_cond_jump(insn->code))
8661 continue;
8662
8663 if (!aux_data[i + 1].seen)
8664 ja.off = insn->off;
8665 else if (!aux_data[i + 1 + insn->off].seen)
8666 ja.off = 0;
8667 else
8668 continue;
8669
08ca90af
JK
8670 if (bpf_prog_is_dev_bound(env->prog->aux))
8671 bpf_prog_offload_replace_insn(env, i, &ja);
8672
e2ae4ca2
JK
8673 memcpy(insn, &ja, sizeof(ja));
8674 }
8675}
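/* For example: if the fall-through insn at i + 1 was never seen, the
 * conditional jump is hard-wired to always take the branch ('ja insn->off');
 * if instead the branch target at i + 1 + insn->off is dead, it becomes
 * 'ja +0', a nop that opt_remove_nops() below strips out.
 */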
8676
52875a04
JK
8677static int opt_remove_dead_code(struct bpf_verifier_env *env)
8678{
8679 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8680 int insn_cnt = env->prog->len;
8681 int i, err;
8682
8683 for (i = 0; i < insn_cnt; i++) {
8684 int j;
8685
8686 j = 0;
8687 while (i + j < insn_cnt && !aux_data[i + j].seen)
8688 j++;
8689 if (!j)
8690 continue;
8691
8692 err = verifier_remove_insns(env, i, j);
8693 if (err)
8694 return err;
8695 insn_cnt = env->prog->len;
8696 }
8697
8698 return 0;
8699}
8700
a1b14abc
JK
8701static int opt_remove_nops(struct bpf_verifier_env *env)
8702{
8703 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8704 struct bpf_insn *insn = env->prog->insnsi;
8705 int insn_cnt = env->prog->len;
8706 int i, err;
8707
8708 for (i = 0; i < insn_cnt; i++) {
8709 if (memcmp(&insn[i], &ja, sizeof(ja)))
8710 continue;
8711
8712 err = verifier_remove_insns(env, i, 1);
8713 if (err)
8714 return err;
8715 insn_cnt--;
8716 i--;
8717 }
8718
8719 return 0;
8720}
8721
d6c2308c
JW
8722static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
8723 const union bpf_attr *attr)
a4b1d3c1 8724{
d6c2308c 8725 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
a4b1d3c1 8726 struct bpf_insn_aux_data *aux = env->insn_aux_data;
d6c2308c 8727 int i, patch_len, delta = 0, len = env->prog->len;
a4b1d3c1 8728 struct bpf_insn *insns = env->prog->insnsi;
a4b1d3c1 8729 struct bpf_prog *new_prog;
d6c2308c 8730 bool rnd_hi32;
a4b1d3c1 8731
d6c2308c 8732 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
a4b1d3c1 8733 zext_patch[1] = BPF_ZEXT_REG(0);
d6c2308c
JW
8734 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
8735 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
8736 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
a4b1d3c1
JW
8737 for (i = 0; i < len; i++) {
8738 int adj_idx = i + delta;
8739 struct bpf_insn insn;
8740
d6c2308c
JW
8741 insn = insns[adj_idx];
8742 if (!aux[adj_idx].zext_dst) {
8743 u8 code, class;
8744 u32 imm_rnd;
8745
8746 if (!rnd_hi32)
8747 continue;
8748
8749 code = insn.code;
8750 class = BPF_CLASS(code);
8751 if (insn_no_def(&insn))
8752 continue;
8753
8754 /* NOTE: arg "reg" (the fourth one) is only used for
8755 * BPF_STX which has been ruled out in above
8756 * check, it is safe to pass NULL here.
8757 */
8758 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
8759 if (class == BPF_LD &&
8760 BPF_MODE(code) == BPF_IMM)
8761 i++;
8762 continue;
8763 }
8764
8765 /* ctx load could be transformed into wider load. */
8766 if (class == BPF_LDX &&
8767 aux[adj_idx].ptr_type == PTR_TO_CTX)
8768 continue;
8769
8770 imm_rnd = get_random_int();
8771 rnd_hi32_patch[0] = insn;
8772 rnd_hi32_patch[1].imm = imm_rnd;
8773 rnd_hi32_patch[3].dst_reg = insn.dst_reg;
8774 patch = rnd_hi32_patch;
8775 patch_len = 4;
8776 goto apply_patch_buffer;
8777 }
8778
8779 if (!bpf_jit_needs_zext())
a4b1d3c1
JW
8780 continue;
8781
a4b1d3c1
JW
8782 zext_patch[0] = insn;
8783 zext_patch[1].dst_reg = insn.dst_reg;
8784 zext_patch[1].src_reg = insn.dst_reg;
d6c2308c
JW
8785 patch = zext_patch;
8786 patch_len = 2;
8787apply_patch_buffer:
8788 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
a4b1d3c1
JW
8789 if (!new_prog)
8790 return -ENOMEM;
8791 env->prog = new_prog;
8792 insns = new_prog->insnsi;
8793 aux = env->insn_aux_data;
d6c2308c 8794 delta += patch_len - 1;
a4b1d3c1
JW
8795 }
8796
8797 return 0;
8798}
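/* A minimal sketch of both patch shapes (register numbers hypothetical):
 * on a JIT that returns true from bpf_jit_needs_zext(), a 32-bit def like
 *
 *	w3 = w1
 *
 * gets BPF_ZEXT_REG(3) appended, a mov32 marked so the JIT emits an
 * explicit zero extension. With BPF_F_TEST_RND_HI32 the upper half is
 * deliberately polluted instead:
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd);
 *	BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
 *	BPF_ALU64_REG(BPF_OR, BPF_REG_3, BPF_REG_AX);
 *
 * so programs that rely on undefined hi32 bits fail loudly in testing.
 */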
8799
c64b7983
JS
8800/* convert load instructions that access fields of a context type into a
8801 * sequence of instructions that access fields of the underlying structure:
8802 * struct __sk_buff -> struct sk_buff
8803 * struct bpf_sock_ops -> struct sock
9bac3d6d 8804 */
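/* For example (sketch, concrete offsets deliberately omitted): a program
 * load such as
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len));
 *
 * is rewritten via the prog type's convert_ctx_access() callback into the
 * equivalent load(s) on the underlying struct sk_buff; each callback owns
 * the real field offsets and any masking needed.
 */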
58e2af8b 8805static int convert_ctx_accesses(struct bpf_verifier_env *env)
9bac3d6d 8806{
00176a34 8807 const struct bpf_verifier_ops *ops = env->ops;
f96da094 8808 int i, cnt, size, ctx_field_size, delta = 0;
3df126f3 8809 const int insn_cnt = env->prog->len;
36bbef52 8810 struct bpf_insn insn_buf[16], *insn;
46f53a65 8811 u32 target_size, size_default, off;
9bac3d6d 8812 struct bpf_prog *new_prog;
d691f9e8 8813 enum bpf_access_type type;
f96da094 8814 bool is_narrower_load;
9bac3d6d 8815
b09928b9
DB
8816 if (ops->gen_prologue || env->seen_direct_write) {
8817 if (!ops->gen_prologue) {
8818 verbose(env, "bpf verifier is misconfigured\n");
8819 return -EINVAL;
8820 }
36bbef52
DB
8821 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
8822 env->prog);
8823 if (cnt >= ARRAY_SIZE(insn_buf)) {
61bd5218 8824 verbose(env, "bpf verifier is misconfigured\n");
36bbef52
DB
8825 return -EINVAL;
8826 } else if (cnt) {
8041902d 8827 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
36bbef52
DB
8828 if (!new_prog)
8829 return -ENOMEM;
8041902d 8830
36bbef52 8831 env->prog = new_prog;
3df126f3 8832 delta += cnt - 1;
36bbef52
DB
8833 }
8834 }
8835
c64b7983 8836 if (bpf_prog_is_dev_bound(env->prog->aux))
9bac3d6d
AS
8837 return 0;
8838
3df126f3 8839 insn = env->prog->insnsi + delta;
36bbef52 8840
9bac3d6d 8841 for (i = 0; i < insn_cnt; i++, insn++) {
c64b7983
JS
8842 bpf_convert_ctx_access_t convert_ctx_access;
8843
62c7989b
DB
8844 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
8845 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
8846 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
ea2e7ce5 8847 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
d691f9e8 8848 type = BPF_READ;
62c7989b
DB
8849 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
8850 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
8851 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
ea2e7ce5 8852 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
d691f9e8
AS
8853 type = BPF_WRITE;
8854 else
9bac3d6d
AS
8855 continue;
8856
af86ca4e
AS
8857 if (type == BPF_WRITE &&
8858 env->insn_aux_data[i + delta].sanitize_stack_off) {
8859 struct bpf_insn patch[] = {
8860 /* Sanitize suspicious stack slot with zero.
8861 * There are no memory dependencies for this store,
8862 * since it's only using frame pointer and immediate
8863 * constant of zero
8864 */
8865 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
8866 env->insn_aux_data[i + delta].sanitize_stack_off,
8867 0),
8868 /* the original STX instruction will immediately
8869 * overwrite the same stack slot with appropriate value
8870 */
8871 *insn,
8872 };
8873
8874 cnt = ARRAY_SIZE(patch);
8875 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
8876 if (!new_prog)
8877 return -ENOMEM;
8878
8879 delta += cnt - 1;
8880 env->prog = new_prog;
8881 insn = new_prog->insnsi + i + delta;
8882 continue;
8883 }
8884
c64b7983
JS
8885 switch (env->insn_aux_data[i + delta].ptr_type) {
8886 case PTR_TO_CTX:
8887 if (!ops->convert_ctx_access)
8888 continue;
8889 convert_ctx_access = ops->convert_ctx_access;
8890 break;
8891 case PTR_TO_SOCKET:
46f8bc92 8892 case PTR_TO_SOCK_COMMON:
c64b7983
JS
8893 convert_ctx_access = bpf_sock_convert_ctx_access;
8894 break;
655a51e5
MKL
8895 case PTR_TO_TCP_SOCK:
8896 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
8897 break;
fada7fdc
JL
8898 case PTR_TO_XDP_SOCK:
8899 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
8900 break;
2a02759e 8901 case PTR_TO_BTF_ID:
27ae7997
MKL
8902 if (type == BPF_READ) {
8903 insn->code = BPF_LDX | BPF_PROBE_MEM |
8904 BPF_SIZE((insn)->code);
8905 env->prog->aux->num_exentries++;
8906 } else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
2a02759e
AS
8907 verbose(env, "Writes through BTF pointers are not allowed\n");
8908 return -EINVAL;
8909 }
2a02759e 8910 continue;
c64b7983 8911 default:
9bac3d6d 8912 continue;
c64b7983 8913 }
9bac3d6d 8914
31fd8581 8915 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
f96da094 8916 size = BPF_LDST_BYTES(insn);
31fd8581
YS
8917
8918 /* If the read access is a narrower load of the field,
8919 * convert to a 4/8-byte load, to minimum program type specific
8920 * convert_ctx_access changes. If conversion is successful,
8921 * we will apply proper mask to the result.
8922 */
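 /* Sketch (assuming a 4-byte field and a little-endian target): a 1-byte
 * read of byte 2 of the field is widened to a BPF_W load of the whole
 * field, and the code further below appends
 *
 *	BPF_ALU32_IMM(BPF_RSH, dst_reg, 16);
 *	BPF_ALU32_IMM(BPF_AND, dst_reg, 0xff);
 *
 * where the shift amount comes from bpf_ctx_narrow_access_offset().
 */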
f96da094 8923 is_narrower_load = size < ctx_field_size;
46f53a65
AI
8924 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
8925 off = insn->off;
31fd8581 8926 if (is_narrower_load) {
f96da094
DB
8927 u8 size_code;
8928
8929 if (type == BPF_WRITE) {
61bd5218 8930 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
f96da094
DB
8931 return -EINVAL;
8932 }
31fd8581 8933
f96da094 8934 size_code = BPF_H;
31fd8581
YS
8935 if (ctx_field_size == 4)
8936 size_code = BPF_W;
8937 else if (ctx_field_size == 8)
8938 size_code = BPF_DW;
f96da094 8939
bc23105c 8940 insn->off = off & ~(size_default - 1);
31fd8581
YS
8941 insn->code = BPF_LDX | BPF_MEM | size_code;
8942 }
f96da094
DB
8943
8944 target_size = 0;
c64b7983
JS
8945 cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
8946 &target_size);
f96da094
DB
8947 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
8948 (ctx_field_size && !target_size)) {
61bd5218 8949 verbose(env, "bpf verifier is misconfigured\n");
9bac3d6d
AS
8950 return -EINVAL;
8951 }
f96da094
DB
8952
8953 if (is_narrower_load && size < target_size) {
d895a0f1
IL
8954 u8 shift = bpf_ctx_narrow_access_offset(
8955 off, size, size_default) * 8;
46f53a65
AI
8956 if (ctx_field_size <= 4) {
8957 if (shift)
8958 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
8959 insn->dst_reg,
8960 shift);
31fd8581 8961 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
f96da094 8962 (1 << size * 8) - 1);
46f53a65
AI
8963 } else {
8964 if (shift)
8965 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
8966 insn->dst_reg,
8967 shift);
31fd8581 8968 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
e2f7fc0a 8969 (1ULL << size * 8) - 1);
46f53a65 8970 }
31fd8581 8971 }
9bac3d6d 8972
8041902d 8973 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9bac3d6d
AS
8974 if (!new_prog)
8975 return -ENOMEM;
8976
3df126f3 8977 delta += cnt - 1;
9bac3d6d
AS
8978
8979 /* keep walking new program and skip insns we just inserted */
8980 env->prog = new_prog;
3df126f3 8981 insn = new_prog->insnsi + i + delta;
9bac3d6d
AS
8982 }
8983
8984 return 0;
8985}
8986
1c2a088a
AS
8987static int jit_subprogs(struct bpf_verifier_env *env)
8988{
8989 struct bpf_prog *prog = env->prog, **func, *tmp;
8990 int i, j, subprog_start, subprog_end = 0, len, subprog;
7105e828 8991 struct bpf_insn *insn;
1c2a088a 8992 void *old_bpf_func;
c454a46b 8993 int err;
1c2a088a 8994
f910cefa 8995 if (env->subprog_cnt <= 1)
1c2a088a
AS
8996 return 0;
8997
7105e828 8998 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
1c2a088a
AS
8999 if (insn->code != (BPF_JMP | BPF_CALL) ||
9000 insn->src_reg != BPF_PSEUDO_CALL)
9001 continue;
c7a89784
DB
9002 /* Upon error here we cannot fall back to interpreter but
9003 * need a hard reject of the program. Thus -EFAULT is
9004 * propagated in any case.
9005 */
1c2a088a
AS
9006 subprog = find_subprog(env, i + insn->imm + 1);
9007 if (subprog < 0) {
9008 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
9009 i + insn->imm + 1);
9010 return -EFAULT;
9011 }
9012 /* temporarily remember subprog id inside insn instead of
9013 * aux_data, since next loop will split up all insns into funcs
9014 */
f910cefa 9015 insn->off = subprog;
1c2a088a
AS
 9016 /* remember original imm in case JIT fails and a fallback
 9017 * to the interpreter is needed
9018 */
9019 env->insn_aux_data[i].call_imm = insn->imm;
9020 /* point imm to __bpf_call_base+1 from JITs point of view */
9021 insn->imm = 1;
9022 }
9023
c454a46b
MKL
9024 err = bpf_prog_alloc_jited_linfo(prog);
9025 if (err)
9026 goto out_undo_insn;
9027
9028 err = -ENOMEM;
6396bb22 9029 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
1c2a088a 9030 if (!func)
c7a89784 9031 goto out_undo_insn;
1c2a088a 9032
f910cefa 9033 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a 9034 subprog_start = subprog_end;
4cb3d99c 9035 subprog_end = env->subprog_info[i + 1].start;
1c2a088a
AS
9036
9037 len = subprog_end - subprog_start;
492ecee8
AS
9038 /* BPF_PROG_RUN doesn't call subprogs directly,
9039 * hence main prog stats include the runtime of subprogs.
9040 * subprogs don't have IDs and not reachable via prog_get_next_id
9041 * func[i]->aux->stats will never be accessed and stays NULL
9042 */
9043 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
1c2a088a
AS
9044 if (!func[i])
9045 goto out_free;
9046 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
9047 len * sizeof(struct bpf_insn));
4f74d809 9048 func[i]->type = prog->type;
1c2a088a 9049 func[i]->len = len;
4f74d809
DB
9050 if (bpf_prog_calc_tag(func[i]))
9051 goto out_free;
1c2a088a 9052 func[i]->is_func = 1;
ba64e7d8
YS
9053 func[i]->aux->func_idx = i;
9054 /* the btf and func_info will be freed only at prog->aux */
9055 func[i]->aux->btf = prog->aux->btf;
9056 func[i]->aux->func_info = prog->aux->func_info;
9057
1c2a088a
AS
9058 /* Use bpf_prog_F_tag to indicate functions in stack traces.
9059 * Long term would need debug info to populate names
9060 */
9061 func[i]->aux->name[0] = 'F';
9c8105bd 9062 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
1c2a088a 9063 func[i]->jit_requested = 1;
c454a46b
MKL
9064 func[i]->aux->linfo = prog->aux->linfo;
9065 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
9066 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
9067 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
1c2a088a
AS
9068 func[i] = bpf_int_jit_compile(func[i]);
9069 if (!func[i]->jited) {
9070 err = -ENOTSUPP;
9071 goto out_free;
9072 }
9073 cond_resched();
9074 }
9075 /* at this point all bpf functions were successfully JITed
9076 * now populate all bpf_calls with correct addresses and
9077 * run last pass of JIT
9078 */
f910cefa 9079 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9080 insn = func[i]->insnsi;
9081 for (j = 0; j < func[i]->len; j++, insn++) {
9082 if (insn->code != (BPF_JMP | BPF_CALL) ||
9083 insn->src_reg != BPF_PSEUDO_CALL)
9084 continue;
9085 subprog = insn->off;
0d306c31
PB
9086 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
9087 __bpf_call_base;
1c2a088a 9088 }
2162fed4
SD
9089
9090 /* we use the aux data to keep a list of the start addresses
9091 * of the JITed images for each function in the program
9092 *
9093 * for some architectures, such as powerpc64, the imm field
9094 * might not be large enough to hold the offset of the start
9095 * address of the callee's JITed image from __bpf_call_base
9096 *
9097 * in such cases, we can lookup the start address of a callee
9098 * by using its subprog id, available from the off field of
9099 * the call instruction, as an index for this list
9100 */
9101 func[i]->aux->func = func;
9102 func[i]->aux->func_cnt = env->subprog_cnt;
1c2a088a 9103 }
f910cefa 9104 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9105 old_bpf_func = func[i]->bpf_func;
9106 tmp = bpf_int_jit_compile(func[i]);
9107 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
9108 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
c7a89784 9109 err = -ENOTSUPP;
1c2a088a
AS
9110 goto out_free;
9111 }
9112 cond_resched();
9113 }
9114
9115 /* finally lock prog and jit images for all functions and
 9116 * populate kallsyms
9117 */
f910cefa 9118 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9119 bpf_prog_lock_ro(func[i]);
9120 bpf_prog_kallsyms_add(func[i]);
9121 }
7105e828
DB
9122
9123 /* Last step: make now unused interpreter insns from main
9124 * prog consistent for later dump requests, so they can
9125 * later look the same as if they were interpreted only.
9126 */
9127 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
7105e828
DB
9128 if (insn->code != (BPF_JMP | BPF_CALL) ||
9129 insn->src_reg != BPF_PSEUDO_CALL)
9130 continue;
9131 insn->off = env->insn_aux_data[i].call_imm;
9132 subprog = find_subprog(env, i + insn->off + 1);
dbecd738 9133 insn->imm = subprog;
7105e828
DB
9134 }
9135
1c2a088a
AS
9136 prog->jited = 1;
9137 prog->bpf_func = func[0]->bpf_func;
9138 prog->aux->func = func;
f910cefa 9139 prog->aux->func_cnt = env->subprog_cnt;
c454a46b 9140 bpf_prog_free_unused_jited_linfo(prog);
1c2a088a
AS
9141 return 0;
9142out_free:
f910cefa 9143 for (i = 0; i < env->subprog_cnt; i++)
1c2a088a
AS
9144 if (func[i])
9145 bpf_jit_free(func[i]);
9146 kfree(func);
c7a89784 9147out_undo_insn:
1c2a088a
AS
9148 /* cleanup main prog to be interpreted */
9149 prog->jit_requested = 0;
9150 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
9151 if (insn->code != (BPF_JMP | BPF_CALL) ||
9152 insn->src_reg != BPF_PSEUDO_CALL)
9153 continue;
9154 insn->off = 0;
9155 insn->imm = env->insn_aux_data[i].call_imm;
9156 }
c454a46b 9157 bpf_prog_free_jited_linfo(prog);
1c2a088a
AS
9158 return err;
9159}
9160
1ea47e01
AS
9161static int fixup_call_args(struct bpf_verifier_env *env)
9162{
19d28fbd 9163#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
9164 struct bpf_prog *prog = env->prog;
9165 struct bpf_insn *insn = prog->insnsi;
9166 int i, depth;
19d28fbd 9167#endif
e4052d06 9168 int err = 0;
1ea47e01 9169
e4052d06
QM
9170 if (env->prog->jit_requested &&
9171 !bpf_prog_is_dev_bound(env->prog->aux)) {
19d28fbd
DM
9172 err = jit_subprogs(env);
9173 if (err == 0)
1c2a088a 9174 return 0;
c7a89784
DB
9175 if (err == -EFAULT)
9176 return err;
19d28fbd
DM
9177 }
9178#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
9179 for (i = 0; i < prog->len; i++, insn++) {
9180 if (insn->code != (BPF_JMP | BPF_CALL) ||
9181 insn->src_reg != BPF_PSEUDO_CALL)
9182 continue;
9183 depth = get_callee_stack_depth(env, insn, i);
9184 if (depth < 0)
9185 return depth;
9186 bpf_patch_call_args(insn, depth);
9187 }
19d28fbd
DM
9188 err = 0;
9189#endif
9190 return err;
1ea47e01
AS
9191}
9192
79741b3b 9193/* fixup insn->imm field of bpf_call instructions
81ed18ab 9194 * and inline eligible helpers as explicit sequence of BPF instructions
e245c5c6
AS
9195 *
9196 * this function is called after eBPF program passed verification
9197 */
79741b3b 9198static int fixup_bpf_calls(struct bpf_verifier_env *env)
e245c5c6 9199{
79741b3b 9200 struct bpf_prog *prog = env->prog;
d2e4c1e6 9201 bool expect_blinding = bpf_jit_blinding_enabled(prog);
79741b3b 9202 struct bpf_insn *insn = prog->insnsi;
e245c5c6 9203 const struct bpf_func_proto *fn;
79741b3b 9204 const int insn_cnt = prog->len;
09772d92 9205 const struct bpf_map_ops *ops;
c93552c4 9206 struct bpf_insn_aux_data *aux;
81ed18ab
AS
9207 struct bpf_insn insn_buf[16];
9208 struct bpf_prog *new_prog;
9209 struct bpf_map *map_ptr;
d2e4c1e6 9210 int i, ret, cnt, delta = 0;
e245c5c6 9211
79741b3b 9212 for (i = 0; i < insn_cnt; i++, insn++) {
f6b1b3bf
DB
9213 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
9214 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9215 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
68fda450 9216 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
f6b1b3bf
DB
9217 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
9218 struct bpf_insn mask_and_div[] = {
9219 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9220 /* Rx div 0 -> 0 */
9221 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
9222 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
9223 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9224 *insn,
9225 };
9226 struct bpf_insn mask_and_mod[] = {
9227 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9228 /* Rx mod 0 -> Rx */
9229 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
9230 *insn,
9231 };
9232 struct bpf_insn *patchlet;
9233
9234 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9235 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
9236 patchlet = mask_and_div + (is64 ? 1 : 0);
9237 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
9238 } else {
9239 patchlet = mask_and_mod + (is64 ? 1 : 0);
9240 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
9241 }
9242
9243 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
68fda450
AS
9244 if (!new_prog)
9245 return -ENOMEM;
9246
9247 delta += cnt - 1;
9248 env->prog = prog = new_prog;
9249 insn = new_prog->insnsi + i + delta;
9250 continue;
9251 }
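 /* e.g. for a 32-bit divide by register, the patchlet above expands
 *
 *	w2 /= w3
 *
 * into (sketch):
 *
 *	w3 = w3			/- truncate src to 32 bit -/
 *	if w3 != 0 goto +2
 *	w2 ^= w2		/- div by zero yields 0 -/
 *	goto +1
 *	w2 /= w3
 *
 * so the JITed code never executes a native divide by zero.
 */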
9252
e0cea7ce
DB
9253 if (BPF_CLASS(insn->code) == BPF_LD &&
9254 (BPF_MODE(insn->code) == BPF_ABS ||
9255 BPF_MODE(insn->code) == BPF_IND)) {
9256 cnt = env->ops->gen_ld_abs(insn, insn_buf);
9257 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9258 verbose(env, "bpf verifier is misconfigured\n");
9259 return -EINVAL;
9260 }
9261
9262 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9263 if (!new_prog)
9264 return -ENOMEM;
9265
9266 delta += cnt - 1;
9267 env->prog = prog = new_prog;
9268 insn = new_prog->insnsi + i + delta;
9269 continue;
9270 }
9271
979d63d5
DB
9272 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
9273 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
9274 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
9275 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
9276 struct bpf_insn insn_buf[16];
9277 struct bpf_insn *patch = &insn_buf[0];
9278 bool issrc, isneg;
9279 u32 off_reg;
9280
9281 aux = &env->insn_aux_data[i + delta];
3612af78
DB
9282 if (!aux->alu_state ||
9283 aux->alu_state == BPF_ALU_NON_POINTER)
979d63d5
DB
9284 continue;
9285
9286 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
9287 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
9288 BPF_ALU_SANITIZE_SRC;
9289
9290 off_reg = issrc ? insn->src_reg : insn->dst_reg;
9291 if (isneg)
9292 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9293 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
9294 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
9295 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
9296 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
9297 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
9298 if (issrc) {
9299 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
9300 off_reg);
9301 insn->src_reg = BPF_REG_AX;
9302 } else {
9303 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
9304 BPF_REG_AX);
9305 }
9306 if (isneg)
9307 insn->code = insn->code == code_add ?
9308 code_sub : code_add;
9309 *patch++ = *insn;
9310 if (issrc && isneg)
9311 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9312 cnt = patch - insn_buf;
9313
9314 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9315 if (!new_prog)
9316 return -ENOMEM;
9317
9318 delta += cnt - 1;
9319 env->prog = prog = new_prog;
9320 insn = new_prog->insnsi + i + delta;
9321 continue;
9322 }
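 /* Rough sketch of the masking above, for 'dst += off_reg' with alu_limit
 * L: ax = (L - 1) - off_reg; ax |= off_reg; ax = -ax; ax s>>= 63. That
 * yields an all-ones mask while off_reg stays within [0, L) and all-zero
 * otherwise, so the final AND clamps any speculatively out-of-range
 * offset to 0 before the pointer arithmetic executes.
 */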
9323
79741b3b
AS
9324 if (insn->code != (BPF_JMP | BPF_CALL))
9325 continue;
cc8b0b92
AS
9326 if (insn->src_reg == BPF_PSEUDO_CALL)
9327 continue;
e245c5c6 9328
79741b3b
AS
9329 if (insn->imm == BPF_FUNC_get_route_realm)
9330 prog->dst_needed = 1;
9331 if (insn->imm == BPF_FUNC_get_prandom_u32)
9332 bpf_user_rnd_init_once();
9802d865
JB
9333 if (insn->imm == BPF_FUNC_override_return)
9334 prog->kprobe_override = 1;
79741b3b 9335 if (insn->imm == BPF_FUNC_tail_call) {
7b9f6da1
DM
9336 /* If we tail call into other programs, we
9337 * cannot make any assumptions since they can
9338 * be replaced dynamically during runtime in
9339 * the program array.
9340 */
9341 prog->cb_access = 1;
80a58d02 9342 env->prog->aux->stack_depth = MAX_BPF_STACK;
e647815a 9343 env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
7b9f6da1 9344
79741b3b
AS
9345 /* mark bpf_tail_call as different opcode to avoid
 9346 * conditional branch in the interpreter for every normal
9347 * call and to prevent accidental JITing by JIT compiler
9348 * that doesn't support bpf_tail_call yet
e245c5c6 9349 */
79741b3b 9350 insn->imm = 0;
71189fa9 9351 insn->code = BPF_JMP | BPF_TAIL_CALL;
b2157399 9352
c93552c4 9353 aux = &env->insn_aux_data[i + delta];
cc52d914
DB
9354 if (env->allow_ptr_leaks && !expect_blinding &&
9355 prog->jit_requested &&
d2e4c1e6
DB
9356 !bpf_map_key_poisoned(aux) &&
9357 !bpf_map_ptr_poisoned(aux) &&
9358 !bpf_map_ptr_unpriv(aux)) {
9359 struct bpf_jit_poke_descriptor desc = {
9360 .reason = BPF_POKE_REASON_TAIL_CALL,
9361 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
9362 .tail_call.key = bpf_map_key_immediate(aux),
9363 };
9364
9365 ret = bpf_jit_add_poke_descriptor(prog, &desc);
9366 if (ret < 0) {
9367 verbose(env, "adding tail call poke descriptor failed\n");
9368 return ret;
9369 }
9370
9371 insn->imm = ret + 1;
9372 continue;
9373 }
9374
c93552c4
DB
9375 if (!bpf_map_ptr_unpriv(aux))
9376 continue;
9377
b2157399
AS
9378 /* instead of changing every JIT dealing with tail_call
9379 * emit two extra insns:
9380 * if (index >= max_entries) goto out;
9381 * index &= array->index_mask;
9382 * to avoid out-of-bounds cpu speculation
9383 */
c93552c4 9384 if (bpf_map_ptr_poisoned(aux)) {
40950343 9385 verbose(env, "tail_call abusing map_ptr\n");
b2157399
AS
9386 return -EINVAL;
9387 }
c93552c4 9388
d2e4c1e6 9389 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
b2157399
AS
9390 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
9391 map_ptr->max_entries, 2);
9392 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
9393 container_of(map_ptr,
9394 struct bpf_array,
9395 map)->index_mask);
9396 insn_buf[2] = *insn;
9397 cnt = 3;
9398 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9399 if (!new_prog)
9400 return -ENOMEM;
9401
9402 delta += cnt - 1;
9403 env->prog = prog = new_prog;
9404 insn = new_prog->insnsi + i + delta;
79741b3b
AS
9405 continue;
9406 }
e245c5c6 9407
89c63074 9408 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
09772d92
DB
9409 * and other inlining handlers are currently limited to 64 bit
9410 * only.
89c63074 9411 */
60b58afc 9412 if (prog->jit_requested && BITS_PER_LONG == 64 &&
09772d92
DB
9413 (insn->imm == BPF_FUNC_map_lookup_elem ||
9414 insn->imm == BPF_FUNC_map_update_elem ||
84430d42
DB
9415 insn->imm == BPF_FUNC_map_delete_elem ||
9416 insn->imm == BPF_FUNC_map_push_elem ||
9417 insn->imm == BPF_FUNC_map_pop_elem ||
9418 insn->imm == BPF_FUNC_map_peek_elem)) {
c93552c4
DB
9419 aux = &env->insn_aux_data[i + delta];
9420 if (bpf_map_ptr_poisoned(aux))
9421 goto patch_call_imm;
9422
d2e4c1e6 9423 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
09772d92
DB
9424 ops = map_ptr->ops;
9425 if (insn->imm == BPF_FUNC_map_lookup_elem &&
9426 ops->map_gen_lookup) {
9427 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
9428 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9429 verbose(env, "bpf verifier is misconfigured\n");
9430 return -EINVAL;
9431 }
81ed18ab 9432
09772d92
DB
9433 new_prog = bpf_patch_insn_data(env, i + delta,
9434 insn_buf, cnt);
9435 if (!new_prog)
9436 return -ENOMEM;
81ed18ab 9437
09772d92
DB
9438 delta += cnt - 1;
9439 env->prog = prog = new_prog;
9440 insn = new_prog->insnsi + i + delta;
9441 continue;
9442 }
81ed18ab 9443
09772d92
DB
9444 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
9445 (void *(*)(struct bpf_map *map, void *key))NULL));
9446 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
9447 (int (*)(struct bpf_map *map, void *key))NULL));
9448 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
9449 (int (*)(struct bpf_map *map, void *key, void *value,
9450 u64 flags))NULL));
84430d42
DB
9451 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
9452 (int (*)(struct bpf_map *map, void *value,
9453 u64 flags))NULL));
9454 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
9455 (int (*)(struct bpf_map *map, void *value))NULL));
9456 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
9457 (int (*)(struct bpf_map *map, void *value))NULL));
9458
09772d92
DB
9459 switch (insn->imm) {
9460 case BPF_FUNC_map_lookup_elem:
9461 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
9462 __bpf_call_base;
9463 continue;
9464 case BPF_FUNC_map_update_elem:
9465 insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
9466 __bpf_call_base;
9467 continue;
9468 case BPF_FUNC_map_delete_elem:
9469 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
9470 __bpf_call_base;
9471 continue;
84430d42
DB
9472 case BPF_FUNC_map_push_elem:
9473 insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
9474 __bpf_call_base;
9475 continue;
9476 case BPF_FUNC_map_pop_elem:
9477 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
9478 __bpf_call_base;
9479 continue;
9480 case BPF_FUNC_map_peek_elem:
9481 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
9482 __bpf_call_base;
9483 continue;
09772d92 9484 }
81ed18ab 9485
09772d92 9486 goto patch_call_imm;
81ed18ab
AS
9487 }
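 /* e.g.: when the map's ops provide map_gen_lookup(), the helper call was
 * already inlined above; otherwise the switch binds the call directly,
 * as in
 *
 *	insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - __bpf_call_base;
 *
 * which skips the generic helper dispatch at run time.
 */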
9488
5576b991
MKL
9489 if (prog->jit_requested && BITS_PER_LONG == 64 &&
9490 insn->imm == BPF_FUNC_jiffies64) {
9491 struct bpf_insn ld_jiffies_addr[2] = {
9492 BPF_LD_IMM64(BPF_REG_0,
9493 (unsigned long)&jiffies),
9494 };
9495
9496 insn_buf[0] = ld_jiffies_addr[0];
9497 insn_buf[1] = ld_jiffies_addr[1];
9498 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
9499 BPF_REG_0, 0);
9500 cnt = 3;
9501
9502 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
9503 cnt);
9504 if (!new_prog)
9505 return -ENOMEM;
9506
9507 delta += cnt - 1;
9508 env->prog = prog = new_prog;
9509 insn = new_prog->insnsi + i + delta;
9510 continue;
9511 }
9512
81ed18ab 9513patch_call_imm:
5e43f899 9514 fn = env->ops->get_func_proto(insn->imm, env->prog);
79741b3b
AS
9515 /* all functions that have prototype and verifier allowed
9516 * programs to call them, must be real in-kernel functions
9517 */
9518 if (!fn->func) {
61bd5218
JK
9519 verbose(env,
9520 "kernel subsystem misconfigured func %s#%d\n",
79741b3b
AS
9521 func_id_name(insn->imm), insn->imm);
9522 return -EFAULT;
e245c5c6 9523 }
79741b3b 9524 insn->imm = fn->func - __bpf_call_base;
e245c5c6 9525 }
e245c5c6 9526
d2e4c1e6
DB
9527 /* Since poke tab is now finalized, publish aux to tracker. */
9528 for (i = 0; i < prog->aux->size_poke_tab; i++) {
9529 map_ptr = prog->aux->poke_tab[i].tail_call.map;
9530 if (!map_ptr->ops->map_poke_track ||
9531 !map_ptr->ops->map_poke_untrack ||
9532 !map_ptr->ops->map_poke_run) {
9533 verbose(env, "bpf verifier is misconfigured\n");
9534 return -EINVAL;
9535 }
9536
9537 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
9538 if (ret < 0) {
9539 verbose(env, "tracking tail call prog failed\n");
9540 return ret;
9541 }
9542 }
9543
79741b3b
AS
9544 return 0;
9545}
e245c5c6 9546
58e2af8b 9547static void free_states(struct bpf_verifier_env *env)
f1bca824 9548{
58e2af8b 9549 struct bpf_verifier_state_list *sl, *sln;
f1bca824
AS
9550 int i;
9551
9f4686c4
AS
9552 sl = env->free_list;
9553 while (sl) {
9554 sln = sl->next;
9555 free_verifier_state(&sl->state, false);
9556 kfree(sl);
9557 sl = sln;
9558 }
51c39bb1 9559 env->free_list = NULL;
9f4686c4 9560
f1bca824
AS
9561 if (!env->explored_states)
9562 return;
9563
dc2a4ebc 9564 for (i = 0; i < state_htab_size(env); i++) {
f1bca824
AS
9565 sl = env->explored_states[i];
9566
a8f500af
AS
9567 while (sl) {
9568 sln = sl->next;
9569 free_verifier_state(&sl->state, false);
9570 kfree(sl);
9571 sl = sln;
9572 }
51c39bb1 9573 env->explored_states[i] = NULL;
f1bca824 9574 }
51c39bb1 9575}
f1bca824 9576
51c39bb1
AS
9577/* The verifier is using insn_aux_data[] to store temporary data during
9578 * verification and to store information for passes that run after the
9579 * verification like dead code sanitization. do_check_common() for subprogram N
9580 * may analyze many other subprograms. sanitize_insn_aux_data() clears all
9581 * temporary data after do_check_common() finds that subprogram N cannot be
9582 * verified independently. pass_cnt counts the number of times
9583 * do_check_common() was run and insn->aux->seen tells the pass number
9584 * insn_aux_data was touched. These variables are compared to clear temporary
 9585 * data from a failed pass. For testing and experiments, do_check_common()
 9586 * can be run multiple times even when a prior attempt to verify failed.
9587 */
9588static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
9589{
9590 struct bpf_insn *insn = env->prog->insnsi;
9591 struct bpf_insn_aux_data *aux;
9592 int i, class;
9593
9594 for (i = 0; i < env->prog->len; i++) {
9595 class = BPF_CLASS(insn[i].code);
9596 if (class != BPF_LDX && class != BPF_STX)
9597 continue;
9598 aux = &env->insn_aux_data[i];
9599 if (aux->seen != env->pass_cnt)
9600 continue;
9601 memset(aux, 0, offsetof(typeof(*aux), orig_idx));
9602 }
f1bca824
AS
9603}
9604
51c39bb1
AS
9605static int do_check_common(struct bpf_verifier_env *env, int subprog)
9606{
9607 struct bpf_verifier_state *state;
9608 struct bpf_reg_state *regs;
9609 int ret, i;
9610
9611 env->prev_linfo = NULL;
9612 env->pass_cnt++;
9613
9614 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
9615 if (!state)
9616 return -ENOMEM;
9617 state->curframe = 0;
9618 state->speculative = false;
9619 state->branches = 1;
9620 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
9621 if (!state->frame[0]) {
9622 kfree(state);
9623 return -ENOMEM;
9624 }
9625 env->cur_state = state;
9626 init_func_state(env, state->frame[0],
9627 BPF_MAIN_FUNC /* callsite */,
9628 0 /* frameno */,
9629 subprog);
9630
9631 regs = state->frame[state->curframe]->regs;
be8704ff 9632 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
51c39bb1
AS
9633 ret = btf_prepare_func_args(env, subprog, regs);
9634 if (ret)
9635 goto out;
9636 for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
9637 if (regs[i].type == PTR_TO_CTX)
9638 mark_reg_known_zero(env, regs, i);
9639 else if (regs[i].type == SCALAR_VALUE)
9640 mark_reg_unknown(env, regs, i);
9641 }
9642 } else {
9643 /* 1st arg to a function */
9644 regs[BPF_REG_1].type = PTR_TO_CTX;
9645 mark_reg_known_zero(env, regs, BPF_REG_1);
9646 ret = btf_check_func_arg_match(env, subprog, regs);
9647 if (ret == -EFAULT)
9648 /* unlikely verifier bug. abort.
9649 * ret == 0 and ret < 0 are sadly acceptable for
9650 * main() function due to backward compatibility.
9651 * Like socket filter program may be written as:
9652 * int bpf_prog(struct pt_regs *ctx)
9653 * and never dereference that ctx in the program.
9654 * 'struct pt_regs' is a type mismatch for socket
9655 * filter that should be using 'struct __sk_buff'.
9656 */
9657 goto out;
9658 }
9659
9660 ret = do_check(env);
9661out:
f59bbfc2
AS
9662 /* check for NULL is necessary, since cur_state can be freed inside
9663 * do_check() under memory pressure.
9664 */
9665 if (env->cur_state) {
9666 free_verifier_state(env->cur_state, true);
9667 env->cur_state = NULL;
9668 }
51c39bb1
AS
9669 while (!pop_stack(env, NULL, NULL));
9670 free_states(env);
9671 if (ret)
9672 /* clean aux data in case subprog was rejected */
9673 sanitize_insn_aux_data(env);
9674 return ret;
9675}
9676
9677/* Verify all global functions in a BPF program one by one based on their BTF.
9678 * All global functions must pass verification. Otherwise the whole program is rejected.
9679 * Consider:
9680 * int bar(int);
9681 * int foo(int f)
9682 * {
9683 * return bar(f);
9684 * }
9685 * int bar(int b)
9686 * {
9687 * ...
9688 * }
9689 * foo() will be verified first for R1=any_scalar_value. During verification it
9690 * will be assumed that bar() already verified successfully and call to bar()
9691 * from foo() will be checked for type match only. Later bar() will be verified
9692 * independently to check that it's safe for R1=any_scalar_value.
9693 */
9694static int do_check_subprogs(struct bpf_verifier_env *env)
9695{
9696 struct bpf_prog_aux *aux = env->prog->aux;
9697 int i, ret;
9698
9699 if (!aux->func_info)
9700 return 0;
9701
9702 for (i = 1; i < env->subprog_cnt; i++) {
9703 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
9704 continue;
9705 env->insn_idx = env->subprog_info[i].start;
9706 WARN_ON_ONCE(env->insn_idx == 0);
9707 ret = do_check_common(env, i);
9708 if (ret) {
9709 return ret;
9710 } else if (env->log.level & BPF_LOG_LEVEL) {
9711 verbose(env,
9712 "Func#%d is safe for any args that match its prototype\n",
9713 i);
9714 }
9715 }
9716 return 0;
9717}
9718
9719static int do_check_main(struct bpf_verifier_env *env)
9720{
9721 int ret;
9722
9723 env->insn_idx = 0;
9724 ret = do_check_common(env, 0);
9725 if (!ret)
9726 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
9727 return ret;
9728}
9729
9730
06ee7115
AS
9731static void print_verification_stats(struct bpf_verifier_env *env)
9732{
9733 int i;
9734
9735 if (env->log.level & BPF_LOG_STATS) {
9736 verbose(env, "verification time %lld usec\n",
9737 div_u64(env->verification_time, 1000));
9738 verbose(env, "stack depth ");
9739 for (i = 0; i < env->subprog_cnt; i++) {
9740 u32 depth = env->subprog_info[i].stack_depth;
9741
9742 verbose(env, "%d", depth);
9743 if (i + 1 < env->subprog_cnt)
9744 verbose(env, "+");
9745 }
9746 verbose(env, "\n");
9747 }
9748 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
9749 "total_states %d peak_states %d mark_read %d\n",
9750 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
9751 env->max_states_per_insn, env->total_states,
9752 env->peak_states, env->longest_mark_read_walk);
f1bca824
AS
9753}
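/* Sample output (numbers are made up for illustration) when log_level
 * includes BPF_LOG_STATS:
 *
 *	verification time 1234 usec
 *	stack depth 64+32
 *	processed 2000 insns (limit 1000000) max_states_per_insn 4
 *	total_states 100 peak_states 80 mark_read 20
 */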
9754
27ae7997
MKL
9755static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
9756{
9757 const struct btf_type *t, *func_proto;
9758 const struct bpf_struct_ops *st_ops;
9759 const struct btf_member *member;
9760 struct bpf_prog *prog = env->prog;
9761 u32 btf_id, member_idx;
9762 const char *mname;
9763
9764 btf_id = prog->aux->attach_btf_id;
9765 st_ops = bpf_struct_ops_find(btf_id);
9766 if (!st_ops) {
9767 verbose(env, "attach_btf_id %u is not a supported struct\n",
9768 btf_id);
9769 return -ENOTSUPP;
9770 }
9771
9772 t = st_ops->type;
9773 member_idx = prog->expected_attach_type;
9774 if (member_idx >= btf_type_vlen(t)) {
9775 verbose(env, "attach to invalid member idx %u of struct %s\n",
9776 member_idx, st_ops->name);
9777 return -EINVAL;
9778 }
9779
9780 member = &btf_type_member(t)[member_idx];
9781 mname = btf_name_by_offset(btf_vmlinux, member->name_off);
9782 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
9783 NULL);
9784 if (!func_proto) {
9785 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
9786 mname, member_idx, st_ops->name);
9787 return -EINVAL;
9788 }
9789
9790 if (st_ops->check_member) {
9791 int err = st_ops->check_member(t, member);
9792
9793 if (err) {
9794 verbose(env, "attach to unsupported member %s of struct %s\n",
9795 mname, st_ops->name);
9796 return err;
9797 }
9798 }
9799
9800 prog->aux->attach_func_proto = func_proto;
9801 prog->aux->attach_func_name = mname;
9802 env->ops = st_ops->verifier_ops;
9803
9804 return 0;
9805}
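/* Usage sketch (values hypothetical): to implement, say, the
 * .ssthresh member of struct tcp_congestion_ops, userspace loads a
 * BPF_PROG_TYPE_STRUCT_OPS program with
 *	attr.attach_btf_id	  = BTF id of struct tcp_congestion_ops;
 *	attr.expected_attach_type = member index of ssthresh within it;
 * and the lookups above resolve that pair to the member's func_proto.
 */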
6ba43b76
KS
9806#define SECURITY_PREFIX "security_"
9807
9808static int check_attach_modify_return(struct bpf_verifier_env *env)
9809{
9810 struct bpf_prog *prog = env->prog;
9811 unsigned long addr = (unsigned long) prog->aux->trampoline->func.addr;
9812
6ba43b76
KS
9813 /* This is expected to be cleaned up in the future by the KRSI effort,
9814 * which introduces the LSM_HOOK macro to clean up lsm_hooks.h.
9815 */
69191754
KS
9816 if (within_error_injection_list(addr) ||
9817 !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name,
9818 sizeof(SECURITY_PREFIX) - 1))
6ba43b76 9819 return 0;
6ba43b76
KS
9820
9821 verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n",
9822 prog->aux->attach_btf_id, prog->aux->attach_func_name);
9823
9824 return -EINVAL;
9825}
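/* Caller-side sketch (assumes libbpf's "fmod_ret/" section naming;
 * the hook name is only an example):
 *
 *	SEC("fmod_ret/security_socket_connect")
 *	int BPF_PROG(deny_connect, struct socket *sock,
 *		     struct sockaddr *address, int addrlen)
 *	{
 *		return -EPERM;
 *	}
 *
 * "security_*" targets pass the prefix check above; any other target
 * must be on the error-injection list.
 */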
27ae7997 9826
38207291
MKL
9827static int check_attach_btf_id(struct bpf_verifier_env *env)
9828{
9829 struct bpf_prog *prog = env->prog;
be8704ff 9830 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
5b92a28a 9831 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
38207291 9832 u32 btf_id = prog->aux->attach_btf_id;
f1b9509c 9833 const char prefix[] = "btf_trace_";
5b92a28a 9834 int ret = 0, subprog = -1, i;
fec56f58 9835 struct bpf_trampoline *tr;
38207291 9836 const struct btf_type *t;
5b92a28a 9837 bool conservative = true;
38207291 9838 const char *tname;
5b92a28a 9839 struct btf *btf;
fec56f58 9840 long addr;
5b92a28a 9841 u64 key;
38207291 9842
27ae7997
MKL
9843 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
9844 return check_struct_ops_btf_id(env);
9845
be8704ff 9846 if (prog->type != BPF_PROG_TYPE_TRACING && !prog_extension)
f1b9509c 9847 return 0;
38207291 9848
f1b9509c
AS
9849 if (!btf_id) {
9850 verbose(env, "Tracing programs must provide btf_id\n");
9851 return -EINVAL;
9852 }
5b92a28a
AS
9853 btf = bpf_prog_get_target_btf(prog);
9854 if (!btf) {
9855 verbose(env,
9856 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
9857 return -EINVAL;
9858 }
9859 t = btf_type_by_id(btf, btf_id);
f1b9509c
AS
9860 if (!t) {
9861 verbose(env, "attach_btf_id %u is invalid\n", btf_id);
9862 return -EINVAL;
9863 }
5b92a28a 9864 tname = btf_name_by_offset(btf, t->name_off);
f1b9509c
AS
9865 if (!tname) {
9866 verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
9867 return -EINVAL;
9868 }
5b92a28a
AS
9869 if (tgt_prog) {
9870 struct bpf_prog_aux *aux = tgt_prog->aux;
9871
9872 for (i = 0; i < aux->func_info_cnt; i++)
9873 if (aux->func_info[i].type_id == btf_id) {
9874 subprog = i;
9875 break;
9876 }
9877 if (subprog == -1) {
9878 verbose(env, "Subprog %s doesn't exist\n", tname);
9879 return -EINVAL;
9880 }
9881 conservative = aux->func_info_aux[subprog].unreliable;
be8704ff
AS
9882 if (prog_extension) {
9883 if (conservative) {
9884 verbose(env,
9885 "Cannot replace static functions\n");
9886 return -EINVAL;
9887 }
9888 if (!prog->jit_requested) {
9889 verbose(env,
9890 "Extension programs should be JITed\n");
9891 return -EINVAL;
9892 }
9893 env->ops = bpf_verifier_ops[tgt_prog->type];
9894 }
9895 if (!tgt_prog->jited) {
9896 verbose(env, "Can only attach to JITed progs\n");
9897 return -EINVAL;
9898 }
9899 if (tgt_prog->type == prog->type) {
9900 /* Cannot fentry/fexit another fentry/fexit program.
9901 * Cannot attach a program extension to another extension.
9902 * It's ok to attach fentry/fexit to an extension program.
9903 */
9904 verbose(env, "Cannot recursively attach\n");
9905 return -EINVAL;
9906 }
9907 if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
9908 prog_extension &&
9909 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
9910 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
9911 /* Program extensions can extend all program types
9912 * except fentry/fexit. The reason is the following.
9913 * The fentry/fexit programs are used for performance
9914 * analysis and stats, and can be attached to any program
9915 * type except themselves. When an extension program
9916 * replaces an XDP function, it is necessary to allow
9917 * performance analysis of all functions: both the
9918 * original XDP program and its extension. Hence
9919 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
9920 * allowed. If extending fentry/fexit were allowed, it
9921 * would be possible to create a long call chain
9922 * fentry->extension->fentry->extension beyond a
9923 * reasonable stack size. Hence extending fentry is not
9924 * allowed.
9925 */
9926 verbose(env, "Cannot extend fentry/fexit\n");
9927 return -EINVAL;
9928 }
5b92a28a
AS
9929 key = ((u64)aux->id) << 32 | btf_id;
9930 } else {
be8704ff
AS
9931 if (prog_extension) {
9932 verbose(env, "Cannot replace kernel functions\n");
9933 return -EINVAL;
9934 }
5b92a28a
AS
9935 key = btf_id;
9936 }
f1b9509c
AS
9937
9938 switch (prog->expected_attach_type) {
9939 case BPF_TRACE_RAW_TP:
5b92a28a
AS
9940 if (tgt_prog) {
9941 verbose(env,
9942 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
9943 return -EINVAL;
9944 }
38207291
MKL
9945 if (!btf_type_is_typedef(t)) {
9946 verbose(env, "attach_btf_id %u is not a typedef\n",
9947 btf_id);
9948 return -EINVAL;
9949 }
f1b9509c 9950 if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
38207291
MKL
9951 verbose(env, "attach_btf_id %u points to wrong type name %s\n",
9952 btf_id, tname);
9953 return -EINVAL;
9954 }
9955 tname += sizeof(prefix) - 1;
5b92a28a 9956 t = btf_type_by_id(btf, t->type);
38207291
MKL
9957 if (!btf_type_is_ptr(t))
9958 /* should never happen in a valid vmlinux build */
9959 return -EINVAL;
5b92a28a 9960 t = btf_type_by_id(btf, t->type);
38207291
MKL
9961 if (!btf_type_is_func_proto(t))
9962 /* should never happen in a valid vmlinux build */
9963 return -EINVAL;
9964
9965 /* remember two read-only pointers that are valid for
9966 * the lifetime of the kernel
9967 */
9968 prog->aux->attach_func_name = tname;
9969 prog->aux->attach_func_proto = t;
9970 prog->aux->attach_btf_trace = true;
f1b9509c 9971 return 0;
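/* Example of the chain walked above (vmlinux BTF; names are
 * illustrative): for the sched_switch tracepoint, attach_btf_id
 * names
 *	typedef void (*btf_trace_sched_switch)(void *, bool,
 *		struct task_struct *, struct task_struct *);
 * and the typedef -> pointer -> func_proto walk yields the
 * tracepoint's prototype.
 */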
be8704ff
AS
9972 default:
9973 if (!prog_extension)
9974 return -EINVAL;
9975 /* fallthrough */
ae240823 9976 case BPF_MODIFY_RETURN:
fec56f58
AS
9977 case BPF_TRACE_FENTRY:
9978 case BPF_TRACE_FEXIT:
9979 if (!btf_type_is_func(t)) {
9980 verbose(env, "attach_btf_id %u is not a function\n",
9981 btf_id);
9982 return -EINVAL;
9983 }
be8704ff
AS
9984 if (prog_extension &&
9985 btf_check_type_match(env, prog, btf, t))
9986 return -EINVAL;
5b92a28a 9987 t = btf_type_by_id(btf, t->type);
fec56f58
AS
9988 if (!btf_type_is_func_proto(t))
9989 return -EINVAL;
5b92a28a 9990 tr = bpf_trampoline_lookup(key);
fec56f58
AS
9991 if (!tr)
9992 return -ENOMEM;
9993 prog->aux->attach_func_name = tname;
5b92a28a 9994 /* t is either a vmlinux type or another program's type */
fec56f58
AS
9995 prog->aux->attach_func_proto = t;
9996 mutex_lock(&tr->mutex);
9997 if (tr->func.addr) {
9998 prog->aux->trampoline = tr;
9999 goto out;
10000 }
5b92a28a
AS
10001 if (tgt_prog && conservative) {
10002 prog->aux->attach_func_proto = NULL;
10003 t = NULL;
10004 }
10005 ret = btf_distill_func_proto(&env->log, btf, t,
fec56f58
AS
10006 tname, &tr->func.model);
10007 if (ret < 0)
10008 goto out;
5b92a28a 10009 if (tgt_prog) {
e9eeec58
YS
10010 if (subprog == 0)
10011 addr = (long) tgt_prog->bpf_func;
10012 else
10013 addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
5b92a28a
AS
10014 } else {
10015 addr = kallsyms_lookup_name(tname);
10016 if (!addr) {
10017 verbose(env,
10018 "The address of function %s cannot be found\n",
10019 tname);
10020 ret = -ENOENT;
10021 goto out;
10022 }
fec56f58
AS
10023 }
10024 tr->func.addr = (void *)addr;
10025 prog->aux->trampoline = tr;
6ba43b76
KS
10026
10027 if (prog->expected_attach_type == BPF_MODIFY_RETURN)
10028 ret = check_attach_modify_return(env);
fec56f58
AS
10029out:
10030 mutex_unlock(&tr->mutex);
10031 if (ret)
10032 bpf_trampoline_put(tr);
10033 return ret;
38207291 10034 }
38207291
MKL
10035}
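/* Note on the trampoline key computed above: for a vmlinux target the
 * key is just btf_id; for another program's subprog it is
 * ((u64)tgt_prog->aux->id << 32) | btf_id, so all programs attaching
 * to the same target function share one bpf_trampoline.
 */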
10036
838e9690
YS
10037int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
10038 union bpf_attr __user *uattr)
51580e79 10039{
06ee7115 10040 u64 start_time = ktime_get_ns();
58e2af8b 10041 struct bpf_verifier_env *env;
b9193c1b 10042 struct bpf_verifier_log *log;
9e4c24e7 10043 int i, len, ret = -EINVAL;
e2ae4ca2 10044 bool is_priv;
51580e79 10045
eba0c929
AB
10046 /* with no verifier ops compiled in, no program type is valid */
10047 if (ARRAY_SIZE(bpf_verifier_ops) == 0)
10048 return -EINVAL;
10049
58e2af8b 10050 /* 'struct bpf_verifier_env' can be global, but since it's not small,
cbd35700
AS
10051 * allocate/free it every time bpf_check() is called
10052 */
58e2af8b 10053 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
cbd35700
AS
10054 if (!env)
10055 return -ENOMEM;
61bd5218 10056 log = &env->log;
cbd35700 10057
9e4c24e7 10058 len = (*prog)->len;
fad953ce 10059 env->insn_aux_data =
9e4c24e7 10060 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
3df126f3
JK
10061 ret = -ENOMEM;
10062 if (!env->insn_aux_data)
10063 goto err_free_env;
9e4c24e7
JK
10064 for (i = 0; i < len; i++)
10065 env->insn_aux_data[i].orig_idx = i;
9bac3d6d 10066 env->prog = *prog;
00176a34 10067 env->ops = bpf_verifier_ops[env->prog->type];
45a73c17 10068 is_priv = capable(CAP_SYS_ADMIN);
0246e64d 10069
8580ac94
AS
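/* classic double-checked locking: btf_vmlinux is parsed at most once
 * and then reused by every subsequent bpf_check() call.
 */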
10070 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
10071 mutex_lock(&bpf_verifier_lock);
10072 if (!btf_vmlinux)
10073 btf_vmlinux = btf_parse_vmlinux();
10074 mutex_unlock(&bpf_verifier_lock);
10075 }
10076
cbd35700 10077 /* grab the mutex to protect a few globals used by the verifier */
45a73c17
AS
10078 if (!is_priv)
10079 mutex_lock(&bpf_verifier_lock);
cbd35700
AS
10080
10081 if (attr->log_level || attr->log_buf || attr->log_size) {
10082 /* user requested verbose verifier output
10083 * and supplied buffer to store the verification trace
10084 */
e7bf8249
JK
10085 log->level = attr->log_level;
10086 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
10087 log->len_total = attr->log_size;
cbd35700
AS
10088
10089 ret = -EINVAL;
e7bf8249 10090 /* log attributes have to be sane */
7a9f5c65 10091 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
06ee7115 10092 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
3df126f3 10093 goto err_unlock;
cbd35700 10094 }
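/* Loader-side sketch (hedged; field names as in union bpf_attr):
 *
 *	char buf[1 << 20];
 *
 *	attr.log_level = 1;
 *	attr.log_buf = (__u64)(unsigned long)buf;
 *	attr.log_size = sizeof(buf);
 *
 * log_size must be >= 128 and <= UINT_MAX/4 to pass the sanity check
 * above.
 */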
1ad2f583 10095
8580ac94
AS
10096 if (IS_ERR(btf_vmlinux)) {
10097 /* Either gcc or pahole or the kernel is broken. */
10098 verbose(env, "in-kernel BTF is malformed\n");
10099 ret = PTR_ERR(btf_vmlinux);
38207291 10100 goto skip_full_check;
8580ac94
AS
10101 }
10102
1ad2f583
DB
10103 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
10104 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
e07b98d9 10105 env->strict_alignment = true;
e9ee9efc
DM
10106 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
10107 env->strict_alignment = false;
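/* net effect of the two flags: strict alignment is on if requested or
 * if the arch has no efficient unaligned access, and
 * BPF_F_ANY_ALIGNMENT (checked last) switches it back off, e.g. for
 * test runs on strict-alignment archs.
 */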
cbd35700 10108
e2ae4ca2
JK
10109 env->allow_ptr_leaks = is_priv;
10110
10d274e8
AS
10111 if (is_priv)
10112 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
10113
f4e3ec0d
JK
10114 ret = replace_map_fd_with_map_ptr(env);
10115 if (ret < 0)
10116 goto skip_full_check;
10117
cae1927c 10118 if (bpf_prog_is_dev_bound(env->prog->aux)) {
a40a2632 10119 ret = bpf_prog_offload_verifier_prep(env->prog);
ab3f0063 10120 if (ret)
f4e3ec0d 10121 goto skip_full_check;
ab3f0063
JK
10122 }
10123
dc2a4ebc 10124 env->explored_states = kvcalloc(state_htab_size(env),
58e2af8b 10125 sizeof(struct bpf_verifier_state_list *),
f1bca824
AS
10126 GFP_USER);
10127 ret = -ENOMEM;
10128 if (!env->explored_states)
10129 goto skip_full_check;
10130
d9762e84 10131 ret = check_subprogs(env);
475fb78f
AS
10132 if (ret < 0)
10133 goto skip_full_check;
10134
c454a46b 10135 ret = check_btf_info(env, attr, uattr);
838e9690
YS
10136 if (ret < 0)
10137 goto skip_full_check;
10138
be8704ff
AS
10139 ret = check_attach_btf_id(env);
10140 if (ret)
10141 goto skip_full_check;
10142
d9762e84
MKL
10143 ret = check_cfg(env);
10144 if (ret < 0)
10145 goto skip_full_check;
10146
51c39bb1
AS
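/* verify all global subprogs first, then the main program; the first
 * failure aborts the load.
 */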
10147 ret = do_check_subprogs(env);
10148 ret = ret ?: do_check_main(env);
cbd35700 10149
c941ce9c
QM
10150 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
10151 ret = bpf_prog_offload_finalize(env);
10152
0246e64d 10153skip_full_check:
51c39bb1 10154 kvfree(env->explored_states);
0246e64d 10155
c131187d 10156 if (ret == 0)
9b38c405 10157 ret = check_max_stack_depth(env);
c131187d 10158
9b38c405 10159 /* instruction rewrites happen after this point */
e2ae4ca2
JK
10160 if (is_priv) {
10161 if (ret == 0)
10162 opt_hard_wire_dead_code_branches(env);
52875a04
JK
10163 if (ret == 0)
10164 ret = opt_remove_dead_code(env);
a1b14abc
JK
10165 if (ret == 0)
10166 ret = opt_remove_nops(env);
52875a04
JK
10167 } else {
10168 if (ret == 0)
10169 sanitize_dead_code(env);
e2ae4ca2
JK
10170 }
10171
9bac3d6d
AS
10172 if (ret == 0)
10173 /* program is valid, convert *(u32*)(ctx + off) accesses */
10174 ret = convert_ctx_accesses(env);
10175
e245c5c6 10176 if (ret == 0)
79741b3b 10177 ret = fixup_bpf_calls(env);
e245c5c6 10178
a4b1d3c1
JW
10179 /* do 32-bit optimization after insn patching is done, so those patched
10180 * insns can be handled correctly.
10181 */
d6c2308c
JW
10182 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
10183 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
10184 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
10185 : false;
a4b1d3c1
JW
10186 }
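/* if the JIT does not implicitly zero the upper 32 bits of 32-bit
 * ops, explicit zero-extension insns were just patched in, and
 * verifier_zext tells the JIT it can rely on them.
 */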
10187
1ea47e01
AS
10188 if (ret == 0)
10189 ret = fixup_call_args(env);
10190
06ee7115
AS
10191 env->verification_time = ktime_get_ns() - start_time;
10192 print_verification_stats(env);
10193
a2a7d570 10194 if (log->level && bpf_verifier_log_full(log))
cbd35700 10195 ret = -ENOSPC;
a2a7d570 10196 if (log->level && !log->ubuf) {
cbd35700 10197 ret = -EFAULT;
a2a7d570 10198 goto err_release_maps;
cbd35700
AS
10199 }
10200
0246e64d
AS
10201 if (ret == 0 && env->used_map_cnt) {
10202 /* if program passed verifier, update used_maps in bpf_prog_info */
9bac3d6d
AS
10203 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
10204 sizeof(env->used_maps[0]),
10205 GFP_KERNEL);
0246e64d 10206
9bac3d6d 10207 if (!env->prog->aux->used_maps) {
0246e64d 10208 ret = -ENOMEM;
a2a7d570 10209 goto err_release_maps;
0246e64d
AS
10210 }
10211
9bac3d6d 10212 memcpy(env->prog->aux->used_maps, env->used_maps,
0246e64d 10213 sizeof(env->used_maps[0]) * env->used_map_cnt);
9bac3d6d 10214 env->prog->aux->used_map_cnt = env->used_map_cnt;
0246e64d
AS
10215
10216 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
10217 * bpf_ld_imm64 instructions
10218 */
10219 convert_pseudo_ld_imm64(env);
10220 }
cbd35700 10221
ba64e7d8
YS
10222 if (ret == 0)
10223 adjust_btf_func(env);
10224
a2a7d570 10225err_release_maps:
9bac3d6d 10226 if (!env->prog->aux->used_maps)
0246e64d 10227 /* if we didn't copy map pointers into bpf_prog_info, release
ab7f5bf0 10228 * them now. Otherwise free_used_maps() will release them.
0246e64d
AS
10229 */
10230 release_maps(env);
9bac3d6d 10231 *prog = env->prog;
3df126f3 10232err_unlock:
45a73c17
AS
10233 if (!is_priv)
10234 mutex_unlock(&bpf_verifier_lock);
3df126f3
JK
10235 vfree(env->insn_aux_data);
10236err_free_env:
10237 kfree(env);
51580e79
AS
10238 return ret;
10239}