// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insns, which may be hit even if the total
 * number of insns is less than 4K but there are too many branches that
 * change stack/regs. The number of 'branches to be analyzed' is limited
 * to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
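
/* Illustrative sketch (added comment, not part of the original file): a
 * program fragment whose reference lifetime the verifier tracks as described
 * above. Helper arguments are simplified for illustration.
 *
 *    sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), -1, 0);
 *    // R0 is PTR_TO_SOCKET_OR_NULL with a freshly acquired reference id
 *    if (sk) {
 *        // true branch: sk's register state becomes PTR_TO_SOCKET
 *        bpf_sk_release(sk);   // reference released; state is clean again
 *    }
 *    // a path that skips bpf_sk_release() would leave an unreleased
 *    // reference at bpf_exit and make the verifier reject the program
 */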

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
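
/* Illustrative note (added comment): map_key_state packs a "seen" flag, a
 * "poison" flag, and the last constant key immediate into one u64, e.g. for
 * tracking constant tail-call indices. Recording a constant key 5 stores
 * 5 | BPF_MAP_KEY_SEEN; a non-constant key would be recorded as
 * BPF_MAP_KEY_POISON | BPF_MAP_KEY_SEEN, and bpf_map_key_store() keeps the
 * poison bit sticky across later updates.
 */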

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	s64 msize_smax_value;
	u64 msize_umax_value;
	int ref_obj_id;
	int func_id;
	u32 btf_id;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (log->level == BPF_LOG_KERNEL) {
		pr_err("BPF:%s\n", log->kbuf);
		return;
	}
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_XDP_SOCK;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
	       type == PTR_TO_TCP_SOCK_OR_NULL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
		map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCKET_OR_NULL ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_TCP_SOCK_OR_NULL;
}

static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCK_COMMON;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release;
}

static bool is_acquire_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_lookup_tcp ||
		func_id == BPF_FUNC_sk_lookup_udp ||
		func_id == BPF_FUNC_skc_lookup_tcp;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
		func_id == BPF_FUNC_sk_fullsock;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
	[PTR_TO_SOCK_COMMON]	= "sock_common",
	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
	[PTR_TO_TCP_SOCK]	= "tcp_sock",
	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
	[PTR_TO_TP_BUFFER]	= "tp_buffer",
	[PTR_TO_XDP_SOCK]	= "xdp_sock",
	[PTR_TO_BTF_ID]		= "ptr_",
};

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

const char *kernel_type_name(u32 id)
{
	return btf_name_by_offset(btf_vmlinux,
				  btf_type_by_id(btf_vmlinux, id)->name_off);
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			if (t == PTR_TO_BTF_ID)
				verbose(env, "%s", kernel_type_name(reg->btf_id));
			verbose(env, "(id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t))
				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (state->stack[i].slot_type[0] == STACK_SPILL) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", reg_type_str[t]);
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "=%s", types_buf);
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	verbose(env, "\n");
}
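
/* Illustrative note (added comment): a state line produced by the function
 * above looks roughly like the following, where "_w" is liveness, "inv" a
 * scalar, "fp0" the frame pointer, and "fp-8" a stack slot whose eight
 * bytes are printed per-byte as spilled-reg/misc/zero/invalid:
 *
 *   frame1: R1_w=ctx(id=0,off=0,imm=0) R2_w=inv5 R10=fp0 fp-8_w=mmmm????
 */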

#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
static int copy_##NAME##_state(struct bpf_func_state *dst,		\
			       const struct bpf_func_state *src)	\
{									\
	if (!src->FIELD)						\
		return 0;						\
	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
		/* internal bug, make state invalid to reject the program */ \
		memset(dst, 0, sizeof(*dst));				\
		return -EFAULT;						\
	}								\
	memcpy(dst->FIELD, src->FIELD,					\
	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
	return 0;							\
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN
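
/* For reference (added comment): the first invocation above expands to
 * roughly the following function:
 *
 *   static int copy_reference_state(struct bpf_func_state *dst,
 *                                   const struct bpf_func_state *src)
 *   {
 *       if (!src->refs)
 *           return 0;
 *       if (WARN_ON_ONCE(dst->acquired_refs < src->acquired_refs)) {
 *           memset(dst, 0, sizeof(*dst));
 *           return -EFAULT;
 *       }
 *       memcpy(dst->refs, src->refs,
 *              sizeof(*src->refs) * (src->acquired_refs / 1));
 *       return 0;
 *   }
 */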

#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
				  bool copy_old)			\
{									\
	u32 old_size = state->COUNT;					\
	struct bpf_##NAME##_state *new_##FIELD;				\
	int slot = size / SIZE;						\
									\
	if (size <= old_size || !size) {				\
		if (copy_old)						\
			return 0;					\
		state->COUNT = slot * SIZE;				\
		if (!size && old_size) {				\
			kfree(state->FIELD);				\
			state->FIELD = NULL;				\
		}							\
		return 0;						\
	}								\
	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
				    GFP_KERNEL);			\
	if (!new_##FIELD)						\
		return -ENOMEM;						\
	if (copy_old) {							\
		if (state->FIELD)					\
			memcpy(new_##FIELD, state->FIELD,		\
			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
		memset(new_##FIELD + old_size / SIZE, 0,		\
		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
	}								\
	state->COUNT = slot * SIZE;					\
	kfree(state->FIELD);						\
	state->FIELD = new_##FIELD;					\
	return 0;							\
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. check_stack_write() access from
 * the program calls into realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which realloc_stack_state() copies over. It points to previous
 * bpf_verifier_state which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
			      int refs_size, bool copy_old)
{
	int err = realloc_reference_state(state, refs_size, copy_old);
	if (err)
		return err;
	return realloc_stack_state(state, stack_size, copy_old);
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = realloc_reference_state(state, state->acquired_refs + 1, true);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}
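
/* Usage sketch (added comment; simplified from how helper-call handling
 * uses this): when a helper listed in is_acquire_function() returns
 * non-NULL, the call site is expected to do something like
 *
 *   int id = acquire_reference_state(env, insn_idx);
 *   if (id < 0)
 *       return id;
 *   regs[BPF_REG_0].id = id;
 *   regs[BPF_REG_0].ref_obj_id = id;
 *
 * so that NULL-check and release handling can find the reference later.
 */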

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}

static int transfer_reference_state(struct bpf_func_state *dst,
				    struct bpf_func_state *src)
{
	int err = realloc_reference_state(dst, src->acquired_refs, false);
	if (err)
		return err;
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return 0;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
				 false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
	int i, err;

	if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
		kfree(dst_state->jmp_history);
		dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
		if (!dst_state->jmp_history)
			return -ENOMEM;
	}
	memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames then src frame, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 *    instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 *    a new state for a sequence of branches and all such current
		 *    and cloned states will be pointing to a single parent state
		 *    which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;
}

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}
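
/* Worked example (added comment): with var_off = (value 0x8, mask 0x7),
 * bit 3 is known to be one and bits 0-2 are unknown, so the code above can
 * raise umin_value to at least 0x8 (all unknown bits zero) and lower
 * umax_value to at most 0xf (all unknown bits one), whatever the old
 * bounds were.
 */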

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_intersect(reg->var_off,
				      tnum_range(reg->umin_value,
						 reg->umax_value));
}

static void __reg_bound_offset32(struct bpf_reg_state *reg)
{
	u64 mask = 0xffffFFFF;
	struct tnum range = tnum_range(reg->umin_value & mask,
				       reg->umax_value & mask);
	struct tnum lo32 = tnum_cast(reg->var_off, 4);
	struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);

	reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
}

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg)
{
	/*
	 * Clear type, id, off, and union(map_ptr, range) and
	 * padding between 'type' and union
	 */
	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
	reg->type = SCALAR_VALUE;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
		       true : false;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_unknown(env, regs + regno);
}

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg)
{
	__mark_reg_unknown(env, reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_not_init(env, regs + regno);
}

#define DEF_NOT_SUBREG	(0)
static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
		regs[i].subreg_def = DEF_NOT_SUBREG;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return 0;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}

static int check_subprogs(struct bpf_verifier_env *env)
{
	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret < 0)
		return ret;

	/* determine subprog starts. The end is one before the next starts */
	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		if (!env->allow_ptr_leaks) {
			verbose(env, "function calls to other bpf functions are allowed for root only\n");
			return -EPERM;
		}
		ret = add_subprog(env, i + insn[i].imm + 1);
		if (ret < 0)
			return ret;
	}

	/* Add a fake 'exit' subprog which could simplify subprog iteration
	 * logic. 'subprog_cnt' should not be increased.
	 */
	subprog[env->subprog_cnt].start = insn_cnt;

	if (env->log.level & BPF_LOG_LEVEL2)
		for (i = 0; i < env->subprog_cnt; i++)
			verbose(env, "func#%d @%d\n", i, subprog[i].start);

	/* now check that all jumps are within the same subprog */
	subprog_start = subprog[cur_subprog].start;
	subprog_end = subprog[cur_subprog + 1].start;
	for (i = 0; i < insn_cnt; i++) {
		u8 code = insn[i].code;

		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
			goto next;
		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
			goto next;
		off = i + insn[i].off + 1;
		if (off < subprog_start || off >= subprog_end) {
			verbose(env, "jump out of range from insn %d to %d\n", i, off);
			return -EINVAL;
		}
next:
		if (i == subprog_end - 1) {
			/* to avoid fall-through from one subprog into another
			 * the last insn of the subprog should be either exit
			 * or unconditional jump back
			 */
			if (code != (BPF_JMP | BPF_EXIT) &&
			    code != (BPF_JMP | BPF_JA)) {
				verbose(env, "last insn is not an exit or jmp\n");
				return -EINVAL;
			}
			subprog_start = subprog_end;
			cur_subprog++;
			if (cur_subprog < env->subprog_cnt)
				subprog_end = subprog[cur_subprog + 1].start;
		}
	}
	return 0;
}
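
/* Illustrative layout (added comment): for a program whose main function is
 * insns 0-4 (ending in BPF_EXIT) and which bpf-to-bpf calls a function
 * starting at insn 5, check_subprogs() records subprog starts [0, 5] plus
 * the fake 'exit' subprog at insn_cnt, then verifies that no jump in
 * insns 0-4 targets insns 5.. and vice versa.
 */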

/* Parentage chain of this register (or stack slot) should take care of all
 * issues like callee-saved registers, stack slot allocation time, etc.
 */
static int mark_reg_read(struct bpf_verifier_env *env,
			 const struct bpf_reg_state *state,
			 struct bpf_reg_state *parent, u8 flag)
{
	bool writes = parent == state->parent; /* Observe write marks */
	int cnt = 0;

	while (parent) {
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->live & REG_LIVE_WRITTEN)
			break;
		if (parent->live & REG_LIVE_DONE) {
			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
				reg_type_str[parent->type],
				parent->var_off.value, parent->off);
			return -EFAULT;
		}
		/* The first condition is more likely to be true than the
		 * second, so check it first.
		 */
		if ((parent->live & REG_LIVE_READ) == flag ||
		    parent->live & REG_LIVE_READ64)
			/* The parentage chain never changes and
			 * this parent was already marked as LIVE_READ.
			 * There is no need to keep walking the chain again and
			 * keep re-marking all parents as LIVE_READ.
			 * This case happens when the same register is read
			 * multiple times without writes into it in-between.
			 * Also, if parent has the stronger REG_LIVE_READ64 set,
			 * then no need to set the weak REG_LIVE_READ32.
			 */
			break;
		/* ... then we depend on parent's value */
		parent->live |= flag;
		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
		if (flag == REG_LIVE_READ64)
			parent->live &= ~REG_LIVE_READ32;
		state = parent;
		parent = state->parent;
		writes = true;
		cnt++;
	}

	if (env->longest_mark_read_walk < cnt)
		env->longest_mark_read_walk = cnt;
	return 0;
}

/* This function is supposed to be used by the following 32-bit optimization
 * code only. It returns TRUE if the source or destination register operates
 * on 64-bit, otherwise return FALSE.
 */
static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
{
	u8 code, class, op;

	code = insn->code;
	class = BPF_CLASS(code);
	op = BPF_OP(code);
	if (class == BPF_JMP) {
		/* BPF_EXIT for "main" will reach here. Return TRUE
		 * conservatively.
		 */
		if (op == BPF_EXIT)
			return true;
		if (op == BPF_CALL) {
			/* BPF to BPF call will reach here because of marking
			 * caller saved clobber with DST_OP_NO_MARK, for which
			 * we don't care about the register def because they
			 * are anyway marked as NOT_INIT already.
			 */
			if (insn->src_reg == BPF_PSEUDO_CALL)
				return false;
			/* Helper call will reach here because of arg type
			 * check, conservatively return TRUE.
			 */
			if (t == SRC_OP)
				return true;

			return false;
		}
	}

	if (class == BPF_ALU64 || class == BPF_JMP ||
	    /* BPF_END always use BPF_ALU class. */
	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
		return true;

	if (class == BPF_ALU || class == BPF_JMP32)
		return false;

	if (class == BPF_LDX) {
		if (t != SRC_OP)
			return BPF_SIZE(code) == BPF_DW;
		/* LDX source must be ptr. */
		return true;
	}

	if (class == BPF_STX) {
		if (reg->type != SCALAR_VALUE)
			return true;
		return BPF_SIZE(code) == BPF_DW;
	}

	if (class == BPF_LD) {
		u8 mode = BPF_MODE(code);

		/* LD_IMM64 */
		if (mode == BPF_IMM)
			return true;

		/* Both LD_IND and LD_ABS return 32-bit data. */
		if (t != SRC_OP)
			return false;

		/* Implicit ctx ptr. */
		if (regno == BPF_REG_6)
			return true;

		/* Explicit source could be any width. */
		return true;
	}

	if (class == BPF_ST)
		/* The only source register for BPF_ST is a ptr. */
		return true;

	/* Conservatively return true at default. */
	return true;
}
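
/* Quick reference (added comment), assuming the usual insn encodings:
 *   BPF_ALU64_REG(BPF_ADD, r1, r2)  -> 64-bit use of both r1 and r2
 *   BPF_ALU32_REG(BPF_MOV, r1, r2)  -> 32-bit def of r1 (subregister write)
 *   BPF_LDX_MEM(BPF_W, r1, r2, 0)   -> 32-bit def of r1, 64-bit use of r2
 * The 32-bit defs are what mark_insn_zext()/zext_dst later patch up.
 */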

/* Return TRUE if INSN doesn't explicitly define any value. */
static bool insn_no_def(struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

	return (class == BPF_JMP || class == BPF_JMP32 ||
		class == BPF_STX || class == BPF_ST);
}

/* Return TRUE if INSN has defined any 32-bit value explicitly. */
static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	if (insn_no_def(insn))
		return false;

	return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
}

static void mark_insn_zext(struct bpf_verifier_env *env,
			   struct bpf_reg_state *reg)
{
	s32 def_idx = reg->subreg_def;

	if (def_idx == DEF_NOT_SUBREG)
		return;

	env->insn_aux_data[def_idx - 1].zext_dst = true;
	/* The dst will be zero extended, so won't be sub-register anymore. */
	reg->subreg_def = DEF_NOT_SUBREG;
}

static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
	struct bpf_reg_state *reg, *regs = state->regs;
	bool rw64;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	reg = &regs[regno];
	rw64 = is_reg64(env, insn, regno, reg, t);
	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (reg->type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		/* We don't need to worry about FP liveness because it's read-only */
		if (regno == BPF_REG_FP)
			return 0;

		if (rw64)
			mark_insn_zext(env, reg);

		return mark_reg_read(env, reg, reg->parent,
				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		reg->live |= REG_LIVE_WRITTEN;
		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}

/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env,
			    struct bpf_verifier_state *cur)
{
	u32 cnt = cur->jmp_history_cnt;
	struct bpf_idx_pair *p;

	cnt++;
	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
	if (!p)
		return -ENOMEM;
	p[cnt - 1].idx = env->insn_idx;
	p[cnt - 1].prev_idx = env->prev_insn_idx;
	cur->jmp_history = p;
	cur->jmp_history_cnt = cnt;
	return 0;
}

/* Backtrack one insn at a time. If idx is not at the top of recorded
 * history then previous instruction came from straight line execution.
 */
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
			     u32 *history)
{
	u32 cnt = *history;

	if (cnt && st->jmp_history[cnt - 1].idx == i) {
		i = st->jmp_history[cnt - 1].prev_idx;
		(*history)--;
	} else {
		i--;
	}
	return i;
}
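
/* Worked example (added comment): if jmp_history holds
 * [(idx=8, prev_idx=3), (idx=14, prev_idx=10)], backtracking from insn 14
 * follows the recorded jump back to insn 10 and pops that entry, while
 * backtracking from insn 9 simply steps to insn 8 because the top entry's
 * idx doesn't match.
 */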
1501
1502/* For given verifier state backtrack_insn() is called from the last insn to
1503 * the first insn. Its purpose is to compute a bitmask of registers and
1504 * stack slots that needs precision in the parent verifier state.
1505 */
1506static int backtrack_insn(struct bpf_verifier_env *env, int idx,
1507 u32 *reg_mask, u64 *stack_mask)
1508{
1509 const struct bpf_insn_cbs cbs = {
1510 .cb_print = verbose,
1511 .private_data = env,
1512 };
1513 struct bpf_insn *insn = env->prog->insnsi + idx;
1514 u8 class = BPF_CLASS(insn->code);
1515 u8 opcode = BPF_OP(insn->code);
1516 u8 mode = BPF_MODE(insn->code);
1517 u32 dreg = 1u << insn->dst_reg;
1518 u32 sreg = 1u << insn->src_reg;
1519 u32 spi;
1520
1521 if (insn->code == 0)
1522 return 0;
1523 if (env->log.level & BPF_LOG_LEVEL) {
1524 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
1525 verbose(env, "%d: ", idx);
1526 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
1527 }
1528
1529 if (class == BPF_ALU || class == BPF_ALU64) {
1530 if (!(*reg_mask & dreg))
1531 return 0;
1532 if (opcode == BPF_MOV) {
1533 if (BPF_SRC(insn->code) == BPF_X) {
1534 /* dreg = sreg
1535 * dreg needs precision after this insn
1536 * sreg needs precision before this insn
1537 */
1538 *reg_mask &= ~dreg;
1539 *reg_mask |= sreg;
1540 } else {
1541 /* dreg = K
1542 * dreg needs precision after this insn.
1543 * Corresponding register is already marked
1544 * as precise=true in this verifier state.
1545 * No further markings in parent are necessary
1546 */
1547 *reg_mask &= ~dreg;
1548 }
1549 } else {
1550 if (BPF_SRC(insn->code) == BPF_X) {
1551 /* dreg += sreg
1552 * both dreg and sreg need precision
1553 * before this insn
1554 */
1555 *reg_mask |= sreg;
1556 } /* else dreg += K
1557 * dreg still needs precision before this insn
1558 */
1559 }
1560 } else if (class == BPF_LDX) {
1561 if (!(*reg_mask & dreg))
1562 return 0;
1563 *reg_mask &= ~dreg;
1564
1565 /* scalars can only be spilled into stack w/o losing precision.
1566 * Load from any other memory can be zero extended.
1567 * The desire to keep that precision is already indicated
1568 * by 'precise' mark in corresponding register of this state.
1569 * No further tracking necessary.
1570 */
1571 if (insn->src_reg != BPF_REG_FP)
1572 return 0;
1573 if (BPF_SIZE(insn->code) != BPF_DW)
1574 return 0;
1575
1576 /* dreg = *(u64 *)[fp - off] was a fill from the stack.
1577 * that [fp - off] slot contains scalar that needs to be
1578 * tracked with precision
1579 */
1580 spi = (-insn->off - 1) / BPF_REG_SIZE;
1581 if (spi >= 64) {
1582 verbose(env, "BUG spi %d\n", spi);
1583 WARN_ONCE(1, "verifier backtracking bug");
1584 return -EFAULT;
1585 }
1586 *stack_mask |= 1ull << spi;
b3b50f05 1587 } else if (class == BPF_STX || class == BPF_ST) {
b5dc0163 1588 if (*reg_mask & dreg)
b3b50f05 1589 /* stx & st shouldn't be using _scalar_ dst_reg
b5dc0163
AS
1590 * to access memory. It means backtracking
1591 * encountered a case of pointer subtraction.
1592 */
1593 return -ENOTSUPP;
1594 /* scalars can only be spilled into stack */
1595 if (insn->dst_reg != BPF_REG_FP)
1596 return 0;
1597 if (BPF_SIZE(insn->code) != BPF_DW)
1598 return 0;
1599 spi = (-insn->off - 1) / BPF_REG_SIZE;
1600 if (spi >= 64) {
1601 verbose(env, "BUG spi %d\n", spi);
1602 WARN_ONCE(1, "verifier backtracking bug");
1603 return -EFAULT;
1604 }
1605 if (!(*stack_mask & (1ull << spi)))
1606 return 0;
1607 *stack_mask &= ~(1ull << spi);
b3b50f05
AN
1608 if (class == BPF_STX)
1609 *reg_mask |= sreg;
b5dc0163
AS
1610 } else if (class == BPF_JMP || class == BPF_JMP32) {
1611 if (opcode == BPF_CALL) {
1612 if (insn->src_reg == BPF_PSEUDO_CALL)
1613 return -ENOTSUPP;
1614 /* regular helper call sets R0 */
1615 *reg_mask &= ~1;
1616 if (*reg_mask & 0x3f) {
1617 /* if backtracing was looking for registers R1-R5
1618 * they should have been found already.
1619 */
1620 verbose(env, "BUG regs %x\n", *reg_mask);
1621 WARN_ONCE(1, "verifier backtracking bug");
1622 return -EFAULT;
1623 }
1624 } else if (opcode == BPF_EXIT) {
1625 return -ENOTSUPP;
1626 }
1627 } else if (class == BPF_LD) {
1628 if (!(*reg_mask & dreg))
1629 return 0;
1630 *reg_mask &= ~dreg;
1631 /* It's ld_imm64 or ld_abs or ld_ind.
1632 * For ld_imm64 no further tracking of precision
1633 * into parent is necessary
1634 */
1635 if (mode == BPF_IND || mode == BPF_ABS)
1636 /* to be analyzed */
1637 return -ENOTSUPP;
b5dc0163
AS
1638 }
1639 return 0;
1640}
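To make the mask bookkeeping above concrete, here is a small worked trace (a sketch, not part of the kernel source; the insn indices are hypothetical) of reg_mask and stack_mask while backtrack_insn() walks a three-insn block in reverse:

/* backtracking starts at insn 12 with reg_mask = {r6}, stack_mask = {}:
 *   12: r6 += r7                // ALU with BPF_X: r6 stays tracked,
 *                               // r7 is added -> {r6, r7}
 *   11: r7 = 42                 // BPF_MOV|BPF_K: r7 found as a constant,
 *                               // dropped -> {r6}
 *   10: r6 = *(u64 *)(r10 - 8)  // BPF_LDX from fp-8: r6 dropped, and
 *                               // spi 0 is set in stack_mask, since
 *                               // (-(-8) - 1) / BPF_REG_SIZE == 0
 * the remaining masks describe what the parent state must keep precise.
 */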
1641
1642/* the scalar precision tracking algorithm:
1643 * . at the start all registers have precise=false.
1644 * . scalar ranges are tracked as normal through alu and jmp insns.
1645 * . once precise value of the scalar register is used in:
1646 * . ptr + scalar alu
1647 * . if (scalar cond K|scalar)
1648 * . helper_call(.., scalar, ...) where ARG_CONST is expected
1649 * backtrack through the verifier states and mark as precise all
1650 * registers and stack slots with spilled constants that contributed
1651 * to the value of these scalar registers.
1652 * . during state pruning two registers (or spilled stack slots)
1653 * are equivalent if both are not precise.
1654 *
1655 * Note the verifier cannot simply walk register parentage chain,
1656 * since many different registers and stack slots could have been
1657 * used to compute single precise scalar.
1658 *
1659 * The approach of starting with precise=true for all registers and then
1660 * backtracking to mark a register as not precise when the verifier detects
1661 * that the program doesn't care about a specific value (e.g., when a helper
1662 * takes the register as an ARG_ANYTHING parameter) is not safe.
1663 *
1664 * It's ok to walk a single parentage chain of the verifier states.
1665 * It's possible that this backtracking will go all the way to the 1st insn.
1666 * All other branches will be explored for needing precision later.
1667 *
1668 * The backtracking needs to deal with cases like:
1669 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
1670 * r9 -= r8
1671 * r5 = r9
1672 * if r5 > 0x79f goto pc+7
1673 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
1674 * r5 += 1
1675 * ...
1676 * call bpf_perf_event_output#25
1677 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
1678 *
1679 * and this case:
1680 * r6 = 1
1681 * call foo // uses callee's r6 inside to compute r0
1682 * r0 += r6
1683 * if r0 == 0 goto
1684 *
1685 * to track above reg_mask/stack_mask needs to be independent for each frame.
1686 *
1687 * Also if parent's curframe > frame where backtracking started,
1688 * the verifier needs to mark registers in both frames, otherwise callees
1689 * may incorrectly prune callers. This is similar to
1690 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
1691 *
1692 * For now backtracking falls back into conservative marking.
1693 */
1694static void mark_all_scalars_precise(struct bpf_verifier_env *env,
1695 struct bpf_verifier_state *st)
1696{
1697 struct bpf_func_state *func;
1698 struct bpf_reg_state *reg;
1699 int i, j;
1700
1701 /* big hammer: mark all scalars precise in this path.
1702 * pop_stack may still get !precise scalars.
1703 */
1704 for (; st; st = st->parent)
1705 for (i = 0; i <= st->curframe; i++) {
1706 func = st->frame[i];
1707 for (j = 0; j < BPF_REG_FP; j++) {
1708 reg = &func->regs[j];
1709 if (reg->type != SCALAR_VALUE)
1710 continue;
1711 reg->precise = true;
1712 }
1713 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
1714 if (func->stack[j].slot_type[0] != STACK_SPILL)
1715 continue;
1716 reg = &func->stack[j].spilled_ptr;
1717 if (reg->type != SCALAR_VALUE)
1718 continue;
1719 reg->precise = true;
1720 }
1721 }
1722}
1723
a3ce685d
AS
1724static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
1725 int spi)
b5dc0163
AS
1726{
1727 struct bpf_verifier_state *st = env->cur_state;
1728 int first_idx = st->first_insn_idx;
1729 int last_idx = env->insn_idx;
1730 struct bpf_func_state *func;
1731 struct bpf_reg_state *reg;
a3ce685d
AS
1732 u32 reg_mask = regno >= 0 ? 1u << regno : 0;
1733 u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
b5dc0163 1734 bool skip_first = true;
a3ce685d 1735 bool new_marks = false;
b5dc0163
AS
1736 int i, err;
1737
1738 if (!env->allow_ptr_leaks)
1739 /* backtracking is root only for now */
1740 return 0;
1741
1742 func = st->frame[st->curframe];
a3ce685d
AS
1743 if (regno >= 0) {
1744 reg = &func->regs[regno];
1745 if (reg->type != SCALAR_VALUE) {
1746 WARN_ONCE(1, "backtracing misuse");
1747 return -EFAULT;
1748 }
1749 if (!reg->precise)
1750 new_marks = true;
1751 else
1752 reg_mask = 0;
1753 reg->precise = true;
b5dc0163 1754 }
b5dc0163 1755
a3ce685d
AS
1756 while (spi >= 0) {
1757 if (func->stack[spi].slot_type[0] != STACK_SPILL) {
1758 stack_mask = 0;
1759 break;
1760 }
1761 reg = &func->stack[spi].spilled_ptr;
1762 if (reg->type != SCALAR_VALUE) {
1763 stack_mask = 0;
1764 break;
1765 }
1766 if (!reg->precise)
1767 new_marks = true;
1768 else
1769 stack_mask = 0;
1770 reg->precise = true;
1771 break;
1772 }
1773
1774 if (!new_marks)
1775 return 0;
1776 if (!reg_mask && !stack_mask)
1777 return 0;
b5dc0163
AS
1778 for (;;) {
1779 DECLARE_BITMAP(mask, 64);
b5dc0163
AS
1780 u32 history = st->jmp_history_cnt;
1781
1782 if (env->log.level & BPF_LOG_LEVEL)
1783 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
1784 for (i = last_idx;;) {
1785 if (skip_first) {
1786 err = 0;
1787 skip_first = false;
1788 } else {
1789 err = backtrack_insn(env, i, &reg_mask, &stack_mask);
1790 }
1791 if (err == -ENOTSUPP) {
1792 mark_all_scalars_precise(env, st);
1793 return 0;
1794 } else if (err) {
1795 return err;
1796 }
1797 if (!reg_mask && !stack_mask)
1798 /* Found assignment(s) into tracked register in this state.
1799 * Since this state is already marked, just return.
1800 * Nothing to be tracked further in the parent state.
1801 */
1802 return 0;
1803 if (i == first_idx)
1804 break;
1805 i = get_prev_insn_idx(st, i, &history);
1806 if (i >= env->prog->len) {
1807 /* This can happen if backtracking reached insn 0
1808 * and there are still reg_mask or stack_mask bits
1809 * left to backtrack.
1810 * It means the backtracking missed the spot where
1811 * a particular register was initialized with a constant.
1812 */
1813 verbose(env, "BUG backtracking idx %d\n", i);
1814 WARN_ONCE(1, "verifier backtracking bug");
1815 return -EFAULT;
1816 }
1817 }
1818 st = st->parent;
1819 if (!st)
1820 break;
1821
a3ce685d 1822 new_marks = false;
b5dc0163
AS
1823 func = st->frame[st->curframe];
1824 bitmap_from_u64(mask, reg_mask);
1825 for_each_set_bit(i, mask, 32) {
1826 reg = &func->regs[i];
a3ce685d
AS
1827 if (reg->type != SCALAR_VALUE) {
1828 reg_mask &= ~(1u << i);
b5dc0163 1829 continue;
a3ce685d 1830 }
b5dc0163
AS
1831 if (!reg->precise)
1832 new_marks = true;
1833 reg->precise = true;
1834 }
1835
1836 bitmap_from_u64(mask, stack_mask);
1837 for_each_set_bit(i, mask, 64) {
1838 if (i >= func->allocated_stack / BPF_REG_SIZE) {
2339cd6c
AS
1839 /* the sequence of instructions:
1840 * 2: (bf) r3 = r10
1841 * 3: (7b) *(u64 *)(r3 -8) = r0
1842 * 4: (79) r4 = *(u64 *)(r10 -8)
1843 * doesn't contain jmps. It's backtracked
1844 * as a single block.
1845 * During backtracking insn 3 is not recognized as
1846 * stack access, so at the end of backtracking
1847 * stack slot fp-8 is still marked in stack_mask.
1848 * However the parent state may not have accessed
1849 * fp-8, in which case it is "unallocated" stack space.
1850 * In such a case, fall back to conservative marking.
b5dc0163 1851 */
2339cd6c
AS
1852 mark_all_scalars_precise(env, st);
1853 return 0;
b5dc0163
AS
1854 }
1855
a3ce685d
AS
1856 if (func->stack[i].slot_type[0] != STACK_SPILL) {
1857 stack_mask &= ~(1ull << i);
b5dc0163 1858 continue;
a3ce685d 1859 }
b5dc0163 1860 reg = &func->stack[i].spilled_ptr;
a3ce685d
AS
1861 if (reg->type != SCALAR_VALUE) {
1862 stack_mask &= ~(1ull << i);
b5dc0163 1863 continue;
a3ce685d 1864 }
b5dc0163
AS
1865 if (!reg->precise)
1866 new_marks = true;
1867 reg->precise = true;
1868 }
1869 if (env->log.level & BPF_LOG_LEVEL) {
1870 print_verifier_state(env, func);
1871 verbose(env, "parent %s regs=%x stack=%llx marks\n",
1872 new_marks ? "didn't have" : "already had",
1873 reg_mask, stack_mask);
1874 }
1875
a3ce685d
AS
1876 if (!reg_mask && !stack_mask)
1877 break;
b5dc0163
AS
1878 if (!new_marks)
1879 break;
1880
1881 last_idx = st->last_insn_idx;
1882 first_idx = st->first_insn_idx;
1883 }
1884 return 0;
1885}
1886
a3ce685d
AS
1887static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
1888{
1889 return __mark_chain_precision(env, regno, -1);
1890}
1891
1892static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
1893{
1894 return __mark_chain_precision(env, -1, spi);
1895}
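As a usage note (a sketch based on call sites later in this file): the register flavor is called when a scalar register's exact value becomes load-bearing, the stack flavor when a spilled constant does. For example, check_stack_write() below does:

/*	err = mark_chain_precision(env, value_regno);
 *	if (err)
 *		return err;
 * before spilling a constant through a non-fp register.
 */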
b5dc0163 1896
1be7f75d
AS
1897static bool is_spillable_regtype(enum bpf_reg_type type)
1898{
1899 switch (type) {
1900 case PTR_TO_MAP_VALUE:
1901 case PTR_TO_MAP_VALUE_OR_NULL:
1902 case PTR_TO_STACK:
1903 case PTR_TO_CTX:
969bf05e 1904 case PTR_TO_PACKET:
de8f3a83 1905 case PTR_TO_PACKET_META:
969bf05e 1906 case PTR_TO_PACKET_END:
d58e468b 1907 case PTR_TO_FLOW_KEYS:
1be7f75d 1908 case CONST_PTR_TO_MAP:
c64b7983
JS
1909 case PTR_TO_SOCKET:
1910 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
1911 case PTR_TO_SOCK_COMMON:
1912 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
1913 case PTR_TO_TCP_SOCK:
1914 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 1915 case PTR_TO_XDP_SOCK:
65726b5b 1916 case PTR_TO_BTF_ID:
1be7f75d
AS
1917 return true;
1918 default:
1919 return false;
1920 }
1921}
1922
cc2b14d5
AS
1923/* Does this register contain a constant zero? */
1924static bool register_is_null(struct bpf_reg_state *reg)
1925{
1926 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
1927}
1928
f7cf25b2
AS
1929static bool register_is_const(struct bpf_reg_state *reg)
1930{
1931 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
1932}
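Both helpers lean on the tnum ("tracked number") representation from include/linux/tnum.h; a minimal sketch of the semantics they rely on (field names per that header):

/* struct tnum { u64 value; u64 mask; };  -- bits set in 'mask' are unknown
 * tnum_is_const(t)        <=> t.mask == 0         (every bit is known)
 * tnum_equals_const(t, 0) <=> t.mask == 0 && t.value == 0
 * so register_is_null() means "provably the constant 0",
 * not merely "might be 0".
 */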
1933
1934static void save_register_state(struct bpf_func_state *state,
1935 int spi, struct bpf_reg_state *reg)
1936{
1937 int i;
1938
1939 state->stack[spi].spilled_ptr = *reg;
1940 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1941
1942 for (i = 0; i < BPF_REG_SIZE; i++)
1943 state->stack[spi].slot_type[i] = STACK_SPILL;
1944}
1945
17a52670
AS
1946/* check_stack_read/write functions track spill/fill of registers,
1947 * stack boundary and alignment are checked in check_mem_access()
1948 */
61bd5218 1949static int check_stack_write(struct bpf_verifier_env *env,
f4d7e40a 1950 struct bpf_func_state *state, /* func where register points to */
af86ca4e 1951 int off, int size, int value_regno, int insn_idx)
17a52670 1952{
f4d7e40a 1953 struct bpf_func_state *cur; /* state of the current function */
638f5b90 1954 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
b5dc0163 1955 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
f7cf25b2 1956 struct bpf_reg_state *reg = NULL;
638f5b90 1957
f4d7e40a 1958 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
fd978bf7 1959 state->acquired_refs, true);
638f5b90
AS
1960 if (err)
1961 return err;
9c399760
AS
1962 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1963 * so it's aligned access and [off, off + size) are within stack limits
1964 */
638f5b90
AS
1965 if (!env->allow_ptr_leaks &&
1966 state->stack[spi].slot_type[0] == STACK_SPILL &&
1967 size != BPF_REG_SIZE) {
1968 verbose(env, "attempt to corrupt spilled pointer on stack\n");
1969 return -EACCES;
1970 }
17a52670 1971
f4d7e40a 1972 cur = env->cur_state->frame[env->cur_state->curframe];
f7cf25b2
AS
1973 if (value_regno >= 0)
1974 reg = &cur->regs[value_regno];
17a52670 1975
f7cf25b2
AS
1976 if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
1977 !register_is_null(reg) && env->allow_ptr_leaks) {
b5dc0163
AS
1978 if (dst_reg != BPF_REG_FP) {
1979 /* The backtracking logic can only recognize explicit
1980 * stack slot addresses like [fp - 8]. Any other spill of a
1981 * scalar via a different register has to be conservative.
1982 * Backtrack from here and mark all registers as precise
1983 * that contributed into 'reg' being a constant.
1984 */
1985 err = mark_chain_precision(env, value_regno);
1986 if (err)
1987 return err;
1988 }
f7cf25b2
AS
1989 save_register_state(state, spi, reg);
1990 } else if (reg && is_spillable_regtype(reg->type)) {
17a52670 1991 /* register containing pointer is being spilled into stack */
9c399760 1992 if (size != BPF_REG_SIZE) {
f7cf25b2 1993 verbose_linfo(env, insn_idx, "; ");
61bd5218 1994 verbose(env, "invalid size of register spill\n");
17a52670
AS
1995 return -EACCES;
1996 }
1997
f7cf25b2 1998 if (state != cur && reg->type == PTR_TO_STACK) {
f4d7e40a
AS
1999 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
2000 return -EINVAL;
2001 }
2002
f7cf25b2
AS
2003 if (!env->allow_ptr_leaks) {
2004 bool sanitize = false;
17a52670 2005
f7cf25b2
AS
2006 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
2007 register_is_const(&state->stack[spi].spilled_ptr))
2008 sanitize = true;
2009 for (i = 0; i < BPF_REG_SIZE; i++)
2010 if (state->stack[spi].slot_type[i] == STACK_MISC) {
2011 sanitize = true;
2012 break;
2013 }
2014 if (sanitize) {
af86ca4e
AS
2015 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
2016 int soff = (-spi - 1) * BPF_REG_SIZE;
2017
2018 /* detected reuse of integer stack slot with a pointer
2019 * which means either llvm is reusing stack slot or
2020 * an attacker is trying to exploit CVE-2018-3639
2021 * (speculative store bypass)
2022 * Have to sanitize that slot with preemptive
2023 * store of zero.
2024 */
2025 if (*poff && *poff != soff) {
2026 /* disallow programs where single insn stores
2027 * into two different stack slots, since verifier
2028 * cannot sanitize them
2029 */
2030 verbose(env,
2031 "insn %d cannot access two stack slots fp%d and fp%d",
2032 insn_idx, *poff, soff);
2033 return -EINVAL;
2034 }
2035 *poff = soff;
2036 }
af86ca4e 2037 }
f7cf25b2 2038 save_register_state(state, spi, reg);
9c399760 2039 } else {
cc2b14d5
AS
2040 u8 type = STACK_MISC;
2041
679c782d
EC
2042 /* regular write of data into stack destroys any spilled ptr */
2043 state->stack[spi].spilled_ptr.type = NOT_INIT;
0bae2d4d
JW
2044 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
2045 if (state->stack[spi].slot_type[0] == STACK_SPILL)
2046 for (i = 0; i < BPF_REG_SIZE; i++)
2047 state->stack[spi].slot_type[i] = STACK_MISC;
9c399760 2048
cc2b14d5
AS
2049 /* only mark the slot as written if all 8 bytes were written
2050 * otherwise read propagation may incorrectly stop too soon
2051 * when stack slots are partially written.
2052 * This heuristic means that read propagation will be
2053 * conservative, since it will add reg_live_read marks
2054 * to stack slots all the way to the first state when a program
2055 * writes+reads less than 8 bytes
2056 */
2057 if (size == BPF_REG_SIZE)
2058 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2059
2060 /* when we zero initialize stack slots mark them as such */
b5dc0163
AS
2061 if (reg && register_is_null(reg)) {
2062 /* backtracking doesn't work for STACK_ZERO yet. */
2063 err = mark_chain_precision(env, value_regno);
2064 if (err)
2065 return err;
cc2b14d5 2066 type = STACK_ZERO;
b5dc0163 2067 }
cc2b14d5 2068
0bae2d4d 2069 /* Mark slots affected by this stack write. */
9c399760 2070 for (i = 0; i < size; i++)
638f5b90 2071 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
cc2b14d5 2072 type;
17a52670
AS
2073 }
2074 return 0;
2075}
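For illustration, a minimal sequence (a sketch) that takes the precision path at the top of check_stack_write(), spilling a constant through a non-fp register:

/*   r0 = 42
 *   r3 = r10
 *   *(u64 *)(r3 - 8) = r0   // dst_reg != BPF_REG_FP, so backtracking
 *                           // won't recognize this slot later; hence
 *                           // mark_chain_precision(env, value_regno)
 *                           // runs before save_register_state().
 */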
2076
61bd5218 2077static int check_stack_read(struct bpf_verifier_env *env,
f4d7e40a
AS
2078 struct bpf_func_state *reg_state /* func where register points to */,
2079 int off, int size, int value_regno)
17a52670 2080{
f4d7e40a
AS
2081 struct bpf_verifier_state *vstate = env->cur_state;
2082 struct bpf_func_state *state = vstate->frame[vstate->curframe];
638f5b90 2083 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
f7cf25b2 2084 struct bpf_reg_state *reg;
638f5b90 2085 u8 *stype;
17a52670 2086
f4d7e40a 2087 if (reg_state->allocated_stack <= slot) {
638f5b90
AS
2088 verbose(env, "invalid read from stack off %d+0 size %d\n",
2089 off, size);
2090 return -EACCES;
2091 }
f4d7e40a 2092 stype = reg_state->stack[spi].slot_type;
f7cf25b2 2093 reg = &reg_state->stack[spi].spilled_ptr;
17a52670 2094
638f5b90 2095 if (stype[0] == STACK_SPILL) {
9c399760 2096 if (size != BPF_REG_SIZE) {
f7cf25b2
AS
2097 if (reg->type != SCALAR_VALUE) {
2098 verbose_linfo(env, env->insn_idx, "; ");
2099 verbose(env, "invalid size of register fill\n");
2100 return -EACCES;
2101 }
2102 if (value_regno >= 0) {
2103 mark_reg_unknown(env, state->regs, value_regno);
2104 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2105 }
2106 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2107 return 0;
17a52670 2108 }
9c399760 2109 for (i = 1; i < BPF_REG_SIZE; i++) {
638f5b90 2110 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
61bd5218 2111 verbose(env, "corrupted spill memory\n");
17a52670
AS
2112 return -EACCES;
2113 }
2114 }
2115
dc503a8a 2116 if (value_regno >= 0) {
17a52670 2117 /* restore register state from stack */
f7cf25b2 2118 state->regs[value_regno] = *reg;
2f18f62e
AS
2119 /* mark reg as written since spilled pointer state likely
2120 * has its liveness marks cleared by is_state_visited()
2121 * which resets stack/reg liveness for state transitions
2122 */
2123 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
dc503a8a 2124 }
f7cf25b2 2125 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
17a52670 2126 } else {
cc2b14d5
AS
2127 int zeros = 0;
2128
17a52670 2129 for (i = 0; i < size; i++) {
cc2b14d5
AS
2130 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
2131 continue;
2132 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
2133 zeros++;
2134 continue;
17a52670 2135 }
cc2b14d5
AS
2136 verbose(env, "invalid read from stack off %d+%d size %d\n",
2137 off, i, size);
2138 return -EACCES;
2139 }
f7cf25b2 2140 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
cc2b14d5
AS
2141 if (value_regno >= 0) {
2142 if (zeros == size) {
2143 /* any size read into register is zero extended,
2144 * so the whole register == const_zero
2145 */
2146 __mark_reg_const_zero(&state->regs[value_regno]);
b5dc0163
AS
2147 /* backtracking doesn't support STACK_ZERO yet,
2148 * so mark it precise here, so that later
2149 * backtracking can stop here.
2150 * Backtracking may not need this if this register
2151 * doesn't participate in pointer adjustment.
2152 * Forward propagation of precise flag is not
2153 * necessary either. This mark is only to stop
2154 * backtracking. Any register that contributed
2155 * to const 0 was marked precise before spill.
2156 */
2157 state->regs[value_regno].precise = true;
cc2b14d5
AS
2158 } else {
2159 /* have read misc data from the stack */
2160 mark_reg_unknown(env, state->regs, value_regno);
2161 }
2162 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
17a52670 2163 }
17a52670 2164 }
f7cf25b2 2165 return 0;
17a52670
AS
2166}
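A short sketch of the STACK_ZERO path above: bytes written from a known-zero register and read back become a constant-zero register:

/*   r2 = 0
 *   *(u8 *)(r10 - 1) = r2   // r2 is const zero -> byte marked STACK_ZERO
 *   r1 = *(u8 *)(r10 - 1)   // zeros == size, so r1 becomes const zero
 *                           // via __mark_reg_const_zero() and is marked
 *                           // precise to stop later backtracking.
 */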
2167
e4298d25
DB
2168static int check_stack_access(struct bpf_verifier_env *env,
2169 const struct bpf_reg_state *reg,
2170 int off, int size)
2171{
2172 /* Stack accesses must be at a fixed offset, so that we
2173 * can determine what type of data were returned. See
2174 * check_stack_read().
2175 */
2176 if (!tnum_is_const(reg->var_off)) {
2177 char tn_buf[48];
2178
2179 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1fbd20f8 2180 verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
e4298d25
DB
2181 tn_buf, off, size);
2182 return -EACCES;
2183 }
2184
2185 if (off >= 0 || off < -MAX_BPF_STACK) {
2186 verbose(env, "invalid stack off=%d size=%d\n", off, size);
2187 return -EACCES;
2188 }
2189
2190 return 0;
2191}
2192
591fe988
DB
2193static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
2194 int off, int size, enum bpf_access_type type)
2195{
2196 struct bpf_reg_state *regs = cur_regs(env);
2197 struct bpf_map *map = regs[regno].map_ptr;
2198 u32 cap = bpf_map_flags_to_cap(map);
2199
2200 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
2201 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
2202 map->value_size, off, size);
2203 return -EACCES;
2204 }
2205
2206 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
2207 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
2208 map->value_size, off, size);
2209 return -EACCES;
2210 }
2211
2212 return 0;
2213}
2214
17a52670 2215/* check read/write into map element returned by bpf_map_lookup_elem() */
f1174f77 2216static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 2217 int size, bool zero_size_allowed)
17a52670 2218{
638f5b90
AS
2219 struct bpf_reg_state *regs = cur_regs(env);
2220 struct bpf_map *map = regs[regno].map_ptr;
17a52670 2221
9fd29c08
YS
2222 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2223 off + size > map->value_size) {
61bd5218 2224 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
17a52670
AS
2225 map->value_size, off, size);
2226 return -EACCES;
2227 }
2228 return 0;
2229}
2230
f1174f77
EC
2231/* check read/write into a map element with possible variable offset */
2232static int check_map_access(struct bpf_verifier_env *env, u32 regno,
9fd29c08 2233 int off, int size, bool zero_size_allowed)
dbcfe5f7 2234{
f4d7e40a
AS
2235 struct bpf_verifier_state *vstate = env->cur_state;
2236 struct bpf_func_state *state = vstate->frame[vstate->curframe];
dbcfe5f7
GB
2237 struct bpf_reg_state *reg = &state->regs[regno];
2238 int err;
2239
f1174f77
EC
2240 /* We may have adjusted the register to this map value, so we
2241 * need to try adding each of min_value and max_value to off
2242 * to make sure our theoretical access will be safe.
dbcfe5f7 2243 */
06ee7115 2244 if (env->log.level & BPF_LOG_LEVEL)
61bd5218 2245 print_verifier_state(env, state);
b7137c4e 2246
dbcfe5f7
GB
2247 /* The minimum value is only important with signed
2248 * comparisons where we can't assume the floor of a
2249 * value is 0. If we are using signed variables for our
2250 * indexes, we need to make sure that whatever we use
2251 * will have a set floor within our range.
2252 */
b7137c4e
DB
2253 if (reg->smin_value < 0 &&
2254 (reg->smin_value == S64_MIN ||
2255 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
2256 reg->smin_value + off < 0)) {
61bd5218 2257 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
dbcfe5f7
GB
2258 regno);
2259 return -EACCES;
2260 }
9fd29c08
YS
2261 err = __check_map_access(env, regno, reg->smin_value + off, size,
2262 zero_size_allowed);
dbcfe5f7 2263 if (err) {
61bd5218
JK
2264 verbose(env, "R%d min value is outside of the array range\n",
2265 regno);
dbcfe5f7
GB
2266 return err;
2267 }
2268
b03c9f9f
EC
2269 /* If we haven't set a max value then we need to bail since we can't be
2270 * sure we won't do bad things.
2271 * If reg->umax_value + off could overflow, treat that as unbounded too.
dbcfe5f7 2272 */
b03c9f9f 2273 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
61bd5218 2274 verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
dbcfe5f7
GB
2275 regno);
2276 return -EACCES;
2277 }
9fd29c08
YS
2278 err = __check_map_access(env, regno, reg->umax_value + off, size,
2279 zero_size_allowed);
f1174f77 2280 if (err)
61bd5218
JK
2281 verbose(env, "R%d max value is outside of the array range\n",
2282 regno);
d83525ca
AS
2283
2284 if (map_value_has_spin_lock(reg->map_ptr)) {
2285 u32 lock = reg->map_ptr->spin_lock_off;
2286
2287 /* if any part of struct bpf_spin_lock can be touched by
2288 * load/store reject this program.
2289 * To check that [x1, x2) overlaps with [y1, y2)
2290 * it is sufficient to check x1 < y2 && y1 < x2.
2291 */
2292 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
2293 lock < reg->umax_value + off + size) {
2294 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
2295 return -EACCES;
2296 }
2297 }
f1174f77 2298 return err;
dbcfe5f7
GB
2299}
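The "off + reg->smin_value != (s64)(s32)(off + reg->smin_value)" test above rejects offsets whose sum no longer fits in s32; a self-contained illustration of that arithmetic (plain user-space C, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int off = 4;			/* fixed part of the access */
	int64_t smin = INT32_MAX;	/* pathological variable part */
	int64_t sum = off + smin;	/* 0x80000003: does not fit in s32 */

	/* same round-trip check as in check_map_access() above:
	 * sign-extending the truncated s32 changes the value,
	 * so the access would be rejected
	 */
	if (sum != (int64_t)(int32_t)sum)
		printf("rejected: %lld overflows s32\n", (long long)sum);
	return 0;
}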
2300
969bf05e
AS
2301#define MAX_PACKET_OFF 0xffff
2302
58e2af8b 2303static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
3a0af8fd
TG
2304 const struct bpf_call_arg_meta *meta,
2305 enum bpf_access_type t)
4acf6c0b 2306{
36bbef52 2307 switch (env->prog->type) {
5d66fa7d 2308 /* Program types only with direct read access go here! */
3a0af8fd
TG
2309 case BPF_PROG_TYPE_LWT_IN:
2310 case BPF_PROG_TYPE_LWT_OUT:
004d4b27 2311 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2dbb9b9e 2312 case BPF_PROG_TYPE_SK_REUSEPORT:
5d66fa7d 2313 case BPF_PROG_TYPE_FLOW_DISSECTOR:
d5563d36 2314 case BPF_PROG_TYPE_CGROUP_SKB:
3a0af8fd
TG
2315 if (t == BPF_WRITE)
2316 return false;
7e57fbb2 2317 /* fallthrough */
5d66fa7d
DB
2318
2319 /* Program types with direct read + write access go here! */
36bbef52
DB
2320 case BPF_PROG_TYPE_SCHED_CLS:
2321 case BPF_PROG_TYPE_SCHED_ACT:
4acf6c0b 2322 case BPF_PROG_TYPE_XDP:
3a0af8fd 2323 case BPF_PROG_TYPE_LWT_XMIT:
8a31db56 2324 case BPF_PROG_TYPE_SK_SKB:
4f738adb 2325 case BPF_PROG_TYPE_SK_MSG:
36bbef52
DB
2326 if (meta)
2327 return meta->pkt_access;
2328
2329 env->seen_direct_write = true;
4acf6c0b 2330 return true;
0d01da6a
SF
2331
2332 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2333 if (t == BPF_WRITE)
2334 env->seen_direct_write = true;
2335
2336 return true;
2337
4acf6c0b
BB
2338 default:
2339 return false;
2340 }
2341}
2342
f1174f77 2343static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
9fd29c08 2344 int off, int size, bool zero_size_allowed)
969bf05e 2345{
638f5b90 2346 struct bpf_reg_state *regs = cur_regs(env);
58e2af8b 2347 struct bpf_reg_state *reg = &regs[regno];
969bf05e 2348
9fd29c08
YS
2349 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2350 (u64)off + size > reg->range) {
61bd5218 2351 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
d91b28ed 2352 off, size, regno, reg->id, reg->off, reg->range);
969bf05e
AS
2353 return -EACCES;
2354 }
2355 return 0;
2356}
2357
f1174f77 2358static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 2359 int size, bool zero_size_allowed)
f1174f77 2360{
638f5b90 2361 struct bpf_reg_state *regs = cur_regs(env);
f1174f77
EC
2362 struct bpf_reg_state *reg = &regs[regno];
2363 int err;
2364
2365 /* We may have added a variable offset to the packet pointer; but any
2366 * reg->range we have comes after that. We are only checking the fixed
2367 * offset.
2368 */
2369
2370 /* We don't allow negative numbers, because we aren't tracking enough
2371 * detail to prove they're safe.
2372 */
b03c9f9f 2373 if (reg->smin_value < 0) {
61bd5218 2374 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
f1174f77
EC
2375 regno);
2376 return -EACCES;
2377 }
9fd29c08 2378 err = __check_packet_access(env, regno, off, size, zero_size_allowed);
f1174f77 2379 if (err) {
61bd5218 2380 verbose(env, "R%d offset is outside of the packet\n", regno);
f1174f77
EC
2381 return err;
2382 }
e647815a
JW
2383
2384 /* __check_packet_access has made sure "off + size - 1" is within u16.
2385 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
2387 * otherwise find_good_pkt_pointers would have refused to set the range info
2388 * and __check_packet_access would have rejected this pkt access.
2388 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
2389 */
2390 env->prog->aux->max_pkt_offset =
2391 max_t(u32, env->prog->aux->max_pkt_offset,
2392 off + reg->umax_value + size - 1);
2393
f1174f77
EC
2394 return err;
2395}
2396
2397/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
31fd8581 2398static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
9e15db66
AS
2399 enum bpf_access_type t, enum bpf_reg_type *reg_type,
2400 u32 *btf_id)
17a52670 2401{
f96da094
DB
2402 struct bpf_insn_access_aux info = {
2403 .reg_type = *reg_type,
9e15db66 2404 .log = &env->log,
f96da094 2405 };
31fd8581 2406
4f9218aa 2407 if (env->ops->is_valid_access &&
5e43f899 2408 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
f96da094
DB
2409 /* A non-zero info.ctx_field_size indicates that this field is a
2410 * candidate for later verifier transformation to load the whole
2411 * field and then apply a mask when accessed with a narrower
2412 * access than actual ctx access size. A zero info.ctx_field_size
2413 * will only allow for whole field access and rejects any other
2414 * type of narrower access.
31fd8581 2415 */
23994631 2416 *reg_type = info.reg_type;
31fd8581 2417
9e15db66
AS
2418 if (*reg_type == PTR_TO_BTF_ID)
2419 *btf_id = info.btf_id;
2420 else
2421 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
32bbe007
AS
2422 /* remember the offset of last byte accessed in ctx */
2423 if (env->prog->aux->max_ctx_offset < off + size)
2424 env->prog->aux->max_ctx_offset = off + size;
17a52670 2425 return 0;
32bbe007 2426 }
17a52670 2427
61bd5218 2428 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
17a52670
AS
2429 return -EACCES;
2430}
2431
d58e468b
PP
2432static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
2433 int size)
2434{
2435 if (size < 0 || off < 0 ||
2436 (u64)off + size > sizeof(struct bpf_flow_keys)) {
2437 verbose(env, "invalid access to flow keys off=%d size=%d\n",
2438 off, size);
2439 return -EACCES;
2440 }
2441 return 0;
2442}
2443
5f456649
MKL
2444static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
2445 u32 regno, int off, int size,
2446 enum bpf_access_type t)
c64b7983
JS
2447{
2448 struct bpf_reg_state *regs = cur_regs(env);
2449 struct bpf_reg_state *reg = &regs[regno];
5f456649 2450 struct bpf_insn_access_aux info = {};
46f8bc92 2451 bool valid;
c64b7983
JS
2452
2453 if (reg->smin_value < 0) {
2454 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2455 regno);
2456 return -EACCES;
2457 }
2458
46f8bc92
MKL
2459 switch (reg->type) {
2460 case PTR_TO_SOCK_COMMON:
2461 valid = bpf_sock_common_is_valid_access(off, size, t, &info);
2462 break;
2463 case PTR_TO_SOCKET:
2464 valid = bpf_sock_is_valid_access(off, size, t, &info);
2465 break;
655a51e5
MKL
2466 case PTR_TO_TCP_SOCK:
2467 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
2468 break;
fada7fdc
JL
2469 case PTR_TO_XDP_SOCK:
2470 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
2471 break;
46f8bc92
MKL
2472 default:
2473 valid = false;
c64b7983
JS
2474 }
2475
5f456649 2476
46f8bc92
MKL
2477 if (valid) {
2478 env->insn_aux_data[insn_idx].ctx_field_size =
2479 info.ctx_field_size;
2480 return 0;
2481 }
2482
2483 verbose(env, "R%d invalid %s access off=%d size=%d\n",
2484 regno, reg_type_str[reg->type], off, size);
2485
2486 return -EACCES;
c64b7983
JS
2487}
2488
4cabc5b1
DB
2489static bool __is_pointer_value(bool allow_ptr_leaks,
2490 const struct bpf_reg_state *reg)
1be7f75d 2491{
4cabc5b1 2492 if (allow_ptr_leaks)
1be7f75d
AS
2493 return false;
2494
f1174f77 2495 return reg->type != SCALAR_VALUE;
1be7f75d
AS
2496}
2497
2a159c6f
DB
2498static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
2499{
2500 return cur_regs(env) + regno;
2501}
2502
4cabc5b1
DB
2503static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
2504{
2a159c6f 2505 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4cabc5b1
DB
2506}
2507
f37a8cb8
DB
2508static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
2509{
2a159c6f 2510 const struct bpf_reg_state *reg = reg_state(env, regno);
f37a8cb8 2511
46f8bc92
MKL
2512 return reg->type == PTR_TO_CTX;
2513}
2514
2515static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
2516{
2517 const struct bpf_reg_state *reg = reg_state(env, regno);
2518
2519 return type_is_sk_pointer(reg->type);
f37a8cb8
DB
2520}
2521
ca369602
DB
2522static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
2523{
2a159c6f 2524 const struct bpf_reg_state *reg = reg_state(env, regno);
ca369602
DB
2525
2526 return type_is_pkt_pointer(reg->type);
2527}
2528
4b5defde
DB
2529static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
2530{
2531 const struct bpf_reg_state *reg = reg_state(env, regno);
2532
2533 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
2534 return reg->type == PTR_TO_FLOW_KEYS;
2535}
2536
61bd5218
JK
2537static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
2538 const struct bpf_reg_state *reg,
d1174416 2539 int off, int size, bool strict)
969bf05e 2540{
f1174f77 2541 struct tnum reg_off;
e07b98d9 2542 int ip_align;
d1174416
DM
2543
2544 /* Byte size accesses are always allowed. */
2545 if (!strict || size == 1)
2546 return 0;
2547
e4eda884
DM
2548 /* For platforms that do not have a Kconfig enabling
2549 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
2550 * NET_IP_ALIGN is universally set to '2'. And on platforms
2551 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
2552 * to this code only in strict mode where we want to emulate
2553 * the NET_IP_ALIGN==2 checking. Therefore use an
2554 * unconditional IP align value of '2'.
e07b98d9 2555 */
e4eda884 2556 ip_align = 2;
f1174f77
EC
2557
2558 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
2559 if (!tnum_is_aligned(reg_off, size)) {
2560 char tn_buf[48];
2561
2562 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218
JK
2563 verbose(env,
2564 "misaligned packet access off %d+%s+%d+%d size %d\n",
f1174f77 2565 ip_align, tn_buf, reg->off, off, size);
969bf05e
AS
2566 return -EACCES;
2567 }
79adffcd 2568
969bf05e
AS
2569 return 0;
2570}
2571
61bd5218
JK
2572static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
2573 const struct bpf_reg_state *reg,
f1174f77
EC
2574 const char *pointer_desc,
2575 int off, int size, bool strict)
79adffcd 2576{
f1174f77
EC
2577 struct tnum reg_off;
2578
2579 /* Byte size accesses are always allowed. */
2580 if (!strict || size == 1)
2581 return 0;
2582
2583 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
2584 if (!tnum_is_aligned(reg_off, size)) {
2585 char tn_buf[48];
2586
2587 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 2588 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
f1174f77 2589 pointer_desc, tn_buf, reg->off, off, size);
79adffcd
DB
2590 return -EACCES;
2591 }
2592
969bf05e
AS
2593 return 0;
2594}
2595
e07b98d9 2596static int check_ptr_alignment(struct bpf_verifier_env *env,
ca369602
DB
2597 const struct bpf_reg_state *reg, int off,
2598 int size, bool strict_alignment_once)
79adffcd 2599{
ca369602 2600 bool strict = env->strict_alignment || strict_alignment_once;
f1174f77 2601 const char *pointer_desc = "";
d1174416 2602
79adffcd
DB
2603 switch (reg->type) {
2604 case PTR_TO_PACKET:
de8f3a83
DB
2605 case PTR_TO_PACKET_META:
2606 /* Special case, because of NET_IP_ALIGN. Given metadata sits
2607 * right in front, treat it the very same way.
2608 */
61bd5218 2609 return check_pkt_ptr_alignment(env, reg, off, size, strict);
d58e468b
PP
2610 case PTR_TO_FLOW_KEYS:
2611 pointer_desc = "flow keys ";
2612 break;
f1174f77
EC
2613 case PTR_TO_MAP_VALUE:
2614 pointer_desc = "value ";
2615 break;
2616 case PTR_TO_CTX:
2617 pointer_desc = "context ";
2618 break;
2619 case PTR_TO_STACK:
2620 pointer_desc = "stack ";
a5ec6ae1
JH
2621 /* The stack spill tracking logic in check_stack_write()
2622 * and check_stack_read() relies on stack accesses being
2623 * aligned.
2624 */
2625 strict = true;
f1174f77 2626 break;
c64b7983
JS
2627 case PTR_TO_SOCKET:
2628 pointer_desc = "sock ";
2629 break;
46f8bc92
MKL
2630 case PTR_TO_SOCK_COMMON:
2631 pointer_desc = "sock_common ";
2632 break;
655a51e5
MKL
2633 case PTR_TO_TCP_SOCK:
2634 pointer_desc = "tcp_sock ";
2635 break;
fada7fdc
JL
2636 case PTR_TO_XDP_SOCK:
2637 pointer_desc = "xdp_sock ";
2638 break;
79adffcd 2639 default:
f1174f77 2640 break;
79adffcd 2641 }
61bd5218
JK
2642 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
2643 strict);
79adffcd
DB
2644}
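A worked example of the packet-alignment math in check_pkt_ptr_alignment() above (a sketch with made-up offsets); for fully-known pointers the tnum check degenerates to a mod check:

/* ip_align = 2, reg->off = 0, off = 0, size = 4:
 *   var_off = const 14: reg_off = 2 + 14 = 16, 16 % 4 == 0 -> allowed
 *   var_off = const 13: reg_off = 2 + 13 = 15, 15 % 4 != 0 ->
 *                       "misaligned packet access"
 * tnum_is_aligned() generalizes this mod check to partially-known values.
 */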
2645
f4d7e40a
AS
2646static int update_stack_depth(struct bpf_verifier_env *env,
2647 const struct bpf_func_state *func,
2648 int off)
2649{
9c8105bd 2650 u16 stack = env->subprog_info[func->subprogno].stack_depth;
f4d7e40a
AS
2651
2652 if (stack >= -off)
2653 return 0;
2654
2655 /* update known max for given subprogram */
9c8105bd 2656 env->subprog_info[func->subprogno].stack_depth = -off;
70a87ffe
AS
2657 return 0;
2658}
f4d7e40a 2659
70a87ffe
AS
2660/* starting from the main bpf function, walk all instructions of the function
2661 * and recursively walk all callees that the given function can call.
2662 * Ignore jump and exit insns.
2663 * Since recursion is prevented by check_cfg() this algorithm
2664 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
2665 */
2666static int check_max_stack_depth(struct bpf_verifier_env *env)
2667{
9c8105bd
JW
2668 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
2669 struct bpf_subprog_info *subprog = env->subprog_info;
70a87ffe 2670 struct bpf_insn *insn = env->prog->insnsi;
70a87ffe
AS
2671 int ret_insn[MAX_CALL_FRAMES];
2672 int ret_prog[MAX_CALL_FRAMES];
f4d7e40a 2673
70a87ffe
AS
2674process_func:
2675 /* round up to 32-bytes, since this is granularity
2676 * of interpreter stack size
2677 */
9c8105bd 2678 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe 2679 if (depth > MAX_BPF_STACK) {
f4d7e40a 2680 verbose(env, "combined stack size of %d calls is %d. Too large\n",
70a87ffe 2681 frame + 1, depth);
f4d7e40a
AS
2682 return -EACCES;
2683 }
70a87ffe 2684continue_func:
4cb3d99c 2685 subprog_end = subprog[idx + 1].start;
70a87ffe
AS
2686 for (; i < subprog_end; i++) {
2687 if (insn[i].code != (BPF_JMP | BPF_CALL))
2688 continue;
2689 if (insn[i].src_reg != BPF_PSEUDO_CALL)
2690 continue;
2691 /* remember insn and function to return to */
2692 ret_insn[frame] = i + 1;
9c8105bd 2693 ret_prog[frame] = idx;
70a87ffe
AS
2694
2695 /* find the callee */
2696 i = i + insn[i].imm + 1;
9c8105bd
JW
2697 idx = find_subprog(env, i);
2698 if (idx < 0) {
70a87ffe
AS
2699 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2700 i);
2701 return -EFAULT;
2702 }
70a87ffe
AS
2703 frame++;
2704 if (frame >= MAX_CALL_FRAMES) {
927cb781
PC
2705 verbose(env, "the call stack of %d frames is too deep !\n",
2706 frame);
2707 return -E2BIG;
70a87ffe
AS
2708 }
2709 goto process_func;
2710 }
2711 /* end of for() loop means the last insn of the 'subprog'
2712 * was reached. Doesn't matter whether it was JA or EXIT
2713 */
2714 if (frame == 0)
2715 return 0;
9c8105bd 2716 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe
AS
2717 frame--;
2718 i = ret_insn[frame];
9c8105bd 2719 idx = ret_prog[frame];
70a87ffe 2720 goto continue_func;
f4d7e40a
AS
2721}
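To illustrate the 32-byte rounding above (a sketch; the subprog depths are hypothetical), suppose main uses 40 bytes of stack and calls one subprog that uses 24:

/*   main:   round_up(max(40, 1), 32) = 64
 *   callee: round_up(max(24, 1), 32) = 32
 *   total:  96 <= MAX_BPF_STACK (512) -> the call chain is accepted
 */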
2722
19d28fbd 2723#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
2724static int get_callee_stack_depth(struct bpf_verifier_env *env,
2725 const struct bpf_insn *insn, int idx)
2726{
2727 int start = idx + insn->imm + 1, subprog;
2728
2729 subprog = find_subprog(env, start);
2730 if (subprog < 0) {
2731 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2732 start);
2733 return -EFAULT;
2734 }
9c8105bd 2735 return env->subprog_info[subprog].stack_depth;
1ea47e01 2736}
19d28fbd 2737#endif
1ea47e01 2738
51c39bb1
AS
2739int check_ctx_reg(struct bpf_verifier_env *env,
2740 const struct bpf_reg_state *reg, int regno)
58990d1f
DB
2741{
2742 /* Access to ctx or passing it to a helper is only allowed in
2743 * its original, unmodified form.
2744 */
2745
2746 if (reg->off) {
2747 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
2748 regno, reg->off);
2749 return -EACCES;
2750 }
2751
2752 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2753 char tn_buf[48];
2754
2755 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2756 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
2757 return -EACCES;
2758 }
2759
2760 return 0;
2761}
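An example of what check_ctx_reg() rejects (a sketch): any dereference of a ctx pointer that has been moved, e.g.:

/*   r1 += 8                 // ctx pointer now carries reg->off == 8
 *   r0 = *(u32 *)(r1 + 0)   // -> "dereference of modified ctx ptr R1
 *                           //     off=8 disallowed"
 */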
2762
9df1c28b
MM
2763static int check_tp_buffer_access(struct bpf_verifier_env *env,
2764 const struct bpf_reg_state *reg,
2765 int regno, int off, int size)
2766{
2767 if (off < 0) {
2768 verbose(env,
2769 "R%d invalid tracepoint buffer access: off=%d, size=%d",
2770 regno, off, size);
2771 return -EACCES;
2772 }
2773 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2774 char tn_buf[48];
2775
2776 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2777 verbose(env,
2778 "R%d invalid variable buffer offset: off=%d, var_off=%s",
2779 regno, off, tn_buf);
2780 return -EACCES;
2781 }
2782 if (off + size > env->prog->aux->max_tp_access)
2783 env->prog->aux->max_tp_access = off + size;
2784
2785 return 0;
2786}
2787
2788
0c17d1d2
JH
2789/* truncate register to smaller size (in bytes)
2790 * must be called with size < BPF_REG_SIZE
2791 */
2792static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
2793{
2794 u64 mask;
2795
2796 /* clear high bits in bit representation */
2797 reg->var_off = tnum_cast(reg->var_off, size);
2798
2799 /* fix arithmetic bounds */
2800 mask = ((u64)1 << (size * 8)) - 1;
2801 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
2802 reg->umin_value &= mask;
2803 reg->umax_value &= mask;
2804 } else {
2805 reg->umin_value = 0;
2806 reg->umax_value = mask;
2807 }
2808 reg->smin_value = reg->umin_value;
2809 reg->smax_value = reg->umax_value;
2810}
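A self-contained demonstration of the bounds fixup above (plain user-space C, not kernel code): truncating to 2 bytes keeps the bounds only when the discarded high bits of umin and umax agree:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int size = 2;					  /* truncate to u16 */
	uint64_t mask = ((uint64_t)1 << (size * 8)) - 1;  /* 0xffff */
	uint64_t umin = 0x10005, umax = 0x100ff;	  /* same high bits */

	if ((umin & ~mask) == (umax & ~mask))		  /* high bits agree */
		printf("kept: [%llu, %llu]\n",		  /* -> [5, 255] */
		       (unsigned long long)(umin & mask),
		       (unsigned long long)(umax & mask));
	else
		printf("reset: [0, %llu]\n", (unsigned long long)mask);
	return 0;
}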
2811
a23740ec
AN
2812static bool bpf_map_is_rdonly(const struct bpf_map *map)
2813{
2814 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
2815}
2816
2817static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
2818{
2819 void *ptr;
2820 u64 addr;
2821 int err;
2822
2823 err = map->ops->map_direct_value_addr(map, &addr, off);
2824 if (err)
2825 return err;
2dedd7d2 2826 ptr = (void *)(long)addr + off;
a23740ec
AN
2827
2828 switch (size) {
2829 case sizeof(u8):
2830 *val = (u64)*(u8 *)ptr;
2831 break;
2832 case sizeof(u16):
2833 *val = (u64)*(u16 *)ptr;
2834 break;
2835 case sizeof(u32):
2836 *val = (u64)*(u32 *)ptr;
2837 break;
2838 case sizeof(u64):
2839 *val = *(u64 *)ptr;
2840 break;
2841 default:
2842 return -EINVAL;
2843 }
2844 return 0;
2845}
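As used from check_mem_access() further below, this lets a load at a constant offset from a frozen read-only map be folded into a known scalar at verification time; a sketch of the effect (rdonly_map is a hypothetical name):

/*   r0 = bpf_map_lookup_elem(&rdonly_map, &key) // BPF_F_RDONLY_PROG + frozen
 *   r1 = *(u32 *)(r0 + 0)   // constant offset into a read-only map:
 *                           // the verifier reads the value itself and
 *                           // marks r1 as a known constant scalar.
 */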
2846
9e15db66
AS
2847static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
2848 struct bpf_reg_state *regs,
2849 int regno, int off, int size,
2850 enum bpf_access_type atype,
2851 int value_regno)
2852{
2853 struct bpf_reg_state *reg = regs + regno;
2854 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
2855 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
2856 u32 btf_id;
2857 int ret;
2858
9e15db66
AS
2859 if (off < 0) {
2860 verbose(env,
2861 "R%d is ptr_%s invalid negative access: off=%d\n",
2862 regno, tname, off);
2863 return -EACCES;
2864 }
2865 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2866 char tn_buf[48];
2867
2868 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2869 verbose(env,
2870 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
2871 regno, tname, off, tn_buf);
2872 return -EACCES;
2873 }
2874
27ae7997
MKL
2875 if (env->ops->btf_struct_access) {
2876 ret = env->ops->btf_struct_access(&env->log, t, off, size,
2877 atype, &btf_id);
2878 } else {
2879 if (atype != BPF_READ) {
2880 verbose(env, "only read is supported\n");
2881 return -EACCES;
2882 }
2883
2884 ret = btf_struct_access(&env->log, t, off, size, atype,
2885 &btf_id);
2886 }
2887
9e15db66
AS
2888 if (ret < 0)
2889 return ret;
2890
27ae7997
MKL
2891 if (atype == BPF_READ) {
2892 if (ret == SCALAR_VALUE) {
2893 mark_reg_unknown(env, regs, value_regno);
2894 return 0;
2895 }
2896 mark_reg_known_zero(env, regs, value_regno);
2897 regs[value_regno].type = PTR_TO_BTF_ID;
2898 regs[value_regno].btf_id = btf_id;
9e15db66 2899 }
27ae7997 2900
9e15db66
AS
2901 return 0;
2902}
2903
17a52670
AS
2904/* check whether memory at (regno + off) is accessible for t = (read | write)
2905 * if t==write, value_regno is a register whose value is stored into memory
2906 * if t==read, value_regno is a register which will receive the value from memory
2907 * if t==write && value_regno==-1, some unknown value is stored into memory
2908 * if t==read && value_regno==-1, don't care what we read from memory
2909 */
ca369602
DB
2910static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
2911 int off, int bpf_size, enum bpf_access_type t,
2912 int value_regno, bool strict_alignment_once)
17a52670 2913{
638f5b90
AS
2914 struct bpf_reg_state *regs = cur_regs(env);
2915 struct bpf_reg_state *reg = regs + regno;
f4d7e40a 2916 struct bpf_func_state *state;
17a52670
AS
2917 int size, err = 0;
2918
2919 size = bpf_size_to_bytes(bpf_size);
2920 if (size < 0)
2921 return size;
2922
f1174f77 2923 /* alignment checks will add in reg->off themselves */
ca369602 2924 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
969bf05e
AS
2925 if (err)
2926 return err;
17a52670 2927
f1174f77
EC
2928 /* for access checks, reg->off is just part of off */
2929 off += reg->off;
2930
2931 if (reg->type == PTR_TO_MAP_VALUE) {
1be7f75d
AS
2932 if (t == BPF_WRITE && value_regno >= 0 &&
2933 is_pointer_value(env, value_regno)) {
61bd5218 2934 verbose(env, "R%d leaks addr into map\n", value_regno);
1be7f75d
AS
2935 return -EACCES;
2936 }
591fe988
DB
2937 err = check_map_access_type(env, regno, off, size, t);
2938 if (err)
2939 return err;
9fd29c08 2940 err = check_map_access(env, regno, off, size, false);
a23740ec
AN
2941 if (!err && t == BPF_READ && value_regno >= 0) {
2942 struct bpf_map *map = reg->map_ptr;
2943
2944 /* if map is read-only, track its contents as scalars */
2945 if (tnum_is_const(reg->var_off) &&
2946 bpf_map_is_rdonly(map) &&
2947 map->ops->map_direct_value_addr) {
2948 int map_off = off + reg->var_off.value;
2949 u64 val = 0;
2950
2951 err = bpf_map_direct_read(map, map_off, size,
2952 &val);
2953 if (err)
2954 return err;
2955
2956 regs[value_regno].type = SCALAR_VALUE;
2957 __mark_reg_known(&regs[value_regno], val);
2958 } else {
2959 mark_reg_unknown(env, regs, value_regno);
2960 }
2961 }
1a0dc1ac 2962 } else if (reg->type == PTR_TO_CTX) {
f1174f77 2963 enum bpf_reg_type reg_type = SCALAR_VALUE;
9e15db66 2964 u32 btf_id = 0;
19de99f7 2965
1be7f75d
AS
2966 if (t == BPF_WRITE && value_regno >= 0 &&
2967 is_pointer_value(env, value_regno)) {
61bd5218 2968 verbose(env, "R%d leaks addr into ctx\n", value_regno);
1be7f75d
AS
2969 return -EACCES;
2970 }
f1174f77 2971
58990d1f
DB
2972 err = check_ctx_reg(env, reg, regno);
2973 if (err < 0)
2974 return err;
2975
9e15db66
AS
2976 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
2977 if (err)
2978 verbose_linfo(env, insn_idx, "; ");
969bf05e 2979 if (!err && t == BPF_READ && value_regno >= 0) {
f1174f77 2980 /* ctx access returns either a scalar, or a
de8f3a83
DB
2981 * PTR_TO_PACKET[_META,_END]. In the latter
2982 * case, we know the offset is zero.
f1174f77 2983 */
46f8bc92 2984 if (reg_type == SCALAR_VALUE) {
638f5b90 2985 mark_reg_unknown(env, regs, value_regno);
46f8bc92 2986 } else {
638f5b90 2987 mark_reg_known_zero(env, regs,
61bd5218 2988 value_regno);
46f8bc92
MKL
2989 if (reg_type_may_be_null(reg_type))
2990 regs[value_regno].id = ++env->id_gen;
5327ed3d
JW
2991 /* A load of a ctx field could have an actual
2992 * load size different from the one encoded in the
2993 * insn. When the dst is PTR, it is for sure not
2994 * a sub-register.
2995 */
2996 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
9e15db66
AS
2997 if (reg_type == PTR_TO_BTF_ID)
2998 regs[value_regno].btf_id = btf_id;
46f8bc92 2999 }
638f5b90 3000 regs[value_regno].type = reg_type;
969bf05e 3001 }
17a52670 3002
f1174f77 3003 } else if (reg->type == PTR_TO_STACK) {
f1174f77 3004 off += reg->var_off.value;
e4298d25
DB
3005 err = check_stack_access(env, reg, off, size);
3006 if (err)
3007 return err;
8726679a 3008
f4d7e40a
AS
3009 state = func(env, reg);
3010 err = update_stack_depth(env, state, off);
3011 if (err)
3012 return err;
8726679a 3013
638f5b90 3014 if (t == BPF_WRITE)
61bd5218 3015 err = check_stack_write(env, state, off, size,
af86ca4e 3016 value_regno, insn_idx);
638f5b90 3017 else
61bd5218
JK
3018 err = check_stack_read(env, state, off, size,
3019 value_regno);
de8f3a83 3020 } else if (reg_is_pkt_pointer(reg)) {
3a0af8fd 3021 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
61bd5218 3022 verbose(env, "cannot write into packet\n");
969bf05e
AS
3023 return -EACCES;
3024 }
4acf6c0b
BB
3025 if (t == BPF_WRITE && value_regno >= 0 &&
3026 is_pointer_value(env, value_regno)) {
61bd5218
JK
3027 verbose(env, "R%d leaks addr into packet\n",
3028 value_regno);
4acf6c0b
BB
3029 return -EACCES;
3030 }
9fd29c08 3031 err = check_packet_access(env, regno, off, size, false);
969bf05e 3032 if (!err && t == BPF_READ && value_regno >= 0)
638f5b90 3033 mark_reg_unknown(env, regs, value_regno);
d58e468b
PP
3034 } else if (reg->type == PTR_TO_FLOW_KEYS) {
3035 if (t == BPF_WRITE && value_regno >= 0 &&
3036 is_pointer_value(env, value_regno)) {
3037 verbose(env, "R%d leaks addr into flow keys\n",
3038 value_regno);
3039 return -EACCES;
3040 }
3041
3042 err = check_flow_keys_access(env, off, size);
3043 if (!err && t == BPF_READ && value_regno >= 0)
3044 mark_reg_unknown(env, regs, value_regno);
46f8bc92 3045 } else if (type_is_sk_pointer(reg->type)) {
c64b7983 3046 if (t == BPF_WRITE) {
46f8bc92
MKL
3047 verbose(env, "R%d cannot write into %s\n",
3048 regno, reg_type_str[reg->type]);
c64b7983
JS
3049 return -EACCES;
3050 }
5f456649 3051 err = check_sock_access(env, insn_idx, regno, off, size, t);
c64b7983
JS
3052 if (!err && value_regno >= 0)
3053 mark_reg_unknown(env, regs, value_regno);
9df1c28b
MM
3054 } else if (reg->type == PTR_TO_TP_BUFFER) {
3055 err = check_tp_buffer_access(env, reg, regno, off, size);
3056 if (!err && t == BPF_READ && value_regno >= 0)
3057 mark_reg_unknown(env, regs, value_regno);
9e15db66
AS
3058 } else if (reg->type == PTR_TO_BTF_ID) {
3059 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
3060 value_regno);
17a52670 3061 } else {
61bd5218
JK
3062 verbose(env, "R%d invalid mem access '%s'\n", regno,
3063 reg_type_str[reg->type]);
17a52670
AS
3064 return -EACCES;
3065 }
969bf05e 3066
f1174f77 3067 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
638f5b90 3068 regs[value_regno].type == SCALAR_VALUE) {
f1174f77 3069 /* b/h/w load zero-extends, mark upper bits as known 0 */
0c17d1d2 3070 coerce_reg_to_size(&regs[value_regno], size);
969bf05e 3071 }
17a52670
AS
3072 return err;
3073}
3074
31fd8581 3075static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
17a52670 3076{
17a52670
AS
3077 int err;
3078
3079 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
3080 insn->imm != 0) {
61bd5218 3081 verbose(env, "BPF_XADD uses reserved fields\n");
17a52670
AS
3082 return -EINVAL;
3083 }
3084
3085 /* check src1 operand */
dc503a8a 3086 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
3087 if (err)
3088 return err;
3089
3090 /* check src2 operand */
dc503a8a 3091 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
3092 if (err)
3093 return err;
3094
6bdf6abc 3095 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 3096 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
6bdf6abc
DB
3097 return -EACCES;
3098 }
3099
ca369602 3100 if (is_ctx_reg(env, insn->dst_reg) ||
4b5defde 3101 is_pkt_reg(env, insn->dst_reg) ||
46f8bc92
MKL
3102 is_flow_key_reg(env, insn->dst_reg) ||
3103 is_sk_reg(env, insn->dst_reg)) {
ca369602 3104 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
2a159c6f
DB
3105 insn->dst_reg,
3106 reg_type_str[reg_state(env, insn->dst_reg)->type]);
f37a8cb8
DB
3107 return -EACCES;
3108 }
3109
17a52670 3110 /* check whether atomic_add can read the memory */
31fd8581 3111 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3112 BPF_SIZE(insn->code), BPF_READ, -1, true);
17a52670
AS
3113 if (err)
3114 return err;
3115
3116 /* check whether atomic_add can write into the same memory */
31fd8581 3117 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3118 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
17a52670
AS
3119}
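For reference, a minimal sequence that reaches check_xadd() (a sketch using the insn macros from include/linux/filter.h):

/*   BPF_MOV64_IMM(BPF_REG_0, 1),
 *   BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),  // initialize fp-8
 *   BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8), // fp-8 += r0
 * both the read and the write of fp-8 above go through check_mem_access().
 */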
3120
2011fccf
AI
3121static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
3122 int off, int access_size,
3123 bool zero_size_allowed)
3124{
3125 struct bpf_reg_state *reg = reg_state(env, regno);
3126
3127 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
3128 access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
3129 if (tnum_is_const(reg->var_off)) {
3130 verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
3131 regno, off, access_size);
3132 } else {
3133 char tn_buf[48];
3134
3135 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3136 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
3137 regno, tn_buf, access_size);
3138 }
3139 return -EACCES;
3140 }
3141 return 0;
3142}
3143
17a52670
AS
3144/* when register 'regno' is passed into a function that will read 'access_size'
3145 * bytes from that pointer, make sure that it's within the stack boundary
f1174f77
EC
3146 * and all elements of stack are initialized.
3147 * Unlike most pointer bounds-checking functions, this one doesn't take an
3148 * 'off' argument, so it has to add in reg->off itself.
17a52670 3149 */
58e2af8b 3150static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
435faee1
DB
3151 int access_size, bool zero_size_allowed,
3152 struct bpf_call_arg_meta *meta)
17a52670 3153{
2a159c6f 3154 struct bpf_reg_state *reg = reg_state(env, regno);
f4d7e40a 3155 struct bpf_func_state *state = func(env, reg);
f7cf25b2 3156 int err, min_off, max_off, i, j, slot, spi;
17a52670 3157
914cb781 3158 if (reg->type != PTR_TO_STACK) {
f1174f77 3159 /* Allow zero-byte read from NULL, regardless of pointer type */
8e2fe1d9 3160 if (zero_size_allowed && access_size == 0 &&
914cb781 3161 register_is_null(reg))
8e2fe1d9
DB
3162 return 0;
3163
61bd5218 3164 verbose(env, "R%d type=%s expected=%s\n", regno,
914cb781 3165 reg_type_str[reg->type],
8e2fe1d9 3166 reg_type_str[PTR_TO_STACK]);
17a52670 3167 return -EACCES;
8e2fe1d9 3168 }
17a52670 3169
2011fccf
AI
3170 if (tnum_is_const(reg->var_off)) {
3171 min_off = max_off = reg->var_off.value + reg->off;
3172 err = __check_stack_boundary(env, regno, min_off, access_size,
3173 zero_size_allowed);
3174 if (err)
3175 return err;
3176 } else {
088ec26d
AI
3177 /* Variable offset is prohibited for unprivileged mode for
3178 * simplicity since it requires corresponding support in
3179 * Spectre masking for stack ALU.
3180 * See also retrieve_ptr_limit().
3181 */
3182 if (!env->allow_ptr_leaks) {
3183 char tn_buf[48];
f1174f77 3184
088ec26d
AI
3185 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3186 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
3187 regno, tn_buf);
3188 return -EACCES;
3189 }
f2bcd05e
AI
3190 /* Only an initialized buffer on the stack is allowed to be accessed
3191 * with a variable offset. With an uninitialized buffer it's hard to
3192 * guarantee that the whole memory is marked as initialized on
3193 * helper return, since the specific bounds are unknown, which may
3194 * cause uninitialized stack leaking.
3195 */
3196 if (meta && meta->raw_mode)
3197 meta = NULL;
3198
107c26a7
AI
3199 if (reg->smax_value >= BPF_MAX_VAR_OFF ||
3200 reg->smax_value <= -BPF_MAX_VAR_OFF) {
3201 verbose(env, "R%d unbounded indirect variable offset stack access\n",
3202 regno);
3203 return -EACCES;
3204 }
2011fccf 3205 min_off = reg->smin_value + reg->off;
107c26a7 3206 max_off = reg->smax_value + reg->off;
2011fccf
AI
3207 err = __check_stack_boundary(env, regno, min_off, access_size,
3208 zero_size_allowed);
107c26a7
AI
3209 if (err) {
3210 verbose(env, "R%d min value is outside of stack bound\n",
3211 regno);
2011fccf 3212 return err;
107c26a7 3213 }
2011fccf
AI
3214 err = __check_stack_boundary(env, regno, max_off, access_size,
3215 zero_size_allowed);
107c26a7
AI
3216 if (err) {
3217 verbose(env, "R%d max value is outside of stack bound\n",
3218 regno);
2011fccf 3219 return err;
107c26a7 3220 }
17a52670
AS
3221 }
3222
435faee1
DB
3223 if (meta && meta->raw_mode) {
3224 meta->access_size = access_size;
3225 meta->regno = regno;
3226 return 0;
3227 }
3228
2011fccf 3229 for (i = min_off; i < max_off + access_size; i++) {
cc2b14d5
AS
3230 u8 *stype;
3231
2011fccf 3232 slot = -i - 1;
638f5b90 3233 spi = slot / BPF_REG_SIZE;
cc2b14d5
AS
3234 if (state->allocated_stack <= slot)
3235 goto err;
3236 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3237 if (*stype == STACK_MISC)
3238 goto mark;
3239 if (*stype == STACK_ZERO) {
3240 /* helper can write anything into the stack */
3241 *stype = STACK_MISC;
3242 goto mark;
17a52670 3243 }
f7cf25b2
AS
3244 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
3245 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
f54c7898 3246 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
f7cf25b2
AS
3247 for (j = 0; j < BPF_REG_SIZE; j++)
3248 state->stack[spi].slot_type[j] = STACK_MISC;
3249 goto mark;
3250 }
3251
cc2b14d5 3252err:
2011fccf
AI
3253 if (tnum_is_const(reg->var_off)) {
3254 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
3255 min_off, i - min_off, access_size);
3256 } else {
3257 char tn_buf[48];
3258
3259 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3260 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
3261 tn_buf, i - min_off, access_size);
3262 }
cc2b14d5
AS
3263 return -EACCES;
3264mark:
3265 /* reading any byte out of 8-byte 'spill_slot' will cause
3266 * the whole slot to be marked as 'read'
3267 */
679c782d 3268 mark_reg_read(env, &state->stack[spi].spilled_ptr,
5327ed3d
JW
3269 state->stack[spi].spilled_ptr.parent,
3270 REG_LIVE_READ64);
17a52670 3271 }
2011fccf 3272 return update_stack_depth(env, state, min_off);
17a52670
AS
3273}
3274
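On the BPF program side, the rule enforced by the walk above is simply that every byte a helper will read from the stack must have been written first. A minimal sketch (assuming libbpf's <bpf/bpf_helpers.h>; the map and function names are made up):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("socket")
int stack_key_example(struct __sk_buff *skb)
{
	__u32 key;	/* a stack slot, STACK_INVALID until written */

	key = 0;	/* dropping this store makes check_stack_boundary()
			 * fail with "invalid indirect read from stack" */
	return bpf_map_lookup_elem(&counters, &key) ? 1 : 0;
}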
06c1c049
GB
3275static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
3276 int access_size, bool zero_size_allowed,
3277 struct bpf_call_arg_meta *meta)
3278{
638f5b90 3279 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
06c1c049 3280
f1174f77 3281 switch (reg->type) {
06c1c049 3282 case PTR_TO_PACKET:
de8f3a83 3283 case PTR_TO_PACKET_META:
9fd29c08
YS
3284 return check_packet_access(env, regno, reg->off, access_size,
3285 zero_size_allowed);
06c1c049 3286 case PTR_TO_MAP_VALUE:
591fe988
DB
3287 if (check_map_access_type(env, regno, reg->off, access_size,
3288 meta && meta->raw_mode ? BPF_WRITE :
3289 BPF_READ))
3290 return -EACCES;
9fd29c08
YS
3291 return check_map_access(env, regno, reg->off, access_size,
3292 zero_size_allowed);
f1174f77 3293 default: /* scalar_value|ptr_to_stack or invalid ptr */
06c1c049
GB
3294 return check_stack_boundary(env, regno, access_size,
3295 zero_size_allowed, meta);
3296 }
3297}
3298
d83525ca
AS
3299/* Implementation details:
3300 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
3301 * Two bpf_map_lookups (even with the same key) will have different reg->id.
3302 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
3303 * value_or_null->value transition, since the verifier only cares about
3304 * the range of access to valid map value pointer and doesn't care about actual
3305 * address of the map element.
3306 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
3307 * reg->id > 0 after value_or_null->value transition. By doing so
3308 * two bpf_map_lookups will be considered two different pointers that
3309 * point to different bpf_spin_locks.
3310 * The verifier allows taking only one bpf_spin_lock at a time to avoid
 3311	 * deadlocks.
3312 * Since only one bpf_spin_lock is allowed the checks are simpler than
3313 * reg_is_refcounted() logic. The verifier needs to remember only
3314 * one spin_lock instead of array of acquired_refs.
3315 * cur_state->active_spin_lock remembers which map value element got locked
3316 * and clears it after bpf_spin_unlock.
3317 */
3318static int process_spin_lock(struct bpf_verifier_env *env, int regno,
3319 bool is_lock)
3320{
3321 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3322 struct bpf_verifier_state *cur = env->cur_state;
3323 bool is_const = tnum_is_const(reg->var_off);
3324 struct bpf_map *map = reg->map_ptr;
3325 u64 val = reg->var_off.value;
3326
3327 if (reg->type != PTR_TO_MAP_VALUE) {
3328 verbose(env, "R%d is not a pointer to map_value\n", regno);
3329 return -EINVAL;
3330 }
3331 if (!is_const) {
3332 verbose(env,
3333 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
3334 regno);
3335 return -EINVAL;
3336 }
3337 if (!map->btf) {
3338 verbose(env,
3339 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
3340 map->name);
3341 return -EINVAL;
3342 }
3343 if (!map_value_has_spin_lock(map)) {
3344 if (map->spin_lock_off == -E2BIG)
3345 verbose(env,
3346 "map '%s' has more than one 'struct bpf_spin_lock'\n",
3347 map->name);
3348 else if (map->spin_lock_off == -ENOENT)
3349 verbose(env,
3350 "map '%s' doesn't have 'struct bpf_spin_lock'\n",
3351 map->name);
3352 else
3353 verbose(env,
3354 "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
3355 map->name);
3356 return -EINVAL;
3357 }
3358 if (map->spin_lock_off != val + reg->off) {
3359 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
3360 val + reg->off);
3361 return -EINVAL;
3362 }
3363 if (is_lock) {
3364 if (cur->active_spin_lock) {
3365 verbose(env,
3366 "Locking two bpf_spin_locks are not allowed\n");
3367 return -EINVAL;
3368 }
3369 cur->active_spin_lock = reg->id;
3370 } else {
3371 if (!cur->active_spin_lock) {
3372 verbose(env, "bpf_spin_unlock without taking a lock\n");
3373 return -EINVAL;
3374 }
3375 if (cur->active_spin_lock != reg->id) {
3376 verbose(env, "bpf_spin_unlock of different lock\n");
3377 return -EINVAL;
3378 }
3379 cur->active_spin_lock = 0;
3380 }
3381 return 0;
3382}
3383
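A BPF-side sketch of the locking contract described above, assuming libbpf headers and a BTF-enabled map definition (all names are illustrative). The value type embeds exactly one struct bpf_spin_lock, and every bpf_spin_lock() is paired with a bpf_spin_unlock() on the same element before another lock may be taken:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct val_with_lock {
	struct bpf_spin_lock lock;	/* located via the map's BTF */
	__u64 counter;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct val_with_lock);
} locked_map SEC(".maps");

SEC("classifier")
int spin_lock_example(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct val_with_lock *v;

	v = bpf_map_lookup_elem(&locked_map, &key);
	if (!v)
		return 0;
	bpf_spin_lock(&v->lock);	/* cur->active_spin_lock = reg->id */
	v->counter++;
	bpf_spin_unlock(&v->lock);	/* must see the same reg->id */
	return 0;
}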
90133415
DB
3384static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
3385{
3386 return type == ARG_PTR_TO_MEM ||
3387 type == ARG_PTR_TO_MEM_OR_NULL ||
3388 type == ARG_PTR_TO_UNINIT_MEM;
3389}
3390
3391static bool arg_type_is_mem_size(enum bpf_arg_type type)
3392{
3393 return type == ARG_CONST_SIZE ||
3394 type == ARG_CONST_SIZE_OR_ZERO;
3395}
3396
57c3bb72
AI
3397static bool arg_type_is_int_ptr(enum bpf_arg_type type)
3398{
3399 return type == ARG_PTR_TO_INT ||
3400 type == ARG_PTR_TO_LONG;
3401}
3402
3403static int int_ptr_type_to_size(enum bpf_arg_type type)
3404{
3405 if (type == ARG_PTR_TO_INT)
3406 return sizeof(u32);
3407 else if (type == ARG_PTR_TO_LONG)
3408 return sizeof(u64);
3409
3410 return -EINVAL;
3411}
3412
58e2af8b 3413static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
33ff9823
DB
3414 enum bpf_arg_type arg_type,
3415 struct bpf_call_arg_meta *meta)
17a52670 3416{
638f5b90 3417 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6841de8b 3418 enum bpf_reg_type expected_type, type = reg->type;
17a52670
AS
3419 int err = 0;
3420
80f1d68c 3421 if (arg_type == ARG_DONTCARE)
17a52670
AS
3422 return 0;
3423
dc503a8a
EC
3424 err = check_reg_arg(env, regno, SRC_OP);
3425 if (err)
3426 return err;
17a52670 3427
1be7f75d
AS
3428 if (arg_type == ARG_ANYTHING) {
3429 if (is_pointer_value(env, regno)) {
61bd5218
JK
3430 verbose(env, "R%d leaks addr into helper function\n",
3431 regno);
1be7f75d
AS
3432 return -EACCES;
3433 }
80f1d68c 3434 return 0;
1be7f75d 3435 }
80f1d68c 3436
de8f3a83 3437 if (type_is_pkt_pointer(type) &&
3a0af8fd 3438 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
61bd5218 3439 verbose(env, "helper access to the packet is not allowed\n");
6841de8b
AS
3440 return -EACCES;
3441 }
3442
8e2fe1d9 3443 if (arg_type == ARG_PTR_TO_MAP_KEY ||
2ea864c5 3444 arg_type == ARG_PTR_TO_MAP_VALUE ||
6ac99e8f
MKL
3445 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
3446 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
17a52670 3447 expected_type = PTR_TO_STACK;
6ac99e8f
MKL
3448 if (register_is_null(reg) &&
3449 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
3450 /* final test in check_stack_boundary() */;
3451 else if (!type_is_pkt_pointer(type) &&
3452 type != PTR_TO_MAP_VALUE &&
3453 type != expected_type)
6841de8b 3454 goto err_type;
39f19ebb
AS
3455 } else if (arg_type == ARG_CONST_SIZE ||
3456 arg_type == ARG_CONST_SIZE_OR_ZERO) {
f1174f77
EC
3457 expected_type = SCALAR_VALUE;
3458 if (type != expected_type)
6841de8b 3459 goto err_type;
17a52670
AS
3460 } else if (arg_type == ARG_CONST_MAP_PTR) {
3461 expected_type = CONST_PTR_TO_MAP;
6841de8b
AS
3462 if (type != expected_type)
3463 goto err_type;
f318903c
DB
3464 } else if (arg_type == ARG_PTR_TO_CTX ||
3465 arg_type == ARG_PTR_TO_CTX_OR_NULL) {
608cd71a 3466 expected_type = PTR_TO_CTX;
f318903c
DB
3467 if (!(register_is_null(reg) &&
3468 arg_type == ARG_PTR_TO_CTX_OR_NULL)) {
3469 if (type != expected_type)
3470 goto err_type;
3471 err = check_ctx_reg(env, reg, regno);
3472 if (err < 0)
3473 return err;
3474 }
46f8bc92
MKL
3475 } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
3476 expected_type = PTR_TO_SOCK_COMMON;
3477 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
3478 if (!type_is_sk_pointer(type))
3479 goto err_type;
1b986589
MKL
3480 if (reg->ref_obj_id) {
3481 if (meta->ref_obj_id) {
3482 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
3483 regno, reg->ref_obj_id,
3484 meta->ref_obj_id);
3485 return -EFAULT;
3486 }
3487 meta->ref_obj_id = reg->ref_obj_id;
fd978bf7 3488 }
6ac99e8f
MKL
3489 } else if (arg_type == ARG_PTR_TO_SOCKET) {
3490 expected_type = PTR_TO_SOCKET;
3491 if (type != expected_type)
3492 goto err_type;
a7658e1a
AS
3493 } else if (arg_type == ARG_PTR_TO_BTF_ID) {
3494 expected_type = PTR_TO_BTF_ID;
3495 if (type != expected_type)
3496 goto err_type;
3497 if (reg->btf_id != meta->btf_id) {
3498 verbose(env, "Helper has type %s got %s in R%d\n",
3499 kernel_type_name(meta->btf_id),
3500 kernel_type_name(reg->btf_id), regno);
3501
3502 return -EACCES;
3503 }
3504 if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) {
3505 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
3506 regno);
3507 return -EACCES;
3508 }
d83525ca
AS
3509 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
3510 if (meta->func_id == BPF_FUNC_spin_lock) {
3511 if (process_spin_lock(env, regno, true))
3512 return -EACCES;
3513 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
3514 if (process_spin_lock(env, regno, false))
3515 return -EACCES;
3516 } else {
3517 verbose(env, "verifier internal error\n");
3518 return -EFAULT;
3519 }
90133415 3520 } else if (arg_type_is_mem_ptr(arg_type)) {
8e2fe1d9
DB
3521 expected_type = PTR_TO_STACK;
3522 /* One exception here. In case function allows for NULL to be
f1174f77 3523 * passed in as argument, it's a SCALAR_VALUE type. Final test
8e2fe1d9
DB
3524 * happens during stack boundary checking.
3525 */
914cb781 3526 if (register_is_null(reg) &&
db1ac496 3527 arg_type == ARG_PTR_TO_MEM_OR_NULL)
6841de8b 3528 /* final test in check_stack_boundary() */;
de8f3a83
DB
3529 else if (!type_is_pkt_pointer(type) &&
3530 type != PTR_TO_MAP_VALUE &&
f1174f77 3531 type != expected_type)
6841de8b 3532 goto err_type;
39f19ebb 3533 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
57c3bb72
AI
3534 } else if (arg_type_is_int_ptr(arg_type)) {
3535 expected_type = PTR_TO_STACK;
3536 if (!type_is_pkt_pointer(type) &&
3537 type != PTR_TO_MAP_VALUE &&
3538 type != expected_type)
3539 goto err_type;
17a52670 3540 } else {
61bd5218 3541 verbose(env, "unsupported arg_type %d\n", arg_type);
17a52670
AS
3542 return -EFAULT;
3543 }
3544
17a52670
AS
3545 if (arg_type == ARG_CONST_MAP_PTR) {
3546 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
33ff9823 3547 meta->map_ptr = reg->map_ptr;
17a52670
AS
3548 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
3549 /* bpf_map_xxx(..., map_ptr, ..., key) call:
3550 * check that [key, key + map->key_size) are within
3551 * stack limits and initialized
3552 */
33ff9823 3553 if (!meta->map_ptr) {
17a52670
AS
 3554			/* in the function declaration map_ptr must come before
 3555			 * map_key, so that it's verified and known before
 3556			 * we have to check map_key here. Otherwise it means
 3557			 * that the kernel subsystem misconfigured the verifier.
 3558			 */
61bd5218 3559 verbose(env, "invalid map_ptr to access map->key\n");
17a52670
AS
3560 return -EACCES;
3561 }
d71962f3
PC
3562 err = check_helper_mem_access(env, regno,
3563 meta->map_ptr->key_size, false,
3564 NULL);
2ea864c5 3565 } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
6ac99e8f
MKL
3566 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
3567 !register_is_null(reg)) ||
2ea864c5 3568 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
17a52670
AS
3569 /* bpf_map_xxx(..., map_ptr, ..., value) call:
3570 * check [value, value + map->value_size) validity
3571 */
33ff9823 3572 if (!meta->map_ptr) {
17a52670 3573 /* kernel subsystem misconfigured verifier */
61bd5218 3574 verbose(env, "invalid map_ptr to access map->value\n");
17a52670
AS
3575 return -EACCES;
3576 }
2ea864c5 3577 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
d71962f3
PC
3578 err = check_helper_mem_access(env, regno,
3579 meta->map_ptr->value_size, false,
2ea864c5 3580 meta);
90133415 3581 } else if (arg_type_is_mem_size(arg_type)) {
39f19ebb 3582 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
17a52670 3583
849fa506
YS
3584 /* remember the mem_size which may be used later
3585 * to refine return values.
3586 */
3587 meta->msize_smax_value = reg->smax_value;
3588 meta->msize_umax_value = reg->umax_value;
3589
f1174f77
EC
3590 /* The register is SCALAR_VALUE; the access check
3591 * happens using its boundaries.
06c1c049 3592 */
f1174f77 3593 if (!tnum_is_const(reg->var_off))
06c1c049
GB
3594 /* For unprivileged variable accesses, disable raw
3595 * mode so that the program is required to
3596 * initialize all the memory that the helper could
3597 * just partially fill up.
3598 */
3599 meta = NULL;
3600
b03c9f9f 3601 if (reg->smin_value < 0) {
61bd5218 3602 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
f1174f77
EC
3603 regno);
3604 return -EACCES;
3605 }
06c1c049 3606
b03c9f9f 3607 if (reg->umin_value == 0) {
f1174f77
EC
3608 err = check_helper_mem_access(env, regno - 1, 0,
3609 zero_size_allowed,
3610 meta);
06c1c049
GB
3611 if (err)
3612 return err;
06c1c049 3613 }
f1174f77 3614
b03c9f9f 3615 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
61bd5218 3616 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
f1174f77
EC
3617 regno);
3618 return -EACCES;
3619 }
3620 err = check_helper_mem_access(env, regno - 1,
b03c9f9f 3621 reg->umax_value,
f1174f77 3622 zero_size_allowed, meta);
b5dc0163
AS
3623 if (!err)
3624 err = mark_chain_precision(env, regno);
57c3bb72
AI
3625 } else if (arg_type_is_int_ptr(arg_type)) {
3626 int size = int_ptr_type_to_size(arg_type);
3627
3628 err = check_helper_mem_access(env, regno, size, false, meta);
3629 if (err)
3630 return err;
3631 err = check_ptr_alignment(env, reg, 0, size, true);
17a52670
AS
3632 }
3633
3634 return err;
6841de8b 3635err_type:
61bd5218 3636 verbose(env, "R%d type=%s expected=%s\n", regno,
6841de8b
AS
3637 reg_type_str[type], reg_type_str[expected_type]);
3638 return -EACCES;
17a52670
AS
3639}
3640
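To make the argument-type matching above concrete, here is a hedged BPF-side sketch (libbpf headers assumed; the kprobe target is purely illustrative) exercising the ARG_PTR_TO_UNINIT_MEM + ARG_CONST_SIZE_OR_ZERO pair of bpf_probe_read():

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_nanosleep")
int arg_types_example(struct pt_regs *ctx)
{
	char buf[64];				/* PTR_TO_STACK */
	__u64 len = bpf_get_prandom_u32() & 63;	/* SCALAR_VALUE, umax_value = 63 */

	/* a non-constant size disables raw mode (meta = NULL above), so
	 * every byte of buf must be initialized before the call */
	__builtin_memset(buf, 0, sizeof(buf));
	/* without the '& 63' bound the verifier rejects the size with
	 * "R2 unbounded memory access ..." */
	bpf_probe_read(buf, len, (void *)PT_REGS_PARM1(ctx));
	return 0;
}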
61bd5218
JK
3641static int check_map_func_compatibility(struct bpf_verifier_env *env,
3642 struct bpf_map *map, int func_id)
35578d79 3643{
35578d79
KX
3644 if (!map)
3645 return 0;
3646
6aff67c8
AS
3647 /* We need a two way check, first is from map perspective ... */
3648 switch (map->map_type) {
3649 case BPF_MAP_TYPE_PROG_ARRAY:
3650 if (func_id != BPF_FUNC_tail_call)
3651 goto error;
3652 break;
3653 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
3654 if (func_id != BPF_FUNC_perf_event_read &&
908432ca 3655 func_id != BPF_FUNC_perf_event_output &&
a7658e1a 3656 func_id != BPF_FUNC_skb_output &&
d831ee84
EC
3657 func_id != BPF_FUNC_perf_event_read_value &&
3658 func_id != BPF_FUNC_xdp_output)
6aff67c8
AS
3659 goto error;
3660 break;
3661 case BPF_MAP_TYPE_STACK_TRACE:
3662 if (func_id != BPF_FUNC_get_stackid)
3663 goto error;
3664 break;
4ed8ec52 3665 case BPF_MAP_TYPE_CGROUP_ARRAY:
60747ef4 3666 if (func_id != BPF_FUNC_skb_under_cgroup &&
60d20f91 3667 func_id != BPF_FUNC_current_task_under_cgroup)
4a482f34
MKL
3668 goto error;
3669 break;
cd339431 3670 case BPF_MAP_TYPE_CGROUP_STORAGE:
b741f163 3671 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
cd339431
RG
3672 if (func_id != BPF_FUNC_get_local_storage)
3673 goto error;
3674 break;
546ac1ff 3675 case BPF_MAP_TYPE_DEVMAP:
6f9d451a 3676 case BPF_MAP_TYPE_DEVMAP_HASH:
0cdbb4b0
THJ
3677 if (func_id != BPF_FUNC_redirect_map &&
3678 func_id != BPF_FUNC_map_lookup_elem)
546ac1ff
JF
3679 goto error;
3680 break;
fbfc504a
BT
3681 /* Restrict bpf side of cpumap and xskmap, open when use-cases
3682 * appear.
3683 */
6710e112
JDB
3684 case BPF_MAP_TYPE_CPUMAP:
3685 if (func_id != BPF_FUNC_redirect_map)
3686 goto error;
3687 break;
fada7fdc
JL
3688 case BPF_MAP_TYPE_XSKMAP:
3689 if (func_id != BPF_FUNC_redirect_map &&
3690 func_id != BPF_FUNC_map_lookup_elem)
3691 goto error;
3692 break;
56f668df 3693 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
bcc6b1b7 3694 case BPF_MAP_TYPE_HASH_OF_MAPS:
56f668df
MKL
3695 if (func_id != BPF_FUNC_map_lookup_elem)
3696 goto error;
16a43625 3697 break;
174a79ff
JF
3698 case BPF_MAP_TYPE_SOCKMAP:
3699 if (func_id != BPF_FUNC_sk_redirect_map &&
3700 func_id != BPF_FUNC_sock_map_update &&
4f738adb 3701 func_id != BPF_FUNC_map_delete_elem &&
9fed9000
JS
3702 func_id != BPF_FUNC_msg_redirect_map &&
3703 func_id != BPF_FUNC_sk_select_reuseport)
174a79ff
JF
3704 goto error;
3705 break;
81110384
JF
3706 case BPF_MAP_TYPE_SOCKHASH:
3707 if (func_id != BPF_FUNC_sk_redirect_hash &&
3708 func_id != BPF_FUNC_sock_hash_update &&
3709 func_id != BPF_FUNC_map_delete_elem &&
9fed9000
JS
3710 func_id != BPF_FUNC_msg_redirect_hash &&
3711 func_id != BPF_FUNC_sk_select_reuseport)
81110384
JF
3712 goto error;
3713 break;
2dbb9b9e
MKL
3714 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
3715 if (func_id != BPF_FUNC_sk_select_reuseport)
3716 goto error;
3717 break;
f1a2e44a
MV
3718 case BPF_MAP_TYPE_QUEUE:
3719 case BPF_MAP_TYPE_STACK:
3720 if (func_id != BPF_FUNC_map_peek_elem &&
3721 func_id != BPF_FUNC_map_pop_elem &&
3722 func_id != BPF_FUNC_map_push_elem)
3723 goto error;
3724 break;
6ac99e8f
MKL
3725 case BPF_MAP_TYPE_SK_STORAGE:
3726 if (func_id != BPF_FUNC_sk_storage_get &&
3727 func_id != BPF_FUNC_sk_storage_delete)
3728 goto error;
3729 break;
6aff67c8
AS
3730 default:
3731 break;
3732 }
3733
3734 /* ... and second from the function itself. */
3735 switch (func_id) {
3736 case BPF_FUNC_tail_call:
3737 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
3738 goto error;
f910cefa 3739 if (env->subprog_cnt > 1) {
f4d7e40a
AS
3740 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
3741 return -EINVAL;
3742 }
6aff67c8
AS
3743 break;
3744 case BPF_FUNC_perf_event_read:
3745 case BPF_FUNC_perf_event_output:
908432ca 3746 case BPF_FUNC_perf_event_read_value:
a7658e1a 3747 case BPF_FUNC_skb_output:
d831ee84 3748 case BPF_FUNC_xdp_output:
6aff67c8
AS
3749 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
3750 goto error;
3751 break;
3752 case BPF_FUNC_get_stackid:
3753 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
3754 goto error;
3755 break;
60d20f91 3756 case BPF_FUNC_current_task_under_cgroup:
747ea55e 3757 case BPF_FUNC_skb_under_cgroup:
4a482f34
MKL
3758 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
3759 goto error;
3760 break;
97f91a7c 3761 case BPF_FUNC_redirect_map:
9c270af3 3762 if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
6f9d451a 3763 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
fbfc504a
BT
3764 map->map_type != BPF_MAP_TYPE_CPUMAP &&
3765 map->map_type != BPF_MAP_TYPE_XSKMAP)
97f91a7c
JF
3766 goto error;
3767 break;
174a79ff 3768 case BPF_FUNC_sk_redirect_map:
4f738adb 3769 case BPF_FUNC_msg_redirect_map:
81110384 3770 case BPF_FUNC_sock_map_update:
174a79ff
JF
3771 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
3772 goto error;
3773 break;
81110384
JF
3774 case BPF_FUNC_sk_redirect_hash:
3775 case BPF_FUNC_msg_redirect_hash:
3776 case BPF_FUNC_sock_hash_update:
3777 if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
174a79ff
JF
3778 goto error;
3779 break;
cd339431 3780 case BPF_FUNC_get_local_storage:
b741f163
RG
3781 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
3782 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
cd339431
RG
3783 goto error;
3784 break;
2dbb9b9e 3785 case BPF_FUNC_sk_select_reuseport:
9fed9000
JS
3786 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
3787 map->map_type != BPF_MAP_TYPE_SOCKMAP &&
3788 map->map_type != BPF_MAP_TYPE_SOCKHASH)
2dbb9b9e
MKL
3789 goto error;
3790 break;
f1a2e44a
MV
3791 case BPF_FUNC_map_peek_elem:
3792 case BPF_FUNC_map_pop_elem:
3793 case BPF_FUNC_map_push_elem:
3794 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
3795 map->map_type != BPF_MAP_TYPE_STACK)
3796 goto error;
3797 break;
6ac99e8f
MKL
3798 case BPF_FUNC_sk_storage_get:
3799 case BPF_FUNC_sk_storage_delete:
3800 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
3801 goto error;
3802 break;
6aff67c8
AS
3803 default:
3804 break;
35578d79
KX
3805 }
3806
3807 return 0;
6aff67c8 3808error:
61bd5218 3809 verbose(env, "cannot pass map_type %d into func %s#%d\n",
ebb676da 3810 map->map_type, func_id_name(func_id), func_id);
6aff67c8 3811 return -EINVAL;
35578d79
KX
3812}
3813
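The two-way table above is easiest to read from an example. BPF_MAP_TYPE_PROG_ARRAY pairs only with bpf_tail_call(); a bpf_map_lookup_elem() on the same map would be rejected with "cannot pass map_type 3 into func bpf_map_lookup_elem#1". A sketch (libbpf headers assumed, names illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__type(key, __u32);
	__type(value, __u32);
} jmp_table SEC(".maps");

SEC("xdp")
int map_compat_example(struct xdp_md *ctx)
{
	bpf_tail_call(ctx, &jmp_table, 0);	/* falls through on failure */
	return XDP_PASS;
}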
90133415 3814static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
435faee1
DB
3815{
3816 int count = 0;
3817
39f19ebb 3818 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3819 count++;
39f19ebb 3820 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3821 count++;
39f19ebb 3822 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3823 count++;
39f19ebb 3824 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3825 count++;
39f19ebb 3826 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
435faee1
DB
3827 count++;
3828
90133415
DB
3829 /* We only support one arg being in raw mode at the moment,
3830 * which is sufficient for the helper functions we have
3831 * right now.
3832 */
3833 return count <= 1;
3834}
3835
3836static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
3837 enum bpf_arg_type arg_next)
3838{
3839 return (arg_type_is_mem_ptr(arg_curr) &&
3840 !arg_type_is_mem_size(arg_next)) ||
3841 (!arg_type_is_mem_ptr(arg_curr) &&
3842 arg_type_is_mem_size(arg_next));
3843}
3844
3845static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
3846{
3847 /* bpf_xxx(..., buf, len) call will access 'len'
3848 * bytes from memory 'buf'. Both arg types need
3849 * to be paired, so make sure there's no buggy
3850 * helper function specification.
3851 */
3852 if (arg_type_is_mem_size(fn->arg1_type) ||
3853 arg_type_is_mem_ptr(fn->arg5_type) ||
3854 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
3855 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
3856 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
3857 check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
3858 return false;
3859
3860 return true;
3861}
3862
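For kernel developers adding helpers, the pairing rule means a buffer argument must be immediately followed by its size argument. A hypothetical kernel-side sketch (bpf_example_fill and its proto are invented for illustration, not an existing helper):

#include <linux/filter.h>
#include <linux/string.h>

BPF_CALL_2(bpf_example_fill, void *, dst, u32, size)
{
	memset(dst, 0, size);
	return size;
}

static const struct bpf_func_proto bpf_example_fill_proto = {
	.func		= bpf_example_fill,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,	/* raw mode: may be uninitialized */
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,	/* must directly follow its buffer */
};

Swapping the two argument types, or separating them, would make check_args_pair_invalid() trip and any call to the helper fail as a misconfigured proto.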
1b986589 3863static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
fd978bf7
JS
3864{
3865 int count = 0;
3866
1b986589 3867 if (arg_type_may_be_refcounted(fn->arg1_type))
fd978bf7 3868 count++;
1b986589 3869 if (arg_type_may_be_refcounted(fn->arg2_type))
fd978bf7 3870 count++;
1b986589 3871 if (arg_type_may_be_refcounted(fn->arg3_type))
fd978bf7 3872 count++;
1b986589 3873 if (arg_type_may_be_refcounted(fn->arg4_type))
fd978bf7 3874 count++;
1b986589 3875 if (arg_type_may_be_refcounted(fn->arg5_type))
fd978bf7
JS
3876 count++;
3877
1b986589
MKL
3878 /* A reference acquiring function cannot acquire
3879 * another refcounted ptr.
3880 */
3881 if (is_acquire_function(func_id) && count)
3882 return false;
3883
fd978bf7
JS
 3884	/* We only support one refcounted arg at the moment,
3885 * which is sufficient for the helper functions we have right now.
3886 */
3887 return count <= 1;
3888}
3889
1b986589 3890static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
90133415
DB
3891{
3892 return check_raw_mode_ok(fn) &&
fd978bf7 3893 check_arg_pair_ok(fn) &&
1b986589 3894 check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
435faee1
DB
3895}
3896
de8f3a83
DB
3897/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
3898 * are now invalid, so turn them into unknown SCALAR_VALUE.
f1174f77 3899 */
f4d7e40a
AS
3900static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
3901 struct bpf_func_state *state)
969bf05e 3902{
58e2af8b 3903 struct bpf_reg_state *regs = state->regs, *reg;
969bf05e
AS
3904 int i;
3905
3906 for (i = 0; i < MAX_BPF_REG; i++)
de8f3a83 3907 if (reg_is_pkt_pointer_any(&regs[i]))
61bd5218 3908 mark_reg_unknown(env, regs, i);
969bf05e 3909
f3709f69
JS
3910 bpf_for_each_spilled_reg(i, state, reg) {
3911 if (!reg)
969bf05e 3912 continue;
de8f3a83 3913 if (reg_is_pkt_pointer_any(reg))
f54c7898 3914 __mark_reg_unknown(env, reg);
969bf05e
AS
3915 }
3916}
3917
f4d7e40a
AS
3918static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
3919{
3920 struct bpf_verifier_state *vstate = env->cur_state;
3921 int i;
3922
3923 for (i = 0; i <= vstate->curframe; i++)
3924 __clear_all_pkt_pointers(env, vstate->frame[i]);
3925}
3926
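The effect of clear_all_pkt_pointers() is why BPF programs must redo their bounds checks after any helper for which bpf_helper_changes_pkt_data() is true. A sketch for a TC classifier (libbpf headers assumed):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("classifier")
int pkt_invalidate_example(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;

	if (data + sizeof(*eth) > data_end)
		return TC_ACT_OK;
	/* may reallocate packet memory: every old pkt pointer (eth, data,
	 * data_end) becomes an unknown scalar after this call */
	bpf_skb_pull_data(skb, 0);
	data = (void *)(long)skb->data;
	data_end = (void *)(long)skb->data_end;
	eth = data;
	if (data + sizeof(*eth) > data_end)	/* bounds check must be redone */
		return TC_ACT_OK;
	return eth->h_proto == bpf_htons(ETH_P_IP) ? TC_ACT_OK : TC_ACT_SHOT;
}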
fd978bf7 3927static void release_reg_references(struct bpf_verifier_env *env,
1b986589
MKL
3928 struct bpf_func_state *state,
3929 int ref_obj_id)
fd978bf7
JS
3930{
3931 struct bpf_reg_state *regs = state->regs, *reg;
3932 int i;
3933
3934 for (i = 0; i < MAX_BPF_REG; i++)
1b986589 3935 if (regs[i].ref_obj_id == ref_obj_id)
fd978bf7
JS
3936 mark_reg_unknown(env, regs, i);
3937
3938 bpf_for_each_spilled_reg(i, state, reg) {
3939 if (!reg)
3940 continue;
1b986589 3941 if (reg->ref_obj_id == ref_obj_id)
f54c7898 3942 __mark_reg_unknown(env, reg);
fd978bf7
JS
3943 }
3944}
3945
3946/* The pointer with the specified id has released its reference to kernel
3947 * resources. Identify all copies of the same pointer and clear the reference.
3948 */
3949static int release_reference(struct bpf_verifier_env *env,
1b986589 3950 int ref_obj_id)
fd978bf7
JS
3951{
3952 struct bpf_verifier_state *vstate = env->cur_state;
1b986589 3953 int err;
fd978bf7
JS
3954 int i;
3955
1b986589
MKL
3956 err = release_reference_state(cur_func(env), ref_obj_id);
3957 if (err)
3958 return err;
3959
fd978bf7 3960 for (i = 0; i <= vstate->curframe; i++)
1b986589 3961 release_reg_references(env, vstate->frame[i], ref_obj_id);
fd978bf7 3962
1b986589 3963 return 0;
fd978bf7
JS
3964}
3965
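From the program's point of view, the acquire/release pairing looks like the sketch below (libbpf headers assumed; section and tuple contents illustrative). All copies of the returned socket pointer share one ref_obj_id, so releasing any copy clears them all; omitting the release makes check_reference_leak() report "Unreleased reference id=... alloc_insn=...":

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("classifier")
int ref_tracking_example(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	tuple.ipv4.dport = bpf_htons(80);
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (sk)
		bpf_sk_release(sk);	/* drops the acquired reference */
	return TC_ACT_OK;
}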
51c39bb1
AS
3966static void clear_caller_saved_regs(struct bpf_verifier_env *env,
3967 struct bpf_reg_state *regs)
3968{
3969 int i;
3970
3971 /* after the call registers r0 - r5 were scratched */
3972 for (i = 0; i < CALLER_SAVED_REGS; i++) {
3973 mark_reg_not_init(env, regs, caller_saved[i]);
3974 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
3975 }
3976}
3977
f4d7e40a
AS
3978static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
3979 int *insn_idx)
3980{
3981 struct bpf_verifier_state *state = env->cur_state;
51c39bb1 3982 struct bpf_func_info_aux *func_info_aux;
f4d7e40a 3983 struct bpf_func_state *caller, *callee;
fd978bf7 3984 int i, err, subprog, target_insn;
51c39bb1 3985 bool is_global = false;
f4d7e40a 3986
aada9ce6 3987 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
f4d7e40a 3988 verbose(env, "the call stack of %d frames is too deep\n",
aada9ce6 3989 state->curframe + 2);
f4d7e40a
AS
3990 return -E2BIG;
3991 }
3992
3993 target_insn = *insn_idx + insn->imm;
3994 subprog = find_subprog(env, target_insn + 1);
3995 if (subprog < 0) {
3996 verbose(env, "verifier bug. No program starts at insn %d\n",
3997 target_insn + 1);
3998 return -EFAULT;
3999 }
4000
4001 caller = state->frame[state->curframe];
4002 if (state->frame[state->curframe + 1]) {
4003 verbose(env, "verifier bug. Frame %d already allocated\n",
4004 state->curframe + 1);
4005 return -EFAULT;
4006 }
4007
51c39bb1
AS
4008 func_info_aux = env->prog->aux->func_info_aux;
4009 if (func_info_aux)
4010 is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
4011 err = btf_check_func_arg_match(env, subprog, caller->regs);
4012 if (err == -EFAULT)
4013 return err;
4014 if (is_global) {
4015 if (err) {
4016 verbose(env, "Caller passes invalid args into func#%d\n",
4017 subprog);
4018 return err;
4019 } else {
4020 if (env->log.level & BPF_LOG_LEVEL)
4021 verbose(env,
4022 "Func#%d is global and valid. Skipping.\n",
4023 subprog);
4024 clear_caller_saved_regs(env, caller->regs);
4025
4026 /* All global functions return SCALAR_VALUE */
4027 mark_reg_unknown(env, caller->regs, BPF_REG_0);
4028
4029 /* continue with next insn after call */
4030 return 0;
4031 }
4032 }
4033
f4d7e40a
AS
4034 callee = kzalloc(sizeof(*callee), GFP_KERNEL);
4035 if (!callee)
4036 return -ENOMEM;
4037 state->frame[state->curframe + 1] = callee;
4038
4039 /* callee cannot access r0, r6 - r9 for reading and has to write
4040 * into its own stack before reading from it.
4041 * callee can read/write into caller's stack
4042 */
4043 init_func_state(env, callee,
4044 /* remember the callsite, it will be used by bpf_exit */
4045 *insn_idx /* callsite */,
4046 state->curframe + 1 /* frameno within this callchain */,
f910cefa 4047 subprog /* subprog number within this prog */);
f4d7e40a 4048
fd978bf7
JS
4049 /* Transfer references to the callee */
4050 err = transfer_reference_state(callee, caller);
4051 if (err)
4052 return err;
4053
679c782d
EC
4054 /* copy r1 - r5 args that callee can access. The copy includes parent
4055 * pointers, which connects us up to the liveness chain
4056 */
f4d7e40a
AS
4057 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
4058 callee->regs[i] = caller->regs[i];
4059
51c39bb1 4060 clear_caller_saved_regs(env, caller->regs);
f4d7e40a
AS
4061
4062 /* only increment it after check_reg_arg() finished */
4063 state->curframe++;
4064
4065 /* and go analyze first insn of the callee */
4066 *insn_idx = target_insn;
4067
06ee7115 4068 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4069 verbose(env, "caller:\n");
4070 print_verifier_state(env, caller);
4071 verbose(env, "callee:\n");
4072 print_verifier_state(env, callee);
4073 }
4074 return 0;
4075}
4076
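A sketch of the caller/callee split (libbpf headers assumed; function names invented). Clang emits the call below as a BPF_CALL insn with src_reg == BPF_PSEUDO_CALL, which lands in check_func_call() and gets its own verifier frame:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

static __attribute__((noinline)) int double_it(__u32 x)
{
	/* verified in its own bpf_func_state; it cannot read the
	 * caller's r0 or r6-r9 before writing them */
	return x * 2;
}

SEC("xdp")
int subprog_call_example(struct xdp_md *ctx)
{
	/* nesting is capped at MAX_CALL_FRAMES (8) frames */
	return double_it(1) == 2 ? XDP_PASS : XDP_DROP;
}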
4077static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
4078{
4079 struct bpf_verifier_state *state = env->cur_state;
4080 struct bpf_func_state *caller, *callee;
4081 struct bpf_reg_state *r0;
fd978bf7 4082 int err;
f4d7e40a
AS
4083
4084 callee = state->frame[state->curframe];
4085 r0 = &callee->regs[BPF_REG_0];
4086 if (r0->type == PTR_TO_STACK) {
 4087		/* technically it's ok to return the caller's stack pointer
 4088		 * (or the caller's caller's pointer) back to the caller,
 4089		 * since these pointers are valid. Only the current frame's
 4090		 * stack pointer becomes invalid as soon as the function
 4091		 * exits, but let's be conservative
 4092		 */
4093 verbose(env, "cannot return stack pointer to the caller\n");
4094 return -EINVAL;
4095 }
4096
4097 state->curframe--;
4098 caller = state->frame[state->curframe];
4099 /* return to the caller whatever r0 had in the callee */
4100 caller->regs[BPF_REG_0] = *r0;
4101
fd978bf7
JS
4102 /* Transfer references to the caller */
4103 err = transfer_reference_state(caller, callee);
4104 if (err)
4105 return err;
4106
f4d7e40a 4107 *insn_idx = callee->callsite + 1;
06ee7115 4108 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4109 verbose(env, "returning from callee:\n");
4110 print_verifier_state(env, callee);
4111 verbose(env, "to caller at %d:\n", *insn_idx);
4112 print_verifier_state(env, caller);
4113 }
4114 /* clear everything in the callee */
4115 free_func_state(callee);
4116 state->frame[state->curframe + 1] = NULL;
4117 return 0;
4118}
4119
849fa506
YS
4120static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
4121 int func_id,
4122 struct bpf_call_arg_meta *meta)
4123{
4124 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
4125
4126 if (ret_type != RET_INTEGER ||
4127 (func_id != BPF_FUNC_get_stack &&
4128 func_id != BPF_FUNC_probe_read_str))
4129 return;
4130
4131 ret_reg->smax_value = meta->msize_smax_value;
4132 ret_reg->umax_value = meta->msize_umax_value;
4133 __reg_deduce_bounds(ret_reg);
4134 __reg_bound_offset(ret_reg);
4135}
4136
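The refinement above is what makes the common read-then-index pattern verifiable. A sketch (libbpf headers assumed; the kprobe target and its argument are illustrative only):

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_nanosleep")
int retval_range_example(struct pt_regs *ctx)
{
	char buf[64];
	long n;

	n = bpf_probe_read_str(buf, sizeof(buf), (void *)PT_REGS_PARM1(ctx));
	if (n <= 0)
		return 0;
	/* do_refine_retval_range() capped n at 64; together with the
	 * n > 0 test the index below is provably inside buf */
	return buf[n - 1];
}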
c93552c4
DB
4137static int
4138record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4139 int func_id, int insn_idx)
4140{
4141 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
591fe988 4142 struct bpf_map *map = meta->map_ptr;
c93552c4
DB
4143
4144 if (func_id != BPF_FUNC_tail_call &&
09772d92
DB
4145 func_id != BPF_FUNC_map_lookup_elem &&
4146 func_id != BPF_FUNC_map_update_elem &&
f1a2e44a
MV
4147 func_id != BPF_FUNC_map_delete_elem &&
4148 func_id != BPF_FUNC_map_push_elem &&
4149 func_id != BPF_FUNC_map_pop_elem &&
4150 func_id != BPF_FUNC_map_peek_elem)
c93552c4 4151 return 0;
09772d92 4152
591fe988 4153 if (map == NULL) {
c93552c4
DB
4154 verbose(env, "kernel subsystem misconfigured verifier\n");
4155 return -EINVAL;
4156 }
4157
591fe988
DB
4158 /* In case of read-only, some additional restrictions
4159 * need to be applied in order to prevent altering the
4160 * state of the map from program side.
4161 */
4162 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
4163 (func_id == BPF_FUNC_map_delete_elem ||
4164 func_id == BPF_FUNC_map_update_elem ||
4165 func_id == BPF_FUNC_map_push_elem ||
4166 func_id == BPF_FUNC_map_pop_elem)) {
4167 verbose(env, "write into map forbidden\n");
4168 return -EACCES;
4169 }
4170
d2e4c1e6 4171 if (!BPF_MAP_PTR(aux->map_ptr_state))
c93552c4
DB
4172 bpf_map_ptr_store(aux, meta->map_ptr,
4173 meta->map_ptr->unpriv_array);
d2e4c1e6 4174 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
c93552c4
DB
4175 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
4176 meta->map_ptr->unpriv_array);
4177 return 0;
4178}
4179
d2e4c1e6
DB
4180static int
4181record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4182 int func_id, int insn_idx)
4183{
4184 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
4185 struct bpf_reg_state *regs = cur_regs(env), *reg;
4186 struct bpf_map *map = meta->map_ptr;
4187 struct tnum range;
4188 u64 val;
cc52d914 4189 int err;
d2e4c1e6
DB
4190
4191 if (func_id != BPF_FUNC_tail_call)
4192 return 0;
4193 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
4194 verbose(env, "kernel subsystem misconfigured verifier\n");
4195 return -EINVAL;
4196 }
4197
4198 range = tnum_range(0, map->max_entries - 1);
4199 reg = &regs[BPF_REG_3];
4200
4201 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
4202 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4203 return 0;
4204 }
4205
cc52d914
DB
4206 err = mark_chain_precision(env, BPF_REG_3);
4207 if (err)
4208 return err;
4209
d2e4c1e6
DB
4210 val = reg->var_off.value;
4211 if (bpf_map_key_unseen(aux))
4212 bpf_map_key_store(aux, val);
4213 else if (!bpf_map_key_poisoned(aux) &&
4214 bpf_map_key_immediate(aux) != val)
4215 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4216 return 0;
4217}
4218
fd978bf7
JS
4219static int check_reference_leak(struct bpf_verifier_env *env)
4220{
4221 struct bpf_func_state *state = cur_func(env);
4222 int i;
4223
4224 for (i = 0; i < state->acquired_refs; i++) {
4225 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
4226 state->refs[i].id, state->refs[i].insn_idx);
4227 }
4228 return state->acquired_refs ? -EINVAL : 0;
4229}
4230
f4d7e40a 4231static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
17a52670 4232{
17a52670 4233 const struct bpf_func_proto *fn = NULL;
638f5b90 4234 struct bpf_reg_state *regs;
33ff9823 4235 struct bpf_call_arg_meta meta;
969bf05e 4236 bool changes_data;
17a52670
AS
4237 int i, err;
4238
4239 /* find function prototype */
4240 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
61bd5218
JK
4241 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
4242 func_id);
17a52670
AS
4243 return -EINVAL;
4244 }
4245
00176a34 4246 if (env->ops->get_func_proto)
5e43f899 4247 fn = env->ops->get_func_proto(func_id, env->prog);
17a52670 4248 if (!fn) {
61bd5218
JK
4249 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
4250 func_id);
17a52670
AS
4251 return -EINVAL;
4252 }
4253
4254 /* eBPF programs must be GPL compatible to use GPL-ed functions */
24701ece 4255 if (!env->prog->gpl_compatible && fn->gpl_only) {
3fe2867c 4256 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
17a52670
AS
4257 return -EINVAL;
4258 }
4259
04514d13 4260 /* With LD_ABS/IND some JITs save/restore skb from r1. */
17bedab2 4261 changes_data = bpf_helper_changes_pkt_data(fn->func);
04514d13
DB
4262 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
4263 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
4264 func_id_name(func_id), func_id);
4265 return -EINVAL;
4266 }
969bf05e 4267
33ff9823 4268 memset(&meta, 0, sizeof(meta));
36bbef52 4269 meta.pkt_access = fn->pkt_access;
33ff9823 4270
1b986589 4271 err = check_func_proto(fn, func_id);
435faee1 4272 if (err) {
61bd5218 4273 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
ebb676da 4274 func_id_name(func_id), func_id);
435faee1
DB
4275 return err;
4276 }
4277
d83525ca 4278 meta.func_id = func_id;
17a52670 4279 /* check args */
a7658e1a 4280 for (i = 0; i < 5; i++) {
9cc31b3a
AS
4281 err = btf_resolve_helper_id(&env->log, fn, i);
4282 if (err > 0)
4283 meta.btf_id = err;
a7658e1a
AS
4284 err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);
4285 if (err)
4286 return err;
4287 }
17a52670 4288
c93552c4
DB
4289 err = record_func_map(env, &meta, func_id, insn_idx);
4290 if (err)
4291 return err;
4292
d2e4c1e6
DB
4293 err = record_func_key(env, &meta, func_id, insn_idx);
4294 if (err)
4295 return err;
4296
435faee1
DB
4297 /* Mark slots with STACK_MISC in case of raw mode, stack offset
4298 * is inferred from register state.
4299 */
4300 for (i = 0; i < meta.access_size; i++) {
ca369602
DB
4301 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
4302 BPF_WRITE, -1, false);
435faee1
DB
4303 if (err)
4304 return err;
4305 }
4306
fd978bf7
JS
4307 if (func_id == BPF_FUNC_tail_call) {
4308 err = check_reference_leak(env);
4309 if (err) {
4310 verbose(env, "tail_call would lead to reference leak\n");
4311 return err;
4312 }
4313 } else if (is_release_function(func_id)) {
1b986589 4314 err = release_reference(env, meta.ref_obj_id);
46f8bc92
MKL
4315 if (err) {
4316 verbose(env, "func %s#%d reference has not been acquired before\n",
4317 func_id_name(func_id), func_id);
fd978bf7 4318 return err;
46f8bc92 4319 }
fd978bf7
JS
4320 }
4321
638f5b90 4322 regs = cur_regs(env);
cd339431
RG
4323
4324 /* check that flags argument in get_local_storage(map, flags) is 0,
4325 * this is required because get_local_storage() can't return an error.
4326 */
4327 if (func_id == BPF_FUNC_get_local_storage &&
4328 !register_is_null(&regs[BPF_REG_2])) {
4329 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
4330 return -EINVAL;
4331 }
4332
17a52670 4333 /* reset caller saved regs */
dc503a8a 4334 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 4335 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
4336 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4337 }
17a52670 4338
5327ed3d
JW
4339 /* helper call returns 64-bit value. */
4340 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
4341
dc503a8a 4342 /* update return register (already marked as written above) */
17a52670 4343 if (fn->ret_type == RET_INTEGER) {
f1174f77 4344 /* sets type to SCALAR_VALUE */
61bd5218 4345 mark_reg_unknown(env, regs, BPF_REG_0);
17a52670
AS
4346 } else if (fn->ret_type == RET_VOID) {
4347 regs[BPF_REG_0].type = NOT_INIT;
3e6a4b3e
RG
4348 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
4349 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
f1174f77 4350 /* There is no offset yet applied, variable or fixed */
61bd5218 4351 mark_reg_known_zero(env, regs, BPF_REG_0);
17a52670
AS
4352 /* remember map_ptr, so that check_map_access()
4353 * can check 'value_size' boundary of memory access
4354 * to map element returned from bpf_map_lookup_elem()
4355 */
33ff9823 4356 if (meta.map_ptr == NULL) {
61bd5218
JK
4357 verbose(env,
4358 "kernel subsystem misconfigured verifier\n");
17a52670
AS
4359 return -EINVAL;
4360 }
33ff9823 4361 regs[BPF_REG_0].map_ptr = meta.map_ptr;
4d31f301
DB
4362 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
4363 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
e16d2f1a
AS
4364 if (map_value_has_spin_lock(meta.map_ptr))
4365 regs[BPF_REG_0].id = ++env->id_gen;
4d31f301
DB
4366 } else {
4367 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
4368 regs[BPF_REG_0].id = ++env->id_gen;
4369 }
c64b7983
JS
4370 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
4371 mark_reg_known_zero(env, regs, BPF_REG_0);
4372 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
0f3adc28 4373 regs[BPF_REG_0].id = ++env->id_gen;
85a51f8c
LB
4374 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
4375 mark_reg_known_zero(env, regs, BPF_REG_0);
4376 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
4377 regs[BPF_REG_0].id = ++env->id_gen;
655a51e5
MKL
4378 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
4379 mark_reg_known_zero(env, regs, BPF_REG_0);
4380 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
4381 regs[BPF_REG_0].id = ++env->id_gen;
17a52670 4382 } else {
61bd5218 4383 verbose(env, "unknown return type %d of func %s#%d\n",
ebb676da 4384 fn->ret_type, func_id_name(func_id), func_id);
17a52670
AS
4385 return -EINVAL;
4386 }
04fd61ab 4387
0f3adc28 4388 if (is_ptr_cast_function(func_id)) {
1b986589
MKL
4389 /* For release_reference() */
4390 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
0f3adc28
LB
4391 } else if (is_acquire_function(func_id)) {
4392 int id = acquire_reference_state(env, insn_idx);
4393
4394 if (id < 0)
4395 return id;
4396 /* For mark_ptr_or_null_reg() */
4397 regs[BPF_REG_0].id = id;
4398 /* For release_reference() */
4399 regs[BPF_REG_0].ref_obj_id = id;
4400 }
1b986589 4401
849fa506
YS
4402 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
4403
61bd5218 4404 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
35578d79
KX
4405 if (err)
4406 return err;
04fd61ab 4407
c195651e
YS
4408 if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
4409 const char *err_str;
4410
4411#ifdef CONFIG_PERF_EVENTS
4412 err = get_callchain_buffers(sysctl_perf_event_max_stack);
4413 err_str = "cannot get callchain buffer for func %s#%d\n";
4414#else
4415 err = -ENOTSUPP;
4416 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
4417#endif
4418 if (err) {
4419 verbose(env, err_str, func_id_name(func_id), func_id);
4420 return err;
4421 }
4422
4423 env->prog->has_callchain_buf = true;
4424 }
4425
969bf05e
AS
4426 if (changes_data)
4427 clear_all_pkt_pointers(env);
4428 return 0;
4429}
4430
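Tying the return-type handling together, the classic consequence of RET_PTR_TO_MAP_VALUE_OR_NULL is the mandatory NULL check below (libbpf headers assumed, names illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} hits SEC(".maps");

SEC("xdp")
int null_check_example(struct xdp_md *ctx)
{
	__u32 key = 0;
	__u64 *val = bpf_map_lookup_elem(&hits, &key);

	/* R0 comes back as PTR_TO_MAP_VALUE_OR_NULL with a fresh id;
	 * dereferencing it before this test fails with
	 * "R0 invalid mem access 'map_value_or_null'" */
	if (!val)
		return XDP_PASS;
	__sync_fetch_and_add(val, 1);
	return XDP_PASS;
}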
b03c9f9f
EC
4431static bool signed_add_overflows(s64 a, s64 b)
4432{
4433 /* Do the add in u64, where overflow is well-defined */
4434 s64 res = (s64)((u64)a + (u64)b);
4435
4436 if (b < 0)
4437 return res > a;
4438 return res < a;
4439}
4440
4441static bool signed_sub_overflows(s64 a, s64 b)
4442{
4443 /* Do the sub in u64, where overflow is well-defined */
4444 s64 res = (s64)((u64)a - (u64)b);
4445
4446 if (b < 0)
4447 return res < a;
4448 return res > a;
969bf05e
AS
4449}
4450
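These predicates rely on unsigned wraparound being well-defined in C while signed overflow is not. A userspace restatement with a few sanity checks (int64_t standing in for the kernel's s64):

#include <assert.h>
#include <stdint.h>

static int add_overflows(int64_t a, int64_t b)
{
	int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);

	if (b < 0)
		return res > a;	/* adding a negative must not move up */
	return res < a;		/* adding a non-negative must not move down */
}

int main(void)
{
	assert(!add_overflows(INT64_MAX - 1, 1));	/* exactly fits */
	assert(add_overflows(INT64_MAX, 1));		/* wraps to INT64_MIN */
	assert(add_overflows(INT64_MIN, -1));		/* wraps to INT64_MAX */
	return 0;
}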
bb7f0f98
AS
4451static bool check_reg_sane_offset(struct bpf_verifier_env *env,
4452 const struct bpf_reg_state *reg,
4453 enum bpf_reg_type type)
4454{
4455 bool known = tnum_is_const(reg->var_off);
4456 s64 val = reg->var_off.value;
4457 s64 smin = reg->smin_value;
4458
4459 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
4460 verbose(env, "math between %s pointer and %lld is not allowed\n",
4461 reg_type_str[type], val);
4462 return false;
4463 }
4464
4465 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
4466 verbose(env, "%s pointer offset %d is not allowed\n",
4467 reg_type_str[type], reg->off);
4468 return false;
4469 }
4470
4471 if (smin == S64_MIN) {
4472 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
4473 reg_type_str[type]);
4474 return false;
4475 }
4476
4477 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
4478 verbose(env, "value %lld makes %s pointer be out of bounds\n",
4479 smin, reg_type_str[type]);
4480 return false;
4481 }
4482
4483 return true;
4484}
4485
979d63d5
DB
4486static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
4487{
4488 return &env->insn_aux_data[env->insn_idx];
4489}
4490
4491static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
4492 u32 *ptr_limit, u8 opcode, bool off_is_neg)
4493{
4494 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
4495 (opcode == BPF_SUB && !off_is_neg);
4496 u32 off;
4497
4498 switch (ptr_reg->type) {
4499 case PTR_TO_STACK:
088ec26d
AI
4500 /* Indirect variable offset stack access is prohibited in
4501 * unprivileged mode so it's not handled here.
4502 */
979d63d5
DB
4503 off = ptr_reg->off + ptr_reg->var_off.value;
4504 if (mask_to_left)
4505 *ptr_limit = MAX_BPF_STACK + off;
4506 else
4507 *ptr_limit = -off;
4508 return 0;
4509 case PTR_TO_MAP_VALUE:
4510 if (mask_to_left) {
4511 *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
4512 } else {
4513 off = ptr_reg->smin_value + ptr_reg->off;
4514 *ptr_limit = ptr_reg->map_ptr->value_size - off;
4515 }
4516 return 0;
4517 default:
4518 return -EINVAL;
4519 }
4520}
4521
d3bd7413
DB
4522static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
4523 const struct bpf_insn *insn)
4524{
4525 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
4526}
4527
4528static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
4529 u32 alu_state, u32 alu_limit)
4530{
4531 /* If we arrived here from different branches with different
4532 * state or limits to sanitize, then this won't work.
4533 */
4534 if (aux->alu_state &&
4535 (aux->alu_state != alu_state ||
4536 aux->alu_limit != alu_limit))
4537 return -EACCES;
4538
4539 /* Corresponding fixup done in fixup_bpf_calls(). */
4540 aux->alu_state = alu_state;
4541 aux->alu_limit = alu_limit;
4542 return 0;
4543}
4544
4545static int sanitize_val_alu(struct bpf_verifier_env *env,
4546 struct bpf_insn *insn)
4547{
4548 struct bpf_insn_aux_data *aux = cur_aux(env);
4549
4550 if (can_skip_alu_sanitation(env, insn))
4551 return 0;
4552
4553 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
4554}
4555
979d63d5
DB
4556static int sanitize_ptr_alu(struct bpf_verifier_env *env,
4557 struct bpf_insn *insn,
4558 const struct bpf_reg_state *ptr_reg,
4559 struct bpf_reg_state *dst_reg,
4560 bool off_is_neg)
4561{
4562 struct bpf_verifier_state *vstate = env->cur_state;
4563 struct bpf_insn_aux_data *aux = cur_aux(env);
4564 bool ptr_is_dst_reg = ptr_reg == dst_reg;
4565 u8 opcode = BPF_OP(insn->code);
4566 u32 alu_state, alu_limit;
4567 struct bpf_reg_state tmp;
4568 bool ret;
4569
d3bd7413 4570 if (can_skip_alu_sanitation(env, insn))
979d63d5
DB
4571 return 0;
4572
4573 /* We already marked aux for masking from non-speculative
4574 * paths, thus we got here in the first place. We only care
4575 * to explore bad access from here.
4576 */
4577 if (vstate->speculative)
4578 goto do_sim;
4579
4580 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
4581 alu_state |= ptr_is_dst_reg ?
4582 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
4583
4584 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
4585 return 0;
d3bd7413 4586 if (update_alu_sanitation_state(aux, alu_state, alu_limit))
979d63d5 4587 return -EACCES;
979d63d5
DB
4588do_sim:
4589 /* Simulate and find potential out-of-bounds access under
4590 * speculative execution from truncation as a result of
4591 * masking when off was not within expected range. If off
4592 * sits in dst, then we temporarily need to move ptr there
4593 * to simulate dst (== 0) +/-= ptr. Needed, for example,
4594 * for cases where we use K-based arithmetic in one direction
4595 * and truncated reg-based in the other in order to explore
4596 * bad access.
4597 */
4598 if (!ptr_is_dst_reg) {
4599 tmp = *dst_reg;
4600 *dst_reg = *ptr_reg;
4601 }
4602 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
0803278b 4603 if (!ptr_is_dst_reg && ret)
979d63d5
DB
4604 *dst_reg = tmp;
4605 return !ret ? -EFAULT : 0;
4606}
4607
f1174f77 4608/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
f1174f77
EC
4609 * Caller should also handle BPF_MOV case separately.
4610 * If we return -EACCES, caller may want to try again treating pointer as a
4611 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
4612 */
4613static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
4614 struct bpf_insn *insn,
4615 const struct bpf_reg_state *ptr_reg,
4616 const struct bpf_reg_state *off_reg)
969bf05e 4617{
f4d7e40a
AS
4618 struct bpf_verifier_state *vstate = env->cur_state;
4619 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4620 struct bpf_reg_state *regs = state->regs, *dst_reg;
f1174f77 4621 bool known = tnum_is_const(off_reg->var_off);
b03c9f9f
EC
4622 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
4623 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
4624 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
4625 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
9d7eceed 4626 u32 dst = insn->dst_reg, src = insn->src_reg;
969bf05e 4627 u8 opcode = BPF_OP(insn->code);
979d63d5 4628 int ret;
969bf05e 4629
f1174f77 4630 dst_reg = &regs[dst];
969bf05e 4631
6f16101e
DB
4632 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
4633 smin_val > smax_val || umin_val > umax_val) {
4634 /* Taint dst register if offset had invalid bounds derived from
4635 * e.g. dead branches.
4636 */
f54c7898 4637 __mark_reg_unknown(env, dst_reg);
6f16101e 4638 return 0;
f1174f77
EC
4639 }
4640
4641 if (BPF_CLASS(insn->code) != BPF_ALU64) {
4642 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
82abbf8d
AS
4643 verbose(env,
4644 "R%d 32-bit pointer arithmetic prohibited\n",
4645 dst);
f1174f77 4646 return -EACCES;
969bf05e
AS
4647 }
4648
aad2eeaf
JS
4649 switch (ptr_reg->type) {
4650 case PTR_TO_MAP_VALUE_OR_NULL:
4651 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
4652 dst, reg_type_str[ptr_reg->type]);
f1174f77 4653 return -EACCES;
aad2eeaf
JS
4654 case CONST_PTR_TO_MAP:
4655 case PTR_TO_PACKET_END:
c64b7983
JS
4656 case PTR_TO_SOCKET:
4657 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
4658 case PTR_TO_SOCK_COMMON:
4659 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
4660 case PTR_TO_TCP_SOCK:
4661 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 4662 case PTR_TO_XDP_SOCK:
aad2eeaf
JS
4663 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
4664 dst, reg_type_str[ptr_reg->type]);
f1174f77 4665 return -EACCES;
9d7eceed
DB
4666 case PTR_TO_MAP_VALUE:
4667 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
4668 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
4669 off_reg == dst_reg ? dst : src);
4670 return -EACCES;
4671 }
4672 /* fall-through */
aad2eeaf
JS
4673 default:
4674 break;
f1174f77
EC
4675 }
4676
4677 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
4678 * The id may be overwritten later if we create a new variable offset.
969bf05e 4679 */
f1174f77
EC
4680 dst_reg->type = ptr_reg->type;
4681 dst_reg->id = ptr_reg->id;
969bf05e 4682
bb7f0f98
AS
4683 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
4684 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
4685 return -EINVAL;
4686
f1174f77
EC
4687 switch (opcode) {
4688 case BPF_ADD:
979d63d5
DB
4689 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4690 if (ret < 0) {
4691 verbose(env, "R%d tried to add from different maps or paths\n", dst);
4692 return ret;
4693 }
f1174f77
EC
4694 /* We can take a fixed offset as long as it doesn't overflow
4695 * the s32 'off' field
969bf05e 4696 */
b03c9f9f
EC
4697 if (known && (ptr_reg->off + smin_val ==
4698 (s64)(s32)(ptr_reg->off + smin_val))) {
f1174f77 4699 /* pointer += K. Accumulate it into fixed offset */
b03c9f9f
EC
4700 dst_reg->smin_value = smin_ptr;
4701 dst_reg->smax_value = smax_ptr;
4702 dst_reg->umin_value = umin_ptr;
4703 dst_reg->umax_value = umax_ptr;
f1174f77 4704 dst_reg->var_off = ptr_reg->var_off;
b03c9f9f 4705 dst_reg->off = ptr_reg->off + smin_val;
0962590e 4706 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
4707 break;
4708 }
f1174f77
EC
4709 /* A new variable offset is created. Note that off_reg->off
4710 * == 0, since it's a scalar.
4711 * dst_reg gets the pointer type and since some positive
4712 * integer value was added to the pointer, give it a new 'id'
4713 * if it's a PTR_TO_PACKET.
4714 * this creates a new 'base' pointer, off_reg (variable) gets
4715 * added into the variable offset, and we copy the fixed offset
4716 * from ptr_reg.
969bf05e 4717 */
b03c9f9f
EC
4718 if (signed_add_overflows(smin_ptr, smin_val) ||
4719 signed_add_overflows(smax_ptr, smax_val)) {
4720 dst_reg->smin_value = S64_MIN;
4721 dst_reg->smax_value = S64_MAX;
4722 } else {
4723 dst_reg->smin_value = smin_ptr + smin_val;
4724 dst_reg->smax_value = smax_ptr + smax_val;
4725 }
4726 if (umin_ptr + umin_val < umin_ptr ||
4727 umax_ptr + umax_val < umax_ptr) {
4728 dst_reg->umin_value = 0;
4729 dst_reg->umax_value = U64_MAX;
4730 } else {
4731 dst_reg->umin_value = umin_ptr + umin_val;
4732 dst_reg->umax_value = umax_ptr + umax_val;
4733 }
f1174f77
EC
4734 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
4735 dst_reg->off = ptr_reg->off;
0962590e 4736 dst_reg->raw = ptr_reg->raw;
de8f3a83 4737 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
4738 dst_reg->id = ++env->id_gen;
4739 /* something was added to pkt_ptr, set range to zero */
0962590e 4740 dst_reg->raw = 0;
f1174f77
EC
4741 }
4742 break;
4743 case BPF_SUB:
979d63d5
DB
4744 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4745 if (ret < 0) {
4746 verbose(env, "R%d tried to sub from different maps or paths\n", dst);
4747 return ret;
4748 }
f1174f77
EC
4749 if (dst_reg == off_reg) {
4750 /* scalar -= pointer. Creates an unknown scalar */
82abbf8d
AS
4751 verbose(env, "R%d tried to subtract pointer from scalar\n",
4752 dst);
f1174f77
EC
4753 return -EACCES;
4754 }
4755 /* We don't allow subtraction from FP, because (according to
4756 * test_verifier.c test "invalid fp arithmetic", JITs might not
4757 * be able to deal with it.
969bf05e 4758 */
f1174f77 4759 if (ptr_reg->type == PTR_TO_STACK) {
82abbf8d
AS
4760 verbose(env, "R%d subtraction from stack pointer prohibited\n",
4761 dst);
f1174f77
EC
4762 return -EACCES;
4763 }
b03c9f9f
EC
4764 if (known && (ptr_reg->off - smin_val ==
4765 (s64)(s32)(ptr_reg->off - smin_val))) {
f1174f77 4766 /* pointer -= K. Subtract it from fixed offset */
b03c9f9f
EC
4767 dst_reg->smin_value = smin_ptr;
4768 dst_reg->smax_value = smax_ptr;
4769 dst_reg->umin_value = umin_ptr;
4770 dst_reg->umax_value = umax_ptr;
f1174f77
EC
4771 dst_reg->var_off = ptr_reg->var_off;
4772 dst_reg->id = ptr_reg->id;
b03c9f9f 4773 dst_reg->off = ptr_reg->off - smin_val;
0962590e 4774 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
4775 break;
4776 }
f1174f77
EC
4777 /* A new variable offset is created. If the subtrahend is known
4778 * nonnegative, then any reg->range we had before is still good.
969bf05e 4779 */
b03c9f9f
EC
4780 if (signed_sub_overflows(smin_ptr, smax_val) ||
4781 signed_sub_overflows(smax_ptr, smin_val)) {
4782 /* Overflow possible, we know nothing */
4783 dst_reg->smin_value = S64_MIN;
4784 dst_reg->smax_value = S64_MAX;
4785 } else {
4786 dst_reg->smin_value = smin_ptr - smax_val;
4787 dst_reg->smax_value = smax_ptr - smin_val;
4788 }
4789 if (umin_ptr < umax_val) {
4790 /* Overflow possible, we know nothing */
4791 dst_reg->umin_value = 0;
4792 dst_reg->umax_value = U64_MAX;
4793 } else {
4794 /* Cannot overflow (as long as bounds are consistent) */
4795 dst_reg->umin_value = umin_ptr - umax_val;
4796 dst_reg->umax_value = umax_ptr - umin_val;
4797 }
f1174f77
EC
4798 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
4799 dst_reg->off = ptr_reg->off;
0962590e 4800 dst_reg->raw = ptr_reg->raw;
de8f3a83 4801 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
4802 dst_reg->id = ++env->id_gen;
 4803			/* something was subtracted from pkt_ptr, set range to zero */
b03c9f9f 4804 if (smin_val < 0)
0962590e 4805 dst_reg->raw = 0;
43188702 4806 }
f1174f77
EC
4807 break;
4808 case BPF_AND:
4809 case BPF_OR:
4810 case BPF_XOR:
82abbf8d
AS
4811 /* bitwise ops on pointers are troublesome, prohibit. */
4812 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
4813 dst, bpf_alu_string[opcode >> 4]);
f1174f77
EC
4814 return -EACCES;
4815 default:
4816 /* other operators (e.g. MUL,LSH) produce non-pointer results */
82abbf8d
AS
4817 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
4818 dst, bpf_alu_string[opcode >> 4]);
f1174f77 4819 return -EACCES;
43188702
JF
4820 }
4821
bb7f0f98
AS
4822 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
4823 return -EINVAL;
4824
b03c9f9f
EC
4825 __update_reg_bounds(dst_reg);
4826 __reg_deduce_bounds(dst_reg);
4827 __reg_bound_offset(dst_reg);
0d6303db
DB
4828
4829 /* For unprivileged we require that resulting offset must be in bounds
4830 * in order to be able to sanitize access later on.
4831 */
e4298d25
DB
4832 if (!env->allow_ptr_leaks) {
4833 if (dst_reg->type == PTR_TO_MAP_VALUE &&
4834 check_map_access(env, dst, dst_reg->off, 1, false)) {
4835 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
4836 "prohibited for !root\n", dst);
4837 return -EACCES;
4838 } else if (dst_reg->type == PTR_TO_STACK &&
4839 check_stack_access(env, dst_reg, dst_reg->off +
4840 dst_reg->var_off.value, 1)) {
4841 verbose(env, "R%d stack pointer arithmetic goes out of range, "
4842 "prohibited for !root\n", dst);
4843 return -EACCES;
4844 }
0d6303db
DB
4845 }
4846
43188702
JF
4847 return 0;
4848}
4849
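The unsigned half of the BPF_SUB case above hinges on one test: if the smallest possible minuend is below the largest possible subtrahend, the subtraction may wrap. A minimal standalone sketch of that interval rule (helper names are illustrative, not kernel APIs):

#include <stdint.h>
#include <stdio.h>

struct urange { uint64_t umin, umax; };

/* [a.umin, a.umax] - [b.umin, b.umax] under u64 wrap-around semantics */
static struct urange urange_sub(struct urange a, struct urange b)
{
	if (a.umin < b.umax)			/* result may wrap below 0 */
		return (struct urange){ 0, UINT64_MAX };
	/* cannot wrap: subtract the extremes crosswise */
	return (struct urange){ a.umin - b.umax, a.umax - b.umin };
}

int main(void)
{
	struct urange r = urange_sub((struct urange){ 100, 200 },
				     (struct urange){ 8, 16 });

	printf("[%llu, %llu]\n", (unsigned long long)r.umin,
	       (unsigned long long)r.umax);	/* prints [84, 192] */
	return 0;
}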
07cd2631
JF
4850static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
4851 struct bpf_reg_state *src_reg)
4852{
4853 s64 smin_val = src_reg->smin_value;
4854 s64 smax_val = src_reg->smax_value;
4855 u64 umin_val = src_reg->umin_value;
4856 u64 umax_val = src_reg->umax_value;
4857
4858 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
4859 signed_add_overflows(dst_reg->smax_value, smax_val)) {
4860 dst_reg->smin_value = S64_MIN;
4861 dst_reg->smax_value = S64_MAX;
4862 } else {
4863 dst_reg->smin_value += smin_val;
4864 dst_reg->smax_value += smax_val;
4865 }
4866 if (dst_reg->umin_value + umin_val < umin_val ||
4867 dst_reg->umax_value + umax_val < umax_val) {
4868 dst_reg->umin_value = 0;
4869 dst_reg->umax_value = U64_MAX;
4870 } else {
4871 dst_reg->umin_value += umin_val;
4872 dst_reg->umax_value += umax_val;
4873 }
4874 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg->var_off);
4875}
4876
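scalar_min_max_add() above leans on signed_add_overflows(). A standalone helper with equivalent behavior could look like this; a sketch only, using the usual trick of doing the add in u64 (where wrap-around is well defined) and then comparing against the addend's sign:

#include <stdbool.h>
#include <stdint.h>

/* true if a + b overflows s64 */
static bool sadd64_overflows(int64_t a, int64_t b)
{
	int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);

	if (b < 0)
		return res > a;	/* adding a negative must not grow a */
	return res < a;		/* adding a nonnegative must not shrink a */
}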
4877static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
4878 struct bpf_reg_state *src_reg)
4879{
4880 s64 smin_val = src_reg->smin_value;
4881 s64 smax_val = src_reg->smax_value;
4882 u64 umin_val = src_reg->umin_value;
4883 u64 umax_val = src_reg->umax_value;
4884
4885 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
4886 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
4887 /* Overflow possible, we know nothing */
4888 dst_reg->smin_value = S64_MIN;
4889 dst_reg->smax_value = S64_MAX;
4890 } else {
4891 dst_reg->smin_value -= smax_val;
4892 dst_reg->smax_value -= smin_val;
4893 }
4894 if (dst_reg->umin_value < umax_val) {
4895 /* Overflow possible, we know nothing */
4896 dst_reg->umin_value = 0;
4897 dst_reg->umax_value = U64_MAX;
4898 } else {
4899 /* Cannot overflow (as long as bounds are consistent) */
4900 dst_reg->umin_value -= umax_val;
4901 dst_reg->umax_value -= umin_val;
4902 }
4903 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg->var_off);
4904}
4905
4906static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
4907 struct bpf_reg_state *src_reg)
4908{
4909 s64 smin_val = src_reg->smin_value;
4910 u64 umin_val = src_reg->umin_value;
4911 u64 umax_val = src_reg->umax_value;
4912
4913 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg->var_off);
4914 if (smin_val < 0 || dst_reg->smin_value < 0) {
4915 /* Ain't nobody got time to multiply that sign */
4916 __mark_reg_unbounded(dst_reg);
4917 __update_reg_bounds(dst_reg);
4918 return;
4919 }
4920 /* Both values are positive, so we can work with unsigned and
4921 * copy the result to signed (unless it exceeds S64_MAX).
4922 */
4923 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
4924 /* Potential overflow, we know nothing */
4925 __mark_reg_unbounded(dst_reg);
4926 /* (except what we can learn from the var_off) */
4927 __update_reg_bounds(dst_reg);
4928 return;
4929 }
4930 dst_reg->umin_value *= umin_val;
4931 dst_reg->umax_value *= umax_val;
4932 if (dst_reg->umax_value > S64_MAX) {
4933 /* Overflow possible, we know nothing */
4934 dst_reg->smin_value = S64_MIN;
4935 dst_reg->smax_value = S64_MAX;
4936 } else {
4937 dst_reg->smin_value = dst_reg->umin_value;
4938 dst_reg->smax_value = dst_reg->umax_value;
4939 }
4940}
4941
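The multiply case above only keeps bounds when both operands are provably small. A standalone restatement of that rule (urange_mul is an illustrative name, not a kernel function):

#include <stdint.h>

struct urange { uint64_t umin, umax; };

/* returns 0 and tightens *dst on success, -1 if the caller must mark
 * the result unbounded (mirroring __mark_reg_unbounded() above)
 */
static int urange_mul(struct urange *dst, struct urange src)
{
	if (dst->umax > UINT32_MAX || src.umax > UINT32_MAX)
		return -1;	/* a 64x64 product could exceed 64 bits */
	/* both factors <= 2^32 - 1, so each product fits in a u64 */
	dst->umin *= src.umin;
	dst->umax *= src.umax;
	return 0;
}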
4942static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
4943 struct bpf_reg_state *src_reg)
4944{
4945 s64 smin_val = src_reg->smin_value;
4946 u64 umax_val = src_reg->umax_value;
4947
4948 /* We get our minimum from the var_off, since that's inherently
4949 * bitwise. Our maximum is the minimum of the operands' maxima.
4950 */
4951 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg->var_off);
4952 dst_reg->umin_value = dst_reg->var_off.value;
4953 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
4954 if (dst_reg->smin_value < 0 || smin_val < 0) {
4955 /* Lose signed bounds when ANDing negative numbers,
4956 * ain't nobody got time for that.
4957 */
4958 dst_reg->smin_value = S64_MIN;
4959 dst_reg->smax_value = S64_MAX;
4960 } else {
4961 /* ANDing two positives gives a positive, so safe to
4962 * cast result into s64.
4963 */
4964 dst_reg->smin_value = dst_reg->umin_value;
4965 dst_reg->smax_value = dst_reg->umax_value;
4966 }
4967 /* We may learn something more from the var_off */
4968 __update_reg_bounds(dst_reg);
4969}
4970
4971static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
4972 struct bpf_reg_state *src_reg)
4973{
4974 s64 smin_val = src_reg->smin_value;
4975 u64 umin_val = src_reg->umin_value;
4976
4977 /* We get our maximum from the var_off, and our minimum is the
4978 * maximum of the operands' minima
4979 */
4980 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg->var_off);
4981 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
4982 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
4983 if (dst_reg->smin_value < 0 || smin_val < 0) {
4984 /* Lose signed bounds when ORing negative numbers,
4985 * ain't nobody got time for that.
4986 */
4987 dst_reg->smin_value = S64_MIN;
4988 dst_reg->smax_value = S64_MAX;
4989 } else {
4990 /* ORing two positives gives a positive, so safe to
4991 * cast result into s64.
4992 */
4993 dst_reg->smin_value = dst_reg->umin_value;
4994 dst_reg->smax_value = dst_reg->umax_value;
4995 }
4996 /* We may learn something more from the var_off */
4997 __update_reg_bounds(dst_reg);
4998}
4999
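Both bitwise cases above read their bounds straight out of the tnum (known-bits) representation: value holds the bits known to be one, mask the unknown bits. A simplified standalone model of the semantics these helpers rely on (shown in the same spirit as tnum_and(); not the kernel implementation):

#include <stdint.h>

struct xtnum { uint64_t value, mask; };	/* value: known 1s, mask: unknown */

static struct xtnum xtnum_and(struct xtnum a, struct xtnum b)
{
	uint64_t alpha = a.value | a.mask;	/* bits possibly set in a */
	uint64_t beta  = b.value | b.mask;	/* bits possibly set in b */
	uint64_t v     = a.value & b.value;	/* bits certainly set */

	return (struct xtnum){ v, alpha & beta & ~v };
}

/* the bounds used above fall out directly: */
static uint64_t xtnum_umin(struct xtnum t) { return t.value; }
static uint64_t xtnum_umax(struct xtnum t) { return t.value | t.mask; }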
5000static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
5001 struct bpf_reg_state *src_reg)
5002{
5003 u64 umax_val = src_reg->umax_value;
5004 u64 umin_val = src_reg->umin_value;
5005
5006 /* We lose all sign bit information (except what we can pick
5007 * up from var_off)
5008 */
5009 dst_reg->smin_value = S64_MIN;
5010 dst_reg->smax_value = S64_MAX;
5011 /* If we might shift our top bit out, then we know nothing */
5012 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
5013 dst_reg->umin_value = 0;
5014 dst_reg->umax_value = U64_MAX;
5015 } else {
5016 dst_reg->umin_value <<= umin_val;
5017 dst_reg->umax_value <<= umax_val;
5018 }
5019 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
5020 /* We may learn something more from the var_off */
5021 __update_reg_bounds(dst_reg);
5022}
5023
5024static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
5025 struct bpf_reg_state *src_reg)
5026{
5027 u64 umax_val = src_reg->umax_value;
5028 u64 umin_val = src_reg->umin_value;
5029
5030 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
5031 * be negative, then either:
5032 * 1) src_reg might be zero, so the sign bit of the result is
5033 * unknown, so we lose our signed bounds
5034 * 2) it's known negative, thus the unsigned bounds capture the
5035 * signed bounds
5036 * 3) the signed bounds cross zero, so they tell us nothing
5037 * about the result
5038 * If the value in dst_reg is known nonnegative, then again the
5039 * unsigned bounds capture the signed bounds.
5040 * Thus, in all cases it suffices to blow away our signed bounds
5041 * and rely on inferring new ones from the unsigned bounds and
5042 * var_off of the result.
5043 */
5044 dst_reg->smin_value = S64_MIN;
5045 dst_reg->smax_value = S64_MAX;
5046 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
5047 dst_reg->umin_value >>= umax_val;
5048 dst_reg->umax_value >>= umin_val;
5049 /* We may learn something more from the var_off */
5050 __update_reg_bounds(dst_reg);
5051}
5052
5053static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
5054 struct bpf_reg_state *src_reg,
5055 u64 insn_bitness)
5056{
5057 u64 umin_val = src_reg->umin_value;
5058
5059 /* Upon reaching here, src_known is true and
5060 * umax_val is equal to umin_val.
5061 */
5062 if (insn_bitness == 32) {
5063 dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val);
5064 dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val);
5065 } else {
5066 dst_reg->smin_value >>= umin_val;
5067 dst_reg->smax_value >>= umin_val;
5068 }
5069
5070 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val,
5071 insn_bitness);
5072
5073 /* blow away the dst_reg umin_value/umax_value and rely on
5074 * dst_reg var_off to refine the result.
5075 */
5076 dst_reg->umin_value = 0;
5077 dst_reg->umax_value = U64_MAX;
5078 __update_reg_bounds(dst_reg);
5079}
5080
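For the shifts above, the delicate case is BPF_LSH: once the top bit can be shifted out, nothing survives. A standalone sketch of that guard (it assumes the caller already rejected shift amounts >= 64, as the verifier does before calling scalar_min_max_lsh()):

#include <stdint.h>

struct urange { uint64_t umin, umax; };

static void urange_lsh(struct urange *r, uint64_t shmin, uint64_t shmax)
{
	/* precondition: shmin <= shmax < 64 */
	if (r->umax > (1ULL << (63 - shmax))) {
		/* top bit may be shifted out: all bets are off */
		r->umin = 0;
		r->umax = UINT64_MAX;
	} else {
		r->umin <<= shmin;	/* smallest value, smallest shift */
		r->umax <<= shmax;	/* largest value, largest shift */
	}
}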
468f6eaf
JH
5081/* WARNING: This function does calculations on 64-bit values, but the actual
5082 * execution may occur on 32-bit values. Therefore, things like bitshifts
5083 * need extra checks in the 32-bit case.
5084 */
f1174f77
EC
5085static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
5086 struct bpf_insn *insn,
5087 struct bpf_reg_state *dst_reg,
5088 struct bpf_reg_state src_reg)
969bf05e 5089{
638f5b90 5090 struct bpf_reg_state *regs = cur_regs(env);
48461135 5091 u8 opcode = BPF_OP(insn->code);
f1174f77 5092 bool src_known, dst_known;
b03c9f9f
EC
5093 s64 smin_val, smax_val;
5094 u64 umin_val, umax_val;
468f6eaf 5095 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
d3bd7413
DB
5096 u32 dst = insn->dst_reg;
5097 int ret;
48461135 5098
b799207e
JH
5099 if (insn_bitness == 32) {
5100 /* Relevant for 32-bit RSH: Information can propagate towards
5101 * LSB, so it isn't sufficient to only truncate the output to
5102 * 32 bits.
5103 */
5104 coerce_reg_to_size(dst_reg, 4);
5105 coerce_reg_to_size(&src_reg, 4);
5106 }
5107
b03c9f9f
EC
5108 smin_val = src_reg.smin_value;
5109 smax_val = src_reg.smax_value;
5110 umin_val = src_reg.umin_value;
5111 umax_val = src_reg.umax_value;
f1174f77
EC
5112 src_known = tnum_is_const(src_reg.var_off);
5113 dst_known = tnum_is_const(dst_reg->var_off);
f23cc643 5114
6f16101e
DB
5115 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
5116 smin_val > smax_val || umin_val > umax_val) {
5117 /* Taint dst register if offset had invalid bounds derived from
5118 * e.g. dead branches.
5119 */
f54c7898 5120 __mark_reg_unknown(env, dst_reg);
6f16101e
DB
5121 return 0;
5122 }
5123
bb7f0f98
AS
5124 if (!src_known &&
5125 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
f54c7898 5126 __mark_reg_unknown(env, dst_reg);
bb7f0f98
AS
5127 return 0;
5128 }
5129
48461135
JB
5130 switch (opcode) {
5131 case BPF_ADD:
d3bd7413
DB
5132 ret = sanitize_val_alu(env, insn);
5133 if (ret < 0) {
5134 verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
5135 return ret;
5136 }
07cd2631 5137 scalar_min_max_add(dst_reg, &src_reg);
48461135
JB
5138 break;
5139 case BPF_SUB:
d3bd7413
DB
5140 ret = sanitize_val_alu(env, insn);
5141 if (ret < 0) {
5142 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
5143 return ret;
5144 }
07cd2631 5145 scalar_min_max_sub(dst_reg, &src_reg);
48461135
JB
5146 break;
5147 case BPF_MUL:
07cd2631 5148 scalar_min_max_mul(dst_reg, &src_reg);
48461135
JB
5149 break;
5150 case BPF_AND:
f1174f77 5151 if (src_known && dst_known) {
b03c9f9f
EC
5152 __mark_reg_known(dst_reg, dst_reg->var_off.value &
5153 src_reg.var_off.value);
f1174f77
EC
5154 break;
5155 }
07cd2631 5156 scalar_min_max_and(dst_reg, &src_reg);
f1174f77
EC
5157 break;
5158 case BPF_OR:
5159 if (src_known && dst_known) {
b03c9f9f
EC
5160 __mark_reg_known(dst_reg, dst_reg->var_off.value |
5161 src_reg.var_off.value);
f1174f77
EC
5162 break;
5163 }
07cd2631 5164 scalar_min_max_or(dst_reg, &src_reg);
48461135
JB
5165 break;
5166 case BPF_LSH:
468f6eaf
JH
5167 if (umax_val >= insn_bitness) {
5168 /* Shifts greater than 31 or 63 are undefined.
5169 * This includes shifts by a negative number.
b03c9f9f 5170 */
61bd5218 5171 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
5172 break;
5173 }
07cd2631 5174 scalar_min_max_lsh(dst_reg, &src_reg);
48461135
JB
5175 break;
5176 case BPF_RSH:
468f6eaf
JH
5177 if (umax_val >= insn_bitness) {
5178 /* Shifts greater than 31 or 63 are undefined.
5179 * This includes shifts by a negative number.
b03c9f9f 5180 */
61bd5218 5181 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
5182 break;
5183 }
07cd2631 5184 scalar_min_max_rsh(dst_reg, &src_reg);
48461135 5185 break;
9cbe1f5a
YS
5186 case BPF_ARSH:
5187 if (umax_val >= insn_bitness) {
5188 /* Shifts greater than 31 or 63 are undefined.
5189 * This includes shifts by a negative number.
5190 */
5191 mark_reg_unknown(env, regs, insn->dst_reg);
5192 break;
5193 }
07cd2631 5194 scalar_min_max_arsh(dst_reg, &src_reg, insn_bitness);
9cbe1f5a 5195 break;
48461135 5196 default:
61bd5218 5197 mark_reg_unknown(env, regs, insn->dst_reg);
48461135
JB
5198 break;
5199 }
5200
468f6eaf
JH
5201 if (BPF_CLASS(insn->code) != BPF_ALU64) {
5202 /* 32-bit ALU ops are (32,32)->32 */
5203 coerce_reg_to_size(dst_reg, 4);
468f6eaf
JH
5204 }
5205
294f2fc6 5206 __update_reg_bounds(dst_reg);
b03c9f9f
EC
5207 __reg_deduce_bounds(dst_reg);
5208 __reg_bound_offset(dst_reg);
f1174f77
EC
5209 return 0;
5210}
5211
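The 32-bit coercion used above must recompute bounds after truncation, not just mask the tracked value. A standalone sketch of one sound (if conservative) way to truncate a u64 range to 32 bits; this mirrors coerce_reg_to_size() only in spirit:

#include <stdint.h>

struct urange { uint64_t umin, umax; };

static struct urange urange_trunc32(struct urange r)
{
	/* if the range wraps the 32-bit space, or the truncated
	 * endpoints come out of order, we know nothing
	 */
	if (r.umax - r.umin > UINT32_MAX ||
	    (uint32_t)r.umin > (uint32_t)r.umax)
		return (struct urange){ 0, UINT32_MAX };
	return (struct urange){ (uint32_t)r.umin, (uint32_t)r.umax };
}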
5212/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
5213 * and var_off.
5214 */
5215static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
5216 struct bpf_insn *insn)
5217{
f4d7e40a
AS
5218 struct bpf_verifier_state *vstate = env->cur_state;
5219 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5220 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
f1174f77
EC
5221 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
5222 u8 opcode = BPF_OP(insn->code);
b5dc0163 5223 int err;
f1174f77
EC
5224
5225 dst_reg = &regs[insn->dst_reg];
f1174f77
EC
5226 src_reg = NULL;
5227 if (dst_reg->type != SCALAR_VALUE)
5228 ptr_reg = dst_reg;
5229 if (BPF_SRC(insn->code) == BPF_X) {
5230 src_reg = &regs[insn->src_reg];
f1174f77
EC
5231 if (src_reg->type != SCALAR_VALUE) {
5232 if (dst_reg->type != SCALAR_VALUE) {
5233 /* Combining two pointers by any ALU op yields
82abbf8d
AS
5234 * an arbitrary scalar. Disallow all math except
5235 * pointer subtraction
f1174f77 5236 */
dd066823 5237 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
82abbf8d
AS
5238 mark_reg_unknown(env, regs, insn->dst_reg);
5239 return 0;
f1174f77 5240 }
82abbf8d
AS
5241 verbose(env, "R%d pointer %s pointer prohibited\n",
5242 insn->dst_reg,
5243 bpf_alu_string[opcode >> 4]);
5244 return -EACCES;
f1174f77
EC
5245 } else {
5246 /* scalar += pointer
5247 * This is legal, but we have to reverse our
5248 * src/dest handling in computing the range
5249 */
b5dc0163
AS
5250 err = mark_chain_precision(env, insn->dst_reg);
5251 if (err)
5252 return err;
82abbf8d
AS
5253 return adjust_ptr_min_max_vals(env, insn,
5254 src_reg, dst_reg);
f1174f77
EC
5255 }
5256 } else if (ptr_reg) {
5257 /* pointer += scalar */
b5dc0163
AS
5258 err = mark_chain_precision(env, insn->src_reg);
5259 if (err)
5260 return err;
82abbf8d
AS
5261 return adjust_ptr_min_max_vals(env, insn,
5262 dst_reg, src_reg);
f1174f77
EC
5263 }
5264 } else {
5265 /* Pretend the src is a reg with a known value, since we only
5266 * need to be able to read from this state.
5267 */
5268 off_reg.type = SCALAR_VALUE;
b03c9f9f 5269 __mark_reg_known(&off_reg, insn->imm);
f1174f77 5270 src_reg = &off_reg;
82abbf8d
AS
5271 if (ptr_reg) /* pointer += K */
5272 return adjust_ptr_min_max_vals(env, insn,
5273 ptr_reg, src_reg);
f1174f77
EC
5274 }
5275
5276 /* Got here implies adding two SCALAR_VALUEs */
5277 if (WARN_ON_ONCE(ptr_reg)) {
f4d7e40a 5278 print_verifier_state(env, state);
61bd5218 5279 verbose(env, "verifier internal error: unexpected ptr_reg\n");
f1174f77
EC
5280 return -EINVAL;
5281 }
5282 if (WARN_ON(!src_reg)) {
f4d7e40a 5283 print_verifier_state(env, state);
61bd5218 5284 verbose(env, "verifier internal error: no src_reg\n");
f1174f77
EC
5285 return -EINVAL;
5286 }
5287 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
48461135
JB
5288}
5289
17a52670 5290/* check validity of 32-bit and 64-bit arithmetic operations */
58e2af8b 5291static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 5292{
638f5b90 5293 struct bpf_reg_state *regs = cur_regs(env);
17a52670
AS
5294 u8 opcode = BPF_OP(insn->code);
5295 int err;
5296
5297 if (opcode == BPF_END || opcode == BPF_NEG) {
5298 if (opcode == BPF_NEG) {
5299 if (BPF_SRC(insn->code) != 0 ||
5300 insn->src_reg != BPF_REG_0 ||
5301 insn->off != 0 || insn->imm != 0) {
61bd5218 5302 verbose(env, "BPF_NEG uses reserved fields\n");
17a52670
AS
5303 return -EINVAL;
5304 }
5305 } else {
5306 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
e67b8a68
EC
5307 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
5308 BPF_CLASS(insn->code) == BPF_ALU64) {
61bd5218 5309 verbose(env, "BPF_END uses reserved fields\n");
17a52670
AS
5310 return -EINVAL;
5311 }
5312 }
5313
5314 /* check src operand */
dc503a8a 5315 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
5316 if (err)
5317 return err;
5318
1be7f75d 5319 if (is_pointer_value(env, insn->dst_reg)) {
61bd5218 5320 verbose(env, "R%d pointer arithmetic prohibited\n",
1be7f75d
AS
5321 insn->dst_reg);
5322 return -EACCES;
5323 }
5324
17a52670 5325 /* check dest operand */
dc503a8a 5326 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
5327 if (err)
5328 return err;
5329
5330 } else if (opcode == BPF_MOV) {
5331
5332 if (BPF_SRC(insn->code) == BPF_X) {
5333 if (insn->imm != 0 || insn->off != 0) {
61bd5218 5334 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
5335 return -EINVAL;
5336 }
5337
5338 /* check src operand */
dc503a8a 5339 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
5340 if (err)
5341 return err;
5342 } else {
5343 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 5344 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
5345 return -EINVAL;
5346 }
5347 }
5348
fbeb1603
AF
5349 /* check dest operand, mark as required later */
5350 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
5351 if (err)
5352 return err;
5353
5354 if (BPF_SRC(insn->code) == BPF_X) {
e434b8cd
JW
5355 struct bpf_reg_state *src_reg = regs + insn->src_reg;
5356 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
5357
17a52670
AS
5358 if (BPF_CLASS(insn->code) == BPF_ALU64) {
5359 /* case: R1 = R2
5360 * copy register state to dest reg
5361 */
e434b8cd
JW
5362 *dst_reg = *src_reg;
5363 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 5364 dst_reg->subreg_def = DEF_NOT_SUBREG;
17a52670 5365 } else {
f1174f77 5366 /* R1 = (u32) R2 */
1be7f75d 5367 if (is_pointer_value(env, insn->src_reg)) {
61bd5218
JK
5368 verbose(env,
5369 "R%d partial copy of pointer\n",
1be7f75d
AS
5370 insn->src_reg);
5371 return -EACCES;
e434b8cd
JW
5372 } else if (src_reg->type == SCALAR_VALUE) {
5373 *dst_reg = *src_reg;
5374 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 5375 dst_reg->subreg_def = env->insn_idx + 1;
e434b8cd
JW
5376 } else {
5377 mark_reg_unknown(env, regs,
5378 insn->dst_reg);
1be7f75d 5379 }
e434b8cd 5380 coerce_reg_to_size(dst_reg, 4);
17a52670
AS
5381 }
5382 } else {
5383 /* case: R = imm
5384 * remember the value we stored into this reg
5385 */
fbeb1603
AF
5386 /* clear any state __mark_reg_known doesn't set */
5387 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77 5388 regs[insn->dst_reg].type = SCALAR_VALUE;
95a762e2
JH
5389 if (BPF_CLASS(insn->code) == BPF_ALU64) {
5390 __mark_reg_known(regs + insn->dst_reg,
5391 insn->imm);
5392 } else {
5393 __mark_reg_known(regs + insn->dst_reg,
5394 (u32)insn->imm);
5395 }
17a52670
AS
5396 }
5397
5398 } else if (opcode > BPF_END) {
61bd5218 5399 verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
17a52670
AS
5400 return -EINVAL;
5401
5402 } else { /* all other ALU ops: and, sub, xor, add, ... */
5403
17a52670
AS
5404 if (BPF_SRC(insn->code) == BPF_X) {
5405 if (insn->imm != 0 || insn->off != 0) {
61bd5218 5406 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
5407 return -EINVAL;
5408 }
5409 /* check src1 operand */
dc503a8a 5410 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
5411 if (err)
5412 return err;
5413 } else {
5414 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 5415 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
5416 return -EINVAL;
5417 }
5418 }
5419
5420 /* check src2 operand */
dc503a8a 5421 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
5422 if (err)
5423 return err;
5424
5425 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
5426 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
61bd5218 5427 verbose(env, "div by zero\n");
17a52670
AS
5428 return -EINVAL;
5429 }
5430
229394e8
RV
5431 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
5432 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
5433 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
5434
5435 if (insn->imm < 0 || insn->imm >= size) {
61bd5218 5436 verbose(env, "invalid shift %d\n", insn->imm);
229394e8
RV
5437 return -EINVAL;
5438 }
5439 }
5440
1a0dc1ac 5441 /* check dest operand */
dc503a8a 5442 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
1a0dc1ac
AS
5443 if (err)
5444 return err;
5445
f1174f77 5446 return adjust_reg_min_max_vals(env, insn);
17a52670
AS
5447 }
5448
5449 return 0;
5450}
5451
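check_alu_op() above keeps slicing insn->code into class/op/source fields. This is the standard eBPF opcode bit layout those macros decode; the demo uses local macro names to avoid clashing with the uapi headers:

#include <stdint.h>
#include <stdio.h>

#define XBPF_CLASS(code) ((code) & 0x07)	/* ALU, ALU64, JMP, ... */
#define XBPF_OP(code)    ((code) & 0xf0)	/* ADD, SUB, MOV, ... */
#define XBPF_SRC(code)   ((code) & 0x08)	/* K (imm) or X (register) */

int main(void)
{
	uint8_t code = 0x0f;	/* BPF_ALU64 | BPF_ADD | BPF_X */

	printf("class=%#x op=%#x src=%#x\n",
	       XBPF_CLASS(code), XBPF_OP(code), XBPF_SRC(code));
	/* prints: class=0x7 op=0 src=0x8 */
	return 0;
}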
c6a9efa1
PC
5452static void __find_good_pkt_pointers(struct bpf_func_state *state,
5453 struct bpf_reg_state *dst_reg,
5454 enum bpf_reg_type type, u16 new_range)
5455{
5456 struct bpf_reg_state *reg;
5457 int i;
5458
5459 for (i = 0; i < MAX_BPF_REG; i++) {
5460 reg = &state->regs[i];
5461 if (reg->type == type && reg->id == dst_reg->id)
5462 /* keep the maximum range already checked */
5463 reg->range = max(reg->range, new_range);
5464 }
5465
5466 bpf_for_each_spilled_reg(i, state, reg) {
5467 if (!reg)
5468 continue;
5469 if (reg->type == type && reg->id == dst_reg->id)
5470 reg->range = max(reg->range, new_range);
5471 }
5472}
5473
f4d7e40a 5474static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
de8f3a83 5475 struct bpf_reg_state *dst_reg,
f8ddadc4 5476 enum bpf_reg_type type,
fb2a311a 5477 bool range_right_open)
969bf05e 5478{
fb2a311a 5479 u16 new_range;
c6a9efa1 5480 int i;
2d2be8ca 5481
fb2a311a
DB
5482 if (dst_reg->off < 0 ||
5483 (dst_reg->off == 0 && range_right_open))
f1174f77
EC
5484 /* This doesn't give us any range */
5485 return;
5486
b03c9f9f
EC
5487 if (dst_reg->umax_value > MAX_PACKET_OFF ||
5488 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
f1174f77
EC
5489 /* Risk of overflow. For instance, ptr + (1<<63) may be less
5490 * than pkt_end, but that's because it's also less than pkt.
5491 */
5492 return;
5493
fb2a311a
DB
5494 new_range = dst_reg->off;
5495 if (range_right_open)
5496 new_range--;
5497
5498 /* Examples for register markings:
2d2be8ca 5499 *
fb2a311a 5500 * pkt_data in dst register:
2d2be8ca
DB
5501 *
5502 * r2 = r3;
5503 * r2 += 8;
5504 * if (r2 > pkt_end) goto <handle exception>
5505 * <access okay>
5506 *
b4e432f1
DB
5507 * r2 = r3;
5508 * r2 += 8;
5509 * if (r2 < pkt_end) goto <access okay>
5510 * <handle exception>
5511 *
2d2be8ca
DB
5512 * Where:
5513 * r2 == dst_reg, pkt_end == src_reg
5514 * r2=pkt(id=n,off=8,r=0)
5515 * r3=pkt(id=n,off=0,r=0)
5516 *
fb2a311a 5517 * pkt_data in src register:
2d2be8ca
DB
5518 *
5519 * r2 = r3;
5520 * r2 += 8;
5521 * if (pkt_end >= r2) goto <access okay>
5522 * <handle exception>
5523 *
b4e432f1
DB
5524 * r2 = r3;
5525 * r2 += 8;
5526 * if (pkt_end <= r2) goto <handle exception>
5527 * <access okay>
5528 *
2d2be8ca
DB
5529 * Where:
5530 * pkt_end == dst_reg, r2 == src_reg
5531 * r2=pkt(id=n,off=8,r=0)
5532 * r3=pkt(id=n,off=0,r=0)
5533 *
5534 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
fb2a311a
DB
5535 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
5536 * and [r3, r3 + 8-1) respectively is safe to access depending on
5537 * the check.
969bf05e 5538 */
2d2be8ca 5539
f1174f77
EC
5540 /* If our ids match, then we must have the same max_value. And we
5541 * don't care about the other reg's fixed offset, since if it's too big
5542 * the range won't allow anything.
5543 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
5544 */
c6a9efa1
PC
5545 for (i = 0; i <= vstate->curframe; i++)
5546 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
5547 new_range);
969bf05e
AS
5548}
5549
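A condensed restatement of the range computation above (standalone, hypothetical helper): the usable packet range is the tested pointer's fixed offset, minus one when the comparison was right-open.

#include <stdint.h>

/* returns 0 when the comparison yields no usable range; a 0 update
 * through max() in the loop above would be a no-op anyway
 */
static uint16_t pkt_new_range(int32_t off, int range_right_open)
{
	if (off < 0 || (off == 0 && range_right_open))
		return 0;
	return range_right_open ? off - 1 : off;
}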
4f7b3e82
AS
5550/* compute branch direction of the expression "if (reg opcode val) goto target;"
5551 * and return:
5552 * 1 - branch will be taken and "goto target" will be executed
5553 * 0 - branch will not be taken and fall-through to next insn
5554 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
5555 */
092ed096
JW
5556static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
5557 bool is_jmp32)
4f7b3e82 5558{
092ed096 5559 struct bpf_reg_state reg_lo;
a72dafaf
JW
5560 s64 sval;
5561
4f7b3e82
AS
5562 if (__is_pointer_value(false, reg))
5563 return -1;
5564
092ed096
JW
5565 if (is_jmp32) {
5566 reg_lo = *reg;
5567 reg = &reg_lo;
5568 /* For JMP32, only low 32 bits are compared, coerce_reg_to_size
5569 * could truncate high bits and update umin/umax according to
5570 * information of low bits.
5571 */
5572 coerce_reg_to_size(reg, 4);
5573 /* smin/smax need special handling. For example, after coerce,
5574 * if smin_value is 0x00000000ffffffffLL, the value is -1 when
5575 * used as operand to JMP32. It is a negative number from s32's
5576 * point of view, while it is a positive number when seen as
5577 * s64. The smin/smax are kept as s64, therefore, when used with
5578 * JMP32, they need to be transformed into s32, then sign
5579 * extended back to s64.
5580 *
5581 * Also, smin/smax were copied from umin/umax. If umin/umax has
5582 * different sign bits, then the min/max relationship doesn't
5583 * hold after casting into s32; in this case, set smin/smax
5584 * to the safest range.
5585 */
5586 if ((reg->umax_value ^ reg->umin_value) &
5587 (1ULL << 31)) {
5588 reg->smin_value = S32_MIN;
5589 reg->smax_value = S32_MAX;
5590 }
5591 reg->smin_value = (s64)(s32)reg->smin_value;
5592 reg->smax_value = (s64)(s32)reg->smax_value;
5593
5594 val = (u32)val;
5595 sval = (s64)(s32)val;
5596 } else {
5597 sval = (s64)val;
5598 }
a72dafaf 5599
4f7b3e82
AS
5600 switch (opcode) {
5601 case BPF_JEQ:
5602 if (tnum_is_const(reg->var_off))
5603 return !!tnum_equals_const(reg->var_off, val);
5604 break;
5605 case BPF_JNE:
5606 if (tnum_is_const(reg->var_off))
5607 return !tnum_equals_const(reg->var_off, val);
5608 break;
960ea056
JK
5609 case BPF_JSET:
5610 if ((~reg->var_off.mask & reg->var_off.value) & val)
5611 return 1;
5612 if (!((reg->var_off.mask | reg->var_off.value) & val))
5613 return 0;
5614 break;
4f7b3e82
AS
5615 case BPF_JGT:
5616 if (reg->umin_value > val)
5617 return 1;
5618 else if (reg->umax_value <= val)
5619 return 0;
5620 break;
5621 case BPF_JSGT:
a72dafaf 5622 if (reg->smin_value > sval)
4f7b3e82 5623 return 1;
a72dafaf 5624 else if (reg->smax_value < sval)
4f7b3e82
AS
5625 return 0;
5626 break;
5627 case BPF_JLT:
5628 if (reg->umax_value < val)
5629 return 1;
5630 else if (reg->umin_value >= val)
5631 return 0;
5632 break;
5633 case BPF_JSLT:
a72dafaf 5634 if (reg->smax_value < sval)
4f7b3e82 5635 return 1;
a72dafaf 5636 else if (reg->smin_value >= sval)
4f7b3e82
AS
5637 return 0;
5638 break;
5639 case BPF_JGE:
5640 if (reg->umin_value >= val)
5641 return 1;
5642 else if (reg->umax_value < val)
5643 return 0;
5644 break;
5645 case BPF_JSGE:
a72dafaf 5646 if (reg->smin_value >= sval)
4f7b3e82 5647 return 1;
a72dafaf 5648 else if (reg->smax_value < sval)
4f7b3e82
AS
5649 return 0;
5650 break;
5651 case BPF_JLE:
5652 if (reg->umax_value <= val)
5653 return 1;
5654 else if (reg->umin_value > val)
5655 return 0;
5656 break;
5657 case BPF_JSLE:
a72dafaf 5658 if (reg->smax_value <= sval)
4f7b3e82 5659 return 1;
a72dafaf 5660 else if (reg->smin_value > sval)
4f7b3e82
AS
5661 return 0;
5662 break;
5663 }
5664
5665 return -1;
5666}
5667
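A standalone illustration of the unsigned cases above: with the register known to lie in [umin, umax], a comparison against a constant is decided statically exactly when the whole range sits on one side of it.

#include <stdint.h>
#include <stdio.h>

/* 1: always taken, 0: never taken, -1: unknown (same convention) */
static int jgt_taken(uint64_t umin, uint64_t umax, uint64_t val)
{
	if (umin > val)
		return 1;
	if (umax <= val)
		return 0;
	return -1;
}

int main(void)
{
	printf("%d\n", jgt_taken(5, 10, 4));	/* 1: [5,10] all > 4 */
	printf("%d\n", jgt_taken(0, 10, 4));	/* -1: range straddles 4 */
	return 0;
}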
092ed096
JW
5668/* Generate min value of the high 32-bit from TNUM info. */
5669static u64 gen_hi_min(struct tnum var)
5670{
5671 return var.value & ~0xffffffffULL;
5672}
5673
5674/* Generate max value of the high 32-bit from TNUM info. */
5675static u64 gen_hi_max(struct tnum var)
5676{
5677 return (var.value | var.mask) & ~0xffffffffULL;
5678}
5679
5680/* Return true if VAL is compared with a s64 sign extended from s32, and they
5681 * are with the same signedness.
5682 */
5683static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
5684{
5685 return ((s32)sval >= 0 &&
5686 reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
5687 ((s32)sval < 0 &&
5688 reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
5689}
5690
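A quick numeric check of gen_hi_min()/gen_hi_max() above: for a var_off of (value=0x100000001, mask=0x200000000), bit 32 is known set and bit 33 is unknown, so the high word can be 0x1 or 0x3.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t value = 0x100000001ULL;	/* bits 0 and 32 known set */
	uint64_t mask  = 0x200000000ULL;	/* bit 33 unknown */

	printf("hi_min=%#llx hi_max=%#llx\n",
	       (unsigned long long)(value & ~0xffffffffULL),
	       (unsigned long long)((value | mask) & ~0xffffffffULL));
	/* prints: hi_min=0x100000000 hi_max=0x300000000 */
	return 0;
}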
48461135
JB
5691/* Adjusts the register min/max values in the case that the dst_reg is the
5692 * variable register that we are working on, and src_reg is a constant or we're
5693 * simply doing a BPF_K check.
f1174f77 5694 * In JEQ/JNE cases we also adjust the var_off values.
48461135
JB
5695 */
5696static void reg_set_min_max(struct bpf_reg_state *true_reg,
5697 struct bpf_reg_state *false_reg, u64 val,
092ed096 5698 u8 opcode, bool is_jmp32)
48461135 5699{
a72dafaf
JW
5700 s64 sval;
5701
f1174f77
EC
5702 /* If the dst_reg is a pointer, we can't learn anything about its
5703 * variable offset from the compare (unless src_reg were a pointer into
5704 * the same object, but we don't bother with that.
5705 * Since false_reg and true_reg have the same type by construction, we
5706 * only need to check one of them for pointerness.
5707 */
5708 if (__is_pointer_value(false, false_reg))
5709 return;
4cabc5b1 5710
092ed096
JW
5711 val = is_jmp32 ? (u32)val : val;
5712 sval = is_jmp32 ? (s64)(s32)val : (s64)val;
a72dafaf 5713
48461135
JB
5714 switch (opcode) {
5715 case BPF_JEQ:
48461135 5716 case BPF_JNE:
a72dafaf
JW
5717 {
5718 struct bpf_reg_state *reg =
5719 opcode == BPF_JEQ ? true_reg : false_reg;
5720
5721 /* For BPF_JEQ, if this is false we know nothing, Jon Snow, but
5722 * if it is true we know the value for sure. Likewise for
5723 * BPF_JNE.
48461135 5724 */
092ed096
JW
5725 if (is_jmp32) {
5726 u64 old_v = reg->var_off.value;
5727 u64 hi_mask = ~0xffffffffULL;
5728
5729 reg->var_off.value = (old_v & hi_mask) | val;
5730 reg->var_off.mask &= hi_mask;
5731 } else {
5732 __mark_reg_known(reg, val);
5733 }
48461135 5734 break;
a72dafaf 5735 }
960ea056
JK
5736 case BPF_JSET:
5737 false_reg->var_off = tnum_and(false_reg->var_off,
5738 tnum_const(~val));
5739 if (is_power_of_2(val))
5740 true_reg->var_off = tnum_or(true_reg->var_off,
5741 tnum_const(val));
5742 break;
48461135 5743 case BPF_JGE:
a72dafaf
JW
5744 case BPF_JGT:
5745 {
5746 u64 false_umax = opcode == BPF_JGT ? val : val - 1;
5747 u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
5748
092ed096
JW
5749 if (is_jmp32) {
5750 false_umax += gen_hi_max(false_reg->var_off);
5751 true_umin += gen_hi_min(true_reg->var_off);
5752 }
a72dafaf
JW
5753 false_reg->umax_value = min(false_reg->umax_value, false_umax);
5754 true_reg->umin_value = max(true_reg->umin_value, true_umin);
b03c9f9f 5755 break;
a72dafaf 5756 }
48461135 5757 case BPF_JSGE:
a72dafaf
JW
5758 case BPF_JSGT:
5759 {
5760 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
5761 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
5762
092ed096
JW
5763 /* If the full s64 was not sign-extended from s32 then don't
5764 * deduce further info.
5765 */
5766 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5767 break;
a72dafaf
JW
5768 false_reg->smax_value = min(false_reg->smax_value, false_smax);
5769 true_reg->smin_value = max(true_reg->smin_value, true_smin);
48461135 5770 break;
a72dafaf 5771 }
b4e432f1 5772 case BPF_JLE:
a72dafaf
JW
5773 case BPF_JLT:
5774 {
5775 u64 false_umin = opcode == BPF_JLT ? val : val + 1;
5776 u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
5777
092ed096
JW
5778 if (is_jmp32) {
5779 false_umin += gen_hi_min(false_reg->var_off);
5780 true_umax += gen_hi_max(true_reg->var_off);
5781 }
a72dafaf
JW
5782 false_reg->umin_value = max(false_reg->umin_value, false_umin);
5783 true_reg->umax_value = min(true_reg->umax_value, true_umax);
b4e432f1 5784 break;
a72dafaf 5785 }
b4e432f1 5786 case BPF_JSLE:
a72dafaf
JW
5787 case BPF_JSLT:
5788 {
5789 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
5790 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
5791
092ed096
JW
5792 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5793 break;
a72dafaf
JW
5794 false_reg->smin_value = max(false_reg->smin_value, false_smin);
5795 true_reg->smax_value = min(true_reg->smax_value, true_smax);
b4e432f1 5796 break;
a72dafaf 5797 }
48461135
JB
5798 default:
5799 break;
5800 }
5801
b03c9f9f
EC
5802 __reg_deduce_bounds(false_reg);
5803 __reg_deduce_bounds(true_reg);
5804 /* We might have learned some bits from the bounds. */
5805 __reg_bound_offset(false_reg);
5806 __reg_bound_offset(true_reg);
581738a6
YS
5807 if (is_jmp32) {
5808 __reg_bound_offset32(false_reg);
5809 __reg_bound_offset32(true_reg);
5810 }
b03c9f9f
EC
5811 /* Intersecting with the old var_off might have improved our bounds
5812 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5813 * then new var_off is (0; 0x7f...fc) which improves our umax.
5814 */
5815 __update_reg_bounds(false_reg);
5816 __update_reg_bounds(true_reg);
48461135
JB
5817}
5818
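A worked instance of the BPF_JGT/BPF_JGE arm above (64-bit BPF_K compare; helper names are hypothetical): from reg in [0, 100] and "if reg > 42", the true branch learns umin = 43 and the false branch learns umax = 42.

#include <stdint.h>

struct urange { uint64_t umin, umax; };

static void jgt_refine(struct urange *true_r, struct urange *false_r,
		       uint64_t val)
{
	if (true_r->umin < val + 1)
		true_r->umin = val + 1;	/* taken: reg >= val + 1 */
	if (false_r->umax > val)
		false_r->umax = val;	/* not taken: reg <= val */
}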
f1174f77
EC
5819/* Same as above, but for the case that dst_reg holds a constant and src_reg is
5820 * the variable reg.
48461135
JB
5821 */
5822static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
5823 struct bpf_reg_state *false_reg, u64 val,
092ed096 5824 u8 opcode, bool is_jmp32)
48461135 5825{
a72dafaf
JW
5826 s64 sval;
5827
f1174f77
EC
5828 if (__is_pointer_value(false, false_reg))
5829 return;
4cabc5b1 5830
092ed096
JW
5831 val = is_jmp32 ? (u32)val : val;
5832 sval = is_jmp32 ? (s64)(s32)val : (s64)val;
a72dafaf 5833
48461135
JB
5834 switch (opcode) {
5835 case BPF_JEQ:
48461135 5836 case BPF_JNE:
a72dafaf
JW
5837 {
5838 struct bpf_reg_state *reg =
5839 opcode == BPF_JEQ ? true_reg : false_reg;
5840
092ed096
JW
5841 if (is_jmp32) {
5842 u64 old_v = reg->var_off.value;
5843 u64 hi_mask = ~0xffffffffULL;
5844
5845 reg->var_off.value = (old_v & hi_mask) | val;
5846 reg->var_off.mask &= hi_mask;
5847 } else {
5848 __mark_reg_known(reg, val);
5849 }
48461135 5850 break;
a72dafaf 5851 }
960ea056
JK
5852 case BPF_JSET:
5853 false_reg->var_off = tnum_and(false_reg->var_off,
5854 tnum_const(~val));
5855 if (is_power_of_2(val))
5856 true_reg->var_off = tnum_or(true_reg->var_off,
5857 tnum_const(val));
5858 break;
48461135 5859 case BPF_JGE:
a72dafaf
JW
5860 case BPF_JGT:
5861 {
5862 u64 false_umin = opcode == BPF_JGT ? val : val + 1;
5863 u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
5864
092ed096
JW
5865 if (is_jmp32) {
5866 false_umin += gen_hi_min(false_reg->var_off);
5867 true_umax += gen_hi_max(true_reg->var_off);
5868 }
a72dafaf
JW
5869 false_reg->umin_value = max(false_reg->umin_value, false_umin);
5870 true_reg->umax_value = min(true_reg->umax_value, true_umax);
b03c9f9f 5871 break;
a72dafaf 5872 }
48461135 5873 case BPF_JSGE:
a72dafaf
JW
5874 case BPF_JSGT:
5875 {
5876 s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1;
5877 s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
5878
092ed096
JW
5879 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5880 break;
a72dafaf
JW
5881 false_reg->smin_value = max(false_reg->smin_value, false_smin);
5882 true_reg->smax_value = min(true_reg->smax_value, true_smax);
48461135 5883 break;
a72dafaf 5884 }
b4e432f1 5885 case BPF_JLE:
a72dafaf
JW
5886 case BPF_JLT:
5887 {
5888 u64 false_umax = opcode == BPF_JLT ? val : val - 1;
5889 u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
5890
092ed096
JW
5891 if (is_jmp32) {
5892 false_umax += gen_hi_max(false_reg->var_off);
5893 true_umin += gen_hi_min(true_reg->var_off);
5894 }
a72dafaf
JW
5895 false_reg->umax_value = min(false_reg->umax_value, false_umax);
5896 true_reg->umin_value = max(true_reg->umin_value, true_umin);
b4e432f1 5897 break;
a72dafaf 5898 }
b4e432f1 5899 case BPF_JSLE:
a72dafaf
JW
5900 case BPF_JSLT:
5901 {
5902 s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1;
5903 s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
5904
092ed096
JW
5905 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5906 break;
a72dafaf
JW
5907 false_reg->smax_value = min(false_reg->smax_value, false_smax);
5908 true_reg->smin_value = max(true_reg->smin_value, true_smin);
b4e432f1 5909 break;
a72dafaf 5910 }
48461135
JB
5911 default:
5912 break;
5913 }
5914
b03c9f9f
EC
5915 __reg_deduce_bounds(false_reg);
5916 __reg_deduce_bounds(true_reg);
5917 /* We might have learned some bits from the bounds. */
5918 __reg_bound_offset(false_reg);
5919 __reg_bound_offset(true_reg);
581738a6
YS
5920 if (is_jmp32) {
5921 __reg_bound_offset32(false_reg);
5922 __reg_bound_offset32(true_reg);
5923 }
b03c9f9f
EC
5924 /* Intersecting with the old var_off might have improved our bounds
5925 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5926 * then new var_off is (0; 0x7f...fc) which improves our umax.
5927 */
5928 __update_reg_bounds(false_reg);
5929 __update_reg_bounds(true_reg);
f1174f77
EC
5930}
5931
5932/* Regs are known to be equal, so intersect their min/max/var_off */
5933static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
5934 struct bpf_reg_state *dst_reg)
5935{
b03c9f9f
EC
5936 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
5937 dst_reg->umin_value);
5938 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
5939 dst_reg->umax_value);
5940 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
5941 dst_reg->smin_value);
5942 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
5943 dst_reg->smax_value);
f1174f77
EC
5944 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
5945 dst_reg->var_off);
b03c9f9f
EC
5946 /* We might have learned new bounds from the var_off. */
5947 __update_reg_bounds(src_reg);
5948 __update_reg_bounds(dst_reg);
5949 /* We might have learned something about the sign bit. */
5950 __reg_deduce_bounds(src_reg);
5951 __reg_deduce_bounds(dst_reg);
5952 /* We might have learned some bits from the bounds. */
5953 __reg_bound_offset(src_reg);
5954 __reg_bound_offset(dst_reg);
5955 /* Intersecting with the old var_off might have improved our bounds
5956 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5957 * then new var_off is (0; 0x7f...fc) which improves our umax.
5958 */
5959 __update_reg_bounds(src_reg);
5960 __update_reg_bounds(dst_reg);
f1174f77
EC
5961}
5962
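The tnum_intersect() step above merges known bits from both registers. A simplified standalone model of that operation (it assumes the two tnums really do describe the same value, as the caller's comment states):

#include <stdint.h>

struct xtnum { uint64_t value, mask; };	/* value: known 1s, mask: unknown */

static struct xtnum xtnum_intersect(struct xtnum a, struct xtnum b)
{
	uint64_t v  = a.value | b.value;	/* a bit known set in either */
	uint64_t mu = a.mask & b.mask;		/* unknown only in both */

	return (struct xtnum){ v & ~mu, mu };
}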
5963static void reg_combine_min_max(struct bpf_reg_state *true_src,
5964 struct bpf_reg_state *true_dst,
5965 struct bpf_reg_state *false_src,
5966 struct bpf_reg_state *false_dst,
5967 u8 opcode)
5968{
5969 switch (opcode) {
5970 case BPF_JEQ:
5971 __reg_combine_min_max(true_src, true_dst);
5972 break;
5973 case BPF_JNE:
5974 __reg_combine_min_max(false_src, false_dst);
b03c9f9f 5975 break;
4cabc5b1 5976 }
48461135
JB
5977}
5978
fd978bf7
JS
5979static void mark_ptr_or_null_reg(struct bpf_func_state *state,
5980 struct bpf_reg_state *reg, u32 id,
840b9615 5981 bool is_null)
57a09bf0 5982{
840b9615 5983 if (reg_type_may_be_null(reg->type) && reg->id == id) {
f1174f77
EC
5984 /* Old offset (both fixed and variable parts) should
5985 * have been known-zero, because we don't allow pointer
5986 * arithmetic on pointers that might be NULL.
5987 */
b03c9f9f
EC
5988 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
5989 !tnum_equals_const(reg->var_off, 0) ||
f1174f77 5990 reg->off)) {
b03c9f9f
EC
5991 __mark_reg_known_zero(reg);
5992 reg->off = 0;
f1174f77
EC
5993 }
5994 if (is_null) {
5995 reg->type = SCALAR_VALUE;
840b9615
JS
5996 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
5997 if (reg->map_ptr->inner_map_meta) {
5998 reg->type = CONST_PTR_TO_MAP;
5999 reg->map_ptr = reg->map_ptr->inner_map_meta;
fada7fdc
JL
6000 } else if (reg->map_ptr->map_type ==
6001 BPF_MAP_TYPE_XSKMAP) {
6002 reg->type = PTR_TO_XDP_SOCK;
840b9615
JS
6003 } else {
6004 reg->type = PTR_TO_MAP_VALUE;
6005 }
c64b7983
JS
6006 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
6007 reg->type = PTR_TO_SOCKET;
46f8bc92
MKL
6008 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
6009 reg->type = PTR_TO_SOCK_COMMON;
655a51e5
MKL
6010 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
6011 reg->type = PTR_TO_TCP_SOCK;
56f668df 6012 }
1b986589
MKL
6013 if (is_null) {
6014 /* We don't need id and ref_obj_id from this point
6015 * onwards anymore, thus we should better reset it,
6016 * so that state pruning has chances to take effect.
6017 */
6018 reg->id = 0;
6019 reg->ref_obj_id = 0;
6020 } else if (!reg_may_point_to_spin_lock(reg)) {
6021 /* For not-NULL ptr, reg->ref_obj_id will be reset
6022 * in release_reg_references().
6023 *
6024 * reg->id is still used by spin_lock ptr. Other
6025 * than spin_lock ptr type, reg->id can be reset.
fd978bf7
JS
6026 */
6027 reg->id = 0;
56f668df 6028 }
57a09bf0
TG
6029 }
6030}
6031
c6a9efa1
PC
6032static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
6033 bool is_null)
6034{
6035 struct bpf_reg_state *reg;
6036 int i;
6037
6038 for (i = 0; i < MAX_BPF_REG; i++)
6039 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
6040
6041 bpf_for_each_spilled_reg(i, state, reg) {
6042 if (!reg)
6043 continue;
6044 mark_ptr_or_null_reg(state, reg, id, is_null);
6045 }
6046}
6047
57a09bf0
TG
6048/* The logic is similar to find_good_pkt_pointers(), both could eventually
6049 * be folded together at some point.
6050 */
840b9615
JS
6051static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
6052 bool is_null)
57a09bf0 6053{
f4d7e40a 6054 struct bpf_func_state *state = vstate->frame[vstate->curframe];
c6a9efa1 6055 struct bpf_reg_state *regs = state->regs;
1b986589 6056 u32 ref_obj_id = regs[regno].ref_obj_id;
a08dd0da 6057 u32 id = regs[regno].id;
c6a9efa1 6058 int i;
57a09bf0 6059
1b986589
MKL
6060 if (ref_obj_id && ref_obj_id == id && is_null)
6061 /* regs[regno] is in the " == NULL" branch.
6062 * No one could have freed the reference state before
6063 * doing the NULL check.
6064 */
6065 WARN_ON_ONCE(release_reference_state(state, id));
fd978bf7 6066
c6a9efa1
PC
6067 for (i = 0; i <= vstate->curframe; i++)
6068 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
57a09bf0
TG
6069}
6070
5beca081
DB
6071static bool try_match_pkt_pointers(const struct bpf_insn *insn,
6072 struct bpf_reg_state *dst_reg,
6073 struct bpf_reg_state *src_reg,
6074 struct bpf_verifier_state *this_branch,
6075 struct bpf_verifier_state *other_branch)
6076{
6077 if (BPF_SRC(insn->code) != BPF_X)
6078 return false;
6079
092ed096
JW
6080 /* Pointers are always 64-bit. */
6081 if (BPF_CLASS(insn->code) == BPF_JMP32)
6082 return false;
6083
5beca081
DB
6084 switch (BPF_OP(insn->code)) {
6085 case BPF_JGT:
6086 if ((dst_reg->type == PTR_TO_PACKET &&
6087 src_reg->type == PTR_TO_PACKET_END) ||
6088 (dst_reg->type == PTR_TO_PACKET_META &&
6089 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6090 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
6091 find_good_pkt_pointers(this_branch, dst_reg,
6092 dst_reg->type, false);
6093 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6094 src_reg->type == PTR_TO_PACKET) ||
6095 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6096 src_reg->type == PTR_TO_PACKET_META)) {
6097 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
6098 find_good_pkt_pointers(other_branch, src_reg,
6099 src_reg->type, true);
6100 } else {
6101 return false;
6102 }
6103 break;
6104 case BPF_JLT:
6105 if ((dst_reg->type == PTR_TO_PACKET &&
6106 src_reg->type == PTR_TO_PACKET_END) ||
6107 (dst_reg->type == PTR_TO_PACKET_META &&
6108 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6109 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
6110 find_good_pkt_pointers(other_branch, dst_reg,
6111 dst_reg->type, true);
6112 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6113 src_reg->type == PTR_TO_PACKET) ||
6114 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6115 src_reg->type == PTR_TO_PACKET_META)) {
6116 /* pkt_end < pkt_data', pkt_data < pkt_meta' */
6117 find_good_pkt_pointers(this_branch, src_reg,
6118 src_reg->type, false);
6119 } else {
6120 return false;
6121 }
6122 break;
6123 case BPF_JGE:
6124 if ((dst_reg->type == PTR_TO_PACKET &&
6125 src_reg->type == PTR_TO_PACKET_END) ||
6126 (dst_reg->type == PTR_TO_PACKET_META &&
6127 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6128 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
6129 find_good_pkt_pointers(this_branch, dst_reg,
6130 dst_reg->type, true);
6131 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6132 src_reg->type == PTR_TO_PACKET) ||
6133 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6134 src_reg->type == PTR_TO_PACKET_META)) {
6135 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
6136 find_good_pkt_pointers(other_branch, src_reg,
6137 src_reg->type, false);
6138 } else {
6139 return false;
6140 }
6141 break;
6142 case BPF_JLE:
6143 if ((dst_reg->type == PTR_TO_PACKET &&
6144 src_reg->type == PTR_TO_PACKET_END) ||
6145 (dst_reg->type == PTR_TO_PACKET_META &&
6146 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6147 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
6148 find_good_pkt_pointers(other_branch, dst_reg,
6149 dst_reg->type, false);
6150 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6151 src_reg->type == PTR_TO_PACKET) ||
6152 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6153 src_reg->type == PTR_TO_PACKET_META)) {
6154 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
6155 find_good_pkt_pointers(this_branch, src_reg,
6156 src_reg->type, true);
6157 } else {
6158 return false;
6159 }
6160 break;
6161 default:
6162 return false;
6163 }
6164
6165 return true;
6166}
6167
58e2af8b 6168static int check_cond_jmp_op(struct bpf_verifier_env *env,
17a52670
AS
6169 struct bpf_insn *insn, int *insn_idx)
6170{
f4d7e40a
AS
6171 struct bpf_verifier_state *this_branch = env->cur_state;
6172 struct bpf_verifier_state *other_branch;
6173 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
fb8d251e 6174 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
17a52670 6175 u8 opcode = BPF_OP(insn->code);
092ed096 6176 bool is_jmp32;
fb8d251e 6177 int pred = -1;
17a52670
AS
6178 int err;
6179
092ed096
JW
6180 /* Only conditional jumps are expected to reach here. */
6181 if (opcode == BPF_JA || opcode > BPF_JSLE) {
6182 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
17a52670
AS
6183 return -EINVAL;
6184 }
6185
6186 if (BPF_SRC(insn->code) == BPF_X) {
6187 if (insn->imm != 0) {
092ed096 6188 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
6189 return -EINVAL;
6190 }
6191
6192 /* check src1 operand */
dc503a8a 6193 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
6194 if (err)
6195 return err;
1be7f75d
AS
6196
6197 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 6198 verbose(env, "R%d pointer comparison prohibited\n",
1be7f75d
AS
6199 insn->src_reg);
6200 return -EACCES;
6201 }
fb8d251e 6202 src_reg = &regs[insn->src_reg];
17a52670
AS
6203 } else {
6204 if (insn->src_reg != BPF_REG_0) {
092ed096 6205 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
6206 return -EINVAL;
6207 }
6208 }
6209
6210 /* check src2 operand */
dc503a8a 6211 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
6212 if (err)
6213 return err;
6214
1a0dc1ac 6215 dst_reg = &regs[insn->dst_reg];
092ed096 6216 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1a0dc1ac 6217
fb8d251e
AS
6218 if (BPF_SRC(insn->code) == BPF_K)
6219 pred = is_branch_taken(dst_reg, insn->imm,
6220 opcode, is_jmp32);
6221 else if (src_reg->type == SCALAR_VALUE &&
6222 tnum_is_const(src_reg->var_off))
6223 pred = is_branch_taken(dst_reg, src_reg->var_off.value,
6224 opcode, is_jmp32);
b5dc0163
AS
6225 if (pred >= 0) {
6226 err = mark_chain_precision(env, insn->dst_reg);
6227 if (BPF_SRC(insn->code) == BPF_X && !err)
6228 err = mark_chain_precision(env, insn->src_reg);
6229 if (err)
6230 return err;
6231 }
fb8d251e
AS
6232 if (pred == 1) {
6233 /* only follow the goto, ignore fall-through */
6234 *insn_idx += insn->off;
6235 return 0;
6236 } else if (pred == 0) {
6237 /* only follow fall-through branch, since
6238 * that's where the program will go
6239 */
6240 return 0;
17a52670
AS
6241 }
6242
979d63d5
DB
6243 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
6244 false);
17a52670
AS
6245 if (!other_branch)
6246 return -EFAULT;
f4d7e40a 6247 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
17a52670 6248
48461135
JB
6249 /* detect if we are comparing against a constant value so we can adjust
6250 * our min/max values for our dst register.
f1174f77
EC
6251 * this is only legit if both are scalars (or pointers to the same
6252 * object, I suppose, but we don't support that right now), because
6253 * otherwise the different base pointers mean the offsets aren't
6254 * comparable.
48461135
JB
6255 */
6256 if (BPF_SRC(insn->code) == BPF_X) {
092ed096
JW
6257 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
6258 struct bpf_reg_state lo_reg0 = *dst_reg;
6259 struct bpf_reg_state lo_reg1 = *src_reg;
6260 struct bpf_reg_state *src_lo, *dst_lo;
6261
6262 dst_lo = &lo_reg0;
6263 src_lo = &lo_reg1;
6264 coerce_reg_to_size(dst_lo, 4);
6265 coerce_reg_to_size(src_lo, 4);
6266
f1174f77 6267 if (dst_reg->type == SCALAR_VALUE &&
092ed096
JW
6268 src_reg->type == SCALAR_VALUE) {
6269 if (tnum_is_const(src_reg->var_off) ||
6270 (is_jmp32 && tnum_is_const(src_lo->var_off)))
f4d7e40a 6271 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096
JW
6272 dst_reg,
6273 is_jmp32
6274 ? src_lo->var_off.value
6275 : src_reg->var_off.value,
6276 opcode, is_jmp32);
6277 else if (tnum_is_const(dst_reg->var_off) ||
6278 (is_jmp32 && tnum_is_const(dst_lo->var_off)))
f4d7e40a 6279 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
092ed096
JW
6280 src_reg,
6281 is_jmp32
6282 ? dst_lo->var_off.value
6283 : dst_reg->var_off.value,
6284 opcode, is_jmp32);
6285 else if (!is_jmp32 &&
6286 (opcode == BPF_JEQ || opcode == BPF_JNE))
f1174f77 6287 /* Comparing for equality, we can combine knowledge */
f4d7e40a
AS
6288 reg_combine_min_max(&other_branch_regs[insn->src_reg],
6289 &other_branch_regs[insn->dst_reg],
092ed096 6290 src_reg, dst_reg, opcode);
f1174f77
EC
6291 }
6292 } else if (dst_reg->type == SCALAR_VALUE) {
f4d7e40a 6293 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096 6294 dst_reg, insn->imm, opcode, is_jmp32);
48461135
JB
6295 }
6296
092ed096
JW
6297 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
6298 * NOTE: these optimizations below are related to pointer comparison,
6299 * which will never be JMP32.
6300 */
6301 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
1a0dc1ac 6302 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
840b9615
JS
6303 reg_type_may_be_null(dst_reg->type)) {
6304 /* Mark all identical registers in each branch as either
57a09bf0
TG
6305 * safe or unknown depending on the R == 0 or R != 0 condition.
6306 */
840b9615
JS
6307 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
6308 opcode == BPF_JNE);
6309 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
6310 opcode == BPF_JEQ);
5beca081
DB
6311 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
6312 this_branch, other_branch) &&
6313 is_pointer_value(env, insn->dst_reg)) {
61bd5218
JK
6314 verbose(env, "R%d pointer comparison prohibited\n",
6315 insn->dst_reg);
1be7f75d 6316 return -EACCES;
17a52670 6317 }
06ee7115 6318 if (env->log.level & BPF_LOG_LEVEL)
f4d7e40a 6319 print_verifier_state(env, this_branch->frame[this_branch->curframe]);
17a52670
AS
6320 return 0;
6321}
6322
17a52670 6323/* verify BPF_LD_IMM64 instruction */
58e2af8b 6324static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 6325{
d8eca5bb 6326 struct bpf_insn_aux_data *aux = cur_aux(env);
638f5b90 6327 struct bpf_reg_state *regs = cur_regs(env);
d8eca5bb 6328 struct bpf_map *map;
17a52670
AS
6329 int err;
6330
6331 if (BPF_SIZE(insn->code) != BPF_DW) {
61bd5218 6332 verbose(env, "invalid BPF_LD_IMM insn\n");
17a52670
AS
6333 return -EINVAL;
6334 }
6335 if (insn->off != 0) {
61bd5218 6336 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
17a52670
AS
6337 return -EINVAL;
6338 }
6339
dc503a8a 6340 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
6341 if (err)
6342 return err;
6343
6b173873 6344 if (insn->src_reg == 0) {
6b173873
JK
6345 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
6346
f1174f77 6347 regs[insn->dst_reg].type = SCALAR_VALUE;
b03c9f9f 6348 __mark_reg_known(&regs[insn->dst_reg], imm);
17a52670 6349 return 0;
6b173873 6350 }
17a52670 6351
d8eca5bb
DB
6352 map = env->used_maps[aux->map_index];
6353 mark_reg_known_zero(env, regs, insn->dst_reg);
6354 regs[insn->dst_reg].map_ptr = map;
6355
6356 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
6357 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
6358 regs[insn->dst_reg].off = aux->map_off;
6359 if (map_value_has_spin_lock(map))
6360 regs[insn->dst_reg].id = ++env->id_gen;
6361 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
6362 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
6363 } else {
6364 verbose(env, "bpf verifier is misconfigured\n");
6365 return -EINVAL;
6366 }
17a52670 6367
17a52670
AS
6368 return 0;
6369}
6370
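The BPF_LD_IMM64 decode above glues two 32-bit immediates from the instruction pair into one u64; a minimal standalone demo of that composition:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t lo = 0x11223344;	/* insn->imm */
	int32_t hi = 0x55667788;	/* (insn + 1)->imm */
	uint64_t imm = ((uint64_t)hi << 32) | (uint32_t)lo;

	printf("%#llx\n", (unsigned long long)imm);
	/* prints: 0x5566778811223344 */
	return 0;
}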
96be4325
DB
6371static bool may_access_skb(enum bpf_prog_type type)
6372{
6373 switch (type) {
6374 case BPF_PROG_TYPE_SOCKET_FILTER:
6375 case BPF_PROG_TYPE_SCHED_CLS:
94caee8c 6376 case BPF_PROG_TYPE_SCHED_ACT:
96be4325
DB
6377 return true;
6378 default:
6379 return false;
6380 }
6381}
6382
ddd872bc
AS
6383/* verify safety of LD_ABS|LD_IND instructions:
6384 * - they can only appear in the programs where ctx == skb
6385 * - since they are wrappers of function calls, they scratch R1-R5 registers,
6386 * preserve R6-R9, and store return value into R0
6387 *
6388 * Implicit input:
6389 * ctx == skb == R6 == CTX
6390 *
6391 * Explicit input:
6392 * SRC == any register
6393 * IMM == 32-bit immediate
6394 *
6395 * Output:
6396 * R0 - 8/16/32-bit skb data converted to cpu endianness
6397 */
58e2af8b 6398static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
ddd872bc 6399{
638f5b90 6400 struct bpf_reg_state *regs = cur_regs(env);
6d4f151a 6401 static const int ctx_reg = BPF_REG_6;
ddd872bc 6402 u8 mode = BPF_MODE(insn->code);
ddd872bc
AS
6403 int i, err;
6404
24701ece 6405 if (!may_access_skb(env->prog->type)) {
61bd5218 6406 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
ddd872bc
AS
6407 return -EINVAL;
6408 }
6409
e0cea7ce
DB
6410 if (!env->ops->gen_ld_abs) {
6411 verbose(env, "bpf verifier is misconfigured\n");
6412 return -EINVAL;
6413 }
6414
f910cefa 6415 if (env->subprog_cnt > 1) {
f4d7e40a
AS
6416 /* when program has LD_ABS insn, JITs and interpreter assume
6417 * that r1 == ctx == skb which is not the case for callees
6418 * that can have arbitrary arguments. It's problematic
6419 * for main prog as well since JITs would need to analyze
6420 * all functions in order to make proper register save/restore
6421 * decisions in the main prog. Hence disallow LD_ABS with calls
6422 */
6423 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
6424 return -EINVAL;
6425 }
6426
ddd872bc 6427 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
d82bccc6 6428 BPF_SIZE(insn->code) == BPF_DW ||
ddd872bc 6429 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
61bd5218 6430 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
ddd872bc
AS
6431 return -EINVAL;
6432 }
6433
6434 /* check whether implicit source operand (register R6) is readable */
6d4f151a 6435 err = check_reg_arg(env, ctx_reg, SRC_OP);
ddd872bc
AS
6436 if (err)
6437 return err;
6438
fd978bf7
JS
6439 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
6440 * gen_ld_abs() may terminate the program at runtime, leading to
6441 * reference leak.
6442 */
6443 err = check_reference_leak(env);
6444 if (err) {
6445 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
6446 return err;
6447 }
6448
d83525ca
AS
6449 if (env->cur_state->active_spin_lock) {
6450 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
6451 return -EINVAL;
6452 }
6453
6d4f151a 6454 if (regs[ctx_reg].type != PTR_TO_CTX) {
61bd5218
JK
6455 verbose(env,
6456 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
ddd872bc
AS
6457 return -EINVAL;
6458 }
6459
6460 if (mode == BPF_IND) {
6461 /* check explicit source operand */
dc503a8a 6462 err = check_reg_arg(env, insn->src_reg, SRC_OP);
ddd872bc
AS
6463 if (err)
6464 return err;
6465 }
6466
6d4f151a
DB
6467 err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
6468 if (err < 0)
6469 return err;
6470
ddd872bc 6471 /* reset caller saved regs to unreadable */
dc503a8a 6472 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 6473 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
6474 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6475 }
ddd872bc
AS
6476
6477 /* mark destination R0 register as readable, since it contains
dc503a8a
EC
6478 * the value fetched from the packet.
6479 * Already marked as written above.
ddd872bc 6480 */
61bd5218 6481 mark_reg_unknown(env, regs, BPF_REG_0);
5327ed3d
JW
6482 /* ld_abs loads up to 32-bit skb data. */
6483 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
ddd872bc
AS
6484 return 0;
6485}
6486
390ee7e2
AS
6487static int check_return_code(struct bpf_verifier_env *env)
6488{
5cf1e914 6489 struct tnum enforce_attach_type_range = tnum_unknown;
27ae7997 6490 const struct bpf_prog *prog = env->prog;
390ee7e2
AS
6491 struct bpf_reg_state *reg;
6492 struct tnum range = tnum_range(0, 1);
27ae7997
MKL
6493 int err;
6494
6495 /* The struct_ops func-ptr's return type could be "void" */
6496 if (env->prog->type == BPF_PROG_TYPE_STRUCT_OPS &&
6497 !prog->aux->attach_func_proto->type)
6498 return 0;
6499
6500 /* The eBPF calling convention is such that R0 is used
6501 * to return the value from an eBPF program.
6502 * Make sure that it's readable at this point
6503 * of bpf_exit, which means that the program wrote
6504 * something into it earlier
6505 */
6506 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
6507 if (err)
6508 return err;
6509
6510 if (is_pointer_value(env, BPF_REG_0)) {
6511 verbose(env, "R0 leaks addr as return value\n");
6512 return -EACCES;
6513 }
390ee7e2
AS
6514
6515 switch (env->prog->type) {
983695fa
DB
6516 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
6517 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
6518 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
6519 range = tnum_range(1, 1);
ed4ed404 6520 break;
390ee7e2 6521 case BPF_PROG_TYPE_CGROUP_SKB:
5cf1e914 6522 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
6523 range = tnum_range(0, 3);
6524 enforce_attach_type_range = tnum_range(2, 3);
6525 }
ed4ed404 6526 break;
390ee7e2
AS
6527 case BPF_PROG_TYPE_CGROUP_SOCK:
6528 case BPF_PROG_TYPE_SOCK_OPS:
ebc614f6 6529 case BPF_PROG_TYPE_CGROUP_DEVICE:
7b146ceb 6530 case BPF_PROG_TYPE_CGROUP_SYSCTL:
0d01da6a 6531 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
390ee7e2 6532 break;
15ab09bd
AS
6533 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6534 if (!env->prog->aux->attach_btf_id)
6535 return 0;
6536 range = tnum_const(0);
6537 break;
390ee7e2
AS
6538 default:
6539 return 0;
6540 }
6541
638f5b90 6542 reg = cur_regs(env) + BPF_REG_0;
390ee7e2 6543 if (reg->type != SCALAR_VALUE) {
61bd5218 6544 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
390ee7e2
AS
6545 reg_type_str[reg->type]);
6546 return -EINVAL;
6547 }
6548
6549 if (!tnum_in(range, reg->var_off)) {
5cf1e914 6550 char tn_buf[48];
6551
61bd5218 6552 verbose(env, "At program exit the register R0 ");
390ee7e2 6553 if (!tnum_is_unknown(reg->var_off)) {
390ee7e2 6554 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 6555 verbose(env, "has value %s", tn_buf);
390ee7e2 6556 } else {
61bd5218 6557 verbose(env, "has unknown scalar value");
390ee7e2 6558 }
5cf1e914 6559 tnum_strn(tn_buf, sizeof(tn_buf), range);
983695fa 6560 verbose(env, " should have been in %s\n", tn_buf);
390ee7e2
AS
6561 return -EINVAL;
6562 }
5cf1e914 6563
6564 if (!tnum_is_unknown(enforce_attach_type_range) &&
6565 tnum_in(enforce_attach_type_range, reg->var_off))
6566 env->prog->enforce_expected_attach_type = 1;
390ee7e2
AS
6567 return 0;
6568}
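/* Editor's worked example (hypothetical program): for a
 * BPF_PROG_TYPE_CGROUP_SKB program attached to BPF_CGROUP_INET_EGRESS
 * the accepted range is tnum_range(0, 3), so
 *   r0 = 2
 *   exit
 * passes (and, falling in the [2, 3] sub-range, sets
 * enforce_expected_attach_type), while
 *   r0 = 4
 *   exit
 * is rejected with a message along the lines of
 * "At program exit the register R0 has value (0x4; 0x0)
 *  should have been in (0x0; 0x3)".
 */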
6569
475fb78f
AS
6570/* non-recursive DFS pseudo code
6571 * 1 procedure DFS-iterative(G,v):
6572 * 2 label v as discovered
6573 * 3 let S be a stack
6574 * 4 S.push(v)
6575 * 5 while S is not empty
6576 * 6 t <- S.pop()
6577 * 7 if t is what we're looking for:
6578 * 8 return t
6579 * 9 for all edges e in G.adjacentEdges(t) do
6580 * 10 if edge e is already labelled
6581 * 11 continue with the next edge
6582 * 12 w <- G.adjacentVertex(t,e)
6583 * 13 if vertex w is not discovered and not explored
6584 * 14 label e as tree-edge
6585 * 15 label w as discovered
6586 * 16 S.push(w)
6587 * 17 continue at 5
6588 * 18 else if vertex w is discovered
6589 * 19 label e as back-edge
6590 * 20 else
6591 * 21 // vertex w is explored
6592 * 22 label e as forward- or cross-edge
6593 * 23 label t as explored
6594 * 24 S.pop()
6595 *
6596 * convention:
6597 * 0x10 - discovered
6598 * 0x11 - discovered and fall-through edge labelled
6599 * 0x12 - discovered and fall-through and branch edges labelled
6600 * 0x20 - explored
6601 */
6602
6603enum {
6604 DISCOVERED = 0x10,
6605 EXPLORED = 0x20,
6606 FALLTHROUGH = 1,
6607 BRANCH = 2,
6608};
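/* For illustration: a conditional jump at insn t typically walks
 * insn_state[t] through 0 -> DISCOVERED (0x10) when first pushed,
 * then DISCOVERED | FALLTHROUGH (0x11) and DISCOVERED | BRANCH (0x12)
 * as push_insn() labels each outgoing edge, and finally EXPLORED
 * (0x20) once t is popped off the stack.
 */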
6609
dc2a4ebc
AS
6610static u32 state_htab_size(struct bpf_verifier_env *env)
6611{
6612 return env->prog->len;
6613}
6614
5d839021
AS
6615static struct bpf_verifier_state_list **explored_state(
6616 struct bpf_verifier_env *env,
6617 int idx)
6618{
dc2a4ebc
AS
6619 struct bpf_verifier_state *cur = env->cur_state;
6620 struct bpf_func_state *state = cur->frame[cur->curframe];
6621
6622 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
5d839021
AS
6623}
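/* e.g. with prog->len == 100, a state recorded at insn 7 inside a
 * function called from insn 40 lands in bucket (7 ^ 40) % 100; mixing
 * the callsite into the hash keeps states reached through different
 * call chains in separate buckets (editor's illustration).
 */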
6624
6625static void init_explored_state(struct bpf_verifier_env *env, int idx)
6626{
a8f500af 6627 env->insn_aux_data[idx].prune_point = true;
5d839021 6628}
f1bca824 6629
475fb78f
AS
6630/* t, w, e - match pseudo-code above:
6631 * t - index of current instruction
6632 * w - next instruction
6633 * e - edge
6634 */
2589726d
AS
6635static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
6636 bool loop_ok)
475fb78f 6637{
7df737e9
AS
6638 int *insn_stack = env->cfg.insn_stack;
6639 int *insn_state = env->cfg.insn_state;
6640
475fb78f
AS
6641 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
6642 return 0;
6643
6644 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
6645 return 0;
6646
6647 if (w < 0 || w >= env->prog->len) {
d9762e84 6648 verbose_linfo(env, t, "%d: ", t);
61bd5218 6649 verbose(env, "jump out of range from insn %d to %d\n", t, w);
475fb78f
AS
6650 return -EINVAL;
6651 }
6652
f1bca824
AS
6653 if (e == BRANCH)
6654 /* mark branch target for state pruning */
5d839021 6655 init_explored_state(env, w);
f1bca824 6656
475fb78f
AS
6657 if (insn_state[w] == 0) {
6658 /* tree-edge */
6659 insn_state[t] = DISCOVERED | e;
6660 insn_state[w] = DISCOVERED;
7df737e9 6661 if (env->cfg.cur_stack >= env->prog->len)
475fb78f 6662 return -E2BIG;
7df737e9 6663 insn_stack[env->cfg.cur_stack++] = w;
475fb78f
AS
6664 return 1;
6665 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
2589726d
AS
6666 if (loop_ok && env->allow_ptr_leaks)
6667 return 0;
d9762e84
MKL
6668 verbose_linfo(env, t, "%d: ", t);
6669 verbose_linfo(env, w, "%d: ", w);
61bd5218 6670 verbose(env, "back-edge from insn %d to %d\n", t, w);
475fb78f
AS
6671 return -EINVAL;
6672 } else if (insn_state[w] == EXPLORED) {
6673 /* forward- or cross-edge */
6674 insn_state[t] = DISCOVERED | e;
6675 } else {
61bd5218 6676 verbose(env, "insn state internal bug\n");
475fb78f
AS
6677 return -EFAULT;
6678 }
6679 return 0;
6680}
6681
6682/* non-recursive depth-first-search to detect loops in BPF program
6683 * loop == back-edge in directed graph
6684 */
58e2af8b 6685static int check_cfg(struct bpf_verifier_env *env)
475fb78f
AS
6686{
6687 struct bpf_insn *insns = env->prog->insnsi;
6688 int insn_cnt = env->prog->len;
7df737e9 6689 int *insn_stack, *insn_state;
475fb78f
AS
6690 int ret = 0;
6691 int i, t;
6692
7df737e9 6693 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f
AS
6694 if (!insn_state)
6695 return -ENOMEM;
6696
7df737e9 6697 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f 6698 if (!insn_stack) {
71dde681 6699 kvfree(insn_state);
475fb78f
AS
6700 return -ENOMEM;
6701 }
6702
6703 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
6704 insn_stack[0] = 0; /* 0 is the first instruction */
7df737e9 6705 env->cfg.cur_stack = 1;
475fb78f
AS
6706
6707peek_stack:
7df737e9 6708 if (env->cfg.cur_stack == 0)
475fb78f 6709 goto check_state;
7df737e9 6710 t = insn_stack[env->cfg.cur_stack - 1];
475fb78f 6711
092ed096
JW
6712 if (BPF_CLASS(insns[t].code) == BPF_JMP ||
6713 BPF_CLASS(insns[t].code) == BPF_JMP32) {
475fb78f
AS
6714 u8 opcode = BPF_OP(insns[t].code);
6715
6716 if (opcode == BPF_EXIT) {
6717 goto mark_explored;
6718 } else if (opcode == BPF_CALL) {
2589726d 6719 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
475fb78f
AS
6720 if (ret == 1)
6721 goto peek_stack;
6722 else if (ret < 0)
6723 goto err_free;
07016151 6724 if (t + 1 < insn_cnt)
5d839021 6725 init_explored_state(env, t + 1);
cc8b0b92 6726 if (insns[t].src_reg == BPF_PSEUDO_CALL) {
5d839021 6727 init_explored_state(env, t);
2589726d
AS
6728 ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
6729 env, false);
cc8b0b92
AS
6730 if (ret == 1)
6731 goto peek_stack;
6732 else if (ret < 0)
6733 goto err_free;
6734 }
475fb78f
AS
6735 } else if (opcode == BPF_JA) {
6736 if (BPF_SRC(insns[t].code) != BPF_K) {
6737 ret = -EINVAL;
6738 goto err_free;
6739 }
6740 /* unconditional jump with single edge */
6741 ret = push_insn(t, t + insns[t].off + 1,
2589726d 6742 FALLTHROUGH, env, true);
475fb78f
AS
6743 if (ret == 1)
6744 goto peek_stack;
6745 else if (ret < 0)
6746 goto err_free;
b5dc0163
AS
6747 /* unconditional jmp is not a good pruning point,
6748 * but it's marked, since backtracking needs
6749 * to record jmp history in is_state_visited().
6750 */
6751 init_explored_state(env, t + insns[t].off + 1);
f1bca824
AS
6752 /* tell verifier to check for equivalent states
6753 * after every call and jump
6754 */
c3de6317 6755 if (t + 1 < insn_cnt)
5d839021 6756 init_explored_state(env, t + 1);
475fb78f
AS
6757 } else {
6758 /* conditional jump with two edges */
5d839021 6759 init_explored_state(env, t);
2589726d 6760 ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
475fb78f
AS
6761 if (ret == 1)
6762 goto peek_stack;
6763 else if (ret < 0)
6764 goto err_free;
6765
2589726d 6766 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
475fb78f
AS
6767 if (ret == 1)
6768 goto peek_stack;
6769 else if (ret < 0)
6770 goto err_free;
6771 }
6772 } else {
6773 /* all other non-branch instructions with single
6774 * fall-through edge
6775 */
2589726d 6776 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
475fb78f
AS
6777 if (ret == 1)
6778 goto peek_stack;
6779 else if (ret < 0)
6780 goto err_free;
6781 }
6782
6783mark_explored:
6784 insn_state[t] = EXPLORED;
7df737e9 6785 if (env->cfg.cur_stack-- <= 0) {
61bd5218 6786 verbose(env, "pop stack internal bug\n");
475fb78f
AS
6787 ret = -EFAULT;
6788 goto err_free;
6789 }
6790 goto peek_stack;
6791
6792check_state:
6793 for (i = 0; i < insn_cnt; i++) {
6794 if (insn_state[i] != EXPLORED) {
61bd5218 6795 verbose(env, "unreachable insn %d\n", i);
475fb78f
AS
6796 ret = -EINVAL;
6797 goto err_free;
6798 }
6799 }
6800 ret = 0; /* cfg looks good */
6801
6802err_free:
71dde681
AS
6803 kvfree(insn_state);
6804 kvfree(insn_stack);
7df737e9 6805 env->cfg.insn_state = env->cfg.insn_stack = NULL;
475fb78f
AS
6806 return ret;
6807}
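/* Editor's example (hypothetical program): for
 *   0: r0 = 0
 *   1: r0 += 1
 *   2: if r0 < 10 goto pc-2
 *   3: exit
 * the DFS explores 0, 1, 2, 3, then follows the branch edge of insn 2
 * back to insn 1, which is still DISCOVERED: a back-edge. With
 * loop_ok && env->allow_ptr_leaks the edge is tolerated so the loop
 * can be bounded later; otherwise check_cfg() fails with
 * "back-edge from insn 2 to 1".
 */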
6808
838e9690
YS
6809/* The minimum supported BTF func info size */
6810#define MIN_BPF_FUNCINFO_SIZE 8
6811#define MAX_FUNCINFO_REC_SIZE 252
6812
c454a46b
MKL
6813static int check_btf_func(struct bpf_verifier_env *env,
6814 const union bpf_attr *attr,
6815 union bpf_attr __user *uattr)
838e9690 6816{
d0b2818e 6817 u32 i, nfuncs, urec_size, min_size;
838e9690 6818 u32 krec_size = sizeof(struct bpf_func_info);
c454a46b 6819 struct bpf_func_info *krecord;
8c1b6e69 6820 struct bpf_func_info_aux *info_aux = NULL;
838e9690 6821 const struct btf_type *type;
c454a46b
MKL
6822 struct bpf_prog *prog;
6823 const struct btf *btf;
838e9690 6824 void __user *urecord;
d0b2818e 6825 u32 prev_offset = 0;
838e9690
YS
6826 int ret = 0;
6827
6828 nfuncs = attr->func_info_cnt;
6829 if (!nfuncs)
6830 return 0;
6831
6832 if (nfuncs != env->subprog_cnt) {
6833 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
6834 return -EINVAL;
6835 }
6836
6837 urec_size = attr->func_info_rec_size;
6838 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
6839 urec_size > MAX_FUNCINFO_REC_SIZE ||
6840 urec_size % sizeof(u32)) {
6841 verbose(env, "invalid func info rec size %u\n", urec_size);
6842 return -EINVAL;
6843 }
6844
c454a46b
MKL
6845 prog = env->prog;
6846 btf = prog->aux->btf;
838e9690
YS
6847
6848 urecord = u64_to_user_ptr(attr->func_info);
6849 min_size = min_t(u32, krec_size, urec_size);
6850
ba64e7d8 6851 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
c454a46b
MKL
6852 if (!krecord)
6853 return -ENOMEM;
8c1b6e69
AS
6854 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
6855 if (!info_aux)
6856 goto err_free;
ba64e7d8 6857
838e9690
YS
6858 for (i = 0; i < nfuncs; i++) {
6859 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
6860 if (ret) {
6861 if (ret == -E2BIG) {
6862 verbose(env, "nonzero trailing record in func info");
6863 /* set the size the kernel expects so the loader can zero
6864 * out the rest of the record.
6865 */
6866 if (put_user(min_size, &uattr->func_info_rec_size))
6867 ret = -EFAULT;
6868 }
c454a46b 6869 goto err_free;
838e9690
YS
6870 }
6871
ba64e7d8 6872 if (copy_from_user(&krecord[i], urecord, min_size)) {
838e9690 6873 ret = -EFAULT;
c454a46b 6874 goto err_free;
838e9690
YS
6875 }
6876
d30d42e0 6877 /* check insn_off */
838e9690 6878 if (i == 0) {
d30d42e0 6879 if (krecord[i].insn_off) {
838e9690 6880 verbose(env,
d30d42e0
MKL
6881 "nonzero insn_off %u for the first func info record",
6882 krecord[i].insn_off);
838e9690 6883 ret = -EINVAL;
c454a46b 6884 goto err_free;
838e9690 6885 }
d30d42e0 6886 } else if (krecord[i].insn_off <= prev_offset) {
838e9690
YS
6887 verbose(env,
6888 "same or smaller insn offset (%u) than previous func info record (%u)",
d30d42e0 6889 krecord[i].insn_off, prev_offset);
838e9690 6890 ret = -EINVAL;
c454a46b 6891 goto err_free;
838e9690
YS
6892 }
6893
d30d42e0 6894 if (env->subprog_info[i].start != krecord[i].insn_off) {
838e9690
YS
6895 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
6896 ret = -EINVAL;
c454a46b 6897 goto err_free;
838e9690
YS
6898 }
6899
6900 /* check type_id */
ba64e7d8 6901 type = btf_type_by_id(btf, krecord[i].type_id);
51c39bb1 6902 if (!type || !btf_type_is_func(type)) {
838e9690 6903 verbose(env, "invalid type id %d in func info",
ba64e7d8 6904 krecord[i].type_id);
838e9690 6905 ret = -EINVAL;
c454a46b 6906 goto err_free;
838e9690 6907 }
51c39bb1 6908 info_aux[i].linkage = BTF_INFO_VLEN(type->info);
d30d42e0 6909 prev_offset = krecord[i].insn_off;
838e9690
YS
6910 urecord += urec_size;
6911 }
6912
ba64e7d8
YS
6913 prog->aux->func_info = krecord;
6914 prog->aux->func_info_cnt = nfuncs;
8c1b6e69 6915 prog->aux->func_info_aux = info_aux;
838e9690
YS
6916 return 0;
6917
c454a46b 6918err_free:
ba64e7d8 6919 kvfree(krecord);
8c1b6e69 6920 kfree(info_aux);
838e9690
YS
6921 return ret;
6922}
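/* Editor's sketch of the userspace side (illustrative only; struct and
 * attr fields are from the uapi, ptr_to_u64() is an assumed helper):
 * one record per subprog, insn_off strictly increasing and matching
 * the subprog starts:
 *
 *   struct bpf_func_info finfo[2] = {
 *       { .insn_off = 0,  .type_id = 5 },   // main prog
 *       { .insn_off = 12, .type_id = 7 },   // subprog starting at insn 12
 *   };
 *   attr.func_info          = ptr_to_u64(finfo);
 *   attr.func_info_cnt      = 2;
 *   attr.func_info_rec_size = sizeof(struct bpf_func_info);
 *
 * Each type_id must refer to a BTF_KIND_FUNC type, or check_btf_func()
 * fails with "invalid type id ... in func info".
 */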
6923
ba64e7d8
YS
6924static void adjust_btf_func(struct bpf_verifier_env *env)
6925{
8c1b6e69 6926 struct bpf_prog_aux *aux = env->prog->aux;
ba64e7d8
YS
6927 int i;
6928
8c1b6e69 6929 if (!aux->func_info)
ba64e7d8
YS
6930 return;
6931
6932 for (i = 0; i < env->subprog_cnt; i++)
8c1b6e69 6933 aux->func_info[i].insn_off = env->subprog_info[i].start;
ba64e7d8
YS
6934}
6935
c454a46b
MKL
6936#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
6937 sizeof(((struct bpf_line_info *)(0))->line_col))
6938#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
6939
6940static int check_btf_line(struct bpf_verifier_env *env,
6941 const union bpf_attr *attr,
6942 union bpf_attr __user *uattr)
6943{
6944 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
6945 struct bpf_subprog_info *sub;
6946 struct bpf_line_info *linfo;
6947 struct bpf_prog *prog;
6948 const struct btf *btf;
6949 void __user *ulinfo;
6950 int err;
6951
6952 nr_linfo = attr->line_info_cnt;
6953 if (!nr_linfo)
6954 return 0;
6955
6956 rec_size = attr->line_info_rec_size;
6957 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
6958 rec_size > MAX_LINEINFO_REC_SIZE ||
6959 rec_size & (sizeof(u32) - 1))
6960 return -EINVAL;
6961
6962 /* Need to zero it in case userspace passes in
6963 * a smaller bpf_line_info object.
6964 */
6965 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
6966 GFP_KERNEL | __GFP_NOWARN);
6967 if (!linfo)
6968 return -ENOMEM;
6969
6970 prog = env->prog;
6971 btf = prog->aux->btf;
6972
6973 s = 0;
6974 sub = env->subprog_info;
6975 ulinfo = u64_to_user_ptr(attr->line_info);
6976 expected_size = sizeof(struct bpf_line_info);
6977 ncopy = min_t(u32, expected_size, rec_size);
6978 for (i = 0; i < nr_linfo; i++) {
6979 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
6980 if (err) {
6981 if (err == -E2BIG) {
6982 verbose(env, "nonzero trailing record in line_info");
6983 if (put_user(expected_size,
6984 &uattr->line_info_rec_size))
6985 err = -EFAULT;
6986 }
6987 goto err_free;
6988 }
6989
6990 if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
6991 err = -EFAULT;
6992 goto err_free;
6993 }
6994
6995 /*
6996 * Check insn_off to ensure
6997 * 1) strictly increasing AND
6998 * 2) bounded by prog->len
6999 *
7000 * The linfo[0].insn_off == 0 check logically falls into
7001 * the later "missing bpf_line_info for func..." case
7002 * because the first linfo[0].insn_off must match the
7003 * start of the first subprog, and the first subprog must have
7004 * subprog_info[0].start == 0.
7005 */
7006 if ((i && linfo[i].insn_off <= prev_offset) ||
7007 linfo[i].insn_off >= prog->len) {
7008 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
7009 i, linfo[i].insn_off, prev_offset,
7010 prog->len);
7011 err = -EINVAL;
7012 goto err_free;
7013 }
7014
fdbaa0be
MKL
7015 if (!prog->insnsi[linfo[i].insn_off].code) {
7016 verbose(env,
7017 "Invalid insn code at line_info[%u].insn_off\n",
7018 i);
7019 err = -EINVAL;
7020 goto err_free;
7021 }
7022
23127b33
MKL
7023 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
7024 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
c454a46b
MKL
7025 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
7026 err = -EINVAL;
7027 goto err_free;
7028 }
7029
7030 if (s != env->subprog_cnt) {
7031 if (linfo[i].insn_off == sub[s].start) {
7032 sub[s].linfo_idx = i;
7033 s++;
7034 } else if (sub[s].start < linfo[i].insn_off) {
7035 verbose(env, "missing bpf_line_info for func#%u\n", s);
7036 err = -EINVAL;
7037 goto err_free;
7038 }
7039 }
7040
7041 prev_offset = linfo[i].insn_off;
7042 ulinfo += rec_size;
7043 }
7044
7045 if (s != env->subprog_cnt) {
7046 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
7047 env->subprog_cnt - s, s);
7048 err = -EINVAL;
7049 goto err_free;
7050 }
7051
7052 prog->aux->linfo = linfo;
7053 prog->aux->nr_linfo = nr_linfo;
7054
7055 return 0;
7056
7057err_free:
7058 kvfree(linfo);
7059 return err;
7060}
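/* Editor's illustration (hypothetical layout): with subprogs starting
 * at insns 0 and 12, line_info insn_off values 0, 3, 12, 15 are
 * accepted; 0, 3, 15 skips subprog_info[1].start and fails with
 * "missing bpf_line_info for func#1"; 0, 3, 3 violates the
 * strictly-increasing requirement.
 */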
7061
7062static int check_btf_info(struct bpf_verifier_env *env,
7063 const union bpf_attr *attr,
7064 union bpf_attr __user *uattr)
7065{
7066 struct btf *btf;
7067 int err;
7068
7069 if (!attr->func_info_cnt && !attr->line_info_cnt)
7070 return 0;
7071
7072 btf = btf_get_by_fd(attr->prog_btf_fd);
7073 if (IS_ERR(btf))
7074 return PTR_ERR(btf);
7075 env->prog->aux->btf = btf;
7076
7077 err = check_btf_func(env, attr, uattr);
7078 if (err)
7079 return err;
7080
7081 err = check_btf_line(env, attr, uattr);
7082 if (err)
7083 return err;
7084
7085 return 0;
ba64e7d8
YS
7086}
7087
f1174f77
EC
7088/* check %cur's range satisfies %old's */
7089static bool range_within(struct bpf_reg_state *old,
7090 struct bpf_reg_state *cur)
7091{
b03c9f9f
EC
7092 return old->umin_value <= cur->umin_value &&
7093 old->umax_value >= cur->umax_value &&
7094 old->smin_value <= cur->smin_value &&
7095 old->smax_value >= cur->smax_value;
f1174f77
EC
7096}
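/* e.g. old {umin=0, umax=10, smin=0, smax=10} is satisfied by
 * cur {umin=2, umax=8, smin=2, smax=8}, since cur's range lies inside
 * old's, but not by cur {umin=2, umax=12}: the old, already-verified
 * state never proved that values up to 12 are safe.
 */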
7097
7098/* Maximum number of register states that can exist at once */
7099#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
7100struct idpair {
7101 u32 old;
7102 u32 cur;
7103};
7104
7105/* If in the old state two registers had the same id, then they need to have
7106 * the same id in the new state as well. But that id could be different from
7107 * the old state, so we need to track the mapping from old to new ids.
7108 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
7109 * regs with old id 5 must also have new id 9 for the new state to be safe. But
7110 * regs with a different old id could still have new id 9, we don't care about
7111 * that.
7112 * So we look through our idmap to see if this old id has been seen before. If
7113 * so, we require the new id to match; otherwise, we add the id pair to the map.
969bf05e 7114 */
f1174f77 7115static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
969bf05e 7116{
f1174f77 7117 unsigned int i;
969bf05e 7118
f1174f77
EC
7119 for (i = 0; i < ID_MAP_SIZE; i++) {
7120 if (!idmap[i].old) {
7121 /* Reached an empty slot; haven't seen this id before */
7122 idmap[i].old = old_id;
7123 idmap[i].cur = cur_id;
7124 return true;
7125 }
7126 if (idmap[i].old == old_id)
7127 return idmap[i].cur == cur_id;
7128 }
7129 /* We ran out of idmap slots, which should be impossible */
7130 WARN_ON_ONCE(1);
7131 return false;
7132}
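/* e.g. once the pair (old=5, cur=9) is recorded, any later register
 * with old id 5 must carry cur id 9 for the states to match, while
 * (old=6, cur=9) may still be recorded as an independent pair: only
 * registers sharing an old id have to stay grouped.
 */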
7133
9242b5f5
AS
7134static void clean_func_state(struct bpf_verifier_env *env,
7135 struct bpf_func_state *st)
7136{
7137 enum bpf_reg_liveness live;
7138 int i, j;
7139
7140 for (i = 0; i < BPF_REG_FP; i++) {
7141 live = st->regs[i].live;
7142 /* liveness must not touch this register anymore */
7143 st->regs[i].live |= REG_LIVE_DONE;
7144 if (!(live & REG_LIVE_READ))
7145 /* since the register is unused, clear its state
7146 * to make further comparison simpler
7147 */
f54c7898 7148 __mark_reg_not_init(env, &st->regs[i]);
9242b5f5
AS
7149 }
7150
7151 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
7152 live = st->stack[i].spilled_ptr.live;
7153 /* liveness must not touch this stack slot anymore */
7154 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
7155 if (!(live & REG_LIVE_READ)) {
f54c7898 7156 __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
9242b5f5
AS
7157 for (j = 0; j < BPF_REG_SIZE; j++)
7158 st->stack[i].slot_type[j] = STACK_INVALID;
7159 }
7160 }
7161}
7162
7163static void clean_verifier_state(struct bpf_verifier_env *env,
7164 struct bpf_verifier_state *st)
7165{
7166 int i;
7167
7168 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
7169 /* all regs in this state in all frames were already marked */
7170 return;
7171
7172 for (i = 0; i <= st->curframe; i++)
7173 clean_func_state(env, st->frame[i]);
7174}
7175
7176/* the parentage chains form a tree.
7177 * the verifier states are added to state lists at given insn and
7178 * pushed into state stack for future exploration.
7179 * when the verifier reaches the bpf_exit insn, some of the verifier states
7180 * stored in the state lists have their final liveness state already,
7181 * but a lot of states will get revised from liveness point of view when
7182 * the verifier explores other branches.
7183 * Example:
7184 * 1: r0 = 1
7185 * 2: if r1 == 100 goto pc+1
7186 * 3: r0 = 2
7187 * 4: exit
7188 * when the verifier reaches exit insn the register r0 in the state list of
7189 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
7190 * of insn 2 and goes exploring further. At the insn 4 it will walk the
7191 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
7192 *
7193 * Since the verifier pushes the branch states as it sees them while exploring
7194 * the program, the condition of walking the branch instruction for the second
7195 * time means that all states below this branch were already explored and
7196 * their final liveness marks are already propagated.
7197 * Hence when the verifier completes the search of state list in is_state_visited()
7198 * we can call this clean_live_states() function to mark all liveness states
7199 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
7200 * will not be used.
7201 * This function also clears the registers and stack slots that are !READ
7202 * to simplify state merging.
7203 *
7204 * An important note here: walking the same branch instruction in the callee
7205 * doesn't mean that the states are DONE. The verifier has to compare
7206 * the callsites
7207 */
7208static void clean_live_states(struct bpf_verifier_env *env, int insn,
7209 struct bpf_verifier_state *cur)
7210{
7211 struct bpf_verifier_state_list *sl;
7212 int i;
7213
5d839021 7214 sl = *explored_state(env, insn);
a8f500af 7215 while (sl) {
2589726d
AS
7216 if (sl->state.branches)
7217 goto next;
dc2a4ebc
AS
7218 if (sl->state.insn_idx != insn ||
7219 sl->state.curframe != cur->curframe)
9242b5f5
AS
7220 goto next;
7221 for (i = 0; i <= cur->curframe; i++)
7222 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
7223 goto next;
7224 clean_verifier_state(env, &sl->state);
7225next:
7226 sl = sl->next;
7227 }
7228}
7229
f1174f77 7230/* Returns true if (rold safe implies rcur safe) */
1b688a19
EC
7231static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
7232 struct idpair *idmap)
f1174f77 7233{
f4d7e40a
AS
7234 bool equal;
7235
dc503a8a
EC
7236 if (!(rold->live & REG_LIVE_READ))
7237 /* explored state didn't use this */
7238 return true;
7239
679c782d 7240 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
f4d7e40a
AS
7241
7242 if (rold->type == PTR_TO_STACK)
7243 /* two stack pointers are equal only if they're pointing to
7244 * the same stack frame, since fp-8 in foo != fp-8 in bar
7245 */
7246 return equal && rold->frameno == rcur->frameno;
7247
7248 if (equal)
969bf05e
AS
7249 return true;
7250
f1174f77
EC
7251 if (rold->type == NOT_INIT)
7252 /* explored state can't have used this */
969bf05e 7253 return true;
f1174f77
EC
7254 if (rcur->type == NOT_INIT)
7255 return false;
7256 switch (rold->type) {
7257 case SCALAR_VALUE:
7258 if (rcur->type == SCALAR_VALUE) {
b5dc0163
AS
7259 if (!rold->precise && !rcur->precise)
7260 return true;
f1174f77
EC
7261 /* new val must satisfy old val knowledge */
7262 return range_within(rold, rcur) &&
7263 tnum_in(rold->var_off, rcur->var_off);
7264 } else {
179d1c56
JH
7265 /* We're trying to use a pointer in place of a scalar.
7266 * Even if the scalar was unbounded, this could lead to
7267 * pointer leaks because scalars are allowed to leak
7268 * while pointers are not. We could make this safe in
7269 * special cases if root is calling us, but it's
7270 * probably not worth the hassle.
f1174f77 7271 */
179d1c56 7272 return false;
f1174f77
EC
7273 }
7274 case PTR_TO_MAP_VALUE:
1b688a19
EC
7275 /* If the new min/max/var_off satisfy the old ones and
7276 * everything else matches, we are OK.
d83525ca
AS
7277 * 'id' is not compared, since it's only used for maps with
7278 * bpf_spin_lock inside map element and in such cases if
7279 * the rest of the prog is valid for one map element then
7280 * it's valid for all map elements regardless of the key
7281 * used in bpf_map_lookup()
1b688a19
EC
7282 */
7283 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
7284 range_within(rold, rcur) &&
7285 tnum_in(rold->var_off, rcur->var_off);
f1174f77
EC
7286 case PTR_TO_MAP_VALUE_OR_NULL:
7287 /* a PTR_TO_MAP_VALUE could be safe to use as a
7288 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
7289 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
7290 * checked, doing so could have affected others with the same
7291 * id, and we can't check for that because we lost the id when
7292 * we converted to a PTR_TO_MAP_VALUE.
7293 */
7294 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
7295 return false;
7296 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
7297 return false;
7298 /* Check our ids match any regs they're supposed to */
7299 return check_ids(rold->id, rcur->id, idmap);
de8f3a83 7300 case PTR_TO_PACKET_META:
f1174f77 7301 case PTR_TO_PACKET:
de8f3a83 7302 if (rcur->type != rold->type)
f1174f77
EC
7303 return false;
7304 /* We must have at least as much range as the old ptr
7305 * did, so that any accesses which were safe before are
7306 * still safe. This is true even if old range < old off,
7307 * since someone could have accessed through (ptr - k), or
7308 * even done ptr -= k in a register, to get a safe access.
7309 */
7310 if (rold->range > rcur->range)
7311 return false;
7312 /* If the offsets don't match, we can't trust our alignment;
7313 * nor can we be sure that we won't fall out of range.
7314 */
7315 if (rold->off != rcur->off)
7316 return false;
7317 /* id relations must be preserved */
7318 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
7319 return false;
7320 /* new val must satisfy old val knowledge */
7321 return range_within(rold, rcur) &&
7322 tnum_in(rold->var_off, rcur->var_off);
7323 case PTR_TO_CTX:
7324 case CONST_PTR_TO_MAP:
f1174f77 7325 case PTR_TO_PACKET_END:
d58e468b 7326 case PTR_TO_FLOW_KEYS:
c64b7983
JS
7327 case PTR_TO_SOCKET:
7328 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
7329 case PTR_TO_SOCK_COMMON:
7330 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
7331 case PTR_TO_TCP_SOCK:
7332 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 7333 case PTR_TO_XDP_SOCK:
f1174f77
EC
7334 /* Only valid matches are exact, which memcmp() above
7335 * would have accepted
7336 */
7337 default:
7338 /* Don't know what's going on, just say it's not safe */
7339 return false;
7340 }
969bf05e 7341
f1174f77
EC
7342 /* Shouldn't get here; if we do, say it's not safe */
7343 WARN_ON_ONCE(1);
969bf05e
AS
7344 return false;
7345}
7346
f4d7e40a
AS
7347static bool stacksafe(struct bpf_func_state *old,
7348 struct bpf_func_state *cur,
638f5b90
AS
7349 struct idpair *idmap)
7350{
7351 int i, spi;
7352
638f5b90
AS
7353 /* walk slots of the explored stack and ignore any additional
7354 * slots in the current stack, since explored(safe) state
7355 * didn't use them
7356 */
7357 for (i = 0; i < old->allocated_stack; i++) {
7358 spi = i / BPF_REG_SIZE;
7359
b233920c
AS
7360 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
7361 i += BPF_REG_SIZE - 1;
cc2b14d5 7362 /* explored state didn't use this */
fd05e57b 7363 continue;
b233920c 7364 }
cc2b14d5 7365
638f5b90
AS
7366 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
7367 continue;
19e2dbb7
AS
7368
7369 /* explored stack has more populated slots than current stack
7370 * and these slots were used
7371 */
7372 if (i >= cur->allocated_stack)
7373 return false;
7374
cc2b14d5
AS
7375 /* if old state was safe with misc data in the stack
7376 * it will be safe with zero-initialized stack.
7377 * The opposite is not true
7378 */
7379 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
7380 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
7381 continue;
638f5b90
AS
7382 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
7383 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
7384 /* Ex: old explored (safe) state has STACK_SPILL in
7385 * this stack slot, but current has STACK_MISC ->
7386 * these verifier states are not equivalent,
7387 * return false to continue verification of this path
7388 */
7389 return false;
7390 if (i % BPF_REG_SIZE)
7391 continue;
7392 if (old->stack[spi].slot_type[0] != STACK_SPILL)
7393 continue;
7394 if (!regsafe(&old->stack[spi].spilled_ptr,
7395 &cur->stack[spi].spilled_ptr,
7396 idmap))
7397 /* when the explored and current stack slots are both storing
7398 * spilled registers, check that the stored pointer types
7399 * are the same as well.
7400 * Ex: explored safe path could have stored
7401 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
7402 * but current path has stored:
7403 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
7404 * such verifier states are not equivalent.
7405 * return false to continue verification of this path
7406 */
7407 return false;
7408 }
7409 return true;
7410}
7411
fd978bf7
JS
7412static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
7413{
7414 if (old->acquired_refs != cur->acquired_refs)
7415 return false;
7416 return !memcmp(old->refs, cur->refs,
7417 sizeof(*old->refs) * old->acquired_refs);
7418}
7419
f1bca824
AS
7420/* compare two verifier states
7421 *
7422 * all states stored in state_list are known to be valid, since
7423 * verifier reached 'bpf_exit' instruction through them
7424 *
7425 * this function is called when the verifier explores different branches of
7426 * execution popped from the state stack. If it sees an old state that has
7427 * a more strict register state and a more strict stack state, then this execution
7428 * branch doesn't need to be explored further, since the verifier already
7429 * concluded that more strict state leads to valid finish.
7430 *
7431 * Therefore two states are equivalent if register state is more conservative
7432 * and explored stack state is more conservative than the current one.
7433 * Example:
7434 * explored current
7435 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
7436 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
7437 *
7438 * In other words, if the current stack state (the one being explored) has more
7439 * valid slots than the old one that already passed validation, it means
7440 * the verifier can stop exploring and conclude that the current state is valid too
7441 *
7442 * Similarly with registers. If the explored state has a register type as invalid
7443 * whereas the register type in the current state is meaningful, it means that
7444 * the current state will reach 'bpf_exit' instruction safely
7445 */
f4d7e40a
AS
7446static bool func_states_equal(struct bpf_func_state *old,
7447 struct bpf_func_state *cur)
f1bca824 7448{
f1174f77
EC
7449 struct idpair *idmap;
7450 bool ret = false;
f1bca824
AS
7451 int i;
7452
f1174f77
EC
7453 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
7454 /* If we failed to allocate the idmap, just say it's not safe */
7455 if (!idmap)
1a0dc1ac 7456 return false;
f1174f77
EC
7457
7458 for (i = 0; i < MAX_BPF_REG; i++) {
1b688a19 7459 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
f1174f77 7460 goto out_free;
f1bca824
AS
7461 }
7462
638f5b90
AS
7463 if (!stacksafe(old, cur, idmap))
7464 goto out_free;
fd978bf7
JS
7465
7466 if (!refsafe(old, cur))
7467 goto out_free;
f1174f77
EC
7468 ret = true;
7469out_free:
7470 kfree(idmap);
7471 return ret;
f1bca824
AS
7472}
7473
f4d7e40a
AS
7474static bool states_equal(struct bpf_verifier_env *env,
7475 struct bpf_verifier_state *old,
7476 struct bpf_verifier_state *cur)
7477{
7478 int i;
7479
7480 if (old->curframe != cur->curframe)
7481 return false;
7482
979d63d5
DB
7483 /* Verification state from speculative execution simulation
7484 * must never prune a non-speculative execution one.
7485 */
7486 if (old->speculative && !cur->speculative)
7487 return false;
7488
d83525ca
AS
7489 if (old->active_spin_lock != cur->active_spin_lock)
7490 return false;
7491
f4d7e40a
AS
7492 /* for states to be equal callsites have to be the same
7493 * and all frame states need to be equivalent
7494 */
7495 for (i = 0; i <= old->curframe; i++) {
7496 if (old->frame[i]->callsite != cur->frame[i]->callsite)
7497 return false;
7498 if (!func_states_equal(old->frame[i], cur->frame[i]))
7499 return false;
7500 }
7501 return true;
7502}
7503
5327ed3d
JW
7504/* Return 0 if no propagation happened. Return negative error code if error
7505 * happened. Otherwise, return the propagated bit.
7506 */
55e7f3b5
JW
7507static int propagate_liveness_reg(struct bpf_verifier_env *env,
7508 struct bpf_reg_state *reg,
7509 struct bpf_reg_state *parent_reg)
7510{
5327ed3d
JW
7511 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
7512 u8 flag = reg->live & REG_LIVE_READ;
55e7f3b5
JW
7513 int err;
7514
5327ed3d
JW
7515 /* When we get here, the read flags of PARENT_REG or REG could be any of
7516 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
7517 * for propagation if PARENT_REG already has the strongest flag, REG_LIVE_READ64.
7518 */
7519 if (parent_flag == REG_LIVE_READ64 ||
7520 /* Or if there is no read flag from REG. */
7521 !flag ||
7522 /* Or if the read flag from REG is the same as PARENT_REG. */
7523 parent_flag == flag)
55e7f3b5
JW
7524 return 0;
7525
5327ed3d 7526 err = mark_reg_read(env, reg, parent_reg, flag);
55e7f3b5
JW
7527 if (err)
7528 return err;
7529
5327ed3d 7530 return flag;
55e7f3b5
JW
7531}
7532
8e9cd9ce 7533/* A write screens off any subsequent reads; but write marks come from the
f4d7e40a
AS
7534 * straight-line code between a state and its parent. When we arrive at an
7535 * equivalent state (jump target or such) we didn't arrive by the straight-line
7536 * code, so read marks in the state must propagate to the parent regardless
7537 * of the state's write marks. That's what 'parent == state->parent' comparison
679c782d 7538 * in mark_reg_read() is for.
8e9cd9ce 7539 */
f4d7e40a
AS
7540static int propagate_liveness(struct bpf_verifier_env *env,
7541 const struct bpf_verifier_state *vstate,
7542 struct bpf_verifier_state *vparent)
dc503a8a 7543{
3f8cafa4 7544 struct bpf_reg_state *state_reg, *parent_reg;
f4d7e40a 7545 struct bpf_func_state *state, *parent;
3f8cafa4 7546 int i, frame, err = 0;
dc503a8a 7547
f4d7e40a
AS
7548 if (vparent->curframe != vstate->curframe) {
7549 WARN(1, "propagate_live: parent frame %d current frame %d\n",
7550 vparent->curframe, vstate->curframe);
7551 return -EFAULT;
7552 }
dc503a8a
EC
7553 /* Propagate read liveness of registers... */
7554 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
83d16312 7555 for (frame = 0; frame <= vstate->curframe; frame++) {
3f8cafa4
JW
7556 parent = vparent->frame[frame];
7557 state = vstate->frame[frame];
7558 parent_reg = parent->regs;
7559 state_reg = state->regs;
83d16312
JK
7560 /* We don't need to worry about FP liveness, it's read-only */
7561 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
55e7f3b5
JW
7562 err = propagate_liveness_reg(env, &state_reg[i],
7563 &parent_reg[i]);
5327ed3d 7564 if (err < 0)
3f8cafa4 7565 return err;
5327ed3d
JW
7566 if (err == REG_LIVE_READ64)
7567 mark_insn_zext(env, &parent_reg[i]);
dc503a8a 7568 }
f4d7e40a 7569
1b04aee7 7570 /* Propagate stack slots. */
f4d7e40a
AS
7571 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
7572 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
3f8cafa4
JW
7573 parent_reg = &parent->stack[i].spilled_ptr;
7574 state_reg = &state->stack[i].spilled_ptr;
55e7f3b5
JW
7575 err = propagate_liveness_reg(env, state_reg,
7576 parent_reg);
5327ed3d 7577 if (err < 0)
3f8cafa4 7578 return err;
dc503a8a
EC
7579 }
7580 }
5327ed3d 7581 return 0;
dc503a8a
EC
7582}
7583
a3ce685d
AS
7584/* find precise scalars in the previous equivalent state and
7585 * propagate them into the current state
7586 */
7587static int propagate_precision(struct bpf_verifier_env *env,
7588 const struct bpf_verifier_state *old)
7589{
7590 struct bpf_reg_state *state_reg;
7591 struct bpf_func_state *state;
7592 int i, err = 0;
7593
7594 state = old->frame[old->curframe];
7595 state_reg = state->regs;
7596 for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
7597 if (state_reg->type != SCALAR_VALUE ||
7598 !state_reg->precise)
7599 continue;
7600 if (env->log.level & BPF_LOG_LEVEL2)
7601 verbose(env, "propagating r%d\n", i);
7602 err = mark_chain_precision(env, i);
7603 if (err < 0)
7604 return err;
7605 }
7606
7607 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
7608 if (state->stack[i].slot_type[0] != STACK_SPILL)
7609 continue;
7610 state_reg = &state->stack[i].spilled_ptr;
7611 if (state_reg->type != SCALAR_VALUE ||
7612 !state_reg->precise)
7613 continue;
7614 if (env->log.level & BPF_LOG_LEVEL2)
7615 verbose(env, "propagating fp%d\n",
7616 (-i - 1) * BPF_REG_SIZE);
7617 err = mark_chain_precision_stack(env, i);
7618 if (err < 0)
7619 return err;
7620 }
7621 return 0;
7622}
7623
2589726d
AS
7624static bool states_maybe_looping(struct bpf_verifier_state *old,
7625 struct bpf_verifier_state *cur)
7626{
7627 struct bpf_func_state *fold, *fcur;
7628 int i, fr = cur->curframe;
7629
7630 if (old->curframe != fr)
7631 return false;
7632
7633 fold = old->frame[fr];
7634 fcur = cur->frame[fr];
7635 for (i = 0; i < MAX_BPF_REG; i++)
7636 if (memcmp(&fold->regs[i], &fcur->regs[i],
7637 offsetof(struct bpf_reg_state, parent)))
7638 return false;
7639 return true;
7640}
7641
7642
58e2af8b 7643static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
f1bca824 7644{
58e2af8b 7645 struct bpf_verifier_state_list *new_sl;
9f4686c4 7646 struct bpf_verifier_state_list *sl, **pprev;
679c782d 7647 struct bpf_verifier_state *cur = env->cur_state, *new;
ceefbc96 7648 int i, j, err, states_cnt = 0;
10d274e8 7649 bool add_new_state = env->test_state_freq ? true : false;
f1bca824 7650
b5dc0163 7651 cur->last_insn_idx = env->prev_insn_idx;
a8f500af 7652 if (!env->insn_aux_data[insn_idx].prune_point)
f1bca824
AS
7653 /* this 'insn_idx' instruction wasn't marked, so we will not
7654 * be doing state search here
7655 */
7656 return 0;
7657
2589726d
AS
7658 /* bpf progs typically have a pruning point every 4 instructions
7659 * http://vger.kernel.org/bpfconf2019.html#session-1
7660 * Do not add new state for future pruning if the verifier hasn't seen
7661 * at least 2 jumps and at least 8 instructions.
7662 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
7663 * In tests that amounts to up to a 50% reduction in total verifier
7664 * memory consumption and a 20% verifier time speedup.
7665 */
7666 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
7667 env->insn_processed - env->prev_insn_processed >= 8)
7668 add_new_state = true;
7669
a8f500af
AS
7670 pprev = explored_state(env, insn_idx);
7671 sl = *pprev;
7672
9242b5f5
AS
7673 clean_live_states(env, insn_idx, cur);
7674
a8f500af 7675 while (sl) {
dc2a4ebc
AS
7676 states_cnt++;
7677 if (sl->state.insn_idx != insn_idx)
7678 goto next;
2589726d
AS
7679 if (sl->state.branches) {
7680 if (states_maybe_looping(&sl->state, cur) &&
7681 states_equal(env, &sl->state, cur)) {
7682 verbose_linfo(env, insn_idx, "; ");
7683 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
7684 return -EINVAL;
7685 }
7686 /* if the verifier is processing a loop, avoid adding new state
7687 * too often, since different loop iterations have distinct
7688 * states and may not help future pruning.
7689 * This threshold shouldn't be too low to make sure that
7690 * a loop with large bound will be rejected quickly.
7691 * The most abusive loop will be:
7692 * r1 += 1
7693 * if r1 < 1000000 goto pc-2
7694 * 1M insn_processed limit / 100 == 10k peak states.
7695 * This threshold shouldn't be too high either, since states
7696 * at the end of the loop are likely to be useful in pruning.
7697 */
7698 if (env->jmps_processed - env->prev_jmps_processed < 20 &&
7699 env->insn_processed - env->prev_insn_processed < 100)
7700 add_new_state = false;
7701 goto miss;
7702 }
638f5b90 7703 if (states_equal(env, &sl->state, cur)) {
9f4686c4 7704 sl->hit_cnt++;
f1bca824 7705 /* reached equivalent register/stack state,
dc503a8a
EC
7706 * prune the search.
7707 * Registers read by the continuation are read by us.
8e9cd9ce
EC
7708 * If we have any write marks in env->cur_state, they
7709 * will prevent corresponding reads in the continuation
7710 * from reaching our parent (an explored_state). Our
7711 * own state will get the read marks recorded, but
7712 * they'll be immediately forgotten as we're pruning
7713 * this state and will pop a new one.
f1bca824 7714 */
f4d7e40a 7715 err = propagate_liveness(env, &sl->state, cur);
a3ce685d
AS
7716
7717 /* if the previous state reached the exit with precision and the
7718 * current state is equivalent to it (except precision marks),
7719 * the precision needs to be propagated back into
7720 * the current state.
7721 */
7722 err = err ? : push_jmp_history(env, cur);
7723 err = err ? : propagate_precision(env, &sl->state);
f4d7e40a
AS
7724 if (err)
7725 return err;
f1bca824 7726 return 1;
dc503a8a 7727 }
2589726d
AS
7728miss:
7729 /* when a new state is not going to be added, do not increase the miss count.
7730 * Otherwise several loop iterations will remove the state
7731 * recorded earlier. The goal of these heuristics is to have
7732 * states from some iterations of the loop (some at the beginning
7733 * and some at the end) to help pruning.
7734 */
7735 if (add_new_state)
7736 sl->miss_cnt++;
9f4686c4
AS
7737 /* heuristic to determine whether this state is beneficial
7738 * to keep checking from state equivalence point of view.
7739 * Higher numbers increase max_states_per_insn and verification time,
7740 * but do not meaningfully decrease insn_processed.
7741 */
7742 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
7743 /* the state is unlikely to be useful. Remove it to
7744 * speed up verification
7745 */
7746 *pprev = sl->next;
7747 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
2589726d
AS
7748 u32 br = sl->state.branches;
7749
7750 WARN_ONCE(br,
7751 "BUG live_done but branches_to_explore %d\n",
7752 br);
9f4686c4
AS
7753 free_verifier_state(&sl->state, false);
7754 kfree(sl);
7755 env->peak_states--;
7756 } else {
7757 /* cannot free this state, since parentage chain may
7758 * walk it later. Add it to the free_list instead to
7759 * be freed at the end of verification
7760 */
7761 sl->next = env->free_list;
7762 env->free_list = sl;
7763 }
7764 sl = *pprev;
7765 continue;
7766 }
dc2a4ebc 7767next:
9f4686c4
AS
7768 pprev = &sl->next;
7769 sl = *pprev;
f1bca824
AS
7770 }
7771
06ee7115
AS
7772 if (env->max_states_per_insn < states_cnt)
7773 env->max_states_per_insn = states_cnt;
7774
ceefbc96 7775 if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
b5dc0163 7776 return push_jmp_history(env, cur);
ceefbc96 7777
2589726d 7778 if (!add_new_state)
b5dc0163 7779 return push_jmp_history(env, cur);
ceefbc96 7780
2589726d
AS
7781 /* There were no equivalent states, remember the current one.
7782 * Technically the current state is not proven to be safe yet,
f4d7e40a 7783 * but it will either reach the outermost bpf_exit (which means it's safe)
2589726d 7784 * or it will be rejected. When there are no loops the verifier won't be
f4d7e40a 7785 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
2589726d
AS
7786 * again on the way to bpf_exit.
7787 * When looping the sl->state.branches will be > 0 and this state
7788 * will not be considered for equivalence until branches == 0.
f1bca824 7789 */
638f5b90 7790 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
f1bca824
AS
7791 if (!new_sl)
7792 return -ENOMEM;
06ee7115
AS
7793 env->total_states++;
7794 env->peak_states++;
2589726d
AS
7795 env->prev_jmps_processed = env->jmps_processed;
7796 env->prev_insn_processed = env->insn_processed;
f1bca824
AS
7797
7798 /* add new state to the head of linked list */
679c782d
EC
7799 new = &new_sl->state;
7800 err = copy_verifier_state(new, cur);
1969db47 7801 if (err) {
679c782d 7802 free_verifier_state(new, false);
1969db47
AS
7803 kfree(new_sl);
7804 return err;
7805 }
dc2a4ebc 7806 new->insn_idx = insn_idx;
2589726d
AS
7807 WARN_ONCE(new->branches != 1,
7808 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
b5dc0163 7809
2589726d 7810 cur->parent = new;
b5dc0163
AS
7811 cur->first_insn_idx = insn_idx;
7812 clear_jmp_history(cur);
5d839021
AS
7813 new_sl->next = *explored_state(env, insn_idx);
7814 *explored_state(env, insn_idx) = new_sl;
7640ead9
JK
7815 /* connect new state to parentage chain. Current frame needs all
7816 * registers connected. Only r6 - r9 of the callers are alive (pushed
7817 * to the stack implicitly by JITs) so in callers' frames connect just
7818 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
7819 * the state of the call instruction (with WRITTEN set), and r0 comes
7820 * from callee with its full parentage chain, anyway.
7821 */
8e9cd9ce
EC
7822 /* clear write marks in current state: the writes we did are not writes
7823 * our child did, so they don't screen off its reads from us.
7824 * (There are no read marks in current state, because reads always mark
7825 * their parent and current state never has children yet. Only
7826 * explored_states can get read marks.)
7827 */
eea1c227
AS
7828 for (j = 0; j <= cur->curframe; j++) {
7829 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
7830 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
7831 for (i = 0; i < BPF_REG_FP; i++)
7832 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
7833 }
f4d7e40a
AS
7834
7835 /* all stack frames are accessible from callee, clear them all */
7836 for (j = 0; j <= cur->curframe; j++) {
7837 struct bpf_func_state *frame = cur->frame[j];
679c782d 7838 struct bpf_func_state *newframe = new->frame[j];
f4d7e40a 7839
679c782d 7840 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
cc2b14d5 7841 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
679c782d
EC
7842 frame->stack[i].spilled_ptr.parent =
7843 &newframe->stack[i].spilled_ptr;
7844 }
f4d7e40a 7845 }
f1bca824
AS
7846 return 0;
7847}
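/* Editor's note on the eviction heuristic above (worked example): a
 * stored state with hit_cnt == 2 survives up to 3 * 2 + 3 == 9 misses;
 * on the 10th miss, miss_cnt > hit_cnt * 3 + 3 holds and the state is
 * unlinked: freed immediately, or parked on env->free_list when the
 * parentage chain may still walk it.
 */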
7848
c64b7983
JS
7849/* Return true if it's OK to have the same insn return a different type. */
7850static bool reg_type_mismatch_ok(enum bpf_reg_type type)
7851{
7852 switch (type) {
7853 case PTR_TO_CTX:
7854 case PTR_TO_SOCKET:
7855 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
7856 case PTR_TO_SOCK_COMMON:
7857 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
7858 case PTR_TO_TCP_SOCK:
7859 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 7860 case PTR_TO_XDP_SOCK:
2a02759e 7861 case PTR_TO_BTF_ID:
c64b7983
JS
7862 return false;
7863 default:
7864 return true;
7865 }
7866}
7867
7868/* If an instruction was previously used with particular pointer types, then we
7869 * need to be careful to avoid cases such as the below, where it may be ok
7870 * for one branch accessing the pointer, but not ok for the other branch:
7871 *
7872 * R1 = sock_ptr
7873 * goto X;
7874 * ...
7875 * R1 = some_other_valid_ptr;
7876 * goto X;
7877 * ...
7878 * R2 = *(u32 *)(R1 + 0);
7879 */
7880static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
7881{
7882 return src != prev && (!reg_type_mismatch_ok(src) ||
7883 !reg_type_mismatch_ok(prev));
7884}
7885
58e2af8b 7886static int do_check(struct bpf_verifier_env *env)
17a52670 7887{
51c39bb1 7888 struct bpf_verifier_state *state = env->cur_state;
17a52670 7889 struct bpf_insn *insns = env->prog->insnsi;
638f5b90 7890 struct bpf_reg_state *regs;
06ee7115 7891 int insn_cnt = env->prog->len;
17a52670 7892 bool do_print_state = false;
b5dc0163 7893 int prev_insn_idx = -1;
17a52670 7894
17a52670
AS
7895 for (;;) {
7896 struct bpf_insn *insn;
7897 u8 class;
7898 int err;
7899
b5dc0163 7900 env->prev_insn_idx = prev_insn_idx;
c08435ec 7901 if (env->insn_idx >= insn_cnt) {
61bd5218 7902 verbose(env, "invalid insn idx %d insn_cnt %d\n",
c08435ec 7903 env->insn_idx, insn_cnt);
17a52670
AS
7904 return -EFAULT;
7905 }
7906
c08435ec 7907 insn = &insns[env->insn_idx];
17a52670
AS
7908 class = BPF_CLASS(insn->code);
7909
06ee7115 7910 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
61bd5218
JK
7911 verbose(env,
7912 "BPF program is too large. Processed %d insn\n",
06ee7115 7913 env->insn_processed);
17a52670
AS
7914 return -E2BIG;
7915 }
7916
c08435ec 7917 err = is_state_visited(env, env->insn_idx);
f1bca824
AS
7918 if (err < 0)
7919 return err;
7920 if (err == 1) {
7921 /* found equivalent state, can prune the search */
06ee7115 7922 if (env->log.level & BPF_LOG_LEVEL) {
f1bca824 7923 if (do_print_state)
979d63d5
DB
7924 verbose(env, "\nfrom %d to %d%s: safe\n",
7925 env->prev_insn_idx, env->insn_idx,
7926 env->cur_state->speculative ?
7927 " (speculative execution)" : "");
f1bca824 7928 else
c08435ec 7929 verbose(env, "%d: safe\n", env->insn_idx);
f1bca824
AS
7930 }
7931 goto process_bpf_exit;
7932 }
7933
c3494801
AS
7934 if (signal_pending(current))
7935 return -EAGAIN;
7936
3c2ce60b
DB
7937 if (need_resched())
7938 cond_resched();
7939
06ee7115
AS
7940 if (env->log.level & BPF_LOG_LEVEL2 ||
7941 (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
7942 if (env->log.level & BPF_LOG_LEVEL2)
c08435ec 7943 verbose(env, "%d:", env->insn_idx);
c5fc9692 7944 else
979d63d5
DB
7945 verbose(env, "\nfrom %d to %d%s:",
7946 env->prev_insn_idx, env->insn_idx,
7947 env->cur_state->speculative ?
7948 " (speculative execution)" : "");
f4d7e40a 7949 print_verifier_state(env, state->frame[state->curframe]);
17a52670
AS
7950 do_print_state = false;
7951 }
7952
06ee7115 7953 if (env->log.level & BPF_LOG_LEVEL) {
7105e828
DB
7954 const struct bpf_insn_cbs cbs = {
7955 .cb_print = verbose,
abe08840 7956 .private_data = env,
7105e828
DB
7957 };
7958
c08435ec
DB
7959 verbose_linfo(env, env->insn_idx, "; ");
7960 verbose(env, "%d: ", env->insn_idx);
abe08840 7961 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
17a52670
AS
7962 }
7963
cae1927c 7964 if (bpf_prog_is_dev_bound(env->prog->aux)) {
c08435ec
DB
7965 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
7966 env->prev_insn_idx);
cae1927c
JK
7967 if (err)
7968 return err;
7969 }
13a27dfc 7970
638f5b90 7971 regs = cur_regs(env);
51c39bb1 7972 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
b5dc0163 7973 prev_insn_idx = env->insn_idx;
fd978bf7 7974
17a52670 7975 if (class == BPF_ALU || class == BPF_ALU64) {
1be7f75d 7976 err = check_alu_op(env, insn);
17a52670
AS
7977 if (err)
7978 return err;
7979
7980 } else if (class == BPF_LDX) {
3df126f3 7981 enum bpf_reg_type *prev_src_type, src_reg_type;
9bac3d6d
AS
7982
7983 /* check for reserved fields is already done */
7984
17a52670 7985 /* check src operand */
dc503a8a 7986 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
7987 if (err)
7988 return err;
7989
dc503a8a 7990 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
7991 if (err)
7992 return err;
7993
725f9dcd
AS
7994 src_reg_type = regs[insn->src_reg].type;
7995
17a52670
AS
7996 /* check that memory (src_reg + off) is readable,
7997 * the state of dst_reg will be updated by this func
7998 */
c08435ec
DB
7999 err = check_mem_access(env, env->insn_idx, insn->src_reg,
8000 insn->off, BPF_SIZE(insn->code),
8001 BPF_READ, insn->dst_reg, false);
17a52670
AS
8002 if (err)
8003 return err;
8004
c08435ec 8005 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
8006
8007 if (*prev_src_type == NOT_INIT) {
9bac3d6d
AS
8008 /* saw a valid insn
8009 * dst_reg = *(u32 *)(src_reg + off)
3df126f3 8010 * save type to validate intersecting paths
9bac3d6d 8011 */
3df126f3 8012 *prev_src_type = src_reg_type;
9bac3d6d 8013
c64b7983 8014 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
9bac3d6d
AS
8015 /* A user program is trying to use the same insn
8016 * dst_reg = *(u32 *)(src_reg + off)
8017 * with different pointer types:
8018 * src_reg == ctx in one branch and
8019 * src_reg == stack|map in some other branch.
8020 * Reject it.
8021 */
61bd5218 8022 verbose(env, "same insn cannot be used with different pointers\n");
9bac3d6d
AS
8023 return -EINVAL;
8024 }
8025
17a52670 8026 } else if (class == BPF_STX) {
3df126f3 8027 enum bpf_reg_type *prev_dst_type, dst_reg_type;
d691f9e8 8028
17a52670 8029 if (BPF_MODE(insn->code) == BPF_XADD) {
c08435ec 8030 err = check_xadd(env, env->insn_idx, insn);
17a52670
AS
8031 if (err)
8032 return err;
c08435ec 8033 env->insn_idx++;
17a52670
AS
8034 continue;
8035 }
8036
17a52670 8037 /* check src1 operand */
dc503a8a 8038 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
8039 if (err)
8040 return err;
8041 /* check src2 operand */
dc503a8a 8042 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
8043 if (err)
8044 return err;
8045
d691f9e8
AS
8046 dst_reg_type = regs[insn->dst_reg].type;
8047
17a52670 8048 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
8049 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
8050 insn->off, BPF_SIZE(insn->code),
8051 BPF_WRITE, insn->src_reg, false);
17a52670
AS
8052 if (err)
8053 return err;
8054
c08435ec 8055 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
8056
8057 if (*prev_dst_type == NOT_INIT) {
8058 *prev_dst_type = dst_reg_type;
c64b7983 8059 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
61bd5218 8060 verbose(env, "same insn cannot be used with different pointers\n");
d691f9e8
AS
8061 return -EINVAL;
8062 }
8063
17a52670
AS
8064 } else if (class == BPF_ST) {
8065 if (BPF_MODE(insn->code) != BPF_MEM ||
8066 insn->src_reg != BPF_REG_0) {
61bd5218 8067 verbose(env, "BPF_ST uses reserved fields\n");
17a52670
AS
8068 return -EINVAL;
8069 }
8070 /* check src operand */
dc503a8a 8071 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
8072 if (err)
8073 return err;
8074
f37a8cb8 8075 if (is_ctx_reg(env, insn->dst_reg)) {
9d2be44a 8076 verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
2a159c6f
DB
8077 insn->dst_reg,
8078 reg_type_str[reg_state(env, insn->dst_reg)->type]);
f37a8cb8
DB
8079 return -EACCES;
8080 }
8081
17a52670 8082 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
8083 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
8084 insn->off, BPF_SIZE(insn->code),
8085 BPF_WRITE, -1, false);
17a52670
AS
8086 if (err)
8087 return err;
8088
092ed096 8089 } else if (class == BPF_JMP || class == BPF_JMP32) {
17a52670
AS
8090 u8 opcode = BPF_OP(insn->code);
8091
2589726d 8092 env->jmps_processed++;
17a52670
AS
8093 if (opcode == BPF_CALL) {
8094 if (BPF_SRC(insn->code) != BPF_K ||
8095 insn->off != 0 ||
f4d7e40a
AS
8096 (insn->src_reg != BPF_REG_0 &&
8097 insn->src_reg != BPF_PSEUDO_CALL) ||
092ed096
JW
8098 insn->dst_reg != BPF_REG_0 ||
8099 class == BPF_JMP32) {
61bd5218 8100 verbose(env, "BPF_CALL uses reserved fields\n");
17a52670
AS
8101 return -EINVAL;
8102 }
8103
d83525ca
AS
8104 if (env->cur_state->active_spin_lock &&
8105 (insn->src_reg == BPF_PSEUDO_CALL ||
8106 insn->imm != BPF_FUNC_spin_unlock)) {
8107 verbose(env, "function calls are not allowed while holding a lock\n");
8108 return -EINVAL;
8109 }
f4d7e40a 8110 if (insn->src_reg == BPF_PSEUDO_CALL)
c08435ec 8111 err = check_func_call(env, insn, &env->insn_idx);
f4d7e40a 8112 else
c08435ec 8113 err = check_helper_call(env, insn->imm, env->insn_idx);
17a52670
AS
8114 if (err)
8115 return err;
8116
8117 } else if (opcode == BPF_JA) {
8118 if (BPF_SRC(insn->code) != BPF_K ||
8119 insn->imm != 0 ||
8120 insn->src_reg != BPF_REG_0 ||
092ed096
JW
8121 insn->dst_reg != BPF_REG_0 ||
8122 class == BPF_JMP32) {
61bd5218 8123 verbose(env, "BPF_JA uses reserved fields\n");
17a52670
AS
8124 return -EINVAL;
8125 }
8126
c08435ec 8127 env->insn_idx += insn->off + 1;
17a52670
AS
8128 continue;
8129
8130 } else if (opcode == BPF_EXIT) {
8131 if (BPF_SRC(insn->code) != BPF_K ||
8132 insn->imm != 0 ||
8133 insn->src_reg != BPF_REG_0 ||
092ed096
JW
8134 insn->dst_reg != BPF_REG_0 ||
8135 class == BPF_JMP32) {
61bd5218 8136 verbose(env, "BPF_EXIT uses reserved fields\n");
17a52670
AS
8137 return -EINVAL;
8138 }
8139
d83525ca
AS
8140 if (env->cur_state->active_spin_lock) {
8141 verbose(env, "bpf_spin_unlock is missing\n");
8142 return -EINVAL;
8143 }
8144
f4d7e40a
AS
8145 if (state->curframe) {
8146 /* exit from nested function */
c08435ec 8147 err = prepare_func_exit(env, &env->insn_idx);
f4d7e40a
AS
8148 if (err)
8149 return err;
8150 do_print_state = true;
8151 continue;
8152 }
8153
fd978bf7
JS
8154 err = check_reference_leak(env);
8155 if (err)
8156 return err;
8157
390ee7e2
AS
8158 err = check_return_code(env);
8159 if (err)
8160 return err;
f1bca824 8161process_bpf_exit:
2589726d 8162 update_branch_counts(env, env->cur_state);
b5dc0163 8163 err = pop_stack(env, &prev_insn_idx,
c08435ec 8164 &env->insn_idx);
638f5b90
AS
8165 if (err < 0) {
8166 if (err != -ENOENT)
8167 return err;
17a52670
AS
8168 break;
8169 } else {
8170 do_print_state = true;
8171 continue;
8172 }
8173 } else {
c08435ec 8174 err = check_cond_jmp_op(env, insn, &env->insn_idx);
17a52670
AS
8175 if (err)
8176 return err;
8177 }
8178 } else if (class == BPF_LD) {
8179 u8 mode = BPF_MODE(insn->code);
8180
8181 if (mode == BPF_ABS || mode == BPF_IND) {
ddd872bc
AS
8182 err = check_ld_abs(env, insn);
8183 if (err)
8184 return err;
8185
17a52670
AS
8186 } else if (mode == BPF_IMM) {
8187 err = check_ld_imm(env, insn);
8188 if (err)
8189 return err;
8190
c08435ec 8191 env->insn_idx++;
51c39bb1 8192 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
17a52670 8193 } else {
61bd5218 8194 verbose(env, "invalid BPF_LD mode\n");
17a52670
AS
8195 return -EINVAL;
8196 }
8197 } else {
61bd5218 8198 verbose(env, "unknown insn class %d\n", class);
17a52670
AS
8199 return -EINVAL;
8200 }
8201
c08435ec 8202 env->insn_idx++;
17a52670
AS
8203 }
8204
8205 return 0;
8206}
8207
56f668df
MKL
8208static int check_map_prealloc(struct bpf_map *map)
8209{
8210 return (map->map_type != BPF_MAP_TYPE_HASH &&
bcc6b1b7
MKL
8211 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
8212 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
56f668df
MKL
8213 !(map->map_flags & BPF_F_NO_PREALLOC);
8214}
8215
d83525ca
AS
8216static bool is_tracing_prog_type(enum bpf_prog_type type)
8217{
8218 switch (type) {
8219 case BPF_PROG_TYPE_KPROBE:
8220 case BPF_PROG_TYPE_TRACEPOINT:
8221 case BPF_PROG_TYPE_PERF_EVENT:
8222 case BPF_PROG_TYPE_RAW_TRACEPOINT:
8223 return true;
8224 default:
8225 return false;
8226 }
8227}
8228
94dacdbd
TG
8229static bool is_preallocated_map(struct bpf_map *map)
8230{
8231 if (!check_map_prealloc(map))
8232 return false;
8233 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
8234 return false;
8235 return true;
8236}
8237
61bd5218
JK
8238static int check_map_prog_compatibility(struct bpf_verifier_env *env,
8239 struct bpf_map *map,
fdc15d38
AS
8240 struct bpf_prog *prog)
8241
8242{
94dacdbd
TG
8243 /*
8244 * Validate that trace type programs use preallocated hash maps.
8245 *
8246 * For programs attached to PERF events this is mandatory as the
8247 * perf NMI can hit any arbitrary code sequence.
8248 *
 8249 * All other trace types using non-preallocated hash maps are unsafe as
 8250 * well because tracepoints or kprobes can be inside locked regions
8251 * of the memory allocator or at a place where a recursion into the
8252 * memory allocator would see inconsistent state.
8253 *
2ed905c5
TG
8254 * On RT enabled kernels run-time allocation of all trace type
8255 * programs is strictly prohibited due to lock type constraints. On
8256 * !RT kernels it is allowed for backwards compatibility reasons for
8257 * now, but warnings are emitted so developers are made aware of
8258 * the unsafety and can fix their programs before this is enforced.
56f668df 8259 */
94dacdbd
TG
8260 if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) {
8261 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
61bd5218 8262 verbose(env, "perf_event programs can only use preallocated hash map\n");
56f668df
MKL
8263 return -EINVAL;
8264 }
2ed905c5
TG
8265 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
8266 verbose(env, "trace type programs can only use preallocated hash map\n");
8267 return -EINVAL;
8268 }
94dacdbd
TG
8269 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
8270 verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
fdc15d38 8271 }
a3884572 8272
d83525ca
AS
8273 if ((is_tracing_prog_type(prog->type) ||
8274 prog->type == BPF_PROG_TYPE_SOCKET_FILTER) &&
8275 map_value_has_spin_lock(map)) {
8276 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
8277 return -EINVAL;
8278 }
8279
a3884572 8280 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
09728266 8281 !bpf_offload_prog_map_match(prog, map)) {
a3884572
JK
8282 verbose(env, "offload device mismatch between prog and map\n");
8283 return -EINVAL;
8284 }
8285
85d33df3
MKL
8286 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
8287 verbose(env, "bpf_struct_ops map cannot be used in prog\n");
8288 return -EINVAL;
8289 }
8290
fdc15d38
AS
8291 return 0;
8292}
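
/* A minimal user-space sketch (not part of this file) showing how the
 * map_flags choice interacts with the check above: leaving
 * BPF_F_NO_PREALLOC out of 'flags' keeps the hash map preallocated,
 * which trace type programs require; passing BPF_F_NO_PREALLOC would
 * make a later tracing prog load fail this check. The helper name is
 * hypothetical.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_hash_map(__u32 max_entries, __u32 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = sizeof(__u32);
	attr.value_size = sizeof(__u64);
	attr.max_entries = max_entries;
	attr.map_flags = flags;	/* 0 keeps the default preallocated store */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}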
8293
b741f163
RG
8294static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
8295{
8296 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
8297 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
8298}
8299
0246e64d
AS
8300/* look for pseudo eBPF instructions that access map FDs and
8301 * replace them with actual map pointers
8302 */
58e2af8b 8303static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
0246e64d
AS
8304{
8305 struct bpf_insn *insn = env->prog->insnsi;
8306 int insn_cnt = env->prog->len;
fdc15d38 8307 int i, j, err;
0246e64d 8308
f1f7714e 8309 err = bpf_prog_calc_tag(env->prog);
aafe6ae9
DB
8310 if (err)
8311 return err;
8312
0246e64d 8313 for (i = 0; i < insn_cnt; i++, insn++) {
9bac3d6d 8314 if (BPF_CLASS(insn->code) == BPF_LDX &&
d691f9e8 8315 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
61bd5218 8316 verbose(env, "BPF_LDX uses reserved fields\n");
9bac3d6d
AS
8317 return -EINVAL;
8318 }
8319
d691f9e8
AS
8320 if (BPF_CLASS(insn->code) == BPF_STX &&
8321 ((BPF_MODE(insn->code) != BPF_MEM &&
8322 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
61bd5218 8323 verbose(env, "BPF_STX uses reserved fields\n");
d691f9e8
AS
8324 return -EINVAL;
8325 }
8326
0246e64d 8327 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
d8eca5bb 8328 struct bpf_insn_aux_data *aux;
0246e64d
AS
8329 struct bpf_map *map;
8330 struct fd f;
d8eca5bb 8331 u64 addr;
0246e64d
AS
8332
8333 if (i == insn_cnt - 1 || insn[1].code != 0 ||
8334 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
8335 insn[1].off != 0) {
61bd5218 8336 verbose(env, "invalid bpf_ld_imm64 insn\n");
0246e64d
AS
8337 return -EINVAL;
8338 }
8339
d8eca5bb 8340 if (insn[0].src_reg == 0)
0246e64d
AS
8341 /* valid generic load 64-bit imm */
8342 goto next_insn;
8343
d8eca5bb
DB
8344 /* In final convert_pseudo_ld_imm64() step, this is
8345 * converted into regular 64-bit imm load insn.
8346 */
8347 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
8348 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
8349 (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
8350 insn[1].imm != 0)) {
8351 verbose(env,
8352 "unrecognized bpf_ld_imm64 insn\n");
0246e64d
AS
8353 return -EINVAL;
8354 }
8355
20182390 8356 f = fdget(insn[0].imm);
c2101297 8357 map = __bpf_map_get(f);
0246e64d 8358 if (IS_ERR(map)) {
61bd5218 8359 verbose(env, "fd %d is not pointing to valid bpf_map\n",
20182390 8360 insn[0].imm);
0246e64d
AS
8361 return PTR_ERR(map);
8362 }
8363
61bd5218 8364 err = check_map_prog_compatibility(env, map, env->prog);
fdc15d38
AS
8365 if (err) {
8366 fdput(f);
8367 return err;
8368 }
8369
d8eca5bb
DB
8370 aux = &env->insn_aux_data[i];
8371 if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
8372 addr = (unsigned long)map;
8373 } else {
8374 u32 off = insn[1].imm;
8375
8376 if (off >= BPF_MAX_VAR_OFF) {
8377 verbose(env, "direct value offset of %u is not allowed\n", off);
8378 fdput(f);
8379 return -EINVAL;
8380 }
8381
8382 if (!map->ops->map_direct_value_addr) {
8383 verbose(env, "no direct value access support for this map type\n");
8384 fdput(f);
8385 return -EINVAL;
8386 }
8387
8388 err = map->ops->map_direct_value_addr(map, &addr, off);
8389 if (err) {
8390 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
8391 map->value_size, off);
8392 fdput(f);
8393 return err;
8394 }
8395
8396 aux->map_off = off;
8397 addr += off;
8398 }
8399
8400 insn[0].imm = (u32)addr;
8401 insn[1].imm = addr >> 32;
0246e64d
AS
8402
8403 /* check whether we recorded this map already */
d8eca5bb 8404 for (j = 0; j < env->used_map_cnt; j++) {
0246e64d 8405 if (env->used_maps[j] == map) {
d8eca5bb 8406 aux->map_index = j;
0246e64d
AS
8407 fdput(f);
8408 goto next_insn;
8409 }
d8eca5bb 8410 }
0246e64d
AS
8411
8412 if (env->used_map_cnt >= MAX_USED_MAPS) {
8413 fdput(f);
8414 return -E2BIG;
8415 }
8416
0246e64d
AS
 8417 /* hold the map. If the program is rejected by the verifier,
8418 * the map will be released by release_maps() or it
8419 * will be used by the valid program until it's unloaded
ab7f5bf0 8420 * and all maps are released in free_used_maps()
0246e64d 8421 */
1e0bd5a0 8422 bpf_map_inc(map);
d8eca5bb
DB
8423
8424 aux->map_index = env->used_map_cnt;
92117d84
AS
8425 env->used_maps[env->used_map_cnt++] = map;
8426
b741f163 8427 if (bpf_map_is_cgroup_storage(map) &&
e4730423 8428 bpf_cgroup_storage_assign(env->prog->aux, map)) {
b741f163 8429 verbose(env, "only one cgroup storage of each type is allowed\n");
de9cbbaa
RG
8430 fdput(f);
8431 return -EBUSY;
8432 }
8433
0246e64d
AS
8434 fdput(f);
8435next_insn:
8436 insn++;
8437 i++;
5e581dad
DB
8438 continue;
8439 }
8440
8441 /* Basic sanity check before we invest more work here. */
8442 if (!bpf_opcode_in_insntable(insn->code)) {
8443 verbose(env, "unknown opcode %02x\n", insn->code);
8444 return -EINVAL;
0246e64d
AS
8445 }
8446 }
8447
 8448 /* now all pseudo BPF_LD_IMM64 instructions load a valid
 8449 * 'struct bpf_map *' into a register instead of a user map_fd.
 8450 * These pointers will be used later by the verifier to validate map access.
8451 */
8452 return 0;
8453}
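
/* A loader-side sketch (not part of this file) of the two-insn pseudo
 * ld_imm64 that replace_map_fd_with_map_ptr() rewrites. 'map_fd' is a
 * hypothetical fd from BPF_MAP_CREATE; note insn[1].imm must stay 0
 * for BPF_PSEUDO_MAP_FD, matching the validation above.
 */
#include <linux/bpf.h>

static void emit_ld_map_fd(struct bpf_insn *insn, int map_fd)
{
	insn[0] = (struct bpf_insn) {
		.code    = BPF_LD | BPF_DW | BPF_IMM,
		.dst_reg = BPF_REG_1,
		.src_reg = BPF_PSEUDO_MAP_FD,	/* imm holds a map fd */
		.imm     = map_fd,		/* low 32 bits of the imm64 */
	};
	insn[1] = (struct bpf_insn) { 0 };	/* high 32 bits, must be 0 */
}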
8454
8455/* drop refcnt of maps used by the rejected program */
58e2af8b 8456static void release_maps(struct bpf_verifier_env *env)
0246e64d 8457{
a2ea0746
DB
8458 __bpf_free_used_maps(env->prog->aux, env->used_maps,
8459 env->used_map_cnt);
0246e64d
AS
8460}
8461
8462/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
58e2af8b 8463static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
0246e64d
AS
8464{
8465 struct bpf_insn *insn = env->prog->insnsi;
8466 int insn_cnt = env->prog->len;
8467 int i;
8468
8469 for (i = 0; i < insn_cnt; i++, insn++)
8470 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
8471 insn->src_reg = 0;
8472}
8473
8041902d
AS
8474/* single env->prog->insni[off] instruction was replaced with the range
8475 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
8476 * [0, off) and [off, end) to new locations, so the patched range stays zero
8477 */
b325fbca
JW
8478static int adjust_insn_aux_data(struct bpf_verifier_env *env,
8479 struct bpf_prog *new_prog, u32 off, u32 cnt)
8041902d
AS
8480{
8481 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
b325fbca
JW
8482 struct bpf_insn *insn = new_prog->insnsi;
8483 u32 prog_len;
c131187d 8484 int i;
8041902d 8485
b325fbca
JW
 8486 /* aux info at OFF always needs adjustment, no matter whether the fast
 8487 * path (cnt == 1) is taken or not. There is no guarantee the insn at OFF
 8488 * is the original insn of the old prog.
8489 */
8490 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
8491
8041902d
AS
8492 if (cnt == 1)
8493 return 0;
b325fbca 8494 prog_len = new_prog->len;
fad953ce
KC
8495 new_data = vzalloc(array_size(prog_len,
8496 sizeof(struct bpf_insn_aux_data)));
8041902d
AS
8497 if (!new_data)
8498 return -ENOMEM;
8499 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
8500 memcpy(new_data + off + cnt - 1, old_data + off,
8501 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
b325fbca 8502 for (i = off; i < off + cnt - 1; i++) {
51c39bb1 8503 new_data[i].seen = env->pass_cnt;
b325fbca
JW
8504 new_data[i].zext_dst = insn_has_def32(env, insn + i);
8505 }
8041902d
AS
8506 env->insn_aux_data = new_data;
8507 vfree(old_data);
8508 return 0;
8509}
8510
cc8b0b92
AS
8511static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
8512{
8513 int i;
8514
8515 if (len == 1)
8516 return;
4cb3d99c
JW
8517 /* NOTE: fake 'exit' subprog should be updated as well. */
8518 for (i = 0; i <= env->subprog_cnt; i++) {
afd59424 8519 if (env->subprog_info[i].start <= off)
cc8b0b92 8520 continue;
9c8105bd 8521 env->subprog_info[i].start += len - 1;
cc8b0b92
AS
8522 }
8523}
8524
8041902d
AS
8525static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
8526 const struct bpf_insn *patch, u32 len)
8527{
8528 struct bpf_prog *new_prog;
8529
8530 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4f73379e
AS
8531 if (IS_ERR(new_prog)) {
8532 if (PTR_ERR(new_prog) == -ERANGE)
8533 verbose(env,
8534 "insn %d cannot be patched due to 16-bit range\n",
8535 env->insn_aux_data[off].orig_idx);
8041902d 8536 return NULL;
4f73379e 8537 }
b325fbca 8538 if (adjust_insn_aux_data(env, new_prog, off, len))
8041902d 8539 return NULL;
cc8b0b92 8540 adjust_subprog_starts(env, off, len);
8041902d
AS
8541 return new_prog;
8542}
8543
52875a04
JK
8544static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
8545 u32 off, u32 cnt)
8546{
8547 int i, j;
8548
8549 /* find first prog starting at or after off (first to remove) */
8550 for (i = 0; i < env->subprog_cnt; i++)
8551 if (env->subprog_info[i].start >= off)
8552 break;
8553 /* find first prog starting at or after off + cnt (first to stay) */
8554 for (j = i; j < env->subprog_cnt; j++)
8555 if (env->subprog_info[j].start >= off + cnt)
8556 break;
8557 /* if j doesn't start exactly at off + cnt, we are just removing
8558 * the front of previous prog
8559 */
8560 if (env->subprog_info[j].start != off + cnt)
8561 j--;
8562
8563 if (j > i) {
8564 struct bpf_prog_aux *aux = env->prog->aux;
8565 int move;
8566
8567 /* move fake 'exit' subprog as well */
8568 move = env->subprog_cnt + 1 - j;
8569
8570 memmove(env->subprog_info + i,
8571 env->subprog_info + j,
8572 sizeof(*env->subprog_info) * move);
8573 env->subprog_cnt -= j - i;
8574
8575 /* remove func_info */
8576 if (aux->func_info) {
8577 move = aux->func_info_cnt - j;
8578
8579 memmove(aux->func_info + i,
8580 aux->func_info + j,
8581 sizeof(*aux->func_info) * move);
8582 aux->func_info_cnt -= j - i;
8583 /* func_info->insn_off is set after all code rewrites,
8584 * in adjust_btf_func() - no need to adjust
8585 */
8586 }
8587 } else {
8588 /* convert i from "first prog to remove" to "first to adjust" */
8589 if (env->subprog_info[i].start == off)
8590 i++;
8591 }
8592
8593 /* update fake 'exit' subprog as well */
8594 for (; i <= env->subprog_cnt; i++)
8595 env->subprog_info[i].start -= cnt;
8596
8597 return 0;
8598}
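
/* A worked example with hypothetical numbers: subprog starts {0, 5, 10, 20}
 * (the last entry being the fake 'exit' subprog) and a removal of off=5,
 * cnt=5 gives i=1, j=2, so subprog 1 is dropped entirely; the remaining
 * starts {0, 10, 20} are then shifted down by cnt to {0, 5, 15}.
 */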
8599
8600static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
8601 u32 cnt)
8602{
8603 struct bpf_prog *prog = env->prog;
8604 u32 i, l_off, l_cnt, nr_linfo;
8605 struct bpf_line_info *linfo;
8606
8607 nr_linfo = prog->aux->nr_linfo;
8608 if (!nr_linfo)
8609 return 0;
8610
8611 linfo = prog->aux->linfo;
8612
8613 /* find first line info to remove, count lines to be removed */
8614 for (i = 0; i < nr_linfo; i++)
8615 if (linfo[i].insn_off >= off)
8616 break;
8617
8618 l_off = i;
8619 l_cnt = 0;
8620 for (; i < nr_linfo; i++)
8621 if (linfo[i].insn_off < off + cnt)
8622 l_cnt++;
8623 else
8624 break;
8625
 8626 /* If the first live insn doesn't match the first live linfo, it needs to
 8627 * "inherit" the last removed linfo. prog is already modified, so prog->len
 8628 * == off means no live instructions after (tail of the program was removed).
8629 */
8630 if (prog->len != off && l_cnt &&
8631 (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
8632 l_cnt--;
8633 linfo[--i].insn_off = off + cnt;
8634 }
8635
 8636 /* remove the line info entries which refer to the removed instructions */
8637 if (l_cnt) {
8638 memmove(linfo + l_off, linfo + i,
8639 sizeof(*linfo) * (nr_linfo - i));
8640
8641 prog->aux->nr_linfo -= l_cnt;
8642 nr_linfo = prog->aux->nr_linfo;
8643 }
8644
8645 /* pull all linfo[i].insn_off >= off + cnt in by cnt */
8646 for (i = l_off; i < nr_linfo; i++)
8647 linfo[i].insn_off -= cnt;
8648
8649 /* fix up all subprogs (incl. 'exit') which start >= off */
8650 for (i = 0; i <= env->subprog_cnt; i++)
8651 if (env->subprog_info[i].linfo_idx > l_off) {
8652 /* program may have started in the removed region but
8653 * may not be fully removed
8654 */
8655 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
8656 env->subprog_info[i].linfo_idx -= l_cnt;
8657 else
8658 env->subprog_info[i].linfo_idx = l_off;
8659 }
8660
8661 return 0;
8662}
8663
8664static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
8665{
8666 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8667 unsigned int orig_prog_len = env->prog->len;
8668 int err;
8669
08ca90af
JK
8670 if (bpf_prog_is_dev_bound(env->prog->aux))
8671 bpf_prog_offload_remove_insns(env, off, cnt);
8672
52875a04
JK
8673 err = bpf_remove_insns(env->prog, off, cnt);
8674 if (err)
8675 return err;
8676
8677 err = adjust_subprog_starts_after_remove(env, off, cnt);
8678 if (err)
8679 return err;
8680
8681 err = bpf_adj_linfo_after_remove(env, off, cnt);
8682 if (err)
8683 return err;
8684
8685 memmove(aux_data + off, aux_data + off + cnt,
8686 sizeof(*aux_data) * (orig_prog_len - off - cnt));
8687
8688 return 0;
8689}
8690
2a5418a1
DB
8691/* The verifier does more data flow analysis than llvm and will not
8692 * explore branches that are dead at run time. Malicious programs can
8693 * have dead code too. Therefore replace all dead at-run-time code
8694 * with 'ja -1'.
8695 *
 8696 * Plain nops would not be optimal; e.g. if they were to sit at the end
 8697 * of the program and through another bug we managed to jump there, we
 8698 * would execute beyond program memory. Returning an exception code
 8699 * also wouldn't work since we can have subprogs where the dead
 8700 * code could be located.
c131187d
AS
8701 */
8702static void sanitize_dead_code(struct bpf_verifier_env *env)
8703{
8704 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
2a5418a1 8705 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
c131187d
AS
8706 struct bpf_insn *insn = env->prog->insnsi;
8707 const int insn_cnt = env->prog->len;
8708 int i;
8709
8710 for (i = 0; i < insn_cnt; i++) {
8711 if (aux_data[i].seen)
8712 continue;
2a5418a1 8713 memcpy(insn + i, &trap, sizeof(trap));
c131187d
AS
8714 }
8715}
8716
e2ae4ca2
JK
8717static bool insn_is_cond_jump(u8 code)
8718{
8719 u8 op;
8720
092ed096
JW
8721 if (BPF_CLASS(code) == BPF_JMP32)
8722 return true;
8723
e2ae4ca2
JK
8724 if (BPF_CLASS(code) != BPF_JMP)
8725 return false;
8726
8727 op = BPF_OP(code);
8728 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
8729}
8730
8731static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
8732{
8733 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8734 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8735 struct bpf_insn *insn = env->prog->insnsi;
8736 const int insn_cnt = env->prog->len;
8737 int i;
8738
8739 for (i = 0; i < insn_cnt; i++, insn++) {
8740 if (!insn_is_cond_jump(insn->code))
8741 continue;
8742
8743 if (!aux_data[i + 1].seen)
8744 ja.off = insn->off;
8745 else if (!aux_data[i + 1 + insn->off].seen)
8746 ja.off = 0;
8747 else
8748 continue;
8749
08ca90af
JK
8750 if (bpf_prog_is_dev_bound(env->prog->aux))
8751 bpf_prog_offload_replace_insn(env, i, &ja);
8752
e2ae4ca2
JK
8753 memcpy(insn, &ja, sizeof(ja));
8754 }
8755}
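
/* A worked example: if the fall-through insn was never seen, the branch is
 * always taken and the conditional jump is hard-wired to 'ja <off>'; if the
 * jump target was never seen, it becomes 'ja +0', a plain fall-through that
 * opt_remove_nops() can delete later.
 */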
8756
52875a04
JK
8757static int opt_remove_dead_code(struct bpf_verifier_env *env)
8758{
8759 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8760 int insn_cnt = env->prog->len;
8761 int i, err;
8762
8763 for (i = 0; i < insn_cnt; i++) {
8764 int j;
8765
8766 j = 0;
8767 while (i + j < insn_cnt && !aux_data[i + j].seen)
8768 j++;
8769 if (!j)
8770 continue;
8771
8772 err = verifier_remove_insns(env, i, j);
8773 if (err)
8774 return err;
8775 insn_cnt = env->prog->len;
8776 }
8777
8778 return 0;
8779}
8780
a1b14abc
JK
8781static int opt_remove_nops(struct bpf_verifier_env *env)
8782{
8783 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8784 struct bpf_insn *insn = env->prog->insnsi;
8785 int insn_cnt = env->prog->len;
8786 int i, err;
8787
8788 for (i = 0; i < insn_cnt; i++) {
8789 if (memcmp(&insn[i], &ja, sizeof(ja)))
8790 continue;
8791
8792 err = verifier_remove_insns(env, i, 1);
8793 if (err)
8794 return err;
8795 insn_cnt--;
8796 i--;
8797 }
8798
8799 return 0;
8800}
8801
d6c2308c
JW
8802static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
8803 const union bpf_attr *attr)
a4b1d3c1 8804{
d6c2308c 8805 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
a4b1d3c1 8806 struct bpf_insn_aux_data *aux = env->insn_aux_data;
d6c2308c 8807 int i, patch_len, delta = 0, len = env->prog->len;
a4b1d3c1 8808 struct bpf_insn *insns = env->prog->insnsi;
a4b1d3c1 8809 struct bpf_prog *new_prog;
d6c2308c 8810 bool rnd_hi32;
a4b1d3c1 8811
d6c2308c 8812 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
a4b1d3c1 8813 zext_patch[1] = BPF_ZEXT_REG(0);
d6c2308c
JW
8814 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
8815 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
8816 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
a4b1d3c1
JW
8817 for (i = 0; i < len; i++) {
8818 int adj_idx = i + delta;
8819 struct bpf_insn insn;
8820
d6c2308c
JW
8821 insn = insns[adj_idx];
8822 if (!aux[adj_idx].zext_dst) {
8823 u8 code, class;
8824 u32 imm_rnd;
8825
8826 if (!rnd_hi32)
8827 continue;
8828
8829 code = insn.code;
8830 class = BPF_CLASS(code);
8831 if (insn_no_def(&insn))
8832 continue;
8833
8834 /* NOTE: arg "reg" (the fourth one) is only used for
 8835 * BPF_STX, which has been ruled out by the check
 8836 * above, so it is safe to pass NULL here.
8837 */
8838 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
8839 if (class == BPF_LD &&
8840 BPF_MODE(code) == BPF_IMM)
8841 i++;
8842 continue;
8843 }
8844
8845 /* ctx load could be transformed into wider load. */
8846 if (class == BPF_LDX &&
8847 aux[adj_idx].ptr_type == PTR_TO_CTX)
8848 continue;
8849
8850 imm_rnd = get_random_int();
8851 rnd_hi32_patch[0] = insn;
8852 rnd_hi32_patch[1].imm = imm_rnd;
8853 rnd_hi32_patch[3].dst_reg = insn.dst_reg;
8854 patch = rnd_hi32_patch;
8855 patch_len = 4;
8856 goto apply_patch_buffer;
8857 }
8858
8859 if (!bpf_jit_needs_zext())
a4b1d3c1
JW
8860 continue;
8861
a4b1d3c1
JW
8862 zext_patch[0] = insn;
8863 zext_patch[1].dst_reg = insn.dst_reg;
8864 zext_patch[1].src_reg = insn.dst_reg;
d6c2308c
JW
8865 patch = zext_patch;
8866 patch_len = 2;
8867apply_patch_buffer:
8868 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
a4b1d3c1
JW
8869 if (!new_prog)
8870 return -ENOMEM;
8871 env->prog = new_prog;
8872 insns = new_prog->insnsi;
8873 aux = env->insn_aux_data;
d6c2308c 8874 delta += patch_len - 1;
a4b1d3c1
JW
8875 }
8876
8877 return 0;
8878}
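
/* A minimal simulation sketch (not part of this file) of the
 * rnd_hi32_patch emitted above for BPF_F_TEST_RND_HI32: after an insn
 * that defines only the low 32 bits of 'dst', the high 32 bits are
 * filled with random garbage so a JIT that relies on a missing
 * zero-extension is caught by tests rather than silently miscomputing.
 */
#include <stdint.h>

static uint64_t rnd_hi32_poison(uint64_t dst, uint32_t imm_rnd)
{
	uint64_t ax = imm_rnd;	/* BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd) */

	ax <<= 32;		/* BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32) */
	return dst | ax;	/* BPF_ALU64_REG(BPF_OR, dst, BPF_REG_AX) */
}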
8879
c64b7983
JS
8880/* convert load instructions that access fields of a context type into a
8881 * sequence of instructions that access fields of the underlying structure:
8882 * struct __sk_buff -> struct sk_buff
8883 * struct bpf_sock_ops -> struct sock
9bac3d6d 8884 */
58e2af8b 8885static int convert_ctx_accesses(struct bpf_verifier_env *env)
9bac3d6d 8886{
00176a34 8887 const struct bpf_verifier_ops *ops = env->ops;
f96da094 8888 int i, cnt, size, ctx_field_size, delta = 0;
3df126f3 8889 const int insn_cnt = env->prog->len;
36bbef52 8890 struct bpf_insn insn_buf[16], *insn;
46f53a65 8891 u32 target_size, size_default, off;
9bac3d6d 8892 struct bpf_prog *new_prog;
d691f9e8 8893 enum bpf_access_type type;
f96da094 8894 bool is_narrower_load;
9bac3d6d 8895
b09928b9
DB
8896 if (ops->gen_prologue || env->seen_direct_write) {
8897 if (!ops->gen_prologue) {
8898 verbose(env, "bpf verifier is misconfigured\n");
8899 return -EINVAL;
8900 }
36bbef52
DB
8901 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
8902 env->prog);
8903 if (cnt >= ARRAY_SIZE(insn_buf)) {
61bd5218 8904 verbose(env, "bpf verifier is misconfigured\n");
36bbef52
DB
8905 return -EINVAL;
8906 } else if (cnt) {
8041902d 8907 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
36bbef52
DB
8908 if (!new_prog)
8909 return -ENOMEM;
8041902d 8910
36bbef52 8911 env->prog = new_prog;
3df126f3 8912 delta += cnt - 1;
36bbef52
DB
8913 }
8914 }
8915
c64b7983 8916 if (bpf_prog_is_dev_bound(env->prog->aux))
9bac3d6d
AS
8917 return 0;
8918
3df126f3 8919 insn = env->prog->insnsi + delta;
36bbef52 8920
9bac3d6d 8921 for (i = 0; i < insn_cnt; i++, insn++) {
c64b7983
JS
8922 bpf_convert_ctx_access_t convert_ctx_access;
8923
62c7989b
DB
8924 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
8925 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
8926 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
ea2e7ce5 8927 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
d691f9e8 8928 type = BPF_READ;
62c7989b
DB
8929 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
8930 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
8931 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
ea2e7ce5 8932 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
d691f9e8
AS
8933 type = BPF_WRITE;
8934 else
9bac3d6d
AS
8935 continue;
8936
af86ca4e
AS
8937 if (type == BPF_WRITE &&
8938 env->insn_aux_data[i + delta].sanitize_stack_off) {
8939 struct bpf_insn patch[] = {
8940 /* Sanitize suspicious stack slot with zero.
8941 * There are no memory dependencies for this store,
8942 * since it's only using frame pointer and immediate
8943 * constant of zero
8944 */
8945 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
8946 env->insn_aux_data[i + delta].sanitize_stack_off,
8947 0),
8948 /* the original STX instruction will immediately
8949 * overwrite the same stack slot with appropriate value
8950 */
8951 *insn,
8952 };
8953
8954 cnt = ARRAY_SIZE(patch);
8955 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
8956 if (!new_prog)
8957 return -ENOMEM;
8958
8959 delta += cnt - 1;
8960 env->prog = new_prog;
8961 insn = new_prog->insnsi + i + delta;
8962 continue;
8963 }
8964
c64b7983
JS
8965 switch (env->insn_aux_data[i + delta].ptr_type) {
8966 case PTR_TO_CTX:
8967 if (!ops->convert_ctx_access)
8968 continue;
8969 convert_ctx_access = ops->convert_ctx_access;
8970 break;
8971 case PTR_TO_SOCKET:
46f8bc92 8972 case PTR_TO_SOCK_COMMON:
c64b7983
JS
8973 convert_ctx_access = bpf_sock_convert_ctx_access;
8974 break;
655a51e5
MKL
8975 case PTR_TO_TCP_SOCK:
8976 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
8977 break;
fada7fdc
JL
8978 case PTR_TO_XDP_SOCK:
8979 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
8980 break;
2a02759e 8981 case PTR_TO_BTF_ID:
27ae7997
MKL
8982 if (type == BPF_READ) {
8983 insn->code = BPF_LDX | BPF_PROBE_MEM |
8984 BPF_SIZE((insn)->code);
8985 env->prog->aux->num_exentries++;
8986 } else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
2a02759e
AS
8987 verbose(env, "Writes through BTF pointers are not allowed\n");
8988 return -EINVAL;
8989 }
2a02759e 8990 continue;
c64b7983 8991 default:
9bac3d6d 8992 continue;
c64b7983 8993 }
9bac3d6d 8994
31fd8581 8995 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
f96da094 8996 size = BPF_LDST_BYTES(insn);
31fd8581
YS
8997
8998 /* If the read access is a narrower load of the field,
 8999 * convert to a 4/8-byte load, to minimize program type specific
 9000 * convert_ctx_access changes. If conversion is successful,
 9001 * we will apply the proper mask to the result.
9002 */
f96da094 9003 is_narrower_load = size < ctx_field_size;
46f53a65
AI
9004 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
9005 off = insn->off;
31fd8581 9006 if (is_narrower_load) {
f96da094
DB
9007 u8 size_code;
9008
9009 if (type == BPF_WRITE) {
61bd5218 9010 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
f96da094
DB
9011 return -EINVAL;
9012 }
31fd8581 9013
f96da094 9014 size_code = BPF_H;
31fd8581
YS
9015 if (ctx_field_size == 4)
9016 size_code = BPF_W;
9017 else if (ctx_field_size == 8)
9018 size_code = BPF_DW;
f96da094 9019
bc23105c 9020 insn->off = off & ~(size_default - 1);
31fd8581
YS
9021 insn->code = BPF_LDX | BPF_MEM | size_code;
9022 }
f96da094
DB
9023
9024 target_size = 0;
c64b7983
JS
9025 cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
9026 &target_size);
f96da094
DB
9027 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
9028 (ctx_field_size && !target_size)) {
61bd5218 9029 verbose(env, "bpf verifier is misconfigured\n");
9bac3d6d
AS
9030 return -EINVAL;
9031 }
f96da094
DB
9032
9033 if (is_narrower_load && size < target_size) {
d895a0f1
IL
9034 u8 shift = bpf_ctx_narrow_access_offset(
9035 off, size, size_default) * 8;
46f53a65
AI
9036 if (ctx_field_size <= 4) {
9037 if (shift)
9038 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
9039 insn->dst_reg,
9040 shift);
31fd8581 9041 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
f96da094 9042 (1 << size * 8) - 1);
46f53a65
AI
9043 } else {
9044 if (shift)
9045 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
9046 insn->dst_reg,
9047 shift);
31fd8581 9048 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
e2f7fc0a 9049 (1ULL << size * 8) - 1);
46f53a65 9050 }
31fd8581 9051 }
9bac3d6d 9052
8041902d 9053 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9bac3d6d
AS
9054 if (!new_prog)
9055 return -ENOMEM;
9056
3df126f3 9057 delta += cnt - 1;
9bac3d6d
AS
9058
9059 /* keep walking new program and skip insns we just inserted */
9060 env->prog = new_prog;
3df126f3 9061 insn = new_prog->insnsi + i + delta;
9bac3d6d
AS
9062 }
9063
9064 return 0;
9065}
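
/* A minimal simulation sketch (not part of this file) of the
 * narrow-load rewrite above: a 'size'-byte read at byte offset 'off'
 * within a wider ctx field becomes a full 'size_default'-byte load
 * followed by a shift and mask. Little-endian layout is assumed here;
 * the real shift comes from bpf_ctx_narrow_access_offset().
 */
#include <stdint.h>

static uint64_t narrow_load(uint64_t full_field, uint32_t off,
			    uint32_t size, uint32_t size_default)
{
	uint32_t shift = (off & (size_default - 1)) * 8;	/* little endian */
	uint64_t mask = size < 8 ? (1ULL << (size * 8)) - 1 : ~0ULL;

	return (full_field >> shift) & mask;
}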
9066
1c2a088a
AS
9067static int jit_subprogs(struct bpf_verifier_env *env)
9068{
9069 struct bpf_prog *prog = env->prog, **func, *tmp;
9070 int i, j, subprog_start, subprog_end = 0, len, subprog;
7105e828 9071 struct bpf_insn *insn;
1c2a088a 9072 void *old_bpf_func;
c454a46b 9073 int err;
1c2a088a 9074
f910cefa 9075 if (env->subprog_cnt <= 1)
1c2a088a
AS
9076 return 0;
9077
7105e828 9078 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
1c2a088a
AS
9079 if (insn->code != (BPF_JMP | BPF_CALL) ||
9080 insn->src_reg != BPF_PSEUDO_CALL)
9081 continue;
c7a89784
DB
9082 /* Upon error here we cannot fall back to interpreter but
9083 * need a hard reject of the program. Thus -EFAULT is
9084 * propagated in any case.
9085 */
1c2a088a
AS
9086 subprog = find_subprog(env, i + insn->imm + 1);
9087 if (subprog < 0) {
9088 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
9089 i + insn->imm + 1);
9090 return -EFAULT;
9091 }
9092 /* temporarily remember subprog id inside insn instead of
9093 * aux_data, since next loop will split up all insns into funcs
9094 */
f910cefa 9095 insn->off = subprog;
1c2a088a
AS
9096 /* remember original imm in case JIT fails and fallback
9097 * to interpreter will be needed
9098 */
9099 env->insn_aux_data[i].call_imm = insn->imm;
9100 /* point imm to __bpf_call_base+1 from JITs point of view */
9101 insn->imm = 1;
9102 }
9103
c454a46b
MKL
9104 err = bpf_prog_alloc_jited_linfo(prog);
9105 if (err)
9106 goto out_undo_insn;
9107
9108 err = -ENOMEM;
6396bb22 9109 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
1c2a088a 9110 if (!func)
c7a89784 9111 goto out_undo_insn;
1c2a088a 9112
f910cefa 9113 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a 9114 subprog_start = subprog_end;
4cb3d99c 9115 subprog_end = env->subprog_info[i + 1].start;
1c2a088a
AS
9116
9117 len = subprog_end - subprog_start;
492ecee8
AS
9118 /* BPF_PROG_RUN doesn't call subprogs directly,
9119 * hence main prog stats include the runtime of subprogs.
 9120 * subprogs don't have IDs and are not reachable via prog_get_next_id,
 9121 * so func[i]->aux->stats will never be accessed and stays NULL
9122 */
9123 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
1c2a088a
AS
9124 if (!func[i])
9125 goto out_free;
9126 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
9127 len * sizeof(struct bpf_insn));
4f74d809 9128 func[i]->type = prog->type;
1c2a088a 9129 func[i]->len = len;
4f74d809
DB
9130 if (bpf_prog_calc_tag(func[i]))
9131 goto out_free;
1c2a088a 9132 func[i]->is_func = 1;
ba64e7d8
YS
9133 func[i]->aux->func_idx = i;
9134 /* the btf and func_info will be freed only at prog->aux */
9135 func[i]->aux->btf = prog->aux->btf;
9136 func[i]->aux->func_info = prog->aux->func_info;
9137
1c2a088a
AS
9138 /* Use bpf_prog_F_tag to indicate functions in stack traces.
9139 * Long term would need debug info to populate names
9140 */
9141 func[i]->aux->name[0] = 'F';
9c8105bd 9142 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
1c2a088a 9143 func[i]->jit_requested = 1;
c454a46b
MKL
9144 func[i]->aux->linfo = prog->aux->linfo;
9145 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
9146 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
9147 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
1c2a088a
AS
9148 func[i] = bpf_int_jit_compile(func[i]);
9149 if (!func[i]->jited) {
9150 err = -ENOTSUPP;
9151 goto out_free;
9152 }
9153 cond_resched();
9154 }
9155 /* at this point all bpf functions were successfully JITed
9156 * now populate all bpf_calls with correct addresses and
9157 * run last pass of JIT
9158 */
f910cefa 9159 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9160 insn = func[i]->insnsi;
9161 for (j = 0; j < func[i]->len; j++, insn++) {
9162 if (insn->code != (BPF_JMP | BPF_CALL) ||
9163 insn->src_reg != BPF_PSEUDO_CALL)
9164 continue;
9165 subprog = insn->off;
0d306c31
PB
9166 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
9167 __bpf_call_base;
1c2a088a 9168 }
2162fed4
SD
9169
9170 /* we use the aux data to keep a list of the start addresses
9171 * of the JITed images for each function in the program
9172 *
9173 * for some architectures, such as powerpc64, the imm field
9174 * might not be large enough to hold the offset of the start
9175 * address of the callee's JITed image from __bpf_call_base
9176 *
9177 * in such cases, we can lookup the start address of a callee
9178 * by using its subprog id, available from the off field of
9179 * the call instruction, as an index for this list
9180 */
9181 func[i]->aux->func = func;
9182 func[i]->aux->func_cnt = env->subprog_cnt;
1c2a088a 9183 }
f910cefa 9184 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9185 old_bpf_func = func[i]->bpf_func;
9186 tmp = bpf_int_jit_compile(func[i]);
9187 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
9188 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
c7a89784 9189 err = -ENOTSUPP;
1c2a088a
AS
9190 goto out_free;
9191 }
9192 cond_resched();
9193 }
9194
9195 /* finally lock prog and jit images for all functions and
 9196 * populate kallsyms
9197 */
f910cefa 9198 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9199 bpf_prog_lock_ro(func[i]);
9200 bpf_prog_kallsyms_add(func[i]);
9201 }
7105e828
DB
9202
9203 /* Last step: make now unused interpreter insns from main
9204 * prog consistent for later dump requests, so they can
 9205 * look the same as if they were interpreted only.
9206 */
9207 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
7105e828
DB
9208 if (insn->code != (BPF_JMP | BPF_CALL) ||
9209 insn->src_reg != BPF_PSEUDO_CALL)
9210 continue;
9211 insn->off = env->insn_aux_data[i].call_imm;
9212 subprog = find_subprog(env, i + insn->off + 1);
dbecd738 9213 insn->imm = subprog;
7105e828
DB
9214 }
9215
1c2a088a
AS
9216 prog->jited = 1;
9217 prog->bpf_func = func[0]->bpf_func;
9218 prog->aux->func = func;
f910cefa 9219 prog->aux->func_cnt = env->subprog_cnt;
c454a46b 9220 bpf_prog_free_unused_jited_linfo(prog);
1c2a088a
AS
9221 return 0;
9222out_free:
f910cefa 9223 for (i = 0; i < env->subprog_cnt; i++)
1c2a088a
AS
9224 if (func[i])
9225 bpf_jit_free(func[i]);
9226 kfree(func);
c7a89784 9227out_undo_insn:
1c2a088a
AS
9228 /* cleanup main prog to be interpreted */
9229 prog->jit_requested = 0;
9230 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
9231 if (insn->code != (BPF_JMP | BPF_CALL) ||
9232 insn->src_reg != BPF_PSEUDO_CALL)
9233 continue;
9234 insn->off = 0;
9235 insn->imm = env->insn_aux_data[i].call_imm;
9236 }
c454a46b 9237 bpf_prog_free_jited_linfo(prog);
1c2a088a
AS
9238 return err;
9239}
9240
1ea47e01
AS
9241static int fixup_call_args(struct bpf_verifier_env *env)
9242{
19d28fbd 9243#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
9244 struct bpf_prog *prog = env->prog;
9245 struct bpf_insn *insn = prog->insnsi;
9246 int i, depth;
19d28fbd 9247#endif
e4052d06 9248 int err = 0;
1ea47e01 9249
e4052d06
QM
9250 if (env->prog->jit_requested &&
9251 !bpf_prog_is_dev_bound(env->prog->aux)) {
19d28fbd
DM
9252 err = jit_subprogs(env);
9253 if (err == 0)
1c2a088a 9254 return 0;
c7a89784
DB
9255 if (err == -EFAULT)
9256 return err;
19d28fbd
DM
9257 }
9258#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
9259 for (i = 0; i < prog->len; i++, insn++) {
9260 if (insn->code != (BPF_JMP | BPF_CALL) ||
9261 insn->src_reg != BPF_PSEUDO_CALL)
9262 continue;
9263 depth = get_callee_stack_depth(env, insn, i);
9264 if (depth < 0)
9265 return depth;
9266 bpf_patch_call_args(insn, depth);
9267 }
19d28fbd
DM
9268 err = 0;
9269#endif
9270 return err;
1ea47e01
AS
9271}
9272
79741b3b 9273/* fixup insn->imm field of bpf_call instructions
81ed18ab 9274 * and inline eligible helpers as explicit sequence of BPF instructions
e245c5c6
AS
9275 *
9276 * this function is called after eBPF program passed verification
9277 */
79741b3b 9278static int fixup_bpf_calls(struct bpf_verifier_env *env)
e245c5c6 9279{
79741b3b 9280 struct bpf_prog *prog = env->prog;
d2e4c1e6 9281 bool expect_blinding = bpf_jit_blinding_enabled(prog);
79741b3b 9282 struct bpf_insn *insn = prog->insnsi;
e245c5c6 9283 const struct bpf_func_proto *fn;
79741b3b 9284 const int insn_cnt = prog->len;
09772d92 9285 const struct bpf_map_ops *ops;
c93552c4 9286 struct bpf_insn_aux_data *aux;
81ed18ab
AS
9287 struct bpf_insn insn_buf[16];
9288 struct bpf_prog *new_prog;
9289 struct bpf_map *map_ptr;
d2e4c1e6 9290 int i, ret, cnt, delta = 0;
e245c5c6 9291
79741b3b 9292 for (i = 0; i < insn_cnt; i++, insn++) {
f6b1b3bf
DB
9293 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
9294 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9295 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
68fda450 9296 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
f6b1b3bf
DB
9297 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
9298 struct bpf_insn mask_and_div[] = {
9299 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9300 /* Rx div 0 -> 0 */
9301 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
9302 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
9303 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9304 *insn,
9305 };
9306 struct bpf_insn mask_and_mod[] = {
9307 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9308 /* Rx mod 0 -> Rx */
9309 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
9310 *insn,
9311 };
9312 struct bpf_insn *patchlet;
9313
9314 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9315 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
9316 patchlet = mask_and_div + (is64 ? 1 : 0);
9317 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
9318 } else {
9319 patchlet = mask_and_mod + (is64 ? 1 : 0);
9320 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
9321 }
9322
9323 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
68fda450
AS
9324 if (!new_prog)
9325 return -ENOMEM;
9326
9327 delta += cnt - 1;
9328 env->prog = prog = new_prog;
9329 insn = new_prog->insnsi + i + delta;
9330 continue;
9331 }
9332
e0cea7ce
DB
9333 if (BPF_CLASS(insn->code) == BPF_LD &&
9334 (BPF_MODE(insn->code) == BPF_ABS ||
9335 BPF_MODE(insn->code) == BPF_IND)) {
9336 cnt = env->ops->gen_ld_abs(insn, insn_buf);
9337 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9338 verbose(env, "bpf verifier is misconfigured\n");
9339 return -EINVAL;
9340 }
9341
9342 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9343 if (!new_prog)
9344 return -ENOMEM;
9345
9346 delta += cnt - 1;
9347 env->prog = prog = new_prog;
9348 insn = new_prog->insnsi + i + delta;
9349 continue;
9350 }
9351
979d63d5
DB
9352 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
9353 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
9354 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
9355 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
9356 struct bpf_insn insn_buf[16];
9357 struct bpf_insn *patch = &insn_buf[0];
9358 bool issrc, isneg;
9359 u32 off_reg;
9360
9361 aux = &env->insn_aux_data[i + delta];
3612af78
DB
9362 if (!aux->alu_state ||
9363 aux->alu_state == BPF_ALU_NON_POINTER)
979d63d5
DB
9364 continue;
9365
9366 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
9367 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
9368 BPF_ALU_SANITIZE_SRC;
9369
9370 off_reg = issrc ? insn->src_reg : insn->dst_reg;
9371 if (isneg)
9372 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9373 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
9374 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
9375 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
9376 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
9377 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
9378 if (issrc) {
9379 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
9380 off_reg);
9381 insn->src_reg = BPF_REG_AX;
9382 } else {
9383 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
9384 BPF_REG_AX);
9385 }
9386 if (isneg)
9387 insn->code = insn->code == code_add ?
9388 code_sub : code_add;
9389 *patch++ = *insn;
9390 if (issrc && isneg)
9391 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9392 cnt = patch - insn_buf;
9393
9394 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9395 if (!new_prog)
9396 return -ENOMEM;
9397
9398 delta += cnt - 1;
9399 env->prog = prog = new_prog;
9400 insn = new_prog->insnsi + i + delta;
9401 continue;
9402 }
9403
79741b3b
AS
9404 if (insn->code != (BPF_JMP | BPF_CALL))
9405 continue;
cc8b0b92
AS
9406 if (insn->src_reg == BPF_PSEUDO_CALL)
9407 continue;
e245c5c6 9408
79741b3b
AS
9409 if (insn->imm == BPF_FUNC_get_route_realm)
9410 prog->dst_needed = 1;
9411 if (insn->imm == BPF_FUNC_get_prandom_u32)
9412 bpf_user_rnd_init_once();
9802d865
JB
9413 if (insn->imm == BPF_FUNC_override_return)
9414 prog->kprobe_override = 1;
79741b3b 9415 if (insn->imm == BPF_FUNC_tail_call) {
7b9f6da1
DM
9416 /* If we tail call into other programs, we
9417 * cannot make any assumptions since they can
9418 * be replaced dynamically during runtime in
9419 * the program array.
9420 */
9421 prog->cb_access = 1;
80a58d02 9422 env->prog->aux->stack_depth = MAX_BPF_STACK;
e647815a 9423 env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
7b9f6da1 9424
79741b3b
AS
9425 /* mark bpf_tail_call as different opcode to avoid
 9426 * conditional branch in the interpreter for every normal
 9427 * call and to prevent accidental JITing by a JIT compiler
9428 * that doesn't support bpf_tail_call yet
e245c5c6 9429 */
79741b3b 9430 insn->imm = 0;
71189fa9 9431 insn->code = BPF_JMP | BPF_TAIL_CALL;
b2157399 9432
c93552c4 9433 aux = &env->insn_aux_data[i + delta];
cc52d914
DB
9434 if (env->allow_ptr_leaks && !expect_blinding &&
9435 prog->jit_requested &&
d2e4c1e6
DB
9436 !bpf_map_key_poisoned(aux) &&
9437 !bpf_map_ptr_poisoned(aux) &&
9438 !bpf_map_ptr_unpriv(aux)) {
9439 struct bpf_jit_poke_descriptor desc = {
9440 .reason = BPF_POKE_REASON_TAIL_CALL,
9441 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
9442 .tail_call.key = bpf_map_key_immediate(aux),
9443 };
9444
9445 ret = bpf_jit_add_poke_descriptor(prog, &desc);
9446 if (ret < 0) {
9447 verbose(env, "adding tail call poke descriptor failed\n");
9448 return ret;
9449 }
9450
9451 insn->imm = ret + 1;
9452 continue;
9453 }
9454
c93552c4
DB
9455 if (!bpf_map_ptr_unpriv(aux))
9456 continue;
9457
b2157399
AS
9458 /* instead of changing every JIT dealing with tail_call
9459 * emit two extra insns:
9460 * if (index >= max_entries) goto out;
9461 * index &= array->index_mask;
9462 * to avoid out-of-bounds cpu speculation
9463 */
c93552c4 9464 if (bpf_map_ptr_poisoned(aux)) {
40950343 9465 verbose(env, "tail_call abusing map_ptr\n");
b2157399
AS
9466 return -EINVAL;
9467 }
c93552c4 9468
d2e4c1e6 9469 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
b2157399
AS
9470 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
9471 map_ptr->max_entries, 2);
9472 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
9473 container_of(map_ptr,
9474 struct bpf_array,
9475 map)->index_mask);
9476 insn_buf[2] = *insn;
9477 cnt = 3;
9478 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9479 if (!new_prog)
9480 return -ENOMEM;
9481
9482 delta += cnt - 1;
9483 env->prog = prog = new_prog;
9484 insn = new_prog->insnsi + i + delta;
79741b3b
AS
9485 continue;
9486 }
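
		/* A worked example with hypothetical numbers: for a prog
		 * array with max_entries = 5, index_mask is
		 * roundup_pow_of_two(5) - 1 = 7 and the array's backing
		 * store is sized for 8 slots, so even a mispredicted
		 * (speculative) index stays inside the allocation.
		 */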
e245c5c6 9487
89c63074 9488 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
09772d92
DB
9489 * and other inlining handlers are currently limited to 64 bit
9490 * only.
89c63074 9491 */
60b58afc 9492 if (prog->jit_requested && BITS_PER_LONG == 64 &&
09772d92
DB
9493 (insn->imm == BPF_FUNC_map_lookup_elem ||
9494 insn->imm == BPF_FUNC_map_update_elem ||
84430d42
DB
9495 insn->imm == BPF_FUNC_map_delete_elem ||
9496 insn->imm == BPF_FUNC_map_push_elem ||
9497 insn->imm == BPF_FUNC_map_pop_elem ||
9498 insn->imm == BPF_FUNC_map_peek_elem)) {
c93552c4
DB
9499 aux = &env->insn_aux_data[i + delta];
9500 if (bpf_map_ptr_poisoned(aux))
9501 goto patch_call_imm;
9502
d2e4c1e6 9503 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
09772d92
DB
9504 ops = map_ptr->ops;
9505 if (insn->imm == BPF_FUNC_map_lookup_elem &&
9506 ops->map_gen_lookup) {
9507 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
9508 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9509 verbose(env, "bpf verifier is misconfigured\n");
9510 return -EINVAL;
9511 }
81ed18ab 9512
09772d92
DB
9513 new_prog = bpf_patch_insn_data(env, i + delta,
9514 insn_buf, cnt);
9515 if (!new_prog)
9516 return -ENOMEM;
81ed18ab 9517
09772d92
DB
9518 delta += cnt - 1;
9519 env->prog = prog = new_prog;
9520 insn = new_prog->insnsi + i + delta;
9521 continue;
9522 }
81ed18ab 9523
09772d92
DB
9524 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
9525 (void *(*)(struct bpf_map *map, void *key))NULL));
9526 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
9527 (int (*)(struct bpf_map *map, void *key))NULL));
9528 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
9529 (int (*)(struct bpf_map *map, void *key, void *value,
9530 u64 flags))NULL));
84430d42
DB
9531 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
9532 (int (*)(struct bpf_map *map, void *value,
9533 u64 flags))NULL));
9534 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
9535 (int (*)(struct bpf_map *map, void *value))NULL));
9536 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
9537 (int (*)(struct bpf_map *map, void *value))NULL));
9538
09772d92
DB
9539 switch (insn->imm) {
9540 case BPF_FUNC_map_lookup_elem:
9541 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
9542 __bpf_call_base;
9543 continue;
9544 case BPF_FUNC_map_update_elem:
9545 insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
9546 __bpf_call_base;
9547 continue;
9548 case BPF_FUNC_map_delete_elem:
9549 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
9550 __bpf_call_base;
9551 continue;
84430d42
DB
9552 case BPF_FUNC_map_push_elem:
9553 insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
9554 __bpf_call_base;
9555 continue;
9556 case BPF_FUNC_map_pop_elem:
9557 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
9558 __bpf_call_base;
9559 continue;
9560 case BPF_FUNC_map_peek_elem:
9561 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
9562 __bpf_call_base;
9563 continue;
09772d92 9564 }
81ed18ab 9565
09772d92 9566 goto patch_call_imm;
81ed18ab
AS
9567 }
9568
5576b991
MKL
9569 if (prog->jit_requested && BITS_PER_LONG == 64 &&
9570 insn->imm == BPF_FUNC_jiffies64) {
9571 struct bpf_insn ld_jiffies_addr[2] = {
9572 BPF_LD_IMM64(BPF_REG_0,
9573 (unsigned long)&jiffies),
9574 };
9575
9576 insn_buf[0] = ld_jiffies_addr[0];
9577 insn_buf[1] = ld_jiffies_addr[1];
9578 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
9579 BPF_REG_0, 0);
9580 cnt = 3;
9581
9582 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
9583 cnt);
9584 if (!new_prog)
9585 return -ENOMEM;
9586
9587 delta += cnt - 1;
9588 env->prog = prog = new_prog;
9589 insn = new_prog->insnsi + i + delta;
9590 continue;
9591 }
9592
81ed18ab 9593patch_call_imm:
5e43f899 9594 fn = env->ops->get_func_proto(insn->imm, env->prog);
79741b3b
AS
 9595 /* all functions that have a prototype and that the verifier allowed
 9596 * programs to call must be real in-kernel functions
9597 */
9598 if (!fn->func) {
61bd5218
JK
9599 verbose(env,
9600 "kernel subsystem misconfigured func %s#%d\n",
79741b3b
AS
9601 func_id_name(insn->imm), insn->imm);
9602 return -EFAULT;
e245c5c6 9603 }
79741b3b 9604 insn->imm = fn->func - __bpf_call_base;
e245c5c6 9605 }
e245c5c6 9606
d2e4c1e6
DB
9607 /* Since poke tab is now finalized, publish aux to tracker. */
9608 for (i = 0; i < prog->aux->size_poke_tab; i++) {
9609 map_ptr = prog->aux->poke_tab[i].tail_call.map;
9610 if (!map_ptr->ops->map_poke_track ||
9611 !map_ptr->ops->map_poke_untrack ||
9612 !map_ptr->ops->map_poke_run) {
9613 verbose(env, "bpf verifier is misconfigured\n");
9614 return -EINVAL;
9615 }
9616
9617 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
9618 if (ret < 0) {
9619 verbose(env, "tracking tail call prog failed\n");
9620 return ret;
9621 }
9622 }
9623
79741b3b
AS
9624 return 0;
9625}
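
/* A minimal simulation sketch (not part of this file) of the semantics
 * the mask_and_div/mask_and_mod patchlets above enforce: eBPF defines
 * division by zero as 0 and modulo by zero as leaving the destination
 * unchanged, so the verifier patches every BPF_DIV/BPF_MOD by a
 * register instead of trusting JIT or hardware behaviour.
 */
#include <stdint.h>

static uint64_t bpf_div64(uint64_t dst, uint64_t src)
{
	return src ? dst / src : 0;	/* Rx div 0 -> 0 */
}

static uint64_t bpf_mod64(uint64_t dst, uint64_t src)
{
	return src ? dst % src : dst;	/* Rx mod 0 -> Rx */
}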
e245c5c6 9626
58e2af8b 9627static void free_states(struct bpf_verifier_env *env)
f1bca824 9628{
58e2af8b 9629 struct bpf_verifier_state_list *sl, *sln;
f1bca824
AS
9630 int i;
9631
9f4686c4
AS
9632 sl = env->free_list;
9633 while (sl) {
9634 sln = sl->next;
9635 free_verifier_state(&sl->state, false);
9636 kfree(sl);
9637 sl = sln;
9638 }
51c39bb1 9639 env->free_list = NULL;
9f4686c4 9640
f1bca824
AS
9641 if (!env->explored_states)
9642 return;
9643
dc2a4ebc 9644 for (i = 0; i < state_htab_size(env); i++) {
f1bca824
AS
9645 sl = env->explored_states[i];
9646
a8f500af
AS
9647 while (sl) {
9648 sln = sl->next;
9649 free_verifier_state(&sl->state, false);
9650 kfree(sl);
9651 sl = sln;
9652 }
51c39bb1 9653 env->explored_states[i] = NULL;
f1bca824 9654 }
51c39bb1 9655}
f1bca824 9656
51c39bb1
AS
9657/* The verifier is using insn_aux_data[] to store temporary data during
9658 * verification and to store information for passes that run after the
9659 * verification like dead code sanitization. do_check_common() for subprogram N
9660 * may analyze many other subprograms. sanitize_insn_aux_data() clears all
9661 * temporary data after do_check_common() finds that subprogram N cannot be
9662 * verified independently. pass_cnt counts the number of times
9663 * do_check_common() was run and insn->aux->seen tells the pass number
9664 * insn_aux_data was touched. These variables are compared to clear temporary
 9666 * data from a failed pass. For testing and experiments, do_check_common() can be
9666 * run multiple times even when prior attempt to verify is unsuccessful.
9667 */
9668static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
9669{
9670 struct bpf_insn *insn = env->prog->insnsi;
9671 struct bpf_insn_aux_data *aux;
9672 int i, class;
9673
9674 for (i = 0; i < env->prog->len; i++) {
9675 class = BPF_CLASS(insn[i].code);
9676 if (class != BPF_LDX && class != BPF_STX)
9677 continue;
9678 aux = &env->insn_aux_data[i];
9679 if (aux->seen != env->pass_cnt)
9680 continue;
9681 memset(aux, 0, offsetof(typeof(*aux), orig_idx));
9682 }
f1bca824
AS
9683}
9684
51c39bb1
AS
9685static int do_check_common(struct bpf_verifier_env *env, int subprog)
9686{
9687 struct bpf_verifier_state *state;
9688 struct bpf_reg_state *regs;
9689 int ret, i;
9690
9691 env->prev_linfo = NULL;
9692 env->pass_cnt++;
9693
9694 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
9695 if (!state)
9696 return -ENOMEM;
9697 state->curframe = 0;
9698 state->speculative = false;
9699 state->branches = 1;
9700 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
9701 if (!state->frame[0]) {
9702 kfree(state);
9703 return -ENOMEM;
9704 }
9705 env->cur_state = state;
9706 init_func_state(env, state->frame[0],
9707 BPF_MAIN_FUNC /* callsite */,
9708 0 /* frameno */,
9709 subprog);
9710
9711 regs = state->frame[state->curframe]->regs;
be8704ff 9712 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
51c39bb1
AS
9713 ret = btf_prepare_func_args(env, subprog, regs);
9714 if (ret)
9715 goto out;
9716 for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
9717 if (regs[i].type == PTR_TO_CTX)
9718 mark_reg_known_zero(env, regs, i);
9719 else if (regs[i].type == SCALAR_VALUE)
9720 mark_reg_unknown(env, regs, i);
9721 }
9722 } else {
9723 /* 1st arg to a function */
9724 regs[BPF_REG_1].type = PTR_TO_CTX;
9725 mark_reg_known_zero(env, regs, BPF_REG_1);
9726 ret = btf_check_func_arg_match(env, subprog, regs);
9727 if (ret == -EFAULT)
9728 /* unlikely verifier bug. abort.
9729 * ret == 0 and ret < 0 are sadly acceptable for
 9730 * the main() function due to backward compatibility.
 9731 * E.g. a socket filter program may be written as:
9732 * int bpf_prog(struct pt_regs *ctx)
9733 * and never dereference that ctx in the program.
9734 * 'struct pt_regs' is a type mismatch for socket
9735 * filter that should be using 'struct __sk_buff'.
9736 */
9737 goto out;
9738 }
9739
9740 ret = do_check(env);
9741out:
f59bbfc2
AS
9742 /* check for NULL is necessary, since cur_state can be freed inside
9743 * do_check() under memory pressure.
9744 */
9745 if (env->cur_state) {
9746 free_verifier_state(env->cur_state, true);
9747 env->cur_state = NULL;
9748 }
51c39bb1
AS
9749 while (!pop_stack(env, NULL, NULL));
9750 free_states(env);
9751 if (ret)
9752 /* clean aux data in case subprog was rejected */
9753 sanitize_insn_aux_data(env);
9754 return ret;
9755}
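/* Illustrative BPF-side sketch (hedged; the function and its types are
 * examples, not taken from this file): a global subprogram whose
 * BTF-described arguments drive the register setup in do_check_common().
 * btf_prepare_func_args() would mark 'skb' as PTR_TO_CTX and 'len' as an
 * unknown SCALAR_VALUE before the subprogram is checked on its own:
 *
 *	__attribute__((noinline))
 *	int handle_len(struct __sk_buff *skb, int len)
 *	{
 *		return len > 0 ? len : 0;
 *	}
 */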
9756
9757/* Verify all global functions in a BPF program one by one based on their BTF.
9758 * All global functions must pass verification. Otherwise the whole program is rejected.
9759 * Consider:
9760 * int bar(int);
9761 * int foo(int f)
9762 * {
9763 * return bar(f);
9764 * }
9765 * int bar(int b)
9766 * {
9767 * ...
9768 * }
9769 * foo() will be verified first for R1=any_scalar_value. During verification it
9770 * will be assumed that bar() has already been verified successfully, and the
9771 * call to bar() from foo() will be checked for a type match only. Later bar()
9772 * will be verified independently to check that it's safe for R1=any_scalar_value.
9773 */
9774static int do_check_subprogs(struct bpf_verifier_env *env)
9775{
9776 struct bpf_prog_aux *aux = env->prog->aux;
9777 int i, ret;
9778
9779 if (!aux->func_info)
9780 return 0;
9781
9782 for (i = 1; i < env->subprog_cnt; i++) {
9783 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
9784 continue;
9785 env->insn_idx = env->subprog_info[i].start;
9786 WARN_ON_ONCE(env->insn_idx == 0);
9787 ret = do_check_common(env, i);
9788 if (ret) {
9789 return ret;
9790 } else if (env->log.level & BPF_LOG_LEVEL) {
9791 verbose(env,
9792 "Func#%d is safe for any args that match its prototype\n",
9793 i);
9794 }
9795 }
9796 return 0;
9797}
9798
9799static int do_check_main(struct bpf_verifier_env *env)
9800{
9801 int ret;
9802
9803 env->insn_idx = 0;
9804 ret = do_check_common(env, 0);
9805 if (!ret)
9806 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
9807 return ret;
9808}
9809
9810
06ee7115
AS
9811static void print_verification_stats(struct bpf_verifier_env *env)
9812{
9813 int i;
9814
9815 if (env->log.level & BPF_LOG_STATS) {
9816 verbose(env, "verification time %lld usec\n",
9817 div_u64(env->verification_time, 1000));
9818 verbose(env, "stack depth ");
9819 for (i = 0; i < env->subprog_cnt; i++) {
9820 u32 depth = env->subprog_info[i].stack_depth;
9821
9822 verbose(env, "%d", depth);
9823 if (i + 1 < env->subprog_cnt)
9824 verbose(env, "+");
9825 }
9826 verbose(env, "\n");
9827 }
9828 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
9829 "total_states %d peak_states %d mark_read %d\n",
9830 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
9831 env->max_states_per_insn, env->total_states,
9832 env->peak_states, env->longest_mark_read_walk);
f1bca824
AS
9833}
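/* Illustrative output (values below are examples, not measured). The
 * first two lines appear only when log_level includes BPF_LOG_STATS;
 * the "processed" line is emitted for any active log:
 *
 *	verification time 1234 usec
 *	stack depth 64+8
 *	processed 420 insns (limit 1000000) max_states_per_insn 4
 *	total_states 30 peak_states 30 mark_read 5
 */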
9834
27ae7997
MKL
9835static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
9836{
9837 const struct btf_type *t, *func_proto;
9838 const struct bpf_struct_ops *st_ops;
9839 const struct btf_member *member;
9840 struct bpf_prog *prog = env->prog;
9841 u32 btf_id, member_idx;
9842 const char *mname;
9843
9844 btf_id = prog->aux->attach_btf_id;
9845 st_ops = bpf_struct_ops_find(btf_id);
9846 if (!st_ops) {
9847 verbose(env, "attach_btf_id %u is not a supported struct\n",
9848 btf_id);
9849 return -ENOTSUPP;
9850 }
9851
9852 t = st_ops->type;
9853 member_idx = prog->expected_attach_type;
9854 if (member_idx >= btf_type_vlen(t)) {
9855 verbose(env, "attach to invalid member idx %u of struct %s\n",
9856 member_idx, st_ops->name);
9857 return -EINVAL;
9858 }
9859
9860 member = &btf_type_member(t)[member_idx];
9861 mname = btf_name_by_offset(btf_vmlinux, member->name_off);
9862 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
9863 NULL);
9864 if (!func_proto) {
9865 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
9866 mname, member_idx, st_ops->name);
9867 return -EINVAL;
9868 }
9869
9870 if (st_ops->check_member) {
9871 int err = st_ops->check_member(t, member);
9872
9873 if (err) {
9874 verbose(env, "attach to unsupported member %s of struct %s\n",
9875 mname, st_ops->name);
9876 return err;
9877 }
9878 }
9879
9880 prog->aux->attach_func_proto = func_proto;
9881 prog->aux->attach_func_name = mname;
9882 env->ops = st_ops->verifier_ops;
9883
9884 return 0;
9885}
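/* Illustrative user-side shape that reaches check_struct_ops_btf_id()
 * (hedged; follows the bpf_cubic-style selftest conventions, names
 * assumed): attach_btf_id names 'struct tcp_congestion_ops' in vmlinux
 * BTF and expected_attach_type carries the index of the implemented
 * member, whose func_proto becomes the program's checked signature.
 *
 *	SEC("struct_ops/bpf_cubic_ssthresh")
 *	__u32 BPF_PROG(bpf_cubic_ssthresh, struct sock *sk)
 *	{
 *		return 2;
 *	}
 */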
6ba43b76
KS
9886#define SECURITY_PREFIX "security_"
9887
9888static int check_attach_modify_return(struct bpf_verifier_env *env)
9889{
9890 struct bpf_prog *prog = env->prog;
9891 unsigned long addr = (unsigned long) prog->aux->trampoline->func.addr;
9892
6ba43b76
KS
9893 /* This is expected to be cleaned up in the future with the KRSI effort
9894 * introducing the LSM_HOOK macro to clean up lsm_hooks.h.
9895 */
69191754
KS
9896 if (within_error_injection_list(addr) ||
9897 !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name,
9898 sizeof(SECURITY_PREFIX) - 1))
6ba43b76 9899 return 0;
6ba43b76
KS
9900
9901 verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n",
9902 prog->aux->attach_btf_id, prog->aux->attach_func_name);
9903
9904 return -EINVAL;
9905}
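/* Illustrative BPF-side sketch (hedged; the hook and program names are
 * examples): an fmod_ret program passes the check above either because
 * its target is on the error-injection list or because the target name
 * starts with "security_". A nonzero return overrides the hook's return
 * value:
 *
 *	SEC("fmod_ret/security_socket_connect")
 *	int BPF_PROG(deny_connect, struct socket *sock,
 *		     struct sockaddr *address, int addrlen, int ret)
 *	{
 *		return ret ? ret : -EPERM;
 *	}
 */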
27ae7997 9906
38207291
MKL
9907static int check_attach_btf_id(struct bpf_verifier_env *env)
9908{
9909 struct bpf_prog *prog = env->prog;
be8704ff 9910 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
5b92a28a 9911 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
38207291 9912 u32 btf_id = prog->aux->attach_btf_id;
f1b9509c 9913 const char prefix[] = "btf_trace_";
5b92a28a 9914 int ret = 0, subprog = -1, i;
fec56f58 9915 struct bpf_trampoline *tr;
38207291 9916 const struct btf_type *t;
5b92a28a 9917 bool conservative = true;
38207291 9918 const char *tname;
5b92a28a 9919 struct btf *btf;
fec56f58 9920 long addr;
5b92a28a 9921 u64 key;
38207291 9922
27ae7997
MKL
9923 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
9924 return check_struct_ops_btf_id(env);
9925
be8704ff 9926 if (prog->type != BPF_PROG_TYPE_TRACING && !prog_extension)
f1b9509c 9927 return 0;
38207291 9928
f1b9509c
AS
9929 if (!btf_id) {
9930 verbose(env, "Tracing programs must provide btf_id\n");
9931 return -EINVAL;
9932 }
5b92a28a
AS
9933 btf = bpf_prog_get_target_btf(prog);
9934 if (!btf) {
9935 verbose(env,
9936 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
9937 return -EINVAL;
9938 }
9939 t = btf_type_by_id(btf, btf_id);
f1b9509c
AS
9940 if (!t) {
9941 verbose(env, "attach_btf_id %u is invalid\n", btf_id);
9942 return -EINVAL;
9943 }
5b92a28a 9944 tname = btf_name_by_offset(btf, t->name_off);
f1b9509c
AS
9945 if (!tname) {
9946 verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
9947 return -EINVAL;
9948 }
5b92a28a
AS
9949 if (tgt_prog) {
9950 struct bpf_prog_aux *aux = tgt_prog->aux;
9951
9952 for (i = 0; i < aux->func_info_cnt; i++)
9953 if (aux->func_info[i].type_id == btf_id) {
9954 subprog = i;
9955 break;
9956 }
9957 if (subprog == -1) {
9958 verbose(env, "Subprog %s doesn't exist\n", tname);
9959 return -EINVAL;
9960 }
9961 conservative = aux->func_info_aux[subprog].unreliable;
be8704ff
AS
9962 if (prog_extension) {
9963 if (conservative) {
9964 verbose(env,
9965 "Cannot replace static functions\n");
9966 return -EINVAL;
9967 }
9968 if (!prog->jit_requested) {
9969 verbose(env,
9970 "Extension programs should be JITed\n");
9971 return -EINVAL;
9972 }
9973 env->ops = bpf_verifier_ops[tgt_prog->type];
9974 }
9975 if (!tgt_prog->jited) {
9976 verbose(env, "Can attach to only JITed progs\n");
9977 return -EINVAL;
9978 }
9979 if (tgt_prog->type == prog->type) {
9980 /* Cannot fentry/fexit another fentry/fexit program.
9981 * Cannot attach program extension to another extension.
9982 * It's ok to attach fentry/fexit to an extension program.
9983 */
9984 verbose(env, "Cannot recursively attach\n");
9985 return -EINVAL;
9986 }
9987 if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
9988 prog_extension &&
9989 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
9990 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
9991 /* Program extensions can extend all program types
9992 * except fentry/fexit. The reason is the following:
9993 * fentry/fexit programs are used for performance
9994 * analysis and stats, and can be attached to any
9995 * program type except themselves. When an extension
9996 * program replaces an XDP function, it is necessary
9997 * to allow performance analysis of all functions,
9998 * both the original XDP program and its extension.
9999 * Hence attaching fentry/fexit to BPF_PROG_TYPE_EXT
10000 * is allowed. If extending fentry/fexit were allowed,
10001 * it would be possible to create a long call chain
10002 * fentry->extension->fentry->extension beyond a
10003 * reasonable stack size. Hence extending fentry is
10004 * not allowed.
10005 */
10006 verbose(env, "Cannot extend fentry/fexit\n");
10007 return -EINVAL;
10008 }
5b92a28a
AS
10009 key = ((u64)aux->id) << 32 | btf_id;
10010 } else {
be8704ff
AS
10011 if (prog_extension) {
10012 verbose(env, "Cannot replace kernel functions\n");
10013 return -EINVAL;
10014 }
5b92a28a
AS
10015 key = btf_id;
10016 }
f1b9509c
AS
10017
10018 switch (prog->expected_attach_type) {
10019 case BPF_TRACE_RAW_TP:
5b92a28a
AS
10020 if (tgt_prog) {
10021 verbose(env,
10022 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
10023 return -EINVAL;
10024 }
38207291
MKL
10025 if (!btf_type_is_typedef(t)) {
10026 verbose(env, "attach_btf_id %u is not a typedef\n",
10027 btf_id);
10028 return -EINVAL;
10029 }
f1b9509c 10030 if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
38207291
MKL
10031 verbose(env, "attach_btf_id %u points to wrong type name %s\n",
10032 btf_id, tname);
10033 return -EINVAL;
10034 }
10035 tname += sizeof(prefix) - 1;
5b92a28a 10036 t = btf_type_by_id(btf, t->type);
38207291
MKL
10037 if (!btf_type_is_ptr(t))
10038 /* should never happen in a valid vmlinux build */
10039 return -EINVAL;
5b92a28a 10040 t = btf_type_by_id(btf, t->type);
38207291
MKL
10041 if (!btf_type_is_func_proto(t))
10042 /* should never happen in a valid vmlinux build */
10043 return -EINVAL;
10044
10045 /* remember two read-only pointers that are valid for
10046 * the lifetime of the kernel
10047 */
10048 prog->aux->attach_func_name = tname;
10049 prog->aux->attach_func_proto = t;
10050 prog->aux->attach_btf_trace = true;
f1b9509c 10051 return 0;
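/* Illustrative vmlinux BTF shape walked above (hedged; the tracepoint
 * and its signature are examples):
 *
 *	typedef void (*btf_trace_sched_switch)(void *, bool,
 *			struct task_struct *, struct task_struct *);
 *
 * attach_btf_id points at the typedef; the two btf_type_by_id() steps
 * resolve typedef -> ptr -> func_proto.
 */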
be8704ff
AS
10052 default:
10053 if (!prog_extension)
10054 return -EINVAL;
10055 /* fallthrough */
ae240823 10056 case BPF_MODIFY_RETURN:
fec56f58
AS
10057 case BPF_TRACE_FENTRY:
10058 case BPF_TRACE_FEXIT:
10059 if (!btf_type_is_func(t)) {
10060 verbose(env, "attach_btf_id %u is not a function\n",
10061 btf_id);
10062 return -EINVAL;
10063 }
be8704ff
AS
10064 if (prog_extension &&
10065 btf_check_type_match(env, prog, btf, t))
10066 return -EINVAL;
5b92a28a 10067 t = btf_type_by_id(btf, t->type);
fec56f58
AS
10068 if (!btf_type_is_func_proto(t))
10069 return -EINVAL;
5b92a28a 10070 tr = bpf_trampoline_lookup(key);
fec56f58
AS
10071 if (!tr)
10072 return -ENOMEM;
10073 prog->aux->attach_func_name = tname;
5b92a28a 10074 /* t is either a vmlinux type or another program's type */
fec56f58
AS
10075 prog->aux->attach_func_proto = t;
10076 mutex_lock(&tr->mutex);
10077 if (tr->func.addr) {
10078 prog->aux->trampoline = tr;
10079 goto out;
10080 }
5b92a28a
AS
10081 if (tgt_prog && conservative) {
10082 prog->aux->attach_func_proto = NULL;
10083 t = NULL;
10084 }
10085 ret = btf_distill_func_proto(&env->log, btf, t,
fec56f58
AS
10086 tname, &tr->func.model);
10087 if (ret < 0)
10088 goto out;
5b92a28a 10089 if (tgt_prog) {
e9eeec58
YS
10090 if (subprog == 0)
10091 addr = (long) tgt_prog->bpf_func;
10092 else
10093 addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
5b92a28a
AS
10094 } else {
10095 addr = kallsyms_lookup_name(tname);
10096 if (!addr) {
10097 verbose(env,
10098 "The address of function %s cannot be found\n",
10099 tname);
10100 ret = -ENOENT;
10101 goto out;
10102 }
fec56f58
AS
10103 }
10104 tr->func.addr = (void *)addr;
10105 prog->aux->trampoline = tr;
6ba43b76
KS
10106
10107 if (prog->expected_attach_type == BPF_MODIFY_RETURN)
10108 ret = check_attach_modify_return(env);
fec56f58
AS
10109out:
10110 mutex_unlock(&tr->mutex);
10111 if (ret)
10112 bpf_trampoline_put(tr);
10113 return ret;
38207291 10114 }
38207291
MKL
10115}
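/* Illustrative BPF-side sketch for the trampoline path above (hedged;
 * the target function is an example): an fentry program whose
 * attach_btf_id resolves to a kernel FUNC, which is walked to its
 * func_proto and distilled into tr->func.model:
 *
 *	SEC("fentry/tcp_connect")
 *	int BPF_PROG(trace_connect, struct sock *sk)
 *	{
 *		return 0;
 *	}
 */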
10116
838e9690
YS
10117int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
10118 union bpf_attr __user *uattr)
51580e79 10119{
06ee7115 10120 u64 start_time = ktime_get_ns();
58e2af8b 10121 struct bpf_verifier_env *env;
b9193c1b 10122 struct bpf_verifier_log *log;
9e4c24e7 10123 int i, len, ret = -EINVAL;
e2ae4ca2 10124 bool is_priv;
51580e79 10125
eba0c929
AB
10126 /* no program is valid */
10127 if (ARRAY_SIZE(bpf_verifier_ops) == 0)
10128 return -EINVAL;
10129
58e2af8b 10130 /* 'struct bpf_verifier_env' can be global, but since it's not small,
cbd35700
AS
10131 * allocate/free it every time bpf_check() is called
10132 */
58e2af8b 10133 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
cbd35700
AS
10134 if (!env)
10135 return -ENOMEM;
61bd5218 10136 log = &env->log;
cbd35700 10137
9e4c24e7 10138 len = (*prog)->len;
fad953ce 10139 env->insn_aux_data =
9e4c24e7 10140 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
3df126f3
JK
10141 ret = -ENOMEM;
10142 if (!env->insn_aux_data)
10143 goto err_free_env;
9e4c24e7
JK
10144 for (i = 0; i < len; i++)
10145 env->insn_aux_data[i].orig_idx = i;
9bac3d6d 10146 env->prog = *prog;
00176a34 10147 env->ops = bpf_verifier_ops[env->prog->type];
45a73c17 10148 is_priv = capable(CAP_SYS_ADMIN);
0246e64d 10149
8580ac94
AS
10150 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
10151 mutex_lock(&bpf_verifier_lock);
10152 if (!btf_vmlinux)
10153 btf_vmlinux = btf_parse_vmlinux();
10154 mutex_unlock(&bpf_verifier_lock);
10155 }
10156
cbd35700 10157 /* grab the mutex to protect a few globals used by the verifier */
45a73c17
AS
10158 if (!is_priv)
10159 mutex_lock(&bpf_verifier_lock);
cbd35700
AS
10160
10161 if (attr->log_level || attr->log_buf || attr->log_size) {
10162 /* user requested verbose verifier output
10163 * and supplied buffer to store the verification trace
10164 */
e7bf8249
JK
10165 log->level = attr->log_level;
10166 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
10167 log->len_total = attr->log_size;
cbd35700
AS
10168
10169 ret = -EINVAL;
e7bf8249 10170 /* log attributes have to be sane */
7a9f5c65 10171 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
06ee7115 10172 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
3df126f3 10173 goto err_unlock;
cbd35700 10174 }
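	/* Illustrative user-space setup that satisfies the checks above
	 * (hedged sketch of raw bpf(2) attribute usage, helpers omitted):
	 *
	 *	static char buf[1 << 20];
	 *	union bpf_attr attr = {};
	 *
	 *	attr.log_level = 1 | 2;        within BPF_LOG_MASK
	 *	attr.log_size = sizeof(buf);   >= 128 and <= UINT_MAX >> 2
	 *	attr.log_buf = (__u64)(unsigned long)buf;   non-NULL
	 */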
1ad2f583 10175
8580ac94
AS
10176 if (IS_ERR(btf_vmlinux)) {
10177 /* Either gcc or pahole or the kernel is broken. */
10178 verbose(env, "in-kernel BTF is malformed\n");
10179 ret = PTR_ERR(btf_vmlinux);
38207291 10180 goto skip_full_check;
8580ac94
AS
10181 }
10182
1ad2f583
DB
10183 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
10184 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
e07b98d9 10185 env->strict_alignment = true;
e9ee9efc
DM
10186 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
10187 env->strict_alignment = false;
cbd35700 10188
e2ae4ca2
JK
10189 env->allow_ptr_leaks = is_priv;
10190
10d274e8
AS
10191 if (is_priv)
10192 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
10193
f4e3ec0d
JK
10194 ret = replace_map_fd_with_map_ptr(env);
10195 if (ret < 0)
10196 goto skip_full_check;
10197
cae1927c 10198 if (bpf_prog_is_dev_bound(env->prog->aux)) {
a40a2632 10199 ret = bpf_prog_offload_verifier_prep(env->prog);
ab3f0063 10200 if (ret)
f4e3ec0d 10201 goto skip_full_check;
ab3f0063
JK
10202 }
10203
dc2a4ebc 10204 env->explored_states = kvcalloc(state_htab_size(env),
58e2af8b 10205 sizeof(struct bpf_verifier_state_list *),
f1bca824
AS
10206 GFP_USER);
10207 ret = -ENOMEM;
10208 if (!env->explored_states)
10209 goto skip_full_check;
10210
d9762e84 10211 ret = check_subprogs(env);
475fb78f
AS
10212 if (ret < 0)
10213 goto skip_full_check;
10214
c454a46b 10215 ret = check_btf_info(env, attr, uattr);
838e9690
YS
10216 if (ret < 0)
10217 goto skip_full_check;
10218
be8704ff
AS
10219 ret = check_attach_btf_id(env);
10220 if (ret)
10221 goto skip_full_check;
10222
d9762e84
MKL
10223 ret = check_cfg(env);
10224 if (ret < 0)
10225 goto skip_full_check;
10226
51c39bb1
AS
10227 ret = do_check_subprogs(env);
10228 ret = ret ?: do_check_main(env);
cbd35700 10229
c941ce9c
QM
10230 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
10231 ret = bpf_prog_offload_finalize(env);
10232
0246e64d 10233skip_full_check:
51c39bb1 10234 kvfree(env->explored_states);
0246e64d 10235
c131187d 10236 if (ret == 0)
9b38c405 10237 ret = check_max_stack_depth(env);
c131187d 10238
9b38c405 10239 /* instruction rewrites happen after this point */
e2ae4ca2
JK
10240 if (is_priv) {
10241 if (ret == 0)
10242 opt_hard_wire_dead_code_branches(env);
52875a04
JK
10243 if (ret == 0)
10244 ret = opt_remove_dead_code(env);
a1b14abc
JK
10245 if (ret == 0)
10246 ret = opt_remove_nops(env);
52875a04
JK
10247 } else {
10248 if (ret == 0)
10249 sanitize_dead_code(env);
e2ae4ca2
JK
10250 }
10251
9bac3d6d
AS
10252 if (ret == 0)
10253 /* program is valid, convert *(u32*)(ctx + off) accesses */
10254 ret = convert_ctx_accesses(env);
10255
e245c5c6 10256 if (ret == 0)
79741b3b 10257 ret = fixup_bpf_calls(env);
e245c5c6 10258
a4b1d3c1
JW
10259 /* do the 32-bit optimization after insn patching is done so those patched
10260 * insns can be handled correctly.
10261 */
d6c2308c
JW
10262 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
10263 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
10264 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
10265 : false;
a4b1d3c1
JW
10266 }
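	/* Illustrative effect of the zext pass (hedged): when the JIT does
	 * not implicitly zero the upper half after 32-bit ALU ops, a
	 * subregister definition such as
	 *
	 *	w1 = w2 + w3
	 *
	 * gets an explicit zero-extending move patched in after it, so the
	 * upper 32 bits of r1 are well defined for later 64-bit reads.
	 */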
10267
1ea47e01
AS
10268 if (ret == 0)
10269 ret = fixup_call_args(env);
10270
06ee7115
AS
10271 env->verification_time = ktime_get_ns() - start_time;
10272 print_verification_stats(env);
10273
a2a7d570 10274 if (log->level && bpf_verifier_log_full(log))
cbd35700 10275 ret = -ENOSPC;
a2a7d570 10276 if (log->level && !log->ubuf) {
cbd35700 10277 ret = -EFAULT;
a2a7d570 10278 goto err_release_maps;
cbd35700
AS
10279 }
10280
0246e64d
AS
10281 if (ret == 0 && env->used_map_cnt) {
10282 /* if program passed verifier, update used_maps in bpf_prog_info */
9bac3d6d
AS
10283 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
10284 sizeof(env->used_maps[0]),
10285 GFP_KERNEL);
0246e64d 10286
9bac3d6d 10287 if (!env->prog->aux->used_maps) {
0246e64d 10288 ret = -ENOMEM;
a2a7d570 10289 goto err_release_maps;
0246e64d
AS
10290 }
10291
9bac3d6d 10292 memcpy(env->prog->aux->used_maps, env->used_maps,
0246e64d 10293 sizeof(env->used_maps[0]) * env->used_map_cnt);
9bac3d6d 10294 env->prog->aux->used_map_cnt = env->used_map_cnt;
0246e64d
AS
10295
10296 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
10297 * bpf_ld_imm64 instructions
10298 */
10299 convert_pseudo_ld_imm64(env);
10300 }
cbd35700 10301
ba64e7d8
YS
10302 if (ret == 0)
10303 adjust_btf_func(env);
10304
a2a7d570 10305err_release_maps:
9bac3d6d 10306 if (!env->prog->aux->used_maps)
0246e64d 10307 /* if we didn't copy map pointers into bpf_prog_info, release
ab7f5bf0 10308 * them now. Otherwise free_used_maps() will release them.
0246e64d
AS
10309 */
10310 release_maps(env);
9bac3d6d 10311 *prog = env->prog;
3df126f3 10312err_unlock:
45a73c17
AS
10313 if (!is_priv)
10314 mutex_unlock(&bpf_verifier_lock);
3df126f3
JK
10315 vfree(env->insn_aux_data);
10316err_free_env:
10317 kfree(env);
51580e79
AS
10318 return ret;
10319}