// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

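/* Allocate a bpf_prog together with its aux data, but without the
 * per-CPU statistics. The allocation is rounded up to whole pages;
 * fp->pages records the count, which is what memory accounting and
 * the later read-only mapping of the image are based on.
 */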
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->aux->stats) {
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->aux->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
					 sizeof(*prog->aux->jited_linfo),
					 GFP_KERNEL | __GFP_NOWARN);
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
{
	kfree(prog->aux->jited_linfo);
	prog->aux->jited_linfo = NULL;
}

void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
		bpf_prog_free_jited_linfo(prog);
}

/* The jit engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off. Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the last byte of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog. The insn off
 *      here is relative to the main prog.
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}

void bpf_prog_free_linfo(struct bpf_prog *prog)
{
	bpf_prog_free_jited_linfo(prog);
	kvfree(prog->aux->linfo);
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		free_percpu(fp->aux->stats);
		kfree(fp->aux);
	}
	vfree(fp);
}

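/* Compute the program tag: an SHA-1 digest over the instruction
 * stream. Map fds embedded in ld_imm64 instructions are zeroed out
 * first, since fd numbers are not stable across program loads.
 */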
int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since they are unstable from user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 delta = end_new - end_old;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}

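/* Walk all instructions and fix up jump offsets and pseudo-call
 * immediates that cross the region being patched. With probe_pass
 * set, only check for overflow without modifying the program.
 */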
static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

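/* Replace the single instruction at @off with the @len instructions
 * in @patch, growing the program when needed and adjusting branch
 * targets that span the patched region.
 */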
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed: we cannot revert to the original
	 * state. An overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;
long bpf_jit_limit   __read_mostly;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We should need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_name(aux->prog, sym);

		*value = (unsigned long)aux->prog->bpf_func;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);

static int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

static void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

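/* Allocate executable memory for a JIT image, charged against the
 * bpf_jit_limit. The image is pre-filled with illegal instructions
 * and the program is placed at a randomized start offset within it.
 */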
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

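/* Rewrite one instruction for constant blinding: the immediate is
 * replaced by loading it XORed with a random value into AX, undoing
 * the XOR at runtime and then using AX as the source operand.
 */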
static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX on some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
				      off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

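/* Blind all constant-bearing instructions in the program by running
 * each one through bpf_jit_blind_insn(). The rewrite happens on a
 * clone so that the original program stays intact if patching fails.
 */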
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return tmp;
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)		\
	/* 32 bit ALU operations. */		\
	/* Register based. */			\
	INSN_3(ALU, ADD,  X),			\
	INSN_3(ALU, SUB,  X),			\
	INSN_3(ALU, AND,  X),			\
	INSN_3(ALU, OR,   X),			\
	INSN_3(ALU, LSH,  X),			\
	INSN_3(ALU, RSH,  X),			\
	INSN_3(ALU, XOR,  X),			\
	INSN_3(ALU, MUL,  X),			\
	INSN_3(ALU, MOV,  X),			\
	INSN_3(ALU, ARSH, X),			\
	INSN_3(ALU, DIV,  X),			\
	INSN_3(ALU, MOD,  X),			\
	INSN_2(ALU, NEG),			\
	INSN_3(ALU, END, TO_BE),		\
	INSN_3(ALU, END, TO_LE),		\
	/* Immediate based. */			\
	INSN_3(ALU, ADD,  K),			\
	INSN_3(ALU, SUB,  K),			\
	INSN_3(ALU, AND,  K),			\
	INSN_3(ALU, OR,   K),			\
	INSN_3(ALU, LSH,  K),			\
	INSN_3(ALU, RSH,  K),			\
	INSN_3(ALU, XOR,  K),			\
	INSN_3(ALU, MUL,  K),			\
	INSN_3(ALU, MOV,  K),			\
	INSN_3(ALU, ARSH, K),			\
	INSN_3(ALU, DIV,  K),			\
	INSN_3(ALU, MOD,  K),			\
	/* 64 bit ALU operations. */		\
	/* Register based. */			\
	INSN_3(ALU64, ADD,  X),			\
	INSN_3(ALU64, SUB,  X),			\
	INSN_3(ALU64, AND,  X),			\
	INSN_3(ALU64, OR,   X),			\
	INSN_3(ALU64, LSH,  X),			\
	INSN_3(ALU64, RSH,  X),			\
	INSN_3(ALU64, XOR,  X),			\
	INSN_3(ALU64, MUL,  X),			\
	INSN_3(ALU64, MOV,  X),			\
	INSN_3(ALU64, ARSH, X),			\
	INSN_3(ALU64, DIV,  X),			\
	INSN_3(ALU64, MOD,  X),			\
	INSN_2(ALU64, NEG),			\
	/* Immediate based. */			\
	INSN_3(ALU64, ADD,  K),			\
	INSN_3(ALU64, SUB,  K),			\
	INSN_3(ALU64, AND,  K),			\
	INSN_3(ALU64, OR,   K),			\
	INSN_3(ALU64, LSH,  K),			\
	INSN_3(ALU64, RSH,  K),			\
	INSN_3(ALU64, XOR,  K),			\
	INSN_3(ALU64, MUL,  K),			\
	INSN_3(ALU64, MOV,  K),			\
	INSN_3(ALU64, ARSH, K),			\
	INSN_3(ALU64, DIV,  K),			\
	INSN_3(ALU64, MOD,  K),			\
	/* Call instruction. */			\
	INSN_2(JMP, CALL),			\
	/* Exit instruction. */			\
	INSN_2(JMP, EXIT),			\
	/* 32-bit Jump instructions. */		\
	/* Register based. */			\
	INSN_3(JMP32, JEQ,  X),			\
	INSN_3(JMP32, JNE,  X),			\
	INSN_3(JMP32, JGT,  X),			\
	INSN_3(JMP32, JLT,  X),			\
	INSN_3(JMP32, JGE,  X),			\
	INSN_3(JMP32, JLE,  X),			\
	INSN_3(JMP32, JSGT, X),			\
	INSN_3(JMP32, JSLT, X),			\
	INSN_3(JMP32, JSGE, X),			\
	INSN_3(JMP32, JSLE, X),			\
	INSN_3(JMP32, JSET, X),			\
	/* Immediate based. */			\
	INSN_3(JMP32, JEQ,  K),			\
	INSN_3(JMP32, JNE,  K),			\
	INSN_3(JMP32, JGT,  K),			\
	INSN_3(JMP32, JLT,  K),			\
	INSN_3(JMP32, JGE,  K),			\
	INSN_3(JMP32, JLE,  K),			\
	INSN_3(JMP32, JSGT, K),			\
	INSN_3(JMP32, JSLT, K),			\
	INSN_3(JMP32, JSGE, K),			\
	INSN_3(JMP32, JSLE, K),			\
	INSN_3(JMP32, JSET, K),			\
	/* Jump instructions. */		\
	/* Register based. */			\
	INSN_3(JMP, JEQ,  X),			\
	INSN_3(JMP, JNE,  X),			\
	INSN_3(JMP, JGT,  X),			\
	INSN_3(JMP, JLT,  X),			\
	INSN_3(JMP, JGE,  X),			\
	INSN_3(JMP, JLE,  X),			\
	INSN_3(JMP, JSGT, X),			\
	INSN_3(JMP, JSLT, X),			\
	INSN_3(JMP, JSGE, X),			\
	INSN_3(JMP, JSLE, X),			\
	INSN_3(JMP, JSET, X),			\
	/* Immediate based. */			\
	INSN_3(JMP, JEQ,  K),			\
	INSN_3(JMP, JNE,  K),			\
	INSN_3(JMP, JGT,  K),			\
	INSN_3(JMP, JLT,  K),			\
	INSN_3(JMP, JGE,  K),			\
	INSN_3(JMP, JLE,  K),			\
	INSN_3(JMP, JSGT, K),			\
	INSN_3(JMP, JSLT, K),			\
	INSN_3(JMP, JSGE, K),			\
	INSN_3(JMP, JSLE, K),			\
	INSN_3(JMP, JSET, K),			\
	INSN_2(JMP, JA),			\
	/* Store instructions. */		\
	/* Register based. */			\
	INSN_3(STX, MEM,  B),			\
	INSN_3(STX, MEM,  H),			\
	INSN_3(STX, MEM,  W),			\
	INSN_3(STX, MEM,  DW),			\
	INSN_3(STX, XADD, W),			\
	INSN_3(STX, XADD, DW),			\
	/* Immediate based. */			\
	INSN_3(ST, MEM, B),			\
	INSN_3(ST, MEM, H),			\
	INSN_3(ST, MEM, W),			\
	INSN_3(ST, MEM, DW),			\
	/* Load instructions. */		\
	/* Register based. */			\
	INSN_3(LDX, MEM, B),			\
	INSN_3(LDX, MEM, H),			\
	INSN_3(LDX, MEM, W),			\
	INSN_3(LDX, MEM, DW),			\
	/* Immediate based. */			\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 * ___bpf_prog_run - run eBPF program on a given context
 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 * @insn: is the array of eBPF instructions
 * @stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void * const jumptable[256] __annotate_jump_table = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU_ARSH_X:
		DST = (u64) (u32) (((s32) DST) >> SRC);
		CONT;
	ALU_ARSH_K:
		DST = (u64) (u32) (((s32) DST) >> IMM);
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &AX);
		DST = AX;
		CONT;
	ALU_MOD_X:
		AX = (u32) DST;
		DST = do_div(AX, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &AX);
		DST = AX;
		CONT;
	ALU_MOD_K:
		AX = (u32) DST;
		DST = do_div(AX, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		AX = (u32) DST;
		do_div(AX, (u32) SRC);
		DST = (u32) AX;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		AX = (u32) DST;
		do_div(AX, (u32) IMM);
		DST = (u32) AX;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_EXIT:
		return BPF_R0;
	/* JMP */
#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
	JMP_##OPCODE##_X:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_X:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP_##OPCODE##_K:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_K:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;
	COND_JMP(u, JEQ, ==)
	COND_JMP(u, JNE, !=)
	COND_JMP(u, JGT, >)
	COND_JMP(u, JLT, <)
	COND_JMP(u, JGE, >=)
	COND_JMP(u, JLE, <=)
	COND_JMP(u, JSET, &)
	COND_JMP(s, JSGT, >)
	COND_JMP(s, JSLT, <)
	COND_JMP(s, JSGE, >=)
	COND_JMP(s, JSLE, <=)
#undef COND_JMP
	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
		BUG_ON(1);
		return 0;
}

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}

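/* Instantiate one interpreter entry point per supported stack size,
 * in 32-byte steps from 32 up to 512 bytes, so a program only pays
 * for the stack depth the verifier determined it actually needs.
 */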
#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		    __bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}

#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (fp->kprobe_override)
		return false;

	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

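/* All programs referenced by a prog array must agree on type and
 * JITed-ness, otherwise a tail call could jump from a JITed image
 * into an incompatible one. Check every prog array map the program
 * uses against its properties.
 */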
static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}

/**
 * bpf_prog_select_runtime - select exec runtime for BPF program
 * @fp: bpf_prog populated with internal BPF program
 * @err: pointer to error variable
 *
 * Try to JIT the eBPF program; if no JIT is available, use the
 * interpreter. The BPF program will be executed via the
 * BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* In case of BPF to BPF calls, verifier did all the prep
	 * work with regards to JITing, etc.
	 */
	if (fp->bpf_func)
		goto finalize;

	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		*err = bpf_prog_alloc_jited_linfo(fp);
		if (*err)
			return fp;

		fp = bpf_int_jit_compile(fp);
		if (!fp->jited) {
			bpf_prog_free_jited_linfo(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
			*err = -ENOTSUPP;
			return fp;
#endif
		} else {
			bpf_prog_free_unused_jited_linfo(fp);
		}
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}

finalize:
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine, if we deal
	 * with JITed or non JITed program concatenations and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

/* To avoid allocating empty bpf_prog_array for cgroups that
 * don't have bpf program attached, use one global 'empty_prog_array'.
 * It will not be modified by the caller of bpf_prog_array_alloc()
 * (since the caller requested prog_cnt == 0); that pointer should be
 * 'freed' by bpf_prog_array_free().
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
	.null_prog = NULL,
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog_array_item) *
			       (prog_cnt + 1),
			       flags);

	return &empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array *progs)
{
	if (!progs || progs == &empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

int bpf_prog_array_length(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	return cnt;
}

bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			return false;
	return true;
}

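/* Copy up to request_cnt prog ids into prog_ids, skipping dummy
 * entries. Returns true if the array holds more programs than were
 * copied, which the caller reports as -ENOSPC.
 */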
54e9c9d4 1821static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
3a38bb98
YS
1822 u32 *prog_ids,
1823 u32 request_cnt)
1824{
394e40a2 1825 struct bpf_prog_array_item *item;
3a38bb98
YS
1826 int i = 0;
1827
54e9c9d4 1828 for (item = array->items; item->prog; item++) {
394e40a2 1829 if (item->prog == &dummy_bpf_prog.prog)
3a38bb98 1830 continue;
394e40a2 1831 prog_ids[i] = item->prog->aux->id;
3a38bb98 1832 if (++i == request_cnt) {
394e40a2 1833 item++;
3a38bb98
YS
1834 break;
1835 }
1836 }
1837
394e40a2 1838 return !!(item->prog);
3a38bb98
YS
1839}
1840
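/* Illustrative sketch, not part of the original file: the bool returned
 * above means "entries remained after request_cnt ids were copied out".
 * The callers below turn that truncation signal into -ENOSPC; the helper
 * name here is hypothetical.
 */
static int example_fetch_ids(struct bpf_prog_array *array, u32 *ids, u32 n)
{
	return bpf_prog_array_copy_core(array, ids, n) ? -ENOSPC : 0;
}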
54e9c9d4 1841int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
468e2f64
AS
1842 __u32 __user *prog_ids, u32 cnt)
1843{
0911287c 1844 unsigned long err = 0;
0911287c 1845 bool nospc;
3a38bb98 1846 u32 *ids;
0911287c
AS
1847
1848 /* Users of this function are doing:
1849 * cnt = bpf_prog_array_length();
1850 * if (cnt > 0)
1851 * bpf_prog_array_copy_to_user(..., cnt);
54e9c9d4 1852 * so the kcalloc below doesn't need an extra cnt > 0 check.
0911287c 1853 */
9c481b90 1854 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
0911287c
AS
1855 if (!ids)
1856 return -ENOMEM;
394e40a2 1857 nospc = bpf_prog_array_copy_core(array, ids, cnt);
0911287c
AS
1858 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1859 kfree(ids);
1860 if (err)
1861 return -EFAULT;
1862 if (nospc)
468e2f64
AS
1863 return -ENOSPC;
1864 return 0;
1865}
1866
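/* Illustrative sketch, not part of the original file: the query pattern the
 * comment above assumes, roughly the shape of the cgroup/perf query paths.
 * 'uids' and 'ucnt' are hypothetical stand-ins for user-supplied query
 * parameters.
 */
static int example_query_ids(struct bpf_prog_array *array,
			     __u32 __user *uids, u32 ucnt)
{
	u32 cnt = bpf_prog_array_length(array);

	if (!cnt)
		return 0;
	/* -ENOSPC tells user space to retry with a bigger buffer */
	return bpf_prog_array_copy_to_user(array, uids, min(cnt, ucnt));
}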
54e9c9d4 1867void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
e87c6bc3
YS
1868 struct bpf_prog *old_prog)
1869{
54e9c9d4 1870 struct bpf_prog_array_item *item;
e87c6bc3 1871
54e9c9d4 1872 for (item = array->items; item->prog; item++)
394e40a2
RG
1873 if (item->prog == old_prog) {
1874 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
e87c6bc3
YS
1875 break;
1876 }
1877}
1878
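/* Illustrative sketch, not part of the original file: deletion replaces the
 * slot with dummy_bpf_prog instead of compacting the array, so concurrent
 * RCU readers never see the NULL terminator move; the dummy simply returns
 * 1. Assuming 'old' is attached exactly once:
 */
static void example_detach(struct bpf_prog_array *array, struct bpf_prog *old)
{
	int before = bpf_prog_array_length(array);

	bpf_prog_array_delete_safe(array, old);
	/* the reported length drops by one, the layout stays unchanged */
	WARN_ON(bpf_prog_array_length(array) != before - 1);
}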
54e9c9d4 1879int bpf_prog_array_copy(struct bpf_prog_array *old_array,
e87c6bc3
YS
1880 struct bpf_prog *exclude_prog,
1881 struct bpf_prog *include_prog,
1882 struct bpf_prog_array **new_array)
1883{
1884 int new_prog_cnt, carry_prog_cnt = 0;
394e40a2 1885 struct bpf_prog_array_item *existing;
e87c6bc3 1886 struct bpf_prog_array *array;
170a7e3e 1887 bool found_exclude = false;
e87c6bc3
YS
1888 int new_prog_idx = 0;
1889
1890 /* Figure out how many existing progs we need to carry over to
1891 * the new array.
1892 */
1893 if (old_array) {
394e40a2
RG
1894 existing = old_array->items;
1895 for (; existing->prog; existing++) {
1896 if (existing->prog == exclude_prog) {
170a7e3e
SY
1897 found_exclude = true;
1898 continue;
1899 }
394e40a2 1900 if (existing->prog != &dummy_bpf_prog.prog)
e87c6bc3 1901 carry_prog_cnt++;
394e40a2 1902 if (existing->prog == include_prog)
e87c6bc3
YS
1903 return -EEXIST;
1904 }
1905 }
1906
170a7e3e
SY
1907 if (exclude_prog && !found_exclude)
1908 return -ENOENT;
1909
e87c6bc3
YS
1910 /* How many progs (not NULL) will be in the new array? */
1911 new_prog_cnt = carry_prog_cnt;
1912 if (include_prog)
1913 new_prog_cnt += 1;
1914
1915 /* Do we have any prog (not NULL) in the new array? */
1916 if (!new_prog_cnt) {
1917 *new_array = NULL;
1918 return 0;
1919 }
1920
1921 /* +1 as the end of prog_array is marked with NULL */
1922 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1923 if (!array)
1924 return -ENOMEM;
1925
1926 /* Fill in the new prog array */
1927 if (carry_prog_cnt) {
394e40a2
RG
1928 existing = old_array->items;
1929 for (; existing->prog; existing++)
1930 if (existing->prog != exclude_prog &&
1931 existing->prog != &dummy_bpf_prog.prog) {
1932 array->items[new_prog_idx++].prog =
1933 existing->prog;
1934 }
e87c6bc3
YS
1935 }
1936 if (include_prog)
394e40a2
RG
1937 array->items[new_prog_idx++].prog = include_prog;
1938 array->items[new_prog_idx].prog = NULL;
e87c6bc3
YS
1939 *new_array = array;
1940 return 0;
1941}
1942
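/* Illustrative sketch, not part of the original file: attach/detach as a
 * copy-on-write replacement, roughly the shape of the perf-event attach
 * path (which runs under bpf_event_mutex). 'array_ptr' is a hypothetical
 * stand-in for the RCU-protected attach point.
 */
static int example_replace(struct bpf_prog_array **array_ptr,
			   struct bpf_prog *detach, struct bpf_prog *attach)
{
	struct bpf_prog_array *old_array = *array_ptr, *new_array;
	int err;

	err = bpf_prog_array_copy(old_array, detach, attach, &new_array);
	if (err)
		return err; /* -EEXIST, -ENOENT or -ENOMEM */

	*array_ptr = new_array;		/* real code: rcu_assign_pointer() */
	bpf_prog_array_free(old_array);	/* kfree_rcu() defers the release */
	return 0;
}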
54e9c9d4 1943int bpf_prog_array_copy_info(struct bpf_prog_array *array,
3a38bb98
YS
1944 u32 *prog_ids, u32 request_cnt,
1945 u32 *prog_cnt)
f371b304
YS
1946{
1947 u32 cnt = 0;
1948
1949 if (array)
1950 cnt = bpf_prog_array_length(array);
1951
3a38bb98 1952 *prog_cnt = cnt;
f371b304
YS
1953
1954 /* return early if the user requested only the program count or there is nothing to copy */
1955 if (!request_cnt || !cnt)
1956 return 0;
1957
3a38bb98 1958 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
394e40a2 1959 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
3a38bb98 1960 : 0;
f371b304
YS
1961}
1962
60a3b225
DB
1963static void bpf_prog_free_deferred(struct work_struct *work)
1964{
09756af4 1965 struct bpf_prog_aux *aux;
1c2a088a 1966 int i;
60a3b225 1967
09756af4 1968 aux = container_of(work, struct bpf_prog_aux, work);
ab3f0063
JK
1969 if (bpf_prog_is_dev_bound(aux))
1970 bpf_prog_offload_destroy(aux->prog);
c195651e
YS
1971#ifdef CONFIG_PERF_EVENTS
1972 if (aux->prog->has_callchain_buf)
1973 put_callchain_buffers();
1974#endif
1c2a088a
AS
1975 for (i = 0; i < aux->func_cnt; i++)
1976 bpf_jit_free(aux->func[i]);
1977 if (aux->func_cnt) {
1978 kfree(aux->func);
1979 bpf_prog_unlock_free(aux->prog);
1980 } else {
1981 bpf_jit_free(aux->prog);
1982 }
60a3b225
DB
1983}
1984
1985/* Free internal BPF program */
7ae457c1 1986void bpf_prog_free(struct bpf_prog *fp)
f5bffecd 1987{
09756af4 1988 struct bpf_prog_aux *aux = fp->aux;
60a3b225 1989
09756af4 1990 INIT_WORK(&aux->work, bpf_prog_free_deferred);
09756af4 1991 schedule_work(&aux->work);
f5bffecd 1992}
7ae457c1 1993EXPORT_SYMBOL_GPL(bpf_prog_free);
f89b7755 1994
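/* Illustrative sketch, not part of the original file: bpf_prog_free() above
 * defers the real teardown to a workqueue so it stays callable from atomic
 * contexts, since bpf_jit_free()/vfree() may sleep. The same INIT_WORK plus
 * schedule_work() shape works for any deferred release; all names here are
 * hypothetical.
 */
struct example_obj {
	struct work_struct work;
	void *payload;
};

static void example_free_deferred(struct work_struct *work)
{
	struct example_obj *obj = container_of(work, struct example_obj, work);

	vfree(obj->payload); /* may sleep; fine in worker context */
	kfree(obj);
}

static void example_free(struct example_obj *obj)
{
	INIT_WORK(&obj->work, example_free_deferred);
	schedule_work(&obj->work);
}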
3ad00405
DB
1995/* RNG for unprivileged user space with state separated from prandom_u32(). */
1996static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1997
1998void bpf_user_rnd_init_once(void)
1999{
2000 prandom_init_once(&bpf_user_rnd_state);
2001}
2002
f3694e00 2003BPF_CALL_0(bpf_user_rnd_u32)
3ad00405
DB
2004{
2005 /* Should someone ever have the rather unwise idea to use some
2006 * of the registers passed into this function, then note that
2007 * this function is called from native eBPF and classic-to-eBPF
2008 * transformations. Register assignments from both sides are
2009 * different, e.g. classic always sets fn(ctx, A, X) here.
2010 */
2011 struct rnd_state *state;
2012 u32 res;
2013
2014 state = &get_cpu_var(bpf_user_rnd_state);
2015 res = prandom_u32_state(state);
b761fe22 2016 put_cpu_var(bpf_user_rnd_state);
3ad00405
DB
2017
2018 return res;
2019}
2020
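/* Illustrative sketch, not part of the original file: get_cpu_var() above
 * disables preemption while the per-CPU state is live, so two tasks on the
 * same CPU cannot interleave updates; put_cpu_var() re-enables it. Any
 * per-CPU scratch state follows the same shape ('example_counter' is
 * hypothetical):
 */
static DEFINE_PER_CPU(u64, example_counter);

static u64 example_bump(void)
{
	u64 *cnt;
	u64 val;

	cnt = &get_cpu_var(example_counter); /* preemption disabled */
	val = ++(*cnt);
	put_cpu_var(example_counter);	     /* preemption re-enabled */
	return val;
}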
3ba67dab
DB
2021/* Weak definitions of helper functions in case we don't have bpf syscall. */
2022const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2023const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2024const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
f1a2e44a
MV
2025const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2026const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2027const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
d83525ca
AS
2028const struct bpf_func_proto bpf_spin_lock_proto __weak;
2029const struct bpf_func_proto bpf_spin_unlock_proto __weak;
3ba67dab 2030
03e69b50 2031const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
c04167ce 2032const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2d0e30c3 2033const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
17ca8cbf 2034const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
bd570ff9 2035
ffeedafb
AS
2036const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2037const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2038const struct bpf_func_proto bpf_get_current_comm_proto __weak;
bf6fa2c8 2039const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
cd339431 2040const struct bpf_func_proto bpf_get_local_storage_proto __weak;
bd570ff9 2041
0756ea3e
AS
2042const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2043{
2044 return NULL;
2045}
03e69b50 2046
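/* Illustrative sketch, not part of the original file: the __weak definitions
 * above let this file link even when the syscall/tracing side is compiled
 * out; a strong definition elsewhere silently wins at link time. The general
 * pattern ('example_hook' is hypothetical):
 */
int __weak example_hook(void)
{
	return -ENOTSUPP; /* fallback if no strong example_hook() is linked */
}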
555c8a86
DB
2047u64 __weak
2048bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2049 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
bd570ff9 2050{
555c8a86 2051 return -ENOTSUPP;
bd570ff9 2052}
6cb5fb38 2053EXPORT_SYMBOL_GPL(bpf_event_output);
bd570ff9 2054
3324b584
DB
2055/* Always built-in helper functions. */
2056const struct bpf_func_proto bpf_tail_call_proto = {
2057 .func = NULL,
2058 .gpl_only = false,
2059 .ret_type = RET_VOID,
2060 .arg1_type = ARG_PTR_TO_CTX,
2061 .arg2_type = ARG_CONST_MAP_PTR,
2062 .arg3_type = ARG_ANYTHING,
2063};
2064
9383191d
DB
2065/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2066 * JIT authors are encouraged to implement bpf_int_jit_compile() instead,
2067 * so that eBPF and, implicitly, also cBPF can get JITed!
2068 */
d1c55ab5 2069struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
3324b584 2070{
d1c55ab5 2071 return prog;
3324b584
DB
2072}
2073
9383191d
DB
2074/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2075 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2076 */
2077void __weak bpf_jit_compile(struct bpf_prog *prog)
2078{
2079}
2080
17bedab2 2081bool __weak bpf_helper_changes_pkt_data(void *func)
969bf05e
AS
2082{
2083 return false;
2084}
2085
a4b1d3c1
JW
2086/* Return TRUE if the JIT backend wants the verifier to enable sub-register
2087 * usage analysis and to insert explicit zero extensions for it.
2088 * Otherwise, return FALSE.
2089 */
2090bool __weak bpf_jit_needs_zext(void)
2091{
2092 return false;
2093}
2094
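/* Illustrative sketch, not part of the original file: a JIT backend opts in
 * by shipping a strong definition in its arch code, as e.g. the RISC-V JIT
 * does (shown in a comment here, since a second definition in this file
 * would clash with the weak one above):
 *
 *	bool bpf_jit_needs_zext(void)
 *	{
 *		return true;
 *	}
 */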
f89b7755
AS
2095/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2096 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2097 */
2098int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2099 int len)
2100{
2101 return -EFAULT;
2102}
a67edbf4 2103
492ecee8
AS
2104DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2105EXPORT_SYMBOL(bpf_stats_enabled_key);
492ecee8 2106
a67edbf4
DB
2107/* All definitions of tracepoints related to BPF. */
2108#define CREATE_TRACE_POINTS
2109#include <linux/bpf_trace.h>
2110
2111EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
e7d47989 2112EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);