// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 * Jay Schulist <jschlst@samba.org>
 * Alexei Starovoitov <ast@plumgrid.com>
 * Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

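/* Illustrative note (not in the original file): classic BPF encodes
 * "load relative to a header" as a large negative offset, e.g.
 * k = SKF_NET_OFF + 12 reads the byte 12 past the network header, and
 * SKF_LL_OFF does the same relative to the MAC header. The bounds
 * check above then rejects anything outside the linear skb data.
 */
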
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->aux->stats) {
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->aux->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
					 sizeof(*prog->aux->jited_linfo),
					 GFP_KERNEL | __GFP_NOWARN);
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
{
	kfree(prog->aux->jited_linfo);
	prog->aux->jited_linfo = NULL;
}

void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
		bpf_prog_free_jited_linfo(prog);
}

/* The JIT engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the last byte of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog.  The insn off
 *      here is relative to the main prog.
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
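/* Worked example (illustrative numbers, not from the source): for a
 * subprog whose first insn sits at main-prog offset 10 (insn_start = 10),
 * a line info entry with linfo[1].insn_off = 12 and a JIT reporting
 * insn_to_jit_off[1] = 24 yields
 *
 *	jited_linfo[1] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *	               = prog->bpf_func + 24;
 */
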
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}

void bpf_prog_free_linfo(struct bpf_prog *prog)
{
	bpf_prog_free_jited_linfo(prog);
	kvfree(prog->aux->linfo);
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		free_percpu(fp->aux->stats);
		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since they are unstable from user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

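/* Side note (hedged, not part of this file): the truncated digest stored
 * in fp->tag above is the program tag that user space sees, e.g. as
 * "prog_tag" in /proc/<pid>/fdinfo/<fd> or via bpftool:
 *
 *	$ bpftool prog show
 *	17: socket_filter  tag 2f88b9e60224fa19  ...
 *
 * Zeroing the map fds above is what keeps this tag stable across loads,
 * since fd numbers differ from run to run.
 */
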
static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 delta = end_new - end_old;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

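/* Worked example (illustrative): patching one insn at pos = 5 into a
 * 3-insn patchlet gives end_old = 6, end_new = 8, so delta = 2. A jump
 * at curr = 2 with off = 4 targets insn 2 + 4 + 1 = 7 >= end_old, i.e.
 * it crosses the patchlet, so its offset is bumped to 6; a jump whose
 * target lands before the patchlet keeps its offset unchanged.
 */
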
static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed to reverse to the original state. An
	 * overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}

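/* Minimal usage sketch (hedged; mirrors how callers such as the verifier
 * drive this API, locals are hypothetical):
 *
 *	struct bpf_insn patchlet[] = {
 *		BPF_MOV64_IMM(BPF_REG_AX, 0),
 *		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_AX),
 *	};
 *	struct bpf_prog *new_prog;
 *
 *	new_prog = bpf_patch_insn_single(prog, insn_idx, patchlet,
 *					 ARRAY_SIZE(patchlet));
 *	if (IS_ERR(new_prog))
 *		return PTR_ERR(new_prog);
 *	prog = new_prog;	// old prog may have been freed by realloc
 */
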
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;
long bpf_jit_limit   __read_mostly;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We should need +1 for the '_' preceding
		      * the name.  However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

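/* Resulting symbol layout (illustrative tag value):
 *
 *	bpf_prog_8937c9e4e2f667bd		(no name available)
 *	bpf_prog_8937c9e4e2f667bd_my_filter	(aux->name or BTF func name)
 *
 * i.e. "bpf_prog_" + 16 hex chars of the 8-byte tag + optional "_<name>".
 */
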
static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct bpf_prog *prog;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (!prog)
		goto out;
	if (!prog->aux->num_exentries)
		goto out;

	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
	rcu_read_unlock();
	return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_name(aux->prog, sym);

		*value = (unsigned long)aux->prog->bpf_func;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	static const u32 poke_tab_max = 1024;
	u32 slot = prog->aux->size_poke_tab;
	u32 size = slot + 1;

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->ip || poke->ip_stable || poke->adj_off)
		return -EINVAL;

	switch (poke->reason) {
	case BPF_POKE_REASON_TAIL_CALL:
		if (!poke->tail_call.map)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;

	memcpy(&tab[slot], poke, sizeof(*poke));
	prog->aux->size_poke_tab = size;
	prog->aux->poke_tab = tab;

	return slot;
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
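
/* Example of the derived limit (hedged, hypothetical numbers): with a
 * 1 GiB executable area, the default charge limit comes out to a quarter
 * of it, page aligned:
 *
 *	bpf_jit_limit = round_up((1ULL << 30) >> 2, PAGE_SIZE);  // 256 MiB
 */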

static int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

static void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

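/* Hedged sketch of typical arch JIT usage (names are illustrative, the
 * fill callback is arch-specific):
 *
 *	struct bpf_binary_header *header;
 *	u8 *image;
 *
 *	header = bpf_jit_binary_alloc(proglen, &image, 4, jit_fill_hole);
 *	if (!header)
 *		return orig_prog;
 *	// ... emit instructions into image ...
 *	bpf_jit_binary_lock_ro(header);
 *	prog->bpf_func = (void *)image;
 *	prog->jited = 1;
 */
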
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff,
			      bool emit_zext)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX in some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
				      off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		if (emit_zext)
			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
		*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

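/* Example rewrite (illustrative, with imm_rnd = 0x12345678): the insn
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 42)
 *
 * comes out of the ALU64 case above as
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x12345678 ^ 42);
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x12345678);
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_AX);
 *
 * so the user-controlled immediate never appears verbatim in the JITed
 * image, only values already mixed with the per-insn random constant.
 */
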
static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
					       clone->aux->verifier_zext);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return tmp;
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)	\
	/* 32 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU, ADD, X),		\
	INSN_3(ALU, SUB, X),		\
	INSN_3(ALU, AND, X),		\
	INSN_3(ALU, OR, X),		\
	INSN_3(ALU, LSH, X),		\
	INSN_3(ALU, RSH, X),		\
	INSN_3(ALU, XOR, X),		\
	INSN_3(ALU, MUL, X),		\
	INSN_3(ALU, MOV, X),		\
	INSN_3(ALU, ARSH, X),		\
	INSN_3(ALU, DIV, X),		\
	INSN_3(ALU, MOD, X),		\
	INSN_2(ALU, NEG),		\
	INSN_3(ALU, END, TO_BE),	\
	INSN_3(ALU, END, TO_LE),	\
	/* Immediate based. */		\
	INSN_3(ALU, ADD, K),		\
	INSN_3(ALU, SUB, K),		\
	INSN_3(ALU, AND, K),		\
	INSN_3(ALU, OR, K),		\
	INSN_3(ALU, LSH, K),		\
	INSN_3(ALU, RSH, K),		\
	INSN_3(ALU, XOR, K),		\
	INSN_3(ALU, MUL, K),		\
	INSN_3(ALU, MOV, K),		\
	INSN_3(ALU, ARSH, K),		\
	INSN_3(ALU, DIV, K),		\
	INSN_3(ALU, MOD, K),		\
	/* 64 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU64, ADD, X),		\
	INSN_3(ALU64, SUB, X),		\
	INSN_3(ALU64, AND, X),		\
	INSN_3(ALU64, OR, X),		\
	INSN_3(ALU64, LSH, X),		\
	INSN_3(ALU64, RSH, X),		\
	INSN_3(ALU64, XOR, X),		\
	INSN_3(ALU64, MUL, X),		\
	INSN_3(ALU64, MOV, X),		\
	INSN_3(ALU64, ARSH, X),		\
	INSN_3(ALU64, DIV, X),		\
	INSN_3(ALU64, MOD, X),		\
	INSN_2(ALU64, NEG),		\
	/* Immediate based. */		\
	INSN_3(ALU64, ADD, K),		\
	INSN_3(ALU64, SUB, K),		\
	INSN_3(ALU64, AND, K),		\
	INSN_3(ALU64, OR, K),		\
	INSN_3(ALU64, LSH, K),		\
	INSN_3(ALU64, RSH, K),		\
	INSN_3(ALU64, XOR, K),		\
	INSN_3(ALU64, MUL, K),		\
	INSN_3(ALU64, MOV, K),		\
	INSN_3(ALU64, ARSH, K),		\
	INSN_3(ALU64, DIV, K),		\
	INSN_3(ALU64, MOD, K),		\
	/* Call instruction. */		\
	INSN_2(JMP, CALL),		\
	/* Exit instruction. */		\
	INSN_2(JMP, EXIT),		\
	/* 32-bit Jump instructions. */	\
	/* Register based. */		\
	INSN_3(JMP32, JEQ, X),		\
	INSN_3(JMP32, JNE, X),		\
	INSN_3(JMP32, JGT, X),		\
	INSN_3(JMP32, JLT, X),		\
	INSN_3(JMP32, JGE, X),		\
	INSN_3(JMP32, JLE, X),		\
	INSN_3(JMP32, JSGT, X),		\
	INSN_3(JMP32, JSLT, X),		\
	INSN_3(JMP32, JSGE, X),		\
	INSN_3(JMP32, JSLE, X),		\
	INSN_3(JMP32, JSET, X),		\
	/* Immediate based. */		\
	INSN_3(JMP32, JEQ, K),		\
	INSN_3(JMP32, JNE, K),		\
	INSN_3(JMP32, JGT, K),		\
	INSN_3(JMP32, JLT, K),		\
	INSN_3(JMP32, JGE, K),		\
	INSN_3(JMP32, JLE, K),		\
	INSN_3(JMP32, JSGT, K),		\
	INSN_3(JMP32, JSLT, K),		\
	INSN_3(JMP32, JSGE, K),		\
	INSN_3(JMP32, JSLE, K),		\
	INSN_3(JMP32, JSET, K),		\
	/* Jump instructions. */	\
	/* Register based. */		\
	INSN_3(JMP, JEQ, X),		\
	INSN_3(JMP, JNE, X),		\
	INSN_3(JMP, JGT, X),		\
	INSN_3(JMP, JLT, X),		\
	INSN_3(JMP, JGE, X),		\
	INSN_3(JMP, JLE, X),		\
	INSN_3(JMP, JSGT, X),		\
	INSN_3(JMP, JSLT, X),		\
	INSN_3(JMP, JSGE, X),		\
	INSN_3(JMP, JSLE, X),		\
	INSN_3(JMP, JSET, X),		\
	/* Immediate based. */		\
	INSN_3(JMP, JEQ, K),		\
	INSN_3(JMP, JNE, K),		\
	INSN_3(JMP, JGT, K),		\
	INSN_3(JMP, JLT, K),		\
	INSN_3(JMP, JGE, K),		\
	INSN_3(JMP, JLE, K),		\
	INSN_3(JMP, JSGT, K),		\
	INSN_3(JMP, JSLT, K),		\
	INSN_3(JMP, JSGE, K),		\
	INSN_3(JMP, JSLE, K),		\
	INSN_3(JMP, JSET, K),		\
	INSN_2(JMP, JA),		\
	/* Store instructions. */	\
	/* Register based. */		\
	INSN_3(STX, MEM, B),		\
	INSN_3(STX, MEM, H),		\
	INSN_3(STX, MEM, W),		\
	INSN_3(STX, MEM, DW),		\
	INSN_3(STX, XADD, W),		\
	INSN_3(STX, XADD, DW),		\
	/* Immediate based. */		\
	INSN_3(ST, MEM, B),		\
	INSN_3(ST, MEM, H),		\
	INSN_3(ST, MEM, W),		\
	INSN_3(ST, MEM, DW),		\
	/* Load instructions. */	\
	/* Register based. */		\
	INSN_3(LDX, MEM, B),		\
	INSN_3(LDX, MEM, H),		\
	INSN_3(LDX, MEM, W),		\
	INSN_3(LDX, MEM, DW),		\
	/* Immediate based. */		\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
{
	memset(dst, 0, size);
	return -EFAULT;
}

/**
 * __bpf_prog_run - run eBPF program on a given context
 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 * @insn: is the array of eBPF instructions
 * @stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void * const jumptable[256] __annotate_jump_table = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU_ARSH_X:
		DST = (u64) (u32) (((s32) DST) >> SRC);
		CONT;
	ALU_ARSH_K:
		DST = (u64) (u32) (((s32) DST) >> IMM);
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &AX);
		DST = AX;
		CONT;
	ALU_MOD_X:
		AX = (u32) DST;
		DST = do_div(AX, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &AX);
		DST = AX;
		CONT;
	ALU_MOD_K:
		AX = (u32) DST;
		DST = do_div(AX, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		AX = (u32) DST;
		do_div(AX, (u32) SRC);
		DST = (u32) AX;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		AX = (u32) DST;
		do_div(AX, (u32) IMM);
		DST = (u32) AX;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_EXIT:
		return BPF_R0;
	/* JMP */
#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
	JMP_##OPCODE##_X:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_X:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP_##OPCODE##_K:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_K:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;
	COND_JMP(u, JEQ, ==)
	COND_JMP(u, JNE, !=)
	COND_JMP(u, JGT, >)
	COND_JMP(u, JLT, <)
	COND_JMP(u, JGE, >=)
	COND_JMP(u, JLE, <=)
	COND_JMP(u, JSET, &)
	COND_JMP(s, JSGT, >)
	COND_JMP(s, JSLT, <)
	COND_JMP(s, JSGE, >=)
	COND_JMP(s, JSLE, <=)
#undef COND_JMP
	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
#define LDX_PROBE(SIZEOP, SIZE)						\
	LDX_PROBE_MEM_##SIZEOP:						\
		bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off));	\
		CONT;
	LDX_PROBE(B,  1)
	LDX_PROBE(H,  2)
	LDX_PROBE(W,  4)
	LDX_PROBE(DW, 8)
#undef LDX_PROBE

	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
		BUG_ON(1);
		return 0;
}

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

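/* For stack_size == 512, the macro above expands (roughly) to:
 *
 *	static unsigned int __bpf_prog_run512(const void *ctx,
 *					      const struct bpf_insn *insn)
 *	{
 *		u64 stack[512 / sizeof(u64)];
 *		u64 regs[MAX_BPF_EXT_REG];
 *
 *		FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
 *		ARG1 = (u64) (unsigned long) ctx;
 *		return ___bpf_prog_run(regs, insn, stack);
 *	}
 *
 * giving one interpreter entry point per 32-byte stack-depth bucket.
 */
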
#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}

#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (fp->kprobe_override)
		return false;

	if (!array->aux->type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->aux->type  = fp->type;
		array->aux->jited = fp->jited;
		return true;
	}

	return array->aux->type  == fp->type &&
	       array->aux->jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT the eBPF program; if no JIT is available, fall back to the
 * interpreter. The BPF program will be executed via the BPF_PROG_RUN()
 * macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* In case of BPF to BPF calls, the verifier did all the prep
	 * work with regards to JITing, etc.
	 */
	if (fp->bpf_func)
		goto finalize;

	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but fall back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		*err = bpf_prog_alloc_jited_linfo(fp);
		if (*err)
			return fp;

		fp = bpf_int_jit_compile(fp);
		if (!fp->jited) {
			bpf_prog_free_jited_linfo(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
			*err = -ENOTSUPP;
			return fp;
#endif
		} else {
			bpf_prog_free_unused_jited_linfo(fp);
		}
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}

finalize:
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage, as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

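/* Usage sketch: the program-load path hands a verified program to this
 * function and checks the error out-parameter, roughly as
 * kernel/bpf/syscall.c does (abbreviated):
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_used_maps;
 */
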
static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a bpf program attached, use one global 'empty_prog_array'. It will not
 * be modified by the caller of bpf_prog_array_alloc() (since the caller
 * requested prog_cnt == 0), and that pointer should be 'freed' with
 * bpf_prog_array_free().
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
	.null_prog = NULL,
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog_array_item) *
			       (prog_cnt + 1),
			       flags);

	return &empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array *progs)
{
	if (!progs || progs == &empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

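/* Illustrative pairing (hypothetical caller): requesting prog_cnt == 0
 * returns the shared empty array, which bpf_prog_array_free() above
 * recognizes and leaves alone:
 *
 *	struct bpf_prog_array *arr = bpf_prog_array_alloc(0, GFP_KERNEL);
 *	...
 *	bpf_prog_array_free(arr);	// no-op for &empty_prog_array.hdr
 */
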
int bpf_prog_array_length(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	return cnt;
}

bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			return false;
	return true;
}

static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{
	struct bpf_prog_array_item *item;
	int i = 0;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		prog_ids[i] = item->prog->aux->id;
		if (++i == request_cnt) {
			item++;
			break;
		}
	}

	return !!(item->prog);
}

int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
				__u32 __user *prog_ids, u32 cnt)
{
	unsigned long err = 0;
	bool nospc;
	u32 *ids;

	/* users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *	bpf_prog_array_copy_to_user(..., cnt);
	 * so the kcalloc below doesn't need an extra cnt > 0 check.
	 */
	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	nospc = bpf_prog_array_copy_core(array, ids, cnt);
	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
	kfree(ids);
	if (err)
		return -EFAULT;
	if (nospc)
		return -ENOSPC;
	return 0;
}

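/* Sketch of the caller pattern the comment above refers to (hypothetical
 * caller; the real ones live in the cgroup and tracing code):
 *
 *	cnt = bpf_prog_array_length(array);
 *	if (cnt > 0) {
 *		ret = bpf_prog_array_copy_to_user(array, uids, cnt);
 *		// ret == -ENOSPC means there were more programs than
 *		// the cnt slots provided
 *	}
 */
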
void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}

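/* Note on the swap above: the slot is overwritten with dummy_bpf_prog
 * instead of being removed, so readers walking the array concurrently
 * never see the NULL terminator move; the dummy simply returns 1 when
 * run (see __bpf_prog_ret1() above).
 */
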
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing;
	struct bpf_prog_array *array;
	bool found_exclude = false;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++)
			if (existing->prog != exclude_prog &&
			    existing->prog != &dummy_bpf_prog.prog) {
				array->items[new_prog_idx++].prog =
					existing->prog;
			}
	}
	if (include_prog)
		array->items[new_prog_idx++].prog = include_prog;
	array->items[new_prog_idx].prog = NULL;
	*new_array = array;
	return 0;
}

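/* Usage sketch: attach paths build a new array and publish it under the
 * relevant lock, roughly as the perf/tracing code does (abbreviated):
 *
 *	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
 *	if (ret < 0)
 *		goto unlock;
 *	rcu_assign_pointer(event->tp_event->prog_array, new_array);
 *	bpf_prog_array_free(old_array);
 */
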
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								     : 0;
}

static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype) {
		if (!aux->cgroup_storage[stype])
			continue;
		bpf_cgroup_storage_release(aux, aux->cgroup_storage[stype]);
	}
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len)
{
	struct bpf_map *map;
	u32 i;

	bpf_free_cgroup_storage(aux);
	for (i = 0; i < len; i++) {
		map = used_maps[i];
		if (map->ops->map_poke_untrack)
			map->ops->map_poke_untrack(map, aux);
		bpf_map_put(map);
	}
}

static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{
	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
	kfree(aux->used_maps);
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_free_used_maps(aux);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	bpf_trampoline_put(aux->trampoline);
	for (i = 0; i < aux->func_cnt; i++)
		bpf_jit_free(aux->func[i]);
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

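/* Note on the teardown above: for programs with BPF-to-BPF calls
 * (aux->func_cnt != 0), each subprogram carries its own JIT image and is
 * released via bpf_jit_free(); the main program's shell is then freed
 * with bpf_prog_unlock_free(). Single-function programs go through
 * bpf_jit_free() directly.
 */
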
/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	if (aux->linked_prog)
		bpf_prog_put(aux->linked_prog);
	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

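/* The actual teardown is deferred to a workqueue so that bpf_prog_free()
 * stays safe to call from contexts that must not sleep, while the work
 * item itself may block (e.g. in bpf_map_put() or when vfreeing the
 * program image).
 */
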
/* RNG for unprivileged user space with separate state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, note that it is
	 * called from native eBPF and from classic-to-eBPF
	 * transformations, and that register assignments on the two
	 * sides differ, e.g. classic always sets up fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

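/* Wiring sketch: kernel/bpf/helpers.c points the bpf_get_prandom_u32()
 * helper at the function above (shown here for context, not redefined):
 *
 *	const struct bpf_func_proto bpf_get_prandom_u32_proto = {
 *		.func		= bpf_user_rnd_u32,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *	};
 */
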
/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

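/* The proto above deliberately has .func == NULL: tail calls are emitted
 * inline by the interpreter and the JITs rather than dispatched through a
 * function pointer. From a BPF program they look like an ordinary helper
 * call, e.g. (hypothetical program, jmp_table being a
 * BPF_MAP_TYPE_PROG_ARRAY):
 *
 *	bpf_tail_call(ctx, &jmp_table, idx);
 *	// execution continues here only if the tail call failed
 */
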
/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

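/* An eBPF JIT overrides the weak stub above; on any failure it must hand
 * back the original, still-valid program so that execution falls back to
 * the interpreter. Skeleton only, under that assumption:
 *
 *	struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 *	{
 *		// emit native code; on success set prog->bpf_func and
 *		// prog->jited = 1, then return the (possibly new) prog
 *		return prog;
 *	}
 */
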
/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* Return TRUE if the JIT backend wants the verifier to enable the
 * sub-register usage analysis and to insert explicit zero extensions.
 * Otherwise, return FALSE.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}

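/* Arch sketch: a backend whose 32-bit sub-register writes do not
 * implicitly zero the upper half overrides the stub above (as e.g. the
 * riscv and s390 JITs do):
 *
 *	bool bpf_jit_needs_zext(void)
 *	{
 *		return true;
 *	}
 */
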
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{
	return -ENOTSUPP;
}

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);