// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
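
/* Illustrative note (not part of the original file): classic BPF encodes
 * "load relative to a protocol header" as large magic offsets. For example,
 * k = SKF_NET_OFF + 9 requests the byte 9 past the network header (the IPv4
 * protocol field). The helper above resolves such offsets and returns NULL
 * whenever the requested bytes fall outside the linear skb data.
 */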

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
	mutex_init(&fp->aux->used_maps_mutex);
	mutex_init(&fp->aux->dst_mutex);

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->aux->stats) {
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->aux->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
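
/* Illustrative usage sketch (assumed caller, mirroring the syscall path):
 *
 *	struct bpf_prog *prog;
 *
 *	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
 *	if (!prog)
 *		return -ENOMEM;
 *
 * bpf_prog_size() accounts for the struct bpf_prog header plus the
 * instruction array; the allocation is then rounded up to whole pages.
 */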

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
					 sizeof(*prog->aux->jited_linfo),
					 GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
{
	kfree(prog->aux->jited_linfo);
	prog->aux->jited_linfo = NULL;
}

void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
		bpf_prog_free_jited_linfo(prog);
}

/* The JIT engine is responsible for providing an array that maps
 * insn_off to jited_off (insn_to_jit_off).
 *
 * The idx into this array is the insn_off. Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the last byte of the jited insn.
 *
 * Hence, with
 * insn_start:
 *	The first bpf insn off of the prog. The insn off
 *	here is relative to the main prog.
 *	e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *	The prog's idx into prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}
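
/* Worked example (illustrative numbers, not from the original file): for a
 * subprog whose first insn sits at main-prog offset 10 (insn_start = 10)
 * with line info at insn_off {10, 12, 15}:
 *
 *	jited_linfo[linfo_idx + 0] = bpf_func
 *	jited_linfo[linfo_idx + 1] = bpf_func + insn_to_jit_off[12 - 10 - 1]
 *	jited_linfo[linfo_idx + 2] = bpf_func + insn_to_jit_off[15 - 10 - 1]
 *
 * i.e. each entry points just past the jited bytes of the insn preceding
 * that line-info boundary.
 */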

void bpf_prog_free_linfo(struct bpf_prog *prog)
{
	bpf_prog_free_jited_linfo(prog);
	kvfree(prog->aux->linfo);
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages;

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	fp = __vmalloc(size, gfp_flags);
	if (fp) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		mutex_destroy(&fp->aux->used_maps_mutex);
		mutex_destroy(&fp->aux->dst_mutex);
		free_percpu(fp->aux->stats);
		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA1_DIGEST_WORDS];
	u32 ws[SHA1_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha1_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fds for the digest calculation
	 * since they are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA1_BLOCK_SIZE);
	blocks = bsize / SHA1_BLOCK_SIZE;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha1_transform(digest, todo, ws);
		todo += SHA1_BLOCK_SIZE;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}
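
/* Padding arithmetic above, with illustrative numbers: a 2-insn program has
 * psize = 16 bytes; appending the 0x80 terminator makes psize = 17, which
 * rounds up to bsize = 64 (one SHA1_BLOCK_SIZE block). Since 64 - 17 >= 8,
 * the big-endian bit length ((17 - 1) << 3 = 128) lands in the last 8 bytes
 * of that same block, exactly as in standard SHA-1 message padding.
 */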

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 delta = end_new - end_old;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}
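
/* Worked example (illustrative numbers): patching the insn at pos = 5 with a
 * 3-insn patchlet yields end_old = 6, end_new = 8, i.e. delta = 2. A forward
 * jump at insn 2 with off = 4 used to target insn 2 + 4 + 1 = 7, past the
 * patched region, so bpf_adj_delta_to_off() bumps it to off = 6 (new target
 * 9, where old insn 7 now lives after the expansion).
 */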

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * the last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed not to fail at this point; otherwise the ship
	 * has sailed and we cannot reverse to the original state. An
	 * overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	/* Branch offsets can't overflow when the program is shrinking, so
	 * there is no need to call bpf_adj_branches(..., true) here.
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
	prog->aux->ksym.end   = addr + hdr->pages * PAGE_SIZE;
}

static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{
	char *sym = prog->aux->ksym.name;
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We should need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}
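
/* Resulting kallsyms entry (illustrative tag and name, not real values):
 *
 *	bpf_prog_f30ec8c9b5bd5a67_my_prog
 *
 * i.e. "bpf_prog_", the 8-byte program tag rendered as 16 hex characters,
 * then an optional "_" plus the BTF function name or prog->aux->name.
 */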

static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{
	return container_of(n, struct bpf_ksym, tnode)->start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	const struct bpf_ksym *ksym;

	ksym = container_of(n, struct bpf_ksym, tnode);

	if (val < ksym->start)
		return -1;
	if (val >= ksym->end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

void bpf_ksym_add(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	WARN_ON_ONCE(!list_empty(&ksym->lnode));
	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	spin_unlock_bh(&bpf_lock);
}

static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
	if (list_empty(&ksym->lnode))
		return;

	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&ksym->lnode);
}

void bpf_ksym_del(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	__bpf_ksym_del(ksym);
	spin_unlock_bh(&bpf_lock);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym.lnode) ||
	       fp->aux->ksym.lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !bpf_capable())
		return;

	bpf_prog_ksym_set_addr(fp);
	bpf_prog_ksym_set_name(fp);
	fp->aux->ksym.prog = true;

	bpf_ksym_add(&fp->aux->ksym);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	bpf_ksym_del(&fp->aux->ksym);
}

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	struct bpf_ksym *ksym;
	char *ret = NULL;

	rcu_read_lock();
	ksym = bpf_ksym_find(addr);
	if (ksym) {
		unsigned long symbol_start = ksym->start;
		unsigned long symbol_end = ksym->end;

		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_ksym_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
	struct bpf_ksym *ksym = bpf_ksym_find(addr);

	return ksym && ksym->prog ?
	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
	       NULL;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct bpf_prog *prog;

	rcu_read_lock();
	prog = bpf_prog_ksym_find(addr);
	if (!prog)
		goto out;
	if (!prog->aux->num_exentries)
		goto out;

	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
	rcu_read_unlock();
	return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_ksym *ksym;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
		if (it++ != symnum)
			continue;

		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		*value = ksym->start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	static const u32 poke_tab_max = 1024;
	u32 slot = prog->aux->size_poke_tab;
	u32 size = slot + 1;

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->tailcall_target || poke->tailcall_target_stable ||
	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
		return -EINVAL;

	switch (poke->reason) {
	case BPF_POKE_REASON_TAIL_CALL:
		if (!poke->tail_call.map)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;

	memcpy(&tab[slot], poke, sizeof(*poke));
	prog->aux->size_poke_tab = size;
	prog->aux->poke_tab = tab;

	return slot;
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);

int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}
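
/* Illustrative sizing (architecture-dependent, assumed numbers): with a
 * 1 GiB executable/module area, bpf_jit_alloc_exec_limit() >> 2 caps the
 * global JIT pool at 256 MiB. Unprivileged users get -EPERM once
 * bpf_jit_current crosses that many pages; CAP_SYS_ADMIN may exceed it.
 */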

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}
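
/* Resulting layout (illustrative): the page-aligned allocation starts with
 * struct bpf_binary_header, followed by a random, alignment-masked hole
 * filled with illegal instructions, then the JITed image (*image_ptr), and
 * finally more illegal-instruction fill up to 'size'. The randomized image
 * start is what makes JIT-spraying attacks harder.
 */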

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff,
			      bool emit_zext)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX in some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
				      off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		if (emit_zext)
			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}
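
/* Illustrative rewrite (assumed imm_rnd = 0xdeadbeef): the single insn
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x42)		// r1 += 0x42
 *
 * is emitted by the ALU64 case above as
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0xdeadbeef ^ 0x42)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0xdeadbeef)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the attacker-chosen constant never appears literally in the image.
 */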

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
					       clone->aux->verifier_zext);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return tmp;
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)		\
	/* 32 bit ALU operations. */		\
	/* Register based. */			\
	INSN_3(ALU, ADD, X),			\
	INSN_3(ALU, SUB, X),			\
	INSN_3(ALU, AND, X),			\
	INSN_3(ALU, OR, X),			\
	INSN_3(ALU, LSH, X),			\
	INSN_3(ALU, RSH, X),			\
	INSN_3(ALU, XOR, X),			\
	INSN_3(ALU, MUL, X),			\
	INSN_3(ALU, MOV, X),			\
	INSN_3(ALU, ARSH, X),			\
	INSN_3(ALU, DIV, X),			\
	INSN_3(ALU, MOD, X),			\
	INSN_2(ALU, NEG),			\
	INSN_3(ALU, END, TO_BE),		\
	INSN_3(ALU, END, TO_LE),		\
	/* Immediate based. */			\
	INSN_3(ALU, ADD, K),			\
	INSN_3(ALU, SUB, K),			\
	INSN_3(ALU, AND, K),			\
	INSN_3(ALU, OR, K),			\
	INSN_3(ALU, LSH, K),			\
	INSN_3(ALU, RSH, K),			\
	INSN_3(ALU, XOR, K),			\
	INSN_3(ALU, MUL, K),			\
	INSN_3(ALU, MOV, K),			\
	INSN_3(ALU, ARSH, K),			\
	INSN_3(ALU, DIV, K),			\
	INSN_3(ALU, MOD, K),			\
	/* 64 bit ALU operations. */		\
	/* Register based. */			\
	INSN_3(ALU64, ADD, X),			\
	INSN_3(ALU64, SUB, X),			\
	INSN_3(ALU64, AND, X),			\
	INSN_3(ALU64, OR, X),			\
	INSN_3(ALU64, LSH, X),			\
	INSN_3(ALU64, RSH, X),			\
	INSN_3(ALU64, XOR, X),			\
	INSN_3(ALU64, MUL, X),			\
	INSN_3(ALU64, MOV, X),			\
	INSN_3(ALU64, ARSH, X),			\
	INSN_3(ALU64, DIV, X),			\
	INSN_3(ALU64, MOD, X),			\
	INSN_2(ALU64, NEG),			\
	/* Immediate based. */			\
	INSN_3(ALU64, ADD, K),			\
	INSN_3(ALU64, SUB, K),			\
	INSN_3(ALU64, AND, K),			\
	INSN_3(ALU64, OR, K),			\
	INSN_3(ALU64, LSH, K),			\
	INSN_3(ALU64, RSH, K),			\
	INSN_3(ALU64, XOR, K),			\
	INSN_3(ALU64, MUL, K),			\
	INSN_3(ALU64, MOV, K),			\
	INSN_3(ALU64, ARSH, K),			\
	INSN_3(ALU64, DIV, K),			\
	INSN_3(ALU64, MOD, K),			\
	/* Call instruction. */			\
	INSN_2(JMP, CALL),			\
	/* Exit instruction. */			\
	INSN_2(JMP, EXIT),			\
	/* 32-bit Jump instructions. */		\
	/* Register based. */			\
	INSN_3(JMP32, JEQ, X),			\
	INSN_3(JMP32, JNE, X),			\
	INSN_3(JMP32, JGT, X),			\
	INSN_3(JMP32, JLT, X),			\
	INSN_3(JMP32, JGE, X),			\
	INSN_3(JMP32, JLE, X),			\
	INSN_3(JMP32, JSGT, X),			\
	INSN_3(JMP32, JSLT, X),			\
	INSN_3(JMP32, JSGE, X),			\
	INSN_3(JMP32, JSLE, X),			\
	INSN_3(JMP32, JSET, X),			\
	/* Immediate based. */			\
	INSN_3(JMP32, JEQ, K),			\
	INSN_3(JMP32, JNE, K),			\
	INSN_3(JMP32, JGT, K),			\
	INSN_3(JMP32, JLT, K),			\
	INSN_3(JMP32, JGE, K),			\
	INSN_3(JMP32, JLE, K),			\
	INSN_3(JMP32, JSGT, K),			\
	INSN_3(JMP32, JSLT, K),			\
	INSN_3(JMP32, JSGE, K),			\
	INSN_3(JMP32, JSLE, K),			\
	INSN_3(JMP32, JSET, K),			\
	/* Jump instructions. */		\
	/* Register based. */			\
	INSN_3(JMP, JEQ, X),			\
	INSN_3(JMP, JNE, X),			\
	INSN_3(JMP, JGT, X),			\
	INSN_3(JMP, JLT, X),			\
	INSN_3(JMP, JGE, X),			\
	INSN_3(JMP, JLE, X),			\
	INSN_3(JMP, JSGT, X),			\
	INSN_3(JMP, JSLT, X),			\
	INSN_3(JMP, JSGE, X),			\
	INSN_3(JMP, JSLE, X),			\
	INSN_3(JMP, JSET, X),			\
	/* Immediate based. */			\
	INSN_3(JMP, JEQ, K),			\
	INSN_3(JMP, JNE, K),			\
	INSN_3(JMP, JGT, K),			\
	INSN_3(JMP, JLT, K),			\
	INSN_3(JMP, JGE, K),			\
	INSN_3(JMP, JLE, K),			\
	INSN_3(JMP, JSGT, K),			\
	INSN_3(JMP, JSLT, K),			\
	INSN_3(JMP, JSGE, K),			\
	INSN_3(JMP, JSLE, K),			\
	INSN_3(JMP, JSET, K),			\
	INSN_2(JMP, JA),			\
	/* Store instructions. */		\
	/* Register based. */			\
	INSN_3(STX, MEM, B),			\
	INSN_3(STX, MEM, H),			\
	INSN_3(STX, MEM, W),			\
	INSN_3(STX, MEM, DW),			\
	INSN_3(STX, XADD, W),			\
	INSN_3(STX, XADD, DW),			\
	/* Immediate based. */			\
	INSN_3(ST, MEM, B),			\
	INSN_3(ST, MEM, H),			\
	INSN_3(ST, MEM, W),			\
	INSN_3(ST, MEM, DW),			\
	/* Load instructions. */		\
	/* Register based. */			\
	INSN_3(LDX, MEM, B),			\
	INSN_3(LDX, MEM, H),			\
	INSN_3(LDX, MEM, W),			\
	INSN_3(LDX, MEM, DW),			\
	/* Immediate based. */			\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}
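
/* Illustrative checks (not from the original file):
 *
 *	bpf_opcode_in_insntable(BPF_ALU64 | BPF_ADD | BPF_X);	// true
 *	bpf_opcode_in_insntable(BPF_LD | BPF_ABS | BPF_B);	// true, cBPF carry-over
 *	bpf_opcode_in_insntable(0xff);				// false
 *
 * The verifier consults this table to reject unknown opcodes up front.
 */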

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
{
	memset(dst, 0, size);
	return -EFAULT;
}

/**
 * ___bpf_prog_run - run eBPF program on a given context
 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 * @insn: is the array of eBPF instructions
 * @stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void * const jumptable[256] __annotate_jump_table = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU_ARSH_X:
		DST = (u64) (u32) (((s32) DST) >> SRC);
		CONT;
	ALU_ARSH_K:
		DST = (u64) (u32) (((s32) DST) >> IMM);
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &AX);
		DST = AX;
		CONT;
	ALU_MOD_X:
		AX = (u32) DST;
		DST = do_div(AX, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &AX);
		DST = AX;
		CONT;
	ALU_MOD_K:
		AX = (u32) DST;
		DST = do_div(AX, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		AX = (u32) DST;
		do_div(AX, (u32) SRC);
		DST = (u32) AX;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		AX = (u32) DST;
		do_div(AX, (u32) IMM);
		DST = (u32) AX;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_EXIT:
		return BPF_R0;
	/* JMP */
#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
	JMP_##OPCODE##_X:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_X:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP_##OPCODE##_K:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_K:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;
	COND_JMP(u, JEQ, ==)
	COND_JMP(u, JNE, !=)
	COND_JMP(u, JGT, >)
	COND_JMP(u, JLT, <)
	COND_JMP(u, JGE, >=)
	COND_JMP(u, JLE, <=)
	COND_JMP(u, JSET, &)
	COND_JMP(s, JSGT, >)
	COND_JMP(s, JSLT, <)
	COND_JMP(s, JSGE, >=)
	COND_JMP(s, JSLE, <=)
#undef COND_JMP
	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
#define LDX_PROBE(SIZEOP, SIZE)						\
	LDX_PROBE_MEM_##SIZEOP:						\
		bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off));	\
		CONT;
	LDX_PROBE(B,  1)
	LDX_PROBE(H,  2)
	LDX_PROBE(W,  4)
	LDX_PROBE(DW, 8)
#undef LDX_PROBE

	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
		BUG_ON(1);
		return 0;
}

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}
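
/* Dispatch example (illustrative): a program with stack_depth = 100 rounds
 * up to 128, so it runs via interpreters[(128 / 32) - 1] == interpreters[3],
 * i.e. __bpf_prog_run128() with a 128-byte on-stack BPF stack.
 */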

#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (fp->kprobe_override)
		return false;

	if (!array->aux->type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->aux->type  = fp->type;
		array->aux->jited = fp->jited;
		return true;
	}

	return array->aux->type  == fp->type &&
	       array->aux->jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i, ret = 0;

	mutex_lock(&aux->used_maps_mutex);
	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp)) {
			ret = -EINVAL;
			goto out;
		}
	}

out:
	mutex_unlock(&aux->used_maps_mutex);
	return ret;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}

f5bffecd 1780/**
3324b584 1781 * bpf_prog_select_runtime - select exec runtime for BPF program
7ae457c1 1782 * @fp: bpf_prog populated with internal BPF program
d1c55ab5 1783 * @err: pointer to error variable
f5bffecd 1784 *
3324b584
DB
1785 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1786 * The BPF program will be executed via BPF_PROG_RUN() macro.
f5bffecd 1787 */
d1c55ab5 1788struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
f5bffecd 1789{
9facc336
DB
1790 /* In case of BPF to BPF calls, verifier did all the prep
1791 * work with regards to JITing, etc.
1792 */
1793 if (fp->bpf_func)
1794 goto finalize;
8007e40a 1795
9facc336 1796 bpf_prog_select_func(fp);
f5bffecd 1797
d1c55ab5
DB
1798 /* eBPF JITs can rewrite the program in case constant
1799 * blinding is active. However, in case of error during
1800 * blinding, bpf_int_jit_compile() must always return a
1801 * valid program, which in this case would simply not
1802 * be JITed, but falls back to the interpreter.
1803 */
ab3f0063 1804 if (!bpf_prog_is_dev_bound(fp->aux)) {
c454a46b
MKL
1805 *err = bpf_prog_alloc_jited_linfo(fp);
1806 if (*err)
1807 return fp;
1808
ab3f0063 1809 fp = bpf_int_jit_compile(fp);
290af866 1810 if (!fp->jited) {
c454a46b
MKL
1811 bpf_prog_free_jited_linfo(fp);
1812#ifdef CONFIG_BPF_JIT_ALWAYS_ON
290af866
AS
1813 *err = -ENOTSUPP;
1814 return fp;
290af866 1815#endif
c454a46b
MKL
1816 } else {
1817 bpf_prog_free_unused_jited_linfo(fp);
1818 }
ab3f0063
JK
1819 } else {
1820 *err = bpf_prog_offload_compile(fp);
1821 if (*err)
1822 return fp;
1823 }
9facc336
DB
1824
1825finalize:
60a3b225 1826 bpf_prog_lock_ro(fp);
04fd61ab 1827
3324b584
DB
1828 /* The tail call compatibility check can only be done at
1829 * this late stage as we need to determine, if we deal
1830 * with JITed or non JITed program concatenations and not
1831 * all eBPF JITs might immediately support all features.
1832 */
d1c55ab5 1833 *err = bpf_check_tail_call(fp);
85782e03 1834
d1c55ab5 1835 return fp;
f5bffecd 1836}
7ae457c1 1837EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
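
/* Editorial usage sketch as seen from a hypothetical caller (the real
 * call site lives in the program-load syscall path after verification):
 *
 *	int err = 0;
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_prog;	/. prog is still valid and must be freed ./
 *
 * Note that the function returns the (possibly replaced) prog pointer
 * and reports failure through @err rather than through the return value.
 */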

static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't
 * have a bpf program attached, use one global 'empty_prog_array'.
 * It will not be modified by the caller of bpf_prog_array_alloc()
 * (since the caller requested prog_cnt == 0); that pointer should
 * still be 'freed' by bpf_prog_array_free().
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
	.null_prog = NULL,
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog_array_item) *
			       (prog_cnt + 1),
			       flags);

	return &empty_prog_array.hdr;
}
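
/* Editorial sketch of the resulting layout for prog_cnt == 2; the extra
 * item is the NULL sentinel that all the iterators below rely on:
 *
 *	struct bpf_prog_array {
 *		struct rcu_head rcu;
 *		struct bpf_prog_array_item items[];
 *	};
 *
 *	items[0].prog = <prog A>
 *	items[1].prog = <prog B>
 *	items[2].prog = NULL	(terminator, hence prog_cnt + 1 above)
 */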

void bpf_prog_array_free(struct bpf_prog_array *progs)
{
	if (!progs || progs == &empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

int bpf_prog_array_length(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	return cnt;
}

bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			return false;
	return true;
}

static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{
	struct bpf_prog_array_item *item;
	int i = 0;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		prog_ids[i] = item->prog->aux->id;
		if (++i == request_cnt) {
			item++;
			break;
		}
	}

	return !!(item->prog);
}

int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
				__u32 __user *prog_ids, u32 cnt)
{
	unsigned long err = 0;
	bool nospc;
	u32 *ids;

	/* users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *	bpf_prog_array_copy_to_user(..., cnt);
	 * so below kcalloc doesn't need extra cnt > 0 check.
	 */
	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	nospc = bpf_prog_array_copy_core(array, ids, cnt);
	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
	kfree(ids);
	if (err)
		return -EFAULT;
	if (nospc)
		return -ENOSPC;
	return 0;
}
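
/* Editorial sketch of the caller-side pattern the comment above refers
 * to; a hypothetical query handler would look roughly like:
 *
 *	u32 cnt = bpf_prog_array_length(array);
 *
 *	if (copy_to_user(&uattr->prog_cnt, &cnt, sizeof(cnt)))
 *		return -EFAULT;
 *	if (cnt && bpf_prog_array_copy_to_user(array, uattr->prog_ids, cnt))
 *		return -EFAULT;
 *
 * -ENOSPC from this function means the array held more (non-dummy)
 * programs than the caller's buffer could take; the first @cnt ids
 * were still copied out.
 */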

void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}

/**
 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
 *                                   index into the program array with
 *                                   a dummy no-op program.
 * @array: a bpf_prog_array
 * @index: the index of the program to replace
 *
 * Skips over dummy programs (by not counting them) when calculating
 * the position of the program to replace.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
{
	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
}

/**
 * bpf_prog_array_update_at() - Updates the program at the given index
 *                              into the program array.
 * @array: a bpf_prog_array
 * @index: the index of the program to update
 * @prog: the program to insert into the array
 *
 * Skips over dummy programs (by not counting them) when calculating
 * the position of the program to update.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog)
{
	struct bpf_prog_array_item *item;

	if (unlikely(index < 0))
		return -EINVAL;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		if (!index) {
			WRITE_ONCE(item->prog, prog);
			return 0;
		}
		index--;
	}
	return -ENOENT;
}
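
/* Editorial sketch of the index semantics: dummies are invisible to
 * @index, so for an array laid out as
 *
 *	items[] = { progA, dummy, progB, progC, NULL }
 *
 * bpf_prog_array_update_at(array, 1, progX) replaces progB (not the
 * dummy), and index 3 returns -ENOENT since only three real programs
 * exist.
 */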

int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing;
	struct bpf_prog_array *array;
	bool found_exclude = false;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++)
			if (existing->prog != exclude_prog &&
			    existing->prog != &dummy_bpf_prog.prog) {
				array->items[new_prog_idx++].prog =
					existing->prog;
			}
	}
	if (include_prog)
		array->items[new_prog_idx++].prog = include_prog;
	array->items[new_prog_idx].prog = NULL;
	*new_array = array;
	return 0;
}
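
/* Editorial usage sketch: attach and detach are both expressed through
 * this one copy routine. A hypothetical attach path would do:
 *
 *	struct bpf_prog_array *new_array;
 *	int err;
 *
 *	err = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
 *	if (err)
 *		return err;
 *	rcu_assign_pointer(owner->progs, new_array);
 *	bpf_prog_array_free(old_array);
 *
 * while detach passes the program as @exclude_prog and NULL as
 * @include_prog; dummy entries are dropped along the way.
 */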

int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								     : 0;
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len)
{
	struct bpf_map *map;
	u32 i;

	for (i = 0; i < len; i++) {
		map = used_maps[i];
		if (map->ops->map_poke_untrack)
			map->ops->map_poke_untrack(map, aux);
		bpf_map_put(map);
	}
}

static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{
	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
	kfree(aux->used_maps);
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_free_used_maps(aux);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	if (aux->dst_trampoline)
		bpf_trampoline_put(aux->dst_trampoline);
	for (i = 0; i < aux->func_cnt; i++)
		bpf_jit_free(aux->func[i]);
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	if (aux->dst_prog)
		bpf_prog_put(aux->dst_prog);
	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, e.g. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}
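
/* Editorial sketch: BPF_CALL_0() expands the above into the usual helper
 * shape, roughly (the padded argument registers are declared but unused):
 *
 *	u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 *
 * From a BPF program's point of view this is the body behind the
 * bpf_get_prandom_u32() helper; keeping its state separate from
 * prandom_u32() means BPF-triggered draws cannot disturb the kernel's
 * own prandom sequence.
 */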

BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
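
/* Editorial sketch: .func is NULL above because the verifier rewrites
 * calls to this helper into a direct tail-call instruction sequence
 * rather than a function call. From BPF program code it is used as:
 *
 *	bpf_tail_call(ctx, &jmp_table, index);
 *	(only reached if the tail call failed: bad index, empty slot,
 *	 or tail-call limit exceeded; execution then falls through)
 *
 * matching the three argument types declared in the proto (ctx pointer,
 * const map pointer, arbitrary scalar index).
 */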

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}
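
/* Editorial sketch: a backend with native 32-bit sub-registers overrides
 * the weak default from its own arch code, e.g. (hypothetical arch file):
 *
 *	bool bpf_jit_needs_zext(void)
 *	{
 *		return true;
 *	}
 *
 * which makes the verifier insert explicit zero-extension instructions
 * after 32-bit ALU ops and loads instead of relying on the hardware.
 */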

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{
	return -ENOTSUPP;
}

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);