/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>

#include <asm/unaligned.h>
#include <asm/barrier.h>
/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
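/* Example (illustrative sketch, not part of this file): a classic BPF
 * socket filter can read relative to the network header via the special
 * negative offset SKF_NET_OFF, which the helper above resolves. Built
 * with the <linux/filter.h> macros, such a filter could look like:
 *
 *	struct sock_filter insns[] = {
 *		// A = byte at network header + 9 (IPv4 protocol field)
 *		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_TCP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	// accept
 *		BPF_STMT(BPF_RET | BPF_K, 0),		// drop
 *	};
 */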
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	kmemcheck_annotate_bitfield(fp, meta);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		kmemcheck_annotate_bitfield(fp, meta);

		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}
void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since they are unstable from user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}
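/* Example (sketch): because map fds are zeroed out above, two loads of
 * the same program that happen to get different fd numbers for the same
 * map still end up with an identical fp->tag. The 8-byte tag is what
 * user space sees, e.g. as "prog_tag:" in /proc/<pid>/fdinfo/<prog-fd>:
 *
 *	if (!bpf_prog_calc_tag(fp))
 *		pr_debug("tag: %*phN\n", (int)sizeof(fp->tag), fp->tag);
 */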
static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP  &&
	       /* Call and Exit are both special jumps with no
		* target inside the BPF instruction image.
		*/
	       BPF_OP(insn->code) != BPF_CALL &&
	       BPF_OP(insn->code) != BPF_EXIT;
}
static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 i, insn_cnt = prog->len;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!bpf_is_jmp_and_has_target(insn))
			continue;

		/* Adjust offset of jmps if we cross boundaries. */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
			insn->off -= delta;
	}
}
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	bpf_adj_branches(prog_adj, off, insn_delta);

	return prog_adj;
}
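/* Worked example (sketch): patching insn 1 of a 4-insn image with a
 * 3-insn patchlet (len = 3, insn_delta = 2):
 *
 *	before:	[ I0, I1, I2, I3 ]
 *	step 1:	[ I0, I1, __, __, I2, I3 ]   memmove() tail by delta
 *	step 2:	[ I0, P0, P1, P2, I2, I3 ]   memcpy() patch over I1
 *	step 3:	a jump in I0 that previously targeted I2 gets its
 *		off bumped by +2 via bpf_adj_branches().
 */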
#ifdef CONFIG_BPF_JIT
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}
static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
	*sym = 0;
}
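/* Example (sketch): with a hypothetical 8-byte tag this yields a symbol
 * such as "bpf_prog_df278326143d1310", which is the name that later
 * shows up in kallsyms for JITed programs.
 */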
static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}
static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

int bpf_jit_kallsyms __read_mostly;
static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}
static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(aux->prog, sym);

		*value = symbol_start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}
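/* Resulting layout (sketch): one module_alloc() region of hdr->pages
 * pages, pre-filled with illegal instructions by the arch callback:
 *
 *	+--------+------------------+-------------+------------------+
 *	| header | random-sized gap | JITed image | illegal-insn pad |
 *	+--------+------------------+-------------+------------------+
 *	         ^ start (randomized, masked down to 'alignment')
 */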
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}
int bpf_jit_harden __read_mostly;

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}
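/* Example (sketch): with imm_rnd = 0x11223344, a single insn
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 0xdead)
 *
 * is rewritten into three insns so the constant never appears verbatim
 * in the image:
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x11223344 ^ 0xdead)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x11223344)
 *	BPF_ALU64_REG(BPF_MOV, BPF_REG_2,  BPF_REG_AX)
 */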
static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		kmemcheck_annotate_bitfield(fp, meta);

		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled())
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i        += insn_delta;
	}

	return clone;
}
#endif /* CONFIG_BPF_JIT */
/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
				    u64 *stack)
{
	u64 tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];
	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
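	/* Example (sketch): ALU(ADD, +) above expands to four labels;
	 * e.g. the 32-bit immediate form becomes:
	 *
	 *	ALU_ADD_K:
	 *		DST = (u32) DST + (u32) IMM;
	 *		CONT;
	 */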
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;
	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;
	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
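	/* Example (sketch): from the BPF program side, this path is what
	 * services a helper call such as:
	 *
	 *	bpf_tail_call(ctx, &jmp_table, index);
	 *
	 * If the prog array slot 'index' holds a program, control
	 * transfers to its first insn without returning; otherwise
	 * execution falls through to the next insn after the call.
	 */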
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	/* STX and ST and LDX*/
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only
		 * appearing in the programs where ctx == skb
		 * (see may_access_skb() in the verifier). All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
		 * bpf_convert_filter() saves it in BPF_R6, internal BPF
		 * verifier will check that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}
#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
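/* The EVALn() chains above stamp out one interpreter per stack-size
 * class: __bpf_prog_run32(), __bpf_prog_run64(), ..., up to
 * __bpf_prog_run512(), each with an on-stack scratch area sized to
 * the verifier-observed stack depth rounded up to a 32-byte class.
 */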
#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}
static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}
/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	fp = bpf_int_jit_compile(fp);
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage, as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
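/* Example (sketch): a program whose verifier-computed stack_depth is
 * 40 bytes rounds up to the 64-byte class, so index (64 / 32) - 1 = 1
 * selects __bpf_prog_run64() as the interpreter entry point.
 */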
static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);
/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}
/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}
u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}
/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);

EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);