/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct exception_table_entry;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
};

struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool unpriv_array;
	bool frozen; /* write-once; write-protected by freeze_mutex */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
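
/* Illustrative sketch (hypothetical value layout, not part of this header):
 * a map value type that embeds a bpf_spin_lock at some offset, e.g.
 *
 *	struct hmap_elem {
 *		int cnt;
 *		struct bpf_spin_lock lock;
 *	};
 *
 * gives a map with spin_lock_off >= 0. copy_map_value() above then copies
 * the bytes before and after 'lock' but leaves the lock word itself alone,
 * so a concurrent bpf_spin_lock() holder is never overwritten.
 */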

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_PTR_TO_CTX_OR_NULL,	/* pointer to context or NULL */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	int *btf_id; /* BTF ids of arguments */
};

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	PTR_TO_BTF_ID,		 /* reg points to kernel struct */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		u32 btf_id;
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 40

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
int arch_prepare_bpf_trampoline(void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
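
/* Illustrative sketch of use case 2 above (a fentry+fexit attachment).
 * The locals mirror struct bpf_trampoline below; "image", "tprogs" and
 * "err" are hypothetical:
 *
 *	u32 flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 *
 *	err = arch_prepare_bpf_trampoline(image, image + PAGE_SIZE / 2,
 *					  &tr->func.model, flags, tprogs,
 *					  tr->func.addr);
 */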

struct bpf_ksym {
	unsigned long start;
	unsigned long end;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	void *image;
	u64 selector;
	struct bpf_ksym ksym;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}

#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline unsigned int bpf_dispatcher_##name##_func(		\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
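
/* Illustrative sketch: a subsystem declares a dispatcher in a header and
 * defines it once in a .c file, then swaps the attached program under its
 * own locking ("my_subsys", "old_prog" and "new_prog" are hypothetical):
 *
 *	DECLARE_BPF_DISPATCHER(my_subsys)	// in a header
 *	DEFINE_BPF_DISPATCHER(my_subsys)	// in one .c file
 *
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_subsys),
 *				   old_prog, new_prog);
 */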
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	return NULL;
}
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *ip;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool ip_stable;
	u8 adj_off;
	u16 reason;
};

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	struct bpf_prog *linked_prog;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	enum bpf_tramp_prog_type trampoline_prog_type;
	struct bpf_trampoline *trampoline;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program which FD is
	 * stored in the map to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_struct_ops_value;
struct btf_type;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64

struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
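
/* Illustrative sketch (hypothetical caller): a map type's alloc_check can
 * reject the invalid BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG combination up
 * front, which is what keeps the "not possible" assumption in
 * bpf_map_flags_to_cap() true:
 *
 *	if (!bpf_map_flags_access_ok(attr->map_flags))
 *		return -EINVAL;
 */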

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const char *kernel_type_name(u32 btf_type_id);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);
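
/* Illustrative sketch of the lifecycle described above ("attach_point" and
 * "prog" are hypothetical; the attach point's own lock is assumed held):
 *
 *	struct bpf_prog_array *old_array = ...;	// currently attached array
 *	struct bpf_prog_array *new_array;
 *
 *	if (bpf_prog_array_copy(old_array, NULL, prog, &new_array))
 *		return -ENOMEM;
 *	old_array = xchg(&attach_point, new_array);
 *	bpf_prog_array_free(old_array);
 */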

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		migrate_enable();			\
		_ret;					\
	 })

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order
 * bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		migrate_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 *
 * Use the preemption safe inc/dec variants on RT because migrate disable
 * is preemptible on RT and preemption in the middle of the RMW operation
 * might lead to inconsistent state. Use the raw variants for non RT
 * kernels as migrate_disable() maps to preempt_disable() so the slightly
 * more expensive save operation can be avoided.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
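
/* Illustrative sketch: the syscall-side map update path brackets the real
 * work with these helpers so an instrumentation program cannot run on this
 * CPU in between and recurse into the same map ("do_map_update" is
 * hypothetical):
 *
 *	bpf_disable_instrumentation();
 *	err = do_map_update(map, key, value, flags);
 *	bpf_enable_instrumentation();
 */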

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

struct bpf_link {
	atomic64_t refcnt;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
};

void bpf_link_init(struct bpf_link *link, const struct bpf_link_ops *ops,
		   struct bpf_prog *prog);
void bpf_link_cleanup(struct bpf_link *link, struct file *link_file,
		      int link_fd);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
int btf_resolve_helper_id(struct bpf_verifier_log *log,
			  const struct bpf_func_proto *fn, int);
int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
			     struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
							    u32 key)
{
	return NULL;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);
int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif /* CONFIG_BPF_STREAM_PARSER */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;

const struct bpf_func_proto *bpf_tracing_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};

bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

#endif /* _LINUX_BPF_H */