]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - include/uapi/linux/bpf.h
bpf: Fix a typo of reuseport map in bpf.h.
[mirror_ubuntu-hirsute-kernel.git] / include / uapi / linux / bpf.h
1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 */
8 #ifndef _UAPI__LINUX_BPF_H__
9 #define _UAPI__LINUX_BPF_H__
10
11 #include <linux/types.h>
12 #include <linux/bpf_common.h>
13
14 /* Extended instruction set based on top of classic BPF */
15
16 /* instruction classes */
17 #define BPF_JMP32 0x06 /* jmp mode in word width */
18 #define BPF_ALU64 0x07 /* alu mode in double word width */
19
20 /* ld/ldx fields */
21 #define BPF_DW 0x18 /* double word (64-bit) */
22 #define BPF_XADD 0xc0 /* exclusive add */
23
24 /* alu/jmp fields */
25 #define BPF_MOV 0xb0 /* mov reg to reg */
26 #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
27
28 /* change endianness of a register */
29 #define BPF_END 0xd0 /* flags for endianness conversion: */
30 #define BPF_TO_LE 0x00 /* convert to little-endian */
31 #define BPF_TO_BE 0x08 /* convert to big-endian */
32 #define BPF_FROM_LE BPF_TO_LE
33 #define BPF_FROM_BE BPF_TO_BE
34
35 /* jmp encodings */
36 #define BPF_JNE 0x50 /* jump != */
37 #define BPF_JLT 0xa0 /* LT is unsigned, '<' */
38 #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
39 #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
40 #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
41 #define BPF_JSLT 0xc0 /* SLT is signed, '<' */
42 #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
43 #define BPF_CALL 0x80 /* function call */
44 #define BPF_EXIT 0x90 /* function return */
45
46 /* Register numbers */
47 enum {
48 BPF_REG_0 = 0,
49 BPF_REG_1,
50 BPF_REG_2,
51 BPF_REG_3,
52 BPF_REG_4,
53 BPF_REG_5,
54 BPF_REG_6,
55 BPF_REG_7,
56 BPF_REG_8,
57 BPF_REG_9,
58 BPF_REG_10,
59 __MAX_BPF_REG,
60 };
61
62 /* BPF has 10 general purpose 64-bit registers and stack frame. */
63 #define MAX_BPF_REG __MAX_BPF_REG
64
65 struct bpf_insn {
66 __u8 code; /* opcode */
67 __u8 dst_reg:4; /* dest register */
68 __u8 src_reg:4; /* source register */
69 __s16 off; /* signed offset */
70 __s32 imm; /* signed immediate constant */
71 };
72
73 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
74 struct bpf_lpm_trie_key {
75 __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
76 __u8 data[0]; /* Arbitrary size */
77 };
78
79 struct bpf_cgroup_storage_key {
80 __u64 cgroup_inode_id; /* cgroup inode id */
81 __u32 attach_type; /* program attach type */
82 };
83
84 union bpf_iter_link_info {
85 struct {
86 __u32 map_fd;
87 } map;
88 };
89
90 /* BPF syscall commands, see bpf(2) man-page for details. */
91 enum bpf_cmd {
92 BPF_MAP_CREATE,
93 BPF_MAP_LOOKUP_ELEM,
94 BPF_MAP_UPDATE_ELEM,
95 BPF_MAP_DELETE_ELEM,
96 BPF_MAP_GET_NEXT_KEY,
97 BPF_PROG_LOAD,
98 BPF_OBJ_PIN,
99 BPF_OBJ_GET,
100 BPF_PROG_ATTACH,
101 BPF_PROG_DETACH,
102 BPF_PROG_TEST_RUN,
103 BPF_PROG_GET_NEXT_ID,
104 BPF_MAP_GET_NEXT_ID,
105 BPF_PROG_GET_FD_BY_ID,
106 BPF_MAP_GET_FD_BY_ID,
107 BPF_OBJ_GET_INFO_BY_FD,
108 BPF_PROG_QUERY,
109 BPF_RAW_TRACEPOINT_OPEN,
110 BPF_BTF_LOAD,
111 BPF_BTF_GET_FD_BY_ID,
112 BPF_TASK_FD_QUERY,
113 BPF_MAP_LOOKUP_AND_DELETE_ELEM,
114 BPF_MAP_FREEZE,
115 BPF_BTF_GET_NEXT_ID,
116 BPF_MAP_LOOKUP_BATCH,
117 BPF_MAP_LOOKUP_AND_DELETE_BATCH,
118 BPF_MAP_UPDATE_BATCH,
119 BPF_MAP_DELETE_BATCH,
120 BPF_LINK_CREATE,
121 BPF_LINK_UPDATE,
122 BPF_LINK_GET_FD_BY_ID,
123 BPF_LINK_GET_NEXT_ID,
124 BPF_ENABLE_STATS,
125 BPF_ITER_CREATE,
126 BPF_LINK_DETACH,
127 BPF_PROG_BIND_MAP,
128 };
129
130 enum bpf_map_type {
131 BPF_MAP_TYPE_UNSPEC,
132 BPF_MAP_TYPE_HASH,
133 BPF_MAP_TYPE_ARRAY,
134 BPF_MAP_TYPE_PROG_ARRAY,
135 BPF_MAP_TYPE_PERF_EVENT_ARRAY,
136 BPF_MAP_TYPE_PERCPU_HASH,
137 BPF_MAP_TYPE_PERCPU_ARRAY,
138 BPF_MAP_TYPE_STACK_TRACE,
139 BPF_MAP_TYPE_CGROUP_ARRAY,
140 BPF_MAP_TYPE_LRU_HASH,
141 BPF_MAP_TYPE_LRU_PERCPU_HASH,
142 BPF_MAP_TYPE_LPM_TRIE,
143 BPF_MAP_TYPE_ARRAY_OF_MAPS,
144 BPF_MAP_TYPE_HASH_OF_MAPS,
145 BPF_MAP_TYPE_DEVMAP,
146 BPF_MAP_TYPE_SOCKMAP,
147 BPF_MAP_TYPE_CPUMAP,
148 BPF_MAP_TYPE_XSKMAP,
149 BPF_MAP_TYPE_SOCKHASH,
150 BPF_MAP_TYPE_CGROUP_STORAGE,
151 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
152 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
153 BPF_MAP_TYPE_QUEUE,
154 BPF_MAP_TYPE_STACK,
155 BPF_MAP_TYPE_SK_STORAGE,
156 BPF_MAP_TYPE_DEVMAP_HASH,
157 BPF_MAP_TYPE_STRUCT_OPS,
158 BPF_MAP_TYPE_RINGBUF,
159 BPF_MAP_TYPE_INODE_STORAGE,
160 BPF_MAP_TYPE_TASK_STORAGE,
161 };
162
163 /* Note that tracing related programs such as
164 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
165 * are not subject to a stable API since kernel internal data
166 * structures can change from release to release and may
167 * therefore break existing tracing BPF programs. Tracing BPF
168 * programs correspond to /a/ specific kernel which is to be
169 * analyzed, and not /a/ specific kernel /and/ all future ones.
170 */
171 enum bpf_prog_type {
172 BPF_PROG_TYPE_UNSPEC,
173 BPF_PROG_TYPE_SOCKET_FILTER,
174 BPF_PROG_TYPE_KPROBE,
175 BPF_PROG_TYPE_SCHED_CLS,
176 BPF_PROG_TYPE_SCHED_ACT,
177 BPF_PROG_TYPE_TRACEPOINT,
178 BPF_PROG_TYPE_XDP,
179 BPF_PROG_TYPE_PERF_EVENT,
180 BPF_PROG_TYPE_CGROUP_SKB,
181 BPF_PROG_TYPE_CGROUP_SOCK,
182 BPF_PROG_TYPE_LWT_IN,
183 BPF_PROG_TYPE_LWT_OUT,
184 BPF_PROG_TYPE_LWT_XMIT,
185 BPF_PROG_TYPE_SOCK_OPS,
186 BPF_PROG_TYPE_SK_SKB,
187 BPF_PROG_TYPE_CGROUP_DEVICE,
188 BPF_PROG_TYPE_SK_MSG,
189 BPF_PROG_TYPE_RAW_TRACEPOINT,
190 BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
191 BPF_PROG_TYPE_LWT_SEG6LOCAL,
192 BPF_PROG_TYPE_LIRC_MODE2,
193 BPF_PROG_TYPE_SK_REUSEPORT,
194 BPF_PROG_TYPE_FLOW_DISSECTOR,
195 BPF_PROG_TYPE_CGROUP_SYSCTL,
196 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
197 BPF_PROG_TYPE_CGROUP_SOCKOPT,
198 BPF_PROG_TYPE_TRACING,
199 BPF_PROG_TYPE_STRUCT_OPS,
200 BPF_PROG_TYPE_EXT,
201 BPF_PROG_TYPE_LSM,
202 BPF_PROG_TYPE_SK_LOOKUP,
203 };
204
205 enum bpf_attach_type {
206 BPF_CGROUP_INET_INGRESS,
207 BPF_CGROUP_INET_EGRESS,
208 BPF_CGROUP_INET_SOCK_CREATE,
209 BPF_CGROUP_SOCK_OPS,
210 BPF_SK_SKB_STREAM_PARSER,
211 BPF_SK_SKB_STREAM_VERDICT,
212 BPF_CGROUP_DEVICE,
213 BPF_SK_MSG_VERDICT,
214 BPF_CGROUP_INET4_BIND,
215 BPF_CGROUP_INET6_BIND,
216 BPF_CGROUP_INET4_CONNECT,
217 BPF_CGROUP_INET6_CONNECT,
218 BPF_CGROUP_INET4_POST_BIND,
219 BPF_CGROUP_INET6_POST_BIND,
220 BPF_CGROUP_UDP4_SENDMSG,
221 BPF_CGROUP_UDP6_SENDMSG,
222 BPF_LIRC_MODE2,
223 BPF_FLOW_DISSECTOR,
224 BPF_CGROUP_SYSCTL,
225 BPF_CGROUP_UDP4_RECVMSG,
226 BPF_CGROUP_UDP6_RECVMSG,
227 BPF_CGROUP_GETSOCKOPT,
228 BPF_CGROUP_SETSOCKOPT,
229 BPF_TRACE_RAW_TP,
230 BPF_TRACE_FENTRY,
231 BPF_TRACE_FEXIT,
232 BPF_MODIFY_RETURN,
233 BPF_LSM_MAC,
234 BPF_TRACE_ITER,
235 BPF_CGROUP_INET4_GETPEERNAME,
236 BPF_CGROUP_INET6_GETPEERNAME,
237 BPF_CGROUP_INET4_GETSOCKNAME,
238 BPF_CGROUP_INET6_GETSOCKNAME,
239 BPF_XDP_DEVMAP,
240 BPF_CGROUP_INET_SOCK_RELEASE,
241 BPF_XDP_CPUMAP,
242 BPF_SK_LOOKUP,
243 BPF_XDP,
244 __MAX_BPF_ATTACH_TYPE
245 };
246
247 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
248
249 enum bpf_link_type {
250 BPF_LINK_TYPE_UNSPEC = 0,
251 BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
252 BPF_LINK_TYPE_TRACING = 2,
253 BPF_LINK_TYPE_CGROUP = 3,
254 BPF_LINK_TYPE_ITER = 4,
255 BPF_LINK_TYPE_NETNS = 5,
256 BPF_LINK_TYPE_XDP = 6,
257
258 MAX_BPF_LINK_TYPE,
259 };
260
261 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
262 *
263 * NONE(default): No further bpf programs allowed in the subtree.
264 *
265 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
266 * the program in this cgroup yields to sub-cgroup program.
267 *
268 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
269 * that cgroup program gets run in addition to the program in this cgroup.
270 *
271 * Only one program is allowed to be attached to a cgroup with
272 * NONE or BPF_F_ALLOW_OVERRIDE flag.
273 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
274 * release old program and attach the new one. Attach flags has to match.
275 *
276 * Multiple programs are allowed to be attached to a cgroup with
277 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
278 * (those that were attached first, run first)
279 * The programs of sub-cgroup are executed first, then programs of
280 * this cgroup and then programs of parent cgroup.
281 * When children program makes decision (like picking TCP CA or sock bind)
282 * parent program has a chance to override it.
283 *
284 * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
285 * programs for a cgroup. Though it's possible to replace an old program at
286 * any position by also specifying BPF_F_REPLACE flag and position itself in
287 * replace_bpf_fd attribute. Old program at this position will be released.
288 *
289 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
290 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
291 * Ex1:
292 * cgrp1 (MULTI progs A, B) ->
293 * cgrp2 (OVERRIDE prog C) ->
294 * cgrp3 (MULTI prog D) ->
295 * cgrp4 (OVERRIDE prog E) ->
296 * cgrp5 (NONE prog F)
297 * the event in cgrp5 triggers execution of F,D,A,B in that order.
298 * if prog F is detached, the execution is E,D,A,B
299 * if prog F and D are detached, the execution is E,A,B
300 * if prog F, E and D are detached, the execution is C,A,B
301 *
302 * All eligible programs are executed regardless of return code from
303 * earlier programs.
304 */
305 #define BPF_F_ALLOW_OVERRIDE (1U << 0)
306 #define BPF_F_ALLOW_MULTI (1U << 1)
307 #define BPF_F_REPLACE (1U << 2)
308
309 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
310 * verifier will perform strict alignment checking as if the kernel
311 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
312 * and NET_IP_ALIGN defined to 2.
313 */
314 #define BPF_F_STRICT_ALIGNMENT (1U << 0)
315
316 /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
317 * verifier will allow any alignment whatsoever. On platforms
318 * with strict alignment requirements for loads ands stores (such
319 * as sparc and mips) the verifier validates that all loads and
320 * stores provably follow this requirement. This flag turns that
321 * checking and enforcement off.
322 *
323 * It is mostly used for testing when we want to validate the
324 * context and memory access aspects of the verifier, but because
325 * of an unaligned access the alignment check would trigger before
326 * the one we are interested in.
327 */
328 #define BPF_F_ANY_ALIGNMENT (1U << 1)
329
330 /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose.
331 * Verifier does sub-register def/use analysis and identifies instructions whose
332 * def only matters for low 32-bit, high 32-bit is never referenced later
333 * through implicit zero extension. Therefore verifier notifies JIT back-ends
334 * that it is safe to ignore clearing high 32-bit for these instructions. This
335 * saves some back-ends a lot of code-gen. However such optimization is not
336 * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends
337 * hence hasn't used verifier's analysis result. But, we really want to have a
338 * way to be able to verify the correctness of the described optimization on
339 * x86_64 on which testsuites are frequently exercised.
340 *
341 * So, this flag is introduced. Once it is set, verifier will randomize high
342 * 32-bit for those instructions who has been identified as safe to ignore them.
343 * Then, if verifier is not doing correct analysis, such randomization will
344 * regress tests to expose bugs.
345 */
346 #define BPF_F_TEST_RND_HI32 (1U << 2)
347
348 /* The verifier internal test flag. Behavior is undefined */
349 #define BPF_F_TEST_STATE_FREQ (1U << 3)
350
351 /* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
352 * restrict map and helper usage for such programs. Sleepable BPF programs can
353 * only be attached to hooks where kernel execution context allows sleeping.
354 * Such programs are allowed to use helpers that may sleep like
355 * bpf_copy_from_user().
356 */
357 #define BPF_F_SLEEPABLE (1U << 4)
358
359 /* When BPF ldimm64's insn[0].src_reg != 0 then this can have
360 * the following extensions:
361 *
362 * insn[0].src_reg: BPF_PSEUDO_MAP_FD
363 * insn[0].imm: map fd
364 * insn[1].imm: 0
365 * insn[0].off: 0
366 * insn[1].off: 0
367 * ldimm64 rewrite: address of map
368 * verifier type: CONST_PTR_TO_MAP
369 */
370 #define BPF_PSEUDO_MAP_FD 1
371 /* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE
372 * insn[0].imm: map fd
373 * insn[1].imm: offset into value
374 * insn[0].off: 0
375 * insn[1].off: 0
376 * ldimm64 rewrite: address of map[0]+offset
377 * verifier type: PTR_TO_MAP_VALUE
378 */
379 #define BPF_PSEUDO_MAP_VALUE 2
380 /* insn[0].src_reg: BPF_PSEUDO_BTF_ID
381 * insn[0].imm: kernel btd id of VAR
382 * insn[1].imm: 0
383 * insn[0].off: 0
384 * insn[1].off: 0
385 * ldimm64 rewrite: address of the kernel variable
386 * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
387 * is struct/union.
388 */
389 #define BPF_PSEUDO_BTF_ID 3
390
391 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
392 * offset to another bpf function
393 */
394 #define BPF_PSEUDO_CALL 1
395
396 /* flags for BPF_MAP_UPDATE_ELEM command */
397 enum {
398 BPF_ANY = 0, /* create new element or update existing */
399 BPF_NOEXIST = 1, /* create new element if it didn't exist */
400 BPF_EXIST = 2, /* update existing element */
401 BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
402 };
403
404 /* flags for BPF_MAP_CREATE command */
405 enum {
406 BPF_F_NO_PREALLOC = (1U << 0),
407 /* Instead of having one common LRU list in the
408 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
409 * which can scale and perform better.
410 * Note, the LRU nodes (including free nodes) cannot be moved
411 * across different LRU lists.
412 */
413 BPF_F_NO_COMMON_LRU = (1U << 1),
414 /* Specify numa node during map creation */
415 BPF_F_NUMA_NODE = (1U << 2),
416
417 /* Flags for accessing BPF object from syscall side. */
418 BPF_F_RDONLY = (1U << 3),
419 BPF_F_WRONLY = (1U << 4),
420
421 /* Flag for stack_map, store build_id+offset instead of pointer */
422 BPF_F_STACK_BUILD_ID = (1U << 5),
423
424 /* Zero-initialize hash function seed. This should only be used for testing. */
425 BPF_F_ZERO_SEED = (1U << 6),
426
427 /* Flags for accessing BPF object from program side. */
428 BPF_F_RDONLY_PROG = (1U << 7),
429 BPF_F_WRONLY_PROG = (1U << 8),
430
431 /* Clone map from listener for newly accepted socket */
432 BPF_F_CLONE = (1U << 9),
433
434 /* Enable memory-mapping BPF map */
435 BPF_F_MMAPABLE = (1U << 10),
436
437 /* Share perf_event among processes */
438 BPF_F_PRESERVE_ELEMS = (1U << 11),
439
440 /* Create a map that is suitable to be an inner map with dynamic max entries */
441 BPF_F_INNER_MAP = (1U << 12),
442 };
443
444 /* Flags for BPF_PROG_QUERY. */
445
446 /* Query effective (directly attached + inherited from ancestor cgroups)
447 * programs that will be executed for events within a cgroup.
448 * attach_flags with this flag are returned only for directly attached programs.
449 */
450 #define BPF_F_QUERY_EFFECTIVE (1U << 0)
451
452 /* Flags for BPF_PROG_TEST_RUN */
453
454 /* If set, run the test on the cpu specified by bpf_attr.test.cpu */
455 #define BPF_F_TEST_RUN_ON_CPU (1U << 0)
456
457 /* type for BPF_ENABLE_STATS */
458 enum bpf_stats_type {
459 /* enabled run_time_ns and run_cnt */
460 BPF_STATS_RUN_TIME = 0,
461 };
462
463 enum bpf_stack_build_id_status {
464 /* user space need an empty entry to identify end of a trace */
465 BPF_STACK_BUILD_ID_EMPTY = 0,
466 /* with valid build_id and offset */
467 BPF_STACK_BUILD_ID_VALID = 1,
468 /* couldn't get build_id, fallback to ip */
469 BPF_STACK_BUILD_ID_IP = 2,
470 };
471
472 #define BPF_BUILD_ID_SIZE 20
473 struct bpf_stack_build_id {
474 __s32 status;
475 unsigned char build_id[BPF_BUILD_ID_SIZE];
476 union {
477 __u64 offset;
478 __u64 ip;
479 };
480 };
481
482 #define BPF_OBJ_NAME_LEN 16U
483
484 union bpf_attr {
485 struct { /* anonymous struct used by BPF_MAP_CREATE command */
486 __u32 map_type; /* one of enum bpf_map_type */
487 __u32 key_size; /* size of key in bytes */
488 __u32 value_size; /* size of value in bytes */
489 __u32 max_entries; /* max number of entries in a map */
490 __u32 map_flags; /* BPF_MAP_CREATE related
491 * flags defined above.
492 */
493 __u32 inner_map_fd; /* fd pointing to the inner map */
494 __u32 numa_node; /* numa node (effective only if
495 * BPF_F_NUMA_NODE is set).
496 */
497 char map_name[BPF_OBJ_NAME_LEN];
498 __u32 map_ifindex; /* ifindex of netdev to create on */
499 __u32 btf_fd; /* fd pointing to a BTF type data */
500 __u32 btf_key_type_id; /* BTF type_id of the key */
501 __u32 btf_value_type_id; /* BTF type_id of the value */
502 __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
503 * struct stored as the
504 * map value
505 */
506 };
507
508 struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
509 __u32 map_fd;
510 __aligned_u64 key;
511 union {
512 __aligned_u64 value;
513 __aligned_u64 next_key;
514 };
515 __u64 flags;
516 };
517
518 struct { /* struct used by BPF_MAP_*_BATCH commands */
519 __aligned_u64 in_batch; /* start batch,
520 * NULL to start from beginning
521 */
522 __aligned_u64 out_batch; /* output: next start batch */
523 __aligned_u64 keys;
524 __aligned_u64 values;
525 __u32 count; /* input/output:
526 * input: # of key/value
527 * elements
528 * output: # of filled elements
529 */
530 __u32 map_fd;
531 __u64 elem_flags;
532 __u64 flags;
533 } batch;
534
535 struct { /* anonymous struct used by BPF_PROG_LOAD command */
536 __u32 prog_type; /* one of enum bpf_prog_type */
537 __u32 insn_cnt;
538 __aligned_u64 insns;
539 __aligned_u64 license;
540 __u32 log_level; /* verbosity level of verifier */
541 __u32 log_size; /* size of user buffer */
542 __aligned_u64 log_buf; /* user supplied buffer */
543 __u32 kern_version; /* not used */
544 __u32 prog_flags;
545 char prog_name[BPF_OBJ_NAME_LEN];
546 __u32 prog_ifindex; /* ifindex of netdev to prep for */
547 /* For some prog types expected attach type must be known at
548 * load time to verify attach type specific parts of prog
549 * (context accesses, allowed helpers, etc).
550 */
551 __u32 expected_attach_type;
552 __u32 prog_btf_fd; /* fd pointing to BTF type data */
553 __u32 func_info_rec_size; /* userspace bpf_func_info size */
554 __aligned_u64 func_info; /* func info */
555 __u32 func_info_cnt; /* number of bpf_func_info records */
556 __u32 line_info_rec_size; /* userspace bpf_line_info size */
557 __aligned_u64 line_info; /* line info */
558 __u32 line_info_cnt; /* number of bpf_line_info records */
559 __u32 attach_btf_id; /* in-kernel BTF type id to attach to */
560 union {
561 /* valid prog_fd to attach to bpf prog */
562 __u32 attach_prog_fd;
563 /* or valid module BTF object fd or 0 to attach to vmlinux */
564 __u32 attach_btf_obj_fd;
565 };
566 };
567
568 struct { /* anonymous struct used by BPF_OBJ_* commands */
569 __aligned_u64 pathname;
570 __u32 bpf_fd;
571 __u32 file_flags;
572 };
573
574 struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
575 __u32 target_fd; /* container object to attach to */
576 __u32 attach_bpf_fd; /* eBPF program to attach */
577 __u32 attach_type;
578 __u32 attach_flags;
579 __u32 replace_bpf_fd; /* previously attached eBPF
580 * program to replace if
581 * BPF_F_REPLACE is used
582 */
583 };
584
585 struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
586 __u32 prog_fd;
587 __u32 retval;
588 __u32 data_size_in; /* input: len of data_in */
589 __u32 data_size_out; /* input/output: len of data_out
590 * returns ENOSPC if data_out
591 * is too small.
592 */
593 __aligned_u64 data_in;
594 __aligned_u64 data_out;
595 __u32 repeat;
596 __u32 duration;
597 __u32 ctx_size_in; /* input: len of ctx_in */
598 __u32 ctx_size_out; /* input/output: len of ctx_out
599 * returns ENOSPC if ctx_out
600 * is too small.
601 */
602 __aligned_u64 ctx_in;
603 __aligned_u64 ctx_out;
604 __u32 flags;
605 __u32 cpu;
606 } test;
607
608 struct { /* anonymous struct used by BPF_*_GET_*_ID */
609 union {
610 __u32 start_id;
611 __u32 prog_id;
612 __u32 map_id;
613 __u32 btf_id;
614 __u32 link_id;
615 };
616 __u32 next_id;
617 __u32 open_flags;
618 };
619
620 struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
621 __u32 bpf_fd;
622 __u32 info_len;
623 __aligned_u64 info;
624 } info;
625
626 struct { /* anonymous struct used by BPF_PROG_QUERY command */
627 __u32 target_fd; /* container object to query */
628 __u32 attach_type;
629 __u32 query_flags;
630 __u32 attach_flags;
631 __aligned_u64 prog_ids;
632 __u32 prog_cnt;
633 } query;
634
635 struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
636 __u64 name;
637 __u32 prog_fd;
638 } raw_tracepoint;
639
640 struct { /* anonymous struct for BPF_BTF_LOAD */
641 __aligned_u64 btf;
642 __aligned_u64 btf_log_buf;
643 __u32 btf_size;
644 __u32 btf_log_size;
645 __u32 btf_log_level;
646 };
647
648 struct {
649 __u32 pid; /* input: pid */
650 __u32 fd; /* input: fd */
651 __u32 flags; /* input: flags */
652 __u32 buf_len; /* input/output: buf len */
653 __aligned_u64 buf; /* input/output:
654 * tp_name for tracepoint
655 * symbol for kprobe
656 * filename for uprobe
657 */
658 __u32 prog_id; /* output: prod_id */
659 __u32 fd_type; /* output: BPF_FD_TYPE_* */
660 __u64 probe_offset; /* output: probe_offset */
661 __u64 probe_addr; /* output: probe_addr */
662 } task_fd_query;
663
664 struct { /* struct used by BPF_LINK_CREATE command */
665 __u32 prog_fd; /* eBPF program to attach */
666 union {
667 __u32 target_fd; /* object to attach to */
668 __u32 target_ifindex; /* target ifindex */
669 };
670 __u32 attach_type; /* attach type */
671 __u32 flags; /* extra flags */
672 union {
673 __u32 target_btf_id; /* btf_id of target to attach to */
674 struct {
675 __aligned_u64 iter_info; /* extra bpf_iter_link_info */
676 __u32 iter_info_len; /* iter_info length */
677 };
678 };
679 } link_create;
680
681 struct { /* struct used by BPF_LINK_UPDATE command */
682 __u32 link_fd; /* link fd */
683 /* new program fd to update link with */
684 __u32 new_prog_fd;
685 __u32 flags; /* extra flags */
686 /* expected link's program fd; is specified only if
687 * BPF_F_REPLACE flag is set in flags */
688 __u32 old_prog_fd;
689 } link_update;
690
691 struct {
692 __u32 link_fd;
693 } link_detach;
694
695 struct { /* struct used by BPF_ENABLE_STATS command */
696 __u32 type;
697 } enable_stats;
698
699 struct { /* struct used by BPF_ITER_CREATE command */
700 __u32 link_fd;
701 __u32 flags;
702 } iter_create;
703
704 struct { /* struct used by BPF_PROG_BIND_MAP command */
705 __u32 prog_fd;
706 __u32 map_fd;
707 __u32 flags; /* extra flags */
708 } prog_bind_map;
709
710 } __attribute__((aligned(8)));
711
712 /* The description below is an attempt at providing documentation to eBPF
713 * developers about the multiple available eBPF helper functions. It can be
714 * parsed and used to produce a manual page. The workflow is the following,
715 * and requires the rst2man utility:
716 *
717 * $ ./scripts/bpf_helpers_doc.py \
718 * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
719 * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
720 * $ man /tmp/bpf-helpers.7
721 *
722 * Note that in order to produce this external documentation, some RST
723 * formatting is used in the descriptions to get "bold" and "italics" in
724 * manual pages. Also note that the few trailing white spaces are
725 * intentional, removing them would break paragraphs for rst2man.
726 *
727 * Start of BPF helper function descriptions:
728 *
729 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
730 * Description
731 * Perform a lookup in *map* for an entry associated to *key*.
732 * Return
733 * Map value associated to *key*, or **NULL** if no entry was
734 * found.
735 *
736 * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
737 * Description
738 * Add or update the value of the entry associated to *key* in
739 * *map* with *value*. *flags* is one of:
740 *
741 * **BPF_NOEXIST**
742 * The entry for *key* must not exist in the map.
743 * **BPF_EXIST**
744 * The entry for *key* must already exist in the map.
745 * **BPF_ANY**
746 * No condition on the existence of the entry for *key*.
747 *
748 * Flag value **BPF_NOEXIST** cannot be used for maps of types
749 * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
750 * elements always exist), the helper would return an error.
751 * Return
752 * 0 on success, or a negative error in case of failure.
753 *
754 * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
755 * Description
756 * Delete entry with *key* from *map*.
757 * Return
758 * 0 on success, or a negative error in case of failure.
759 *
760 * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
761 * Description
762 * For tracing programs, safely attempt to read *size* bytes from
763 * kernel space address *unsafe_ptr* and store the data in *dst*.
764 *
765 * Generally, use **bpf_probe_read_user**\ () or
766 * **bpf_probe_read_kernel**\ () instead.
767 * Return
768 * 0 on success, or a negative error in case of failure.
769 *
770 * u64 bpf_ktime_get_ns(void)
771 * Description
772 * Return the time elapsed since system boot, in nanoseconds.
773 * Does not include time the system was suspended.
774 * See: **clock_gettime**\ (**CLOCK_MONOTONIC**)
775 * Return
776 * Current *ktime*.
777 *
778 * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
779 * Description
780 * This helper is a "printk()-like" facility for debugging. It
781 * prints a message defined by format *fmt* (of size *fmt_size*)
782 * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
783 * available. It can take up to three additional **u64**
784 * arguments (as an eBPF helpers, the total number of arguments is
785 * limited to five).
786 *
787 * Each time the helper is called, it appends a line to the trace.
788 * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
789 * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
790 * The format of the trace is customizable, and the exact output
791 * one will get depends on the options set in
792 * *\/sys/kernel/debug/tracing/trace_options* (see also the
793 * *README* file under the same directory). However, it usually
794 * defaults to something like:
795 *
796 * ::
797 *
798 * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg>
799 *
800 * In the above:
801 *
802 * * ``telnet`` is the name of the current task.
803 * * ``470`` is the PID of the current task.
804 * * ``001`` is the CPU number on which the task is
805 * running.
806 * * In ``.N..``, each character refers to a set of
807 * options (whether irqs are enabled, scheduling
808 * options, whether hard/softirqs are running, level of
809 * preempt_disabled respectively). **N** means that
810 * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
811 * are set.
812 * * ``419421.045894`` is a timestamp.
813 * * ``0x00000001`` is a fake value used by BPF for the
814 * instruction pointer register.
815 * * ``<formatted msg>`` is the message formatted with
816 * *fmt*.
817 *
818 * The conversion specifiers supported by *fmt* are similar, but
819 * more limited than for printk(). They are **%d**, **%i**,
820 * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
821 * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
822 * of field, padding with zeroes, etc.) is available, and the
823 * helper will return **-EINVAL** (but print nothing) if it
824 * encounters an unknown specifier.
825 *
826 * Also, note that **bpf_trace_printk**\ () is slow, and should
827 * only be used for debugging purposes. For this reason, a notice
828 * block (spanning several lines) is printed to kernel logs and
829 * states that the helper should not be used "for production use"
830 * the first time this helper is used (or more precisely, when
831 * **trace_printk**\ () buffers are allocated). For passing values
832 * to user space, perf events should be preferred.
833 * Return
834 * The number of bytes written to the buffer, or a negative error
835 * in case of failure.
836 *
837 * u32 bpf_get_prandom_u32(void)
838 * Description
839 * Get a pseudo-random number.
840 *
841 * From a security point of view, this helper uses its own
842 * pseudo-random internal state, and cannot be used to infer the
843 * seed of other random functions in the kernel. However, it is
844 * essential to note that the generator used by the helper is not
845 * cryptographically secure.
846 * Return
847 * A random 32-bit unsigned value.
848 *
849 * u32 bpf_get_smp_processor_id(void)
850 * Description
851 * Get the SMP (symmetric multiprocessing) processor id. Note that
852 * all programs run with preemption disabled, which means that the
853 * SMP processor id is stable during all the execution of the
854 * program.
855 * Return
856 * The SMP id of the processor running the program.
857 *
858 * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
859 * Description
860 * Store *len* bytes from address *from* into the packet
861 * associated to *skb*, at *offset*. *flags* are a combination of
862 * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the
863 * checksum for the packet after storing the bytes) and
864 * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
865 * **->swhash** and *skb*\ **->l4hash** to 0).
866 *
867 * A call to this helper is susceptible to change the underlying
868 * packet buffer. Therefore, at load time, all checks on pointers
869 * previously done by the verifier are invalidated and must be
870 * performed again, if the helper is used in combination with
871 * direct packet access.
872 * Return
873 * 0 on success, or a negative error in case of failure.
874 *
875 * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
876 * Description
877 * Recompute the layer 3 (e.g. IP) checksum for the packet
878 * associated to *skb*. Computation is incremental, so the helper
879 * must know the former value of the header field that was
880 * modified (*from*), the new value of this field (*to*), and the
881 * number of bytes (2 or 4) for this field, stored in *size*.
882 * Alternatively, it is possible to store the difference between
883 * the previous and the new values of the header field in *to*, by
884 * setting *from* and *size* to 0. For both methods, *offset*
885 * indicates the location of the IP checksum within the packet.
886 *
887 * This helper works in combination with **bpf_csum_diff**\ (),
888 * which does not update the checksum in-place, but offers more
889 * flexibility and can handle sizes larger than 2 or 4 for the
890 * checksum to update.
891 *
892 * A call to this helper is susceptible to change the underlying
893 * packet buffer. Therefore, at load time, all checks on pointers
894 * previously done by the verifier are invalidated and must be
895 * performed again, if the helper is used in combination with
896 * direct packet access.
897 * Return
898 * 0 on success, or a negative error in case of failure.
899 *
900 * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
901 * Description
902 * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
903 * packet associated to *skb*. Computation is incremental, so the
904 * helper must know the former value of the header field that was
905 * modified (*from*), the new value of this field (*to*), and the
906 * number of bytes (2 or 4) for this field, stored on the lowest
907 * four bits of *flags*. Alternatively, it is possible to store
908 * the difference between the previous and the new values of the
909 * header field in *to*, by setting *from* and the four lowest
910 * bits of *flags* to 0. For both methods, *offset* indicates the
911 * location of the IP checksum within the packet. In addition to
912 * the size of the field, *flags* can be added (bitwise OR) actual
913 * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
914 * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
915 * for updates resulting in a null checksum the value is set to
916 * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
917 * the checksum is to be computed against a pseudo-header.
918 *
919 * This helper works in combination with **bpf_csum_diff**\ (),
920 * which does not update the checksum in-place, but offers more
921 * flexibility and can handle sizes larger than 2 or 4 for the
922 * checksum to update.
923 *
924 * A call to this helper is susceptible to change the underlying
925 * packet buffer. Therefore, at load time, all checks on pointers
926 * previously done by the verifier are invalidated and must be
927 * performed again, if the helper is used in combination with
928 * direct packet access.
929 * Return
930 * 0 on success, or a negative error in case of failure.
931 *
932 * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
933 * Description
934 * This special helper is used to trigger a "tail call", or in
935 * other words, to jump into another eBPF program. The same stack
936 * frame is used (but values on stack and in registers for the
937 * caller are not accessible to the callee). This mechanism allows
938 * for program chaining, either for raising the maximum number of
939 * available eBPF instructions, or to execute given programs in
940 * conditional blocks. For security reasons, there is an upper
941 * limit to the number of successive tail calls that can be
942 * performed.
943 *
944 * Upon call of this helper, the program attempts to jump into a
945 * program referenced at index *index* in *prog_array_map*, a
946 * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
947 * *ctx*, a pointer to the context.
948 *
949 * If the call succeeds, the kernel immediately runs the first
950 * instruction of the new program. This is not a function call,
951 * and it never returns to the previous program. If the call
952 * fails, then the helper has no effect, and the caller continues
953 * to run its subsequent instructions. A call can fail if the
954 * destination program for the jump does not exist (i.e. *index*
955 * is superior to the number of entries in *prog_array_map*), or
956 * if the maximum number of tail calls has been reached for this
957 * chain of programs. This limit is defined in the kernel by the
958 * macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
959 * which is currently set to 32.
960 * Return
961 * 0 on success, or a negative error in case of failure.
962 *
963 * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
964 * Description
965 * Clone and redirect the packet associated to *skb* to another
966 * net device of index *ifindex*. Both ingress and egress
967 * interfaces can be used for redirection. The **BPF_F_INGRESS**
968 * value in *flags* is used to make the distinction (ingress path
969 * is selected if the flag is present, egress path otherwise).
970 * This is the only flag supported for now.
971 *
972 * In comparison with **bpf_redirect**\ () helper,
973 * **bpf_clone_redirect**\ () has the associated cost of
974 * duplicating the packet buffer, but this can be executed out of
975 * the eBPF program. Conversely, **bpf_redirect**\ () is more
976 * efficient, but it is handled through an action code where the
977 * redirection happens only after the eBPF program has returned.
978 *
979 * A call to this helper is susceptible to change the underlying
980 * packet buffer. Therefore, at load time, all checks on pointers
981 * previously done by the verifier are invalidated and must be
982 * performed again, if the helper is used in combination with
983 * direct packet access.
984 * Return
985 * 0 on success, or a negative error in case of failure.
986 *
987 * u64 bpf_get_current_pid_tgid(void)
988 * Return
989 * A 64-bit integer containing the current tgid and pid, and
990 * created as such:
991 * *current_task*\ **->tgid << 32 \|**
992 * *current_task*\ **->pid**.
993 *
994 * u64 bpf_get_current_uid_gid(void)
995 * Return
996 * A 64-bit integer containing the current GID and UID, and
997 * created as such: *current_gid* **<< 32 \|** *current_uid*.
998 *
999 * long bpf_get_current_comm(void *buf, u32 size_of_buf)
1000 * Description
1001 * Copy the **comm** attribute of the current task into *buf* of
1002 * *size_of_buf*. The **comm** attribute contains the name of
1003 * the executable (excluding the path) for the current task. The
1004 * *size_of_buf* must be strictly positive. On success, the
1005 * helper makes sure that the *buf* is NUL-terminated. On failure,
1006 * it is filled with zeroes.
1007 * Return
1008 * 0 on success, or a negative error in case of failure.
1009 *
1010 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
1011 * Description
1012 * Retrieve the classid for the current task, i.e. for the net_cls
1013 * cgroup to which *skb* belongs.
1014 *
1015 * This helper can be used on TC egress path, but not on ingress.
1016 *
1017 * The net_cls cgroup provides an interface to tag network packets
1018 * based on a user-provided identifier for all traffic coming from
1019 * the tasks belonging to the related cgroup. See also the related
1020 * kernel documentation, available from the Linux sources in file
1021 * *Documentation/admin-guide/cgroup-v1/net_cls.rst*.
1022 *
1023 * The Linux kernel has two versions for cgroups: there are
1024 * cgroups v1 and cgroups v2. Both are available to users, who can
1025 * use a mixture of them, but note that the net_cls cgroup is for
1026 * cgroup v1 only. This makes it incompatible with BPF programs
1027 * run on cgroups, which is a cgroup-v2-only feature (a socket can
1028 * only hold data for one version of cgroups at a time).
1029 *
1030 * This helper is only available is the kernel was compiled with
1031 * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
1032 * "**y**" or to "**m**".
1033 * Return
1034 * The classid, or 0 for the default unconfigured classid.
1035 *
1036 * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
1037 * Description
1038 * Push a *vlan_tci* (VLAN tag control information) of protocol
1039 * *vlan_proto* to the packet associated to *skb*, then update
1040 * the checksum. Note that if *vlan_proto* is different from
1041 * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
1042 * be **ETH_P_8021Q**.
1043 *
1044 * A call to this helper is susceptible to change the underlying
1045 * packet buffer. Therefore, at load time, all checks on pointers
1046 * previously done by the verifier are invalidated and must be
1047 * performed again, if the helper is used in combination with
1048 * direct packet access.
1049 * Return
1050 * 0 on success, or a negative error in case of failure.
1051 *
1052 * long bpf_skb_vlan_pop(struct sk_buff *skb)
1053 * Description
1054 * Pop a VLAN header from the packet associated to *skb*.
1055 *
1056 * A call to this helper is susceptible to change the underlying
1057 * packet buffer. Therefore, at load time, all checks on pointers
1058 * previously done by the verifier are invalidated and must be
1059 * performed again, if the helper is used in combination with
1060 * direct packet access.
1061 * Return
1062 * 0 on success, or a negative error in case of failure.
1063 *
1064 * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
1065 * Description
1066 * Get tunnel metadata. This helper takes a pointer *key* to an
1067 * empty **struct bpf_tunnel_key** of **size**, that will be
1068 * filled with tunnel metadata for the packet associated to *skb*.
1069 * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
1070 * indicates that the tunnel is based on IPv6 protocol instead of
1071 * IPv4.
1072 *
1073 * The **struct bpf_tunnel_key** is an object that generalizes the
1074 * principal parameters used by various tunneling protocols into a
1075 * single struct. This way, it can be used to easily make a
1076 * decision based on the contents of the encapsulation header,
1077 * "summarized" in this struct. In particular, it holds the IP
1078 * address of the remote end (IPv4 or IPv6, depending on the case)
1079 * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
1080 * this struct exposes the *key*\ **->tunnel_id**, which is
1081 * generally mapped to a VNI (Virtual Network Identifier), making
1082 * it programmable together with the **bpf_skb_set_tunnel_key**\
1083 * () helper.
1084 *
1085 * Let's imagine that the following code is part of a program
1086 * attached to the TC ingress interface, on one end of a GRE
1087 * tunnel, and is supposed to filter out all messages coming from
1088 * remote ends with IPv4 address other than 10.0.0.1:
1089 *
1090 * ::
1091 *
1092 * int ret;
1093 * struct bpf_tunnel_key key = {};
1094 *
1095 * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
1096 * if (ret < 0)
1097 * return TC_ACT_SHOT; // drop packet
1098 *
1099 * if (key.remote_ipv4 != 0x0a000001)
1100 * return TC_ACT_SHOT; // drop packet
1101 *
1102 * return TC_ACT_OK; // accept packet
1103 *
1104 * This interface can also be used with all encapsulation devices
1105 * that can operate in "collect metadata" mode: instead of having
1106 * one network device per specific configuration, the "collect
1107 * metadata" mode only requires a single device where the
1108 * configuration can be extracted from this helper.
1109 *
1110 * This can be used together with various tunnels such as VXLan,
1111 * Geneve, GRE or IP in IP (IPIP).
1112 * Return
1113 * 0 on success, or a negative error in case of failure.
1114 *
1115 * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
1116 * Description
1117 * Populate tunnel metadata for packet associated to *skb.* The
1118 * tunnel metadata is set to the contents of *key*, of *size*. The
1119 * *flags* can be set to a combination of the following values:
1120 *
1121 * **BPF_F_TUNINFO_IPV6**
1122 * Indicate that the tunnel is based on IPv6 protocol
1123 * instead of IPv4.
1124 * **BPF_F_ZERO_CSUM_TX**
1125 * For IPv4 packets, add a flag to tunnel metadata
1126 * indicating that checksum computation should be skipped
1127 * and checksum set to zeroes.
1128 * **BPF_F_DONT_FRAGMENT**
1129 * Add a flag to tunnel metadata indicating that the
1130 * packet should not be fragmented.
1131 * **BPF_F_SEQ_NUMBER**
1132 * Add a flag to tunnel metadata indicating that a
1133 * sequence number should be added to tunnel header before
1134 * sending the packet. This flag was added for GRE
1135 * encapsulation, but might be used with other protocols
1136 * as well in the future.
1137 *
1138 * Here is a typical usage on the transmit path:
1139 *
1140 * ::
1141 *
1142 * struct bpf_tunnel_key key;
1143 * populate key ...
1144 * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
1145 * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
1146 *
1147 * See also the description of the **bpf_skb_get_tunnel_key**\ ()
1148 * helper for additional information.
1149 * Return
1150 * 0 on success, or a negative error in case of failure.
1151 *
1152 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
1153 * Description
1154 * Read the value of a perf event counter. This helper relies on a
1155 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
1156 * the perf event counter is selected when *map* is updated with
1157 * perf event file descriptors. The *map* is an array whose size
1158 * is the number of available CPUs, and each cell contains a value
1159 * relative to one CPU. The value to retrieve is indicated by
1160 * *flags*, that contains the index of the CPU to look up, masked
1161 * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1162 * **BPF_F_CURRENT_CPU** to indicate that the value for the
1163 * current CPU should be retrieved.
1164 *
1165 * Note that before Linux 4.13, only hardware perf event can be
1166 * retrieved.
1167 *
1168 * Also, be aware that the newer helper
1169 * **bpf_perf_event_read_value**\ () is recommended over
1170 * **bpf_perf_event_read**\ () in general. The latter has some ABI
1171 * quirks where error and counter value are used as a return code
1172 * (which is wrong to do since ranges may overlap). This issue is
1173 * fixed with **bpf_perf_event_read_value**\ (), which at the same
1174 * time provides more features over the **bpf_perf_event_read**\
1175 * () interface. Please refer to the description of
1176 * **bpf_perf_event_read_value**\ () for details.
1177 * Return
1178 * The value of the perf event counter read from the map, or a
1179 * negative error code in case of failure.
1180 *
1181 * long bpf_redirect(u32 ifindex, u64 flags)
1182 * Description
1183 * Redirect the packet to another net device of index *ifindex*.
1184 * This helper is somewhat similar to **bpf_clone_redirect**\
1185 * (), except that the packet is not cloned, which provides
1186 * increased performance.
1187 *
1188 * Except for XDP, both ingress and egress interfaces can be used
1189 * for redirection. The **BPF_F_INGRESS** value in *flags* is used
1190 * to make the distinction (ingress path is selected if the flag
1191 * is present, egress path otherwise). Currently, XDP only
1192 * supports redirection to the egress interface, and accepts no
1193 * flag at all.
1194 *
1195 * The same effect can also be attained with the more generic
1196 * **bpf_redirect_map**\ (), which uses a BPF map to store the
1197 * redirect target instead of providing it directly to the helper.
1198 * Return
1199 * For XDP, the helper returns **XDP_REDIRECT** on success or
1200 * **XDP_ABORTED** on error. For other program types, the values
1201 * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
1202 * error.
1203 *
1204 * u32 bpf_get_route_realm(struct sk_buff *skb)
1205 * Description
1206 * Retrieve the realm or the route, that is to say the
1207 * **tclassid** field of the destination for the *skb*. The
1208 * identifier retrieved is a user-provided tag, similar to the
1209 * one used with the net_cls cgroup (see description for
1210 * **bpf_get_cgroup_classid**\ () helper), but here this tag is
1211 * held by a route (a destination entry), not by a task.
1212 *
1213 * Retrieving this identifier works with the clsact TC egress hook
1214 * (see also **tc-bpf(8)**), or alternatively on conventional
1215 * classful egress qdiscs, but not on TC ingress path. In case of
1216 * clsact TC egress hook, this has the advantage that, internally,
1217 * the destination entry has not been dropped yet in the transmit
1218 * path. Therefore, the destination entry does not need to be
1219 * artificially held via **netif_keep_dst**\ () for a classful
1220 * qdisc until the *skb* is freed.
1221 *
1222 * This helper is available only if the kernel was compiled with
1223 * **CONFIG_IP_ROUTE_CLASSID** configuration option.
1224 * Return
1225 * The realm of the route for the packet associated to *skb*, or 0
1226 * if none was found.
1227 *
1228 * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
1229 * Description
1230 * Write raw *data* blob into a special BPF perf event held by
1231 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
1232 * event must have the following attributes: **PERF_SAMPLE_RAW**
1233 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
1234 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
1235 *
1236 * The *flags* are used to indicate the index in *map* for which
1237 * the value must be put, masked with **BPF_F_INDEX_MASK**.
1238 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
1239 * to indicate that the index of the current CPU core should be
1240 * used.
1241 *
1242 * The value to write, of *size*, is passed through eBPF stack and
1243 * pointed by *data*.
1244 *
1245 * The context of the program *ctx* needs also be passed to the
1246 * helper.
1247 *
1248 * On user space, a program willing to read the values needs to
1249 * call **perf_event_open**\ () on the perf event (either for
1250 * one or for all CPUs) and to store the file descriptor into the
1251 * *map*. This must be done before the eBPF program can send data
1252 * into it. An example is available in file
1253 * *samples/bpf/trace_output_user.c* in the Linux kernel source
1254 * tree (the eBPF program counterpart is in
1255 * *samples/bpf/trace_output_kern.c*).
1256 *
1257 * **bpf_perf_event_output**\ () achieves better performance
1258 * than **bpf_trace_printk**\ () for sharing data with user
1259 * space, and is much better suitable for streaming data from eBPF
1260 * programs.
1261 *
1262 * Note that this helper is not restricted to tracing use cases
1263 * and can be used with programs attached to TC or XDP as well,
1264 * where it allows for passing data to user space listeners. Data
1265 * can be:
1266 *
1267 * * Only custom structs,
1268 * * Only the packet payload, or
1269 * * A combination of both.
1270 * Return
1271 * 0 on success, or a negative error in case of failure.
1272 *
1273 * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
1274 * Description
1275 * This helper was provided as an easy way to load data from a
1276 * packet. It can be used to load *len* bytes from *offset* from
1277 * the packet associated to *skb*, into the buffer pointed by
1278 * *to*.
1279 *
1280 * Since Linux 4.7, usage of this helper has mostly been replaced
1281 * by "direct packet access", enabling packet data to be
1282 * manipulated with *skb*\ **->data** and *skb*\ **->data_end**
1283 * pointing respectively to the first byte of packet data and to
1284 * the byte after the last byte of packet data. However, it
1285 * remains useful if one wishes to read large quantities of data
1286 * at once from a packet into the eBPF stack.
1287 * Return
1288 * 0 on success, or a negative error in case of failure.
1289 *
1290 * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
1291 * Description
1292 * Walk a user or a kernel stack and return its id. To achieve
1293 * this, the helper needs *ctx*, which is a pointer to the context
1294 * on which the tracing program is executed, and a pointer to a
1295 * *map* of type **BPF_MAP_TYPE_STACK_TRACE**.
1296 *
1297 * The last argument, *flags*, holds the number of stack frames to
1298 * skip (from 0 to 255), masked with
1299 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
1300 * a combination of the following flags:
1301 *
1302 * **BPF_F_USER_STACK**
1303 * Collect a user space stack instead of a kernel stack.
1304 * **BPF_F_FAST_STACK_CMP**
1305 * Compare stacks by hash only.
1306 * **BPF_F_REUSE_STACKID**
1307 * If two different stacks hash into the same *stackid*,
1308 * discard the old one.
1309 *
1310 * The stack id retrieved is a 32 bit long integer handle which
1311 * can be further combined with other data (including other stack
1312 * ids) and used as a key into maps. This can be useful for
1313 * generating a variety of graphs (such as flame graphs or off-cpu
1314 * graphs).
1315 *
1316 * For walking a stack, this helper is an improvement over
1317 * **bpf_probe_read**\ (), which can be used with unrolled loops
1318 * but is not efficient and consumes a lot of eBPF instructions.
1319 * Instead, **bpf_get_stackid**\ () can collect up to
1320 * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
1321 * this limit can be controlled with the **sysctl** program, and
1322 * that it should be manually increased in order to profile long
1323 * user stacks (such as stacks for Java programs). To do so, use:
1324 *
1325 * ::
1326 *
1327 * # sysctl kernel.perf_event_max_stack=<new value>
1328 * Return
1329 * The positive or null stack id on success, or a negative error
1330 * in case of failure.
1331 *
1332 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
1333 * Description
1334 * Compute a checksum difference, from the raw buffer pointed by
1335 * *from*, of length *from_size* (that must be a multiple of 4),
1336 * towards the raw buffer pointed by *to*, of size *to_size*
1337 * (same remark). An optional *seed* can be added to the value
1338 * (this can be cascaded, the seed may come from a previous call
1339 * to the helper).
1340 *
1341 * This is flexible enough to be used in several ways:
1342 *
1343 * * With *from_size* == 0, *to_size* > 0 and *seed* set to
1344 * checksum, it can be used when pushing new data.
1345 * * With *from_size* > 0, *to_size* == 0 and *seed* set to
1346 * checksum, it can be used when removing data from a packet.
1347 * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
1348 * can be used to compute a diff. Note that *from_size* and
1349 * *to_size* do not need to be equal.
1350 *
1351 * This helper can be used in combination with
1352 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
1353 * which one can feed in the difference computed with
1354 * **bpf_csum_diff**\ ().
1355 * Return
1356 * The checksum result, or a negative error code in case of
1357 * failure.
1358 *
1359 * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
1360 * Description
1361 * Retrieve tunnel options metadata for the packet associated to
1362 * *skb*, and store the raw tunnel option data to the buffer *opt*
1363 * of *size*.
1364 *
1365 * This helper can be used with encapsulation devices that can
1366 * operate in "collect metadata" mode (please refer to the related
1367 * note in the description of **bpf_skb_get_tunnel_key**\ () for
1368 * more details). A particular example where this can be used is
1369 * in combination with the Geneve encapsulation protocol, where it
1370 * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper)
1371 * and retrieving arbitrary TLVs (Type-Length-Value headers) from
1372 * the eBPF program. This allows for full customization of these
1373 * headers.
1374 * Return
1375 * The size of the option data retrieved.
1376 *
1377 * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
1378 * Description
1379 * Set tunnel options metadata for the packet associated to *skb*
1380 * to the option data contained in the raw buffer *opt* of *size*.
1381 *
1382 * See also the description of the **bpf_skb_get_tunnel_opt**\ ()
1383 * helper for additional information.
1384 * Return
1385 * 0 on success, or a negative error in case of failure.
1386 *
1387 * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
1388 * Description
1389 * Change the protocol of the *skb* to *proto*. Currently
1390 * supported are transition from IPv4 to IPv6, and from IPv6 to
1391 * IPv4. The helper takes care of the groundwork for the
1392 * transition, including resizing the socket buffer. The eBPF
1393 * program is expected to fill the new headers, if any, via
1394 * **skb_store_bytes**\ () and to recompute the checksums with
1395 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
1396 * (). The main case for this helper is to perform NAT64
1397 * operations out of an eBPF program.
1398 *
1399 * Internally, the GSO type is marked as dodgy so that headers are
1400 * checked and segments are recalculated by the GSO/GRO engine.
1401 * The size for GSO target is adapted as well.
1402 *
1403 * All values for *flags* are reserved for future usage, and must
1404 * be left at zero.
1405 *
1406 * A call to this helper is susceptible to change the underlying
1407 * packet buffer. Therefore, at load time, all checks on pointers
1408 * previously done by the verifier are invalidated and must be
1409 * performed again, if the helper is used in combination with
1410 * direct packet access.
1411 * Return
1412 * 0 on success, or a negative error in case of failure.
1413 *
1414 * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
1415 * Description
1416 * Change the packet type for the packet associated to *skb*. This
1417 * comes down to setting *skb*\ **->pkt_type** to *type*, except
1418 * the eBPF program does not have a write access to *skb*\
1419 * **->pkt_type** beside this helper. Using a helper here allows
1420 * for graceful handling of errors.
1421 *
1422 * The major use case is to change incoming *skb*s to
1423 * **PACKET_HOST** in a programmatic way instead of having to
1424 * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
1425 * example.
1426 *
1427 * Note that *type* only allows certain values. At this time, they
1428 * are:
1429 *
1430 * **PACKET_HOST**
1431 * Packet is for us.
1432 * **PACKET_BROADCAST**
1433 * Send packet to all.
1434 * **PACKET_MULTICAST**
1435 * Send packet to group.
1436 * **PACKET_OTHERHOST**
1437 * Send packet to someone else.
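 *
 *		For example, a TC classifier can promote every received frame
 *		to the local host in a few lines. This is only a minimal
 *		sketch: it assumes libbpf's **SEC**\ () macro as well as
 *		**PACKET_HOST** and **TC_ACT_OK** from the usual kernel
 *		headers (**linux/if_packet.h**, **linux/pkt_cls.h**).
 *
 *		::
 *
 *			SEC("tc")
 *			int set_pkt_host(struct __sk_buff *skb)
 *			{
 *				// Deliver the frame locally instead of recirculating it.
 *				bpf_skb_change_type(skb, PACKET_HOST);
 *				return TC_ACT_OK;
 *			}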
1438 * Return
1439 * 0 on success, or a negative error in case of failure.
1440 *
1441 * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
1442 * Description
1443 * Check whether *skb* is a descendant of the cgroup2 held by
1444 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1445 * Return
1446 * The return value depends on the result of the test, and can be:
1447 *
1448 * * 0, if the *skb* failed the cgroup2 descendant test.
1449 * * 1, if the *skb* succeeded the cgroup2 descendant test.
1450 * * A negative error code, if an error occurred.
1451 *
1452 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
1453 * Description
1454 * Retrieve the hash of the packet, *skb*\ **->hash**. If it is
1455 * not set, in particular if the hash was cleared due to mangling,
1456 * recompute this hash. Later accesses to the hash can be done
1457 * directly with *skb*\ **->hash**.
1458 *
1459 * Calling **bpf_set_hash_invalid**\ (), changing a packet
1460 *		protocol with **bpf_skb_change_proto**\ (), or calling
1461 *		**bpf_skb_store_bytes**\ () with the
1462 *		**BPF_F_INVALIDATE_HASH** flag are actions susceptible to clear
1463 * the hash and to trigger a new computation for the next call to
1464 * **bpf_get_hash_recalc**\ ().
1465 * Return
1466 * The 32-bit hash.
1467 *
1468 * u64 bpf_get_current_task(void)
1469 * Return
1470 * A pointer to the current task struct.
1471 *
1472 * long bpf_probe_write_user(void *dst, const void *src, u32 len)
1473 * Description
1474 * Attempt in a safe way to write *len* bytes from the buffer
1475 * *src* to *dst* in memory. It only works for threads that are in
1476 * user context, and *dst* must be a valid user space address.
1477 *
1478 * This helper should not be used to implement any kind of
1479 * security mechanism because of TOC-TOU attacks, but rather to
1480 * debug, divert, and manipulate execution of semi-cooperative
1481 * processes.
1482 *
1483 * Keep in mind that this feature is meant for experiments, and it
1484 * has a risk of crashing the system and running programs.
1485 * Therefore, when an eBPF program using this helper is attached,
1486 * a warning including PID and process name is printed to kernel
1487 * logs.
1488 * Return
1489 * 0 on success, or a negative error in case of failure.
1490 *
1491 * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
1492 * Description
1493 *		Check whether the probe is being run in the context of a given
1494 * subset of the cgroup2 hierarchy. The cgroup2 to test is held by
1495 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1496 * Return
1497 * The return value depends on the result of the test, and can be:
1498 *
1499 *		* 1, if current task belongs to the cgroup2.
1500 *		* 0, if current task does not belong to the cgroup2.
1501 * * A negative error code, if an error occurred.
1502 *
1503 * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
1504 * Description
1505 * Resize (trim or grow) the packet associated to *skb* to the
1506 * new *len*. The *flags* are reserved for future usage, and must
1507 * be left at zero.
1508 *
1509 * The basic idea is that the helper performs the needed work to
1510 * change the size of the packet, then the eBPF program rewrites
1511 * the rest via helpers like **bpf_skb_store_bytes**\ (),
1512 *		**bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
1513 * and others. This helper is a slow path utility intended for
1514 * replies with control messages. And because it is targeted for
1515 * slow path, the helper itself can afford to be slow: it
1516 * implicitly linearizes, unclones and drops offloads from the
1517 * *skb*.
1518 *
1519 * A call to this helper is susceptible to change the underlying
1520 * packet buffer. Therefore, at load time, all checks on pointers
1521 * previously done by the verifier are invalidated and must be
1522 * performed again, if the helper is used in combination with
1523 * direct packet access.
1524 * Return
1525 * 0 on success, or a negative error in case of failure.
1526 *
1527 * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
1528 * Description
1529 * Pull in non-linear data in case the *skb* is non-linear and not
1530 * all of *len* are part of the linear section. Make *len* bytes
1531 * from *skb* readable and writable. If a zero value is passed for
1532 * *len*, then the whole length of the *skb* is pulled.
1533 *
1534 * This helper is only needed for reading and writing with direct
1535 * packet access.
1536 *
1537 * For direct packet access, testing that offsets to access
1538 * are within packet boundaries (test on *skb*\ **->data_end**) is
1539 * susceptible to fail if offsets are invalid, or if the requested
1540 * data is in non-linear parts of the *skb*. On failure the
1541 * program can just bail out, or in the case of a non-linear
1542 * buffer, use a helper to make the data available. The
1543 * **bpf_skb_load_bytes**\ () helper is a first solution to access
1544 *		the data. Another one consists in using **bpf_skb_pull_data**\ ()
1545 *		to pull in the non-linear parts once, then retesting and
1546 *		finally accessing the data.
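 *
 *		A minimal sketch of this pull-and-retest pattern, assuming a
 *		TC context and libbpf conventions:
 *
 *		::
 *
 *			SEC("tc")
 *			int parse_hdr(struct __sk_buff *skb)
 *			{
 *				void *data, *data_end;
 *
 *				// Make the first 64 bytes readable and writable.
 *				if (bpf_skb_pull_data(skb, 64))
 *					return TC_ACT_OK;
 *				// Pointers must be re-derived and re-tested afterwards.
 *				data = (void *)(long)skb->data;
 *				data_end = (void *)(long)skb->data_end;
 *				if (data + 64 > data_end)
 *					return TC_ACT_OK;
 *				// ... direct packet access to the first 64 bytes ...
 *				return TC_ACT_OK;
 *			}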
1547 *
1548 * At the same time, this also makes sure the *skb* is uncloned,
1549 * which is a necessary condition for direct write. As this needs
1550 * to be an invariant for the write part only, the verifier
1551 * detects writes and adds a prologue that is calling
1552 * **bpf_skb_pull_data()** to effectively unclone the *skb* from
1553 * the very beginning in case it is indeed cloned.
1554 *
1555 * A call to this helper is susceptible to change the underlying
1556 * packet buffer. Therefore, at load time, all checks on pointers
1557 * previously done by the verifier are invalidated and must be
1558 * performed again, if the helper is used in combination with
1559 * direct packet access.
1560 * Return
1561 * 0 on success, or a negative error in case of failure.
1562 *
1563 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
1564 * Description
1565 * Add the checksum *csum* into *skb*\ **->csum** in case the
1566 * driver has supplied a checksum for the entire packet into that
1567 * field. Return an error otherwise. This helper is intended to be
1568 * used in combination with **bpf_csum_diff**\ (), in particular
1569 * when the checksum needs to be updated after data has been
1570 * written into the packet through direct packet access.
1571 * Return
1572 * The checksum on success, or a negative error code in case of
1573 * failure.
1574 *
1575 * void bpf_set_hash_invalid(struct sk_buff *skb)
1576 * Description
1577 * Invalidate the current *skb*\ **->hash**. It can be used after
1578 * mangling on headers through direct packet access, in order to
1579 * indicate that the hash is outdated and to trigger a
1580 * recalculation the next time the kernel tries to access this
1581 * hash or when the **bpf_get_hash_recalc**\ () helper is called.
1582 *
1583 * long bpf_get_numa_node_id(void)
1584 * Description
1585 * Return the id of the current NUMA node. The primary use case
1586 * for this helper is the selection of sockets for the local NUMA
1587 * node, when the program is attached to sockets using the
1588 * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
1589 * but the helper is also available to other eBPF program types,
1590 * similarly to **bpf_get_smp_processor_id**\ ().
1591 * Return
1592 * The id of current NUMA node.
1593 *
1594 * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
1595 * Description
1596 *		Grow the headroom of the packet associated to *skb* and adjust the
1597 * offset of the MAC header accordingly, adding *len* bytes of
1598 * space. It automatically extends and reallocates memory as
1599 * required.
1600 *
1601 * This helper can be used on a layer 3 *skb* to push a MAC header
1602 * for redirection into a layer 2 device.
1603 *
1604 * All values for *flags* are reserved for future usage, and must
1605 * be left at zero.
1606 *
1607 * A call to this helper is susceptible to change the underlying
1608 * packet buffer. Therefore, at load time, all checks on pointers
1609 * previously done by the verifier are invalidated and must be
1610 * performed again, if the helper is used in combination with
1611 * direct packet access.
1612 * Return
1613 * 0 on success, or a negative error in case of failure.
1614 *
1615 * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
1616 * Description
1617 * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
1618 * it is possible to use a negative value for *delta*. This helper
1619 * can be used to prepare the packet for pushing or popping
1620 * headers.
1621 *
1622 * A call to this helper is susceptible to change the underlying
1623 * packet buffer. Therefore, at load time, all checks on pointers
1624 * previously done by the verifier are invalidated and must be
1625 * performed again, if the helper is used in combination with
1626 * direct packet access.
1627 * Return
1628 * 0 on success, or a negative error in case of failure.
1629 *
1630 * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
1631 * Description
1632 * Copy a NUL terminated string from an unsafe kernel address
1633 * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
1634 * more details.
1635 *
1636 * Generally, use **bpf_probe_read_user_str**\ () or
1637 * **bpf_probe_read_kernel_str**\ () instead.
1638 * Return
1639 * On success, the strictly positive length of the string,
1640 * including the trailing NUL character. On error, a negative
1641 * value.
1642 *
1643 * u64 bpf_get_socket_cookie(struct sk_buff *skb)
1644 * Description
1645 * If the **struct sk_buff** pointed by *skb* has a known socket,
1646 * retrieve the cookie (generated by the kernel) of this socket.
1647 * If no cookie has been set yet, generate a new cookie. Once
1648 * generated, the socket cookie remains stable for the life of the
1649 * socket. This helper can be useful for monitoring per socket
1650 * networking traffic statistics as it provides a global socket
1651 * identifier that can be assumed unique.
1652 * Return
1653 *		An 8-byte long non-decreasing number on success, or 0 if the
1654 * socket field is missing inside *skb*.
1655 *
1656 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
1657 * Description
1658 *		Equivalent to the **bpf_get_socket_cookie**\ () helper that accepts
1659 * *skb*, but gets socket from **struct bpf_sock_addr** context.
1660 * Return
1661 *		An 8-byte long non-decreasing number.
1662 *
1663 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
1664 * Description
1665 *		Equivalent to the **bpf_get_socket_cookie**\ () helper that accepts
1666 * *skb*, but gets socket from **struct bpf_sock_ops** context.
1667 * Return
1668 *		An 8-byte long non-decreasing number.
1669 *
1670 * u32 bpf_get_socket_uid(struct sk_buff *skb)
1671 * Return
1672 * The owner UID of the socket associated to *skb*. If the socket
1673 * is **NULL**, or if it is not a full socket (i.e. if it is a
1674 * time-wait or a request socket instead), **overflowuid** value
1675 * is returned (note that **overflowuid** might also be the actual
1676 * UID value for the socket).
1677 *
1678 * long bpf_set_hash(struct sk_buff *skb, u32 hash)
1679 * Description
1680 * Set the full hash for *skb* (set the field *skb*\ **->hash**)
1681 * to value *hash*.
1682 * Return
1683 * 0
1684 *
1685 * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
1686 * Description
1687 * Emulate a call to **setsockopt()** on the socket associated to
1688 * *bpf_socket*, which must be a full socket. The *level* at
1689 * which the option resides and the name *optname* of the option
1690 * must be specified, see **setsockopt(2)** for more information.
1691 * The option value of length *optlen* is pointed by *optval*.
1692 *
1693 * *bpf_socket* should be one of the following:
1694 *
1695 * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
1696 * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
1697 * and **BPF_CGROUP_INET6_CONNECT**.
1698 *
1699 * This helper actually implements a subset of **setsockopt()**.
1700 * It supports the following *level*\ s:
1701 *
1702 * * **SOL_SOCKET**, which supports the following *optname*\ s:
1703 * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
1704 * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
1705 * **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
1706 * * **IPPROTO_TCP**, which supports the following *optname*\ s:
1707 * **TCP_CONGESTION**, **TCP_BPF_IW**,
1708 * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
1709 * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
1710 * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**.
1711 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1712 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
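 *
 *		For instance, a **BPF_PROG_TYPE_SOCK_OPS** program can switch
 *		the congestion control algorithm of a connection. A minimal
 *		sketch, assuming **TCP_CONGESTION** and **IPPROTO_TCP** from
 *		the usual headers, with "reno" as an arbitrary example value:
 *
 *		::
 *
 *			SEC("sockops")
 *			int set_cc(struct bpf_sock_ops *skops)
 *			{
 *				char cc[] = "reno";
 *
 *				// A real program should check the return value.
 *				bpf_setsockopt(skops, IPPROTO_TCP, TCP_CONGESTION,
 *					       cc, sizeof(cc));
 *				return 1;
 *			}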
1713 * Return
1714 * 0 on success, or a negative error in case of failure.
1715 *
1716 * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
1717 * Description
1718 * Grow or shrink the room for data in the packet associated to
1719 * *skb* by *len_diff*, and according to the selected *mode*.
1720 *
1721 * By default, the helper will reset any offloaded checksum
1722 * indicator of the skb to CHECKSUM_NONE. This can be avoided
1723 * by the following flag:
1724 *
1725 * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
1726 * checksum data of the skb to CHECKSUM_NONE.
1727 *
1728 * There are two supported modes at this time:
1729 *
1730 * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
1731 * (room space is added or removed below the layer 2 header).
1732 *
1733 * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
1734 * (room space is added or removed below the layer 3 header).
1735 *
1736 * The following flags are supported at this time:
1737 *
1738 * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
1739 * Adjusting mss in this way is not allowed for datagrams.
1740 *
1741 * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
1742 * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
1743 * Any new space is reserved to hold a tunnel header.
1744 * Configure skb offsets and other fields accordingly.
1745 *
1746 * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
1747 * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
1748 * Use with ENCAP_L3 flags to further specify the tunnel type.
1749 *
1750 * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
1751 * Use with ENCAP_L3/L4 flags to further specify the tunnel
1752 * type; *len* is the length of the inner MAC header.
1753 *
1754 * A call to this helper is susceptible to change the underlying
1755 * packet buffer. Therefore, at load time, all checks on pointers
1756 * previously done by the verifier are invalidated and must be
1757 * performed again, if the helper is used in combination with
1758 * direct packet access.
1759 * Return
1760 * 0 on success, or a negative error in case of failure.
1761 *
1762 * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
1763 * Description
1764 * Redirect the packet to the endpoint referenced by *map* at
1765 * index *key*. Depending on its type, this *map* can contain
1766 * references to net devices (for forwarding packets through other
1767 * ports), or to CPUs (for redirecting XDP frames to another CPU;
1768 * but this is only implemented for native XDP (with driver
1769 * support) as of this writing).
1770 *
1771 * The lower two bits of *flags* are used as the return code if
1772 * the map lookup fails. This is so that the return value can be
1773 * one of the XDP program return codes up to **XDP_TX**, as chosen
1774 * by the caller. Any higher bits in the *flags* argument must be
1775 * unset.
1776 *
1777 * See also **bpf_redirect**\ (), which only supports redirecting
1778 * to an ifindex, but doesn't require a map to do so.
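 *
 *		A sketch of an XDP program redirecting through a
 *		**BPF_MAP_TYPE_DEVMAP** (BTF-defined map and libbpf
 *		conventions assumed; index 0 is an arbitrary choice):
 *
 *		::
 *
 *			struct {
 *				__uint(type, BPF_MAP_TYPE_DEVMAP);
 *				__uint(max_entries, 8);
 *				__type(key, __u32);
 *				__type(value, __u32);
 *			} tx_ports SEC(".maps");
 *
 *			SEC("xdp")
 *			int xdp_redir(struct xdp_md *ctx)
 *			{
 *				// Fall back to XDP_PASS if index 0 holds no device.
 *				return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
 *			}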
1779 * Return
1780 * **XDP_REDIRECT** on success, or the value of the two lower bits
1781 * of the *flags* argument on error.
1782 *
1783 * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
1784 * Description
1785 * Redirect the packet to the socket referenced by *map* (of type
1786 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1787 * egress interfaces can be used for redirection. The
1788 * **BPF_F_INGRESS** value in *flags* is used to make the
1789 * distinction (ingress path is selected if the flag is present,
1790 * egress path otherwise). This is the only flag supported for now.
1791 * Return
1792 * **SK_PASS** on success, or **SK_DROP** on error.
1793 *
1794 * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
1795 * Description
1796 * Add an entry to, or update a *map* referencing sockets. The
1797 * *skops* is used as a new value for the entry associated to
1798 * *key*. *flags* is one of:
1799 *
1800 * **BPF_NOEXIST**
1801 * The entry for *key* must not exist in the map.
1802 * **BPF_EXIST**
1803 * The entry for *key* must already exist in the map.
1804 * **BPF_ANY**
1805 * No condition on the existence of the entry for *key*.
1806 *
1807 * If the *map* has eBPF programs (parser and verdict), those will
1808 * be inherited by the socket being added. If the socket is
1809 * already attached to eBPF programs, this results in an error.
1810 * Return
1811 * 0 on success, or a negative error in case of failure.
1812 *
1813 * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
1814 * Description
1815 * Adjust the address pointed by *xdp_md*\ **->data_meta** by
1816 * *delta* (which can be positive or negative). Note that this
1817 * operation modifies the address stored in *xdp_md*\ **->data**,
1818 * so the latter must be loaded only after the helper has been
1819 * called.
1820 *
1821 * The use of *xdp_md*\ **->data_meta** is optional and programs
1822 * are not required to use it. The rationale is that when the
1823 * packet is processed with XDP (e.g. as DoS filter), it is
1824 * possible to push further meta data along with it before passing
1825 * to the stack, and to give the guarantee that an ingress eBPF
1826 * program attached as a TC classifier on the same device can pick
1827 * this up for further post-processing. Since TC works with socket
1828 * buffers, it remains possible to set from XDP the **mark** or
1829 *		**priority** fields, or other fields of the socket buffer.
1830 * Having this scratch space generic and programmable allows for
1831 * more flexibility as the user is free to store whatever meta
1832 * data they need.
1833 *
1834 * A call to this helper is susceptible to change the underlying
1835 * packet buffer. Therefore, at load time, all checks on pointers
1836 * previously done by the verifier are invalidated and must be
1837 * performed again, if the helper is used in combination with
1838 * direct packet access.
1839 * Return
1840 * 0 on success, or a negative error in case of failure.
1841 *
1842 * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
1843 * Description
1844 * Read the value of a perf event counter, and store it into *buf*
1845 * of size *buf_size*. This helper relies on a *map* of type
1846 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
1847 * counter is selected when *map* is updated with perf event file
1848 * descriptors. The *map* is an array whose size is the number of
1849 * available CPUs, and each cell contains a value relative to one
1850 * CPU. The value to retrieve is indicated by *flags*, that
1851 * contains the index of the CPU to look up, masked with
1852 * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1853 * **BPF_F_CURRENT_CPU** to indicate that the value for the
1854 * current CPU should be retrieved.
1855 *
1856 * This helper behaves in a way close to
1857 * **bpf_perf_event_read**\ () helper, save that instead of
1858 * just returning the value observed, it fills the *buf*
1859 * structure. This allows for additional data to be retrieved: in
1860 * particular, the enabled and running times (in *buf*\
1861 * **->enabled** and *buf*\ **->running**, respectively) are
1862 * copied. In general, **bpf_perf_event_read_value**\ () is
1863 * recommended over **bpf_perf_event_read**\ (), which has some
1864 * ABI issues and provides fewer functionalities.
1865 *
1866 * These values are interesting, because hardware PMU (Performance
1867 * Monitoring Unit) counters are limited resources. When there are
1868 *		more PMU-based perf events opened than available counters,
1869 *		the kernel will multiplex these events so each event gets a
1870 *		certain percentage (but not all) of the PMU time. When
1871 *		multiplexing happens, the number of samples or the counter
1872 *		value will not reflect the full activity, unlike when no
1873 *		multiplexing occurs, making comparisons between runs difficult.
1874 * Typically, the counter value should be normalized before
1875 * comparing to other experiments. The usual normalization is done
1876 * as follows.
1877 *
1878 * ::
1879 *
1880 * normalized_counter = counter * t_enabled / t_running
1881 *
1882 *		Where t_enabled is the time enabled for the event and t_running
1883 *		is the time running for the event since the last normalization. The
1884 * enabled and running times are accumulated since the perf event
1885 *		open. To achieve a scaling factor between two invocations of an
1886 * eBPF program, users can use CPU id as the key (which is
1887 * typical for perf array usage model) to remember the previous
1888 * value and do the calculation inside the eBPF program.
1889 * Return
1890 * 0 on success, or a negative error in case of failure.
1891 *
1892 * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
1893 * Description
1894 *		For an eBPF program attached to a perf event, retrieve the
1895 * value of the event counter associated to *ctx* and store it in
1896 * the structure pointed by *buf* and of size *buf_size*. Enabled
1897 * and running times are also stored in the structure (see
1898 * description of helper **bpf_perf_event_read_value**\ () for
1899 * more details).
1900 * Return
1901 * 0 on success, or a negative error in case of failure.
1902 *
1903 * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
1904 * Description
1905 * Emulate a call to **getsockopt()** on the socket associated to
1906 * *bpf_socket*, which must be a full socket. The *level* at
1907 * which the option resides and the name *optname* of the option
1908 * must be specified, see **getsockopt(2)** for more information.
1909 * The retrieved value is stored in the structure pointed by
1910 *		*optval* and of length *optlen*.
1911 *
1912 * *bpf_socket* should be one of the following:
1913 *
1914 * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
1915 * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
1916 * and **BPF_CGROUP_INET6_CONNECT**.
1917 *
1918 * This helper actually implements a subset of **getsockopt()**.
1919 * It supports the following *level*\ s:
1920 *
1921 * * **IPPROTO_TCP**, which supports *optname*
1922 * **TCP_CONGESTION**.
1923 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1924 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
1925 * Return
1926 * 0 on success, or a negative error in case of failure.
1927 *
1928 * long bpf_override_return(struct pt_regs *regs, u64 rc)
1929 * Description
1930 * Used for error injection, this helper uses kprobes to override
1931 * the return value of the probed function, and to set it to *rc*.
1932 * The first argument is the context *regs* on which the kprobe
1933 * works.
1934 *
1935 * This helper works by setting the PC (program counter)
1936 * to an override function which is run in place of the original
1937 * probed function. This means the probed function is not run at
1938 * all. The replacement function just returns with the required
1939 * value.
1940 *
1941 * This helper has security implications, and thus is subject to
1942 * restrictions. It is only available if the kernel was compiled
1943 * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
1944 * option, and in this case it only works on functions tagged with
1945 * **ALLOW_ERROR_INJECTION** in the kernel code.
1946 *
1947 * Also, the helper is only available for the architectures having
1948 * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
1949 * x86 architecture is the only one to support this feature.
1950 * Return
1951 * 0
1952 *
1953 * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
1954 * Description
1955 * Attempt to set the value of the **bpf_sock_ops_cb_flags** field
1956 *		for the full TCP socket associated to *bpf_sock* to
1957 * *argval*.
1958 *
1959 * The primary use of this field is to determine if there should
1960 * be calls to eBPF programs of type
1961 * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
1962 * code. A program of the same type can change its value, per
1963 * connection and as necessary, when the connection is
1964 * established. This field is directly accessible for reading, but
1965 * this helper must be used for updates in order to return an
1966 * error if an eBPF program tries to set a callback that is not
1967 * supported in the current kernel.
1968 *
1969 *		*argval* is a bit mask that can combine these flags:
1970 *
1971 * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
1972 * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
1973 * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
1974 * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
1975 *
1976 * Therefore, this function can be used to clear a callback flag by
1977 *		setting the appropriate bit to zero. For example, to disable the RTO
1978 * callback:
1979 *
1980 * **bpf_sock_ops_cb_flags_set(bpf_sock,**
1981 * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
1982 *
1983 * Here are some examples of where one could call such eBPF
1984 * program:
1985 *
1986 * * When RTO fires.
1987 * * When a packet is retransmitted.
1988 * * When the connection terminates.
1989 * * When a packet is sent.
1990 * * When a packet is received.
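 *
 *		Conversely, enabling callbacks once a connection is
 *		established could look as follows (a minimal sketch under
 *		libbpf conventions):
 *
 *		::
 *
 *			SEC("sockops")
 *			int enable_cbs(struct bpf_sock_ops *skops)
 *			{
 *				if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *					bpf_sock_ops_cb_flags_set(skops,
 *							BPF_SOCK_OPS_RETRANS_CB_FLAG |
 *							BPF_SOCK_OPS_STATE_CB_FLAG);
 *				return 1;
 *			}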
1991 * Return
1992 * Code **-EINVAL** if the socket is not a full TCP socket;
1993 * otherwise, a positive number containing the bits that could not
1994 * be set is returned (which comes down to 0 if all bits were set
1995 * as required).
1996 *
1997 * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
1998 * Description
1999 * This helper is used in programs implementing policies at the
2000 * socket level. If the message *msg* is allowed to pass (i.e. if
2001 * the verdict eBPF program returns **SK_PASS**), redirect it to
2002 * the socket referenced by *map* (of type
2003 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
2004 * egress interfaces can be used for redirection. The
2005 * **BPF_F_INGRESS** value in *flags* is used to make the
2006 * distinction (ingress path is selected if the flag is present,
2007 * egress path otherwise). This is the only flag supported for now.
2008 * Return
2009 * **SK_PASS** on success, or **SK_DROP** on error.
2010 *
2011 * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
2012 * Description
2013 * For socket policies, apply the verdict of the eBPF program to
2014 * the next *bytes* (number of bytes) of message *msg*.
2015 *
2016 * For example, this helper can be used in the following cases:
2017 *
2018 * * A single **sendmsg**\ () or **sendfile**\ () system call
2019 * contains multiple logical messages that the eBPF program is
2020 * supposed to read and for which it should apply a verdict.
2021 * * An eBPF program only cares to read the first *bytes* of a
2022 * *msg*. If the message has a large payload, then setting up
2023 * and calling the eBPF program repeatedly for all bytes, even
2024 * though the verdict is already known, would create unnecessary
2025 * overhead.
2026 *
2027 * When called from within an eBPF program, the helper sets a
2028 * counter internal to the BPF infrastructure, that is used to
2029 * apply the last verdict to the next *bytes*. If *bytes* is
2030 * smaller than the current data being processed from a
2031 * **sendmsg**\ () or **sendfile**\ () system call, the first
2032 * *bytes* will be sent and the eBPF program will be re-run with
2033 * the pointer for start of data pointing to byte number *bytes*
2034 * **+ 1**. If *bytes* is larger than the current data being
2035 * processed, then the eBPF verdict will be applied to multiple
2036 * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are
2037 * consumed.
2038 *
2039 * Note that if a socket closes with the internal counter holding
2040 * a non-zero value, this is not a problem because data is not
2041 * being buffered for *bytes* and is sent as it is received.
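 *
 *		A minimal verdict program using this helper might read (the
 *		1000-byte figure is an arbitrary example):
 *
 *		::
 *
 *			SEC("sk_msg")
 *			int msg_verdict(struct sk_msg_md *msg)
 *			{
 *				// Apply this verdict to the next 1000 bytes without
 *				// re-running the program for each of them.
 *				bpf_msg_apply_bytes(msg, 1000);
 *				return SK_PASS;
 *			}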
2042 * Return
2043 * 0
2044 *
2045 * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
2046 * Description
2047 * For socket policies, prevent the execution of the verdict eBPF
2048 * program for message *msg* until *bytes* (byte number) have been
2049 * accumulated.
2050 *
2051 * This can be used when one needs a specific number of bytes
2052 * before a verdict can be assigned, even if the data spans
2053 * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
2054 * case would be a user calling **sendmsg**\ () repeatedly with
2055 * 1-byte long message segments. Obviously, this is bad for
2056 * performance, but it is still valid. If the eBPF program needs
2057 * *bytes* bytes to validate a header, this helper can be used to
2058 *		prevent the eBPF program from being called again until *bytes* have
2059 * been accumulated.
2060 * Return
2061 * 0
2062 *
2063 * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
2064 * Description
2065 * For socket policies, pull in non-linear data from user space
2066 * for *msg* and set pointers *msg*\ **->data** and *msg*\
2067 * **->data_end** to *start* and *end* bytes offsets into *msg*,
2068 * respectively.
2069 *
2070 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
2071 * *msg* it can only parse data that the (**data**, **data_end**)
2072 * pointers have already consumed. For **sendmsg**\ () hooks this
2073 * is likely the first scatterlist element. But for calls relying
2074 * on the **sendpage** handler (e.g. **sendfile**\ ()) this will
2075 * be the range (**0**, **0**) because the data is shared with
2076 * user space and by default the objective is to avoid allowing
2077 * user space to modify data while (or after) eBPF verdict is
2078 * being decided. This helper can be used to pull in data and to
2079 * set the start and end pointer to given values. Data will be
2080 * copied if necessary (i.e. if data was not linear and if start
2081 * and end pointers do not point to the same chunk).
2082 *
2083 * A call to this helper is susceptible to change the underlying
2084 * packet buffer. Therefore, at load time, all checks on pointers
2085 * previously done by the verifier are invalidated and must be
2086 * performed again, if the helper is used in combination with
2087 * direct packet access.
2088 *
2089 * All values for *flags* are reserved for future usage, and must
2090 * be left at zero.
2091 * Return
2092 * 0 on success, or a negative error in case of failure.
2093 *
2094 * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
2095 * Description
2096 * Bind the socket associated to *ctx* to the address pointed by
2097 * *addr*, of length *addr_len*. This allows for making outgoing
2098 *		connections from the desired IP address, which can be useful for
2099 *		example when all processes inside a cgroup should use one
2100 *		single IP address on a host that has multiple IPs configured.
2101 *
2102 * This helper works for IPv4 and IPv6, TCP and UDP sockets. The
2103 * domain (*addr*\ **->sa_family**) must be **AF_INET** (or
2104 * **AF_INET6**). It's advised to pass zero port (**sin_port**
2105 * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
2106 * behavior and lets the kernel efficiently pick up an unused
2107 * port as long as 4-tuple is unique. Passing non-zero port might
2108 * lead to degraded performance.
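 *
 *		A sketch for IPv4, assuming **bpf_htonl**\ () from libbpf's
 *		**bpf/bpf_endian.h**; the source address 192.0.2.1 is only an
 *		example:
 *
 *		::
 *
 *			SEC("cgroup/connect4")
 *			int bind_src(struct bpf_sock_addr *ctx)
 *			{
 *				struct sockaddr_in addr = {
 *					.sin_family = AF_INET,
 *					.sin_port = 0, // zero port: kernel picks one
 *					.sin_addr.s_addr = bpf_htonl(0xC0000201),
 *				};
 *
 *				// A real program should check the return value.
 *				bpf_bind(ctx, (struct sockaddr *)&addr, sizeof(addr));
 *				return 1;
 *			}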
2109 * Return
2110 * 0 on success, or a negative error in case of failure.
2111 *
2112 * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
2113 * Description
2114 * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
2115 * possible to both shrink and grow the packet tail.
2116 *		A shrink is done by passing a negative *delta*.
2117 *
2118 * A call to this helper is susceptible to change the underlying
2119 * packet buffer. Therefore, at load time, all checks on pointers
2120 * previously done by the verifier are invalidated and must be
2121 * performed again, if the helper is used in combination with
2122 * direct packet access.
2123 * Return
2124 * 0 on success, or a negative error in case of failure.
2125 *
2126 * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
2127 * Description
2128 * Retrieve the XFRM state (IP transform framework, see also
2129 * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
2130 *
2131 * The retrieved value is stored in the **struct bpf_xfrm_state**
2132 * pointed by *xfrm_state* and of length *size*.
2133 *
2134 * All values for *flags* are reserved for future usage, and must
2135 * be left at zero.
2136 *
2137 * This helper is available only if the kernel was compiled with
2138 * **CONFIG_XFRM** configuration option.
2139 * Return
2140 * 0 on success, or a negative error in case of failure.
2141 *
2142 * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
2143 * Description
2144 *		Return a user or a kernel stack in a buffer provided by the BPF
2145 *		program. To achieve this, the helper needs *ctx*, which is a
2146 *		pointer to the context on which the tracing program is executed.
2147 *		To store the stacktrace, the BPF program provides *buf* with
2148 *		a non-negative *size*.
2149 *
2150 * The last argument, *flags*, holds the number of stack frames to
2151 * skip (from 0 to 255), masked with
2152 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
2153 * the following flags:
2154 *
2155 * **BPF_F_USER_STACK**
2156 * Collect a user space stack instead of a kernel stack.
2157 * **BPF_F_USER_BUILD_ID**
2158 * Collect buildid+offset instead of ips for user stack,
2159 * only valid if **BPF_F_USER_STACK** is also specified.
2160 *
2161 * **bpf_get_stack**\ () can collect up to
2162 *		**PERF_MAX_STACK_DEPTH** kernel and user frames, subject
2163 *		to a sufficiently large buffer size. Note that
2164 * this limit can be controlled with the **sysctl** program, and
2165 * that it should be manually increased in order to profile long
2166 * user stacks (such as stacks for Java programs). To do so, use:
2167 *
2168 * ::
2169 *
2170 * # sysctl kernel.perf_event_max_stack=<new value>
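 *
 *		A kprobe program collecting a user stack could look like the
 *		sketch below; the probed function is arbitrary and libbpf
 *		conventions are assumed:
 *
 *		::
 *
 *			SEC("kprobe/do_nanosleep")
 *			int grab_stack(struct pt_regs *ctx)
 *			{
 *				__u64 ips[32];
 *				long n;
 *
 *				// Instruction pointers of up to 32 user-space frames.
 *				n = bpf_get_stack(ctx, ips, sizeof(ips),
 *						  BPF_F_USER_STACK);
 *				if (n < 0)
 *					return 0;
 *				// The first n bytes of ips are valid here.
 *				return 0;
 *			}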
2171 * Return
2172 * A non-negative value equal to or less than *size* on success,
2173 * or a negative error in case of failure.
2174 *
2175 * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
2176 * Description
2177 * This helper is similar to **bpf_skb_load_bytes**\ () in that
2178 * it provides an easy way to load *len* bytes from *offset*
2179 * from the packet associated to *skb*, into the buffer pointed
2180 * by *to*. The difference to **bpf_skb_load_bytes**\ () is that
2181 * a fifth argument *start_header* exists in order to select a
2182 * base offset to start from. *start_header* can be one of:
2183 *
2184 * **BPF_HDR_START_MAC**
2185 * Base offset to load data from is *skb*'s mac header.
2186 * **BPF_HDR_START_NET**
2187 * Base offset to load data from is *skb*'s network header.
2188 *
2189 * In general, "direct packet access" is the preferred method to
2190 * access packet data, however, this helper is in particular useful
2191 * in socket filters where *skb*\ **->data** does not always point
2192 * to the start of the mac header and where "direct packet access"
2193 * is not available.
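 *
 *		For example, a socket filter can read the IPv4 header from
 *		the network header base regardless of where *skb*\ **->data**
 *		points (a sketch, assuming **struct iphdr** from
 *		**linux/ip.h**):
 *
 *		::
 *
 *			SEC("socket")
 *			int sock_filter(struct __sk_buff *skb)
 *			{
 *				struct iphdr iph;
 *
 *				if (bpf_skb_load_bytes_relative(skb, 0, &iph,
 *								sizeof(iph),
 *								BPF_HDR_START_NET))
 *					return 0;
 *				// Keep TCP packets, drop everything else.
 *				return iph.protocol == IPPROTO_TCP ? skb->len : 0;
 *			}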
2194 * Return
2195 * 0 on success, or a negative error in case of failure.
2196 *
2197 * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
2198 * Description
2199 * Do FIB lookup in kernel tables using parameters in *params*.
2200 * If lookup is successful and result shows packet is to be
2201 * forwarded, the neighbor tables are searched for the nexthop.
2202 *		If successful (i.e., FIB lookup shows forwarding and nexthop
2203 * is resolved), the nexthop address is returned in ipv4_dst
2204 * or ipv6_dst based on family, smac is set to mac address of
2205 * egress device, dmac is set to nexthop mac address, rt_metric
2206 * is set to metric from route (IPv4/IPv6 only), and ifindex
2207 * is set to the device index of the nexthop from the FIB lookup.
2208 *
2209 * *plen* argument is the size of the passed in struct.
2210 * *flags* argument can be a combination of one or more of the
2211 * following values:
2212 *
2213 * **BPF_FIB_LOOKUP_DIRECT**
2214 * Do a direct table lookup vs full lookup using FIB
2215 * rules.
2216 * **BPF_FIB_LOOKUP_OUTPUT**
2217 * Perform lookup from an egress perspective (default is
2218 * ingress).
2219 *
2220 * *ctx* is either **struct xdp_md** for XDP programs or
2221 *		**struct sk_buff** for tc cls_act programs.
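 *
 *		An XDP sketch of the forwarding fast path; the parsing that
 *		fills the IPv4 fields from the packet is elided:
 *
 *		::
 *
 *			SEC("xdp")
 *			int xdp_fwd(struct xdp_md *ctx)
 *			{
 *				struct bpf_fib_lookup fib = {};
 *
 *				fib.family = AF_INET;
 *				fib.ifindex = ctx->ingress_ifindex;
 *				// ... fill fib.ipv4_src, fib.ipv4_dst, fib.tot_len ...
 *				if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) ==
 *				    BPF_FIB_LKUP_RET_SUCCESS)
 *					// Rewrite smac/dmac here before redirecting.
 *					return bpf_redirect(fib.ifindex, 0);
 *				return XDP_PASS;
 *			}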
2222 * Return
2223 * * < 0 if any input argument is invalid
2224 * * 0 on success (packet is forwarded, nexthop neighbor exists)
2225 * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
2226 * packet is not forwarded or needs assist from full stack
2227 *
2228 * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
2229 * Description
2230 * Add an entry to, or update a sockhash *map* referencing sockets.
2231 * The *skops* is used as a new value for the entry associated to
2232 * *key*. *flags* is one of:
2233 *
2234 * **BPF_NOEXIST**
2235 * The entry for *key* must not exist in the map.
2236 * **BPF_EXIST**
2237 * The entry for *key* must already exist in the map.
2238 * **BPF_ANY**
2239 * No condition on the existence of the entry for *key*.
2240 *
2241 * If the *map* has eBPF programs (parser and verdict), those will
2242 * be inherited by the socket being added. If the socket is
2243 * already attached to eBPF programs, this results in an error.
2244 * Return
2245 * 0 on success, or a negative error in case of failure.
2246 *
2247 * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
2248 * Description
2249 * This helper is used in programs implementing policies at the
2250 * socket level. If the message *msg* is allowed to pass (i.e. if
2251 * the verdict eBPF program returns **SK_PASS**), redirect it to
2252 * the socket referenced by *map* (of type
2253 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
2254 * egress interfaces can be used for redirection. The
2255 * **BPF_F_INGRESS** value in *flags* is used to make the
2256 * distinction (ingress path is selected if the flag is present,
2257 * egress path otherwise). This is the only flag supported for now.
2258 * Return
2259 * **SK_PASS** on success, or **SK_DROP** on error.
2260 *
2261 * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
2262 * Description
2263 * This helper is used in programs implementing policies at the
2264 * skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
2265 * if the verdict eBPF program returns **SK_PASS**), redirect it
2266 * to the socket referenced by *map* (of type
2267 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
2268 * egress interfaces can be used for redirection. The
2269 * **BPF_F_INGRESS** value in *flags* is used to make the
2270 * distinction (ingress path is selected if the flag is present,
2271 * egress otherwise). This is the only flag supported for now.
2272 * Return
2273 * **SK_PASS** on success, or **SK_DROP** on error.
2274 *
2275 * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
2276 * Description
2277 * Encapsulate the packet associated to *skb* within a Layer 3
2278 * protocol header. This header is provided in the buffer at
2279 * address *hdr*, with *len* its size in bytes. *type* indicates
2280 * the protocol of the header and can be one of:
2281 *
2282 * **BPF_LWT_ENCAP_SEG6**
2283 * IPv6 encapsulation with Segment Routing Header
2284 * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
2285 * the IPv6 header is computed by the kernel.
2286 * **BPF_LWT_ENCAP_SEG6_INLINE**
2287 * Only works if *skb* contains an IPv6 packet. Insert a
2288 * Segment Routing Header (**struct ipv6_sr_hdr**) inside
2289 * the IPv6 header.
2290 * **BPF_LWT_ENCAP_IP**
2291 * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
2292 * must be IPv4 or IPv6, followed by zero or more
2293 * additional headers, up to **LWT_BPF_MAX_HEADROOM**
2294 * total bytes in all prepended headers. Please note that
2295 * if **skb_is_gso**\ (*skb*) is true, no more than two
2296 * headers can be prepended, and the inner header, if
2297 * present, should be either GRE or UDP/GUE.
2298 *
2299 * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
2300 * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
2301 *		be called by BPF programs of types **BPF_PROG_TYPE_LWT_IN** and
2302 * **BPF_PROG_TYPE_LWT_XMIT**.
2303 *
2304 * A call to this helper is susceptible to change the underlying
2305 * packet buffer. Therefore, at load time, all checks on pointers
2306 * previously done by the verifier are invalidated and must be
2307 * performed again, if the helper is used in combination with
2308 * direct packet access.
2309 * Return
2310 * 0 on success, or a negative error in case of failure.
2311 *
2312 * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
2313 * Description
2314 * Store *len* bytes from address *from* into the packet
2315 * associated to *skb*, at *offset*. Only the flags, tag and TLVs
2316 * inside the outermost IPv6 Segment Routing Header can be
2317 * modified through this helper.
2318 *
2319 * A call to this helper is susceptible to change the underlying
2320 * packet buffer. Therefore, at load time, all checks on pointers
2321 * previously done by the verifier are invalidated and must be
2322 * performed again, if the helper is used in combination with
2323 * direct packet access.
2324 * Return
2325 * 0 on success, or a negative error in case of failure.
2326 *
2327 * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
2328 * Description
2329 * Adjust the size allocated to TLVs in the outermost IPv6
2330 * Segment Routing Header contained in the packet associated to
2331 * *skb*, at position *offset* by *delta* bytes. Only offsets
2332 *		after the segments are accepted. *delta* can be positive
2333 *		(growing) as well as negative (shrinking).
2334 *
2335 * A call to this helper is susceptible to change the underlying
2336 * packet buffer. Therefore, at load time, all checks on pointers
2337 * previously done by the verifier are invalidated and must be
2338 * performed again, if the helper is used in combination with
2339 * direct packet access.
2340 * Return
2341 * 0 on success, or a negative error in case of failure.
2342 *
2343 * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
2344 * Description
2345 * Apply an IPv6 Segment Routing action of type *action* to the
2346 * packet associated to *skb*. Each action takes a parameter
2347 * contained at address *param*, and of length *param_len* bytes.
2348 * *action* can be one of:
2349 *
2350 * **SEG6_LOCAL_ACTION_END_X**
2351 * End.X action: Endpoint with Layer-3 cross-connect.
2352 * Type of *param*: **struct in6_addr**.
2353 * **SEG6_LOCAL_ACTION_END_T**
2354 * End.T action: Endpoint with specific IPv6 table lookup.
2355 * Type of *param*: **int**.
2356 * **SEG6_LOCAL_ACTION_END_B6**
2357 * End.B6 action: Endpoint bound to an SRv6 policy.
2358 * Type of *param*: **struct ipv6_sr_hdr**.
2359 * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
2360 * End.B6.Encap action: Endpoint bound to an SRv6
2361 * encapsulation policy.
2362 * Type of *param*: **struct ipv6_sr_hdr**.
2363 *
2364 * A call to this helper is susceptible to change the underlying
2365 * packet buffer. Therefore, at load time, all checks on pointers
2366 * previously done by the verifier are invalidated and must be
2367 * performed again, if the helper is used in combination with
2368 * direct packet access.
2369 * Return
2370 * 0 on success, or a negative error in case of failure.
2371 *
2372 * long bpf_rc_repeat(void *ctx)
2373 * Description
2374 * This helper is used in programs implementing IR decoding, to
2375 * report a successfully decoded repeat key message. This delays
2376 *		the generation of a key up event for the previously generated
2377 * key down event.
2378 *
2379 * Some IR protocols like NEC have a special IR message for
2380 * repeating last button, for when a button is held down.
2381 *
2382 * The *ctx* should point to the lirc sample as passed into
2383 * the program.
2384 *
2385 *		This helper is only available if the kernel was compiled with
2386 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2387 * "**y**".
2388 * Return
2389 * 0
2390 *
2391 * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
2392 * Description
2393 * This helper is used in programs implementing IR decoding, to
2394 * report a successfully decoded key press with *scancode*,
2395 * *toggle* value in the given *protocol*. The scancode will be
2396 * translated to a keycode using the rc keymap, and reported as
2397 * an input key down event. After a period a key up event is
2398 * generated. This period can be extended by calling either
2399 * **bpf_rc_keydown**\ () again with the same values, or calling
2400 * **bpf_rc_repeat**\ ().
2401 *
2402 * Some protocols include a toggle bit, in case the button was
2403 * released and pressed again between consecutive scancodes.
2404 *
2405 * The *ctx* should point to the lirc sample as passed into
2406 * the program.
2407 *
2408 * The *protocol* is the decoded protocol number (see
2409 * **enum rc_proto** for some predefined values).
2410 *
2411 *		This helper is only available if the kernel was compiled with
2412 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2413 * "**y**".
2414 * Return
2415 * 0
2416 *
2417 * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
2418 * Description
2419 * Return the cgroup v2 id of the socket associated with the *skb*.
2420 * This is roughly similar to the **bpf_get_cgroup_classid**\ ()
2421 *		helper for cgroup v1, by providing a tag or identifier that
2422 *		can be matched on or used for map lookups, e.g. to implement
2423 * policy. The cgroup v2 id of a given path in the hierarchy is
2424 * exposed in user space through the f_handle API in order to get
2425 * to the same 64-bit id.
2426 *
2427 * This helper can be used on TC egress path, but not on ingress,
2428 * and is available only if the kernel was compiled with the
2429 * **CONFIG_SOCK_CGROUP_DATA** configuration option.
2430 * Return
2431 * The id is returned or 0 in case the id could not be retrieved.
2432 *
2433 * u64 bpf_get_current_cgroup_id(void)
2434 * Return
2435 * A 64-bit integer containing the current cgroup id based
2436 * on the cgroup within which the current task is running.
2437 *
2438 * void *bpf_get_local_storage(void *map, u64 flags)
2439 * Description
2440 * Get the pointer to the local storage area.
2441 * The type and the size of the local storage is defined
2442 * by the *map* argument.
2443 * The *flags* meaning is specific for each map type,
2444 * and has to be 0 for cgroup local storage.
2445 *
2446 * Depending on the BPF program type, a local storage area
2447 * can be shared between multiple instances of the BPF program,
2448 * running simultaneously.
2449 *
2450 *		Users should take care of the synchronization themselves.
2451 * For example, by using the **BPF_STX_XADD** instruction to alter
2452 * the shared data.
2453 * Return
2454 * A pointer to the local storage area.
2455 *
2456 * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
2457 * Description
2458 * Select a **SO_REUSEPORT** socket from a
2459 * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
2460 *		It checks that the selected socket matches the incoming
2461 * request in the socket buffer.
2462 * Return
2463 * 0 on success, or a negative error in case of failure.
2464 *
2465 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
2466 * Description
2467 * Return id of cgroup v2 that is ancestor of cgroup associated
2468 * with the *skb* at the *ancestor_level*. The root cgroup is at
2469 * *ancestor_level* zero and each step down the hierarchy
2470 * increments the level. If *ancestor_level* == level of cgroup
2471 * associated with *skb*, then return value will be same as that
2472 * of **bpf_skb_cgroup_id**\ ().
2473 *
2474 * The helper is useful to implement policies based on cgroups
2475 * that are upper in hierarchy than immediate cgroup associated
2476 * with *skb*.
2477 *
2478 * The format of returned id and helper limitations are same as in
2479 * **bpf_skb_cgroup_id**\ ().
2480 * Return
2481 * The id is returned or 0 in case the id could not be retrieved.
2482 *
2483 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
2484 * Description
2485 * Look for TCP socket matching *tuple*, optionally in a child
2486 * network namespace *netns*. The return value must be checked,
2487 * and if non-**NULL**, released via **bpf_sk_release**\ ().
2488 *
2489 * The *ctx* should point to the context of the program, such as
2490 * the skb or socket (depending on the hook in use). This is used
2491 * to determine the base network namespace for the lookup.
2492 *
2493 * *tuple_size* must be one of:
2494 *
2495 * **sizeof**\ (*tuple*\ **->ipv4**)
2496 * Look for an IPv4 socket.
2497 * **sizeof**\ (*tuple*\ **->ipv6**)
2498 * Look for an IPv6 socket.
2499 *
2500 * If the *netns* is a negative signed 32-bit integer, then the
2501 * socket lookup table in the netns associated with the *ctx*
2502 * will be used. For the TC hooks, this is the netns of the device
2503 * in the skb. For socket hooks, this is the netns of the socket.
2504 * If *netns* is any other signed 32-bit value greater than or
2505 * equal to zero then it specifies the ID of the netns relative to
2506 * the netns associated with the *ctx*. *netns* values beyond the
2507 * range of 32-bit integers are reserved for future use.
2508 *
2509 * All values for *flags* are reserved for future usage, and must
2510 * be left at zero.
2511 *
2512 * This helper is available only if the kernel was compiled with
2513 * **CONFIG_NET** configuration option.
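 *
 *		A sketch in a TC program; filling the tuple from the packet
 *		is elided, and **-1** selects the caller's network namespace:
 *
 *		::
 *
 *			SEC("tc")
 *			int check_sk(struct __sk_buff *skb)
 *			{
 *				struct bpf_sock_tuple tuple = {};
 *				struct bpf_sock *sk;
 *
 *				// ... fill tuple.ipv4 from the packet headers ...
 *				sk = bpf_sk_lookup_tcp(skb, &tuple,
 *						       sizeof(tuple.ipv4), -1, 0);
 *				if (sk)
 *					// Any non-NULL result must be released.
 *					bpf_sk_release(sk);
 *				return TC_ACT_OK;
 *			}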
2514 * Return
2515 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
2516 * For sockets with reuseport option, the **struct bpf_sock**
2517 * result is from *reuse*\ **->socks**\ [] using the hash of the
2518 * tuple.
2519 *
2520 * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
2521 * Description
2522 * Look for UDP socket matching *tuple*, optionally in a child
2523 * network namespace *netns*. The return value must be checked,
2524 * and if non-**NULL**, released via **bpf_sk_release**\ ().
2525 *
2526 * The *ctx* should point to the context of the program, such as
2527 * the skb or socket (depending on the hook in use). This is used
2528 * to determine the base network namespace for the lookup.
2529 *
2530 * *tuple_size* must be one of:
2531 *
2532 * **sizeof**\ (*tuple*\ **->ipv4**)
2533 * Look for an IPv4 socket.
2534 * **sizeof**\ (*tuple*\ **->ipv6**)
2535 * Look for an IPv6 socket.
2536 *
2537 * If the *netns* is a negative signed 32-bit integer, then the
2538 * socket lookup table in the netns associated with the *ctx*
2539 * will be used. For the TC hooks, this is the netns of the device
2540 * in the skb. For socket hooks, this is the netns of the socket.
2541 * If *netns* is any other signed 32-bit value greater than or
2542 * equal to zero then it specifies the ID of the netns relative to
2543 * the netns associated with the *ctx*. *netns* values beyond the
2544 * range of 32-bit integers are reserved for future use.
2545 *
2546 * All values for *flags* are reserved for future usage, and must
2547 * be left at zero.
2548 *
2549 * This helper is available only if the kernel was compiled with
2550 * **CONFIG_NET** configuration option.
2551 * Return
2552 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
2553 * For sockets with reuseport option, the **struct bpf_sock**
2554 * result is from *reuse*\ **->socks**\ [] using the hash of the
2555 * tuple.
2556 *
2557 * long bpf_sk_release(void *sock)
2558 * Description
2559 * Release the reference held by *sock*. *sock* must be a
2560 * non-**NULL** pointer that was returned from
2561 * **bpf_sk_lookup_xxx**\ ().
2562 * Return
2563 * 0 on success, or a negative error in case of failure.
2564 *
2565 * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
2566 * Description
2567 * Push an element *value* in *map*. *flags* is one of:
2568 *
2569 * **BPF_EXIST**
2570 * If the queue/stack is full, the oldest element is
2571 * removed to make room for this.
2572 * Return
2573 * 0 on success, or a negative error in case of failure.
2574 *
2575 * long bpf_map_pop_elem(struct bpf_map *map, void *value)
2576 * Description
2577 * Pop an element from *map*.
2578 * Return
2579 * 0 on success, or a negative error in case of failure.
2580 *
2581 * long bpf_map_peek_elem(struct bpf_map *map, void *value)
2582 * Description
2583 * Get an element from *map* without removing it.
2584 * Return
2585 * 0 on success, or a negative error in case of failure.
2586 *
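 *		These three queue/stack helpers are typically used together,
 *		e.g. with a **BPF_MAP_TYPE_QUEUE**. The BTF-defined map below
 *		is a sketch that assumes a libbpf version supporting queue
 *		maps in the **.maps** section:
 *
 *		::
 *
 *			struct {
 *				__uint(type, BPF_MAP_TYPE_QUEUE);
 *				__uint(max_entries, 16);
 *				__type(value, __u32);
 *			} events SEC(".maps");
 *
 *			SEC("tc")
 *			int record_len(struct __sk_buff *skb)
 *			{
 *				__u32 v = skb->len;
 *
 *				// BPF_EXIST: overwrite the oldest entry when full.
 *				bpf_map_push_elem(&events, &v, BPF_EXIST);
 *				return TC_ACT_OK;
 *			}
 *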
2587 * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
2588 * Description
2589 * For socket policies, insert *len* bytes into *msg* at offset
2590 * *start*.
2591 *
2592 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
2593 * *msg* it may want to insert metadata or options into the *msg*.
2594 * This can later be read and used by any of the lower layer BPF
2595 * hooks.
2596 *
2597 *		This helper may fail under memory pressure (if an allocation
2598 *		fails); in that case the BPF program will get an appropriate
2599 *		error and will need to handle it.
2600 * Return
2601 * 0 on success, or a negative error in case of failure.
2602 *
2603 * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
2604 * Description
2605 * Will remove *len* bytes from a *msg* starting at byte *start*.
2606 * This may result in **ENOMEM** errors under certain situations if
2607 * an allocation and copy are required due to a full ring buffer.
2608 * However, the helper will try to avoid doing the allocation
2609 * if possible. Other errors can occur if input parameters are
2610 *		invalid, either due to the *start* byte not being a valid part
2611 *		of the *msg* payload and/or the *len* value being too large.
2612 * Return
2613 * 0 on success, or a negative error in case of failure.
2614 *
2615 * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
2616 * Description
2617 * This helper is used in programs implementing IR decoding, to
2618 * report a successfully decoded pointer movement.
2619 *
2620 * The *ctx* should point to the lirc sample as passed into
2621 * the program.
2622 *
2623 *		This helper is only available if the kernel was compiled with
2624 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2625 * "**y**".
2626 * Return
2627 * 0
2628 *
2629 * long bpf_spin_lock(struct bpf_spin_lock *lock)
2630 * Description
2631 * Acquire a spinlock represented by the pointer *lock*, which is
2632 *		stored as part of a value of a map. Taking the lock allows one to
2633 * safely update the rest of the fields in that value. The
2634 * spinlock can (and must) later be released with a call to
2635 * **bpf_spin_unlock**\ (\ *lock*\ ).
2636 *
2637 * Spinlocks in BPF programs come with a number of restrictions
2638 * and constraints:
2639 *
2640 * * **bpf_spin_lock** objects are only allowed inside maps of
2641 * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
2642 * list could be extended in the future).
2643 * * BTF description of the map is mandatory.
2644 * * The BPF program can take ONE lock at a time, since taking two
2645 * or more could cause deadlocks.
2646 * * Only one **struct bpf_spin_lock** is allowed per map element.
2647 * * When the lock is taken, calls (either BPF to BPF or helpers)
2648 * are not allowed.
2649 * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
2650 * allowed inside a spinlock-ed region.
2651 * * The BPF program MUST call **bpf_spin_unlock**\ () to release
2652 * the lock, on all execution paths, before it returns.
2653 * * The BPF program can access **struct bpf_spin_lock** only via
2654 * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
2655 * helpers. Loading or storing data into the **struct
2656 * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
2657 * * To use the **bpf_spin_lock**\ () helper, the BTF description
2658 * of the map value must be a struct and have **struct
2659 * bpf_spin_lock** *anyname*\ **;** field at the top level.
2660 * Nested lock inside another struct is not allowed.
2661 * * The **struct bpf_spin_lock** *lock* field in a map value must
2662 * be aligned on a multiple of 4 bytes in that value.
2663 * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
2664 * the **bpf_spin_lock** field to user space.
2665 * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
2666 * a BPF program, do not update the **bpf_spin_lock** field.
2667 * * **bpf_spin_lock** cannot be on the stack or inside a
2668 * networking packet (it can only be inside a map value).
2669 * * **bpf_spin_lock** is available to root only.
2670 * * Tracing programs and socket filter programs cannot use
2671 * **bpf_spin_lock**\ () due to insufficient preemption checks
2672 * (but this may change in the future).
2673 * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
2674 * Return
2675 * 0
2676 *
2677 * long bpf_spin_unlock(struct bpf_spin_lock *lock)
2678 * Description
2679 * Release the *lock* previously locked by a call to
2680 * **bpf_spin_lock**\ (\ *lock*\ ).
2681 * Return
2682 * 0
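 *
 *		As a sketch of the intended pattern (map *m*, *key* and the
 *		value layout are illustrative): the lock sits at the top
 *		level of the map value and guards the other fields:
 *
 *		::
 *
 *			struct val {
 *				struct bpf_spin_lock lock;
 *				__u64 cnt;
 *			};
 *
 *			struct val *v = bpf_map_lookup_elem(&m, &key);
 *
 *			if (v) {
 *				bpf_spin_lock(&v->lock);
 *				v->cnt++;
 *				bpf_spin_unlock(&v->lock);
 *			}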
2683 *
2684 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
2685 * Description
2686 * This helper gets a **struct bpf_sock** pointer such
2687 * that all the fields in this **bpf_sock** can be accessed.
2688 * Return
2689 * A **struct bpf_sock** pointer on success, or **NULL** in
2690 * case of failure.
2691 *
2692 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
2693 * Description
2694 * This helper gets a **struct bpf_tcp_sock** pointer from a
2695 * **struct bpf_sock** pointer.
2696 * Return
2697 * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
2698 * case of failure.
2699 *
2700 * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
2701 * Description
2702 * Set ECN (Explicit Congestion Notification) field of IP header
2703 * to **CE** (Congestion Encountered) if current value is **ECT**
2704 * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
2705 * and IPv4.
2706 * Return
2707 * 1 if the **CE** flag is set (either by the current helper call
2708 * or because it was already present), 0 if it is not set.
2709 *
2710 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
2711 * Description
2712 * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
2713 * **bpf_sk_release**\ () is unnecessary and not allowed.
2714 * Return
2715 * A **struct bpf_sock** pointer on success, or **NULL** in
2716 * case of failure.
2717 *
2718 * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
2719 * Description
2720 * Look for TCP socket matching *tuple*, optionally in a child
2721 * network namespace *netns*. The return value must be checked,
2722 * and if non-**NULL**, released via **bpf_sk_release**\ ().
2723 *
2724 * This function is identical to **bpf_sk_lookup_tcp**\ (), except
2725 * that it also returns timewait or request sockets. Use
2726 * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
2727 * full structure.
2728 *
2729 * This helper is available only if the kernel was compiled with
2730 * **CONFIG_NET** configuration option.
2731 * Return
2732 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
2733 * For sockets with reuseport option, the **struct bpf_sock**
2734 * result is from *reuse*\ **->socks**\ [] using the hash of the
2735 * tuple.
2736 *
2737 * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
2738 * Description
2739 * Check whether *iph* and *th* contain a valid SYN cookie ACK for
2740 * the listening socket in *sk*.
2741 *
2742 * *iph* points to the start of the IPv4 or IPv6 header, while
2743 * *iph_len* contains **sizeof**\ (**struct iphdr**) or
2744 * **sizeof**\ (**struct ip6hdr**).
2745 *
2746 * *th* points to the start of the TCP header, while *th_len*
2747 * contains **sizeof**\ (**struct tcphdr**).
2748 * Return
2749 * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
2750 * error otherwise.
2751 *
2752 * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
2753 * Description
2754 * Get the name of a sysctl in /proc/sys/ and copy it into the
2755 * buffer *buf* of size *buf_len* provided by the program.
2756 *
2757 * The buffer is always NUL terminated, unless it's zero-sized.
2758 *
2759 * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is
2760 * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name
2761 * only (e.g. "tcp_mem").
2762 * Return
2763 * Number of characters copied (not including the trailing NUL).
2764 *
2765 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
2766 * truncated name in this case).
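 *
 *		For instance (a sketch; the buffer size is arbitrary):
 *
 *		::
 *
 *			char name[64];
 *
 *			if (bpf_sysctl_get_name(ctx, name, sizeof(name),
 *						BPF_F_SYSCTL_BASE_NAME) < 0) {
 *				// name was truncated (-E2BIG)
 *			}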
2767 *
2768 * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
2769 * Description
2770 * Get the current value of a sysctl as it is presented in /proc/sys
2771 * (incl. newline, etc), and copy it as a string into the buffer
2772 * *buf* of size *buf_len* provided by the program.
2773 *
2774 * The whole value is copied, no matter at what file position user
2775 * space issued the read (e.g. **sys_read**).
2776 *
2777 * The buffer is always NUL terminated, unless it's zero-sized.
2778 * Return
2779 * Number of characters copied (not including the trailing NUL).
2780 *
2781 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
2782 * a truncated value in this case).
2783 *
2784 * **-EINVAL** if current value was unavailable, e.g. because
2785 * sysctl is uninitialized and read returns -EIO for it.
2786 *
2787 * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
2788 * Description
2789 * Get the new value being written by user space to a sysctl (before
2790 * the actual write happens) and copy it as a string into the
2791 * buffer *buf* of size *buf_len* provided by the program.
2792 *
2793 * User space may write the new value at a file position > 0.
2794 *
2795 * The buffer is always NUL terminated, unless it's zero-sized.
2796 * Return
2797 * Number of characters copied (not including the trailing NUL).
2798 *
2799 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
2800 * a truncated value in this case).
2801 *
2802 * **-EINVAL** if sysctl is being read.
2803 *
2804 * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
2805 * Description
2806 * Override the new value being written by user space to a sysctl
2807 * with the value provided by the program in buffer *buf* of size *buf_len*.
2808 *
2809 * *buf* should contain a string in the same form as provided by user
2810 * space on sysctl write.
2811 *
2812 * User space may write the new value at a file position > 0. To
2813 * override the whole sysctl value, the file position should be set to zero.
2814 * Return
2815 * 0 on success.
2816 *
2817 * **-E2BIG** if the *buf_len* is too big.
2818 *
2819 * **-EINVAL** if sysctl is being read.
2820 *
2821 * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
2822 * Description
2823 * Convert the initial part of the string from buffer *buf* of
2824 * size *buf_len* to a long integer according to the given base
2825 * and save the result in *res*.
2826 *
2827 * The string may begin with an arbitrary amount of white space
2828 * (as determined by **isspace**\ (3)) followed by a single
2829 * optional '**-**' sign.
2830 *
2831 * The five least significant bits of *flags* encode the base;
2832 * the other bits are currently unused.
2833 *
2834 * The base must be either 8, 10, 16 or 0 to detect it
2835 * automatically, similar to user space **strtol**\ (3).
2836 * Return
2837 * Number of characters consumed on success. Must be positive but
2838 * no more than *buf_len*.
2839 *
2840 * **-EINVAL** if no valid digits were found or an unsupported
2841 * base was provided.
2842 *
2843 * **-ERANGE** if the resulting value was out of range.
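 *
 *		A sketch of the common pairing with
 *		**bpf_sysctl_get_new_value**\ () (buffer size arbitrary):
 *
 *		::
 *
 *			char buf[16];
 *			long val;
 *			int len = bpf_sysctl_get_new_value(ctx, buf,
 *							   sizeof(buf));
 *
 *			if (len > 0 && bpf_strtol(buf, len, 0, &val) > 0) {
 *				// val holds the value being written,
 *				// base auto-detected
 *			}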
2844 *
2845 * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
2846 * Description
2847 * Convert the initial part of the string from buffer *buf* of
2848 * size *buf_len* to an unsigned long integer according to the
2849 * given base and save the result in *res*.
2850 *
2851 * The string may begin with an arbitrary amount of white space
2852 * (as determined by **isspace**\ (3)).
2853 *
2854 * The five least significant bits of *flags* encode the base;
2855 * the other bits are currently unused.
2856 *
2857 * The base must be either 8, 10, 16 or 0 to detect it
2858 * automatically, similar to user space **strtoul**\ (3).
2859 * Return
2860 * Number of characters consumed on success. Must be positive but
2861 * no more than *buf_len*.
2862 *
2863 * **-EINVAL** if no valid digits were found or an unsupported
2864 * base was provided.
2865 *
2866 * **-ERANGE** if the resulting value was out of range.
2867 *
2868 * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
2869 * Description
2870 * Get a bpf-local-storage from a *sk*.
2871 *
2872 * Logically, it could be thought of as getting the value from
2873 * a *map* with *sk* as the **key**. From this
2874 * perspective, the usage is not much different from
2875 * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*), except that this
2876 * helper enforces that the key must be a full socket and that the
2877 * map must also be a **BPF_MAP_TYPE_SK_STORAGE**.
2878 *
2879 * Underneath, the value is stored locally at *sk* instead of
2880 * the *map*. The *map* is used as the bpf-local-storage
2881 * "type". The bpf-local-storage "type" (i.e. the *map*) is
2882 * searched against all bpf-local-storages residing at *sk*.
2883 *
2884 * *sk* is a kernel **struct sock** pointer for LSM programs, and
2885 * a **struct bpf_sock** pointer for other program types.
2886 *
2887 * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
2888 * used such that a new bpf-local-storage will be
2889 * created if one does not exist. *value* can be used
2890 * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
2891 * the initial value of a bpf-local-storage. If *value* is
2892 * **NULL**, the new bpf-local-storage will be zero initialized.
2893 * Return
2894 * A bpf-local-storage pointer is returned on success.
2895 *
2896 * **NULL** if not found or there was an error in adding
2897 * a new bpf-local-storage.
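 *
 *		A sketch (libbpf-style map definition; names hypothetical):
 *		keep a per-socket packet counter, creating the storage on
 *		first use:
 *
 *		::
 *
 *			struct {
 *				__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *				__uint(map_flags, BPF_F_NO_PREALLOC);
 *				__type(key, int);
 *				__type(value, __u64);
 *			} pkt_cnt SEC(".maps");
 *
 *			__u64 *cnt = bpf_sk_storage_get(&pkt_cnt, sk, NULL,
 *						BPF_SK_STORAGE_GET_F_CREATE);
 *			if (cnt)
 *				(*cnt)++;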
2898 *
2899 * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
2900 * Description
2901 * Delete a bpf-local-storage from a *sk*.
2902 * Return
2903 * 0 on success.
2904 *
2905 * **-ENOENT** if the bpf-local-storage cannot be found.
2906 * **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
2907 *
2908 * long bpf_send_signal(u32 sig)
2909 * Description
2910 * Send signal *sig* to the process of the current task.
2911 * The signal may be delivered to any of this process's threads.
2912 * Return
2913 * 0 on success or successfully queued.
2914 *
2915 * **-EBUSY** if work queue under nmi is full.
2916 *
2917 * **-EINVAL** if *sig* is invalid.
2918 *
2919 * **-EPERM** if no permission to send the *sig*.
2920 *
2921 * **-EAGAIN** if bpf program can try again.
2922 *
2923 * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
2924 * Description
2925 * Try to issue a SYN cookie for the packet with corresponding
2926 * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
2927 *
2928 * *iph* points to the start of the IPv4 or IPv6 header, while
2929 * *iph_len* contains **sizeof**\ (**struct iphdr**) or
2930 * **sizeof**\ (**struct ip6hdr**).
2931 *
2932 * *th* points to the start of the TCP header, while *th_len*
2933 * contains the length of the TCP header.
2934 * Return
2935 * On success, the lower 32 bits hold the generated SYN cookie,
2936 * followed by 16 bits which hold the MSS value for that cookie;
2937 * the top 16 bits are unused.
2938 *
2939 * On failure, the returned value is one of the following:
2940 *
2941 * **-EINVAL** SYN cookie cannot be issued due to error
2942 *
2943 * **-ENOENT** SYN cookie should not be issued (no SYN flood)
2944 *
2945 * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies
2946 *
2947 * **-EPROTONOSUPPORT** IP packet version is not 4 or 6
2948 *
2949 * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
2950 * Description
2951 * Write raw *data* blob into a special BPF perf event held by
2952 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
2953 * event must have the following attributes: **PERF_SAMPLE_RAW**
2954 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
2955 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
2956 *
2957 * The *flags* are used to indicate the index in *map* for which
2958 * the value must be put, masked with **BPF_F_INDEX_MASK**.
2959 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
2960 * to indicate that the index of the current CPU core should be
2961 * used.
2962 *
2963 * The value to write, of *size*, is passed through the eBPF stack
2964 * and pointed to by *data*.
2965 *
2966 * *ctx* is a pointer to the in-kernel **struct sk_buff**.
2967 *
2968 * This helper is similar to **bpf_perf_event_output**\ () but
2969 * restricted to raw_tracepoint bpf programs.
2970 * Return
2971 * 0 on success, or a negative error in case of failure.
2972 *
2973 * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
2974 * Description
2975 * Safely attempt to read *size* bytes from user space address
2976 * *unsafe_ptr* and store the data in *dst*.
2977 * Return
2978 * 0 on success, or a negative error in case of failure.
2979 *
2980 * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
2981 * Description
2982 * Safely attempt to read *size* bytes from kernel space address
2983 * *unsafe_ptr* and store the data in *dst*.
2984 * Return
2985 * 0 on success, or a negative error in case of failure.
2986 *
2987 * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
2988 * Description
2989 * Copy a NUL terminated string from an unsafe user address
2990 * *unsafe_ptr* to *dst*. The *size* should include the
2991 * terminating NUL byte. In case the string length is smaller than
2992 * *size*, the target is not padded with further NUL bytes. If the
2993 * string length is larger than *size*, just *size*-1 bytes are
2994 * copied and the last byte is set to NUL.
2995 *
2996 * On success, the length of the copied string is returned. This
2997 * makes this helper useful in tracing programs for reading
2998 * strings, and more importantly to get their length at runtime. See
2999 * the following snippet:
3000 *
3001 * ::
3002 *
3003 * SEC("kprobe/sys_open")
3004 * void bpf_sys_open(struct pt_regs *ctx)
3005 * {
3006 * char buf[PATHLEN]; // PATHLEN is defined to 256
3007 * int res = bpf_probe_read_user_str(buf, sizeof(buf),
3008 * ctx->di);
3009 *
3010 * // Consume buf, for example push it to
3011 * // userspace via bpf_perf_event_output(); we
3012 * // can use res (the string length) as event
3013 * // size, after checking its boundaries.
3014 * }
3015 *
3016 * In comparison, using the **bpf_probe_read_user**\ () helper here
3017 * instead to read the string would require estimating the length
3018 * at compile time, and would often result in copying more memory
3019 * than necessary.
3020 *
3021 * Another useful use case is parsing individual process
3022 * arguments or individual environment variables by navigating
3023 * *current*\ **->mm->arg_start** and *current*\
3024 * **->mm->env_start**: using this helper and its return value,
3025 * one can quickly iterate at the right offset of the memory area.
3026 * Return
3027 * On success, the strictly positive length of the string,
3028 * including the trailing NUL character. On error, a negative
3029 * value.
3030 *
3031 * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
3032 * Description
3033 * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
3034 * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
3035 * Return
3036 * On success, the strictly positive length of the string, including
3037 * the trailing NUL character. On error, a negative value.
3038 *
3039 * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
3040 * Description
3041 * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
3042 * *rcv_nxt* is the ack_seq to be sent out.
3043 * Return
3044 * 0 on success, or a negative error in case of failure.
3045 *
3046 * long bpf_send_signal_thread(u32 sig)
3047 * Description
3048 * Send signal *sig* to the thread corresponding to the current task.
3049 * Return
3050 * 0 on success or successfully queued.
3051 *
3052 * **-EBUSY** if work queue under nmi is full.
3053 *
3054 * **-EINVAL** if *sig* is invalid.
3055 *
3056 * **-EPERM** if no permission to send the *sig*.
3057 *
3058 * **-EAGAIN** if bpf program can try again.
3059 *
3060 * u64 bpf_jiffies64(void)
3061 * Description
3062 * Obtain the 64-bit jiffies counter.
3063 * Return
3064 * The 64-bit jiffies value.
3065 *
3066 * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
3067 * Description
3068 * For an eBPF program attached to a perf event, retrieve the
3069 * branch records (**struct perf_branch_entry**) associated with
3070 * *ctx* and store them in the buffer pointed to by *buf*, up to
3071 * *size* bytes.
3072 * Return
3073 * On success, number of bytes written to *buf*. On error, a
3074 * negative value.
3075 *
3076 * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
3077 * instead return the number of bytes required to store all the
3078 * branch entries. If this flag is set, *buf* may be NULL.
3079 *
3080 * **-EINVAL** if arguments invalid or **size** not a multiple
3081 * of **sizeof**\ (**struct perf_branch_entry**\ ).
3082 *
3083 * **-ENOENT** if architecture does not support branch records.
3084 *
3085 * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
3086 * Description
3087 * Obtain the values of *pid* and *tgid* as seen from the pid
3088 * *namespace* specified by *dev* and *ino*, and return them in *nsdata*.
3089 * Return
3090 * 0 on success, or one of the following in case of failure:
3091 *
3092 * **-EINVAL** if the *dev* and *ino* supplied don't match the dev_t and
3093 * inode number of the current task's nsfs, or if dev conversion to dev_t lost high bits.
3094 *
3095 * **-ENOENT** if the pidns does not exist for the current task.
3096 *
3097 * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
3098 * Description
3099 * Write raw *data* blob into a special BPF perf event held by
3100 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
3101 * event must have the following attributes: **PERF_SAMPLE_RAW**
3102 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
3103 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
3104 *
3105 * The *flags* are used to indicate the index in *map* for which
3106 * the value must be put, masked with **BPF_F_INDEX_MASK**.
3107 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
3108 * to indicate that the index of the current CPU core should be
3109 * used.
3110 *
3111 * The value to write, of *size*, is passed through the eBPF stack
3112 * and pointed to by *data*.
3113 *
3114 * *ctx* is a pointer to the in-kernel **struct xdp_buff**.
3115 *
3116 * This helper is similar to **bpf_perf_event_output**\ () but
3117 * restricted to raw_tracepoint bpf programs.
3118 * Return
3119 * 0 on success, or a negative error in case of failure.
3120 *
3121 * u64 bpf_get_netns_cookie(void *ctx)
3122 * Description
3123 * Retrieve the cookie (generated by the kernel) of the network
3124 * namespace the input *ctx* is associated with. The network
3125 * namespace cookie remains stable for its lifetime and provides
3126 * a global identifier that can be assumed unique. If *ctx* is
3127 * NULL, then the helper returns the cookie for the initial
3128 * network namespace. The cookie itself is very similar to that
3129 * of **bpf_get_socket_cookie**\ () helper, but for network
3130 * namespaces instead of sockets.
3131 * Return
3132 * An 8-byte opaque number.
3133 *
3134 * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
3135 * Description
3136 * Return id of cgroup v2 that is ancestor of the cgroup associated
3137 * with the current task at the *ancestor_level*. The root cgroup
3138 * is at *ancestor_level* zero and each step down the hierarchy
3139 * increments the level. If *ancestor_level* == level of cgroup
3140 * associated with the current task, then the return value will be the
3141 * same as that of **bpf_get_current_cgroup_id**\ ().
3142 *
3143 * The helper is useful to implement policies based on cgroups
3144 * that are higher in the hierarchy than the immediate cgroup
3145 * associated with the current task.
3146 *
3147 * The format of the returned id and the helper limitations are the
3148 * same as in **bpf_get_current_cgroup_id**\ ().
3149 * Return
3150 * The id is returned or 0 in case the id could not be retrieved.
3151 *
3152 * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
3153 * Description
3154 * Helper is overloaded depending on BPF program type. This
3155 * description applies to **BPF_PROG_TYPE_SCHED_CLS** and
3156 * **BPF_PROG_TYPE_SCHED_ACT** programs.
3157 *
3158 * Assign the *sk* to the *skb*. When combined with appropriate
3159 * routing configuration to receive the packet towards the socket,
3160 * this will cause *skb* to be delivered to the specified socket.
3161 * Subsequent redirection of *skb* via **bpf_redirect**\ (),
3162 * **bpf_clone_redirect**\ () or other methods outside of BPF may
3163 * interfere with successful delivery to the socket.
3164 *
3165 * This operation is only valid from TC ingress path.
3166 *
3167 * The *flags* argument must be zero.
3168 * Return
3169 * 0 on success, or a negative error in case of failure:
3170 *
3171 * **-EINVAL** if specified *flags* are not supported.
3172 *
3173 * **-ENOENT** if the socket is unavailable for assignment.
3174 *
3175 * **-ENETUNREACH** if the socket is unreachable (wrong netns).
3176 *
3177 * **-EOPNOTSUPP** if the operation is not supported, for example
3178 * a call from outside of TC ingress.
3179 *
3180 * **-ESOCKTNOSUPPORT** if the socket type is not supported
3181 * (reuseport).
3182 *
3183 * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
3184 * Description
3185 * Helper is overloaded depending on BPF program type. This
3186 * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
3187 *
3188 * Select the *sk* as a result of a socket lookup.
3189 *
3190 * For the operation to succeed, the passed socket must be
3191 * compatible with the packet description provided by the *ctx* object.
3192 *
3193 * The L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
3194 * be an exact match, while the IP family (**AF_INET** or
3195 * **AF_INET6**) must be compatible, that is, IPv6 sockets
3196 * that are not v6-only can be selected for IPv4 packets.
3197 *
3198 * Only TCP listeners and UDP unconnected sockets can be
3199 * selected. *sk* can also be NULL to reset any previous
3200 * selection.
3201 *
3202 * The *flags* argument can be a combination of the following values:
3203 *
3204 * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous
3205 * socket selection, potentially done by a BPF program
3206 * that ran before us.
3207 *
3208 * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
3209 * load-balancing within reuseport group for the socket
3210 * being selected.
3211 *
3212 * On success, *ctx->sk* will point to the selected socket.
3213 *
3214 * Return
3215 * 0 on success, or a negative errno in case of failure.
3216 *
3217 * * **-EAFNOSUPPORT** if socket family (*sk->family*) is
3218 * not compatible with packet family (*ctx->family*).
3219 *
3220 * * **-EEXIST** if socket has been already selected,
3221 * potentially by another program, and
3222 * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
3223 *
3224 * * **-EINVAL** if unsupported flags were specified.
3225 *
3226 * * **-EPROTOTYPE** if socket L4 protocol
3227 * (*sk->protocol*) doesn't match packet protocol
3228 * (*ctx->protocol*).
3229 *
3230 * * **-ESOCKTNOSUPPORT** if socket is not in allowed
3231 * state (TCP listening or UDP unconnected).
3232 *
3233 * u64 bpf_ktime_get_boot_ns(void)
3234 * Description
3235 * Return the time elapsed since system boot, in nanoseconds.
3236 * Does include the time the system was suspended.
3237 * See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
3238 * Return
3239 * Current *ktime*.
3240 *
3241 * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
3242 * Description
3243 * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
3244 * out the format string.
3245 * The *m* represents the seq_file. The *fmt* and *fmt_size* are for
3246 * the format string itself. The *data* and *data_len* are format string
3247 * arguments. The *data* is a **u64** array and the corresponding format string
3248 * values are stored in the array. For strings and pointers where pointees
3249 * are accessed, only the pointer values are stored in the *data* array.
3250 * The *data_len* is the size of *data* in bytes.
3251 *
3252 * Formats **%s** and **%p{i,I}{4,6}** require reading kernel memory.
3253 * Reading kernel memory may fail due to either invalid address or
3254 * valid address but requiring a major memory fault. If reading kernel memory
3255 * fails, the string for **%s** will be an empty string, and the ip
3256 * address for **%p{i,I}{4,6}** will be 0. Not returning an error
3257 * to the bpf program is consistent with what **bpf_trace_printk**\ () does for now.
3258 * Return
3259 * 0 on success, or a negative error in case of failure:
3260 *
3261 * **-EBUSY** if the per-CPU memory copy buffer is busy; the program
3262 * can try again by returning 1.
3263 *
3264 * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported.
3265 *
3266 * **-E2BIG** if *fmt* contains too many format specifiers.
3267 *
3268 * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
3269 *
3270 * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
3271 * Description
3272 * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
3273 * The *m* represents the seq_file. The *data* and *len* represent the
3274 * data to write in bytes.
3275 * Return
3276 * 0 on success, or a negative error in case of failure:
3277 *
3278 * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
3279 *
3280 * u64 bpf_sk_cgroup_id(void *sk)
3281 * Description
3282 * Return the cgroup v2 id of the socket *sk*.
3283 *
3284 * *sk* must be a non-**NULL** pointer to a socket, e.g. one
3285 * returned from **bpf_sk_lookup_xxx**\ (),
3286 * **bpf_sk_fullsock**\ (), etc. The format of returned id is
3287 * same as in **bpf_skb_cgroup_id**\ ().
3288 *
3289 * This helper is available only if the kernel was compiled with
3290 * the **CONFIG_SOCK_CGROUP_DATA** configuration option.
3291 * Return
3292 * The id is returned or 0 in case the id could not be retrieved.
3293 *
3294 * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
3295 * Description
3296 * Return id of cgroup v2 that is ancestor of cgroup associated
3297 * with the *sk* at the *ancestor_level*. The root cgroup is at
3298 * *ancestor_level* zero and each step down the hierarchy
3299 * increments the level. If *ancestor_level* == level of the cgroup
3300 * associated with *sk*, then the return value will be the same as
3301 * that of **bpf_sk_cgroup_id**\ ().
3302 *
3303 * The helper is useful to implement policies based on cgroups
3304 * that are higher in the hierarchy than the immediate cgroup
3305 * associated with *sk*.
3306 *
3307 * The format of the returned id and the helper limitations are the
3308 * same as in **bpf_sk_cgroup_id**\ ().
3309 * Return
3310 * The id is returned or 0 in case the id could not be retrieved.
3311 *
3312 * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
3313 * Description
3314 * Copy *size* bytes from *data* into a ring buffer *ringbuf*.
3315 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
3316 * of new data availability is sent.
3317 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
3318 * of new data availability is sent unconditionally.
3319 * Return
3320 * 0 on success, or a negative error in case of failure.
3321 *
3322 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
3323 * Description
3324 * Reserve *size* bytes of payload in a ring buffer *ringbuf*.
3325 * Return
3326 * A valid pointer with *size* bytes of memory available; **NULL**
3327 * otherwise.
3328 *
3329 * void bpf_ringbuf_submit(void *data, u64 flags)
3330 * Description
3331 * Submit reserved ring buffer sample, pointed to by *data*.
3332 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
3333 * of new data availability is sent.
3334 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
3335 * of new data availability is sent unconditionally.
3336 * Return
3337 * Nothing. Always succeeds.
3338 *
3339 * void bpf_ringbuf_discard(void *data, u64 flags)
3340 * Description
3341 * Discard reserved ring buffer sample, pointed to by *data*.
3342 * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
3343 * of new data availability is sent.
3344 * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
3345 * of new data availability is sent unconditionally.
3346 * Return
3347 * Nothing. Always succeeds.
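 *
 *		A reserve/submit sketch (*rb* is a **BPF_MAP_TYPE_RINGBUF**
 *		map and **struct event** a hypothetical sample layout):
 *
 *		::
 *
 *			struct event *e;
 *
 *			e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
 *			if (!e)
 *				return 0; // reservation failed, buffer full
 *
 *			e->pid = bpf_get_current_pid_tgid() >> 32;
 *			bpf_ringbuf_submit(e, 0);
 *			// or bpf_ringbuf_discard(e, 0) to drop the sample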
3348 *
3349 * u64 bpf_ringbuf_query(void *ringbuf, u64 flags)
3350 * Description
3351 * Query various characteristics of the provided ring buffer. What
3352 * exactly is queried is determined by *flags*:
3353 *
3354 * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
3355 * * **BPF_RB_RING_SIZE**: The size of ring buffer.
3356 * * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
3357 * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
3358 *
3359 * The data returned is just a momentary snapshot of the actual
3360 * values and could be inaccurate, so this facility should be used
3361 * to power heuristics and for reporting, not to make 100% correct
3362 * calculations.
3363 * Return
3364 * Requested value, or 0, if *flags* are not recognized.
3365 *
3366 * long bpf_csum_level(struct sk_buff *skb, u64 level)
3367 * Description
3368 * Change the skb's checksum level by one layer up or down, or
3369 * reset it entirely to none in order to have the stack perform
3370 * checksum validation. The level is applicable to the following
3371 * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
3372 * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
3373 * through **bpf_skb_adjust_room**\ () helper with passing in
3374 * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call
3375 * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
3376 * the UDP header is removed. Similarly, an encap of the latter
3377 * into the former could be accompanied by a helper call to
3378 * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
3379 * skb is still intended to be processed in higher layers of the
3380 * stack instead of just egressing at tc.
3381 *
3382 * There are three supported level settings at this time:
3383 *
3384 * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
3385 * with CHECKSUM_UNNECESSARY.
3386 * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
3387 * with CHECKSUM_UNNECESSARY.
3388 * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
3389 * sets CHECKSUM_NONE to force checksum validation by the stack.
3390 * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
3391 * skb->csum_level.
3392 * Return
3393 * 0 on success, or a negative error in case of failure. In the
3394 * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
3395 * is returned or the error code -EACCES in case the skb is not
3396 * subject to CHECKSUM_UNNECESSARY.
3397 *
3398 * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
3399 * Description
3400 * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
3401 * Return
3402 * *sk* if casting is valid, or **NULL** otherwise.
3403 *
3404 * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
3405 * Description
3406 * Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
3407 * Return
3408 * *sk* if casting is valid, or **NULL** otherwise.
3409 *
3410 * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
3411 * Description
3412 * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
3413 * Return
3414 * *sk* if casting is valid, or **NULL** otherwise.
3415 *
3416 * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
3417 * Description
3418 * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
3419 * Return
3420 * *sk* if casting is valid, or **NULL** otherwise.
3421 *
3422 * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
3423 * Description
3424 * Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
3425 * Return
3426 * *sk* if casting is valid, or **NULL** otherwise.
3427 *
3428 * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
3429 * Description
3430 * Return a user or a kernel stack in the buffer provided by the
3431 * bpf program. To achieve this, the helper needs *task*, which is
3432 * a valid pointer to **struct task_struct**. To store the
3433 * stacktrace, the bpf program provides *buf* with a nonnegative *size*.
3434 *
3435 * The last argument, *flags*, holds the number of stack frames to
3436 * skip (from 0 to 255), masked with
3437 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
3438 * the following flags:
3439 *
3440 * **BPF_F_USER_STACK**
3441 * Collect a user space stack instead of a kernel stack.
3442 * **BPF_F_USER_BUILD_ID**
3443 * Collect buildid+offset instead of ips for user stack,
3444 * only valid if **BPF_F_USER_STACK** is also specified.
3445 *
3446 * **bpf_get_task_stack**\ () can collect up to
3447 * **PERF_MAX_STACK_DEPTH** kernel and user frames, subject
3448 * to a sufficiently large buffer size. Note that
3449 * this limit can be controlled with the **sysctl** program, and
3450 * that it should be manually increased in order to profile long
3451 * user stacks (such as stacks for Java programs). To do so, use:
3452 *
3453 * ::
3454 *
3455 * # sysctl kernel.perf_event_max_stack=<new value>
3456 * Return
3457 * A non-negative value equal to or less than *size* on success,
3458 * or a negative error in case of failure.
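 *
 *		For example (a tracing or iterator program; the buffer size
 *		is illustrative):
 *
 *		::
 *
 *			__u64 ips[32];
 *			long n = bpf_get_task_stack(task, ips,
 *						    sizeof(ips), 0);
 *
 *			if (n > 0) {
 *				// ips[0 .. n/8 - 1] hold return addresses
 *			}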
3459 *
3460 * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
3461 * Description
3462 * Load header option. Support reading a particular TCP header
3463 * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
3464 *
3465 * If *flags* is 0, it will search the option from the
3466 * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
3467 * has details on what skb_data contains under different
3468 * *skops*\ **->op**.
3469 *
3470 * The first byte of *searchby_res* specifies the
3471 * kind to search for.
3472 *
3473 * If the searched kind is an experimental kind
3474 * (i.e. 253 or 254 according to RFC 6994), it also
3475 * needs to specify the "magic", which is either
3476 * 2 bytes or 4 bytes. It then also needs to
3477 * specify the size of the magic by using
3478 * the 2nd byte, which is the "kind-length" of a TCP
3479 * header option; the "kind-length" also
3480 * includes the first 2 bytes, "kind" and "kind-length"
3481 * itself, as a normal TCP header option does.
3482 *
3483 * For example, to search experimental kind 254 with
3484 * 2 byte magic 0xeB9F, the searchby_res should be
3485 * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
3486 *
3487 * To search for the standard window scale option (3),
3488 * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
3489 * Note that the kind-length must be 0 for a regular option.
3490 *
3491 * Searching for No-Op (0) and End-of-Option-List (1) is
3492 * not supported.
3493 *
3494 * *len* must be at least 2 bytes, which is the minimal size
3495 * of a header option.
3496 *
3497 * Supported flags:
3498 *
3499 * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
3500 * saved_syn packet or the just-received syn packet.
3501 *
3502 * Return
3503 * > 0 when found, the header option is copied to *searchby_res*.
3504 * The return value is the total length copied. On failure, a
3505 * negative error code is returned:
3506 *
3507 * **-EINVAL** if a parameter is invalid.
3508 *
3509 * **-ENOMSG** if the option is not found.
3510 *
3511 * **-ENOENT** if no syn packet is available when
3512 * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
3513 *
3514 * **-ENOSPC** if there is not enough space. Only *len* bytes
3515 * are copied.
3516 *
3517 * **-EFAULT** on failure to parse the header options in the
3518 * packet.
3519 *
3520 * **-EPERM** if the helper cannot be used under the current
3521 * *skops*\ **->op**.
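 *
 *		To make the *searchby_res* layout concrete, a sketch that
 *		searches for the experimental option from the example above:
 *
 *		::
 *
 *			__u8 opt[8] = { 254, 4, 0xeB, 0x9F, };
 *			int ret = bpf_load_hdr_opt(skops, opt,
 *						   sizeof(opt), 0);
 *
 *			if (ret > 0) {
 *				// ret bytes of the full option were
 *				// copied into opt[]
 *			}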
3522 *
3523 * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
3524 * Description
3525 * Store header option. The data will be copied
3526 * from buffer *from* with length *len* to the TCP header.
3527 *
3528 * The buffer *from* should have the whole option that
3529 * includes the kind, kind-length, and the actual
3530 * option data. The *len* must be at least kind-length
3531 * long. The kind-length does not have to be 4-byte
3532 * aligned. The kernel will take care of the padding
3533 * and of setting the 4-byte-aligned value in th->doff.
3534 *
3535 * This helper will check for a duplicated option
3536 * by searching for the same option in the outgoing skb.
3537 *
3538 * This helper can only be called during
3539 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
3540 *
3541 * Return
3542 * 0 on success, or negative error in case of failure:
3543 *
3544 * **-EINVAL** if a parameter is invalid.
3545 *
3546 * **-ENOSPC** if there is not enough space in the header.
3547 * Nothing has been written.
3548 *
3549 * **-EEXIST** if the option already exists.
3550 *
3551 * **-EFAULT** on failure to parse the existing header options.
3552 *
3553 * **-EPERM** if the helper cannot be used under the current
3554 * *skops*\ **->op**.
3555 *
3556 * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
3557 * Description
3558 * Reserve *len* bytes for the bpf header option. The
3559 * space will be used by **bpf_store_hdr_opt**\ () later in
3560 * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
3561 *
3562 * If **bpf_reserve_hdr_opt**\ () is called multiple times,
3563 * the total number of bytes will be reserved.
3564 *
3565 * This helper can only be called during
3566 * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
3567 *
3568 * Return
3569 * 0 on success, or negative error in case of failure:
3570 *
3571 * **-EINVAL** if a parameter is invalid.
3572 *
3573 * **-ENOSPC** if there is not enough space in the header.
3574 *
3575 * **-EPERM** if the helper cannot be used under the current
3576 * *skops*\ **->op**.
3577 *
3578 * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
3579 * Description
3580 * Get a bpf_local_storage from an *inode*.
3581 *
3582 * Logically, it could be thought of as getting the value from
3583 * a *map* with *inode* as the **key**. From this
3584 * perspective, the usage is not much different from
3585 * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
3586 * helper enforces the key must be an inode and the map must also
3587 * be a **BPF_MAP_TYPE_INODE_STORAGE**.
3588 *
3589 * Underneath, the value is stored locally at *inode* instead of
3590 * the *map*. The *map* is used as the bpf-local-storage
3591 * "type". The bpf-local-storage "type" (i.e. the *map*) is
3592 * searched against all bpf_local_storage residing at *inode*.
3593 *
3594 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
3595 * used such that a new bpf_local_storage will be
3596 * created if one does not exist. *value* can be used
3597 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
3598 * the initial value of a bpf_local_storage. If *value* is
3599 * **NULL**, the new bpf_local_storage will be zero initialized.
3600 * Return
3601 * A bpf_local_storage pointer is returned on success.
3602 *
3603 * **NULL** if not found or there was an error in adding
3604 * a new bpf_local_storage.
3605 *
3606 * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
3607 * Description
3608 * Delete a bpf_local_storage from an *inode*.
3609 * Return
3610 * 0 on success.
3611 *
3612 * **-ENOENT** if the bpf_local_storage cannot be found.
3613 *
3614 * long bpf_d_path(struct path *path, char *buf, u32 sz)
3615 * Description
3616 * Return full path for given **struct path** object, which
3617 * needs to be the kernel BTF *path* object. The path is
3618 * returned in the provided buffer *buf* of size *sz* and
3619 * is zero terminated.
3620 *
3621 * Return
3622 * On success, the strictly positive length of the string,
3623 * including the trailing NUL character. On error, a negative
3624 * value.
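 *
 *		For instance (a sketch, where *file* is a **struct file**
 *		pointer obtained from the traced function):
 *
 *		::
 *
 *			char path[256];
 *			long len = bpf_d_path(&file->f_path, path,
 *					      sizeof(path));
 *
 *			if (len > 0) {
 *				// path[] holds the NUL-terminated path
 *			}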
3625 *
3626 * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
3627 * Description
3628 * Read *size* bytes from user space address *user_ptr* and store
3629 * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
3630 * Return
3631 * 0 on success, or a negative error in case of failure.
3632 *
3633 * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags)
3634 * Description
3635 * Use BTF to store a string representation of *ptr*->ptr in *str*,
3636 * using *ptr*->type_id. This value should specify the type
3637 * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1)
3638 * can be used to look up vmlinux BTF type ids. Traversing the
3639 * data structure using BTF, the type information and values are
3640 * stored in the first *str_size* - 1 bytes of *str*. Safe copy of
3641 * the pointer data is carried out to avoid kernel crashes during
3642 * operation. Smaller types can use string space on the stack;
3643 * larger programs can use map data to store the string
3644 * representation.
3645 *
3646 * The string can be subsequently shared with userspace via
3647 * bpf_perf_event_output() or ring buffer interfaces.
3648 * bpf_trace_printk() is to be avoided as it places too small
3649 * a limit on string size to be useful.
3650 *
3651 * *flags* is a combination of
3652 *
3653 * **BTF_F_COMPACT**
3654 * no formatting around type information
3655 * **BTF_F_NONAME**
3656 * no struct/union member names/types
3657 * **BTF_F_PTR_RAW**
3658 * show raw (unobfuscated) pointer values;
3659 * equivalent to printk specifier %px.
3660 * **BTF_F_ZERO**
3661 * show zero-valued struct/union members; they
3662 * are not displayed by default
3663 *
3664 * Return
3665 * The number of bytes that were written (or would have been
3666 * written if output had to be truncated due to string size),
3667 * or a negative error in case of failure.
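 *
 *		A sketch (assuming libbpf's **bpf_core_type_id_kernel**\ ()
 *		macro from **bpf_core_read.h** to obtain the vmlinux type id,
 *		and a *task* pointer obtained elsewhere):
 *
 *		::
 *
 *			static char str[256];
 *			struct btf_ptr p = {
 *				.ptr = task,
 *				.type_id = bpf_core_type_id_kernel(
 *						struct task_struct),
 *			};
 *
 *			bpf_snprintf_btf(str, sizeof(str), &p, sizeof(p),
 *					 BTF_F_COMPACT);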
3668 *
3669 * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
3670 * Description
3671 * Use BTF to write a string representation of *ptr*->ptr, using
3672 * *ptr*->type_id as per **bpf_snprintf_btf**\ (), to the seq_file *m*.
3673 * *flags* are identical to those used for **bpf_snprintf_btf**\ ().
3674 * Return
3675 * 0 on success or a negative error in case of failure.
3676 *
3677 * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
3678 * Description
3679 * See **bpf_get_cgroup_classid**\ () for the main description.
3680 * This helper differs from **bpf_get_cgroup_classid**\ () in that
3681 * the cgroup v1 net_cls class is retrieved only from the *skb*'s
3682 * associated socket instead of the current process.
3683 * Return
3684 * The id is returned or 0 in case the id could not be retrieved.
3685 *
3686 * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags)
3687 * Description
3688 * Redirect the packet to another net device of index *ifindex*
3689 * and fill in L2 addresses from neighboring subsystem. This helper
3690 * is somewhat similar to **bpf_redirect**\ (), except that it
3691 * populates L2 addresses as well, meaning, internally, the helper
3692 * relies on the neighbor lookup for the L2 address of the nexthop.
3693 *
3694 * The helper will perform a FIB lookup based on the skb's
3695 * networking header to get the address of the next hop, unless
3696 * this is supplied by the caller in the *params* argument. The
3697 * *plen* argument indicates the len of *params* and should be set
3698 * to 0 if *params* is NULL.
3699 *
3700 * The *flags* argument is reserved and must be 0. The helper is
3701 * currently only supported for tc BPF program types, and enabled
3702 * for IPv4 and IPv6 protocols.
3703 * Return
3704 * The helper returns **TC_ACT_REDIRECT** on success or
3705 * **TC_ACT_SHOT** on error.
3706 *
3707 * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
3708 * Description
3709 * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
3710 * pointer to the percpu kernel variable on *cpu*. A ksym is an
3711 * extern variable decorated with '__ksym'. For a ksym, a
3712 * variable of the same name (either static or global) is defined
3713 * in the kernel. The ksym is percpu if that kernel variable is percpu.
3714 * The returned pointer points to the global percpu var on *cpu*.
3715 *
3716 * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the
3717 * kernel, except that bpf_per_cpu_ptr() may return NULL. This
3718 * happens if *cpu* is larger than nr_cpu_ids. The caller of
3719 * bpf_per_cpu_ptr() must check the returned value.
3720 * Return
3721 * A pointer to the kernel percpu variable on *cpu*, or
3722 * NULL, if *cpu* is invalid.
3723 *
3724 * void *bpf_this_cpu_ptr(const void *percpu_ptr)
3725 * Description
3726 * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
3727 * pointer to the percpu kernel variable on this cpu. See the
3728 * description of 'ksym' in **bpf_per_cpu_ptr**\ ().
3729 *
3730 * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
3731 * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
3732 * never return NULL.
3733 * Return
3734 * A pointer to the kernel percpu variable on this cpu.
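 *
 *		A sketch (the '__ksym' attribute macro is assumed from
 *		libbpf's **bpf_helpers.h**; **bpf_prog_active** is a percpu
 *		variable in the kernel):
 *
 *		::
 *
 *			extern const int bpf_prog_active __ksym;
 *
 *			const int *p = bpf_per_cpu_ptr(&bpf_prog_active,
 *						       cpu);
 *
 *			if (p) {
 *				// cpu was valid, p can be dereferenced
 *			}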
3735 *
3736 * long bpf_redirect_peer(u32 ifindex, u64 flags)
3737 * Description
3738 * Redirect the packet to another net device of index *ifindex*.
3739 * This helper is somewhat similar to **bpf_redirect**\ (), except
3740 * that the redirection happens to the *ifindex*'s peer device and
3741 * the netns switch takes place from ingress to ingress without
3742 * going through the CPU's backlog queue.
3743 *
3744 * The *flags* argument is reserved and must be 0. The helper is
3745 * currently only supported for tc BPF program types at the ingress
3746 * hook and for veth device types. The peer device must reside in a
3747 * different network namespace.
3748 * Return
3749 * The helper returns **TC_ACT_REDIRECT** on success or
3750 * **TC_ACT_SHOT** on error.
3751 *
3752 * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags)
3753 * Description
3754 * Get a bpf_local_storage from the *task*.
3755 *
3756 * Logically, it could be thought of as getting the value from
3757 * a *map* with *task* as the **key**. From this
3758 * perspective, the usage is not much different from
3759 * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
3760 * helper enforces that the key must be a **task_struct** and the map must also
3761 * be a **BPF_MAP_TYPE_TASK_STORAGE**.
3762 *
3763 * Underneath, the value is stored locally at *task* instead of
3764 * the *map*. The *map* is used as the bpf-local-storage
3765 * "type". The bpf-local-storage "type" (i.e. the *map*) is
3766 * searched against all bpf_local_storage residing at *task*.
3767 *
3768 * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
3769 * used such that a new bpf_local_storage will be
3770 * created if one does not exist. *value* can be used
3771 * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
3772 * the initial value of a bpf_local_storage. If *value* is
3773 * **NULL**, the new bpf_local_storage will be zero initialized.
3774 * Return
3775 * A bpf_local_storage pointer is returned on success.
3776 *
3777 * **NULL** if not found or there was an error in adding
3778 * a new bpf_local_storage.
3779 *
3780 * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task)
3781 * Description
3782 * Delete a bpf_local_storage from a *task*.
3783 * Return
3784 * 0 on success.
3785 *
3786 * **-ENOENT** if the bpf_local_storage cannot be found.
3787 *
3788 * struct task_struct *bpf_get_current_task_btf(void)
3789 * Description
3790 * Return a BTF pointer to the "current" task.
3791 * This pointer can also be used in helpers that accept an
3792 * *ARG_PTR_TO_BTF_ID* of type *task_struct*.
3793 * Return
3794 * Pointer to the current task.
3795 *
3796 * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags)
3797 * Description
3798 * Set or clear certain options on *bprm*:
3799 *
3800 * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
3801 * which sets the **AT_SECURE** auxv for glibc. The bit
3802 * is cleared if the flag is not specified.
3803 * Return
3804 * **-EINVAL** if invalid *flags* are passed, zero otherwise.
3805 *
3806 * u64 bpf_ktime_get_coarse_ns(void)
3807 * Description
3808 * Return a coarse-grained version of the time elapsed since
3809 * system boot, in nanoseconds. Does not include time the system
3810 * was suspended.
3811 *
3812 * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
3813 * Return
3814 * Current *ktime*.
3815 *
3816 * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size)
3817 * Description
3818 * Returns the stored IMA hash of the *inode* (if it's available).
3819 * If the hash is larger than *size*, then only *size*
3820 * bytes will be copied to *dst*.
3821 * Return
3822 * The **hash_algo** is returned on success,
3823 * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
3824 * invalid arguments are passed.
3825 *
3826 * struct socket *bpf_sock_from_file(struct file *file)
3827 * Description
3828 * If the given file represents a socket, returns the associated
3829 * socket.
3830 * Return
3831 * A pointer to a struct socket on success or NULL if the file is
3832 * not a socket.
3833 */
3834 #define __BPF_FUNC_MAPPER(FN) \
3835 FN(unspec), \
3836 FN(map_lookup_elem), \
3837 FN(map_update_elem), \
3838 FN(map_delete_elem), \
3839 FN(probe_read), \
3840 FN(ktime_get_ns), \
3841 FN(trace_printk), \
3842 FN(get_prandom_u32), \
3843 FN(get_smp_processor_id), \
3844 FN(skb_store_bytes), \
3845 FN(l3_csum_replace), \
3846 FN(l4_csum_replace), \
3847 FN(tail_call), \
3848 FN(clone_redirect), \
3849 FN(get_current_pid_tgid), \
3850 FN(get_current_uid_gid), \
3851 FN(get_current_comm), \
3852 FN(get_cgroup_classid), \
3853 FN(skb_vlan_push), \
3854 FN(skb_vlan_pop), \
3855 FN(skb_get_tunnel_key), \
3856 FN(skb_set_tunnel_key), \
3857 FN(perf_event_read), \
3858 FN(redirect), \
3859 FN(get_route_realm), \
3860 FN(perf_event_output), \
3861 FN(skb_load_bytes), \
3862 FN(get_stackid), \
3863 FN(csum_diff), \
3864 FN(skb_get_tunnel_opt), \
3865 FN(skb_set_tunnel_opt), \
3866 FN(skb_change_proto), \
3867 FN(skb_change_type), \
3868 FN(skb_under_cgroup), \
3869 FN(get_hash_recalc), \
3870 FN(get_current_task), \
3871 FN(probe_write_user), \
3872 FN(current_task_under_cgroup), \
3873 FN(skb_change_tail), \
3874 FN(skb_pull_data), \
3875 FN(csum_update), \
3876 FN(set_hash_invalid), \
3877 FN(get_numa_node_id), \
3878 FN(skb_change_head), \
3879 FN(xdp_adjust_head), \
3880 FN(probe_read_str), \
3881 FN(get_socket_cookie), \
3882 FN(get_socket_uid), \
3883 FN(set_hash), \
3884 FN(setsockopt), \
3885 FN(skb_adjust_room), \
3886 FN(redirect_map), \
3887 FN(sk_redirect_map), \
3888 FN(sock_map_update), \
3889 FN(xdp_adjust_meta), \
3890 FN(perf_event_read_value), \
3891 FN(perf_prog_read_value), \
3892 FN(getsockopt), \
3893 FN(override_return), \
3894 FN(sock_ops_cb_flags_set), \
3895 FN(msg_redirect_map), \
3896 FN(msg_apply_bytes), \
3897 FN(msg_cork_bytes), \
3898 FN(msg_pull_data), \
3899 FN(bind), \
3900 FN(xdp_adjust_tail), \
3901 FN(skb_get_xfrm_state), \
3902 FN(get_stack), \
3903 FN(skb_load_bytes_relative), \
3904 FN(fib_lookup), \
3905 FN(sock_hash_update), \
3906 FN(msg_redirect_hash), \
3907 FN(sk_redirect_hash), \
3908 FN(lwt_push_encap), \
3909 FN(lwt_seg6_store_bytes), \
3910 FN(lwt_seg6_adjust_srh), \
3911 FN(lwt_seg6_action), \
3912 FN(rc_repeat), \
3913 FN(rc_keydown), \
3914 FN(skb_cgroup_id), \
3915 FN(get_current_cgroup_id), \
3916 FN(get_local_storage), \
3917 FN(sk_select_reuseport), \
3918 FN(skb_ancestor_cgroup_id), \
3919 FN(sk_lookup_tcp), \
3920 FN(sk_lookup_udp), \
3921 FN(sk_release), \
3922 FN(map_push_elem), \
3923 FN(map_pop_elem), \
3924 FN(map_peek_elem), \
3925 FN(msg_push_data), \
3926 FN(msg_pop_data), \
3927 FN(rc_pointer_rel), \
3928 FN(spin_lock), \
3929 FN(spin_unlock), \
3930 FN(sk_fullsock), \
3931 FN(tcp_sock), \
3932 FN(skb_ecn_set_ce), \
3933 FN(get_listener_sock), \
3934 FN(skc_lookup_tcp), \
3935 FN(tcp_check_syncookie), \
3936 FN(sysctl_get_name), \
3937 FN(sysctl_get_current_value), \
3938 FN(sysctl_get_new_value), \
3939 FN(sysctl_set_new_value), \
3940 FN(strtol), \
3941 FN(strtoul), \
3942 FN(sk_storage_get), \
3943 FN(sk_storage_delete), \
3944 FN(send_signal), \
3945 FN(tcp_gen_syncookie), \
3946 FN(skb_output), \
3947 FN(probe_read_user), \
3948 FN(probe_read_kernel), \
3949 FN(probe_read_user_str), \
3950 FN(probe_read_kernel_str), \
3951 FN(tcp_send_ack), \
3952 FN(send_signal_thread), \
3953 FN(jiffies64), \
3954 FN(read_branch_records), \
3955 FN(get_ns_current_pid_tgid), \
3956 FN(xdp_output), \
3957 FN(get_netns_cookie), \
3958 FN(get_current_ancestor_cgroup_id), \
3959 FN(sk_assign), \
3960 FN(ktime_get_boot_ns), \
3961 FN(seq_printf), \
3962 FN(seq_write), \
3963 FN(sk_cgroup_id), \
3964 FN(sk_ancestor_cgroup_id), \
3965 FN(ringbuf_output), \
3966 FN(ringbuf_reserve), \
3967 FN(ringbuf_submit), \
3968 FN(ringbuf_discard), \
3969 FN(ringbuf_query), \
3970 FN(csum_level), \
3971 FN(skc_to_tcp6_sock), \
3972 FN(skc_to_tcp_sock), \
3973 FN(skc_to_tcp_timewait_sock), \
3974 FN(skc_to_tcp_request_sock), \
3975 FN(skc_to_udp6_sock), \
3976 FN(get_task_stack), \
3977 FN(load_hdr_opt), \
3978 FN(store_hdr_opt), \
3979 FN(reserve_hdr_opt), \
3980 FN(inode_storage_get), \
3981 FN(inode_storage_delete), \
3982 FN(d_path), \
3983 FN(copy_from_user), \
3984 FN(snprintf_btf), \
3985 FN(seq_printf_btf), \
3986 FN(skb_cgroup_classid), \
3987 FN(redirect_neigh), \
3988 FN(per_cpu_ptr), \
3989 FN(this_cpu_ptr), \
3990 FN(redirect_peer), \
3991 FN(task_storage_get), \
3992 FN(task_storage_delete), \
3993 FN(get_current_task_btf), \
3994 FN(bprm_opts_set), \
3995 FN(ktime_get_coarse_ns), \
3996 FN(ima_inode_hash), \
3997 FN(sock_from_file), \
3998 /* */
3999
4000 /* The integer value in the 'imm' field of a BPF_CALL instruction selects
4001 * which helper function the eBPF program intends to call.
4002 */
4003 #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
4004 enum bpf_func_id {
4005 __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
4006 __BPF_FUNC_MAX_ID,
4007 };
4008 #undef __BPF_ENUM_FN
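
/* For illustration: a helper call is encoded as a BPF_JMP | BPF_CALL
 * instruction whose 'imm' field carries one of the ids above (a sketch,
 * not part of the ABI definitions in this header):
 *
 *	struct bpf_insn call = {
 *		.code = BPF_JMP | BPF_CALL,
 *		.imm  = BPF_FUNC_map_lookup_elem,
 *	};
 */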
4009
4010 /* All flags used by eBPF helper functions, placed here. */
4011
4012 /* BPF_FUNC_skb_store_bytes flags. */
4013 enum {
4014 BPF_F_RECOMPUTE_CSUM = (1ULL << 0),
4015 BPF_F_INVALIDATE_HASH = (1ULL << 1),
4016 };
4017
4018 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
4019 * First 4 bits are for passing the header field size.
4020 */
4021 enum {
4022 BPF_F_HDR_FIELD_MASK = 0xfULL,
4023 };
4024
4025 /* BPF_FUNC_l4_csum_replace flags. */
4026 enum {
4027 BPF_F_PSEUDO_HDR = (1ULL << 4),
4028 BPF_F_MARK_MANGLED_0 = (1ULL << 5),
4029 BPF_F_MARK_ENFORCE = (1ULL << 6),
4030 };
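
/* Usage sketch (illustrative; csum_off, old_addr and new_addr are assumed
 * inputs): after rewriting a 4-byte address that also feeds the TCP/UDP
 * pseudo header, pass the field size in the low 4 bits together with
 * BPF_F_PSEUDO_HDR:
 *
 *	bpf_l4_csum_replace(skb, csum_off, old_addr, new_addr,
 *			    BPF_F_PSEUDO_HDR | sizeof(new_addr));
 */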
4031
4032 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
4033 enum {
4034 BPF_F_INGRESS = (1ULL << 0),
4035 };
4036
4037 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
4038 enum {
4039 BPF_F_TUNINFO_IPV6 = (1ULL << 0),
4040 };
4041
4042 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
4043 enum {
4044 BPF_F_SKIP_FIELD_MASK = 0xffULL,
4045 BPF_F_USER_STACK = (1ULL << 8),
4046 /* flags used by BPF_FUNC_get_stackid only. */
4047 BPF_F_FAST_STACK_CMP = (1ULL << 9),
4048 BPF_F_REUSE_STACKID = (1ULL << 10),
4049 /* flags used by BPF_FUNC_get_stack only. */
4050 BPF_F_USER_BUILD_ID = (1ULL << 11),
4051 };
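
/* Usage sketch (illustrative): capture a user-space stack while skipping
 * the two innermost frames; the skip count occupies the low 8 bits of the
 * flags:
 *
 *	__u64 buf[64];
 *	long n = bpf_get_stack(ctx, buf, sizeof(buf), 2 | BPF_F_USER_STACK);
 */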
4052
4053 /* BPF_FUNC_skb_set_tunnel_key flags. */
4054 enum {
4055 BPF_F_ZERO_CSUM_TX = (1ULL << 1),
4056 BPF_F_DONT_FRAGMENT = (1ULL << 2),
4057 BPF_F_SEQ_NUMBER = (1ULL << 3),
4058 };
4059
4060 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
4061 * BPF_FUNC_perf_event_read_value flags.
4062 */
4063 enum {
4064 BPF_F_INDEX_MASK = 0xffffffffULL,
4065 BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK,
4066 /* BPF_FUNC_perf_event_output for sk_buff input context. */
4067 BPF_F_CTXLEN_MASK = (0xfffffULL << 32),
4068 };
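
/* Usage sketch (illustrative; 'events' is an assumed
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map and 'e' a sample struct): emit a record
 * to the perf ring of the current CPU:
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */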
4069
4070 /* Current network namespace */
4071 enum {
4072 BPF_F_CURRENT_NETNS = (-1L),
4073 };
4074
4075 /* BPF_FUNC_csum_level level values. */
4076 enum {
4077 BPF_CSUM_LEVEL_QUERY,
4078 BPF_CSUM_LEVEL_INC,
4079 BPF_CSUM_LEVEL_DEC,
4080 BPF_CSUM_LEVEL_RESET,
4081 };
4082
4083 /* BPF_FUNC_skb_adjust_room flags. */
4084 enum {
4085 BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0),
4086 BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1),
4087 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2),
4088 BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
4089 BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
4090 BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
4091 };
4092
4093 enum {
4094 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff,
4095 BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56,
4096 };
4097
4098 #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \
4099 BPF_ADJ_ROOM_ENCAP_L2_MASK) \
4100 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
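
/* Usage sketch (illustrative; len_diff is an assumed, precomputed size of
 * the new headers): grow room for an IPv4/GRE encapsulation carrying a
 * 14-byte inner L2 header:
 *
 *	__u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
 *		      BPF_F_ADJ_ROOM_ENCAP_L4_GRE |
 *		      BPF_F_ADJ_ROOM_ENCAP_L2(14);
 *
 *	bpf_skb_adjust_room(skb, len_diff, BPF_ADJ_ROOM_MAC, flags);
 */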
4101
4102 /* BPF_FUNC_sysctl_get_name flags. */
4103 enum {
4104 BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
4105 };
4106
4107 /* BPF_FUNC_<kernel_obj>_storage_get flags */
4108 enum {
4109 BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0),
4110 /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility
4111 * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead.
4112 */
4113 BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE,
4114 };
4115
4116 /* BPF_FUNC_read_branch_records flags. */
4117 enum {
4118 BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0),
4119 };
4120
4121 /* BPF_FUNC_ringbuf_submit, BPF_FUNC_ringbuf_discard, and
4122  * BPF_FUNC_ringbuf_output flags.
4123 */
4124 enum {
4125 BPF_RB_NO_WAKEUP = (1ULL << 0),
4126 BPF_RB_FORCE_WAKEUP = (1ULL << 1),
4127 };
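
/* Usage sketch (illustrative; 'rb' is an assumed BPF_MAP_TYPE_RINGBUF map
 * and 'struct event' a sample type): reserve, fill and submit a record,
 * forcing a consumer wakeup:
 *
 *	struct event *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
 *
 *	if (e) {
 *		e->pid = bpf_get_current_pid_tgid() >> 32;
 *		bpf_ringbuf_submit(e, BPF_RB_FORCE_WAKEUP);
 *	}
 */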
4128
4129 /* BPF_FUNC_ringbuf_query flags */
4130 enum {
4131 BPF_RB_AVAIL_DATA = 0,
4132 BPF_RB_RING_SIZE = 1,
4133 BPF_RB_CONS_POS = 2,
4134 BPF_RB_PROD_POS = 3,
4135 };
4136
4137 /* BPF ring buffer constants */
4138 enum {
4139 BPF_RINGBUF_BUSY_BIT = (1U << 31),
4140 BPF_RINGBUF_DISCARD_BIT = (1U << 30),
4141 BPF_RINGBUF_HDR_SZ = 8,
4142 };
4143
4144 /* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */
4145 enum {
4146 BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0),
4147 BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1),
4148 };
4149
4150 /* Mode for BPF_FUNC_skb_adjust_room helper. */
4151 enum bpf_adj_room_mode {
4152 BPF_ADJ_ROOM_NET,
4153 BPF_ADJ_ROOM_MAC,
4154 };
4155
4156 /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
4157 enum bpf_hdr_start_off {
4158 BPF_HDR_START_MAC,
4159 BPF_HDR_START_NET,
4160 };
4161
4162 /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
4163 enum bpf_lwt_encap_mode {
4164 BPF_LWT_ENCAP_SEG6,
4165 BPF_LWT_ENCAP_SEG6_INLINE,
4166 BPF_LWT_ENCAP_IP,
4167 };
4168
4169 /* Flags for bpf_bprm_opts_set helper */
4170 enum {
4171 BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
4172 };
4173
4174 #define __bpf_md_ptr(type, name) \
4175 union { \
4176 type name; \
4177 __u64 :64; \
4178 } __attribute__((aligned(8)))
4179
4180 /* user accessible mirror of in-kernel sk_buff.
4181 * new fields can only be added to the end of this structure
4182 */
4183 struct __sk_buff {
4184 __u32 len;
4185 __u32 pkt_type;
4186 __u32 mark;
4187 __u32 queue_mapping;
4188 __u32 protocol;
4189 __u32 vlan_present;
4190 __u32 vlan_tci;
4191 __u32 vlan_proto;
4192 __u32 priority;
4193 __u32 ingress_ifindex;
4194 __u32 ifindex;
4195 __u32 tc_index;
4196 __u32 cb[5];
4197 __u32 hash;
4198 __u32 tc_classid;
4199 __u32 data;
4200 __u32 data_end;
4201 __u32 napi_id;
4202
4203 	/* Accessed by BPF_PROG_TYPE_SK_SKB programs from here to ... */
4204 __u32 family;
4205 __u32 remote_ip4; /* Stored in network byte order */
4206 __u32 local_ip4; /* Stored in network byte order */
4207 __u32 remote_ip6[4]; /* Stored in network byte order */
4208 __u32 local_ip6[4]; /* Stored in network byte order */
4209 __u32 remote_port; /* Stored in network byte order */
4210 __u32 local_port; /* stored in host byte order */
4211 /* ... here. */
4212
4213 __u32 data_meta;
4214 __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
4215 __u64 tstamp;
4216 __u32 wire_len;
4217 __u32 gso_segs;
4218 __bpf_md_ptr(struct bpf_sock *, sk);
4219 __u32 gso_size;
4220 };
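
/* Usage sketch (illustrative; TC_ACT_SHOT comes from linux/pkt_cls.h): the
 * data/data_end fields support the usual bounds-checked direct packet
 * access pattern:
 *
 *	void *data     = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if ((void *)(eth + 1) > data_end)
 *		return TC_ACT_SHOT;
 */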
4221
4222 struct bpf_tunnel_key {
4223 __u32 tunnel_id;
4224 union {
4225 __u32 remote_ipv4;
4226 __u32 remote_ipv6[4];
4227 };
4228 __u8 tunnel_tos;
4229 __u8 tunnel_ttl;
4230 __u16 tunnel_ext; /* Padding, future use. */
4231 __u32 tunnel_label;
4232 };
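
/* Usage sketch (illustrative; the address is made up and bpf_htonl() is
 * libbpf's bpf_endian.h macro): set tunnel metadata on egress through a
 * collect_md tunnel device:
 *
 *	struct bpf_tunnel_key key = {};
 *
 *	key.tunnel_id   = 42;
 *	key.remote_ipv4 = bpf_htonl(0xac100101);
 *	bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX);
 */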
4233
4234 /* user accessible mirror of in-kernel xfrm_state.
4235 * new fields can only be added to the end of this structure
4236 */
4237 struct bpf_xfrm_state {
4238 __u32 reqid;
4239 __u32 spi; /* Stored in network byte order */
4240 __u16 family;
4241 __u16 ext; /* Padding, future use. */
4242 union {
4243 __u32 remote_ipv4; /* Stored in network byte order */
4244 __u32 remote_ipv6[4]; /* Stored in network byte order */
4245 };
4246 };
4247
4248 /* Generic BPF return codes which all BPF program types may support.
4249 * The values are binary compatible with their TC_ACT_* counter-part to
4250 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
4251 * programs.
4252 *
4253  * XDP is handled separately, see XDP_*.
4254 */
4255 enum bpf_ret_code {
4256 BPF_OK = 0,
4257 /* 1 reserved */
4258 BPF_DROP = 2,
4259 /* 3-6 reserved */
4260 BPF_REDIRECT = 7,
4261 /* >127 are reserved for prog type specific return codes.
4262 *
4263 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
4264 * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
4265 * changed and should be routed based on its new L3 header.
4266 * (This is an L3 redirect, as opposed to L2 redirect
4267 * represented by BPF_REDIRECT above).
4268 */
4269 BPF_LWT_REROUTE = 128,
4270 };
4271
4272 struct bpf_sock {
4273 __u32 bound_dev_if;
4274 __u32 family;
4275 __u32 type;
4276 __u32 protocol;
4277 __u32 mark;
4278 __u32 priority;
4279 	/* IP address fields also allow 1- and 2-byte access */
4280 __u32 src_ip4;
4281 __u32 src_ip6[4];
4282 __u32 src_port; /* host byte order */
4283 __u32 dst_port; /* network byte order */
4284 __u32 dst_ip4;
4285 __u32 dst_ip6[4];
4286 __u32 state;
4287 __s32 rx_queue_mapping;
4288 };
4289
4290 struct bpf_tcp_sock {
4291 __u32 snd_cwnd; /* Sending congestion window */
4292 __u32 srtt_us; /* smoothed round trip time << 3 in usecs */
4293 __u32 rtt_min;
4294 __u32 snd_ssthresh; /* Slow start size threshold */
4295 __u32 rcv_nxt; /* What we want to receive next */
4296 __u32 snd_nxt; /* Next sequence we send */
4297 __u32 snd_una; /* First byte we want an ack for */
4298 __u32 mss_cache; /* Cached effective mss, not including SACKS */
4299 __u32 ecn_flags; /* ECN status bits. */
4300 __u32 rate_delivered; /* saved rate sample: packets delivered */
4301 __u32 rate_interval_us; /* saved rate sample: time elapsed */
4302 __u32 packets_out; /* Packets which are "in flight" */
4303 __u32 retrans_out; /* Retransmitted packets out */
4304 __u32 total_retrans; /* Total retransmits for entire connection */
4305 __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
4306 * total number of segments in.
4307 */
4308 __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
4309 * total number of data segments in.
4310 */
4311 __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
4312 * The total number of segments sent.
4313 */
4314 __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
4315 * total number of data segments sent.
4316 */
4317 __u32 lost_out; /* Lost packets */
4318 __u32 sacked_out; /* SACK'd packets */
4319 __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
4320 * sum(delta(rcv_nxt)), or how many bytes
4321 				 * were received.
4322 */
4323 __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
4324 * sum(delta(snd_una)), or how many bytes
4325 * were acked.
4326 */
4327 __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
4328 * total number of DSACK blocks received
4329 */
4330 __u32 delivered; /* Total data packets delivered incl. rexmits */
4331 __u32 delivered_ce; /* Like the above but only ECE marked packets */
4332 __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */
4333 };
4334
4335 struct bpf_sock_tuple {
4336 union {
4337 struct {
4338 __be32 saddr;
4339 __be32 daddr;
4340 __be16 sport;
4341 __be16 dport;
4342 } ipv4;
4343 struct {
4344 __be32 saddr[4];
4345 __be32 daddr[4];
4346 __be16 sport;
4347 __be16 dport;
4348 } ipv6;
4349 };
4350 };
4351
4352 struct bpf_xdp_sock {
4353 __u32 queue_id;
4354 };
4355
4356 #define XDP_PACKET_HEADROOM 256
4357
4358 /* User return codes for XDP prog type.
4359 * A valid XDP program must return one of these defined values. All other
4360 * return codes are reserved for future use. Unknown return codes will
4361 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
4362 */
4363 enum xdp_action {
4364 XDP_ABORTED = 0,
4365 XDP_DROP,
4366 XDP_PASS,
4367 XDP_TX,
4368 XDP_REDIRECT,
4369 };
4370
4371 /* user accessible metadata for XDP packet hook
4372 * new fields must be added to the end of this structure
4373 */
4374 struct xdp_md {
4375 __u32 data;
4376 __u32 data_end;
4377 __u32 data_meta;
4378 	/* The accesses below go through struct xdp_rxq_info */
4379 __u32 ingress_ifindex; /* rxq->dev->ifindex */
4380 __u32 rx_queue_index; /* rxq->queue_index */
4381
4382 __u32 egress_ifindex; /* txq->dev->ifindex */
4383 };
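
/* Minimal XDP sketch (illustrative; bpf_htons() comes from libbpf's
 * bpf_endian.h and ETH_P_IPV6 from linux/if_ether.h): drop IPv6 traffic
 * and pass everything else:
 *
 *	void *data     = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if ((void *)(eth + 1) > data_end)
 *		return XDP_ABORTED;
 *	return eth->h_proto == bpf_htons(ETH_P_IPV6) ? XDP_DROP : XDP_PASS;
 */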
4384
4385 /* DEVMAP map-value layout
4386 *
4387 * The struct data-layout of map-value is a configuration interface.
4388 * New members can only be added to the end of this structure.
4389 */
4390 struct bpf_devmap_val {
4391 __u32 ifindex; /* device index */
4392 union {
4393 int fd; /* prog fd on map write */
4394 __u32 id; /* prog id on map read */
4395 } bpf_prog;
4396 };
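
/* Userspace sketch (illustrative; map_fd, key, ifindex and prog_fd are
 * assumed inputs): install an entry whose device-bound program runs on
 * redirect, using libbpf's bpf_map_update_elem():
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = ifindex,
 *		.bpf_prog.fd = prog_fd,
 *	};
 *
 *	bpf_map_update_elem(map_fd, &key, &val, 0);
 */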
4397
4398 /* CPUMAP map-value layout
4399 *
4400 * The struct data-layout of map-value is a configuration interface.
4401 * New members can only be added to the end of this structure.
4402 */
4403 struct bpf_cpumap_val {
4404 __u32 qsize; /* queue size to remote target CPU */
4405 union {
4406 int fd; /* prog fd on map write */
4407 __u32 id; /* prog id on map read */
4408 } bpf_prog;
4409 };
4410
4411 enum sk_action {
4412 SK_DROP = 0,
4413 SK_PASS,
4414 };
4415
4416 /* user accessible metadata for SK_MSG packet hook, new fields must
4417 * be added to the end of this structure
4418 */
4419 struct sk_msg_md {
4420 __bpf_md_ptr(void *, data);
4421 __bpf_md_ptr(void *, data_end);
4422
4423 __u32 family;
4424 __u32 remote_ip4; /* Stored in network byte order */
4425 __u32 local_ip4; /* Stored in network byte order */
4426 __u32 remote_ip6[4]; /* Stored in network byte order */
4427 __u32 local_ip6[4]; /* Stored in network byte order */
4428 __u32 remote_port; /* Stored in network byte order */
4429 __u32 local_port; /* stored in host byte order */
4430 __u32 size; /* Total size of sk_msg */
4431
4432 __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */
4433 };
4434
4435 struct sk_reuseport_md {
4436 /*
4437 * Start of directly accessible data. It begins from
4438 * the tcp/udp header.
4439 */
4440 __bpf_md_ptr(void *, data);
4441 /* End of directly accessible data */
4442 __bpf_md_ptr(void *, data_end);
4443 /*
4444 * Total length of packet (starting from the tcp/udp header).
4445 * Note that the directly accessible bytes (data_end - data)
4446 	 * could be less than this "len". Those bytes can be
4447 	 * read indirectly with the bpf_skb_load_bytes() helper.
4448 */
4449 __u32 len;
4450 /*
4451 * Eth protocol in the mac header (network byte order). e.g.
4452 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
4453 */
4454 __u32 eth_protocol;
4455 __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
4456 __u32 bind_inany; /* Is sock bound to an INANY address? */
4457 	__u32 hash;		/* A hash of the packet's 4-tuple */
4458 };
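
/* Usage sketch (illustrative; 'socks' is an assumed
 * BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map with NUM_SOCKS entries): select a
 * socket by packet hash:
 *
 *	__u32 key = reuse_md->hash % NUM_SOCKS;
 *
 *	if (bpf_sk_select_reuseport(reuse_md, &socks, &key, 0) == 0)
 *		return SK_PASS;
 *	return SK_DROP;
 */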
4459
4460 #define BPF_TAG_SIZE 8
4461
4462 struct bpf_prog_info {
4463 __u32 type;
4464 __u32 id;
4465 __u8 tag[BPF_TAG_SIZE];
4466 __u32 jited_prog_len;
4467 __u32 xlated_prog_len;
4468 __aligned_u64 jited_prog_insns;
4469 __aligned_u64 xlated_prog_insns;
4470 __u64 load_time; /* ns since boottime */
4471 __u32 created_by_uid;
4472 __u32 nr_map_ids;
4473 __aligned_u64 map_ids;
4474 char name[BPF_OBJ_NAME_LEN];
4475 __u32 ifindex;
4476 __u32 gpl_compatible:1;
4477 __u32 :31; /* alignment pad */
4478 __u64 netns_dev;
4479 __u64 netns_ino;
4480 __u32 nr_jited_ksyms;
4481 __u32 nr_jited_func_lens;
4482 __aligned_u64 jited_ksyms;
4483 __aligned_u64 jited_func_lens;
4484 __u32 btf_id;
4485 __u32 func_info_rec_size;
4486 __aligned_u64 func_info;
4487 __u32 nr_func_info;
4488 __u32 nr_line_info;
4489 __aligned_u64 line_info;
4490 __aligned_u64 jited_line_info;
4491 __u32 nr_jited_line_info;
4492 __u32 line_info_rec_size;
4493 __u32 jited_line_info_rec_size;
4494 __u32 nr_prog_tags;
4495 __aligned_u64 prog_tags;
4496 __u64 run_time_ns;
4497 __u64 run_cnt;
4498 } __attribute__((aligned(8)));
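
/* Userspace sketch (illustrative; prog_fd is an assumed program fd): this
 * info is fetched with BPF_OBJ_GET_INFO_BY_FD, e.g. via libbpf:
 *
 *	struct bpf_prog_info info = {};
 *	__u32 len = sizeof(info);
 *
 *	if (!bpf_obj_get_info_by_fd(prog_fd, &info, &len))
 *		printf("%s: id=%u\n", info.name, info.id);
 */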
4499
4500 struct bpf_map_info {
4501 __u32 type;
4502 __u32 id;
4503 __u32 key_size;
4504 __u32 value_size;
4505 __u32 max_entries;
4506 __u32 map_flags;
4507 char name[BPF_OBJ_NAME_LEN];
4508 __u32 ifindex;
4509 __u32 btf_vmlinux_value_type_id;
4510 __u64 netns_dev;
4511 __u64 netns_ino;
4512 __u32 btf_id;
4513 __u32 btf_key_type_id;
4514 __u32 btf_value_type_id;
4515 } __attribute__((aligned(8)));
4516
4517 struct bpf_btf_info {
4518 __aligned_u64 btf;
4519 __u32 btf_size;
4520 __u32 id;
4521 __aligned_u64 name;
4522 __u32 name_len;
4523 __u32 kernel_btf;
4524 } __attribute__((aligned(8)));
4525
4526 struct bpf_link_info {
4527 __u32 type;
4528 __u32 id;
4529 __u32 prog_id;
4530 union {
4531 struct {
4532 __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
4533 __u32 tp_name_len; /* in/out: tp_name buffer len */
4534 } raw_tracepoint;
4535 struct {
4536 __u32 attach_type;
4537 } tracing;
4538 struct {
4539 __u64 cgroup_id;
4540 __u32 attach_type;
4541 } cgroup;
4542 struct {
4543 __aligned_u64 target_name; /* in/out: target_name buffer ptr */
4544 __u32 target_name_len; /* in/out: target_name buffer len */
4545 union {
4546 struct {
4547 __u32 map_id;
4548 } map;
4549 };
4550 } iter;
4551 struct {
4552 __u32 netns_ino;
4553 __u32 attach_type;
4554 } netns;
4555 struct {
4556 __u32 ifindex;
4557 } xdp;
4558 };
4559 } __attribute__((aligned(8)));
4560
4561 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
4562  * by the user and intended to be used by the socket (e.g. to bind to,
4563  * depending on the attach type).
4564 */
4565 struct bpf_sock_addr {
4566 __u32 user_family; /* Allows 4-byte read, but no write. */
4567 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
4568 * Stored in network byte order.
4569 */
4570 __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
4571 * Stored in network byte order.
4572 */
4573 __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write.
4574 * Stored in network byte order
4575 */
4576 __u32 family; /* Allows 4-byte read, but no write */
4577 __u32 type; /* Allows 4-byte read, but no write */
4578 __u32 protocol; /* Allows 4-byte read, but no write */
4579 __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
4580 * Stored in network byte order.
4581 */
4582 __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
4583 * Stored in network byte order.
4584 */
4585 __bpf_md_ptr(struct bpf_sock *, sk);
4586 };
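
/* Usage sketch (illustrative; the addresses are made up and bpf_hton*()
 * are libbpf's bpf_endian.h macros): a cgroup connect4 program can
 * transparently rewrite the destination before the kernel acts on it:
 *
 *	if (ctx->user_ip4 == bpf_htonl(0x01020304) &&
 *	    ctx->user_port == bpf_htons(80)) {
 *		ctx->user_ip4  = bpf_htonl(0x7f000001);
 *		ctx->user_port = bpf_htons(5000);
 *	}
 *	return 1;
 */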
4587
4588 /* User bpf_sock_ops struct to access socket values and specify request ops
4589 * and their replies.
4590  * Some of these fields are in network (big-endian) byte order and may need
4591 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
4592 * New fields can only be added at the end of this structure
4593 */
4594 struct bpf_sock_ops {
4595 __u32 op;
4596 union {
4597 __u32 args[4]; /* Optionally passed to bpf program */
4598 __u32 reply; /* Returned by bpf program */
4599 __u32 replylong[4]; /* Optionally returned by bpf prog */
4600 };
4601 __u32 family;
4602 __u32 remote_ip4; /* Stored in network byte order */
4603 __u32 local_ip4; /* Stored in network byte order */
4604 __u32 remote_ip6[4]; /* Stored in network byte order */
4605 __u32 local_ip6[4]; /* Stored in network byte order */
4606 __u32 remote_port; /* Stored in network byte order */
4607 __u32 local_port; /* stored in host byte order */
4608 __u32 is_fullsock; /* Some TCP fields are only valid if
4609 * there is a full socket. If not, the
4610 * fields read as zero.
4611 */
4612 __u32 snd_cwnd;
4613 __u32 srtt_us; /* Averaged RTT << 3 in usecs */
4614 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
4615 __u32 state;
4616 __u32 rtt_min;
4617 __u32 snd_ssthresh;
4618 __u32 rcv_nxt;
4619 __u32 snd_nxt;
4620 __u32 snd_una;
4621 __u32 mss_cache;
4622 __u32 ecn_flags;
4623 __u32 rate_delivered;
4624 __u32 rate_interval_us;
4625 __u32 packets_out;
4626 __u32 retrans_out;
4627 __u32 total_retrans;
4628 __u32 segs_in;
4629 __u32 data_segs_in;
4630 __u32 segs_out;
4631 __u32 data_segs_out;
4632 __u32 lost_out;
4633 __u32 sacked_out;
4634 __u32 sk_txhash;
4635 __u64 bytes_received;
4636 __u64 bytes_acked;
4637 __bpf_md_ptr(struct bpf_sock *, sk);
4638 /* [skb_data, skb_data_end) covers the whole TCP header.
4639 *
4640 * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received
4641 * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the
4642 * header has not been written.
4643 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have
4644 * been written so far.
4645 * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes
4646 * the 3WHS.
4647 * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes
4648 * the 3WHS.
4649 *
4650 * bpf_load_hdr_opt() can also be used to read a particular option.
4651 */
4652 __bpf_md_ptr(void *, skb_data);
4653 __bpf_md_ptr(void *, skb_data_end);
4654 __u32 skb_len; /* The total length of a packet.
4655 * It includes the header, options,
4656 * and payload.
4657 */
4658 __u32 skb_tcp_flags; /* tcp_flags of the header. It provides
4659 * an easy way to check for tcp_flags
4660 * without parsing skb_data.
4661 *
4662 * In particular, the skb_tcp_flags
4663 * will still be available in
4664 * BPF_SOCK_OPS_HDR_OPT_LEN even though
4665 * the outgoing header has not
4666 * been written yet.
4667 */
4668 };
4669
4670 /* Definitions for bpf_sock_ops_cb_flags */
4671 enum {
4672 BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0),
4673 BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
4674 BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
4675 BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
4676 /* Call bpf for all received TCP headers. The bpf prog will be
4677 * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB
4678 *
4679 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
4680 * for the header option related helpers that will be useful
4681 * to the bpf programs.
4682 *
4683 * It could be used at the client/active side (i.e. connect() side)
4684 	 * when the server has indicated that it is in syncookie
4685 	 * mode and requires the active side to resend the bpf-written
4686 	 * options. The active side can keep writing the bpf-options until
4687 	 * it receives a valid packet from the server side confirming that
4688 	 * the earlier packet (and options) has been received. The example
4689 	 * patch uses it like this at the active side when the server is
4690 	 * in syncookie mode.
4691 *
4692 * The bpf prog will usually turn this off in the common cases.
4693 */
4694 BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4),
4695 /* Call bpf when kernel has received a header option that
4696 * the kernel cannot handle. The bpf prog will be called under
4697 * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
4698 *
4699 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
4700 * for the header option related helpers that will be useful
4701 * to the bpf programs.
4702 */
4703 BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
4704 /* Call bpf when the kernel is writing header options for the
4705 * outgoing packet. The bpf prog will first be called
4706 * to reserve space in a skb under
4707 * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then
4708 * the bpf prog will be called to write the header option(s)
4709 * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
4710 *
4711 * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB
4712 * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option
4713 * related helpers that will be useful to the bpf programs.
4714 *
4715 * The kernel gets its chance to reserve space and write
4716 * options first before the BPF program does.
4717 */
4718 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),
4719 /* Mask of all currently supported cb flags */
4720 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
4721 };
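
/* Usage sketch (illustrative): from a sock_ops program, typically on one
 * of the *_ESTABLISHED_CB operators, opt in to further callbacks:
 *
 *	bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_RTT_CB_FLAG |
 *					 BPF_SOCK_OPS_STATE_CB_FLAG);
 */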
4722
4723 /* List of known BPF sock_ops operators.
4724 * New entries can only be added at the end
4725 */
4726 enum {
4727 BPF_SOCK_OPS_VOID,
4728 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or
4729 * -1 if default value should be used
4730 */
4731 	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised
4732 * window (in packets) or -1 if default
4733 * value should be used
4734 */
4735 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an
4736 * active connection is initialized
4737 */
4738 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an
4739 * active connection is
4740 * established
4741 */
4742 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a
4743 * passive connection is
4744 * established
4745 */
4746 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
4747 * needs ECN
4748 */
4749 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is
4750 * based on the path and may be
4751 * dependent on the congestion control
4752 * algorithm. In general it indicates
4753 * a congestion threshold. RTTs above
4754 * this indicate congestion
4755 */
4756 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
4757 * Arg1: value of icsk_retransmits
4758 * Arg2: value of icsk_rto
4759 * Arg3: whether RTO has expired
4760 */
4761 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
4762 * Arg1: sequence number of 1st byte
4763 * Arg2: # segments
4764 * Arg3: return value of
4765 * tcp_transmit_skb (0 => success)
4766 */
4767 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
4768 * Arg1: old_state
4769 * Arg2: new_state
4770 */
4771 BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
4772 * socket transition to LISTEN state.
4773 */
4774 BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
4775 */
4776 BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
4777 * It will be called to handle
4778 * the packets received at
4779 * an already established
4780 * connection.
4781 *
4782 * sock_ops->skb_data:
4783 * Referring to the received skb.
4784 * It covers the TCP header only.
4785 *
4786 * bpf_load_hdr_opt() can also
4787 * be used to search for a
4788 * particular option.
4789 */
4790 BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the
4791 * header option later in
4792 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
4793 * Arg1: bool want_cookie. (in
4794 * writing SYNACK only)
4795 *
4796 * sock_ops->skb_data:
4797 * Not available because no header has
4798 * been written yet.
4799 *
4800 * sock_ops->skb_tcp_flags:
4801 * The tcp_flags of the
4802 * outgoing skb. (e.g. SYN, ACK, FIN).
4803 *
4804 * bpf_reserve_hdr_opt() should
4805 * be used to reserve space.
4806 */
4807 BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options
4808 * Arg1: bool want_cookie. (in
4809 * writing SYNACK only)
4810 *
4811 * sock_ops->skb_data:
4812 * Referring to the outgoing skb.
4813 * It covers the TCP header
4814 * that has already been written
4815 * by the kernel and the
4816 * earlier bpf-progs.
4817 *
4818 * sock_ops->skb_tcp_flags:
4819 * The tcp_flags of the outgoing
4820 * skb. (e.g. SYN, ACK, FIN).
4821 *
4822 * bpf_store_hdr_opt() should
4823 * be used to write the
4824 * option.
4825 *
4826 * bpf_load_hdr_opt() can also
4827 * be used to search for a
4828 * particular option that
4829 * has already been written
4830 * by the kernel or the
4831 * earlier bpf-progs.
4832 */
4833 };
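
/* Usage sketch (illustrative, following the samples/bpf convention of
 * answering through sock_ops->reply): return a custom SYN-RTO:
 *
 *	if (skops->op == BPF_SOCK_OPS_TIMEOUT_INIT) {
 *		skops->reply = 10;
 *		return 1;
 *	}
 */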
4834
4835 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
4836 * changes between the TCP and BPF versions. Ideally this should never happen.
4837 * If it does, we need to add code to convert them before calling
4838 * the BPF sock_ops function.
4839 */
4840 enum {
4841 BPF_TCP_ESTABLISHED = 1,
4842 BPF_TCP_SYN_SENT,
4843 BPF_TCP_SYN_RECV,
4844 BPF_TCP_FIN_WAIT1,
4845 BPF_TCP_FIN_WAIT2,
4846 BPF_TCP_TIME_WAIT,
4847 BPF_TCP_CLOSE,
4848 BPF_TCP_CLOSE_WAIT,
4849 BPF_TCP_LAST_ACK,
4850 BPF_TCP_LISTEN,
4851 BPF_TCP_CLOSING, /* Now a valid state */
4852 BPF_TCP_NEW_SYN_RECV,
4853
4854 BPF_TCP_MAX_STATES /* Leave at the end! */
4855 };
4856
4857 enum {
4858 TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
4859 TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
4860 TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */
4861 	TCP_BPF_RTO_MIN		= 1004, /* Min RTO in usecs */
4862 /* Copy the SYN pkt to optval
4863 *
4864 	 * BPF_PROG_TYPE_SOCK_OPS only. It is similar to
4865 	 * bpf_getsockopt(TCP_SAVED_SYN) but is not limited
4866 	 * to getting it from the saved_syn. It can either get the
4867 * syn packet from:
4868 *
4869 * 1. the just-received SYN packet (only available when writing the
4870 * SYNACK). It will be useful when it is not necessary to
4871 	 * save the SYN packet for later use. It is also the only way
4872 * to get the SYN during syncookie mode because the syn
4873 * packet cannot be saved during syncookie.
4874 *
4875 * OR
4876 *
4877 * 2. the earlier saved syn which was done by
4878 * bpf_setsockopt(TCP_SAVE_SYN).
4879 *
4880 * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the
4881 * SYN packet is obtained.
4882 *
4883 * If the bpf-prog does not need the IP[46] header, the
4884 * bpf-prog can avoid parsing the IP header by using
4885 * TCP_BPF_SYN. Otherwise, the bpf-prog can get both
4886 * IP[46] and TCP header by using TCP_BPF_SYN_IP.
4887 *
4888 * >0: Total number of bytes copied
4889 	 * -ENOSPC: Not enough space in optval. Only optlen bytes
4890 	 *	    are copied.
4891 * -ENOENT: The SYN skb is not available now and the earlier SYN pkt
4892 * is not saved by setsockopt(TCP_SAVE_SYN).
4893 */
4894 TCP_BPF_SYN = 1005, /* Copy the TCP header */
4895 TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
4896 TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
4897 };
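
/* Usage sketch (illustrative): from a sock_ops program, bump the initial
 * congestion window via bpf_setsockopt():
 *
 *	int iw = 40;
 *
 *	bpf_setsockopt(skops, SOL_TCP, TCP_BPF_IW, &iw, sizeof(iw));
 */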
4898
4899 enum {
4900 BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
4901 };
4902
4903 /* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and
4904 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
4905 */
4906 enum {
4907 BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the
4908 * total option spaces
4909 * required for an established
4910 * sk in order to calculate the
4911 * MSS. No skb is actually
4912 * sent.
4913 */
4914 BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode
4915 * when sending a SYN.
4916 */
4917 };
4918
4919 struct bpf_perf_event_value {
4920 __u64 counter;
4921 __u64 enabled;
4922 __u64 running;
4923 };
4924
4925 enum {
4926 BPF_DEVCG_ACC_MKNOD = (1ULL << 0),
4927 BPF_DEVCG_ACC_READ = (1ULL << 1),
4928 BPF_DEVCG_ACC_WRITE = (1ULL << 2),
4929 };
4930
4931 enum {
4932 BPF_DEVCG_DEV_BLOCK = (1ULL << 0),
4933 BPF_DEVCG_DEV_CHAR = (1ULL << 1),
4934 };
4935
4936 struct bpf_cgroup_dev_ctx {
4937 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
4938 __u32 access_type;
4939 __u32 major;
4940 __u32 minor;
4941 };
4942
4943 struct bpf_raw_tracepoint_args {
4944 __u64 args[0];
4945 };
4946
4947 /* DIRECT: Skip the FIB rules and go to FIB table associated with device
4948 * OUTPUT: Do lookup from egress perspective; default is ingress
4949 */
4950 enum {
4951 BPF_FIB_LOOKUP_DIRECT = (1U << 0),
4952 BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
4953 };
4954
4955 enum {
4956 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
4957 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */
4958 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */
4959 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */
4960 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */
4961 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
4962 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
4963 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
4964 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
4965 };
4966
4967 struct bpf_fib_lookup {
4968 /* input: network family for lookup (AF_INET, AF_INET6)
4969 * output: network family of egress nexthop
4970 */
4971 __u8 family;
4972
4973 /* set if lookup is to consider L4 data - e.g., FIB rules */
4974 __u8 l4_protocol;
4975 __be16 sport;
4976 __be16 dport;
4977
4978 /* total length of packet from network header - used for MTU check */
4979 __u16 tot_len;
4980
4981 /* input: L3 device index for lookup
4982 * output: device index from FIB lookup
4983 */
4984 __u32 ifindex;
4985
4986 union {
4987 /* inputs to lookup */
4988 __u8 tos; /* AF_INET */
4989 __be32 flowinfo; /* AF_INET6, flow_label + priority */
4990
4991 /* output: metric of fib result (IPv4/IPv6 only) */
4992 __u32 rt_metric;
4993 };
4994
4995 union {
4996 __be32 ipv4_src;
4997 __u32 ipv6_src[4]; /* in6_addr; network order */
4998 };
4999
5000 /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in
5001 * network header. output: bpf_fib_lookup sets to gateway address
5002 * if FIB lookup returns gateway route
5003 */
5004 union {
5005 __be32 ipv4_dst;
5006 __u32 ipv6_dst[4]; /* in6_addr; network order */
5007 };
5008
5009 /* output */
5010 __be16 h_vlan_proto;
5011 __be16 h_vlan_TCI;
5012 __u8 smac[6]; /* ETH_ALEN */
5013 __u8 dmac[6]; /* ETH_ALEN */
5014 };
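
/* XDP usage sketch (illustrative; iph is an assumed, bounds-checked IPv4
 * header and eth the Ethernet header): look up the egress device, rewrite
 * the MAC addresses from the result, then redirect:
 *
 *	struct bpf_fib_lookup fib = {};
 *
 *	fib.family   = AF_INET;
 *	fib.ipv4_dst = iph->daddr;
 *	fib.ifindex  = ctx->ingress_ifindex;
 *	if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) ==
 *	    BPF_FIB_LKUP_RET_SUCCESS) {
 *		__builtin_memcpy(eth->h_dest,   fib.dmac, 6);
 *		__builtin_memcpy(eth->h_source, fib.smac, 6);
 *		return bpf_redirect(fib.ifindex, 0);
 *	}
 */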
5015
5016 struct bpf_redir_neigh {
5017 /* network family for lookup (AF_INET, AF_INET6) */
5018 __u32 nh_family;
5019 /* network address of nexthop; skips fib lookup to find gateway */
5020 union {
5021 __be32 ipv4_nh;
5022 __u32 ipv6_nh[4]; /* in6_addr; network order */
5023 };
5024 };
5025
5026 enum bpf_task_fd_type {
5027 BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */
5028 BPF_FD_TYPE_TRACEPOINT, /* tp name */
5029 BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */
5030 BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */
5031 BPF_FD_TYPE_UPROBE, /* filename + offset */
5032 BPF_FD_TYPE_URETPROBE, /* filename + offset */
5033 };
5034
5035 enum {
5036 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0),
5037 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1),
5038 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2),
5039 };
5040
5041 struct bpf_flow_keys {
5042 __u16 nhoff;
5043 __u16 thoff;
5044 __u16 addr_proto; /* ETH_P_* of valid addrs */
5045 __u8 is_frag;
5046 __u8 is_first_frag;
5047 __u8 is_encap;
5048 __u8 ip_proto;
5049 __be16 n_proto;
5050 __be16 sport;
5051 __be16 dport;
5052 union {
5053 struct {
5054 __be32 ipv4_src;
5055 __be32 ipv4_dst;
5056 };
5057 struct {
5058 __u32 ipv6_src[4]; /* in6_addr; network order */
5059 __u32 ipv6_dst[4]; /* in6_addr; network order */
5060 };
5061 };
5062 __u32 flags;
5063 __be32 flow_label;
5064 };
5065
5066 struct bpf_func_info {
5067 __u32 insn_off;
5068 __u32 type_id;
5069 };
5070
5071 #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
5072 #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
5073
5074 struct bpf_line_info {
5075 __u32 insn_off;
5076 __u32 file_name_off;
5077 __u32 line_off;
5078 __u32 line_col;
5079 };
5080
5081 struct bpf_spin_lock {
5082 __u32 val;
5083 };
5084
5085 struct bpf_sysctl {
5086 __u32 write; /* Sysctl is being read (= 0) or written (= 1).
5087 * Allows 1,2,4-byte read, but no write.
5088 */
5089 __u32 file_pos; /* Sysctl file position to read from, write to.
5090 	 * Allows 1,2,4-byte read and 4-byte write.
5091 */
5092 };
5093
5094 struct bpf_sockopt {
5095 __bpf_md_ptr(struct bpf_sock *, sk);
5096 __bpf_md_ptr(void *, optval);
5097 __bpf_md_ptr(void *, optval_end);
5098
5099 __s32 level;
5100 __s32 optname;
5101 __s32 optlen;
5102 __s32 retval;
5103 };
5104
5105 struct bpf_pidns_info {
5106 __u32 pid;
5107 __u32 tgid;
5108 };
5109
5110 /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
5111 struct bpf_sk_lookup {
5112 __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
5113
5114 __u32 family; /* Protocol family (AF_INET, AF_INET6) */
5115 __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
5116 __u32 remote_ip4; /* Network byte order */
5117 __u32 remote_ip6[4]; /* Network byte order */
5118 __u32 remote_port; /* Network byte order */
5119 __u32 local_ip4; /* Network byte order */
5120 __u32 local_ip6[4]; /* Network byte order */
5121 __u32 local_port; /* Host byte order */
5122 };
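
/* Usage sketch (illustrative; 'redir_map' is an assumed
 * BPF_MAP_TYPE_SOCKMAP holding the listening socket at index 0): steer the
 * lookup to that socket:
 *
 *	__u32 zero = 0;
 *	struct bpf_sock *sk = bpf_map_lookup_elem(&redir_map, &zero);
 *
 *	if (!sk)
 *		return SK_DROP;
 *	bpf_sk_assign(ctx, sk, 0);
 *	bpf_sk_release(sk);
 *	return SK_PASS;
 */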
5123
5124 /*
5125 * struct btf_ptr is used for typed pointer representation; the
5126 * type id is used to render the pointer data as the appropriate type
5127 * via the bpf_snprintf_btf() helper described above. A flags field -
5128 * potentially to specify additional details about the BTF pointer
5129 * (rather than its mode of display) - is included for future use.
5130 * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately.
5131 */
5132 struct btf_ptr {
5133 void *ptr;
5134 __u32 type_id;
5135 __u32 flags; /* BTF ptr flags; unused at present. */
5136 };
5137
5138 /*
5139 * Flags to control bpf_snprintf_btf() behaviour.
5140 * - BTF_F_COMPACT: no formatting around type information
5141 * - BTF_F_NONAME: no struct/union member names/types
5142 * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values;
5143 * equivalent to %px.
5144 * - BTF_F_ZERO: show zero-valued struct/union members; they
5145 * are not displayed by default
5146 */
5147 enum {
5148 BTF_F_COMPACT = (1ULL << 0),
5149 BTF_F_NONAME = (1ULL << 1),
5150 BTF_F_PTR_RAW = (1ULL << 2),
5151 BTF_F_ZERO = (1ULL << 3),
5152 };
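
/* Usage sketch (illustrative; TASK_TYPE_ID is assumed to be resolved at
 * load time, e.g. via libbpf's bpf_core_type_id_kernel()): render a
 * task_struct pointer as text:
 *
 *	struct btf_ptr p = { .ptr = task, .type_id = TASK_TYPE_ID };
 *
 *	bpf_snprintf_btf(buf, sizeof(buf), &p, sizeof(p), BTF_F_COMPACT);
 */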
5153
5154 #endif /* _UAPI__LINUX_BPF_H__ */