1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 */
8 #ifndef __LINUX_BPF_H__
9 #define __LINUX_BPF_H__
10
11 #include <linux/types.h>
12 #include <linux/bpf_common.h>
13
14 /* Extended instruction set based on top of classic BPF */
15
16 /* instruction classes */
17 #define BPF_JMP32 0x06 /* jmp mode in word width */
18 #define BPF_ALU64 0x07 /* alu mode in double word width */
19
20 /* ld/ldx fields */
21 #define BPF_DW 0x18 /* double word (64-bit) */
22 #define BPF_XADD 0xc0 /* exclusive add */
23
24 /* alu/jmp fields */
25 #define BPF_MOV 0xb0 /* mov reg to reg */
26 #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
27
28 /* change endianness of a register */
29 #define BPF_END 0xd0 /* flags for endianness conversion: */
30 #define BPF_TO_LE 0x00 /* convert to little-endian */
31 #define BPF_TO_BE 0x08 /* convert to big-endian */
32 #define BPF_FROM_LE BPF_TO_LE
33 #define BPF_FROM_BE BPF_TO_BE
34
35 /* jmp encodings */
36 #define BPF_JNE 0x50 /* jump != */
37 #define BPF_JLT 0xa0 /* LT is unsigned, '<' */
38 #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
39 #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
40 #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
41 #define BPF_JSLT 0xc0 /* SLT is signed, '<' */
42 #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
43 #define BPF_CALL 0x80 /* function call */
44 #define BPF_EXIT 0x90 /* function return */
45
46 /* Register numbers */
47 enum {
48 BPF_REG_0 = 0,
49 BPF_REG_1,
50 BPF_REG_2,
51 BPF_REG_3,
52 BPF_REG_4,
53 BPF_REG_5,
54 BPF_REG_6,
55 BPF_REG_7,
56 BPF_REG_8,
57 BPF_REG_9,
58 BPF_REG_10,
59 __MAX_BPF_REG,
60 };
61
62 /* BPF has 10 general purpose 64-bit registers and a stack frame. */
63 #define MAX_BPF_REG __MAX_BPF_REG
64
65 struct bpf_insn {
66 __u8 code; /* opcode */
67 __u8 dst_reg:4; /* dest register */
68 __u8 src_reg:4; /* source register */
69 __s16 off; /* signed offset */
70 __s32 imm; /* signed immediate constant */
71 };
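/* Purely illustrative sketch: hand-assembling the two-instruction program
 * "r0 = 0; exit" with struct bpf_insn. The opcodes are composed from the
 * class/operation macros above and in <linux/bpf_common.h>.
 *
 *	struct bpf_insn prog[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,	// r0 = 0
 *		  .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 */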
72
73 /* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
74 struct bpf_lpm_trie_key {
75 __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
76 __u8 data[0]; /* Arbitrary size */
77 };
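/* Purely illustrative sketch: building a lookup key for the IPv4 prefix
 * 192.168.0.0/16. Since data[] is a flexible array, callers typically
 * declare their own fixed-size variant; the data bytes are in network
 * byte order and only the first prefixlen bits are significant.
 *
 *	struct {
 *		__u32	prefixlen;
 *		__u8	data[4];	// AF_INET address
 *	} key = {
 *		.prefixlen	= 16,
 *		.data		= { 192, 168, 0, 0 },
 *	};
 */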
78
79 struct bpf_cgroup_storage_key {
80 __u64 cgroup_inode_id; /* cgroup inode id */
81 __u32 attach_type; /* program attach type */
82 };
83
84 /* BPF syscall commands, see bpf(2) man-page for details. */
85 enum bpf_cmd {
86 BPF_MAP_CREATE,
87 BPF_MAP_LOOKUP_ELEM,
88 BPF_MAP_UPDATE_ELEM,
89 BPF_MAP_DELETE_ELEM,
90 BPF_MAP_GET_NEXT_KEY,
91 BPF_PROG_LOAD,
92 BPF_OBJ_PIN,
93 BPF_OBJ_GET,
94 BPF_PROG_ATTACH,
95 BPF_PROG_DETACH,
96 BPF_PROG_TEST_RUN,
97 BPF_PROG_GET_NEXT_ID,
98 BPF_MAP_GET_NEXT_ID,
99 BPF_PROG_GET_FD_BY_ID,
100 BPF_MAP_GET_FD_BY_ID,
101 BPF_OBJ_GET_INFO_BY_FD,
102 BPF_PROG_QUERY,
103 BPF_RAW_TRACEPOINT_OPEN,
104 BPF_BTF_LOAD,
105 BPF_BTF_GET_FD_BY_ID,
106 BPF_TASK_FD_QUERY,
107 BPF_MAP_LOOKUP_AND_DELETE_ELEM,
108 BPF_MAP_FREEZE,
109 BPF_BTF_GET_NEXT_ID,
110 BPF_MAP_LOOKUP_BATCH,
111 BPF_MAP_LOOKUP_AND_DELETE_BATCH,
112 BPF_MAP_UPDATE_BATCH,
113 BPF_MAP_DELETE_BATCH,
114 };
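/* Purely illustrative sketch: issuing BPF_MAP_CREATE through a raw bpf(2)
 * call (production code normally goes through libbpf). union bpf_attr is
 * defined further below; __NR_bpf comes from <sys/syscall.h>.
 *
 *	union bpf_attr attr = {
 *		.map_type	= BPF_MAP_TYPE_HASH,
 *		.key_size	= sizeof(__u32),
 *		.value_size	= sizeof(__u64),
 *		.max_entries	= 1024,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */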
115
116 enum bpf_map_type {
117 BPF_MAP_TYPE_UNSPEC,
118 BPF_MAP_TYPE_HASH,
119 BPF_MAP_TYPE_ARRAY,
120 BPF_MAP_TYPE_PROG_ARRAY,
121 BPF_MAP_TYPE_PERF_EVENT_ARRAY,
122 BPF_MAP_TYPE_PERCPU_HASH,
123 BPF_MAP_TYPE_PERCPU_ARRAY,
124 BPF_MAP_TYPE_STACK_TRACE,
125 BPF_MAP_TYPE_CGROUP_ARRAY,
126 BPF_MAP_TYPE_LRU_HASH,
127 BPF_MAP_TYPE_LRU_PERCPU_HASH,
128 BPF_MAP_TYPE_LPM_TRIE,
129 BPF_MAP_TYPE_ARRAY_OF_MAPS,
130 BPF_MAP_TYPE_HASH_OF_MAPS,
131 BPF_MAP_TYPE_DEVMAP,
132 BPF_MAP_TYPE_SOCKMAP,
133 BPF_MAP_TYPE_CPUMAP,
134 BPF_MAP_TYPE_XSKMAP,
135 BPF_MAP_TYPE_SOCKHASH,
136 BPF_MAP_TYPE_CGROUP_STORAGE,
137 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
138 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
139 BPF_MAP_TYPE_QUEUE,
140 BPF_MAP_TYPE_STACK,
141 BPF_MAP_TYPE_SK_STORAGE,
142 BPF_MAP_TYPE_DEVMAP_HASH,
143 BPF_MAP_TYPE_STRUCT_OPS,
144 };
145
146 /* Note that tracing related programs such as
147 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
148 * are not subject to a stable API since kernel internal data
149 * structures can change from release to release and may
150 * therefore break existing tracing BPF programs. Tracing BPF
151 * programs correspond to /a/ specific kernel which is to be
152 * analyzed, and not /a/ specific kernel /and/ all future ones.
153 */
154 enum bpf_prog_type {
155 BPF_PROG_TYPE_UNSPEC,
156 BPF_PROG_TYPE_SOCKET_FILTER,
157 BPF_PROG_TYPE_KPROBE,
158 BPF_PROG_TYPE_SCHED_CLS,
159 BPF_PROG_TYPE_SCHED_ACT,
160 BPF_PROG_TYPE_TRACEPOINT,
161 BPF_PROG_TYPE_XDP,
162 BPF_PROG_TYPE_PERF_EVENT,
163 BPF_PROG_TYPE_CGROUP_SKB,
164 BPF_PROG_TYPE_CGROUP_SOCK,
165 BPF_PROG_TYPE_LWT_IN,
166 BPF_PROG_TYPE_LWT_OUT,
167 BPF_PROG_TYPE_LWT_XMIT,
168 BPF_PROG_TYPE_SOCK_OPS,
169 BPF_PROG_TYPE_SK_SKB,
170 BPF_PROG_TYPE_CGROUP_DEVICE,
171 BPF_PROG_TYPE_SK_MSG,
172 BPF_PROG_TYPE_RAW_TRACEPOINT,
173 BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
174 BPF_PROG_TYPE_LWT_SEG6LOCAL,
175 BPF_PROG_TYPE_LIRC_MODE2,
176 BPF_PROG_TYPE_SK_REUSEPORT,
177 BPF_PROG_TYPE_FLOW_DISSECTOR,
178 BPF_PROG_TYPE_CGROUP_SYSCTL,
179 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
180 BPF_PROG_TYPE_CGROUP_SOCKOPT,
181 BPF_PROG_TYPE_TRACING,
182 BPF_PROG_TYPE_STRUCT_OPS,
183 BPF_PROG_TYPE_EXT,
184 };
185
186 enum bpf_attach_type {
187 BPF_CGROUP_INET_INGRESS,
188 BPF_CGROUP_INET_EGRESS,
189 BPF_CGROUP_INET_SOCK_CREATE,
190 BPF_CGROUP_SOCK_OPS,
191 BPF_SK_SKB_STREAM_PARSER,
192 BPF_SK_SKB_STREAM_VERDICT,
193 BPF_CGROUP_DEVICE,
194 BPF_SK_MSG_VERDICT,
195 BPF_CGROUP_INET4_BIND,
196 BPF_CGROUP_INET6_BIND,
197 BPF_CGROUP_INET4_CONNECT,
198 BPF_CGROUP_INET6_CONNECT,
199 BPF_CGROUP_INET4_POST_BIND,
200 BPF_CGROUP_INET6_POST_BIND,
201 BPF_CGROUP_UDP4_SENDMSG,
202 BPF_CGROUP_UDP6_SENDMSG,
203 BPF_LIRC_MODE2,
204 BPF_FLOW_DISSECTOR,
205 BPF_CGROUP_SYSCTL,
206 BPF_CGROUP_UDP4_RECVMSG,
207 BPF_CGROUP_UDP6_RECVMSG,
208 BPF_CGROUP_GETSOCKOPT,
209 BPF_CGROUP_SETSOCKOPT,
210 BPF_TRACE_RAW_TP,
211 BPF_TRACE_FENTRY,
212 BPF_TRACE_FEXIT,
213 __MAX_BPF_ATTACH_TYPE
214 };
215
216 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
217
218 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
219 *
220 * NONE(default): No further bpf programs allowed in the subtree.
221 *
222 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
223 * the program in this cgroup yields to sub-cgroup program.
224 *
225 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
226 * that cgroup program gets run in addition to the program in this cgroup.
227 *
228 * Only one program is allowed to be attached to a cgroup with
229 * NONE or BPF_F_ALLOW_OVERRIDE flag.
230 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
231 * release the old program and attach the new one. Attach flags have to match.
232 *
233 * Multiple programs are allowed to be attached to a cgroup with
234 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
235 * (those that were attached first run first).
236 * The programs of the sub-cgroup are executed first, then the programs
237 * of this cgroup, and then the programs of the parent cgroup.
238 * When a child program makes a decision (like picking a TCP CA or a sock
239 * bind), the parent program has a chance to override it.
240 *
241 * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
242 * programs for a cgroup. It is, however, possible to replace an old program
243 * at any position by also specifying the BPF_F_REPLACE flag and the fd of the
244 * program to be replaced in replace_bpf_fd. The old program is then released.
245 *
246 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
247 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
248 * Ex1:
249 * cgrp1 (MULTI progs A, B) ->
250 * cgrp2 (OVERRIDE prog C) ->
251 * cgrp3 (MULTI prog D) ->
252 * cgrp4 (OVERRIDE prog E) ->
253 * cgrp5 (NONE prog F)
254 * the event in cgrp5 triggers execution of F,D,A,B in that order.
255 * if prog F is detached, the execution is E,D,A,B
256 * if prog F and D are detached, the execution is E,A,B
257 * if prog F, E and D are detached, the execution is C,A,B
258 *
259 * All eligible programs are executed regardless of return code from
260 * earlier programs.
261 */
262 #define BPF_F_ALLOW_OVERRIDE (1U << 0)
263 #define BPF_F_ALLOW_MULTI (1U << 1)
264 #define BPF_F_REPLACE (1U << 2)
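/* Purely illustrative sketch: attaching a cgroup program with
 * BPF_F_ALLOW_MULTI through a raw bpf(2) call. cgroup_fd is assumed to be
 * an open cgroup directory and prog_fd a loaded BPF_PROG_TYPE_CGROUP_SKB
 * program.
 *
 *	union bpf_attr attr = {
 *		.target_fd	= cgroup_fd,
 *		.attach_bpf_fd	= prog_fd,
 *		.attach_type	= BPF_CGROUP_INET_EGRESS,
 *		.attach_flags	= BPF_F_ALLOW_MULTI,
 *	};
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */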
265
266 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
267 * verifier will perform strict alignment checking as if the kernel
268 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
269 * and NET_IP_ALIGN defined to 2.
270 */
271 #define BPF_F_STRICT_ALIGNMENT (1U << 0)
272
273 /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
274 * verifier will allow any alignment whatsoever. On platforms
275 * with strict alignment requirements for loads and stores (such
276 * as sparc and mips) the verifier validates that all loads and
277 * stores provably follow this requirement. This flag turns that
278 * checking and enforcement off.
279 *
280 * It is mostly used for testing when we want to validate the
281 * context and memory access aspects of the verifier, but because
282 * of an unaligned access the alignment check would trigger before
283 * the one we are interested in.
284 */
285 #define BPF_F_ANY_ALIGNMENT (1U << 1)
286
287 /* BPF_F_TEST_RND_HI32 is used in the BPF_PROG_LOAD command for testing purposes.
288 * The verifier does sub-register def/use analysis and identifies instructions
289 * whose def only matters for the low 32 bits and whose high 32 bits are never
290 * referenced later through implicit zero extension. The verifier therefore
291 * notifies JIT back-ends that it is safe to skip clearing the high 32 bits for
292 * these instructions, which saves some back-ends a lot of code-gen. However,
293 * such an optimization is unnecessary on some arches, for example x86_64 and
294 * arm64, whose JIT back-ends therefore do not use the verifier's analysis
295 * result. But we still want a way to verify the correctness of the described
296 * optimization on x86_64, on which testsuites are frequently exercised.
297 *
298 * So this flag is introduced. Once it is set, the verifier randomizes the high
299 * 32 bits for those instructions that have been identified as safe to ignore.
300 * Then, if the verifier's analysis is incorrect, such randomization will make
301 * tests regress and expose the bugs.
302 */
303 #define BPF_F_TEST_RND_HI32 (1U << 2)
304
305 /* The verifier internal test flag. Behavior is undefined */
306 #define BPF_F_TEST_STATE_FREQ (1U << 3)
307
308 /* When BPF ldimm64's insn[0].src_reg != 0 then this can have
309 * two extensions:
310 *
311 * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE
312 * insn[0].imm: map fd map fd
313 * insn[1].imm: 0 offset into value
314 * insn[0].off: 0 0
315 * insn[1].off: 0 0
316 * ldimm64 rewrite: address of map address of map[0]+offset
317 * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE
318 */
319 #define BPF_PSEUDO_MAP_FD 1
320 #define BPF_PSEUDO_MAP_VALUE 2
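/* Purely illustrative sketch: the two-instruction ldimm64 sequence a loader
 * would emit to load a map pointer into r1 before the verifier rewrites the
 * immediate; map_fd is assumed to come from a prior BPF_MAP_CREATE.
 *
 *	struct bpf_insn ld_map[2] = {
 *		{ .code = BPF_LD | BPF_DW | BPF_IMM, .dst_reg = BPF_REG_1,
 *		  .src_reg = BPF_PSEUDO_MAP_FD, .imm = map_fd },
 *		{ .imm = 0 },	// second half of ldimm64, upper 32 bits
 *	};
 */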
321
322 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
323 * offset to another bpf function
324 */
325 #define BPF_PSEUDO_CALL 1
326
327 /* flags for BPF_MAP_UPDATE_ELEM command */
328 #define BPF_ANY 0 /* create new element or update existing */
329 #define BPF_NOEXIST 1 /* create new element if it didn't exist */
330 #define BPF_EXIST 2 /* update existing element */
331 #define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */
332
333 /* flags for BPF_MAP_CREATE command */
334 #define BPF_F_NO_PREALLOC (1U << 0)
335 /* Instead of having one common LRU list in the
336 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
337 * which can scale and perform better.
338 * Note, the LRU nodes (including free nodes) cannot be moved
339 * across different LRU lists.
340 */
341 #define BPF_F_NO_COMMON_LRU (1U << 1)
342 /* Specify numa node during map creation */
343 #define BPF_F_NUMA_NODE (1U << 2)
344
345 #define BPF_OBJ_NAME_LEN 16U
346
347 /* Flags for accessing BPF object from syscall side. */
348 #define BPF_F_RDONLY (1U << 3)
349 #define BPF_F_WRONLY (1U << 4)
350
351 /* Flag for stack_map, store build_id+offset instead of pointer */
352 #define BPF_F_STACK_BUILD_ID (1U << 5)
353
354 /* Zero-initialize hash function seed. This should only be used for testing. */
355 #define BPF_F_ZERO_SEED (1U << 6)
356
357 /* Flags for accessing BPF object from program side. */
358 #define BPF_F_RDONLY_PROG (1U << 7)
359 #define BPF_F_WRONLY_PROG (1U << 8)
360
361 /* Clone map from listener for newly accepted socket */
362 #define BPF_F_CLONE (1U << 9)
363
364 /* Enable memory-mapping BPF map */
365 #define BPF_F_MMAPABLE (1U << 10)
366
367 /* Flags for BPF_PROG_QUERY. */
368
369 /* Query effective (directly attached + inherited from ancestor cgroups)
370 * programs that will be executed for events within a cgroup.
371 * attach_flags with this flag are returned only for directly attached programs.
372 */
373 #define BPF_F_QUERY_EFFECTIVE (1U << 0)
374
375 enum bpf_stack_build_id_status {
376 /* user space needs an empty entry to identify the end of a trace */
377 BPF_STACK_BUILD_ID_EMPTY = 0,
378 /* with valid build_id and offset */
379 BPF_STACK_BUILD_ID_VALID = 1,
380 /* couldn't get build_id, fallback to ip */
381 BPF_STACK_BUILD_ID_IP = 2,
382 };
383
384 #define BPF_BUILD_ID_SIZE 20
385 struct bpf_stack_build_id {
386 __s32 status;
387 unsigned char build_id[BPF_BUILD_ID_SIZE];
388 union {
389 __u64 offset;
390 __u64 ip;
391 };
392 };
393
394 union bpf_attr {
395 struct { /* anonymous struct used by BPF_MAP_CREATE command */
396 __u32 map_type; /* one of enum bpf_map_type */
397 __u32 key_size; /* size of key in bytes */
398 __u32 value_size; /* size of value in bytes */
399 __u32 max_entries; /* max number of entries in a map */
400 __u32 map_flags; /* BPF_MAP_CREATE related
401 * flags defined above.
402 */
403 __u32 inner_map_fd; /* fd pointing to the inner map */
404 __u32 numa_node; /* numa node (effective only if
405 * BPF_F_NUMA_NODE is set).
406 */
407 char map_name[BPF_OBJ_NAME_LEN];
408 __u32 map_ifindex; /* ifindex of netdev to create on */
409 __u32 btf_fd; /* fd pointing to a BTF type data */
410 __u32 btf_key_type_id; /* BTF type_id of the key */
411 __u32 btf_value_type_id; /* BTF type_id of the value */
412 __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
413 * struct stored as the
414 * map value
415 */
416 };
417
418 struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
419 __u32 map_fd;
420 __aligned_u64 key;
421 union {
422 __aligned_u64 value;
423 __aligned_u64 next_key;
424 };
425 __u64 flags;
426 };
427
428 struct { /* struct used by BPF_MAP_*_BATCH commands */
429 __aligned_u64 in_batch; /* start batch,
430 * NULL to start from beginning
431 */
432 __aligned_u64 out_batch; /* output: next start batch */
433 __aligned_u64 keys;
434 __aligned_u64 values;
435 __u32 count; /* input/output:
436 * input: # of key/value
437 * elements
438 * output: # of filled elements
439 */
440 __u32 map_fd;
441 __u64 elem_flags;
442 __u64 flags;
443 } batch;
444
445 struct { /* anonymous struct used by BPF_PROG_LOAD command */
446 __u32 prog_type; /* one of enum bpf_prog_type */
447 __u32 insn_cnt;
448 __aligned_u64 insns;
449 __aligned_u64 license;
450 __u32 log_level; /* verbosity level of verifier */
451 __u32 log_size; /* size of user buffer */
452 __aligned_u64 log_buf; /* user supplied buffer */
453 __u32 kern_version; /* not used */
454 __u32 prog_flags;
455 char prog_name[BPF_OBJ_NAME_LEN];
456 __u32 prog_ifindex; /* ifindex of netdev to prep for */
457 /* For some prog types expected attach type must be known at
458 * load time to verify attach type specific parts of prog
459 * (context accesses, allowed helpers, etc).
460 */
461 __u32 expected_attach_type;
462 __u32 prog_btf_fd; /* fd pointing to BTF type data */
463 __u32 func_info_rec_size; /* userspace bpf_func_info size */
464 __aligned_u64 func_info; /* func info */
465 __u32 func_info_cnt; /* number of bpf_func_info records */
466 __u32 line_info_rec_size; /* userspace bpf_line_info size */
467 __aligned_u64 line_info; /* line info */
468 __u32 line_info_cnt; /* number of bpf_line_info records */
469 __u32 attach_btf_id; /* in-kernel BTF type id to attach to */
470 __u32 attach_prog_fd; /* 0 to attach to vmlinux */
471 };
472
473 struct { /* anonymous struct used by BPF_OBJ_* commands */
474 __aligned_u64 pathname;
475 __u32 bpf_fd;
476 __u32 file_flags;
477 };
478
479 struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
480 __u32 target_fd; /* container object to attach to */
481 __u32 attach_bpf_fd; /* eBPF program to attach */
482 __u32 attach_type;
483 __u32 attach_flags;
484 __u32 replace_bpf_fd; /* previously attached eBPF
485 * program to replace if
486 * BPF_F_REPLACE is used
487 */
488 };
489
490 struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
491 __u32 prog_fd;
492 __u32 retval;
493 __u32 data_size_in; /* input: len of data_in */
494 __u32 data_size_out; /* input/output: len of data_out
495 * returns ENOSPC if data_out
496 * is too small.
497 */
498 __aligned_u64 data_in;
499 __aligned_u64 data_out;
500 __u32 repeat;
501 __u32 duration;
502 __u32 ctx_size_in; /* input: len of ctx_in */
503 __u32 ctx_size_out; /* input/output: len of ctx_out
504 * returns ENOSPC if ctx_out
505 * is too small.
506 */
507 __aligned_u64 ctx_in;
508 __aligned_u64 ctx_out;
509 } test;
510
511 struct { /* anonymous struct used by BPF_*_GET_*_ID */
512 union {
513 __u32 start_id;
514 __u32 prog_id;
515 __u32 map_id;
516 __u32 btf_id;
517 };
518 __u32 next_id;
519 __u32 open_flags;
520 };
521
522 struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
523 __u32 bpf_fd;
524 __u32 info_len;
525 __aligned_u64 info;
526 } info;
527
528 struct { /* anonymous struct used by BPF_PROG_QUERY command */
529 __u32 target_fd; /* container object to query */
530 __u32 attach_type;
531 __u32 query_flags;
532 __u32 attach_flags;
533 __aligned_u64 prog_ids;
534 __u32 prog_cnt;
535 } query;
536
537 struct {
538 __u64 name;
539 __u32 prog_fd;
540 } raw_tracepoint;
541
542 struct { /* anonymous struct for BPF_BTF_LOAD */
543 __aligned_u64 btf;
544 __aligned_u64 btf_log_buf;
545 __u32 btf_size;
546 __u32 btf_log_size;
547 __u32 btf_log_level;
548 };
549
550 struct {
551 __u32 pid; /* input: pid */
552 __u32 fd; /* input: fd */
553 __u32 flags; /* input: flags */
554 __u32 buf_len; /* input/output: buf len */
555 __aligned_u64 buf; /* input/output:
556 * tp_name for tracepoint
557 * symbol for kprobe
558 * filename for uprobe
559 */
560 __u32 prog_id; /* output: prog_id */
561 __u32 fd_type; /* output: BPF_FD_TYPE_* */
562 __u64 probe_offset; /* output: probe_offset */
563 __u64 probe_addr; /* output: probe_addr */
564 } task_fd_query;
565 } __attribute__((aligned(8)));
566
567 /* The description below is an attempt at providing documentation to eBPF
568 * developers about the multiple available eBPF helper functions. It can be
569 * parsed and used to produce a manual page. The workflow is the following,
570 * and requires the rst2man utility:
571 *
572 * $ ./scripts/bpf_helpers_doc.py \
573 * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
574 * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
575 * $ man /tmp/bpf-helpers.7
576 *
577 * Note that in order to produce this external documentation, some RST
578 * formatting is used in the descriptions to get "bold" and "italics" in
579 * manual pages. Also note that the few trailing white spaces are
580 * intentional, removing them would break paragraphs for rst2man.
581 *
582 * Start of BPF helper function descriptions:
583 *
584 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
585 * Description
586 * Perform a lookup in *map* for an entry associated to *key*.
587 * Return
588 * Map value associated to *key*, or **NULL** if no entry was
589 * found.
590 *
591 * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
592 * Description
593 * Add or update the value of the entry associated to *key* in
594 * *map* with *value*. *flags* is one of:
595 *
596 * **BPF_NOEXIST**
597 * The entry for *key* must not exist in the map.
598 * **BPF_EXIST**
599 * The entry for *key* must already exist in the map.
600 * **BPF_ANY**
601 * No condition on the existence of the entry for *key*.
602 *
603 * Flag value **BPF_NOEXIST** cannot be used for maps of types
604 * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
605 * elements always exist); the helper would return an error.
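 *
 * An illustrative update from program context, assuming a map
 * *my_map* with **__u32** keys and **__u64** values declared
 * elsewhere:
 *
 * ::
 *
 * __u32 key = 0;
 * __u64 value = 42;
 *
 * if (bpf_map_update_elem(&my_map, &key, &value, BPF_ANY) < 0)
 * return 0; // update failed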
606 * Return
607 * 0 on success, or a negative error in case of failure.
608 *
609 * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
610 * Description
611 * Delete entry with *key* from *map*.
612 * Return
613 * 0 on success, or a negative error in case of failure.
614 *
615 * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
616 * Description
617 * For tracing programs, safely attempt to read *size* bytes from
618 * kernel space address *unsafe_ptr* and store the data in *dst*.
619 *
620 * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel()
621 * instead.
622 * Return
623 * 0 on success, or a negative error in case of failure.
624 *
625 * u64 bpf_ktime_get_ns(void)
626 * Description
627 * Return the time elapsed since system boot, in nanoseconds.
628 * Return
629 * Current *ktime*.
630 *
631 * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
632 * Description
633 * This helper is a "printk()-like" facility for debugging. It
634 * prints a message defined by format *fmt* (of size *fmt_size*)
635 * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
636 * available. It can take up to three additional **u64**
637 * arguments (as an eBPF helper, the total number of arguments is
638 * limited to five).
639 *
640 * Each time the helper is called, it appends a line to the trace.
641 * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
642 * open; use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
643 * The format of the trace is customizable, and the exact output
644 * one will get depends on the options set in
645 * *\/sys/kernel/debug/tracing/trace_options* (see also the
646 * *README* file under the same directory). However, it usually
647 * defaults to something like:
648 *
649 * ::
650 *
651 * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg>
652 *
653 * In the above:
654 *
655 * * ``telnet`` is the name of the current task.
656 * * ``470`` is the PID of the current task.
657 * * ``001`` is the CPU number on which the task is
658 * running.
659 * * In ``.N..``, each character refers to a set of
660 * options (whether irqs are enabled, scheduling
661 * options, whether hard/softirqs are running, level of
662 * preempt_disabled respectively). **N** means that
663 * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
664 * are set.
665 * * ``419421.045894`` is a timestamp.
666 * * ``0x00000001`` is a fake value used by BPF for the
667 * instruction pointer register.
668 * * ``<formatted msg>`` is the message formatted with
669 * *fmt*.
670 *
671 * The conversion specifiers supported by *fmt* are similar, but
672 * more limited than for printk(). They are **%d**, **%i**,
673 * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
674 * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
675 * of field, padding with zeroes, etc.) is available, and the
676 * helper will return **-EINVAL** (but print nothing) if it
677 * encounters an unknown specifier.
678 *
679 * Also, note that **bpf_trace_printk**\ () is slow, and should
680 * only be used for debugging purposes. For this reason, a notice
681 * block (spanning several lines) is printed to kernel logs and
682 * states that the helper should not be used "for production use"
683 * the first time this helper is used (or more precisely, when
684 * **trace_printk**\ () buffers are allocated). For passing values
685 * to user space, perf events should be preferred.
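 *
 * A minimal, illustrative use from a tracing program; the format
 * string must reside in BPF program memory:
 *
 * ::
 *
 * __u32 pid = bpf_get_current_pid_tgid() >> 32;
 * char fmt[] = "pid %d entered the probe\n";
 *
 * bpf_trace_printk(fmt, sizeof(fmt), pid);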
686 * Return
687 * The number of bytes written to the buffer, or a negative error
688 * in case of failure.
689 *
690 * u32 bpf_get_prandom_u32(void)
691 * Description
692 * Get a pseudo-random number.
693 *
694 * From a security point of view, this helper uses its own
695 * pseudo-random internal state, and cannot be used to infer the
696 * seed of other random functions in the kernel. However, it is
697 * essential to note that the generator used by the helper is not
698 * cryptographically secure.
699 * Return
700 * A random 32-bit unsigned value.
701 *
702 * u32 bpf_get_smp_processor_id(void)
703 * Description
704 * Get the SMP (symmetric multiprocessing) processor id. Note that
705 * all programs run with preemption disabled, which means that the
706 * SMP processor id is stable during all the execution of the
707 * program.
708 * Return
709 * The SMP id of the processor running the program.
710 *
711 * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
712 * Description
713 * Store *len* bytes from address *from* into the packet
714 * associated to *skb*, at *offset*. *flags* are a combination of
715 * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the
716 * checksum for the packet after storing the bytes) and
717 * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
718 * **->swhash** and *skb*\ **->l4hash** to 0).
719 *
720 * A call to this helper is susceptible to change the underlying
721 * packet buffer. Therefore, at load time, all checks on pointers
722 * previously done by the verifier are invalidated and must be
723 * performed again, if the helper is used in combination with
724 * direct packet access.
725 * Return
726 * 0 on success, or a negative error in case of failure.
727 *
728 * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
729 * Description
730 * Recompute the layer 3 (e.g. IP) checksum for the packet
731 * associated to *skb*. Computation is incremental, so the helper
732 * must know the former value of the header field that was
733 * modified (*from*), the new value of this field (*to*), and the
734 * number of bytes (2 or 4) for this field, stored in *size*.
735 * Alternatively, it is possible to store the difference between
736 * the previous and the new values of the header field in *to*, by
737 * setting *from* and *size* to 0. For both methods, *offset*
738 * indicates the location of the IP checksum within the packet.
739 *
740 * This helper works in combination with **bpf_csum_diff**\ (),
741 * which does not update the checksum in-place, but offers more
742 * flexibility and can handle sizes larger than 2 or 4 for the
743 * checksum to update.
744 *
745 * A call to this helper is susceptible to change the underlying
746 * packet buffer. Therefore, at load time, all checks on pointers
747 * previously done by the verifier are invalidated and must be
748 * performed again, if the helper is used in combination with
749 * direct packet access.
750 * Return
751 * 0 on success, or a negative error in case of failure.
752 *
753 * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
754 * Description
755 * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
756 * packet associated to *skb*. Computation is incremental, so the
757 * helper must know the former value of the header field that was
758 * modified (*from*), the new value of this field (*to*), and the
759 * number of bytes (2 or 4) for this field, stored on the lowest
760 * four bits of *flags*. Alternatively, it is possible to store
761 * the difference between the previous and the new values of the
762 * header field in *to*, by setting *from* and the four lowest
763 * bits of *flags* to 0. For both methods, *offset* indicates the
764 * location of the layer 4 checksum within the packet. In addition to
765 * the size of the field, *flags* can be added (bitwise OR) actual
766 * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
767 * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
768 * for updates resulting in a null checksum the value is set to
769 * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
770 * the checksum is to be computed against a pseudo-header.
771 *
772 * This helper works in combination with **bpf_csum_diff**\ (),
773 * which does not update the checksum in-place, but offers more
774 * flexibility and can handle sizes larger than 2 or 4 for the
775 * checksum to update.
776 *
777 * A call to this helper is susceptible to change the underlying
778 * packet buffer. Therefore, at load time, all checks on pointers
779 * previously done by the verifier are invalidated and must be
780 * performed again, if the helper is used in combination with
781 * direct packet access.
782 * Return
783 * 0 on success, or a negative error in case of failure.
784 *
785 * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
786 * Description
787 * This special helper is used to trigger a "tail call", or in
788 * other words, to jump into another eBPF program. The same stack
789 * frame is used (but values on stack and in registers for the
790 * caller are not accessible to the callee). This mechanism allows
791 * for program chaining, either for raising the maximum number of
792 * available eBPF instructions, or to execute given programs in
793 * conditional blocks. For security reasons, there is an upper
794 * limit to the number of successive tail calls that can be
795 * performed.
796 *
797 * Upon call of this helper, the program attempts to jump into a
798 * program referenced at index *index* in *prog_array_map*, a
799 * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
800 * *ctx*, a pointer to the context.
801 *
802 * If the call succeeds, the kernel immediately runs the first
803 * instruction of the new program. This is not a function call,
804 * and it never returns to the previous program. If the call
805 * fails, then the helper has no effect, and the caller continues
806 * to run its subsequent instructions. A call can fail if the
807 * destination program for the jump does not exist (i.e. *index*
808 * is greater than or equal to the number of entries in *prog_array_map*), or
809 * if the maximum number of tail calls has been reached for this
810 * chain of programs. This limit is defined in the kernel by the
811 * macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
812 * which is currently set to 32.
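 *
 * An illustrative sketch, assuming a **BPF_MAP_TYPE_PROG_ARRAY**
 * map *jmp_table* populated from user space:
 *
 * ::
 *
 * bpf_tail_call(ctx, &jmp_table, 2);
 *
 * // only reached if the tail call at index 2 failed
 * return 0;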
813 * Return
814 * 0 on success, or a negative error in case of failure.
815 *
816 * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
817 * Description
818 * Clone and redirect the packet associated to *skb* to another
819 * net device of index *ifindex*. Both ingress and egress
820 * interfaces can be used for redirection. The **BPF_F_INGRESS**
821 * value in *flags* is used to make the distinction (ingress path
822 * is selected if the flag is present, egress path otherwise).
823 * This is the only flag supported for now.
824 *
825 * In comparison with **bpf_redirect**\ () helper,
826 * **bpf_clone_redirect**\ () has the associated cost of
827 * duplicating the packet buffer, but this can be executed out of
828 * the eBPF program. Conversely, **bpf_redirect**\ () is more
829 * efficient, but it is handled through an action code where the
830 * redirection happens only after the eBPF program has returned.
831 *
832 * A call to this helper is susceptible to change the underlying
833 * packet buffer. Therefore, at load time, all checks on pointers
834 * previously done by the verifier are invalidated and must be
835 * performed again, if the helper is used in combination with
836 * direct packet access.
837 * Return
838 * 0 on success, or a negative error in case of failure.
839 *
840 * u64 bpf_get_current_pid_tgid(void)
841 * Return
842 * A 64-bit integer containing the current tgid and pid, and
843 * created as such:
844 * *current_task*\ **->tgid << 32 \|**
845 * *current_task*\ **->pid**.
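 *
 * For illustration, the two halves can be split as follows:
 *
 * ::
 *
 * __u64 id = bpf_get_current_pid_tgid();
 * __u32 tgid = id >> 32; // thread group id (user space "PID")
 * __u32 pid = id; // thread id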
846 *
847 * u64 bpf_get_current_uid_gid(void)
848 * Return
849 * A 64-bit integer containing the current GID and UID, and
850 * created as such: *current_gid* **<< 32 \|** *current_uid*.
851 *
852 * int bpf_get_current_comm(void *buf, u32 size_of_buf)
853 * Description
854 * Copy the **comm** attribute of the current task into *buf* of
855 * *size_of_buf*. The **comm** attribute contains the name of
856 * the executable (excluding the path) for the current task. The
857 * *size_of_buf* must be strictly positive. On success, the
858 * helper makes sure that the *buf* is NUL-terminated. On failure,
859 * it is filled with zeroes.
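 *
 * An illustrative call, with the buffer sized to the kernel's
 * 16-byte task comm:
 *
 * ::
 *
 * char comm[16];
 *
 * bpf_get_current_comm(comm, sizeof(comm));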
860 * Return
861 * 0 on success, or a negative error in case of failure.
862 *
863 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
864 * Description
865 * Retrieve the classid for the current task, i.e. for the net_cls
866 * cgroup to which *skb* belongs.
867 *
868 * This helper can be used on TC egress path, but not on ingress.
869 *
870 * The net_cls cgroup provides an interface to tag network packets
871 * based on a user-provided identifier for all traffic coming from
872 * the tasks belonging to the related cgroup. See also the related
873 * kernel documentation, available from the Linux sources in file
874 * *Documentation/admin-guide/cgroup-v1/net_cls.rst*.
875 *
876 * The Linux kernel has two versions for cgroups: there are
877 * cgroups v1 and cgroups v2. Both are available to users, who can
878 * use a mixture of them, but note that the net_cls cgroup is for
879 * cgroup v1 only. This makes it incompatible with BPF programs
880 * run on cgroups, which is a cgroup-v2-only feature (a socket can
881 * only hold data for one version of cgroups at a time).
882 *
883 * This helper is only available if the kernel was compiled with
884 * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
885 * "**y**" or to "**m**".
886 * Return
887 * The classid, or 0 for the default unconfigured classid.
888 *
889 * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
890 * Description
891 * Push a *vlan_tci* (VLAN tag control information) of protocol
892 * *vlan_proto* to the packet associated to *skb*, then update
893 * the checksum. Note that if *vlan_proto* is different from
894 * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
895 * be **ETH_P_8021Q**.
896 *
897 * A call to this helper is susceptible to change the underlying
898 * packet buffer. Therefore, at load time, all checks on pointers
899 * previously done by the verifier are invalidated and must be
900 * performed again, if the helper is used in combination with
901 * direct packet access.
902 * Return
903 * 0 on success, or a negative error in case of failure.
904 *
905 * int bpf_skb_vlan_pop(struct sk_buff *skb)
906 * Description
907 * Pop a VLAN header from the packet associated to *skb*.
908 *
909 * A call to this helper is susceptible to change the underlying
910 * packet buffer. Therefore, at load time, all checks on pointers
911 * previously done by the verifier are invalidated and must be
912 * performed again, if the helper is used in combination with
913 * direct packet access.
914 * Return
915 * 0 on success, or a negative error in case of failure.
916 *
917 * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
918 * Description
919 * Get tunnel metadata. This helper takes a pointer *key* to an
920 * empty **struct bpf_tunnel_key** of **size**, that will be
921 * filled with tunnel metadata for the packet associated to *skb*.
922 * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
923 * indicates that the tunnel is based on IPv6 protocol instead of
924 * IPv4.
925 *
926 * The **struct bpf_tunnel_key** is an object that generalizes the
927 * principal parameters used by various tunneling protocols into a
928 * single struct. This way, it can be used to easily make a
929 * decision based on the contents of the encapsulation header,
930 * "summarized" in this struct. In particular, it holds the IP
931 * address of the remote end (IPv4 or IPv6, depending on the case)
932 * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
933 * this struct exposes the *key*\ **->tunnel_id**, which is
934 * generally mapped to a VNI (Virtual Network Identifier), making
935 * it programmable together with the **bpf_skb_set_tunnel_key**\
936 * () helper.
937 *
938 * Let's imagine that the following code is part of a program
939 * attached to the TC ingress interface, on one end of a GRE
940 * tunnel, and is supposed to filter out all messages coming from
941 * remote ends with IPv4 address other than 10.0.0.1:
942 *
943 * ::
944 *
945 * int ret;
946 * struct bpf_tunnel_key key = {};
947 *
948 * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
949 * if (ret < 0)
950 * return TC_ACT_SHOT; // drop packet
951 *
952 * if (key.remote_ipv4 != 0x0a000001)
953 * return TC_ACT_SHOT; // drop packet
954 *
955 * return TC_ACT_OK; // accept packet
956 *
957 * This interface can also be used with all encapsulation devices
958 * that can operate in "collect metadata" mode: instead of having
959 * one network device per specific configuration, the "collect
960 * metadata" mode only requires a single device where the
961 * configuration can be extracted from this helper.
962 *
963 * This can be used together with various tunnels such as VXLan,
964 * Geneve, GRE or IP in IP (IPIP).
965 * Return
966 * 0 on success, or a negative error in case of failure.
967 *
968 * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
969 * Description
970 * Populate tunnel metadata for packet associated to *skb.* The
971 * tunnel metadata is set to the contents of *key*, of *size*. The
972 * *flags* can be set to a combination of the following values:
973 *
974 * **BPF_F_TUNINFO_IPV6**
975 * Indicate that the tunnel is based on IPv6 protocol
976 * instead of IPv4.
977 * **BPF_F_ZERO_CSUM_TX**
978 * For IPv4 packets, add a flag to tunnel metadata
979 * indicating that checksum computation should be skipped
980 * and checksum set to zeroes.
981 * **BPF_F_DONT_FRAGMENT**
982 * Add a flag to tunnel metadata indicating that the
983 * packet should not be fragmented.
984 * **BPF_F_SEQ_NUMBER**
985 * Add a flag to tunnel metadata indicating that a
986 * sequence number should be added to tunnel header before
987 * sending the packet. This flag was added for GRE
988 * encapsulation, but might be used with other protocols
989 * as well in the future.
990 *
991 * Here is a typical usage on the transmit path:
992 *
993 * ::
994 *
995 * struct bpf_tunnel_key key;
996 * populate key ...
997 * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
998 * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
999 *
1000 * See also the description of the **bpf_skb_get_tunnel_key**\ ()
1001 * helper for additional information.
1002 * Return
1003 * 0 on success, or a negative error in case of failure.
1004 *
1005 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
1006 * Description
1007 * Read the value of a perf event counter. This helper relies on a
1008 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
1009 * the perf event counter is selected when *map* is updated with
1010 * perf event file descriptors. The *map* is an array whose size
1011 * is the number of available CPUs, and each cell contains a value
1012 * relative to one CPU. The value to retrieve is indicated by
1013 * *flags*, that contains the index of the CPU to look up, masked
1014 * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1015 * **BPF_F_CURRENT_CPU** to indicate that the value for the
1016 * current CPU should be retrieved.
1017 *
1018 * Note that before Linux 4.13, only hardware perf events can be
1019 * retrieved.
1020 *
1021 * Also, be aware that the newer helper
1022 * **bpf_perf_event_read_value**\ () is recommended over
1023 * **bpf_perf_event_read**\ () in general. The latter has some ABI
1024 * quirks where error and counter value are used as a return code
1025 * (which is wrong to do since ranges may overlap). This issue is
1026 * fixed with **bpf_perf_event_read_value**\ (), which at the same
1027 * time provides more features over the **bpf_perf_event_read**\
1028 * () interface. Please refer to the description of
1029 * **bpf_perf_event_read_value**\ () for details.
1030 * Return
1031 * The value of the perf event counter read from the map, or a
1032 * negative error code in case of failure.
1033 *
1034 * int bpf_redirect(u32 ifindex, u64 flags)
1035 * Description
1036 * Redirect the packet to another net device of index *ifindex*.
1037 * This helper is somewhat similar to **bpf_clone_redirect**\
1038 * (), except that the packet is not cloned, which provides
1039 * increased performance.
1040 *
1041 * Except for XDP, both ingress and egress interfaces can be used
1042 * for redirection. The **BPF_F_INGRESS** value in *flags* is used
1043 * to make the distinction (ingress path is selected if the flag
1044 * is present, egress path otherwise). Currently, XDP only
1045 * supports redirection to the egress interface, and accepts no
1046 * flag at all.
1047 *
1048 * The same effect can also be attained with the more generic
1049 * **bpf_redirect_map**\ (), which uses a BPF map to store the
1050 * redirect target instead of providing it directly to the helper.
1051 * Return
1052 * For XDP, the helper returns **XDP_REDIRECT** on success or
1053 * **XDP_ABORTED** on error. For other program types, the values
1054 * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
1055 * error.
1056 *
1057 * u32 bpf_get_route_realm(struct sk_buff *skb)
1058 * Description
1059 * Retrieve the realm of the route, that is to say the
1060 * **tclassid** field of the destination for the *skb*. The
1061 * identifier retrieved is a user-provided tag, similar to the
1062 * one used with the net_cls cgroup (see description for
1063 * **bpf_get_cgroup_classid**\ () helper), but here this tag is
1064 * held by a route (a destination entry), not by a task.
1065 *
1066 * Retrieving this identifier works with the clsact TC egress hook
1067 * (see also **tc-bpf(8)**), or alternatively on conventional
1068 * classful egress qdiscs, but not on TC ingress path. In case of
1069 * clsact TC egress hook, this has the advantage that, internally,
1070 * the destination entry has not been dropped yet in the transmit
1071 * path. Therefore, the destination entry does not need to be
1072 * artificially held via **netif_keep_dst**\ () for a classful
1073 * qdisc until the *skb* is freed.
1074 *
1075 * This helper is available only if the kernel was compiled with
1076 * **CONFIG_IP_ROUTE_CLASSID** configuration option.
1077 * Return
1078 * The realm of the route for the packet associated to *skb*, or 0
1079 * if none was found.
1080 *
1081 * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
1082 * Description
1083 * Write raw *data* blob into a special BPF perf event held by
1084 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
1085 * event must have the following attributes: **PERF_SAMPLE_RAW**
1086 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
1087 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
1088 *
1089 * The *flags* are used to indicate the index in *map* for which
1090 * the value must be put, masked with **BPF_F_INDEX_MASK**.
1091 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
1092 * to indicate that the index of the current CPU core should be
1093 * used.
1094 *
1095 * The value to write, of *size*, is passed through eBPF stack and
1096 * pointed by *data*.
1097 *
1098 * The context of the program *ctx* also needs to be passed to the
1099 * helper.
1100 *
1101 * In user space, a program that wants to read the values needs to
1102 * call **perf_event_open**\ () on the perf event (either for
1103 * one or for all CPUs) and to store the file descriptor into the
1104 * *map*. This must be done before the eBPF program can send data
1105 * into it. An example is available in file
1106 * *samples/bpf/trace_output_user.c* in the Linux kernel source
1107 * tree (the eBPF program counterpart is in
1108 * *samples/bpf/trace_output_kern.c*).
1109 *
1110 * **bpf_perf_event_output**\ () achieves better performance
1111 * than **bpf_trace_printk**\ () for sharing data with user
1112 * space, and is much better suited for streaming data from eBPF
1113 * programs.
1114 *
1115 * Note that this helper is not restricted to tracing use cases
1116 * and can be used with programs attached to TC or XDP as well,
1117 * where it allows for passing data to user space listeners. Data
1118 * can be:
1119 *
1120 * * Only custom structs,
1121 * * Only the packet payload, or
1122 * * A combination of both.
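 *
 * An illustrative call from a TC program, assuming a
 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map *events* and an event
 * struct *evt* filled on the stack:
 *
 * ::
 *
 * bpf_perf_event_output(skb, &events, BPF_F_CURRENT_CPU, &evt, sizeof(evt));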
1123 * Return
1124 * 0 on success, or a negative error in case of failure.
1125 *
1126 * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
1127 * Description
1128 * This helper was provided as an easy way to load data from a
1129 * packet. It can be used to load *len* bytes from *offset* from
1130 * the packet associated to *skb*, into the buffer pointed by
1131 * *to*.
1132 *
1133 * Since Linux 4.7, usage of this helper has mostly been replaced
1134 * by "direct packet access", enabling packet data to be
1135 * manipulated with *skb*\ **->data** and *skb*\ **->data_end**
1136 * pointing respectively to the first byte of packet data and to
1137 * the byte after the last byte of packet data. However, it
1138 * remains useful if one wishes to read large quantities of data
1139 * at once from a packet into the eBPF stack.
1140 * Return
1141 * 0 on success, or a negative error in case of failure.
1142 *
1143 * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
1144 * Description
1145 * Walk a user or a kernel stack and return its id. To achieve
1146 * this, the helper needs *ctx*, which is a pointer to the context
1147 * on which the tracing program is executed, and a pointer to a
1148 * *map* of type **BPF_MAP_TYPE_STACK_TRACE**.
1149 *
1150 * The last argument, *flags*, holds the number of stack frames to
1151 * skip (from 0 to 255), masked with
1152 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
1153 * a combination of the following flags:
1154 *
1155 * **BPF_F_USER_STACK**
1156 * Collect a user space stack instead of a kernel stack.
1157 * **BPF_F_FAST_STACK_CMP**
1158 * Compare stacks by hash only.
1159 * **BPF_F_REUSE_STACKID**
1160 * If two different stacks hash into the same *stackid*,
1161 * discard the old one.
1162 *
1163 * The stack id retrieved is a 32 bit long integer handle which
1164 * can be further combined with other data (including other stack
1165 * ids) and used as a key into maps. This can be useful for
1166 * generating a variety of graphs (such as flame graphs or off-cpu
1167 * graphs).
1168 *
1169 * For walking a stack, this helper is an improvement over
1170 * **bpf_probe_read**\ (), which can be used with unrolled loops
1171 * but is not efficient and consumes a lot of eBPF instructions.
1172 * Instead, **bpf_get_stackid**\ () can collect up to
1173 * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
1174 * this limit can be controlled with the **sysctl** program, and
1175 * that it should be manually increased in order to profile long
1176 * user stacks (such as stacks for Java programs). To do so, use:
1177 *
1178 * ::
1179 *
1180 * # sysctl kernel.perf_event_max_stack=<new value>
1181 * Return
1182 * The positive or null stack id on success, or a negative error
1183 * in case of failure.
1184 *
1185 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
1186 * Description
1187 * Compute a checksum difference, from the raw buffer pointed by
1188 * *from*, of length *from_size* (that must be a multiple of 4),
1189 * towards the raw buffer pointed by *to*, of size *to_size*
1190 * (same remark). An optional *seed* can be added to the value
1191 * (this can be cascaded, the seed may come from a previous call
1192 * to the helper).
1193 *
1194 * This is flexible enough to be used in several ways:
1195 *
1196 * * With *from_size* == 0, *to_size* > 0 and *seed* set to
1197 * checksum, it can be used when pushing new data.
1198 * * With *from_size* > 0, *to_size* == 0 and *seed* set to
1199 * checksum, it can be used when removing data from a packet.
1200 * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
1201 * can be used to compute a diff. Note that *from_size* and
1202 * *to_size* do not need to be equal.
1203 *
1204 * This helper can be used in combination with
1205 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
1206 * which one can feed in the difference computed with
1207 * **bpf_csum_diff**\ ().
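 *
 * An illustrative sketch of rewriting an IPv4 address and fixing up
 * both checksums; *old_ip* and *new_ip* hold the previous and new
 * addresses, and the offsets are assumed to have been computed by
 * the program:
 *
 * ::
 *
 * __wsum diff = bpf_csum_diff(&old_ip, 4, &new_ip, 4, 0);
 *
 * bpf_skb_store_bytes(skb, ip_addr_off, &new_ip, 4, 0);
 * bpf_l4_csum_replace(skb, tcp_csum_off, 0, diff, BPF_F_PSEUDO_HDR);
 * bpf_l3_csum_replace(skb, ip_csum_off, 0, diff, 0);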
1208 * Return
1209 * The checksum result, or a negative error code in case of
1210 * failure.
1211 *
1212 * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
1213 * Description
1214 * Retrieve tunnel options metadata for the packet associated to
1215 * *skb*, and store the raw tunnel option data to the buffer *opt*
1216 * of *size*.
1217 *
1218 * This helper can be used with encapsulation devices that can
1219 * operate in "collect metadata" mode (please refer to the related
1220 * note in the description of **bpf_skb_get_tunnel_key**\ () for
1221 * more details). A particular example where this can be used is
1222 * in combination with the Geneve encapsulation protocol, where it
1223 * allows for pushing (with the **bpf_skb_set_tunnel_opt**\ () helper)
1224 * and retrieving arbitrary TLVs (Type-Length-Value headers) from
1225 * the eBPF program. This allows for full customization of these
1226 * headers.
1227 * Return
1228 * The size of the option data retrieved.
1229 *
1230 * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
1231 * Description
1232 * Set tunnel options metadata for the packet associated to *skb*
1233 * to the option data contained in the raw buffer *opt* of *size*.
1234 *
1235 * See also the description of the **bpf_skb_get_tunnel_opt**\ ()
1236 * helper for additional information.
1237 * Return
1238 * 0 on success, or a negative error in case of failure.
1239 *
1240 * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
1241 * Description
1242 * Change the protocol of the *skb* to *proto*. Currently
1243 * supported are transition from IPv4 to IPv6, and from IPv6 to
1244 * IPv4. The helper takes care of the groundwork for the
1245 * transition, including resizing the socket buffer. The eBPF
1246 * program is expected to fill the new headers, if any, via
1247 * **bpf_skb_store_bytes**\ () and to recompute the checksums with
1248 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
1249 * (). The main case for this helper is to perform NAT64
1250 * operations out of an eBPF program.
1251 *
1252 * Internally, the GSO type is marked as dodgy so that headers are
1253 * checked and segments are recalculated by the GSO/GRO engine.
1254 * The size for GSO target is adapted as well.
1255 *
1256 * All values for *flags* are reserved for future usage, and must
1257 * be left at zero.
1258 *
1259 * A call to this helper is susceptible to change the underlying
1260 * packet buffer. Therefore, at load time, all checks on pointers
1261 * previously done by the verifier are invalidated and must be
1262 * performed again, if the helper is used in combination with
1263 * direct packet access.
1264 * Return
1265 * 0 on success, or a negative error in case of failure.
1266 *
1267 * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
1268 * Description
1269 * Change the packet type for the packet associated to *skb*. This
1270 * comes down to setting *skb*\ **->pkt_type** to *type*, except
1271 * the eBPF program does not have write access to *skb*\
1272 * **->pkt_type** besides this helper. Using a helper here allows
1273 * for graceful handling of errors.
1274 *
1275 * The major use case is to change incoming *skb*s to
1276 * **PACKET_HOST** in a programmatic way instead of having to
1277 * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
1278 * example.
1279 *
1280 * Note that *type* only allows certain values. At this time, they
1281 * are:
1282 *
1283 * **PACKET_HOST**
1284 * Packet is for us.
1285 * **PACKET_BROADCAST**
1286 * Send packet to all.
1287 * **PACKET_MULTICAST**
1288 * Send packet to group.
1289 * **PACKET_OTHERHOST**
1290 * Send packet to someone else.
1291 * Return
1292 * 0 on success, or a negative error in case of failure.
1293 *
1294 * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
1295 * Description
1296 * Check whether *skb* is a descendant of the cgroup2 held by
1297 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1298 * Return
1299 * The return value depends on the result of the test, and can be:
1300 *
1301 * * 0, if the *skb* failed the cgroup2 descendant test.
1302 * * 1, if the *skb* succeeded the cgroup2 descendant test.
1303 * * A negative error code, if an error occurred.
1304 *
1305 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
1306 * Description
1307 * Retrieve the hash of the packet, *skb*\ **->hash**. If it is
1308 * not set, in particular if the hash was cleared due to mangling,
1309 * recompute this hash. Later accesses to the hash can be done
1310 * directly with *skb*\ **->hash**.
1311 *
1312 * Calling **bpf_set_hash_invalid**\ (), changing a packet
1313 * prototype with **bpf_skb_change_proto**\ (), or calling
1314 * **bpf_skb_store_bytes**\ () with the
1315 * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear
1316 * the hash and to trigger a new computation for the next call to
1317 * **bpf_get_hash_recalc**\ ().
1318 * Return
1319 * The 32-bit hash.
1320 *
1321 * u64 bpf_get_current_task(void)
1322 * Return
1323 * A pointer to the current task struct.
1324 *
1325 * int bpf_probe_write_user(void *dst, const void *src, u32 len)
1326 * Description
1327 * Attempt in a safe way to write *len* bytes from the buffer
1328 * *src* to *dst* in memory. It only works for threads that are in
1329 * user context, and *dst* must be a valid user space address.
1330 *
1331 * This helper should not be used to implement any kind of
1332 * security mechanism because of TOC-TOU attacks, but rather to
1333 * debug, divert, and manipulate execution of semi-cooperative
1334 * processes.
1335 *
1336 * Keep in mind that this feature is meant for experiments, and it
1337 * has a risk of crashing the system and running programs.
1338 * Therefore, when an eBPF program using this helper is attached,
1339 * a warning including PID and process name is printed to kernel
1340 * logs.
1341 * Return
1342 * 0 on success, or a negative error in case of failure.
1343 *
1344 * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
1345 * Description
1346 * Check whether the probe is being run in the context of a given
1347 * subset of the cgroup2 hierarchy. The cgroup2 to test is held by
1348 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1349 * Return
1350 * The return value depends on the result of the test, and can be:
1351 *
1352 * * 1, if current task belongs to the cgroup2.
1353 * * 0, if current task does not belong to the cgroup2.
1354 * * A negative error code, if an error occurred.
1355 *
1356 * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
1357 * Description
1358 * Resize (trim or grow) the packet associated to *skb* to the
1359 * new *len*. The *flags* are reserved for future usage, and must
1360 * be left at zero.
1361 *
1362 * The basic idea is that the helper performs the needed work to
1363 * change the size of the packet, then the eBPF program rewrites
1364 * the rest via helpers like **bpf_skb_store_bytes**\ (),
1365 * **bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
1366 * and others. This helper is a slow path utility intended for
1367 * replies with control messages. And because it is targeted for
1368 * slow path, the helper itself can afford to be slow: it
1369 * implicitly linearizes, unclones and drops offloads from the
1370 * *skb*.
1371 *
1372 * A call to this helper is susceptible to change the underlying
1373 * packet buffer. Therefore, at load time, all checks on pointers
1374 * previously done by the verifier are invalidated and must be
1375 * performed again, if the helper is used in combination with
1376 * direct packet access.
1377 * Return
1378 * 0 on success, or a negative error in case of failure.
1379 *
1380 * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
1381 * Description
1382 * Pull in non-linear data in case the *skb* is non-linear and not
1383 * all of *len* are part of the linear section. Make *len* bytes
1384 * from *skb* readable and writable. If a zero value is passed for
1385 * *len*, then the whole length of the *skb* is pulled.
1386 *
1387 * This helper is only needed for reading and writing with direct
1388 * packet access.
1389 *
1390 * For direct packet access, testing that offsets to access
1391 * are within packet boundaries (test on *skb*\ **->data_end**) is
1392 * susceptible to fail if offsets are invalid, or if the requested
1393 * data is in non-linear parts of the *skb*. On failure the
1394 * program can just bail out, or in the case of a non-linear
1395 * buffer, use a helper to make the data available. The
1396 * **bpf_skb_load_bytes**\ () helper is a first solution to access
1397 * the data. Another one consists in using **bpf_skb_pull_data**\ ()
1398 * to pull in the non-linear parts once, then retesting and
1399 * eventually accessing the data.
1400 *
1401 * At the same time, this also makes sure the *skb* is uncloned,
1402 * which is a necessary condition for direct write. As this needs
1403 * to be an invariant for the write part only, the verifier
1404 * detects writes and adds a prologue that is calling
1405 * **bpf_skb_pull_data**\ () to effectively unclone the *skb* from
1406 * the very beginning in case it is indeed cloned.
1407 *
1408 * A call to this helper is susceptible to change the underlying
1409 * packet buffer. Therefore, at load time, all checks on pointers
1410 * previously done by the verifier are invalidated and must be
1411 * performed again, if the helper is used in combination with
1412 * direct packet access.
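*
* As an illustration, a minimal sketch of a TC classifier using this
* helper could look as follows, assuming libbpf's **bpf_helpers.h**
* and **<linux/pkt_cls.h>** (names and sizes are illustrative).
*
* ::
*
*        SEC("classifier")
*        int pull_and_check(struct __sk_buff *skb)
*        {
*                // Make the first 64 bytes readable and writable.
*                if (bpf_skb_pull_data(skb, 64))
*                        return TC_ACT_OK;
*
*                // Pointers must be re-derived and re-checked afterwards.
*                void *data = (void *)(long)skb->data;
*                void *data_end = (void *)(long)skb->data_end;
*
*                if (data + 64 > data_end)
*                        return TC_ACT_OK;
*
*                // ... direct packet access on the first 64 bytes ...
*                return TC_ACT_OK;
*        }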
1413 * Return
1414 * 0 on success, or a negative error in case of failure.
1415 *
1416 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
1417 * Description
1418 * Add the checksum *csum* into *skb*\ **->csum** in case the
1419 * driver has supplied a checksum for the entire packet into that
1420 * field. Return an error otherwise. This helper is intended to be
1421 * used in combination with **bpf_csum_diff**\ (), in particular
1422 * when the checksum needs to be updated after data has been
1423 * written into the packet through direct packet access.
1424 * Return
1425 * The checksum on success, or a negative error code in case of
1426 * failure.
1427 *
1428 * void bpf_set_hash_invalid(struct sk_buff *skb)
1429 * Description
1430 * Invalidate the current *skb*\ **->hash**. It can be used after
1431 * mangling on headers through direct packet access, in order to
1432 * indicate that the hash is outdated and to trigger a
1433 * recalculation the next time the kernel tries to access this
1434 * hash or when the **bpf_get_hash_recalc**\ () helper is called.
1435 *
1436 * int bpf_get_numa_node_id(void)
1437 * Description
1438 * Return the id of the current NUMA node. The primary use case
1439 * for this helper is the selection of sockets for the local NUMA
1440 * node, when the program is attached to sockets using the
1441 * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
1442 * but the helper is also available to other eBPF program types,
1443 * similarly to **bpf_get_smp_processor_id**\ ().
1444 * Return
1445 * The id of the current NUMA node.
1446 *
1447 * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
1448 * Description
1449 * Grow headroom of the packet associated to *skb* and adjust the
1450 * offset of the MAC header accordingly, adding *len* bytes of
1451 * space. It automatically extends and reallocates memory as
1452 * required.
1453 *
1454 * This helper can be used on a layer 3 *skb* to push a MAC header
1455 * for redirection into a layer 2 device.
1456 *
1457 * All values for *flags* are reserved for future usage, and must
1458 * be left at zero.
1459 *
1460 * A call to this helper is susceptible to change the underlying
1461 * packet buffer. Therefore, at load time, all checks on pointers
1462 * previously done by the verifier are invalidated and must be
1463 * performed again, if the helper is used in combination with
1464 * direct packet access.
1465 * Return
1466 * 0 on success, or a negative error in case of failure.
1467 *
1468 * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
1469 * Description
1470 * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
1471 * it is possible to use a negative value for *delta*. This helper
1472 * can be used to prepare the packet for pushing or popping
1473 * headers.
1474 *
1475 * A call to this helper is susceptible to change the underlying
1476 * packet buffer. Therefore, at load time, all checks on pointers
1477 * previously done by the verifier are invalidated and must be
1478 * performed again, if the helper is used in combination with
1479 * direct packet access.
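*
* For example, a small sketch making room for a custom encapsulation
* header at the front of the frame could look as follows, assuming
* libbpf's **bpf_helpers.h** (names and sizes are illustrative).
*
* ::
*
*        SEC("xdp")
*        int reserve_room(struct xdp_md *ctx)
*        {
*                // Grow the packet by 8 bytes at the front (negative delta).
*                if (bpf_xdp_adjust_head(ctx, -8))
*                        return XDP_ABORTED;
*
*                // data/data_end must be reloaded and re-checked afterwards.
*                void *data = (void *)(long)ctx->data;
*                void *data_end = (void *)(long)ctx->data_end;
*
*                if (data + 8 > data_end)
*                        return XDP_DROP;
*
*                // ... write the new header at data here ...
*                return XDP_PASS;
*        }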
1480 * Return
1481 * 0 on success, or a negative error in case of failure.
1482 *
1483 * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
1484 * Description
1485 * Copy a NUL terminated string from an unsafe kernel address
1486 * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for
1487 * more details.
1488 *
1489 * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str()
1490 * instead.
1491 * Return
1492 * On success, the strictly positive length of the string,
1493 * including the trailing NUL character. On error, a negative
1494 * value.
1495 *
1496 * u64 bpf_get_socket_cookie(struct sk_buff *skb)
1497 * Description
1498 * If the **struct sk_buff** pointed by *skb* has a known socket,
1499 * retrieve the cookie (generated by the kernel) of this socket.
1500 * If no cookie has been set yet, generate a new cookie. Once
1501 * generated, the socket cookie remains stable for the life of the
1502 * socket. This helper can be useful for monitoring per socket
1503 * networking traffic statistics as it provides a global socket
1504 * identifier that can be assumed unique.
1505 * Return
1506 * An 8-byte long non-decreasing number on success, or 0 if the
1507 * socket field is missing inside *skb*.
1508 *
1509 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
1510 * Description
1511 * Equivalent to bpf_get_socket_cookie() helper that accepts
1512 * *skb*, but gets socket from **struct bpf_sock_addr** context.
1513 * Return
1514 * An 8-byte long non-decreasing number.
1515 *
1516 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
1517 * Description
1518 * Equivalent to bpf_get_socket_cookie() helper that accepts
1519 * *skb*, but gets socket from **struct bpf_sock_ops** context.
1520 * Return
1521 * An 8-byte long non-decreasing number.
1522 *
1523 * u32 bpf_get_socket_uid(struct sk_buff *skb)
1524 * Return
1525 * The owner UID of the socket associated to *skb*. If the socket
1526 * is **NULL**, or if it is not a full socket (i.e. if it is a
1527 * time-wait or a request socket instead), **overflowuid** value
1528 * is returned (note that **overflowuid** might also be the actual
1529 * UID value for the socket).
1530 *
1531 * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
1532 * Description
1533 * Set the full hash for *skb* (set the field *skb*\ **->hash**)
1534 * to value *hash*.
1535 * Return
1536 * 0
1537 *
1538 * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
1539 * Description
1540 * Emulate a call to **setsockopt()** on the socket associated to
1541 * *bpf_socket*, which must be a full socket. The *level* at
1542 * which the option resides and the name *optname* of the option
1543 * must be specified, see **setsockopt(2)** for more information.
1544 * The option value of length *optlen* is pointed by *optval*.
1545 *
1546 * This helper actually implements a subset of **setsockopt()**.
1547 * It supports the following *level*\ s:
1548 *
1549 * * **SOL_SOCKET**, which supports the following *optname*\ s:
1550 * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
1551 * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
1552 * * **IPPROTO_TCP**, which supports the following *optname*\ s:
1553 * **TCP_CONGESTION**, **TCP_BPF_IW**,
1554 * **TCP_BPF_SNDCWND_CLAMP**.
1555 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1556 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
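*
* For example, a **BPF_PROG_TYPE_SOCK_OPS** program could switch new
* connections to a given congestion control algorithm as sketched
* below, assuming libbpf's **bpf_helpers.h** plus **<linux/in.h>** and
* **<linux/tcp.h>** for the constants (this is only an illustration).
*
* ::
*
*        SEC("sockops")
*        int set_cc(struct bpf_sock_ops *skops)
*        {
*                char cc[] = "cubic";
*
*                if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
*                        bpf_setsockopt(skops, IPPROTO_TCP, TCP_CONGESTION,
*                                       cc, sizeof(cc));
*                return 1;
*        }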
1557 * Return
1558 * 0 on success, or a negative error in case of failure.
1559 *
1560 * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
1561 * Description
1562 * Grow or shrink the room for data in the packet associated to
1563 * *skb* by *len_diff*, and according to the selected *mode*.
1564 *
1565 * There are two supported modes at this time:
1566 *
1567 * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
1568 * (room space is added or removed below the layer 2 header).
1569 *
1570 * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
1571 * (room space is added or removed below the layer 3 header).
1572 *
1573 * The following flags are supported at this time:
1574 *
1575 * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
1576 * Adjusting mss in this way is not allowed for datagrams.
1577 *
1578 * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
1579 * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
1580 * Any new space is reserved to hold a tunnel header.
1581 * Configure skb offsets and other fields accordingly.
1582 *
1583 * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
1584 * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
1585 * Use with ENCAP_L3 flags to further specify the tunnel type.
1586 *
1587 * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
1588 * Use with ENCAP_L3/L4 flags to further specify the tunnel
1589 * type; *len* is the length of the inner MAC header.
1590 *
1591 * A call to this helper is susceptible to change the underlying
1592 * packet buffer. Therefore, at load time, all checks on pointers
1593 * previously done by the verifier are invalidated and must be
1594 * performed again, if the helper is used in combination with
1595 * direct packet access.
1596 * Return
1597 * 0 on success, or a negative error in case of failure.
1598 *
1599 * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
1600 * Description
1601 * Redirect the packet to the endpoint referenced by *map* at
1602 * index *key*. Depending on its type, this *map* can contain
1603 * references to net devices (for forwarding packets through other
1604 * ports), or to CPUs (for redirecting XDP frames to another CPU;
1605 * but this is only implemented for native XDP (with driver
1606 * support) as of this writing).
1607 *
1608 * The lower two bits of *flags* are used as the return code if
1609 * the map lookup fails. This is so that the return value can be
1610 * one of the XDP program return codes up to XDP_TX, as chosen by
1611 * the caller. Any higher bits in the *flags* argument must be
1612 * unset.
1613 *
1614 * See also bpf_redirect(), which only supports redirecting to an
1615 * ifindex, but doesn't require a map to do so.
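*
* A minimal XDP sketch using a devmap could look as follows, assuming
* libbpf's **bpf_helpers.h** and BTF-style map definitions (the map
* name and index are illustrative).
*
* ::
*
*        struct {
*                __uint(type, BPF_MAP_TYPE_DEVMAP);
*                __uint(max_entries, 8);
*                __type(key, __u32);
*                __type(value, __u32);
*        } tx_ports SEC(".maps");
*
*        SEC("xdp")
*        int redirect_port0(struct xdp_md *ctx)
*        {
*                // If the lookup at index 0 fails, the lower two bits of
*                // the flags argument (here XDP_PASS) are returned instead.
*                return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
*        }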
1616 * Return
1617 * **XDP_REDIRECT** on success, or the value of the two lower bits
1618 * of the *flags* argument on error.
1619 *
1620 * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
1621 * Description
1622 * Redirect the packet to the socket referenced by *map* (of type
1623 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1624 * egress interfaces can be used for redirection. The
1625 * **BPF_F_INGRESS** value in *flags* is used to make the
1626 * distinction (ingress path is selected if the flag is present,
1627 * egress path otherwise). This is the only flag supported for now.
1628 * Return
1629 * **SK_PASS** on success, or **SK_DROP** on error.
1630 *
1631 * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
1632 * Description
1633 * Add an entry to, or update a *map* referencing sockets. The
1634 * *skops* is used as a new value for the entry associated to
1635 * *key*. *flags* is one of:
1636 *
1637 * **BPF_NOEXIST**
1638 * The entry for *key* must not exist in the map.
1639 * **BPF_EXIST**
1640 * The entry for *key* must already exist in the map.
1641 * **BPF_ANY**
1642 * No condition on the existence of the entry for *key*.
1643 *
1644 * If the *map* has eBPF programs (parser and verdict), those will
1645 * be inherited by the socket being added. If the socket is
1646 * already attached to eBPF programs, this results in an error.
1647 * Return
1648 * 0 on success, or a negative error in case of failure.
1649 *
1650 * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
1651 * Description
1652 * Adjust the address pointed by *xdp_md*\ **->data_meta** by
1653 * *delta* (which can be positive or negative). Note that this
1654 * operation modifies the address stored in *xdp_md*\ **->data**,
1655 * so the latter must be loaded only after the helper has been
1656 * called.
1657 *
1658 * The use of *xdp_md*\ **->data_meta** is optional and programs
1659 * are not required to use it. The rationale is that when the
1660 * packet is processed with XDP (e.g. as DoS filter), it is
1661 * possible to push further meta data along with it before passing
1662 * to the stack, and to give the guarantee that an ingress eBPF
1663 * program attached as a TC classifier on the same device can pick
1664 * this up for further post-processing. Since TC works with socket
1665 * buffers, it remains possible to set from XDP the **mark** or
1666 * **priority** fields, or other fields of the socket buffer.
1667 * Having this scratch space generic and programmable allows for
1668 * more flexibility as the user is free to store whatever meta
1669 * data they need.
1670 *
1671 * A call to this helper is susceptible to change the underlying
1672 * packet buffer. Therefore, at load time, all checks on pointers
1673 * previously done by the verifier are invalidated and must be
1674 * performed again, if the helper is used in combination with
1675 * direct packet access.
1676 * Return
1677 * 0 on success, or a negative error in case of failure.
1678 *
1679 * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
1680 * Description
1681 * Read the value of a perf event counter, and store it into *buf*
1682 * of size *buf_size*. This helper relies on a *map* of type
1683 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
1684 * counter is selected when *map* is updated with perf event file
1685 * descriptors. The *map* is an array whose size is the number of
1686 * available CPUs, and each cell contains a value relative to one
1687 * CPU. The value to retrieve is indicated by *flags*, that
1688 * contains the index of the CPU to look up, masked with
1689 * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1690 * **BPF_F_CURRENT_CPU** to indicate that the value for the
1691 * current CPU should be retrieved.
1692 *
1693 * This helper behaves in a way close to
1694 * **bpf_perf_event_read**\ () helper, save that instead of
1695 * just returning the value observed, it fills the *buf*
1696 * structure. This allows for additional data to be retrieved: in
1697 * particular, the enabled and running times (in *buf*\
1698 * **->enabled** and *buf*\ **->running**, respectively) are
1699 * copied. In general, **bpf_perf_event_read_value**\ () is
1700 * recommended over **bpf_perf_event_read**\ (), which has some
1701 * ABI issues and provides fewer functionalities.
1702 *
1703 * These values are interesting, because hardware PMU (Performance
1704 * Monitoring Unit) counters are limited resources. When there are
1705 * more PMU-based perf events opened than available counters, the
1706 * kernel will multiplex these events so each event gets a certain
1707 * percentage (but not all) of the PMU time. When such
1708 * multiplexing happens, the number of samples or the counter
1709 * value will not reflect what it would be without multiplexing.
1710 * This makes comparison between different runs difficult.
1711 * Typically, the counter value should be normalized before
1712 * comparing to other experiments. The usual normalization is done
1713 * as follows.
1714 *
1715 * ::
1716 *
1717 * normalized_counter = counter * t_enabled / t_running
1718 *
1719 * Where t_enabled is the time enabled for the event and t_running
1720 * is the time running for the event since the last normalization.
1721 * The enabled and running times are accumulated since the perf
1722 * event open. To compute the scaling factor between two
1723 * invocations of an eBPF program, users can use the CPU id as the
1724 * key (typical for the perf array usage model) to remember the
1725 * previous value and do the calculation inside the eBPF program.
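*
* The fragment below sketches such a reading for the current CPU,
* assuming libbpf's **bpf_helpers.h** and a
* **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map named *counters* already
* populated from user space (names are illustrative).
*
* ::
*
*        struct bpf_perf_event_value val = {};
*        __u64 scaled = 0;
*
*        if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
*                                       &val, sizeof(val)) && val.running)
*                scaled = val.counter * val.enabled / val.running;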
1726 * Return
1727 * 0 on success, or a negative error in case of failure.
1728 *
1729 * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
1730 * Description
1731 * For an eBPF program attached to a perf event, retrieve the
1732 * value of the event counter associated to *ctx* and store it in
1733 * the structure pointed by *buf* and of size *buf_size*. Enabled
1734 * and running times are also stored in the structure (see
1735 * description of helper **bpf_perf_event_read_value**\ () for
1736 * more details).
1737 * Return
1738 * 0 on success, or a negative error in case of failure.
1739 *
1740 * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
1741 * Description
1742 * Emulate a call to **getsockopt()** on the socket associated to
1743 * *bpf_socket*, which must be a full socket. The *level* at
1744 * which the option resides and the name *optname* of the option
1745 * must be specified, see **getsockopt(2)** for more information.
1746 * The retrieved value is stored in the structure pointed by
1747 * *optval* and of length *optlen*.
1748 *
1749 * This helper actually implements a subset of **getsockopt()**.
1750 * It supports the following *level*\ s:
1751 *
1752 * * **IPPROTO_TCP**, which supports *optname*
1753 * **TCP_CONGESTION**.
1754 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1755 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
1756 * Return
1757 * 0 on success, or a negative error in case of failure.
1758 *
1759 * int bpf_override_return(struct pt_regs *regs, u64 rc)
1760 * Description
1761 * Used for error injection, this helper uses kprobes to override
1762 * the return value of the probed function, and to set it to *rc*.
1763 * The first argument is the context *regs* on which the kprobe
1764 * works.
1765 *
1766 * This helper works by setting the PC (program counter)
1767 * to an override function which is run in place of the original
1768 * probed function. This means the probed function is not run at
1769 * all. The replacement function just returns with the required
1770 * value.
1771 *
1772 * This helper has security implications, and thus is subject to
1773 * restrictions. It is only available if the kernel was compiled
1774 * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
1775 * option, and in this case it only works on functions tagged with
1776 * **ALLOW_ERROR_INJECTION** in the kernel code.
1777 *
1778 * Also, the helper is only available for the architectures having
1779 * the **CONFIG_FUNCTION_ERROR_INJECTION** option. As of this writing,
1780 * x86 architecture is the only one to support this feature.
1781 * Return
1782 * 0
1783 *
1784 * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
1785 * Description
1786 * Attempt to set the value of the **bpf_sock_ops_cb_flags** field
1787 * for the full TCP socket associated to *bpf_sock* to
1788 * *argval*.
1789 *
1790 * The primary use of this field is to determine if there should
1791 * be calls to eBPF programs of type
1792 * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
1793 * code. A program of the same type can change its value, per
1794 * connection and as necessary, when the connection is
1795 * established. This field is directly accessible for reading, but
1796 * this helper must be used for updates in order to return an
1797 * error if an eBPF program tries to set a callback that is not
1798 * supported in the current kernel.
1799 *
1800 * *argval* is a flag array which can combine these flags:
1801 *
1802 * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
1803 * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
1804 * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
1805 * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
1806 *
1807 * Therefore, this function can be used to clear a callback flag by
1808 * setting the appropriate bit to zero. For example, to disable
1809 * the RTO callback:
1810 *
1811 * **bpf_sock_ops_cb_flags_set(bpf_sock,**
1812 * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
1813 *
1814 * Here are some examples of where one could call such eBPF
1815 * program:
1816 *
1817 * * When RTO fires.
1818 * * When a packet is retransmitted.
1819 * * When the connection terminates.
1820 * * When a packet is sent.
1821 * * When a packet is received.
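*
* As an illustration, the sketch below enables RTT callbacks once a
* connection is established, assuming libbpf's **bpf_helpers.h**
* (names are illustrative).
*
* ::
*
*        SEC("sockops")
*        int enable_rtt_cb(struct bpf_sock_ops *skops)
*        {
*                if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
*                    skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
*                        bpf_sock_ops_cb_flags_set(skops,
*                                skops->bpf_sock_ops_cb_flags |
*                                BPF_SOCK_OPS_RTT_CB_FLAG);
*                return 1;
*        }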
1822 * Return
1823 * Code **-EINVAL** if the socket is not a full TCP socket;
1824 * otherwise, a positive number containing the bits that could not
1825 * be set is returned (which comes down to 0 if all bits were set
1826 * as required).
1827 *
1828 * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
1829 * Description
1830 * This helper is used in programs implementing policies at the
1831 * socket level. If the message *msg* is allowed to pass (i.e. if
1832 * the verdict eBPF program returns **SK_PASS**), redirect it to
1833 * the socket referenced by *map* (of type
1834 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1835 * egress interfaces can be used for redirection. The
1836 * **BPF_F_INGRESS** value in *flags* is used to make the
1837 * distinction (ingress path is selected if the flag is present,
1838 * egress path otherwise). This is the only flag supported for now.
1839 * Return
1840 * **SK_PASS** on success, or **SK_DROP** on error.
1841 *
1842 * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
1843 * Description
1844 * For socket policies, apply the verdict of the eBPF program to
1845 * the next *bytes* (number of bytes) of message *msg*.
1846 *
1847 * For example, this helper can be used in the following cases:
1848 *
1849 * * A single **sendmsg**\ () or **sendfile**\ () system call
1850 * contains multiple logical messages that the eBPF program is
1851 * supposed to read and for which it should apply a verdict.
1852 * * An eBPF program only cares to read the first *bytes* of a
1853 * *msg*. If the message has a large payload, then setting up
1854 * and calling the eBPF program repeatedly for all bytes, even
1855 * though the verdict is already known, would create unnecessary
1856 * overhead.
1857 *
1858 * When called from within an eBPF program, the helper sets a
1859 * counter internal to the BPF infrastructure, that is used to
1860 * apply the last verdict to the next *bytes*. If *bytes* is
1861 * smaller than the current data being processed from a
1862 * **sendmsg**\ () or **sendfile**\ () system call, the first
1863 * *bytes* will be sent and the eBPF program will be re-run with
1864 * the pointer for start of data pointing to byte number *bytes*
1865 * **+ 1**. If *bytes* is larger than the current data being
1866 * processed, then the eBPF verdict will be applied to multiple
1867 * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are
1868 * consumed.
1869 *
1870 * Note that if a socket closes with the internal counter holding
1871 * a non-zero value, this is not a problem because data is not
1872 * being buffered for *bytes* and is sent as it is received.
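*
* A minimal verdict program using this helper could be sketched as
* follows, assuming libbpf's **bpf_helpers.h** (the byte count is
* illustrative).
*
* ::
*
*        SEC("sk_msg")
*        int verdict_prog(struct sk_msg_md *msg)
*        {
*                // Apply the verdict returned below to the next 4096
*                // bytes, instead of re-running this program per chunk.
*                bpf_msg_apply_bytes(msg, 4096);
*                return SK_PASS;
*        }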
1873 * Return
1874 * 0
1875 *
1876 * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
1877 * Description
1878 * For socket policies, prevent the execution of the verdict eBPF
1879 * program for message *msg* until *bytes* (byte number) have been
1880 * accumulated.
1881 *
1882 * This can be used when one needs a specific number of bytes
1883 * before a verdict can be assigned, even if the data spans
1884 * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
1885 * case would be a user calling **sendmsg**\ () repeatedly with
1886 * 1-byte long message segments. Obviously, this is bad for
1887 * performance, but it is still valid. If the eBPF program needs
1888 * *bytes* bytes to validate a header, this helper can be used to
1889 * prevent the eBPF program from being called again until *bytes* have
1890 * been accumulated.
1891 * Return
1892 * 0
1893 *
1894 * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
1895 * Description
1896 * For socket policies, pull in non-linear data from user space
1897 * for *msg* and set pointers *msg*\ **->data** and *msg*\
1898 * **->data_end** to *start* and *end* byte offsets into *msg*,
1899 * respectively.
1900 *
1901 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
1902 * *msg* it can only parse data that the (**data**, **data_end**)
1903 * pointers have already consumed. For **sendmsg**\ () hooks this
1904 * is likely the first scatterlist element. But for calls relying
1905 * on the **sendpage** handler (e.g. **sendfile**\ ()) this will
1906 * be the range (**0**, **0**) because the data is shared with
1907 * user space and by default the objective is to avoid allowing
1908 * user space to modify data while (or after) eBPF verdict is
1909 * being decided. This helper can be used to pull in data and to
1910 * set the start and end pointer to given values. Data will be
1911 * copied if necessary (i.e. if data was not linear and if start
1912 * and end pointers do not point to the same chunk).
1913 *
1914 * A call to this helper is susceptible to change the underlying
1915 * packet buffer. Therefore, at load time, all checks on pointers
1916 * previously done by the verifier are invalidated and must be
1917 * performed again, if the helper is used in combination with
1918 * direct packet access.
1919 *
1920 * All values for *flags* are reserved for future usage, and must
1921 * be left at zero.
1922 * Return
1923 * 0 on success, or a negative error in case of failure.
1924 *
1925 * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
1926 * Description
1927 * Bind the socket associated to *ctx* to the address pointed by
1928 * *addr*, of length *addr_len*. This allows for making outgoing
1929 * connection from the desired IP address, which can be useful for
1930 * example when all processes inside a cgroup should use one
1931 * single IP address on a host that has multiple IPs configured.
1932 *
1933 * This helper works for IPv4 and IPv6, TCP and UDP sockets. The
1934 * domain (*addr*\ **->sa_family**) must be **AF_INET** (or
1935 * **AF_INET6**). Looking for a free port to bind to can be
1936 * expensive, therefore binding to a port is not permitted by the
1937 * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively)
1938 * must be set to zero.
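*
* A sketch for a program attached to **BPF_CGROUP_INET4_CONNECT**
* could look as follows, assuming libbpf's **bpf_helpers.h** and
* **bpf_endian.h** plus the usual socket headers (the source address
* 10.0.0.1 is an example value).
*
* ::
*
*        SEC("cgroup/connect4")
*        int bind_to_src(struct bpf_sock_addr *ctx)
*        {
*                struct sockaddr_in addr = {
*                        .sin_family = AF_INET,
*                        .sin_port = 0,  // choosing a port is not permitted
*                        .sin_addr = { .s_addr = bpf_htonl(0x0a000001) },
*                };
*
*                bpf_bind(ctx, (struct sockaddr *)&addr, sizeof(addr));
*                return 1;  // let the connect() proceed
*        }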
1939 * Return
1940 * 0 on success, or a negative error in case of failure.
1941 *
1942 * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
1943 * Description
1944 * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
1945 * only possible to shrink the packet as of this writing,
1946 * therefore *delta* must be a negative integer.
1947 *
1948 * A call to this helper is susceptible to change the underlying
1949 * packet buffer. Therefore, at load time, all checks on pointers
1950 * previously done by the verifier are invalidated and must be
1951 * performed again, if the helper is used in combination with
1952 * direct packet access.
1953 * Return
1954 * 0 on success, or a negative error in case of failure.
1955 *
1956 * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
1957 * Description
1958 * Retrieve the XFRM state (IP transform framework, see also
1959 * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
1960 *
1961 * The retrieved value is stored in the **struct bpf_xfrm_state**
1962 * pointed by *xfrm_state* and of length *size*.
1963 *
1964 * All values for *flags* are reserved for future usage, and must
1965 * be left at zero.
1966 *
1967 * This helper is available only if the kernel was compiled with
1968 * **CONFIG_XFRM** configuration option.
1969 * Return
1970 * 0 on success, or a negative error in case of failure.
1971 *
1972 * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
1973 * Description
1974 * Return a user or a kernel stack in the buffer provided by the
1975 * BPF program. To achieve this, the helper needs *ctx*, a pointer
1976 * to the context on which the tracing program is executed.
1977 * To store the stacktrace, the bpf program provides *buf* with
1978 * a nonnegative *size*.
1979 *
1980 * The last argument, *flags*, holds the number of stack frames to
1981 * skip (from 0 to 255), masked with
1982 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
1983 * the following flags:
1984 *
1985 * **BPF_F_USER_STACK**
1986 * Collect a user space stack instead of a kernel stack.
1987 * **BPF_F_USER_BUILD_ID**
1988 * Collect buildid+offset instead of ips for user stack,
1989 * only valid if **BPF_F_USER_STACK** is also specified.
1990 *
1991 * **bpf_get_stack**\ () can collect up to
1992 * **PERF_MAX_STACK_DEPTH** kernel and user frames, subject
1993 * to a sufficiently large buffer size. Note that
1994 * this limit can be controlled with the **sysctl** program, and
1995 * that it should be manually increased in order to profile long
1996 * user stacks (such as stacks for Java programs). To do so, use:
1997 *
1998 * ::
1999 *
2000 * # sysctl kernel.perf_event_max_stack=<new value>
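*
* A fragment collecting a user-space stack from a tracing program
* could look like this, assuming libbpf's **bpf_helpers.h** (*ctx* is
* the program's context).
*
* ::
*
*        __u64 ips[32];
*        long n;
*
*        n = bpf_get_stack(ctx, ips, sizeof(ips), BPF_F_USER_STACK);
*        if (n > 0) {
*                // n is a byte count: n / 8 user-space frames were stored.
*        }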
2001 * Return
2002 * A non-negative value equal to or less than *size* on success,
2003 * or a negative error in case of failure.
2004 *
2005 * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
2006 * Description
2007 * This helper is similar to **bpf_skb_load_bytes**\ () in that
2008 * it provides an easy way to load *len* bytes from *offset*
2009 * from the packet associated to *skb*, into the buffer pointed
2010 * by *to*. The difference to **bpf_skb_load_bytes**\ () is that
2011 * a fifth argument *start_header* exists in order to select a
2012 * base offset to start from. *start_header* can be one of:
2013 *
2014 * **BPF_HDR_START_MAC**
2015 * Base offset to load data from is *skb*'s mac header.
2016 * **BPF_HDR_START_NET**
2017 * Base offset to load data from is *skb*'s network header.
2018 *
2019 * In general, "direct packet access" is the preferred method to
2020 * access packet data; this helper, however, is particularly useful
2021 * in socket filters where *skb*\ **->data** does not always point
2022 * to the start of the mac header and where "direct packet access"
2023 * is not available.
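*
* For example, a socket filter could read the IPv4 header relative to
* the network header, wherever *skb*\ **->data** happens to point
* (fragment assuming libbpf's **bpf_helpers.h** and **<linux/ip.h>**).
*
* ::
*
*        struct iphdr iph;
*
*        if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
*                                        BPF_HDR_START_NET))
*                return 0;  // truncate (drop) the packet on failure
*        // iph.saddr and iph.daddr can now be inspected.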
2024 * Return
2025 * 0 on success, or a negative error in case of failure.
2026 *
2027 * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
2028 * Description
2029 * Do FIB lookup in kernel tables using parameters in *params*.
2030 * If lookup is successful and result shows packet is to be
2031 * forwarded, the neighbor tables are searched for the nexthop.
2032 * If successful (i.e., FIB lookup shows forwarding and nexthop
2033 * is resolved), the nexthop address is returned in ipv4_dst
2034 * or ipv6_dst based on family, smac is set to mac address of
2035 * egress device, dmac is set to nexthop mac address, rt_metric
2036 * is set to metric from route (IPv4/IPv6 only), and ifindex
2037 * is set to the device index of the nexthop from the FIB lookup.
2038 *
2039 * The *plen* argument is the size of the passed-in struct. The
2040 * *flags* argument can be a combination of one or more of the
2041 * following values:
2042 *
2043 * **BPF_FIB_LOOKUP_DIRECT**
2044 * Do a direct table lookup vs full lookup using FIB
2045 * rules.
2046 * **BPF_FIB_LOOKUP_OUTPUT**
2047 * Perform lookup from an egress perspective (default is
2048 * ingress).
2049 *
2050 * *ctx* is either **struct xdp_md** for XDP programs or
2051 * **struct sk_buff** for tc cls_act programs.
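*
* A condensed XDP forwarding sketch, assuming libbpf's
* **bpf_helpers.h** and **bpf_endian.h**, and that the Ethernet and
* IPv4 headers were already parsed, with bounds checks, into *eth*
* and *iph*, could look as follows.
*
* ::
*
*        struct bpf_fib_lookup p = {};
*        long rc;
*
*        p.family      = AF_INET;
*        p.l4_protocol = iph->protocol;
*        p.tot_len     = bpf_ntohs(iph->tot_len);
*        p.ipv4_src    = iph->saddr;
*        p.ipv4_dst    = iph->daddr;
*        p.ifindex     = ctx->ingress_ifindex;
*
*        rc = bpf_fib_lookup(ctx, &p, sizeof(p), 0);
*        if (rc == BPF_FIB_LKUP_RET_SUCCESS) {
*                __builtin_memcpy(eth->h_dest, p.dmac, ETH_ALEN);
*                __builtin_memcpy(eth->h_source, p.smac, ETH_ALEN);
*                return bpf_redirect(p.ifindex, 0);
*        }
*        return XDP_PASS;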
2052 * Return
2053 * * < 0 if any input argument is invalid
2054 * * 0 on success (packet is forwarded, nexthop neighbor exists)
2055 * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
2056 * packet is not forwarded or needs assist from full stack
2057 *
2058 * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
2059 * Description
2060 * Add an entry to, or update a sockhash *map* referencing sockets.
2061 * The *skops* is used as a new value for the entry associated to
2062 * *key*. *flags* is one of:
2063 *
2064 * **BPF_NOEXIST**
2065 * The entry for *key* must not exist in the map.
2066 * **BPF_EXIST**
2067 * The entry for *key* must already exist in the map.
2068 * **BPF_ANY**
2069 * No condition on the existence of the entry for *key*.
2070 *
2071 * If the *map* has eBPF programs (parser and verdict), those will
2072 * be inherited by the socket being added. If the socket is
2073 * already attached to eBPF programs, this results in an error.
2074 * Return
2075 * 0 on success, or a negative error in case of failure.
2076 *
2077 * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
2078 * Description
2079 * This helper is used in programs implementing policies at the
2080 * socket level. If the message *msg* is allowed to pass (i.e. if
2081 * the verdict eBPF program returns **SK_PASS**), redirect it to
2082 * the socket referenced by *map* (of type
2083 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
2084 * egress interfaces can be used for redirection. The
2085 * **BPF_F_INGRESS** value in *flags* is used to make the
2086 * distinction (ingress path is selected if the flag is present,
2087 * egress path otherwise). This is the only flag supported for now.
2088 * Return
2089 * **SK_PASS** on success, or **SK_DROP** on error.
2090 *
2091 * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
2092 * Description
2093 * This helper is used in programs implementing policies at the
2094 * skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
2095 * if the verdict eBPF program returns **SK_PASS**), redirect it
2096 * to the socket referenced by *map* (of type
2097 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
2098 * egress interfaces can be used for redirection. The
2099 * **BPF_F_INGRESS** value in *flags* is used to make the
2100 * distinction (ingress path is selected if the flag is present,
2101 * egress otherwise). This is the only flag supported for now.
2102 * Return
2103 * **SK_PASS** on success, or **SK_DROP** on error.
2104 *
2105 * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
2106 * Description
2107 * Encapsulate the packet associated to *skb* within a Layer 3
2108 * protocol header. This header is provided in the buffer at
2109 * address *hdr*, with *len* its size in bytes. *type* indicates
2110 * the protocol of the header and can be one of:
2111 *
2112 * **BPF_LWT_ENCAP_SEG6**
2113 * IPv6 encapsulation with Segment Routing Header
2114 * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
2115 * the IPv6 header is computed by the kernel.
2116 * **BPF_LWT_ENCAP_SEG6_INLINE**
2117 * Only works if *skb* contains an IPv6 packet. Insert a
2118 * Segment Routing Header (**struct ipv6_sr_hdr**) inside
2119 * the IPv6 header.
2120 * **BPF_LWT_ENCAP_IP**
2121 * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
2122 * must be IPv4 or IPv6, followed by zero or more
2123 * additional headers, up to **LWT_BPF_MAX_HEADROOM**
2124 * total bytes in all prepended headers. Please note that
2125 * if **skb_is_gso**\ (*skb*) is true, no more than two
2126 * headers can be prepended, and the inner header, if
2127 * present, should be either GRE or UDP/GUE.
2128 *
2129 * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
2130 * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
2131 * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
2132 * **BPF_PROG_TYPE_LWT_XMIT**.
2133 *
2134 * A call to this helper is susceptible to change the underlying
2135 * packet buffer. Therefore, at load time, all checks on pointers
2136 * previously done by the verifier are invalidated and must be
2137 * performed again, if the helper is used in combination with
2138 * direct packet access.
2139 * Return
2140 * 0 on success, or a negative error in case of failure.
2141 *
2142 * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
2143 * Description
2144 * Store *len* bytes from address *from* into the packet
2145 * associated to *skb*, at *offset*. Only the flags, tag and TLVs
2146 * inside the outermost IPv6 Segment Routing Header can be
2147 * modified through this helper.
2148 *
2149 * A call to this helper is susceptible to change the underlying
2150 * packet buffer. Therefore, at load time, all checks on pointers
2151 * previously done by the verifier are invalidated and must be
2152 * performed again, if the helper is used in combination with
2153 * direct packet access.
2154 * Return
2155 * 0 on success, or a negative error in case of failure.
2156 *
2157 * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
2158 * Description
2159 * Adjust the size allocated to TLVs in the outermost IPv6
2160 * Segment Routing Header contained in the packet associated to
2161 * *skb*, at position *offset* by *delta* bytes. Only offsets
2162 * after the segments are accepted. *delta* can be as well
2163 * positive (growing) as negative (shrinking).
2164 *
2165 * A call to this helper is susceptible to change the underlying
2166 * packet buffer. Therefore, at load time, all checks on pointers
2167 * previously done by the verifier are invalidated and must be
2168 * performed again, if the helper is used in combination with
2169 * direct packet access.
2170 * Return
2171 * 0 on success, or a negative error in case of failure.
2172 *
2173 * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
2174 * Description
2175 * Apply an IPv6 Segment Routing action of type *action* to the
2176 * packet associated to *skb*. Each action takes a parameter
2177 * contained at address *param*, and of length *param_len* bytes.
2178 * *action* can be one of:
2179 *
2180 * **SEG6_LOCAL_ACTION_END_X**
2181 * End.X action: Endpoint with Layer-3 cross-connect.
2182 * Type of *param*: **struct in6_addr**.
2183 * **SEG6_LOCAL_ACTION_END_T**
2184 * End.T action: Endpoint with specific IPv6 table lookup.
2185 * Type of *param*: **int**.
2186 * **SEG6_LOCAL_ACTION_END_B6**
2187 * End.B6 action: Endpoint bound to an SRv6 policy.
2188 * Type of *param*: **struct ipv6_sr_hdr**.
2189 * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
2190 * End.B6.Encap action: Endpoint bound to an SRv6
2191 * encapsulation policy.
2192 * Type of *param*: **struct ipv6_sr_hdr**.
2193 *
2194 * A call to this helper is susceptible to change the underlying
2195 * packet buffer. Therefore, at load time, all checks on pointers
2196 * previously done by the verifier are invalidated and must be
2197 * performed again, if the helper is used in combination with
2198 * direct packet access.
2199 * Return
2200 * 0 on success, or a negative error in case of failure.
2201 *
2202 * int bpf_rc_repeat(void *ctx)
2203 * Description
2204 * This helper is used in programs implementing IR decoding, to
2205 * report a successfully decoded repeat key message. This delays
2206 * the generation of a key up event for a previously generated
2207 * key down event.
2208 *
2209 * Some IR protocols like NEC have a special IR message for
2210 * repeating last button, for when a button is held down.
2211 *
2212 * The *ctx* should point to the lirc sample as passed into
2213 * the program.
2214 *
2215 * This helper is only available if the kernel was compiled with
2216 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2217 * "**y**".
2218 * Return
2219 * 0
2220 *
2221 * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
2222 * Description
2223 * This helper is used in programs implementing IR decoding, to
2224 * report a successfully decoded key press with *scancode*,
2225 * *toggle* value in the given *protocol*. The scancode will be
2226 * translated to a keycode using the rc keymap, and reported as
2227 * an input key down event. After a period a key up event is
2228 * generated. This period can be extended by calling either
2229 * **bpf_rc_keydown**\ () again with the same values, or calling
2230 * **bpf_rc_repeat**\ ().
2231 *
2232 * Some protocols include a toggle bit, in case the button was
2233 * released and pressed again between consecutive scancodes.
2234 *
2235 * The *ctx* should point to the lirc sample as passed into
2236 * the program.
2237 *
2238 * The *protocol* is the decoded protocol number (see
2239 * **enum rc_proto** for some predefined values).
2240 *
2241 * This helper is only available if the kernel was compiled with
2242 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2243 * "**y**".
2244 * Return
2245 * 0
2246 *
2247 * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
2248 * Description
2249 * Return the cgroup v2 id of the socket associated with the *skb*.
2250 * This is roughly similar to the **bpf_get_cgroup_classid**\ ()
2251 * helper for cgroup v1, by providing a tag (or identifier) that
2252 * can be matched on or used for map lookups e.g. to implement
2253 * policy. The cgroup v2 id of a given path in the hierarchy is
2254 * exposed in user space through the f_handle API in order to get
2255 * to the same 64-bit id.
2256 *
2257 * This helper can be used on TC egress path, but not on ingress,
2258 * and is available only if the kernel was compiled with the
2259 * **CONFIG_SOCK_CGROUP_DATA** configuration option.
2260 * Return
2261 * The id is returned or 0 in case the id could not be retrieved.
2262 *
2263 * u64 bpf_get_current_cgroup_id(void)
2264 * Return
2265 * A 64-bit integer containing the current cgroup id based
2266 * on the cgroup within which the current task is running.
2267 *
2268 * void *bpf_get_local_storage(void *map, u64 flags)
2269 * Description
2270 * Get the pointer to the local storage area.
2271 * The type and the size of the local storage are defined
2272 * by the *map* argument.
2273 * The *flags* meaning is specific for each map type,
2274 * and has to be 0 for cgroup local storage.
2275 *
2276 * Depending on the BPF program type, a local storage area
2277 * can be shared between multiple instances of the BPF program,
2278 * running simultaneously.
2279 *
2280 * Users have to take care of the synchronization themselves,
2281 * for example by using the **BPF_STX_XADD** instruction to alter
2282 * the shared data.
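*
* A small sketch for a cgroup skb program, assuming libbpf's
* **bpf_helpers.h** and BTF-style map definitions (names are
* illustrative), could be:
*
* ::
*
*        struct {
*                __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
*                __type(key, struct bpf_cgroup_storage_key);
*                __type(value, __u64);
*        } cg_storage SEC(".maps");
*
*        SEC("cgroup_skb/egress")
*        int count_bytes(struct __sk_buff *skb)
*        {
*                __u64 *bytes = bpf_get_local_storage(&cg_storage, 0);
*
*                // __sync_fetch_and_add() compiles to BPF_STX_XADD,
*                // giving the atomic update recommended above.
*                __sync_fetch_and_add(bytes, skb->len);
*                return 1;
*        }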
2283 * Return
2284 * A pointer to the local storage area.
2285 *
2286 * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
2287 * Description
2288 * Select a **SO_REUSEPORT** socket from a
2289 * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
2290 * It checks that the selected socket matches the incoming
2291 * request in the socket buffer.
2292 * Return
2293 * 0 on success, or a negative error in case of failure.
2294 *
2295 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
2296 * Description
2297 * Return id of cgroup v2 that is ancestor of cgroup associated
2298 * with the *skb* at the *ancestor_level*. The root cgroup is at
2299 * *ancestor_level* zero and each step down the hierarchy
2300 * increments the level. If *ancestor_level* == level of cgroup
2301 * associated with *skb*, then the return value will be the same
2302 * as that of **bpf_skb_cgroup_id**\ ().
2303 *
2304 * The helper is useful to implement policies based on cgroups
2305 * that are higher in the hierarchy than the immediate cgroup
2306 * associated with *skb*.
2307 *
2308 * The format of the returned id and the helper limitations are
2309 * the same as in **bpf_skb_cgroup_id**\ ().
2310 * Return
2311 * The id is returned or 0 in case the id could not be retrieved.
2312 *
2313 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
2314 * Description
2315 * Look for TCP socket matching *tuple*, optionally in a child
2316 * network namespace *netns*. The return value must be checked,
2317 * and if non-**NULL**, released via **bpf_sk_release**\ ().
2318 *
2319 * The *ctx* should point to the context of the program, such as
2320 * the skb or socket (depending on the hook in use). This is used
2321 * to determine the base network namespace for the lookup.
2322 *
2323 * *tuple_size* must be one of:
2324 *
2325 * **sizeof**\ (*tuple*\ **->ipv4**)
2326 * Look for an IPv4 socket.
2327 * **sizeof**\ (*tuple*\ **->ipv6**)
2328 * Look for an IPv6 socket.
2329 *
2330 * If the *netns* is a negative signed 32-bit integer, then the
2331 * socket lookup table in the netns associated with the *ctx*
2332 * will be used. For the TC hooks, this is the netns of the device
2333 * in the skb. For socket hooks, this is the netns of the socket.
2334 * If *netns* is any other signed 32-bit value greater than or
2335 * equal to zero then it specifies the ID of the netns relative to
2336 * the netns associated with the *ctx*. *netns* values beyond the
2337 * range of 32-bit integers are reserved for future use.
2338 *
2339 * All values for *flags* are reserved for future usage, and must
2340 * be left at zero.
2341 *
2342 * This helper is available only if the kernel was compiled with
2343 * **CONFIG_NET** configuration option.
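*
* A typical lookup/release sequence in a TC program could be sketched
* as follows (fragment assuming libbpf's **bpf_helpers.h**, with the
* tuple filled from the packet headers beforehand).
*
* ::
*
*        struct bpf_sock_tuple tuple = {};
*        struct bpf_sock *sk;
*
*        // ... fill tuple.ipv4 (saddr, daddr, sport, dport) ...
*        sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
*                               BPF_F_CURRENT_NETNS, 0);
*        if (sk) {
*                // ... inspect the socket ...
*                bpf_sk_release(sk);
*        }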
2344 * Return
2345 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
2346 * For sockets with reuseport option, the **struct bpf_sock**
2347 * result is from *reuse*\ **->socks**\ [] using the hash of the
2348 * tuple.
2349 *
2350 * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
2351 * Description
2352 * Look for UDP socket matching *tuple*, optionally in a child
2353 * network namespace *netns*. The return value must be checked,
2354 * and if non-**NULL**, released via **bpf_sk_release**\ ().
2355 *
2356 * The *ctx* should point to the context of the program, such as
2357 * the skb or socket (depending on the hook in use). This is used
2358 * to determine the base network namespace for the lookup.
2359 *
2360 * *tuple_size* must be one of:
2361 *
2362 * **sizeof**\ (*tuple*\ **->ipv4**)
2363 * Look for an IPv4 socket.
2364 * **sizeof**\ (*tuple*\ **->ipv6**)
2365 * Look for an IPv6 socket.
2366 *
2367 * If the *netns* is a negative signed 32-bit integer, then the
2368 * socket lookup table in the netns associated with the *ctx*
2369 * will be used. For the TC hooks, this is the netns of the device
2370 * in the skb. For socket hooks, this is the netns of the socket.
2371 * If *netns* is any other signed 32-bit value greater than or
2372 * equal to zero then it specifies the ID of the netns relative to
2373 * the netns associated with the *ctx*. *netns* values beyond the
2374 * range of 32-bit integers are reserved for future use.
2375 *
2376 * All values for *flags* are reserved for future usage, and must
2377 * be left at zero.
2378 *
2379 * This helper is available only if the kernel was compiled with
2380 * **CONFIG_NET** configuration option.
2381 * Return
2382 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
2383 * For sockets with reuseport option, the **struct bpf_sock**
2384 * result is from *reuse*\ **->socks**\ [] using the hash of the
2385 * tuple.
2386 *
2387 * int bpf_sk_release(struct bpf_sock *sock)
2388 * Description
2389 * Release the reference held by *sock*. *sock* must be a
2390 * non-**NULL** pointer that was returned from
2391 * **bpf_sk_lookup_xxx**\ ().
2392 * Return
2393 * 0 on success, or a negative error in case of failure.
2394 *
2395 * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
2396 * Description
2397 * Push an element *value* in *map*. *flags* is one of:
2398 *
2399 * **BPF_EXIST**
2400 * If the queue/stack is full, the oldest element is
2401 * removed to make room for this.
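*
* For instance, with a BTF-style queue definition, assuming libbpf's
* **bpf_helpers.h** (names are illustrative), a program could push
* values as sketched below.
*
* ::
*
*        struct {
*                __uint(type, BPF_MAP_TYPE_QUEUE);
*                __uint(max_entries, 1024);
*                __type(value, __u64);
*        } events SEC(".maps");
*
*        // In the program body:
*        __u64 v = 42;
*
*        // If the queue is full, drop the oldest element to make room.
*        bpf_map_push_elem(&events, &v, BPF_EXIST);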
2402 * Return
2403 * 0 on success, or a negative error in case of failure.
2404 *
2405 * int bpf_map_pop_elem(struct bpf_map *map, void *value)
2406 * Description
2407 * Pop an element from *map*.
2408 * Return
2409 * 0 on success, or a negative error in case of failure.
2410 *
2411 * int bpf_map_peek_elem(struct bpf_map *map, void *value)
2412 * Description
2413 * Get an element from *map* without removing it.
2414 * Return
2415 * 0 on success, or a negative error in case of failure.
2416 *
2417 * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
2418 * Description
2419 * For socket policies, insert *len* bytes into *msg* at offset
2420 * *start*.
2421 *
2422 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
2423 * *msg* it may want to insert metadata or options into the *msg*.
2424 * This can later be read and used by any of the lower layer BPF
2425 * hooks.
2426 *
2427 * This helper may fail under memory pressure (if an allocation
2428 * fails); in that case, the BPF program will get an appropriate
2429 * error and will need to handle it.
2430 * Return
2431 * 0 on success, or a negative error in case of failure.
2432 *
2433 * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
2434 * Description
2435 * Remove *len* bytes from *msg*, starting at byte *start*.
2436 * This may result in **ENOMEM** errors in certain situations, if
2437 * an allocation and copy are required due to a full ring buffer.
2438 * However, the helper will try to avoid doing the allocation
2439 * if possible. Other errors can occur if input parameters are
2440 * invalid, either because the *start* byte is not a valid part of
2441 * the *msg* payload and/or the *len* value is too large.
2442 * Return
2443 * 0 on success, or a negative error in case of failure.
2444 *
2445 * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
2446 * Description
2447 * This helper is used in programs implementing IR decoding, to
2448 * report a successfully decoded pointer movement.
2449 *
2450 * The *ctx* should point to the lirc sample as passed into
2451 * the program.
2452 *
2453 * This helper is only available if the kernel was compiled with
2454 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2455 * "**y**".
2456 * Return
2457 * 0
2458 *
2459 * int bpf_spin_lock(struct bpf_spin_lock *lock)
2460 * Description
2461 * Acquire a spinlock represented by the pointer *lock*, which is
2462 * stored as part of a value of a map. Taking the lock allows one to
2463 * safely update the rest of the fields in that value. The
2464 * spinlock can (and must) later be released with a call to
2465 * **bpf_spin_unlock**\ (\ *lock*\ ).
2466 *
2467 * Spinlocks in BPF programs come with a number of restrictions
2468 * and constraints:
2469 *
2470 * * **bpf_spin_lock** objects are only allowed inside maps of
2471 * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
2472 * list could be extended in the future).
2473 * * BTF description of the map is mandatory.
2474 * * The BPF program can take ONE lock at a time, since taking two
2475 * or more could cause deadlocks.
2476 * * Only one **struct bpf_spin_lock** is allowed per map element.
2477 * * When the lock is taken, calls (either BPF to BPF or helpers)
2478 * are not allowed.
2479 * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
2480 * allowed inside a spinlock-ed region.
2481 * * The BPF program MUST call **bpf_spin_unlock**\ () to release
2482 * the lock, on all execution paths, before it returns.
2483 * * The BPF program can access **struct bpf_spin_lock** only via
2484 * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
2485 * helpers. Loading or storing data into the **struct
2486 * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
2487 * * To use the **bpf_spin_lock**\ () helper, the BTF description
2488 * of the map value must be a struct and have **struct
2489 * bpf_spin_lock** *anyname*\ **;** field at the top level.
2490 * Nested lock inside another struct is not allowed.
2491 * * The **struct bpf_spin_lock** *lock* field in a map value must
2492 * be aligned on a multiple of 4 bytes in that value.
2493 * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
2494 * the **bpf_spin_lock** field to user space.
2495 * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
2496 * a BPF program, do not update the **bpf_spin_lock** field.
2497 * * **bpf_spin_lock** cannot be on the stack or inside a
2498 * networking packet (it can only be inside a map value).
2499 * * **bpf_spin_lock** is available to root only.
2500 * * Tracing programs and socket filter programs cannot use
2501 * **bpf_spin_lock**\ () due to insufficient preemption checks
2502 * (but this may change in the future).
2503 * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
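*
* Given these constraints, a minimal sketch could look as follows,
* assuming libbpf's **bpf_helpers.h** and BTF-style map definitions
* (names are illustrative).
*
* ::
*
*        struct val {
*                struct bpf_spin_lock lock;
*                __u64 counter;
*        };
*
*        struct {
*                __uint(type, BPF_MAP_TYPE_ARRAY);
*                __uint(max_entries, 1);
*                __type(key, __u32);
*                __type(value, struct val);
*        } counters SEC(".maps");
*
*        SEC("classifier")
*        int bump(struct __sk_buff *skb)
*        {
*                __u32 key = 0;
*                struct val *v = bpf_map_lookup_elem(&counters, &key);
*
*                if (v) {
*                        bpf_spin_lock(&v->lock);
*                        v->counter++;
*                        bpf_spin_unlock(&v->lock);
*                }
*                return 0;
*        }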
2504 * Return
2505 * 0
2506 *
2507 * int bpf_spin_unlock(struct bpf_spin_lock *lock)
2508 * Description
2509 * Release the *lock* previously locked by a call to
2510 * **bpf_spin_lock**\ (\ *lock*\ ).
2511 * Return
2512 * 0
2513 *
2514 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
2515 * Description
2516 * This helper gets a **struct bpf_sock** pointer such
2517 * that all the fields in this **bpf_sock** can be accessed.
2518 * Return
2519 * A **struct bpf_sock** pointer on success, or **NULL** in
2520 * case of failure.
2521 *
2522 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
2523 * Description
2524 * This helper gets a **struct bpf_tcp_sock** pointer from a
2525 * **struct bpf_sock** pointer.
2526 * Return
2527 * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
2528 * case of failure.
2529 *
2530 * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
2531 * Description
2532 * Set ECN (Explicit Congestion Notification) field of IP header
2533 * to **CE** (Congestion Encountered) if current value is **ECT**
2534 * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
2535 * and IPv4.
2536 * Return
2537 * 1 if the **CE** flag is set (either by the current helper call
2538 * or because it was already present), 0 if it is not set.
2539 *
2540 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
2541 * Description
2542 * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
2543 * **bpf_sk_release**\ () is unnecessary and not allowed.
2544 * Return
2545 * A **struct bpf_sock** pointer on success, or **NULL** in
2546 * case of failure.
2547 *
2548 * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
2549 * Description
2550 * Look for TCP socket matching *tuple*, optionally in a child
2551 * network namespace *netns*. The return value must be checked,
2552 * and if non-**NULL**, released via **bpf_sk_release**\ ().
2553 *
2554 * This function is identical to **bpf_sk_lookup_tcp**\ (), except
2555 * that it also returns timewait or request sockets. Use
2556 * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
2557 * full structure.
2558 *
2559 * This helper is available only if the kernel was compiled with
2560 * **CONFIG_NET** configuration option.
2561 * Return
2562 * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
2563 * For sockets with reuseport option, the **struct bpf_sock**
2564 * result is from *reuse*\ **->socks**\ [] using the hash of the
2565 * tuple.
2566 *
2567 * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
2568 * Description
2569 * Check whether *iph* and *th* contain a valid SYN cookie ACK for
2570 * the listening socket in *sk*.
2571 *
2572 * *iph* points to the start of the IPv4 or IPv6 header, while
2573 * *iph_len* contains **sizeof**\ (**struct iphdr**) or
2574 * **sizeof**\ (**struct ip6hdr**).
2575 *
2576 * *th* points to the start of the TCP header, while *th_len*
2577 * contains **sizeof**\ (**struct tcphdr**).
2578 *
2579 * Return
2580 * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
2581 * error otherwise.
2582 *
2583 * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
2584 * Description
2585 * Get the name of the sysctl in /proc/sys/ and copy it into
2586 * the buffer *buf* of size *buf_len* provided by the program.
2587 *
2588 * The buffer is always NUL terminated, unless it's zero-sized.
2589 *
2590 * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is
2591 * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name
2592 * only (e.g. "tcp_mem").
2593 * Return
2594 * Number of characters copied (not including the trailing NUL).
2595 *
2596 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
2597 * truncated name in this case).
2598 *
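* For illustration, a sysctl program might fetch the base name as
* follows (the buffer size and the "allow" return value are arbitrary
* choices, not requirements):
*
* ::
*
*         char name[64];
*         int len;
*
*         len = bpf_sysctl_get_name(ctx, name, sizeof(name),
*                                   BPF_F_SYSCTL_BASE_NAME);
*         if (len < 0)
*                 return 1;       // could not get the name; allow
*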
2599 * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
2600 * Description
2601 * Get the current value of the sysctl as it is presented in
2602 * /proc/sys (including newline, etc.), and copy it as a string
2603 * into the buffer *buf* of size *buf_len* provided by the program.
2604 *
2605 * The whole value is copied, regardless of the file position at
2606 * which user space issued e.g. sys_read.
2607 *
2608 * The buffer is always NUL terminated, unless it's zero-sized.
2609 * Return
2610 * Number of characters copied (not including the trailing NUL).
2611 *
2612 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
2613 * the truncated value in this case).
2614 *
2615 * **-EINVAL** if the current value was unavailable, e.g. because
2616 * the sysctl is uninitialized and reading it returns -EIO.
2617 *
2618 * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
2619 * Description
2620 * Get the new value being written by user space to the sysctl
2621 * (before the actual write happens) and copy it as a string into
2622 * the buffer *buf* of size *buf_len* provided by the program.
2623 *
2624 * User space may write the new value at a file position > 0.
2625 *
2626 * The buffer is always NUL terminated, unless it's zero-sized.
2627 * Return
2628 * Number of characters copied (not including the trailing NUL).
2629 *
2630 * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
2631 * the truncated value in this case).
2632 *
2633 * **-EINVAL** if sysctl is being read.
2634 *
2635 * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
2636 * Description
2637 * Override the new value being written by user space to the sysctl
2638 * with the value provided by the program in buffer *buf* of size *buf_len*.
2639 *
2640 * *buf* should contain a string in the same form as provided by user
2641 * space on sysctl write.
2642 *
2643 * User space may write the new value at a file position > 0. To
2644 * override the whole sysctl value, the file position should be set to zero.
2645 * Return
2646 * 0 on success.
2647 *
2648 * **-E2BIG** if the *buf_len* is too big.
2649 *
2650 * **-EINVAL** if sysctl is being read.
2651 *
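* A minimal sketch combining the two helpers above to inspect and then
* override a value being written (the replacement string is purely
* illustrative):
*
* ::
*
*         char val[16];
*
*         if (bpf_sysctl_get_new_value(ctx, val, sizeof(val)) >= 0)
*                 // force the value being written to "1"
*                 bpf_sysctl_set_new_value(ctx, "1", 1);
*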
2652 * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
2653 * Description
2654 * Convert the initial part of the string from buffer *buf* of
2655 * size *buf_len* to a long integer according to the given base
2656 * and save the result in *res*.
2657 *
2658 * The string may begin with an arbitrary amount of white space
2659 * (as determined by **isspace**\ (3)) followed by a single
2660 * optional '**-**' sign.
2661 *
2662 * The five least significant bits of *flags* encode the base;
2663 * the other bits are currently unused.
2664 *
2665 * The base must be 8, 10, 16, or 0 to detect it automatically,
2666 * as with user space **strtol**\ (3).
2667 * Return
2668 * Number of characters consumed on success. Must be positive but
2669 * no more than *buf_len*.
2670 *
2671 * **-EINVAL** if no valid digits were found or unsupported base
2672 * was provided.
2673 *
2674 * **-ERANGE** if resulting value was out of range.
2675 *
2676 * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
2677 * Description
2678 * Convert the initial part of the string from buffer *buf* of
2679 * size *buf_len* to an unsigned long integer according to the
2680 * given base and save the result in *res*.
2681 *
2682 * The string may begin with an arbitrary amount of white space
2683 * (as determined by **isspace**\ (3)).
2684 *
2685 * The five least significant bits of *flags* encode the base;
2686 * the other bits are currently unused.
2687 *
2688 * The base must be 8, 10, 16, or 0 to detect it automatically,
2689 * as with user space **strtoul**\ (3).
2690 * Return
2691 * Number of characters consumed on success. Must be positive but
2692 * no more than *buf_len*.
2693 *
2694 * **-EINVAL** if no valid digits were found or unsupported base
2695 * was provided.
2696 *
2697 * **-ERANGE** if resulting value was out of range.
2698 *
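* As an example of the intended use (shown here for a sysctl program;
* the buffer size is arbitrary), a newly written value can be parsed
* with **bpf_strtol**\ ():
*
* ::
*
*         char val[8];
*         long num;
*         int len;
*
*         len = bpf_sysctl_get_new_value(ctx, val, sizeof(val));
*         if (len > 0 && bpf_strtol(val, len, 0, &num) > 0) {
*                 // num holds the parsed integer; the base was
*                 // auto-detected because flags == 0
*         }
*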
2699 * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
2700 * Description
2701 * Get a bpf-local-storage from a *sk*.
2702 *
2703 * Logically, it can be thought of as getting the value from
2704 * a *map* with *sk* as the **key**. From this
2705 * perspective, the usage is not much different from
2706 * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*), except that this
2707 * helper enforces that the key must be a full socket and that the
2708 * map must be of type **BPF_MAP_TYPE_SK_STORAGE**.
2709 *
2710 * Underneath, the value is stored locally at *sk* instead of
2711 * the *map*. The *map* is used as the bpf-local-storage
2712 * "type". The bpf-local-storage "type" (i.e. the *map*) is
2713 * searched against all bpf-local-storages residing at *sk*.
2714 *
2715 * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
2716 * used such that a new bpf-local-storage will be
2717 * created if one does not exist. *value* can be used
2718 * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
2719 * the initial value of a bpf-local-storage. If *value* is
2720 * **NULL**, the new bpf-local-storage will be zero initialized.
2721 * Return
2722 * A bpf-local-storage pointer is returned on success.
2723 *
2724 * **NULL** if not found or there was an error in adding
2725 * a new bpf-local-storage.
2726 *
2727 * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
2728 * Description
2729 * Delete a bpf-local-storage from a *sk*.
2730 * Return
2731 * 0 on success.
2732 *
2733 * **-ENOENT** if the bpf-local-storage cannot be found.
2734 *
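* An illustrative sketch, assuming *sk_stats_map* is a
* **BPF_MAP_TYPE_SK_STORAGE** map whose value is the structure below and
* *sk* is a full socket available to the program:
*
* ::
*
*         struct pkt_stats {
*                 __u64 pkts;
*         };
*
*         struct pkt_stats *stats;
*
*         stats = bpf_sk_storage_get(&sk_stats_map, sk, NULL,
*                                    BPF_SK_STORAGE_GET_F_CREATE);
*         if (stats)
*                 stats->pkts++;
*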
2735 * int bpf_send_signal(u32 sig)
2736 * Description
2737 * Send signal *sig* to the process of the current task.
2738 * The signal may be delivered to any of this process's threads.
2739 * Return
2740 * 0 on success or successfully queued.
2741 *
2742 * **-EBUSY** if the work queue under NMI is full.
2743 *
2744 * **-EINVAL** if *sig* is invalid.
2745 *
2746 * **-EPERM** if no permission to send the *sig*.
2747 *
2748 * **-EAGAIN** if the bpf program can try again.
2749 *
2750 * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
2751 * Description
2752 * Try to issue a SYN cookie for the packet with corresponding
2753 * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
2754 *
2755 * *iph* points to the start of the IPv4 or IPv6 header, while
2756 * *iph_len* contains **sizeof**\ (**struct iphdr**) or
2757 * **sizeof**\ (**struct ip6hdr**).
2758 *
2759 * *th* points to the start of the TCP header, while *th_len*
2760 * contains the length of the TCP header.
2761 *
2762 * Return
2763 * On success, the lower 32 bits hold the generated SYN cookie,
2764 * followed by 16 bits which hold the MSS value for that cookie;
2765 * the top 16 bits are unused.
2766 *
2767 * On failure, the returned value is one of the following:
2768 *
2769 * **-EINVAL** SYN cookie cannot be issued due to error
2770 *
2771 * **-ENOENT** SYN cookie should not be issued (no SYN flood)
2772 *
2773 * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies
2774 *
2775 * **-EPROTONOSUPPORT** IP packet version is not 4 or 6
2776 *
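* A sketch of decoding the return value (the header pointers are assumed
* to have been validated as described above):
*
* ::
*
*         s64 ret;
*
*         ret = bpf_tcp_gen_syncookie(sk, iph, sizeof(*iph),
*                                     tcph, sizeof(*tcph));
*         if (ret >= 0) {
*                 __u32 cookie = (__u32)ret;
*                 __u16 mss = (__u16)(ret >> 32);
*                 // use cookie and mss when building the SYN-ACK
*         }
*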
2777 * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
2778 * Description
2779 * Write raw *data* blob into a special BPF perf event held by
2780 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
2781 * event must have the following attributes: **PERF_SAMPLE_RAW**
2782 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
2783 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
2784 *
2785 * The *flags* are used to indicate the index in *map* for which
2786 * the value must be put, masked with **BPF_F_INDEX_MASK**.
2787 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
2788 * to indicate that the index of the current CPU core should be
2789 * used.
2790 *
2791 * The value to write, of *size*, is passed through the eBPF
2792 * stack and pointed to by *data*.
2793 *
2794 * *ctx* is a pointer to the in-kernel **struct sk_buff**.
2795 *
2796 * This helper is similar to **bpf_perf_event_output**\ () but
2797 * restricted to raw_tracepoint bpf programs.
2798 * Return
2799 * 0 on success, or a negative error in case of failure.
2800 *
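* A minimal sketch, assuming *skb* was taken from the raw tracepoint
* arguments and *events* is a **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map:
*
* ::
*
*         struct {
*                 __u32 wire_len;
*         } sample = { .wire_len = 0 };   // illustrative payload
*
*         bpf_skb_output(skb, &events, BPF_F_CURRENT_CPU,
*                        &sample, sizeof(sample));
*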
2801 * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
2802 * Description
2803 * Safely attempt to read *size* bytes from user space address
2804 * *unsafe_ptr* and store the data in *dst*.
2805 * Return
2806 * 0 on success, or a negative error in case of failure.
2807 *
2808 * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
2809 * Description
2810 * Safely attempt to read *size* bytes from kernel space address
2811 * *unsafe_ptr* and store the data in *dst*.
2812 * Return
2813 * 0 on success, or a negative error in case of failure.
2814 *
2815 * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
2816 * Description
2817 * Copy a NUL terminated string from an unsafe user address
2818 * *unsafe_ptr* to *dst*. The *size* should include the
2819 * terminating NUL byte. In case the string length is smaller than
2820 * *size*, the target is not padded with further NUL bytes. If the
2821 * string length is larger than *size*, just *size*-1 bytes are
2822 * copied and the last byte is set to NUL.
2823 *
2824 * On success, the length of the copied string is returned. This
2825 * makes this helper useful in tracing programs for reading
2826 * strings and, more importantly, to get their length at runtime. See
2827 * the following snippet:
2828 *
2829 * ::
2830 *
2831 * SEC("kprobe/sys_open")
2832 * void bpf_sys_open(struct pt_regs *ctx)
2833 * {
2834 * char buf[PATHLEN]; // PATHLEN is defined to 256
2835 * int res = bpf_probe_read_user_str(buf, sizeof(buf),
2836 * ctx->di);
2837 *
2838 * // Consume buf, for example push it to
2839 * // userspace via bpf_perf_event_output(); we
2840 * // can use res (the string length) as event
2841 * // size, after checking its boundaries.
2842 * }
2843 *
2844 * In comparison, using the **bpf_probe_read_user**\ () helper here
2845 * instead to read the string would require estimating the length
2846 * at compile time, and would often result in copying more memory
2847 * than necessary.
2848 *
2849 * Another useful use case is parsing individual process
2850 * arguments or individual environment variables by navigating
2851 * *current*\ **->mm->arg_start** and *current*\
2852 * **->mm->env_start**: using this helper and the return value,
2853 * one can quickly iterate at the right offset of the memory area.
2854 * Return
2855 * On success, the strictly positive length of the string,
2856 * including the trailing NUL character. On error, a negative
2857 * value.
2858 *
2859 * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
2860 * Description
2861 * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
2862 * to *dst*. Same semantics as with bpf_probe_read_user_str() apply.
2863 * Return
2864 * On success, the strictly positive length of the string, including
2865 * the trailing NUL character. On error, a negative value.
2866 *
2867 * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
2868 * Description
2869 * Send out a tcp-ack. *tp* is the in-kernel struct tcp_sock.
2870 * *rcv_nxt* is the ack_seq to be sent out.
2871 * Return
2872 * 0 on success, or a negative error in case of failure.
2873 *
2874 * int bpf_send_signal_thread(u32 sig)
2875 * Description
2876 * Send signal *sig* to the thread corresponding to the current task.
2877 * Return
2878 * 0 on success or successfully queued.
2879 *
2880 * **-EBUSY** if the work queue under NMI is full.
2881 *
2882 * **-EINVAL** if *sig* is invalid.
2883 *
2884 * **-EPERM** if no permission to send the *sig*.
2885 *
2886 * **-EAGAIN** if the bpf program can try again.
2887 *
2888 * u64 bpf_jiffies64(void)
2889 * Description
2890 * Obtain the 64-bit jiffies value.
2891 * Return
2892 * The 64-bit jiffies value.
2893 */
2894 #define __BPF_FUNC_MAPPER(FN) \
2895 FN(unspec), \
2896 FN(map_lookup_elem), \
2897 FN(map_update_elem), \
2898 FN(map_delete_elem), \
2899 FN(probe_read), \
2900 FN(ktime_get_ns), \
2901 FN(trace_printk), \
2902 FN(get_prandom_u32), \
2903 FN(get_smp_processor_id), \
2904 FN(skb_store_bytes), \
2905 FN(l3_csum_replace), \
2906 FN(l4_csum_replace), \
2907 FN(tail_call), \
2908 FN(clone_redirect), \
2909 FN(get_current_pid_tgid), \
2910 FN(get_current_uid_gid), \
2911 FN(get_current_comm), \
2912 FN(get_cgroup_classid), \
2913 FN(skb_vlan_push), \
2914 FN(skb_vlan_pop), \
2915 FN(skb_get_tunnel_key), \
2916 FN(skb_set_tunnel_key), \
2917 FN(perf_event_read), \
2918 FN(redirect), \
2919 FN(get_route_realm), \
2920 FN(perf_event_output), \
2921 FN(skb_load_bytes), \
2922 FN(get_stackid), \
2923 FN(csum_diff), \
2924 FN(skb_get_tunnel_opt), \
2925 FN(skb_set_tunnel_opt), \
2926 FN(skb_change_proto), \
2927 FN(skb_change_type), \
2928 FN(skb_under_cgroup), \
2929 FN(get_hash_recalc), \
2930 FN(get_current_task), \
2931 FN(probe_write_user), \
2932 FN(current_task_under_cgroup), \
2933 FN(skb_change_tail), \
2934 FN(skb_pull_data), \
2935 FN(csum_update), \
2936 FN(set_hash_invalid), \
2937 FN(get_numa_node_id), \
2938 FN(skb_change_head), \
2939 FN(xdp_adjust_head), \
2940 FN(probe_read_str), \
2941 FN(get_socket_cookie), \
2942 FN(get_socket_uid), \
2943 FN(set_hash), \
2944 FN(setsockopt), \
2945 FN(skb_adjust_room), \
2946 FN(redirect_map), \
2947 FN(sk_redirect_map), \
2948 FN(sock_map_update), \
2949 FN(xdp_adjust_meta), \
2950 FN(perf_event_read_value), \
2951 FN(perf_prog_read_value), \
2952 FN(getsockopt), \
2953 FN(override_return), \
2954 FN(sock_ops_cb_flags_set), \
2955 FN(msg_redirect_map), \
2956 FN(msg_apply_bytes), \
2957 FN(msg_cork_bytes), \
2958 FN(msg_pull_data), \
2959 FN(bind), \
2960 FN(xdp_adjust_tail), \
2961 FN(skb_get_xfrm_state), \
2962 FN(get_stack), \
2963 FN(skb_load_bytes_relative), \
2964 FN(fib_lookup), \
2965 FN(sock_hash_update), \
2966 FN(msg_redirect_hash), \
2967 FN(sk_redirect_hash), \
2968 FN(lwt_push_encap), \
2969 FN(lwt_seg6_store_bytes), \
2970 FN(lwt_seg6_adjust_srh), \
2971 FN(lwt_seg6_action), \
2972 FN(rc_repeat), \
2973 FN(rc_keydown), \
2974 FN(skb_cgroup_id), \
2975 FN(get_current_cgroup_id), \
2976 FN(get_local_storage), \
2977 FN(sk_select_reuseport), \
2978 FN(skb_ancestor_cgroup_id), \
2979 FN(sk_lookup_tcp), \
2980 FN(sk_lookup_udp), \
2981 FN(sk_release), \
2982 FN(map_push_elem), \
2983 FN(map_pop_elem), \
2984 FN(map_peek_elem), \
2985 FN(msg_push_data), \
2986 FN(msg_pop_data), \
2987 FN(rc_pointer_rel), \
2988 FN(spin_lock), \
2989 FN(spin_unlock), \
2990 FN(sk_fullsock), \
2991 FN(tcp_sock), \
2992 FN(skb_ecn_set_ce), \
2993 FN(get_listener_sock), \
2994 FN(skc_lookup_tcp), \
2995 FN(tcp_check_syncookie), \
2996 FN(sysctl_get_name), \
2997 FN(sysctl_get_current_value), \
2998 FN(sysctl_get_new_value), \
2999 FN(sysctl_set_new_value), \
3000 FN(strtol), \
3001 FN(strtoul), \
3002 FN(sk_storage_get), \
3003 FN(sk_storage_delete), \
3004 FN(send_signal), \
3005 FN(tcp_gen_syncookie), \
3006 FN(skb_output), \
3007 FN(probe_read_user), \
3008 FN(probe_read_kernel), \
3009 FN(probe_read_user_str), \
3010 FN(probe_read_kernel_str), \
3011 FN(tcp_send_ack), \
3012 FN(send_signal_thread), \
3013 FN(jiffies64),
3014
3015 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
3016 * function eBPF program intends to call
3017 */
3018 #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
3019 enum bpf_func_id {
3020 __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
3021 __BPF_FUNC_MAX_ID,
3022 };
3023 #undef __BPF_ENUM_FN
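
/* Illustration (a sketch, not additional ABI): a BPF_CALL instruction
 * selects a helper by placing its bpf_func_id value in 'imm', so a call
 * to bpf_map_lookup_elem() could be encoded as:
 *
 *      struct bpf_insn call = {
 *              .code    = BPF_JMP | BPF_CALL,
 *              .dst_reg = 0,
 *              .src_reg = 0,
 *              .off     = 0,
 *              .imm     = BPF_FUNC_map_lookup_elem,
 *      };
 */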
3024
3025 /* All flags used by eBPF helper functions, placed here. */
3026
3027 /* BPF_FUNC_skb_store_bytes flags. */
3028 #define BPF_F_RECOMPUTE_CSUM (1ULL << 0)
3029 #define BPF_F_INVALIDATE_HASH (1ULL << 1)
3030
3031 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
3032 * First 4 bits are for passing the header field size.
3033 */
3034 #define BPF_F_HDR_FIELD_MASK 0xfULL
3035
3036 /* BPF_FUNC_l4_csum_replace flags. */
3037 #define BPF_F_PSEUDO_HDR (1ULL << 4)
3038 #define BPF_F_MARK_MANGLED_0 (1ULL << 5)
3039 #define BPF_F_MARK_ENFORCE (1ULL << 6)
3040
3041 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
3042 #define BPF_F_INGRESS (1ULL << 0)
3043
3044 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
3045 #define BPF_F_TUNINFO_IPV6 (1ULL << 0)
3046
3047 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
3048 #define BPF_F_SKIP_FIELD_MASK 0xffULL
3049 #define BPF_F_USER_STACK (1ULL << 8)
3050 /* flags used by BPF_FUNC_get_stackid only. */
3051 #define BPF_F_FAST_STACK_CMP (1ULL << 9)
3052 #define BPF_F_REUSE_STACKID (1ULL << 10)
3053 /* flags used by BPF_FUNC_get_stack only. */
3054 #define BPF_F_USER_BUILD_ID (1ULL << 11)
3055
3056 /* BPF_FUNC_skb_set_tunnel_key flags. */
3057 #define BPF_F_ZERO_CSUM_TX (1ULL << 1)
3058 #define BPF_F_DONT_FRAGMENT (1ULL << 2)
3059 #define BPF_F_SEQ_NUMBER (1ULL << 3)
3060
3061 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
3062 * BPF_FUNC_perf_event_read_value flags.
3063 */
3064 #define BPF_F_INDEX_MASK 0xffffffffULL
3065 #define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
3066 /* BPF_FUNC_perf_event_output for sk_buff input context. */
3067 #define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
3068
3069 /* Current network namespace */
3070 #define BPF_F_CURRENT_NETNS (-1L)
3071
3072 /* BPF_FUNC_skb_adjust_room flags. */
3073 #define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0)
3074
3075 #define BPF_ADJ_ROOM_ENCAP_L2_MASK 0xff
3076 #define BPF_ADJ_ROOM_ENCAP_L2_SHIFT 56
3077
3078 #define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1)
3079 #define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2)
3080 #define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3)
3081 #define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4)
3082 #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \
3083 BPF_ADJ_ROOM_ENCAP_L2_MASK) \
3084 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
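
/* For example (illustrative only), encapsulating with an outer IPv4/GRE
 * header preceded by a 14-byte L2 (Ethernet) header could pass
 *
 *      BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | BPF_F_ADJ_ROOM_ENCAP_L4_GRE |
 *      BPF_F_ADJ_ROOM_ENCAP_L2(14)
 *
 * as the flags argument of the bpf_skb_adjust_room() helper.
 */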
3085
3086 /* BPF_FUNC_sysctl_get_name flags. */
3087 #define BPF_F_SYSCTL_BASE_NAME (1ULL << 0)
3088
3089 /* BPF_FUNC_sk_storage_get flags */
3090 #define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0)
3091
3092 /* Mode for BPF_FUNC_skb_adjust_room helper. */
3093 enum bpf_adj_room_mode {
3094 BPF_ADJ_ROOM_NET,
3095 BPF_ADJ_ROOM_MAC,
3096 };
3097
3098 /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
3099 enum bpf_hdr_start_off {
3100 BPF_HDR_START_MAC,
3101 BPF_HDR_START_NET,
3102 };
3103
3104 /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
3105 enum bpf_lwt_encap_mode {
3106 BPF_LWT_ENCAP_SEG6,
3107 BPF_LWT_ENCAP_SEG6_INLINE,
3108 BPF_LWT_ENCAP_IP,
3109 };
3110
3111 #define __bpf_md_ptr(type, name) \
3112 union { \
3113 type name; \
3114 __u64 :64; \
3115 } __attribute__((aligned(8)))
3116
3117 /* user accessible mirror of in-kernel sk_buff.
3118 * new fields can only be added to the end of this structure
3119 */
3120 struct __sk_buff {
3121 __u32 len;
3122 __u32 pkt_type;
3123 __u32 mark;
3124 __u32 queue_mapping;
3125 __u32 protocol;
3126 __u32 vlan_present;
3127 __u32 vlan_tci;
3128 __u32 vlan_proto;
3129 __u32 priority;
3130 __u32 ingress_ifindex;
3131 __u32 ifindex;
3132 __u32 tc_index;
3133 __u32 cb[5];
3134 __u32 hash;
3135 __u32 tc_classid;
3136 __u32 data;
3137 __u32 data_end;
3138 __u32 napi_id;
3139
3140 /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
3141 __u32 family;
3142 __u32 remote_ip4; /* Stored in network byte order */
3143 __u32 local_ip4; /* Stored in network byte order */
3144 __u32 remote_ip6[4]; /* Stored in network byte order */
3145 __u32 local_ip6[4]; /* Stored in network byte order */
3146 __u32 remote_port; /* Stored in network byte order */
3147 __u32 local_port; /* stored in host byte order */
3148 /* ... here. */
3149
3150 __u32 data_meta;
3151 __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
3152 __u64 tstamp;
3153 __u32 wire_len;
3154 __u32 gso_segs;
3155 __bpf_md_ptr(struct bpf_sock *, sk);
3156 };
3157
3158 struct bpf_tunnel_key {
3159 __u32 tunnel_id;
3160 union {
3161 __u32 remote_ipv4;
3162 __u32 remote_ipv6[4];
3163 };
3164 __u8 tunnel_tos;
3165 __u8 tunnel_ttl;
3166 __u16 tunnel_ext; /* Padding, future use. */
3167 __u32 tunnel_label;
3168 };
3169
3170 /* user accessible mirror of in-kernel xfrm_state.
3171 * new fields can only be added to the end of this structure
3172 */
3173 struct bpf_xfrm_state {
3174 __u32 reqid;
3175 __u32 spi; /* Stored in network byte order */
3176 __u16 family;
3177 __u16 ext; /* Padding, future use. */
3178 union {
3179 __u32 remote_ipv4; /* Stored in network byte order */
3180 __u32 remote_ipv6[4]; /* Stored in network byte order */
3181 };
3182 };
3183
3184 /* Generic BPF return codes which all BPF program types may support.
3185 * The values are binary compatible with their TC_ACT_* counterparts to
3186 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
3187 * programs.
3188 *
3189 * XDP is handled separately, see XDP_*.
3190 */
3191 enum bpf_ret_code {
3192 BPF_OK = 0,
3193 /* 1 reserved */
3194 BPF_DROP = 2,
3195 /* 3-6 reserved */
3196 BPF_REDIRECT = 7,
3197 /* >127 are reserved for prog type specific return codes.
3198 *
3199 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
3200 * BPF_PROG_TYPE_LWT_XMIT to indicate that the skb had been
3201 * changed and should be routed based on its new L3 header.
3202 * (This is an L3 redirect, as opposed to L2 redirect
3203 * represented by BPF_REDIRECT above).
3204 */
3205 BPF_LWT_REROUTE = 128,
3206 };
3207
3208 struct bpf_sock {
3209 __u32 bound_dev_if;
3210 __u32 family;
3211 __u32 type;
3212 __u32 protocol;
3213 __u32 mark;
3214 __u32 priority;
3215 /* IP address also allows 1- and 2-byte access */
3216 __u32 src_ip4;
3217 __u32 src_ip6[4];
3218 __u32 src_port; /* host byte order */
3219 __u32 dst_port; /* network byte order */
3220 __u32 dst_ip4;
3221 __u32 dst_ip6[4];
3222 __u32 state;
3223 };
3224
3225 struct bpf_tcp_sock {
3226 __u32 snd_cwnd; /* Sending congestion window */
3227 __u32 srtt_us; /* smoothed round trip time << 3 in usecs */
3228 __u32 rtt_min;
3229 __u32 snd_ssthresh; /* Slow start size threshold */
3230 __u32 rcv_nxt; /* What we want to receive next */
3231 __u32 snd_nxt; /* Next sequence we send */
3232 __u32 snd_una; /* First byte we want an ack for */
3233 __u32 mss_cache; /* Cached effective mss, not including SACKS */
3234 __u32 ecn_flags; /* ECN status bits. */
3235 __u32 rate_delivered; /* saved rate sample: packets delivered */
3236 __u32 rate_interval_us; /* saved rate sample: time elapsed */
3237 __u32 packets_out; /* Packets which are "in flight" */
3238 __u32 retrans_out; /* Retransmitted packets out */
3239 __u32 total_retrans; /* Total retransmits for entire connection */
3240 __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
3241 * total number of segments in.
3242 */
3243 __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
3244 * total number of data segments in.
3245 */
3246 __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
3247 * The total number of segments sent.
3248 */
3249 __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
3250 * total number of data segments sent.
3251 */
3252 __u32 lost_out; /* Lost packets */
3253 __u32 sacked_out; /* SACK'd packets */
3254 __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
3255 * sum(delta(rcv_nxt)), or how many bytes
3256 * were received.
3257 */
3258 __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
3259 * sum(delta(snd_una)), or how many bytes
3260 * were acked.
3261 */
3262 __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
3263 * total number of DSACK blocks received
3264 */
3265 __u32 delivered; /* Total data packets delivered incl. rexmits */
3266 __u32 delivered_ce; /* Like the above but only ECE marked packets */
3267 __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */
3268 };
3269
3270 struct bpf_sock_tuple {
3271 union {
3272 struct {
3273 __be32 saddr;
3274 __be32 daddr;
3275 __be16 sport;
3276 __be16 dport;
3277 } ipv4;
3278 struct {
3279 __be32 saddr[4];
3280 __be32 daddr[4];
3281 __be16 sport;
3282 __be16 dport;
3283 } ipv6;
3284 };
3285 };
3286
3287 struct bpf_xdp_sock {
3288 __u32 queue_id;
3289 };
3290
3291 #define XDP_PACKET_HEADROOM 256
3292
3293 /* User return codes for XDP prog type.
3294 * A valid XDP program must return one of these defined values. All other
3295 * return codes are reserved for future use. Unknown return codes will
3296 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
3297 */
3298 enum xdp_action {
3299 XDP_ABORTED = 0,
3300 XDP_DROP,
3301 XDP_PASS,
3302 XDP_TX,
3303 XDP_REDIRECT,
3304 };
3305
3306 /* user accessible metadata for XDP packet hook
3307 * new fields must be added to the end of this structure
3308 */
3309 struct xdp_md {
3310 __u32 data;
3311 __u32 data_end;
3312 __u32 data_meta;
3313 /* Accesses below go through struct xdp_rxq_info */
3314 __u32 ingress_ifindex; /* rxq->dev->ifindex */
3315 __u32 rx_queue_index; /* rxq->queue_index */
3316 };
3317
3318 enum sk_action {
3319 SK_DROP = 0,
3320 SK_PASS,
3321 };
3322
3323 /* user accessible metadata for SK_MSG packet hook, new fields must
3324 * be added to the end of this structure
3325 */
3326 struct sk_msg_md {
3327 __bpf_md_ptr(void *, data);
3328 __bpf_md_ptr(void *, data_end);
3329
3330 __u32 family;
3331 __u32 remote_ip4; /* Stored in network byte order */
3332 __u32 local_ip4; /* Stored in network byte order */
3333 __u32 remote_ip6[4]; /* Stored in network byte order */
3334 __u32 local_ip6[4]; /* Stored in network byte order */
3335 __u32 remote_port; /* Stored in network byte order */
3336 __u32 local_port; /* stored in host byte order */
3337 __u32 size; /* Total size of sk_msg */
3338 };
3339
3340 struct sk_reuseport_md {
3341 /*
3342 * Start of directly accessible data. It begins from
3343 * the tcp/udp header.
3344 */
3345 __bpf_md_ptr(void *, data);
3346 /* End of directly accessible data */
3347 __bpf_md_ptr(void *, data_end);
3348 /*
3349 * Total length of packet (starting from the tcp/udp header).
3350 * Note that the directly accessible bytes (data_end - data)
3351 * could be less than this "len". Those bytes could be
3352 * indirectly read by a helper "bpf_skb_load_bytes()".
3353 */
3354 __u32 len;
3355 /*
3356 * Eth protocol in the mac header (network byte order). e.g.
3357 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
3358 */
3359 __u32 eth_protocol;
3360 __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
3361 __u32 bind_inany; /* Is sock bound to an INANY address? */
3362 __u32 hash; /* A hash of the packet's 4-tuple */
3363 };
3364
3365 #define BPF_TAG_SIZE 8
3366
3367 struct bpf_prog_info {
3368 __u32 type;
3369 __u32 id;
3370 __u8 tag[BPF_TAG_SIZE];
3371 __u32 jited_prog_len;
3372 __u32 xlated_prog_len;
3373 __aligned_u64 jited_prog_insns;
3374 __aligned_u64 xlated_prog_insns;
3375 __u64 load_time; /* ns since boottime */
3376 __u32 created_by_uid;
3377 __u32 nr_map_ids;
3378 __aligned_u64 map_ids;
3379 char name[BPF_OBJ_NAME_LEN];
3380 __u32 ifindex;
3381 __u32 gpl_compatible:1;
3382 __u32 :31; /* alignment pad */
3383 __u64 netns_dev;
3384 __u64 netns_ino;
3385 __u32 nr_jited_ksyms;
3386 __u32 nr_jited_func_lens;
3387 __aligned_u64 jited_ksyms;
3388 __aligned_u64 jited_func_lens;
3389 __u32 btf_id;
3390 __u32 func_info_rec_size;
3391 __aligned_u64 func_info;
3392 __u32 nr_func_info;
3393 __u32 nr_line_info;
3394 __aligned_u64 line_info;
3395 __aligned_u64 jited_line_info;
3396 __u32 nr_jited_line_info;
3397 __u32 line_info_rec_size;
3398 __u32 jited_line_info_rec_size;
3399 __u32 nr_prog_tags;
3400 __aligned_u64 prog_tags;
3401 __u64 run_time_ns;
3402 __u64 run_cnt;
3403 } __attribute__((aligned(8)));
3404
3405 struct bpf_map_info {
3406 __u32 type;
3407 __u32 id;
3408 __u32 key_size;
3409 __u32 value_size;
3410 __u32 max_entries;
3411 __u32 map_flags;
3412 char name[BPF_OBJ_NAME_LEN];
3413 __u32 ifindex;
3414 __u32 btf_vmlinux_value_type_id;
3415 __u64 netns_dev;
3416 __u64 netns_ino;
3417 __u32 btf_id;
3418 __u32 btf_key_type_id;
3419 __u32 btf_value_type_id;
3420 } __attribute__((aligned(8)));
3421
3422 struct bpf_btf_info {
3423 __aligned_u64 btf;
3424 __u32 btf_size;
3425 __u32 id;
3426 } __attribute__((aligned(8)));
3427
3428 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
3429 * by user and intended to be used by socket (e.g. to bind to, depends on
3430 * attach type).
3431 */
3432 struct bpf_sock_addr {
3433 __u32 user_family; /* Allows 4-byte read, but no write. */
3434 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
3435 * Stored in network byte order.
3436 */
3437 __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
3438 * Stored in network byte order.
3439 */
3440 __u32 user_port; /* Allows 4-byte read and write.
3441 * Stored in network byte order
3442 */
3443 __u32 family; /* Allows 4-byte read, but no write */
3444 __u32 type; /* Allows 4-byte read, but no write */
3445 __u32 protocol; /* Allows 4-byte read, but no write */
3446 __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
3447 * Stored in network byte order.
3448 */
3449 __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
3450 * Stored in network byte order.
3451 */
3452 __bpf_md_ptr(struct bpf_sock *, sk);
3453 };
3454
3455 /* User bpf_sock_ops struct to access socket values and specify request ops
3456 * and their replies.
3457 * Some of these fields are in network (big-endian) byte order and may need
3458 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
3459 * New fields can only be added at the end of this structure
3460 */
3461 struct bpf_sock_ops {
3462 __u32 op;
3463 union {
3464 __u32 args[4]; /* Optionally passed to bpf program */
3465 __u32 reply; /* Returned by bpf program */
3466 __u32 replylong[4]; /* Optionally returned by bpf prog */
3467 };
3468 __u32 family;
3469 __u32 remote_ip4; /* Stored in network byte order */
3470 __u32 local_ip4; /* Stored in network byte order */
3471 __u32 remote_ip6[4]; /* Stored in network byte order */
3472 __u32 local_ip6[4]; /* Stored in network byte order */
3473 __u32 remote_port; /* Stored in network byte order */
3474 __u32 local_port; /* stored in host byte order */
3475 __u32 is_fullsock; /* Some TCP fields are only valid if
3476 * there is a full socket. If not, the
3477 * fields read as zero.
3478 */
3479 __u32 snd_cwnd;
3480 __u32 srtt_us; /* Averaged RTT << 3 in usecs */
3481 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
3482 __u32 state;
3483 __u32 rtt_min;
3484 __u32 snd_ssthresh;
3485 __u32 rcv_nxt;
3486 __u32 snd_nxt;
3487 __u32 snd_una;
3488 __u32 mss_cache;
3489 __u32 ecn_flags;
3490 __u32 rate_delivered;
3491 __u32 rate_interval_us;
3492 __u32 packets_out;
3493 __u32 retrans_out;
3494 __u32 total_retrans;
3495 __u32 segs_in;
3496 __u32 data_segs_in;
3497 __u32 segs_out;
3498 __u32 data_segs_out;
3499 __u32 lost_out;
3500 __u32 sacked_out;
3501 __u32 sk_txhash;
3502 __u64 bytes_received;
3503 __u64 bytes_acked;
3504 __bpf_md_ptr(struct bpf_sock *, sk);
3505 };
3506
3507 /* Definitions for bpf_sock_ops_cb_flags */
3508 #define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
3509 #define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
3510 #define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
3511 #define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3)
3512 #define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently
3513 * supported cb flags
3514 */
3515
3516 /* List of known BPF sock_ops operators.
3517 * New entries can only be added at the end
3518 */
3519 enum {
3520 BPF_SOCK_OPS_VOID,
3521 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or
3522 * -1 if default value should be used
3523 */
3524 BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized
3525 * window (in packets) or -1 if default
3526 * value should be used
3527 */
3528 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an
3529 * active connection is initialized
3530 */
3531 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an
3532 * active connection is
3533 * established
3534 */
3535 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a
3536 * passive connection is
3537 * established
3538 */
3539 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
3540 * needs ECN
3541 */
3542 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is
3543 * based on the path and may be
3544 * dependent on the congestion control
3545 * algorithm. In general it indicates
3546 * a congestion threshold. RTTs above
3547 * this indicate congestion
3548 */
3549 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
3550 * Arg1: value of icsk_retransmits
3551 * Arg2: value of icsk_rto
3552 * Arg3: whether RTO has expired
3553 */
3554 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
3555 * Arg1: sequence number of 1st byte
3556 * Arg2: # segments
3557 * Arg3: return value of
3558 * tcp_transmit_skb (0 => success)
3559 */
3560 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
3561 * Arg1: old_state
3562 * Arg2: new_state
3563 */
3564 BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
3565 * socket transition to LISTEN state.
3566 */
3567 BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
3568 */
3569 };
3570
3571 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
3572 * changes between the TCP and BPF versions. Ideally this should never happen.
3573 * If it does, we need to add code to convert them before calling
3574 * the BPF sock_ops function.
3575 */
3576 enum {
3577 BPF_TCP_ESTABLISHED = 1,
3578 BPF_TCP_SYN_SENT,
3579 BPF_TCP_SYN_RECV,
3580 BPF_TCP_FIN_WAIT1,
3581 BPF_TCP_FIN_WAIT2,
3582 BPF_TCP_TIME_WAIT,
3583 BPF_TCP_CLOSE,
3584 BPF_TCP_CLOSE_WAIT,
3585 BPF_TCP_LAST_ACK,
3586 BPF_TCP_LISTEN,
3587 BPF_TCP_CLOSING, /* Now a valid state */
3588 BPF_TCP_NEW_SYN_RECV,
3589
3590 BPF_TCP_MAX_STATES /* Leave at the end! */
3591 };
3592
3593 #define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
3594 #define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
3595
3596 struct bpf_perf_event_value {
3597 __u64 counter;
3598 __u64 enabled;
3599 __u64 running;
3600 };
3601
3602 #define BPF_DEVCG_ACC_MKNOD (1ULL << 0)
3603 #define BPF_DEVCG_ACC_READ (1ULL << 1)
3604 #define BPF_DEVCG_ACC_WRITE (1ULL << 2)
3605
3606 #define BPF_DEVCG_DEV_BLOCK (1ULL << 0)
3607 #define BPF_DEVCG_DEV_CHAR (1ULL << 1)
3608
3609 struct bpf_cgroup_dev_ctx {
3610 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
3611 __u32 access_type;
3612 __u32 major;
3613 __u32 minor;
3614 };
3615
3616 struct bpf_raw_tracepoint_args {
3617 __u64 args[0];
3618 };
3619
3620 /* DIRECT: Skip the FIB rules and go to FIB table associated with device
3621 * OUTPUT: Do lookup from egress perspective; default is ingress
3622 */
3623 #define BPF_FIB_LOOKUP_DIRECT (1U << 0)
3624 #define BPF_FIB_LOOKUP_OUTPUT (1U << 1)
3625
3626 enum {
3627 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
3628 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */
3629 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */
3630 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */
3631 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */
3632 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
3633 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
3634 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
3635 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
3636 };
3637
3638 struct bpf_fib_lookup {
3639 /* input: network family for lookup (AF_INET, AF_INET6)
3640 * output: network family of egress nexthop
3641 */
3642 __u8 family;
3643
3644 /* set if lookup is to consider L4 data - e.g., FIB rules */
3645 __u8 l4_protocol;
3646 __be16 sport;
3647 __be16 dport;
3648
3649 /* total length of packet from network header - used for MTU check */
3650 __u16 tot_len;
3651
3652 /* input: L3 device index for lookup
3653 * output: device index from FIB lookup
3654 */
3655 __u32 ifindex;
3656
3657 union {
3658 /* inputs to lookup */
3659 __u8 tos; /* AF_INET */
3660 __be32 flowinfo; /* AF_INET6, flow_label + priority */
3661
3662 /* output: metric of fib result (IPv4/IPv6 only) */
3663 __u32 rt_metric;
3664 };
3665
3666 union {
3667 __be32 ipv4_src;
3668 __u32 ipv6_src[4]; /* in6_addr; network order */
3669 };
3670
3671 /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in
3672 * network header. output: bpf_fib_lookup sets to gateway address
3673 * if FIB lookup returns gateway route
3674 */
3675 union {
3676 __be32 ipv4_dst;
3677 __u32 ipv6_dst[4]; /* in6_addr; network order */
3678 };
3679
3680 /* output */
3681 __be16 h_vlan_proto;
3682 __be16 h_vlan_TCI;
3683 __u8 smac[6]; /* ETH_ALEN */
3684 __u8 dmac[6]; /* ETH_ALEN */
3685 };
3686
3687 enum bpf_task_fd_type {
3688 BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */
3689 BPF_FD_TYPE_TRACEPOINT, /* tp name */
3690 BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */
3691 BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */
3692 BPF_FD_TYPE_UPROBE, /* filename + offset */
3693 BPF_FD_TYPE_URETPROBE, /* filename + offset */
3694 };
3695
3696 #define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG (1U << 0)
3697 #define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL (1U << 1)
3698 #define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP (1U << 2)
3699
3700 struct bpf_flow_keys {
3701 __u16 nhoff;
3702 __u16 thoff;
3703 __u16 addr_proto; /* ETH_P_* of valid addrs */
3704 __u8 is_frag;
3705 __u8 is_first_frag;
3706 __u8 is_encap;
3707 __u8 ip_proto;
3708 __be16 n_proto;
3709 __be16 sport;
3710 __be16 dport;
3711 union {
3712 struct {
3713 __be32 ipv4_src;
3714 __be32 ipv4_dst;
3715 };
3716 struct {
3717 __u32 ipv6_src[4]; /* in6_addr; network order */
3718 __u32 ipv6_dst[4]; /* in6_addr; network order */
3719 };
3720 };
3721 __u32 flags;
3722 __be32 flow_label;
3723 };
3724
3725 struct bpf_func_info {
3726 __u32 insn_off;
3727 __u32 type_id;
3728 };
3729
3730 #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
3731 #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
3732
3733 struct bpf_line_info {
3734 __u32 insn_off;
3735 __u32 file_name_off;
3736 __u32 line_off;
3737 __u32 line_col;
3738 };
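
/* For illustration: line_col packs the line number in its upper bits and
 * the column in its lower 10 bits, so a value of (42 << 10) | 7 decodes to
 * line 42, column 7 via the BPF_LINE_INFO_LINE_* macros above.
 */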
3739
3740 struct bpf_spin_lock {
3741 __u32 val;
3742 };
3743
3744 struct bpf_sysctl {
3745 __u32 write; /* Sysctl is being read (= 0) or written (= 1).
3746 * Allows 1,2,4-byte read, but no write.
3747 */
3748 __u32 file_pos; /* Sysctl file position to read from, write to.
3749 * Allows 1,2,4-byte read and 4-byte write.
3750 */
3751 };
3752
3753 struct bpf_sockopt {
3754 __bpf_md_ptr(struct bpf_sock *, sk);
3755 __bpf_md_ptr(void *, optval);
3756 __bpf_md_ptr(void *, optval_end);
3757
3758 __s32 level;
3759 __s32 optname;
3760 __s32 optlen;
3761 __s32 retval;
3762 };
3763
3764 #endif /* __LINUX_BPF_H__ */