/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word (64-bit) */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

/* jmp encodings */
#define BPF_JNE		0x50	/* jump != */
#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};
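
/* For illustration only (not part of the UAPI): a minimal sketch of a
 * two-instruction program, "return 1", hand-encoded with struct bpf_insn.
 * BPF_JMP and BPF_K come from linux/bpf_common.h included above.
 *
 *	struct bpf_insn prog[] = {
 *		// r0 = 1
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0,
 *		  .src_reg = 0, .off = 0, .imm = 1 },
 *		// exit
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 */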

/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
	__u8	data[0];	/* Arbitrary size */
};
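
/* Illustrative sketch (user space, not part of the UAPI): building a lookup
 * key for a 10.0.0.0/24 prefix. The wrapping struct and its name are
 * assumptions; LPM trie keys are normally embedded this way because of the
 * flexible data[] member.
 *
 *	struct lpm_key_ipv4 {
 *		struct bpf_lpm_trie_key hdr;
 *		__u8 addr[4];
 *	};
 *
 *	struct lpm_key_ipv4 key = {
 *		.hdr.prefixlen = 24,
 *		.addr = { 10, 0, 0, 0 },
 *	};
 */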

/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
	BPF_PROG_ATTACH,
	BPF_PROG_DETACH,
	BPF_PROG_TEST_RUN,
	BPF_PROG_GET_NEXT_ID,
	BPF_MAP_GET_NEXT_ID,
	BPF_PROG_GET_FD_BY_ID,
	BPF_MAP_GET_FD_BY_ID,
	BPF_OBJ_GET_INFO_BY_FD,
	BPF_PROG_QUERY,
	BPF_RAW_TRACEPOINT_OPEN,
	BPF_BTF_LOAD,
	BPF_BTF_GET_FD_BY_ID,
};
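
/* All of the commands above are multiplexed through the single bpf(2)
 * system call. Illustrative sketch (not part of the UAPI): glibc provides
 * no dedicated wrapper, so user space typically goes through syscall(2)
 * directly, roughly as follows.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *			    unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 */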

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
	BPF_MAP_TYPE_CGROUP_ARRAY,
	BPF_MAP_TYPE_LRU_HASH,
	BPF_MAP_TYPE_LRU_PERCPU_HASH,
	BPF_MAP_TYPE_LPM_TRIE,
	BPF_MAP_TYPE_ARRAY_OF_MAPS,
	BPF_MAP_TYPE_HASH_OF_MAPS,
	BPF_MAP_TYPE_DEVMAP,
	BPF_MAP_TYPE_SOCKMAP,
	BPF_MAP_TYPE_CPUMAP,
	BPF_MAP_TYPE_XSKMAP,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_PERF_EVENT,
	BPF_PROG_TYPE_CGROUP_SKB,
	BPF_PROG_TYPE_CGROUP_SOCK,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
	BPF_PROG_TYPE_SOCK_OPS,
	BPF_PROG_TYPE_SK_SKB,
	BPF_PROG_TYPE_CGROUP_DEVICE,
	BPF_PROG_TYPE_SK_MSG,
	BPF_PROG_TYPE_RAW_TRACEPOINT,
	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
};

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_CGROUP_INET_SOCK_CREATE,
	BPF_CGROUP_SOCK_OPS,
	BPF_SK_SKB_STREAM_PARSER,
	BPF_SK_SKB_STREAM_VERDICT,
	BPF_CGROUP_DEVICE,
	BPF_SK_MSG_VERDICT,
	BPF_CGROUP_INET4_BIND,
	BPF_CGROUP_INET6_BIND,
	BPF_CGROUP_INET4_CONNECT,
	BPF_CGROUP_INET6_CONNECT,
	BPF_CGROUP_INET4_POST_BIND,
	BPF_CGROUP_INET6_POST_BIND,
	__MAX_BPF_ATTACH_TYPE
};

#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE

/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
 *
 * NONE(default): No further bpf programs allowed in the subtree.
 *
 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
 * the program in this cgroup yields to sub-cgroup program.
 *
 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
 * that cgroup program gets run in addition to the program in this cgroup.
 *
 * Only one program is allowed to be attached to a cgroup with
 * NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release the old program and attach the new one. Attach flags have to match.
 *
 * Multiple programs are allowed to be attached to a cgroup with
 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first, run first).
 * The programs of sub-cgroup are executed first, then programs of
 * this cgroup and then programs of parent cgroup.
 * When a child program makes a decision (like picking TCP CA or sock bind),
 * the parent program has a chance to override it.
 *
 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
 * Ex1:
 * cgrp1 (MULTI progs A, B) ->
 *    cgrp2 (OVERRIDE prog C) ->
 *       cgrp3 (MULTI prog D) ->
 *          cgrp4 (OVERRIDE prog E) ->
 *             cgrp5 (NONE prog F)
 * the event in cgrp5 triggers execution of F,D,A,B in that order.
 * if prog F is detached, the execution is E,D,A,B
 * if prog F and D are detached, the execution is E,A,B
 * if prog F, E and D are detached, the execution is C,A,B
 *
 * All eligible programs are executed regardless of return code from
 * earlier programs.
 */
#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
#define BPF_F_ALLOW_MULTI	(1U << 1)

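/* Illustrative sketch (user space, assuming the sys_bpf() wrapper shown
 * earlier and valid cgroup_fd/prog_fd descriptors): attaching an egress
 * program with BPF_F_ALLOW_MULTI so that sub-cgroup programs keep running.
 *
 *	union bpf_attr attr = {
 *		.target_fd	= cgroup_fd,
 *		.attach_bpf_fd	= prog_fd,
 *		.attach_type	= BPF_CGROUP_INET_EGRESS,
 *		.attach_flags	= BPF_F_ALLOW_MULTI,
 *	};
 *
 *	err = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
 */
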
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
 * verifier will perform strict alignment checking as if the kernel
 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
 * and NET_IP_ALIGN defined to 2.
 */
#define BPF_F_STRICT_ALIGNMENT	(1U << 0)

/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD	1

/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
 * offset to another bpf function
 */
#define BPF_PSEUDO_CALL		1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */

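/* Illustrative sketch (user space, assuming the sys_bpf() wrapper shown
 * earlier and a map with __u32 keys and __u64 values): inserting an element
 * only if it does not exist yet.
 *
 *	__u32 key = 1;
 *	__u64 value = 42;
 *	union bpf_attr attr = {
 *		.map_fd	= map_fd,
 *		.key	= (__u64)(unsigned long)&key,
 *		.value	= (__u64)(unsigned long)&value,
 *		.flags	= BPF_NOEXIST,
 *	};
 *
 *	err = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */
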
/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC	(1U << 0)
/* Instead of having one common LRU list in the
 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
 * which can scale and perform better.
 * Note, the LRU nodes (including free nodes) cannot be moved
 * across different LRU lists.
 */
#define BPF_F_NO_COMMON_LRU	(1U << 1)
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE		(1U << 2)

/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

#define BPF_OBJ_NAME_LEN 16U

/* Flags for accessing BPF object */
#define BPF_F_RDONLY		(1U << 3)
#define BPF_F_WRONLY		(1U << 4)

/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID	(1U << 5)

enum bpf_stack_build_id_status {
	/* user space needs an empty entry to identify end of a trace */
	BPF_STACK_BUILD_ID_EMPTY = 0,
	/* with valid build_id and offset */
	BPF_STACK_BUILD_ID_VALID = 1,
	/* couldn't get build_id, fallback to ip */
	BPF_STACK_BUILD_ID_IP = 2,
};

#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
	__s32		status;
	unsigned char	build_id[BPF_BUILD_ID_SIZE];
	union {
		__u64	offset;
		__u64	ip;
	};
};
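
/* When a BPF_MAP_TYPE_STACK_TRACE map is created with BPF_F_STACK_BUILD_ID,
 * each stored stack trace is an array of struct bpf_stack_build_id rather
 * than an array of instruction pointers. Illustrative sketch of a user-space
 * consumer; the frame count is an assumption tied to the map's value_size,
 * and symbolize()/symbolize_by_ip() stand in for the caller's own logic.
 *
 *	struct bpf_stack_build_id frames[127];
 *
 *	// ... look a stack id up into frames, then for each frame i:
 *	if (frames[i].status == BPF_STACK_BUILD_ID_VALID)
 *		symbolize(frames[i].build_id, frames[i].offset);
 *	else if (frames[i].status == BPF_STACK_BUILD_ID_IP)
 *		symbolize_by_ip(frames[i].ip);
 */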

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* BPF_MAP_CREATE related
					 * flags defined above.
					 */
		__u32	inner_map_fd;	/* fd pointing to the inner map */
		__u32	numa_node;	/* numa node (effective only if
					 * BPF_F_NUMA_NODE is set).
					 */
		char	map_name[BPF_OBJ_NAME_LEN];
		__u32	map_ifindex;	/* ifindex of netdev to create on */
		__u32	btf_fd;		/* fd pointing to a BTF type data */
		__u32	btf_key_id;	/* BTF type_id of the key */
		__u32	btf_value_id;	/* BTF type_id of the value */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
		__u32		prog_flags;
		char		prog_name[BPF_OBJ_NAME_LEN];
		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
		/* For some prog types expected attach type must be known at
		 * load time to verify attach type specific parts of prog
		 * (context accesses, allowed helpers, etc).
		 */
		__u32		expected_attach_type;
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
		__u32		file_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
		__u32		target_fd;	/* container object to attach to */
		__u32		attach_bpf_fd;	/* eBPF program to attach */
		__u32		attach_type;
		__u32		attach_flags;
	};

	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
		__u32		prog_fd;
		__u32		retval;
		__u32		data_size_in;
		__u32		data_size_out;
		__aligned_u64	data_in;
		__aligned_u64	data_out;
		__u32		repeat;
		__u32		duration;
	} test;

	struct { /* anonymous struct used by BPF_*_GET_*_ID */
		union {
			__u32		start_id;
			__u32		prog_id;
			__u32		map_id;
			__u32		btf_id;
		};
		__u32		next_id;
		__u32		open_flags;
	};

	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
		__u32		bpf_fd;
		__u32		info_len;
		__aligned_u64	info;
	} info;

	struct { /* anonymous struct used by BPF_PROG_QUERY command */
		__u32		target_fd;	/* container object to query */
		__u32		attach_type;
		__u32		query_flags;
		__u32		attach_flags;
		__aligned_u64	prog_ids;
		__u32		prog_cnt;
	} query;

	struct {
		__u64		name;
		__u32		prog_fd;
	} raw_tracepoint;

	struct { /* anonymous struct for BPF_BTF_LOAD */
		__aligned_u64	btf;
		__aligned_u64	btf_log_buf;
		__u32		btf_size;
		__u32		btf_log_size;
		__u32		btf_log_level;
	};
} __attribute__((aligned(8)));
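
/* Illustrative sketch (user space, assuming the sys_bpf() wrapper and the
 * two-instruction prog[] array from the struct bpf_insn example above):
 * loading a socket filter with a small verifier log buffer.
 *
 *	char log[4096];
 *	union bpf_attr attr = {
 *		.prog_type	= BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insn_cnt	= 2,
 *		.insns		= (__u64)(unsigned long)prog,
 *		.license	= (__u64)(unsigned long)"GPL",
 *		.log_level	= 1,
 *		.log_size	= sizeof(log),
 *		.log_buf	= (__u64)(unsigned long)log,
 *	};
 *
 *	int prog_fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 */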

/* The description below is an attempt at providing documentation to eBPF
 * developers about the multiple available eBPF helper functions. It can be
 * parsed and used to produce a manual page. The workflow is the following,
 * and requires the rst2man utility:
 *
 *     $ ./scripts/bpf_helpers_doc.py \
 *             --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
 *     $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
 *     $ man /tmp/bpf-helpers.7
 *
 * Note that in order to produce this external documentation, some RST
 * formatting is used in the descriptions to get "bold" and "italics" in
 * manual pages. Also note that the few trailing white spaces are
 * intentional, removing them would break paragraphs for rst2man.
 *
 * Start of BPF helper function descriptions:
 *
 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
 *	Description
 *		Perform a lookup in *map* for an entry associated to *key*.
 *	Return
 *		Map value associated to *key*, or **NULL** if no entry was
 *		found.
 *
 * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
 *	Description
 *		Add or update the value of the entry associated to *key* in
 *		*map* with *value*. *flags* is one of:
 *
 *		**BPF_NOEXIST**
 *			The entry for *key* must not exist in the map.
 *		**BPF_EXIST**
 *			The entry for *key* must already exist in the map.
 *		**BPF_ANY**
 *			No condition on the existence of the entry for *key*.
 *
 *		Flag value **BPF_NOEXIST** cannot be used for maps of types
 *		**BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
 *		elements always exist), the helper would return an error.
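 *
 *		A minimal usage sketch from within an eBPF program (the map
 *		*my_map* and the key/value layout are illustrative
 *		assumptions):
 *
 *		::
 *
 *			__u32 key = 0;
 *			__u64 val = 1;
 *
 *			if (bpf_map_update_elem(&my_map, &key, &val, BPF_ANY))
 *				return 0;	// update failed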
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
 *	Description
 *		Delete entry with *key* from *map*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_probe_read(void *dst, u32 size, const void *src)
 *	Description
 *		For tracing programs, safely attempt to read *size* bytes from
 *		address *src* and store the data in *dst*.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_ktime_get_ns(void)
 *	Description
 *		Return the time elapsed since system boot, in nanoseconds.
 *	Return
 *		Current *ktime*.
 *
 * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
 *	Description
 *		This helper is a "printk()-like" facility for debugging. It
 *		prints a message defined by format *fmt* (of size *fmt_size*)
 *		to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
 *		available. It can take up to three additional **u64**
 *		arguments (as an eBPF helper, the total number of arguments is
 *		limited to five).
 *
 *		Each time the helper is called, it appends a line to the trace.
 *		The format of the trace is customizable, and the exact output
 *		one will get depends on the options set in
 *		*\/sys/kernel/debug/tracing/trace_options* (see also the
 *		*README* file under the same directory). However, it usually
 *		defaults to something like:
 *
 *		::
 *
 *			telnet-470   [001] .N.. 419421.045894: 0x00000001: <formatted msg>
 *
 *		In the above:
 *
 *		* ``telnet`` is the name of the current task.
 *		* ``470`` is the PID of the current task.
 *		* ``001`` is the CPU number on which the task is
 *		  running.
 *		* In ``.N..``, each character refers to a set of
 *		  options (whether irqs are enabled, scheduling
 *		  options, whether hard/softirqs are running, level of
 *		  preempt_disabled respectively). **N** means that
 *		  **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
 *		  are set.
 *		* ``419421.045894`` is a timestamp.
 *		* ``0x00000001`` is a fake value used by BPF for the
 *		  instruction pointer register.
 *		* ``<formatted msg>`` is the message formatted with
 *		  *fmt*.
 *
 *		The conversion specifiers supported by *fmt* are similar, but
 *		more limited than for printk(). They are **%d**, **%i**,
 *		**%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
 *		**%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
 *		of field, padding with zeroes, etc.) is available, and the
 *		helper will return **-EINVAL** (but print nothing) if it
 *		encounters an unknown specifier.
 *
 *		Also, note that **bpf_trace_printk**\ () is slow, and should
 *		only be used for debugging purposes. For this reason, a notice
 *		block (spanning several lines) is printed to kernel logs and
 *		states that the helper should not be used "for production use"
 *		the first time this helper is used (or more precisely, when
 *		**trace_printk**\ () buffers are allocated). For passing values
 *		to user space, perf events should be preferred.
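 *
 *		For example (illustrative only), printing the current PID from
 *		a tracing program:
 *
 *		::
 *
 *			char fmt[] = "pid: %d\n";
 *			int pid = bpf_get_current_pid_tgid() >> 32;
 *
 *			bpf_trace_printk(fmt, sizeof(fmt), pid);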
 *	Return
 *		The number of bytes written to the buffer, or a negative error
 *		in case of failure.
 *
 * u32 bpf_get_prandom_u32(void)
 *	Description
 *		Get a pseudo-random number.
 *
 *		From a security point of view, this helper uses its own
 *		pseudo-random internal state, and cannot be used to infer the
 *		seed of other random functions in the kernel. However, it is
 *		essential to note that the generator used by the helper is not
 *		cryptographically secure.
 *	Return
 *		A random 32-bit unsigned value.
 *
 * u32 bpf_get_smp_processor_id(void)
 *	Description
 *		Get the SMP (symmetric multiprocessing) processor id. Note that
 *		all programs run with preemption disabled, which means that the
 *		SMP processor id is stable during all the execution of the
 *		program.
 *	Return
 *		The SMP id of the processor running the program.
 *
 * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
 *	Description
 *		Store *len* bytes from address *from* into the packet
 *		associated to *skb*, at *offset*. *flags* are a combination of
 *		**BPF_F_RECOMPUTE_CSUM** (automatically recompute the
 *		checksum for the packet after storing the bytes) and
 *		**BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
 *		**->swhash** and *skb*\ **->l4hash** to 0).
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
 *	Description
 *		Recompute the layer 3 (e.g. IP) checksum for the packet
 *		associated to *skb*. Computation is incremental, so the helper
 *		must know the former value of the header field that was
 *		modified (*from*), the new value of this field (*to*), and the
 *		number of bytes (2 or 4) for this field, stored in *size*.
 *		Alternatively, it is possible to store the difference between
 *		the previous and the new values of the header field in *to*, by
 *		setting *from* and *size* to 0. For both methods, *offset*
 *		indicates the location of the IP checksum within the packet.
 *
 *		This helper works in combination with **bpf_csum_diff**\ (),
 *		which does not update the checksum in-place, but offers more
 *		flexibility and can handle sizes larger than 2 or 4 for the
 *		checksum to update.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
 *	Description
 *		Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
 *		packet associated to *skb*. Computation is incremental, so the
 *		helper must know the former value of the header field that was
 *		modified (*from*), the new value of this field (*to*), and the
 *		number of bytes (2 or 4) for this field, stored on the lowest
 *		four bits of *flags*. Alternatively, it is possible to store
 *		the difference between the previous and the new values of the
 *		header field in *to*, by setting *from* and the four lowest
 *		bits of *flags* to 0. For both methods, *offset* indicates the
 *		location of the IP checksum within the packet. In addition to
 *		the size of the field, *flags* can be added (bitwise OR) actual
 *		flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
 *		untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
 *		for updates resulting in a null checksum the value is set to
 *		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
 *		the checksum is to be computed against a pseudo-header.
 *
 *		This helper works in combination with **bpf_csum_diff**\ (),
 *		which does not update the checksum in-place, but offers more
 *		flexibility and can handle sizes larger than 2 or 4 for the
 *		checksum to update.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
 *	Description
 *		This special helper is used to trigger a "tail call", or in
 *		other words, to jump into another eBPF program. The same stack
 *		frame is used (but values on stack and in registers for the
 *		caller are not accessible to the callee). This mechanism allows
 *		for program chaining, either for raising the maximum number of
 *		available eBPF instructions, or to execute given programs in
 *		conditional blocks. For security reasons, there is an upper
 *		limit to the number of successive tail calls that can be
 *		performed.
 *
 *		Upon call of this helper, the program attempts to jump into a
 *		program referenced at index *index* in *prog_array_map*, a
 *		special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
 *		*ctx*, a pointer to the context.
 *
 *		If the call succeeds, the kernel immediately runs the first
 *		instruction of the new program. This is not a function call,
 *		and it never returns to the previous program. If the call
 *		fails, then the helper has no effect, and the caller continues
 *		to run its subsequent instructions. A call can fail if the
 *		destination program for the jump does not exist (i.e. *index*
 *		is superior to the number of entries in *prog_array_map*), or
 *		if the maximum number of tail calls has been reached for this
 *		chain of programs. This limit is defined in the kernel by the
 *		macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
 *		which is currently set to 32.
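 *
 *		A minimal usage sketch (the program array *jmp_table* and its
 *		setup are assumed to exist elsewhere):
 *
 *		::
 *
 *			bpf_tail_call(ctx, &jmp_table, 2);
 *
 *			// Reached only if slot 2 of jmp_table was empty
 *			// or the tail call limit was hit.
 *			return 0;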
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
 *	Description
 *		Clone and redirect the packet associated to *skb* to another
 *		net device of index *ifindex*. Both ingress and egress
 *		interfaces can be used for redirection. The **BPF_F_INGRESS**
 *		value in *flags* is used to make the distinction (ingress path
 *		is selected if the flag is present, egress path otherwise).
 *		This is the only flag supported for now.
 *
 *		In comparison with **bpf_redirect**\ () helper,
 *		**bpf_clone_redirect**\ () has the associated cost of
 *		duplicating the packet buffer, but this can be executed out of
 *		the eBPF program. Conversely, **bpf_redirect**\ () is more
 *		efficient, but it is handled through an action code where the
 *		redirection happens only after the eBPF program has returned.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_get_current_pid_tgid(void)
 *	Return
 *		A 64-bit integer containing the current tgid and pid, and
 *		created as such:
 *		*current_task*\ **->tgid << 32 \|**
 *		*current_task*\ **->pid**.
 *
 * u64 bpf_get_current_uid_gid(void)
 *	Return
 *		A 64-bit integer containing the current GID and UID, and
 *		created as such: *current_gid* **<< 32 \|** *current_uid*.
 *
 * int bpf_get_current_comm(char *buf, u32 size_of_buf)
 *	Description
 *		Copy the **comm** attribute of the current task into *buf* of
 *		*size_of_buf*. The **comm** attribute contains the name of
 *		the executable (excluding the path) for the current task. The
 *		*size_of_buf* must be strictly positive. On success, the
 *		helper makes sure that the *buf* is NUL-terminated. On failure,
 *		it is filled with zeroes.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
 *	Description
 *		Retrieve the classid for the current task, i.e. for the net_cls
 *		cgroup to which *skb* belongs.
 *
 *		This helper can be used on TC egress path, but not on ingress.
 *
 *		The net_cls cgroup provides an interface to tag network packets
 *		based on a user-provided identifier for all traffic coming from
 *		the tasks belonging to the related cgroup. See also the related
 *		kernel documentation, available from the Linux sources in file
 *		*Documentation/cgroup-v1/net_cls.txt*.
 *
 *		The Linux kernel has two versions for cgroups: there are
 *		cgroups v1 and cgroups v2. Both are available to users, who can
 *		use a mixture of them, but note that the net_cls cgroup is for
 *		cgroup v1 only. This makes it incompatible with BPF programs
 *		run on cgroups, which is a cgroup-v2-only feature (a socket can
 *		only hold data for one version of cgroups at a time).
 *
 *		This helper is only available if the kernel was compiled with
 *		the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
 *		"**y**" or to "**m**".
 *	Return
 *		The classid, or 0 for the default unconfigured classid.
 *
 * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
 *	Description
 *		Push a *vlan_tci* (VLAN tag control information) of protocol
 *		*vlan_proto* to the packet associated to *skb*, then update
 *		the checksum. Note that if *vlan_proto* is different from
 *		**ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
 *		be **ETH_P_8021Q**.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_vlan_pop(struct sk_buff *skb)
 *	Description
 *		Pop a VLAN header from the packet associated to *skb*.
 *
 *		A call to this helper is susceptible to change the underlying
 *		packet buffer. Therefore, at load time, all checks on pointers
 *		previously done by the verifier are invalidated and must be
 *		performed again, if the helper is used in combination with
 *		direct packet access.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 *	Description
 *		Get tunnel metadata. This helper takes a pointer *key* to an
 *		empty **struct bpf_tunnel_key** of **size**, that will be
 *		filled with tunnel metadata for the packet associated to *skb*.
 *		The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
 *		indicates that the tunnel is based on IPv6 protocol instead of
 *		IPv4.
 *
 *		The **struct bpf_tunnel_key** is an object that generalizes the
 *		principal parameters used by various tunneling protocols into a
 *		single struct. This way, it can be used to easily make a
 *		decision based on the contents of the encapsulation header,
 *		"summarized" in this struct. In particular, it holds the IP
 *		address of the remote end (IPv4 or IPv6, depending on the case)
 *		in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
 *		this struct exposes the *key*\ **->tunnel_id**, which is
 *		generally mapped to a VNI (Virtual Network Identifier), making
 *		it programmable together with the **bpf_skb_set_tunnel_key**\
 *		() helper.
 *
 *		Let's imagine that the following code is part of a program
 *		attached to the TC ingress interface, on one end of a GRE
 *		tunnel, and is supposed to filter out all messages coming from
 *		remote ends with IPv4 address other than 10.0.0.1:
 *
 *		::
 *
 *			int ret;
 *			struct bpf_tunnel_key key = {};
 *
 *			ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
 *			if (ret < 0)
 *				return TC_ACT_SHOT;	// drop packet
 *
 *			if (key.remote_ipv4 != 0x0a000001)
 *				return TC_ACT_SHOT;	// drop packet
 *
 *			return TC_ACT_OK;		// accept packet
 *
 *		This interface can also be used with all encapsulation devices
 *		that can operate in "collect metadata" mode: instead of having
 *		one network device per specific configuration, the "collect
 *		metadata" mode only requires a single device where the
 *		configuration can be extracted from this helper.
 *
 *		This can be used together with various tunnels such as VXLan,
 *		Geneve, GRE or IP in IP (IPIP).
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
 *	Description
 *		Populate tunnel metadata for packet associated to *skb.* The
 *		tunnel metadata is set to the contents of *key*, of *size*. The
 *		*flags* can be set to a combination of the following values:
 *
 *		**BPF_F_TUNINFO_IPV6**
 *			Indicate that the tunnel is based on IPv6 protocol
 *			instead of IPv4.
 *		**BPF_F_ZERO_CSUM_TX**
 *			For IPv4 packets, add a flag to tunnel metadata
 *			indicating that checksum computation should be skipped
 *			and checksum set to zeroes.
 *		**BPF_F_DONT_FRAGMENT**
 *			Add a flag to tunnel metadata indicating that the
 *			packet should not be fragmented.
 *		**BPF_F_SEQ_NUMBER**
 *			Add a flag to tunnel metadata indicating that a
 *			sequence number should be added to tunnel header before
 *			sending the packet. This flag was added for GRE
 *			encapsulation, but might be used with other protocols
 *			as well in the future.
 *
 *		Here is a typical usage on the transmit path:
 *
 *		::
 *
 *			struct bpf_tunnel_key key;
 *			     populate key ...
 *			bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
 *			bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
 *
 *		See also the description of the **bpf_skb_get_tunnel_key**\ ()
 *		helper for additional information.
 *	Return
 *		0 on success, or a negative error in case of failure.
 *
 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
 *	Description
 *		Read the value of a perf event counter. This helper relies on a
 *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
 *		the perf event counter is selected when *map* is updated with
 *		perf event file descriptors. The *map* is an array whose size
 *		is the number of available CPUs, and each cell contains a value
 *		relative to one CPU. The value to retrieve is indicated by
 *		*flags*, that contains the index of the CPU to look up, masked
 *		with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
 *		**BPF_F_CURRENT_CPU** to indicate that the value for the
 *		current CPU should be retrieved.
 *
 *		Note that before Linux 4.13, only hardware perf events can be
 *		retrieved.
 *
 *		Also, be aware that the newer helper
 *		**bpf_perf_event_read_value**\ () is recommended over
 *		**bpf_perf_event_read**\ () in general. The latter has some ABI
 *		quirks where error and counter value are used as a return code
 *		(which is wrong to do since ranges may overlap). This issue is
 *		fixed with **bpf_perf_event_read_value**\ (), which at the same
 *		time provides more features over the **bpf_perf_event_read**\
 *		() interface. Please refer to the description of
 *		**bpf_perf_event_read_value**\ () for details.
 *	Return
 *		The value of the perf event counter read from the map, or a
 *		negative error code in case of failure.
 *
 * int bpf_redirect(u32 ifindex, u64 flags)
 *	Description
 *		Redirect the packet to another net device of index *ifindex*.
 *		This helper is somewhat similar to **bpf_clone_redirect**\
 *		(), except that the packet is not cloned, which provides
 *		increased performance.
 *
 *		Except for XDP, both ingress and egress interfaces can be used
 *		for redirection. The **BPF_F_INGRESS** value in *flags* is used
 *		to make the distinction (ingress path is selected if the flag
 *		is present, egress path otherwise). Currently, XDP only
 *		supports redirection to the egress interface, and accepts no
 *		flag at all.
 *
 *		The same effect can be attained with the more generic
 *		**bpf_redirect_map**\ (), which requires specific maps to be
 *		used but offers better performance.
 *	Return
 *		For XDP, the helper returns **XDP_REDIRECT** on success or
 *		**XDP_ABORTED** on error. For other program types, the values
 *		are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
 *		error.
 *
 * u32 bpf_get_route_realm(struct sk_buff *skb)
 *	Description
 *		Retrieve the realm of the route, that is to say the
 *		**tclassid** field of the destination for the *skb*. The
 *		identifier retrieved is a user-provided tag, similar to the
 *		one used with the net_cls cgroup (see description for
 *		**bpf_get_cgroup_classid**\ () helper), but here this tag is
 *		held by a route (a destination entry), not by a task.
 *
 *		Retrieving this identifier works with the clsact TC egress hook
 *		(see also **tc-bpf(8)**), or alternatively on conventional
 *		classful egress qdiscs, but not on TC ingress path. In case of
 *		clsact TC egress hook, this has the advantage that, internally,
 *		the destination entry has not been dropped yet in the transmit
 *		path. Therefore, the destination entry does not need to be
 *		artificially held via **netif_keep_dst**\ () for a classful
 *		qdisc until the *skb* is freed.
 *
 *		This helper is available only if the kernel was compiled with
 *		**CONFIG_IP_ROUTE_CLASSID** configuration option.
 *	Return
 *		The realm of the route for the packet associated to *skb*, or 0
 *		if none was found.
 *
 * int bpf_perf_event_output(struct pt_reg *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
 *	Description
 *		Write raw *data* blob into a special BPF perf event held by
 *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
 *		event must have the following attributes: **PERF_SAMPLE_RAW**
 *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
 *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
 *
 *		The *flags* are used to indicate the index in *map* for which
 *		the value must be put, masked with **BPF_F_INDEX_MASK**.
 *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
 *		to indicate that the index of the current CPU core should be
 *		used.
 *
 *		The value to write, of *size*, is passed through eBPF stack and
 *		pointed by *data*.
 *
 *		The context of the program *ctx* also needs to be passed to the
 *		helper.
 *
 *		In user space, a program willing to read the values needs to
 *		call **perf_event_open**\ () on the perf event (either for
 *		one or for all CPUs) and to store the file descriptor into the
 *		*map*. This must be done before the eBPF program can send data
 *		into it. An example is available in file
 *		*samples/bpf/trace_output_user.c* in the Linux kernel source
 *		tree (the eBPF program counterpart is in
 *		*samples/bpf/trace_output_kern.c*).
 *
 *		**bpf_perf_event_output**\ () achieves better performance
 *		than **bpf_trace_printk**\ () for sharing data with user
 *		space, and is much better suited to streaming data from eBPF
 *		programs.
 *
 *		Note that this helper is not restricted to tracing use cases
 *		and can be used with programs attached to TC or XDP as well,
 *		where it allows for passing data to user space listeners. Data
 *		can be:
 *
 *		* Only custom structs,
 *		* Only the packet payload, or
 *		* A combination of both.
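 *
 *		A minimal usage sketch (*events* is assumed to be a
 *		**BPF_MAP_TYPE_PERF_EVENT_ARRAY** map and *ev* a custom struct
 *		prepared on the stack):
 *
 *		::
 *
 *			bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *					      &ev, sizeof(ev));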
934 * Return
935 * 0 on success, or a negative error in case of failure.
936 *
1fdd08be
QM
937 * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
938 * Description
939 * This helper was provided as an easy way to load data from a
940 * packet. It can be used to load *len* bytes from *offset* from
941 * the packet associated to *skb*, into the buffer pointed by
942 * *to*.
943 *
944 * Since Linux 4.7, usage of this helper has mostly been replaced
945 * by "direct packet access", enabling packet data to be
946 * manipulated with *skb*\ **->data** and *skb*\ **->data_end**
947 * pointing respectively to the first byte of packet data and to
948 * the byte after the last byte of packet data. However, it
949 * remains useful if one wishes to read large quantities of data
950 * at once from a packet into the eBPF stack.
951 * Return
952 * 0 on success, or a negative error in case of failure.
953 *
c456dec4
QM
954 * int bpf_get_stackid(struct pt_reg *ctx, struct bpf_map *map, u64 flags)
955 * Description
956 * Walk a user or a kernel stack and return its id. To achieve
957 * this, the helper needs *ctx*, which is a pointer to the context
958 * on which the tracing program is executed, and a pointer to a
959 * *map* of type **BPF_MAP_TYPE_STACK_TRACE**.
960 *
961 * The last argument, *flags*, holds the number of stack frames to
962 * skip (from 0 to 255), masked with
963 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
964 * a combination of the following flags:
965 *
966 * **BPF_F_USER_STACK**
967 * Collect a user space stack instead of a kernel stack.
968 * **BPF_F_FAST_STACK_CMP**
969 * Compare stacks by hash only.
970 * **BPF_F_REUSE_STACKID**
971 * If two different stacks hash into the same *stackid*,
972 * discard the old one.
973 *
974 * The stack id retrieved is a 32 bit long integer handle which
975 * can be further combined with other data (including other stack
976 * ids) and used as a key into maps. This can be useful for
977 * generating a variety of graphs (such as flame graphs or off-cpu
978 * graphs).
979 *
980 * For walking a stack, this helper is an improvement over
981 * **bpf_probe_read**\ (), which can be used with unrolled loops
982 * but is not efficient and consumes a lot of eBPF instructions.
983 * Instead, **bpf_get_stackid**\ () can collect up to
984 * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
985 * this limit can be controlled with the **sysctl** program, and
986 * that it should be manually increased in order to profile long
987 * user stacks (such as stacks for Java programs). To do so, use:
988 *
989 * ::
990 *
991 * # sysctl kernel.perf_event_max_stack=<new value>
992 *
993 * Return
994 * The positive or null stack id on success, or a negative error
995 * in case of failure.
996 *
1fdd08be
QM
997 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
998 * Description
999 * Compute a checksum difference, from the raw buffer pointed by
1000 * *from*, of length *from_size* (that must be a multiple of 4),
1001 * towards the raw buffer pointed by *to*, of size *to_size*
1002 * (same remark). An optional *seed* can be added to the value
1003 * (this can be cascaded, the seed may come from a previous call
1004 * to the helper).
1005 *
1006 * This is flexible enough to be used in several ways:
1007 *
1008 * * With *from_size* == 0, *to_size* > 0 and *seed* set to
1009 * checksum, it can be used when pushing new data.
1010 * * With *from_size* > 0, *to_size* == 0 and *seed* set to
1011 * checksum, it can be used when removing data from a packet.
1012 * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
1013 * can be used to compute a diff. Note that *from_size* and
1014 * *to_size* do not need to be equal.
1015 *
1016 * This helper can be used in combination with
1017 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
1018 * which one can feed in the difference computed with
1019 * **bpf_csum_diff**\ ().
1020 * Return
1021 * The checksum result, or a negative error code in case of
1022 * failure.
1023 *
1024 * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
1025 * Description
1026 * Retrieve tunnel options metadata for the packet associated to
1027 * *skb*, and store the raw tunnel option data to the buffer *opt*
1028 * of *size*.
1029 *
1030 * This helper can be used with encapsulation devices that can
1031 * operate in "collect metadata" mode (please refer to the related
1032 * note in the description of **bpf_skb_get_tunnel_key**\ () for
1033 * more details). A particular example where this can be used is
1034 * in combination with the Geneve encapsulation protocol, where it
1035 * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper)
1036 * and retrieving arbitrary TLVs (Type-Length-Value headers) from
1037 * the eBPF program. This allows for full customization of these
1038 * headers.
1039 * Return
1040 * The size of the option data retrieved.
1041 *
1042 * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
1043 * Description
1044 * Set tunnel options metadata for the packet associated to *skb*
1045 * to the option data contained in the raw buffer *opt* of *size*.
1046 *
1047 * See also the description of the **bpf_skb_get_tunnel_opt**\ ()
1048 * helper for additional information.
1049 * Return
1050 * 0 on success, or a negative error in case of failure.
1051 *
1052 * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
1053 * Description
1054 * Change the protocol of the *skb* to *proto*. Currently
1055 * supported are transition from IPv4 to IPv6, and from IPv6 to
1056 * IPv4. The helper takes care of the groundwork for the
1057 * transition, including resizing the socket buffer. The eBPF
1058 * program is expected to fill the new headers, if any, via
1059 * **skb_store_bytes**\ () and to recompute the checksums with
1060 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
1061 * (). The main case for this helper is to perform NAT64
1062 * operations out of an eBPF program.
1063 *
1064 * Internally, the GSO type is marked as dodgy so that headers are
1065 * checked and segments are recalculated by the GSO/GRO engine.
1066 * The size for GSO target is adapted as well.
1067 *
1068 * All values for *flags* are reserved for future usage, and must
1069 * be left at zero.
1070 *
1071 * A call to this helper is susceptible to change the underlaying
1072 * packet buffer. Therefore, at load time, all checks on pointers
1073 * previously done by the verifier are invalidated and must be
1074 * performed again, if the helper is used in combination with
1075 * direct packet access.
1076 * Return
1077 * 0 on success, or a negative error in case of failure.
1078 *
1079 * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
1080 * Description
1081 * Change the packet type for the packet associated to *skb*. This
1082 * comes down to setting *skb*\ **->pkt_type** to *type*, except
1083 * the eBPF program does not have a write access to *skb*\
1084 * **->pkt_type** beside this helper. Using a helper here allows
1085 * for graceful handling of errors.
1086 *
1087 * The major use case is to change incoming *skb*s to
1088 * **PACKET_HOST** in a programmatic way instead of having to
1089 * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
1090 * example.
1091 *
1092 * Note that *type* only allows certain values. At this time, they
1093 * are:
1094 *
1095 * **PACKET_HOST**
1096 * Packet is for us.
1097 * **PACKET_BROADCAST**
1098 * Send packet to all.
1099 * **PACKET_MULTICAST**
1100 * Send packet to group.
1101 * **PACKET_OTHERHOST**
1102 * Send packet to someone else.
1103 * Return
1104 * 0 on success, or a negative error in case of failure.
1105 *
c6b5fb86
QM
1106 * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
1107 * Description
1108 * Check whether *skb* is a descendant of the cgroup2 held by
1109 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1110 * Return
1111 * The return value depends on the result of the test, and can be:
1112 *
1113 * * 0, if the *skb* failed the cgroup2 descendant test.
1114 * * 1, if the *skb* succeeded the cgroup2 descendant test.
1115 * * A negative error code, if an error occurred.
1116 *
fa15601a
QM
1117 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
1118 * Description
1119 * Retrieve the hash of the packet, *skb*\ **->hash**. If it is
1120 * not set, in particular if the hash was cleared due to mangling,
1121 * recompute this hash. Later accesses to the hash can be done
1122 * directly with *skb*\ **->hash**.
1123 *
1124 * Calling **bpf_set_hash_invalid**\ (), changing a packet
1125 * prototype with **bpf_skb_change_proto**\ (), or calling
1126 * **bpf_skb_store_bytes**\ () with the
1127 * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear
1128 * the hash and to trigger a new computation for the next call to
1129 * **bpf_get_hash_recalc**\ ().
1130 * Return
1131 * The 32-bit hash.
1132 *
c456dec4
QM
1133 * u64 bpf_get_current_task(void)
1134 * Return
1135 * A pointer to the current task struct.
fa15601a 1136 *
c6b5fb86
QM
1137 * int bpf_probe_write_user(void *dst, const void *src, u32 len)
1138 * Description
1139 * Attempt in a safe way to write *len* bytes from the buffer
1140 * *src* to *dst* in memory. It only works for threads that are in
1141 * user context, and *dst* must be a valid user space address.
1142 *
1143 * This helper should not be used to implement any kind of
1144 * security mechanism because of TOC-TOU attacks, but rather to
1145 * debug, divert, and manipulate execution of semi-cooperative
1146 * processes.
1147 *
1148 * Keep in mind that this feature is meant for experiments, and it
1149 * has a risk of crashing the system and running programs.
1150 * Therefore, when an eBPF program using this helper is attached,
1151 * a warning including PID and process name is printed to kernel
1152 * logs.
1153 * Return
1154 * 0 on success, or a negative error in case of failure.
1155 *
1156 * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
1157 * Description
1158 * Check whether the probe is being run is the context of a given
1159 * subset of the cgroup2 hierarchy. The cgroup2 to test is held by
1160 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1161 * Return
1162 * The return value depends on the result of the test, and can be:
1163 *
1164 * * 0, if the *skb* task belongs to the cgroup2.
1165 * * 1, if the *skb* task does not belong to the cgroup2.
1166 * * A negative error code, if an error occurred.
1167 *
fa15601a
QM
1168 * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
1169 * Description
1170 * Resize (trim or grow) the packet associated to *skb* to the
1171 * new *len*. The *flags* are reserved for future usage, and must
1172 * be left at zero.
1173 *
1174 * The basic idea is that the helper performs the needed work to
1175 * change the size of the packet, then the eBPF program rewrites
1176 * the rest via helpers like **bpf_skb_store_bytes**\ (),
1177 * **bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ ()
1178 * and others. This helper is a slow path utility intended for
1179 * replies with control messages. And because it is targeted for
1180 * slow path, the helper itself can afford to be slow: it
1181 * implicitly linearizes, unclones and drops offloads from the
1182 * *skb*.
1183 *
1184 * A call to this helper is susceptible to change the underlaying
1185 * packet buffer. Therefore, at load time, all checks on pointers
1186 * previously done by the verifier are invalidated and must be
1187 * performed again, if the helper is used in combination with
1188 * direct packet access.
1189 * Return
1190 * 0 on success, or a negative error in case of failure.
1191 *
1192 * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
1193 * Description
1194 * Pull in non-linear data in case the *skb* is non-linear and not
1195 * all of *len* are part of the linear section. Make *len* bytes
1196 * from *skb* readable and writable. If a zero value is passed for
1197 * *len*, then the whole length of the *skb* is pulled.
1198 *
1199 * This helper is only needed for reading and writing with direct
1200 * packet access.
1201 *
1202 * For direct packet access, testing that offsets to access
1203 * are within packet boundaries (test on *skb*\ **->data_end**) is
1204 * susceptible to fail if offsets are invalid, or if the requested
1205 * data is in non-linear parts of the *skb*. On failure the
1206 * program can just bail out, or in the case of a non-linear
1207 * buffer, use a helper to make the data available. The
1208 * **bpf_skb_load_bytes**\ () helper is a first solution to access
1209 * the data. Another one consists in using **bpf_skb_pull_data**
1210 * to pull in once the non-linear parts, then retesting and
1211 * eventually access the data.
1212 *
1213 * At the same time, this also makes sure the *skb* is uncloned,
1214 * which is a necessary condition for direct write. As this needs
1215 * to be an invariant for the write part only, the verifier
1216 * detects writes and adds a prologue that is calling
1217 * **bpf_skb_pull_data()** to effectively unclone the *skb* from
1218 * the very beginning in case it is indeed cloned.
1219 *
1220 * A call to this helper is susceptible to change the underlaying
1221 * packet buffer. Therefore, at load time, all checks on pointers
1222 * previously done by the verifier are invalidated and must be
1223 * performed again, if the helper is used in combination with
1224 * direct packet access.
1225 * Return
1226 * 0 on success, or a negative error in case of failure.
1227 *
1228 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
1229 * Description
1230 * Add the checksum *csum* into *skb*\ **->csum** in case the
1231 * driver has supplied a checksum for the entire packet into that
1232 * field. Return an error otherwise. This helper is intended to be
1233 * used in combination with **bpf_csum_diff**\ (), in particular
1234 * when the checksum needs to be updated after data has been
1235 * written into the packet through direct packet access.
1236 * Return
1237 * The checksum on success, or a negative error code in case of
1238 * failure.
1239 *
1240 * void bpf_set_hash_invalid(struct sk_buff *skb)
1241 * Description
1242 * Invalidate the current *skb*\ **->hash**. It can be used after
1243 * mangling on headers through direct packet access, in order to
1244 * indicate that the hash is outdated and to trigger a
1245 * recalculation the next time the kernel tries to access this
1246 * hash or when the **bpf_get_hash_recalc**\ () helper is called.
1247 *
1248 * int bpf_get_numa_node_id(void)
1249 * Description
1250 * Return the id of the current NUMA node. The primary use case
1251 * for this helper is the selection of sockets for the local NUMA
1252 * node, when the program is attached to sockets using the
1253 * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
1254 * but the helper is also available to other eBPF program types,
1255 * similarly to **bpf_get_smp_processor_id**\ ().
1256 * Return
1257 * The id of current NUMA node.
1258 *
c6b5fb86
QM
1259 * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
1260 * Description
1261 * Grows headroom of packet associated to *skb* and adjusts the
1262 * offset of the MAC header accordingly, adding *len* bytes of
1263 * space. It automatically extends and reallocates memory as
1264 * required.
1265 *
1266 * This helper can be used on a layer 3 *skb* to push a MAC header
1267 * for redirection into a layer 2 device.
1268 *
1269 * All values for *flags* are reserved for future usage, and must
1270 * be left at zero.
1271 *
1272 * A call to this helper is susceptible to change the underlaying
1273 * packet buffer. Therefore, at load time, all checks on pointers
1274 * previously done by the verifier are invalidated and must be
1275 * performed again, if the helper is used in combination with
1276 * direct packet access.
1277 * Return
1278 * 0 on success, or a negative error in case of failure.
1279 *
1280 * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
1281 * Description
1282 * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
1283 * it is possible to use a negative value for *delta*. This helper
1284 * can be used to prepare the packet for pushing or popping
1285 * headers.
1286 *
1287 * A call to this helper is susceptible to change the underlaying
1288 * packet buffer. Therefore, at load time, all checks on pointers
1289 * previously done by the verifier are invalidated and must be
1290 * performed again, if the helper is used in combination with
1291 * direct packet access.
1292 * Return
1293 * 0 on success, or a negative error in case of failure.
1294 *
1295 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
1296 * Description
1297 * Copy a NUL terminated string from an unsafe address
1298 * *unsafe_ptr* to *dst*. The *size* should include the
1299 * terminating NUL byte. In case the string length is smaller than
1300 * *size*, the target is not padded with further NUL bytes. If the
1301 * string length is larger than *size*, just *size*-1 bytes are
1302 * copied and the last byte is set to NUL.
1303 *
1304 * On success, the length of the copied string is returned. This
1305 * makes this helper useful in tracing programs for reading
1306 * strings, and more importantly to get its length at runtime. See
1307 * the following snippet:
1308 *
1309 * ::
1310 *
1311 * SEC("kprobe/sys_open")
1312 * void bpf_sys_open(struct pt_regs *ctx)
1313 * {
1314 * char buf[PATHLEN]; // PATHLEN is defined to 256
1315 * int res = bpf_probe_read_str(buf, sizeof(buf),
1316 * ctx->di);
1317 *
1318 * // Consume buf, for example push it to
1319 * // userspace via bpf_perf_event_output(); we
1320 * // can use res (the string length) as event
1321 * // size, after checking its boundaries.
1322 * }
1323 *
1324 *		In comparison, using the **bpf_probe_read**\ () helper here
1325 *		instead to read the string would require estimating the length at
1326 * compile time, and would often result in copying more memory
1327 * than necessary.
1328 *
1329 *		Another useful use case is parsing individual process
1330 *		arguments or environment variables by navigating
1331 *		*current*\ **->mm->arg_start** and *current*\
1332 *		**->mm->env_start**: using this helper and the return value,
1333 *		one can quickly iterate to the right offset of the memory area.
1334 * Return
1335 * On success, the strictly positive length of the string,
1336 * including the trailing NUL character. On error, a negative
1337 * value.
1338 *
1339 * u64 bpf_get_socket_cookie(struct sk_buff *skb)
1340 * Description
1341 * If the **struct sk_buff** pointed by *skb* has a known socket,
1342 * retrieve the cookie (generated by the kernel) of this socket.
1343 * If no cookie has been set yet, generate a new cookie. Once
1344 * generated, the socket cookie remains stable for the life of the
1345 * socket. This helper can be useful for monitoring per socket
1346 * networking traffic statistics as it provides a unique socket
1347 * identifier per namespace.
1348 * Return
1349 *		An 8-byte long non-decreasing number on success, or 0 if the
1350 * socket field is missing inside *skb*.
1351 *
1352 * u32 bpf_get_socket_uid(struct sk_buff *skb)
1353 * Return
1354 * The owner UID of the socket associated to *skb*. If the socket
1355 * is **NULL**, or if it is not a full socket (i.e. if it is a
1356 * time-wait or a request socket instead), **overflowuid** value
1357 * is returned (note that **overflowuid** might also be the actual
1358 * UID value for the socket).
1359 *
fa15601a
QM
1360 * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
1361 * Description
1362 * Set the full hash for *skb* (set the field *skb*\ **->hash**)
1363 * to value *hash*.
1364 * Return
1365 * 0
1366 *
a3ef8e9a 1367 * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
7aa79a86
QM
1368 * Description
1369 * Emulate a call to **setsockopt()** on the socket associated to
1370 * *bpf_socket*, which must be a full socket. The *level* at
1371 * which the option resides and the name *optname* of the option
1372 * must be specified, see **setsockopt(2)** for more information.
1373 *		The option value of length *optlen* is pointed to by *optval*.
1374 *
1375 * This helper actually implements a subset of **setsockopt()**.
1376 * It supports the following *level*\ s:
1377 *
1378 * * **SOL_SOCKET**, which supports the following *optname*\ s:
1379 * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
1380 * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
1381 * * **IPPROTO_TCP**, which supports the following *optname*\ s:
1382 * **TCP_CONGESTION**, **TCP_BPF_IW**,
1383 * **TCP_BPF_SNDCWND_CLAMP**.
1384 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1385 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
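 *
 *		For illustration only (a minimal sketch assuming the
 *		**SEC**\ () convention from the kernel BPF samples and the
 *		usual TCP socket headers), a **BPF_PROG_TYPE_SOCK_OPS**
 *		program could switch the congestion control algorithm once a
 *		connection is established:
 *
 *		::
 *
 *			SEC("sockops")
 *			int set_cong(struct bpf_sock_ops *skops)
 *			{
 *				char cc[] = "cubic"; // hypothetical choice
 *
 *				if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *					bpf_setsockopt(skops, IPPROTO_TCP,
 *						       TCP_CONGESTION, cc,
 *						       sizeof(cc));
 *				return 1;
 *			}
 *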
1386 * Return
1387 * 0 on success, or a negative error in case of failure.
1388 *
fa15601a
QM
1389 * int bpf_skb_adjust_room(struct sk_buff *skb, u32 len_diff, u32 mode, u64 flags)
1390 * Description
1391 * Grow or shrink the room for data in the packet associated to
1392 * *skb* by *len_diff*, and according to the selected *mode*.
1393 *
1394 * There is a single supported mode at this time:
1395 *
1396 * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
1397 * (room space is added or removed below the layer 3 header).
1398 *
1399 * All values for *flags* are reserved for future usage, and must
1400 * be left at zero.
1401 *
1402 *		A call to this helper may change the underlying
1403 * packet buffer. Therefore, at load time, all checks on pointers
1404 * previously done by the verifier are invalidated and must be
1405 * performed again, if the helper is used in combination with
1406 * direct packet access.
1407 * Return
1408 * 0 on success, or a negative error in case of failure.
1409 *
ab127040
QM
1410 * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
1411 * Description
1412 * Redirect the packet to the endpoint referenced by *map* at
1413 * index *key*. Depending on its type, this *map* can contain
1414 * references to net devices (for forwarding packets through other
1415 * ports), or to CPUs (for redirecting XDP frames to another CPU;
1416 * but this is only implemented for native XDP (with driver
1417 * support) as of this writing).
1418 *
1419 * All values for *flags* are reserved for future usage, and must
1420 * be left at zero.
1421 *
1422 * When used to redirect packets to net devices, this helper
1423 *		provides a significant performance increase over **bpf_redirect**\ ().
1424 * This is due to various implementation details of the underlying
1425 * mechanisms, one of which is the fact that **bpf_redirect_map**\
1426 *		() tries to send packets as a "bulk" to the device.
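 *
 *		For illustration only (a minimal sketch assuming the
 *		**SEC**\ () convention from the kernel BPF samples and a
 *		hypothetical **BPF_MAP_TYPE_DEVMAP** named *tx_port*,
 *		declared elsewhere), forwarding every frame through the
 *		device stored at index 0 could look like:
 *
 *		::
 *
 *			SEC("xdp")
 *			int xdp_fwd(struct xdp_md *ctx)
 *			{
 *				return bpf_redirect_map(&tx_port, 0, 0);
 *			}
 *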
1427 * Return
1428 * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
1429 *
1430 * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags)
1431 * Description
1432 * Redirect the packet to the socket referenced by *map* (of type
1433 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1434 * egress interfaces can be used for redirection. The
1435 * **BPF_F_INGRESS** value in *flags* is used to make the
1436 * distinction (ingress path is selected if the flag is present,
1437 * egress path otherwise). This is the only flag supported for now.
1438 * Return
1439 * **SK_PASS** on success, or **SK_DROP** on error.
1440 *
a3ef8e9a 1441 * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
ab127040
QM
1442 * Description
1443 * Add an entry to, or update a *map* referencing sockets. The
1444 * *skops* is used as a new value for the entry associated to
1445 * *key*. *flags* is one of:
1446 *
1447 * **BPF_NOEXIST**
1448 * The entry for *key* must not exist in the map.
1449 * **BPF_EXIST**
1450 * The entry for *key* must already exist in the map.
1451 * **BPF_ANY**
1452 * No condition on the existence of the entry for *key*.
1453 *
1454 * If the *map* has eBPF programs (parser and verdict), those will
1455 * be inherited by the socket being added. If the socket is
1456 * already attached to eBPF programs, this results in an error.
1457 * Return
1458 * 0 on success, or a negative error in case of failure.
1459 *
fa15601a
QM
1460 * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
1461 * Description
1462 * Adjust the address pointed by *xdp_md*\ **->data_meta** by
1463 * *delta* (which can be positive or negative). Note that this
1464 * operation modifies the address stored in *xdp_md*\ **->data**,
1465 * so the latter must be loaded only after the helper has been
1466 * called.
1467 *
1468 * The use of *xdp_md*\ **->data_meta** is optional and programs
1469 * are not required to use it. The rationale is that when the
1470 * packet is processed with XDP (e.g. as DoS filter), it is
1471 * possible to push further meta data along with it before passing
1472 * to the stack, and to give the guarantee that an ingress eBPF
1473 * program attached as a TC classifier on the same device can pick
1474 * this up for further post-processing. Since TC works with socket
1475 * buffers, it remains possible to set from XDP the **mark** or
1476 * **priority** pointers, or other pointers for the socket buffer.
1477 * Having this scratch space generic and programmable allows for
1478 * more flexibility as the user is free to store whatever meta
1479 * data they need.
1480 *
1481 *		A call to this helper may change the underlying
1482 * packet buffer. Therefore, at load time, all checks on pointers
1483 * previously done by the verifier are invalidated and must be
1484 * performed again, if the helper is used in combination with
1485 * direct packet access.
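 *
 *		For illustration only (a minimal sketch assuming the
 *		**SEC**\ () convention from the kernel BPF samples),
 *		reserving four bytes of meta data and storing a value there
 *		for a TC classifier to consume could look like:
 *
 *		::
 *
 *			SEC("xdp")
 *			int xdp_store_meta(struct xdp_md *ctx)
 *			{
 *				void *data, *data_meta;
 *				__u32 *val;
 *
 *				if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*val)))
 *					return XDP_PASS;
 *
 *				// Re-read pointers after the call, then check bounds.
 *				data      = (void *)(long)ctx->data;
 *				data_meta = (void *)(long)ctx->data_meta;
 *				val = data_meta;
 *				if ((void *)(val + 1) > data)
 *					return XDP_PASS;
 *
 *				*val = 0x2a; // arbitrary value for the TC hook
 *				return XDP_PASS;
 *			}
 *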
1486 * Return
1487 * 0 on success, or a negative error in case of failure.
7aa79a86
QM
1488 *
1489 * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
1490 * Description
1491 * Read the value of a perf event counter, and store it into *buf*
1492 * of size *buf_size*. This helper relies on a *map* of type
1493 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
1494 * counter is selected when *map* is updated with perf event file
1495 * descriptors. The *map* is an array whose size is the number of
1496 * available CPUs, and each cell contains a value relative to one
1497 * CPU. The value to retrieve is indicated by *flags*, that
1498 * contains the index of the CPU to look up, masked with
1499 * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1500 * **BPF_F_CURRENT_CPU** to indicate that the value for the
1501 * current CPU should be retrieved.
1502 *
1503 *		This helper behaves in a way similar to the
1504 * **bpf_perf_event_read**\ () helper, save that instead of
1505 * just returning the value observed, it fills the *buf*
1506 * structure. This allows for additional data to be retrieved: in
1507 * particular, the enabled and running times (in *buf*\
1508 * **->enabled** and *buf*\ **->running**, respectively) are
1509 * copied. In general, **bpf_perf_event_read_value**\ () is
1510 * recommended over **bpf_perf_event_read**\ (), which has some
1511 * ABI issues and provides fewer functionalities.
1512 *
1513 * These values are interesting, because hardware PMU (Performance
1514 * Monitoring Unit) counters are limited resources. When there are
1515 * more PMU based perf events opened than available counters,
1516 *		the kernel will multiplex these events so each event gets a
1517 *		certain percentage (but not all) of the PMU time. When
1518 *		multiplexing happens, the number of samples or the counter
1519 *		value will not reflect the case when no multiplexing occurs.
1520 *		This makes comparison between different runs difficult.
1521 * Typically, the counter value should be normalized before
1522 * comparing to other experiments. The usual normalization is done
1523 * as follows.
1524 *
1525 * ::
1526 *
1527 * normalized_counter = counter * t_enabled / t_running
1528 *
1529 *		Where t_enabled is the time enabled for the event and t_running
1530 *		is the time running for the event since the last normalization. The
1531 * enabled and running times are accumulated since the perf event
1532 *		open. To compute a scaling factor between two invocations of an
1533 *		eBPF program, users can use the CPU id as the key (which is
1534 * typical for perf array usage model) to remember the previous
1535 * value and do the calculation inside the eBPF program.
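 *
 *		For illustration only (a minimal sketch assuming a
 *		hypothetical *counters* map of type
 *		**BPF_MAP_TYPE_PERF_EVENT_ARRAY**, populated from user
 *		space), reading the counter bound to the current CPU could
 *		look like:
 *
 *		::
 *
 *			struct bpf_perf_event_value val = {};
 *
 *			if (!bpf_perf_event_read_value(&counters,
 *						       BPF_F_CURRENT_CPU,
 *						       &val, sizeof(val))) {
 *				// val.counter, val.enabled and val.running
 *				// are valid here; normalize as
 *				// val.counter * val.enabled / val.running.
 *			}
 *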
1536 * Return
1537 * 0 on success, or a negative error in case of failure.
1538 *
a3ef8e9a 1539 * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
7aa79a86
QM
1540 * Description
1541 *		For an eBPF program attached to a perf event, retrieve the
1542 * value of the event counter associated to *ctx* and store it in
1543 * the structure pointed by *buf* and of size *buf_size*. Enabled
1544 * and running times are also stored in the structure (see
1545 * description of helper **bpf_perf_event_read_value**\ () for
1546 * more details).
1547 * Return
1548 * 0 on success, or a negative error in case of failure.
1549 *
a3ef8e9a 1550 * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
7aa79a86
QM
1551 * Description
1552 * Emulate a call to **getsockopt()** on the socket associated to
1553 * *bpf_socket*, which must be a full socket. The *level* at
1554 * which the option resides and the name *optname* of the option
1555 * must be specified, see **getsockopt(2)** for more information.
1556 *		The retrieved value is stored in the structure pointed to by
1557 *		*optval* and of length *optlen*.
1558 *
1559 * This helper actually implements a subset of **getsockopt()**.
1560 * It supports the following *level*\ s:
1561 *
1562 * * **IPPROTO_TCP**, which supports *optname*
1563 * **TCP_CONGESTION**.
1564 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1565 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
1566 * Return
1567 * 0 on success, or a negative error in case of failure.
1568 *
1569 * int bpf_override_return(struct pt_regs *regs, u64 rc)
1570 * Description
1571 * Used for error injection, this helper uses kprobes to override
1572 * the return value of the probed function, and to set it to *rc*.
1573 * The first argument is the context *regs* on which the kprobe
1574 * works.
1575 *
1576 *		This helper works by setting the PC (program counter)
1577 * to an override function which is run in place of the original
1578 * probed function. This means the probed function is not run at
1579 * all. The replacement function just returns with the required
1580 * value.
1581 *
1582 * This helper has security implications, and thus is subject to
1583 * restrictions. It is only available if the kernel was compiled
1584 * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
1585 * option, and in this case it only works on functions tagged with
1586 * **ALLOW_ERROR_INJECTION** in the kernel code.
1587 *
1588 *		Also, the helper is only available for architectures that
1589 *		have the **CONFIG_FUNCTION_ERROR_INJECTION** option. As of this
1590 *		writing, the x86 architecture is the only one to support this feature.
1591 * Return
1592 * 0
1593 *
a3ef8e9a 1594 * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
7aa79a86
QM
1595 * Description
1596 * Attempt to set the value of the **bpf_sock_ops_cb_flags** field
1597 *		for the full TCP socket associated to *bpf_sock* to
1598 * *argval*.
1599 *
1600 * The primary use of this field is to determine if there should
1601 * be calls to eBPF programs of type
1602 * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
1603 * code. A program of the same type can change its value, per
1604 * connection and as necessary, when the connection is
1605 * established. This field is directly accessible for reading, but
1606 * this helper must be used for updates in order to return an
1607 * error if an eBPF program tries to set a callback that is not
1608 * supported in the current kernel.
1609 *
1610 * The supported callback values that *argval* can combine are:
1611 *
1612 * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
1613 * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
1614 * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
1615 *
1616 * Here are some examples of where one could call such eBPF
1617 * program:
1618 *
1619 * * When RTO fires.
1620 * * When a packet is retransmitted.
1621 * * When the connection terminates.
1622 * * When a packet is sent.
1623 * * When a packet is received.
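 *
 *		For illustration only, a minimal sketch enabling the
 *		retransmission-related callbacks once a connection is
 *		established (*skops* being the program's context pointer)
 *		could look like:
 *
 *		::
 *
 *			if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
 *			    skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
 *				bpf_sock_ops_cb_flags_set(skops,
 *							  BPF_SOCK_OPS_RTO_CB_FLAG |
 *							  BPF_SOCK_OPS_RETRANS_CB_FLAG);
 *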
1624 * Return
1625 * Code **-EINVAL** if the socket is not a full TCP socket;
1626 * otherwise, a positive number containing the bits that could not
1627 * be set is returned (which comes down to 0 if all bits were set
1628 * as required).
1629 *
ab127040
QM
1630 * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
1631 * Description
1632 * This helper is used in programs implementing policies at the
1633 * socket level. If the message *msg* is allowed to pass (i.e. if
1634 * the verdict eBPF program returns **SK_PASS**), redirect it to
1635 * the socket referenced by *map* (of type
1636 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1637 * egress interfaces can be used for redirection. The
1638 * **BPF_F_INGRESS** value in *flags* is used to make the
1639 * distinction (ingress path is selected if the flag is present,
1640 * egress path otherwise). This is the only flag supported for now.
1641 * Return
1642 * **SK_PASS** on success, or **SK_DROP** on error.
1643 *
1644 * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
1645 * Description
1646 * For socket policies, apply the verdict of the eBPF program to
1647 * the next *bytes* (number of bytes) of message *msg*.
1648 *
1649 * For example, this helper can be used in the following cases:
1650 *
1651 * * A single **sendmsg**\ () or **sendfile**\ () system call
1652 * contains multiple logical messages that the eBPF program is
1653 * supposed to read and for which it should apply a verdict.
1654 * * An eBPF program only cares to read the first *bytes* of a
1655 * *msg*. If the message has a large payload, then setting up
1656 * and calling the eBPF program repeatedly for all bytes, even
1657 * though the verdict is already known, would create unnecessary
1658 * overhead.
1659 *
1660 * When called from within an eBPF program, the helper sets a
1661 * counter internal to the BPF infrastructure, that is used to
1662 * apply the last verdict to the next *bytes*. If *bytes* is
1663 * smaller than the current data being processed from a
1664 * **sendmsg**\ () or **sendfile**\ () system call, the first
1665 * *bytes* will be sent and the eBPF program will be re-run with
1666 * the pointer for start of data pointing to byte number *bytes*
1667 * **+ 1**. If *bytes* is larger than the current data being
1668 * processed, then the eBPF verdict will be applied to multiple
1669 * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are
1670 * consumed.
1671 *
1672 * Note that if a socket closes with the internal counter holding
1673 * a non-zero value, this is not a problem because data is not
1674 * being buffered for *bytes* and is sent as it is received.
1675 * Return
1676 * 0
1677 *
1678 * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
1679 * Description
1680 * For socket policies, prevent the execution of the verdict eBPF
1681 * program for message *msg* until *bytes* (byte number) have been
1682 * accumulated.
1683 *
1684 * This can be used when one needs a specific number of bytes
1685 * before a verdict can be assigned, even if the data spans
1686 * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
1687 * case would be a user calling **sendmsg**\ () repeatedly with
1688 * 1-byte long message segments. Obviously, this is bad for
1689 * performance, but it is still valid. If the eBPF program needs
1690 * *bytes* bytes to validate a header, this helper can be used to
1691 *		prevent the eBPF program from being called again until *bytes*
1692 *		have been accumulated.
1693 * Return
1694 * 0
1695 *
1696 * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
1697 * Description
1698 * For socket policies, pull in non-linear data from user space
1699 * for *msg* and set pointers *msg*\ **->data** and *msg*\
1700 * **->data_end** to *start* and *end* bytes offsets into *msg*,
1701 * respectively.
1702 *
1703 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
1704 * *msg* it can only parse data that the (**data**, **data_end**)
1705 * pointers have already consumed. For **sendmsg**\ () hooks this
1706 * is likely the first scatterlist element. But for calls relying
1707 * on the **sendpage** handler (e.g. **sendfile**\ ()) this will
1708 * be the range (**0**, **0**) because the data is shared with
1709 * user space and by default the objective is to avoid allowing
1710 *		user space to modify data while (or after) the eBPF verdict is
1711 * being decided. This helper can be used to pull in data and to
1712 * set the start and end pointer to given values. Data will be
1713 * copied if necessary (i.e. if data was not linear and if start
1714 * and end pointers do not point to the same chunk).
1715 *
1716 *		A call to this helper may change the underlying
1717 * packet buffer. Therefore, at load time, all checks on pointers
1718 * previously done by the verifier are invalidated and must be
1719 * performed again, if the helper is used in combination with
1720 * direct packet access.
1721 *
1722 * All values for *flags* are reserved for future usage, and must
1723 * be left at zero.
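 *
 *		For illustration only (a minimal sketch assuming the
 *		**SEC**\ () convention from the kernel BPF samples), making
 *		the first eight bytes of the message readable before taking
 *		a verdict could look like:
 *
 *		::
 *
 *			SEC("sk_msg")
 *			int msg_verdict(struct sk_msg_md *msg)
 *			{
 *				__u8 *data, *data_end;
 *
 *				if (bpf_msg_pull_data(msg, 0, 8, 0))
 *					return SK_DROP;
 *
 *				// Pointers must be re-read and re-validated
 *				// after the call.
 *				data = msg->data;
 *				data_end = msg->data_end;
 *				if (data + 8 > data_end)
 *					return SK_DROP;
 *
 *				// Inspect data[0..7] and decide on the verdict.
 *				return SK_PASS;
 *			}
 *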
1724 * Return
1725 * 0 on success, or a negative error in case of failure.
1726 *
a3ef8e9a 1727 * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
7aa79a86
QM
1728 * Description
1729 * Bind the socket associated to *ctx* to the address pointed by
1730 * *addr*, of length *addr_len*. This allows for making outgoing
1731 *		connections from the desired IP address, which can be useful
1732 *		for example when all processes inside a cgroup should use a
1733 *		single IP address on a host that has multiple IP addresses configured.
1734 *
1735 * This helper works for IPv4 and IPv6, TCP and UDP sockets. The
1736 * domain (*addr*\ **->sa_family**) must be **AF_INET** (or
1737 * **AF_INET6**). Looking for a free port to bind to can be
1738 *		expensive, therefore binding to a port is not permitted by the
1739 * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively)
1740 * must be set to zero.
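 *
 *		For illustration only (a minimal sketch assuming the
 *		**SEC**\ () convention from the kernel BPF samples, the
 *		usual socket headers and a **bpf_htonl**\ () byte-order
 *		macro as provided there), binding outgoing IPv4 connections
 *		of a cgroup to source address 10.0.0.1 could look like:
 *
 *		::
 *
 *			SEC("cgroup/connect4")
 *			int bind_src(struct bpf_sock_addr *ctx)
 *			{
 *				struct sockaddr_in addr = {
 *					.sin_family = AF_INET,
 *					.sin_addr.s_addr = bpf_htonl(0x0a000001),
 *					// sin_port is left at zero, as required.
 *				};
 *
 *				bpf_bind(ctx, (struct sockaddr *)&addr,
 *					 sizeof(addr));
 *				return 1;
 *			}
 *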
1741 * Return
1742 * 0 on success, or a negative error in case of failure.
2d020dd7
QM
1743 *
1744 * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
1745 * Description
1746 * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
1747 * only possible to shrink the packet as of this writing,
1748 * therefore *delta* must be a negative integer.
1749 *
1750 *		A call to this helper may change the underlying
1751 * packet buffer. Therefore, at load time, all checks on pointers
1752 * previously done by the verifier are invalidated and must be
1753 * performed again, if the helper is used in combination with
1754 * direct packet access.
1755 * Return
1756 * 0 on success, or a negative error in case of failure.
1757 *
1758 * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
1759 * Description
1760 * Retrieve the XFRM state (IP transform framework, see also
1761 * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
1762 *
1763 * The retrieved value is stored in the **struct bpf_xfrm_state**
1764 * pointed by *xfrm_state* and of length *size*.
1765 *
1766 * All values for *flags* are reserved for future usage, and must
1767 * be left at zero.
1768 *
1769 * This helper is available only if the kernel was compiled with
1770 * **CONFIG_XFRM** configuration option.
1771 * Return
1772 * 0 on success, or a negative error in case of failure.
c195651e
YS
1773 *
1774 * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
1775 * Description
79552fbc
QM
1776 *		Return a user or a kernel stack in the buffer provided by the
1777 *		BPF program. To achieve this, the helper needs *regs*, which is
1778 *		a pointer to the context on which the tracing program is
1779 *		executed. To store the stacktrace, the BPF program provides
1780 *		*buf* with a non-negative *size*.
1781 *
1782 * The last argument, *flags*, holds the number of stack frames to
1783 * skip (from 0 to 255), masked with
1784 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
1785 * the following flags:
1786 *
1787 * **BPF_F_USER_STACK**
1788 * Collect a user space stack instead of a kernel stack.
1789 * **BPF_F_USER_BUILD_ID**
1790 * Collect buildid+offset instead of ips for user stack,
1791 * only valid if **BPF_F_USER_STACK** is also specified.
1792 *
1793 * **bpf_get_stack**\ () can collect up to
1794 *		**PERF_MAX_STACK_DEPTH** kernel and user frames combined,
1795 *		provided the buffer is sufficiently large. Note that
1796 * this limit can be controlled with the **sysctl** program, and
1797 * that it should be manually increased in order to profile long
1798 * user stacks (such as stacks for Java programs). To do so, use:
1799 *
1800 * ::
1801 *
1802 * # sysctl kernel.perf_event_max_stack=<new value>
c195651e
YS
1803 *
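 *		For illustration only, a minimal sketch collecting a user
 *		space stack from a tracing program (*regs* being the
 *		program's context pointer) could look like:
 *
 *		::
 *
 *			__u64 stack[32]; // room for 32 frames, chosen arbitrarily
 *			int len;
 *
 *			len = bpf_get_stack(regs, stack, sizeof(stack),
 *					    BPF_F_USER_STACK);
 *			// On success, the first "len" bytes of "stack" hold
 *			// the collected instruction pointers.
 *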
1804 * Return
1805 *		A non-negative value equal to or less than *size* on success,
1806 *		or a negative error in case of failure.
4e1ec56c
DB
1807 *
1808 * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
1809 * Description
1810 * This helper is similar to **bpf_skb_load_bytes**\ () in that
1811 * it provides an easy way to load *len* bytes from *offset*
1812 * from the packet associated to *skb*, into the buffer pointed
1813 * by *to*. The difference to **bpf_skb_load_bytes**\ () is that
1814 * a fifth argument *start_header* exists in order to select a
1815 * base offset to start from. *start_header* can be one of:
1816 *
1817 * **BPF_HDR_START_MAC**
1818 * Base offset to load data from is *skb*'s mac header.
1819 * **BPF_HDR_START_NET**
1820 * Base offset to load data from is *skb*'s network header.
1821 *
1822 * In general, "direct packet access" is the preferred method to
1823 *		access packet data; however, this helper is particularly useful
1824 * in socket filters where *skb*\ **->data** does not always point
1825 * to the start of the mac header and where "direct packet access"
1826 * is not available.
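 *
 *		For illustration only (a minimal sketch assuming **struct
 *		iphdr** from the usual kernel headers), reading the IPv4
 *		header relative to the network header could look like:
 *
 *		::
 *
 *			struct iphdr iph;
 *
 *			if (bpf_skb_load_bytes_relative(skb, 0, &iph,
 *							sizeof(iph),
 *							BPF_HDR_START_NET))
 *				return 0; // drop or pass, depending on program type
 *			// iph.protocol, iph.saddr, etc. can now be inspected.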
1827 *
1828 * Return
1829 * 0 on success, or a negative error in case of failure.
1830 *
ebb676da
TG
1831 */
1832#define __BPF_FUNC_MAPPER(FN) \
1833 FN(unspec), \
1834 FN(map_lookup_elem), \
1835 FN(map_update_elem), \
1836 FN(map_delete_elem), \
1837 FN(probe_read), \
1838 FN(ktime_get_ns), \
1839 FN(trace_printk), \
1840 FN(get_prandom_u32), \
1841 FN(get_smp_processor_id), \
1842 FN(skb_store_bytes), \
1843 FN(l3_csum_replace), \
1844 FN(l4_csum_replace), \
1845 FN(tail_call), \
1846 FN(clone_redirect), \
1847 FN(get_current_pid_tgid), \
1848 FN(get_current_uid_gid), \
1849 FN(get_current_comm), \
1850 FN(get_cgroup_classid), \
1851 FN(skb_vlan_push), \
1852 FN(skb_vlan_pop), \
1853 FN(skb_get_tunnel_key), \
1854 FN(skb_set_tunnel_key), \
1855 FN(perf_event_read), \
1856 FN(redirect), \
1857 FN(get_route_realm), \
1858 FN(perf_event_output), \
1859 FN(skb_load_bytes), \
1860 FN(get_stackid), \
1861 FN(csum_diff), \
1862 FN(skb_get_tunnel_opt), \
1863 FN(skb_set_tunnel_opt), \
1864 FN(skb_change_proto), \
1865 FN(skb_change_type), \
1866 FN(skb_under_cgroup), \
1867 FN(get_hash_recalc), \
1868 FN(get_current_task), \
1869 FN(probe_write_user), \
1870 FN(current_task_under_cgroup), \
1871 FN(skb_change_tail), \
1872 FN(skb_pull_data), \
1873 FN(csum_update), \
1874 FN(set_hash_invalid), \
3a0af8fd 1875 FN(get_numa_node_id), \
17bedab2 1876 FN(skb_change_head), \
a5e8c070 1877 FN(xdp_adjust_head), \
91b8270f 1878 FN(probe_read_str), \
6acc5c29 1879 FN(get_socket_cookie), \
ded092cd 1880 FN(get_socket_uid), \
8c4b4c7e 1881 FN(set_hash), \
2be7e212 1882 FN(setsockopt), \
97f91a7c 1883 FN(skb_adjust_room), \
174a79ff
JF
1884 FN(redirect_map), \
1885 FN(sk_redirect_map), \
1886 FN(sock_map_update), \
908432ca 1887 FN(xdp_adjust_meta), \
4bebdc7a 1888 FN(perf_event_read_value), \
cd86d1fd 1889 FN(perf_prog_read_value), \
9802d865 1890 FN(getsockopt), \
b13d8807 1891 FN(override_return), \
4f738adb 1892 FN(sock_ops_cb_flags_set), \
2a100317 1893 FN(msg_redirect_map), \
91843d54 1894 FN(msg_apply_bytes), \
015632bb 1895 FN(msg_cork_bytes), \
d74bad4e 1896 FN(msg_pull_data), \
b32cc5b9 1897 FN(bind), \
12bed760 1898 FN(xdp_adjust_tail), \
c195651e 1899 FN(skb_get_xfrm_state), \
4e1ec56c
DB
1900 FN(get_stack), \
1901 FN(skb_load_bytes_relative),
ebb676da 1902
09756af4
AS
1903/* The integer value in the 'imm' field of a BPF_CALL instruction selects
1904 * which helper function the eBPF program intends to call
1905 */
ebb676da 1906#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
09756af4 1907enum bpf_func_id {
ebb676da 1908 __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
09756af4
AS
1909 __BPF_FUNC_MAX_ID,
1910};
ebb676da 1911#undef __BPF_ENUM_FN
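
/* For illustration only (not part of this header): the same mapper can be
 * reused, e.g. by tooling, to build a helper name table that stays in sync
 * with enum bpf_func_id:
 *
 *	#define __BPF_STR_FN(x) [BPF_FUNC_ ## x] = "bpf_" #x
 *	static const char * const helper_names[__BPF_FUNC_MAX_ID] = {
 *		__BPF_FUNC_MAPPER(__BPF_STR_FN)
 *	};
 *	#undef __BPF_STR_FN
 */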
09756af4 1912
781c53bc
DB
1913/* All flags used by eBPF helper functions, placed here. */
1914
1915/* BPF_FUNC_skb_store_bytes flags. */
1916#define BPF_F_RECOMPUTE_CSUM (1ULL << 0)
8afd54c8 1917#define BPF_F_INVALIDATE_HASH (1ULL << 1)
781c53bc
DB
1918
1919/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
1920 * First 4 bits are for passing the header field size.
1921 */
1922#define BPF_F_HDR_FIELD_MASK 0xfULL
1923
1924/* BPF_FUNC_l4_csum_replace flags. */
1925#define BPF_F_PSEUDO_HDR (1ULL << 4)
2f72959a 1926#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
d1b662ad 1927#define BPF_F_MARK_ENFORCE (1ULL << 6)
781c53bc
DB
1928
1929/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
1930#define BPF_F_INGRESS (1ULL << 0)
1931
c6c33454
DB
1932/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
1933#define BPF_F_TUNINFO_IPV6 (1ULL << 0)
1934
c195651e 1935/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
d5a3b1f6
AS
1936#define BPF_F_SKIP_FIELD_MASK 0xffULL
1937#define BPF_F_USER_STACK (1ULL << 8)
c195651e 1938/* flags used by BPF_FUNC_get_stackid only. */
d5a3b1f6
AS
1939#define BPF_F_FAST_STACK_CMP (1ULL << 9)
1940#define BPF_F_REUSE_STACKID (1ULL << 10)
c195651e
YS
1941/* flags used by BPF_FUNC_get_stack only. */
1942#define BPF_F_USER_BUILD_ID (1ULL << 11)
d5a3b1f6 1943
2da897e5
DB
1944/* BPF_FUNC_skb_set_tunnel_key flags. */
1945#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
22080870 1946#define BPF_F_DONT_FRAGMENT (1ULL << 2)
77a5196a 1947#define BPF_F_SEQ_NUMBER (1ULL << 3)
2da897e5 1948
908432ca
YS
1949/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
1950 * BPF_FUNC_perf_event_read_value flags.
1951 */
1e33759c
DB
1952#define BPF_F_INDEX_MASK 0xffffffffULL
1953#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
555c8a86
DB
1954/* BPF_FUNC_perf_event_output for sk_buff input context. */
1955#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
1e33759c 1956
2be7e212
DB
1957/* Mode for BPF_FUNC_skb_adjust_room helper. */
1958enum bpf_adj_room_mode {
1959 BPF_ADJ_ROOM_NET,
1960};
1961
4e1ec56c
DB
1962/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
1963enum bpf_hdr_start_off {
1964 BPF_HDR_START_MAC,
1965 BPF_HDR_START_NET,
1966};
1967
9bac3d6d
AS
1968/* user accessible mirror of in-kernel sk_buff.
1969 * new fields can only be added to the end of this structure
1970 */
1971struct __sk_buff {
1972 __u32 len;
1973 __u32 pkt_type;
1974 __u32 mark;
1975 __u32 queue_mapping;
c2497395
AS
1976 __u32 protocol;
1977 __u32 vlan_present;
1978 __u32 vlan_tci;
27cd5452 1979 __u32 vlan_proto;
bcad5718 1980 __u32 priority;
37e82c2f
AS
1981 __u32 ingress_ifindex;
1982 __u32 ifindex;
d691f9e8
AS
1983 __u32 tc_index;
1984 __u32 cb[5];
ba7591d8 1985 __u32 hash;
045efa82 1986 __u32 tc_classid;
969bf05e
AS
1987 __u32 data;
1988 __u32 data_end;
b1d9fc41 1989 __u32 napi_id;
8a31db56 1990
de8f3a83 1991 /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
8a31db56
JF
1992 __u32 family;
1993 __u32 remote_ip4; /* Stored in network byte order */
1994 __u32 local_ip4; /* Stored in network byte order */
1995 __u32 remote_ip6[4]; /* Stored in network byte order */
1996 __u32 local_ip6[4]; /* Stored in network byte order */
1997 __u32 remote_port; /* Stored in network byte order */
1998 __u32 local_port; /* stored in host byte order */
de8f3a83
DB
1999 /* ... here. */
2000
2001 __u32 data_meta;
9bac3d6d
AS
2002};
2003
d3aa45ce
AS
2004struct bpf_tunnel_key {
2005 __u32 tunnel_id;
c6c33454
DB
2006 union {
2007 __u32 remote_ipv4;
2008 __u32 remote_ipv6[4];
2009 };
2010 __u8 tunnel_tos;
2011 __u8 tunnel_ttl;
c0e760c9 2012 __u16 tunnel_ext;
4018ab18 2013 __u32 tunnel_label;
d3aa45ce
AS
2014};
2015
12bed760
EB
2016/* user accessible mirror of in-kernel xfrm_state.
2017 * new fields can only be added to the end of this structure
2018 */
2019struct bpf_xfrm_state {
2020 __u32 reqid;
2021 __u32 spi; /* Stored in network byte order */
2022 __u16 family;
2023 union {
2024 __u32 remote_ipv4; /* Stored in network byte order */
2025 __u32 remote_ipv6[4]; /* Stored in network byte order */
2026 };
2027};
2028
3a0af8fd
TG
2029/* Generic BPF return codes which all BPF program types may support.
2030 * The values are binary compatible with their TC_ACT_* counterparts to
2031 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
2032 * programs.
2033 *
2034 * XDP is handled separately, see XDP_*.
2035 */
2036enum bpf_ret_code {
2037 BPF_OK = 0,
2038 /* 1 reserved */
2039 BPF_DROP = 2,
2040 /* 3-6 reserved */
2041 BPF_REDIRECT = 7,
2042 /* >127 are reserved for prog type specific return codes */
2043};
2044
61023658
DA
2045struct bpf_sock {
2046 __u32 bound_dev_if;
aa4c1037
DA
2047 __u32 family;
2048 __u32 type;
2049 __u32 protocol;
482dca93
DA
2050 __u32 mark;
2051 __u32 priority;
aac3fc32
AI
2052 __u32 src_ip4; /* Allows 1,2,4-byte read.
2053 * Stored in network byte order.
2054 */
2055 __u32 src_ip6[4]; /* Allows 1,2,4-byte read.
2056 * Stored in network byte order.
2057 */
2058 __u32 src_port; /* Allows 4-byte read.
2059 * Stored in host byte order
2060 */
61023658
DA
2061};
2062
17bedab2
MKL
2063#define XDP_PACKET_HEADROOM 256
2064
6a773a15
BB
2065/* User return codes for XDP prog type.
2066 * A valid XDP program must return one of these defined values. All other
9beb8bed
DB
2067 * return codes are reserved for future use. Unknown return codes will
2068 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
6a773a15
BB
2069 */
2070enum xdp_action {
2071 XDP_ABORTED = 0,
2072 XDP_DROP,
2073 XDP_PASS,
6ce96ca3 2074 XDP_TX,
814abfab 2075 XDP_REDIRECT,
6a773a15
BB
2076};
2077
2078/* user accessible metadata for XDP packet hook
2079 * new fields must be added to the end of this structure
2080 */
2081struct xdp_md {
2082 __u32 data;
2083 __u32 data_end;
de8f3a83 2084 __u32 data_meta;
daaf24c6 2085	/* Accesses below go through struct xdp_rxq_info */
02dd3291
JDB
2086 __u32 ingress_ifindex; /* rxq->dev->ifindex */
2087 __u32 rx_queue_index; /* rxq->queue_index */
6a773a15
BB
2088};
2089
174a79ff 2090enum sk_action {
bfa64075
JF
2091 SK_DROP = 0,
2092 SK_PASS,
174a79ff
JF
2093};
2094
4f738adb
JF
2095/* user accessible metadata for SK_MSG packet hook, new fields must
2096 * be added to the end of this structure
2097 */
2098struct sk_msg_md {
2099 void *data;
2100 void *data_end;
2101};
2102
1e270976
MKL
2103#define BPF_TAG_SIZE 8
2104
2105struct bpf_prog_info {
2106 __u32 type;
2107 __u32 id;
2108 __u8 tag[BPF_TAG_SIZE];
2109 __u32 jited_prog_len;
2110 __u32 xlated_prog_len;
2111 __aligned_u64 jited_prog_insns;
2112 __aligned_u64 xlated_prog_insns;
cb4d2b3f
MKL
2113 __u64 load_time; /* ns since boottime */
2114 __u32 created_by_uid;
2115 __u32 nr_map_ids;
2116 __aligned_u64 map_ids;
067cae47 2117 char name[BPF_OBJ_NAME_LEN];
675fc275 2118 __u32 ifindex;
b85fab0e 2119 __u32 gpl_compatible:1;
675fc275
JK
2120 __u64 netns_dev;
2121 __u64 netns_ino;
1e270976
MKL
2122} __attribute__((aligned(8)));
2123
2124struct bpf_map_info {
2125 __u32 type;
2126 __u32 id;
2127 __u32 key_size;
2128 __u32 value_size;
2129 __u32 max_entries;
2130 __u32 map_flags;
067cae47 2131 char name[BPF_OBJ_NAME_LEN];
52775b33
JK
2132 __u32 ifindex;
2133 __u64 netns_dev;
2134 __u64 netns_ino;
78958fca
MKL
2135 __u32 btf_id;
2136 __u32 btf_key_id;
2137 __u32 btf_value_id;
1e270976
MKL
2138} __attribute__((aligned(8)));
2139
4fbac77d
AI
2140/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
2141 * by user and intended to be used by socket (e.g. to bind to, depends on
2142 * attach type).
2143 */
2144struct bpf_sock_addr {
2145 __u32 user_family; /* Allows 4-byte read, but no write. */
2146 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
2147 * Stored in network byte order.
2148 */
2149	__u32 user_ip6[4];	/* Allows 1,2,4-byte read and 4-byte write.
2150 * Stored in network byte order.
2151 */
2152 __u32 user_port; /* Allows 4-byte read and write.
2153 * Stored in network byte order
2154 */
2155 __u32 family; /* Allows 4-byte read, but no write */
2156 __u32 type; /* Allows 4-byte read, but no write */
2157 __u32 protocol; /* Allows 4-byte read, but no write */
2158};
2159
40304b2a
LB
2160/* User bpf_sock_ops struct to access socket values and specify request ops
2161 * and their replies.
2162 * Some of these fields are in network (big-endian) byte order and may need
2163 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
2164 * New fields can only be added at the end of this structure
2165 */
2166struct bpf_sock_ops {
2167 __u32 op;
2168 union {
de525be2
LB
2169 __u32 args[4]; /* Optionally passed to bpf program */
2170 __u32 reply; /* Returned by bpf program */
2171 __u32 replylong[4]; /* Optionally returned by bpf prog */
40304b2a
LB
2172 };
2173 __u32 family;
2174 __u32 remote_ip4; /* Stored in network byte order */
2175 __u32 local_ip4; /* Stored in network byte order */
2176 __u32 remote_ip6[4]; /* Stored in network byte order */
2177 __u32 local_ip6[4]; /* Stored in network byte order */
2178 __u32 remote_port; /* Stored in network byte order */
2179 __u32 local_port; /* stored in host byte order */
f19397a5
LB
2180 __u32 is_fullsock; /* Some TCP fields are only valid if
2181 * there is a full socket. If not, the
2182 * fields read as zero.
2183 */
2184 __u32 snd_cwnd;
2185 __u32 srtt_us; /* Averaged RTT << 3 in usecs */
b13d8807 2186 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
44f0e430
LB
2187 __u32 state;
2188 __u32 rtt_min;
2189 __u32 snd_ssthresh;
2190 __u32 rcv_nxt;
2191 __u32 snd_nxt;
2192 __u32 snd_una;
2193 __u32 mss_cache;
2194 __u32 ecn_flags;
2195 __u32 rate_delivered;
2196 __u32 rate_interval_us;
2197 __u32 packets_out;
2198 __u32 retrans_out;
2199 __u32 total_retrans;
2200 __u32 segs_in;
2201 __u32 data_segs_in;
2202 __u32 segs_out;
2203 __u32 data_segs_out;
2204 __u32 lost_out;
2205 __u32 sacked_out;
2206 __u32 sk_txhash;
2207 __u64 bytes_received;
2208 __u64 bytes_acked;
40304b2a
LB
2209};
2210
b13d8807 2211/* Definitions for bpf_sock_ops_cb_flags */
f89013f6 2212#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
a31ad29e 2213#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
d4487491
LB
2214#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
2215#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
b13d8807
LB
2216 * supported cb flags
2217 */
2218
40304b2a
LB
2219/* List of known BPF sock_ops operators.
2220 * New entries can only be added at the end
2221 */
2222enum {
2223 BPF_SOCK_OPS_VOID,
8550f328
LB
2224 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or
2225 * -1 if default value should be used
2226 */
13d3b1eb
LB
2227	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised
2228 * window (in packets) or -1 if default
2229 * value should be used
2230 */
9872a4bd
LB
2231 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an
2232 * active connection is initialized
2233 */
2234 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an
2235 * active connection is
2236 * established
2237 */
2238 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a
2239 * passive connection is
2240 * established
2241 */
91b5b21c
LB
2242 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
2243 * needs ECN
2244 */
e6546ef6
LB
2245 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is
2246 * based on the path and may be
2247 * dependent on the congestion control
2248 * algorithm. In general it indicates
2249 * a congestion threshold. RTTs above
2250 * this indicate congestion
2251 */
f89013f6
LB
2252 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
2253 * Arg1: value of icsk_retransmits
2254 * Arg2: value of icsk_rto
2255 * Arg3: whether RTO has expired
2256 */
a31ad29e
LB
2257 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
2258 * Arg1: sequence number of 1st byte
2259 * Arg2: # segments
2260 * Arg3: return value of
2261 * tcp_transmit_skb (0 => success)
2262 */
d4487491
LB
2263 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
2264 * Arg1: old_state
2265 * Arg2: new_state
2266 */
2267};
2268
2269/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
2270 * changes between the TCP and BPF versions. Ideally this should never happen.
2271 * If it does, we need to add code to convert them before calling
2272 * the BPF sock_ops function.
2273 */
2274enum {
2275 BPF_TCP_ESTABLISHED = 1,
2276 BPF_TCP_SYN_SENT,
2277 BPF_TCP_SYN_RECV,
2278 BPF_TCP_FIN_WAIT1,
2279 BPF_TCP_FIN_WAIT2,
2280 BPF_TCP_TIME_WAIT,
2281 BPF_TCP_CLOSE,
2282 BPF_TCP_CLOSE_WAIT,
2283 BPF_TCP_LAST_ACK,
2284 BPF_TCP_LISTEN,
2285 BPF_TCP_CLOSING, /* Now a valid state */
2286 BPF_TCP_NEW_SYN_RECV,
2287
2288 BPF_TCP_MAX_STATES /* Leave at the end! */
40304b2a
LB
2289};
2290
fc747810 2291#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
13bf9641 2292#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
fc747810 2293
908432ca
YS
2294struct bpf_perf_event_value {
2295 __u64 counter;
2296 __u64 enabled;
2297 __u64 running;
2298};
2299
ebc614f6
RG
2300#define BPF_DEVCG_ACC_MKNOD (1ULL << 0)
2301#define BPF_DEVCG_ACC_READ (1ULL << 1)
2302#define BPF_DEVCG_ACC_WRITE (1ULL << 2)
2303
2304#define BPF_DEVCG_DEV_BLOCK (1ULL << 0)
2305#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
2306
2307struct bpf_cgroup_dev_ctx {
06ef0ccb
YS
2308 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
2309 __u32 access_type;
ebc614f6
RG
2310 __u32 major;
2311 __u32 minor;
2312};
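
/* For illustration only: a BPF_PROG_TYPE_CGROUP_DEVICE program can split the
 * encoded access_type of its context pointer ctx as follows:
 *
 *	__u32 dev_type = ctx->access_type & 0xffff;	(BPF_DEVCG_DEV_*)
 *	__u32 access   = ctx->access_type >> 16;	(BPF_DEVCG_ACC_*)
 */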
2313
c4f6699d
AS
2314struct bpf_raw_tracepoint_args {
2315 __u64 args[0];
2316};
2317
daedfb22 2318#endif /* _UAPI__LINUX_BPF_H__ */