1 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BPF_API__
#define __BPF_API__

/* Note:
 *
 * This file can be included into eBPF kernel programs. It contains
 * a couple of useful helper functions, map/section ABI (bpf_elf.h),
 * misc macros and some eBPF specific LLVM built-ins.
 */
#include <stdint.h>
#include <stdbool.h>

#include <linux/pkt_cls.h>
#include <linux/bpf.h>
#include <linux/filter.h>

#include <asm/byteorder.h>

#include "bpf_elf.h"
/** libbpf pin type. */
enum libbpf_pin_type {
	/* LIBBPF_PIN_NONE: map is not pinned */
	LIBBPF_PIN_NONE,
	/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
	LIBBPF_PIN_BY_NAME,
};
/** Type helper macros. */

/* Encode metadata into a struct member's type: a constant becomes the
 * dimension of a pointer-to-array, a type becomes a pointer to it, so the
 * information survives into debug info without occupying map memory.
 * NOTE(review): mirrors libbpf's BTF map-definition helpers — confirm.
 */
#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]
/** Misc macros. */

/* Stringify the literal argument (no macro expansion of X). */
#ifndef __stringify
# define __stringify(X)		#X
#endif

/* Suppress unused-symbol warnings on the always-emitted helper pointers. */
#ifndef __maybe_unused
# define __maybe_unused		__attribute__((__unused__))
#endif
/* Compiler built-in fallbacks; each is guarded so that definitions from
 * system headers (stddef.h, netinet/in.h, ...) take precedence.
 */
#ifndef offsetof
# define offsetof(TYPE, MEMBER)	__builtin_offsetof(TYPE, MEMBER)
#endif

/* Branch-prediction hints. */
#ifndef likely
# define likely(X)		__builtin_expect(!!(X), 1)
#endif

#ifndef unlikely
# define unlikely(X)		__builtin_expect(!!(X), 0)
#endif

/* Constant-foldable byte-order conversions (from asm/byteorder.h). */
#ifndef htons
# define htons(X)		__constant_htons((X))
#endif

#ifndef ntohs
# define ntohs(X)		__constant_ntohs((X))
#endif

#ifndef htonl
# define htonl(X)		__constant_htonl((X))
#endif

#ifndef ntohl
# define ntohl(X)		__constant_ntohl((X))
#endif

/* Force inlining: eBPF programs cannot use real function calls here. */
#ifndef __inline__
# define __inline__		__attribute__((always_inline))
#endif
/** Section helper macros. */

/* Place a symbol into the named ELF section and mark it used so the
 * compiler cannot discard it.
 */
#ifndef __section
# define __section(NAME)						\
	__attribute__((section(NAME), used))
#endif

/* Tail-call program slot: section named "<map id>/<slot key>". */
#ifndef __section_tail
# define __section_tail(ID, KEY)					\
	__section(__stringify(ID) "/" __stringify(KEY))
#endif

#ifndef __section_xdp_entry
# define __section_xdp_entry						\
	__section(ELF_SECTION_PROG)
#endif

#ifndef __section_cls_entry
# define __section_cls_entry						\
	__section(ELF_SECTION_CLASSIFIER)
#endif

#ifndef __section_act_entry
# define __section_act_entry						\
	__section(ELF_SECTION_ACTION)
#endif

#ifndef __section_lwt_entry
# define __section_lwt_entry						\
	__section(ELF_SECTION_PROG)
#endif

#ifndef __section_license
# define __section_license						\
	__section(ELF_SECTION_LICENSE)
#endif

#ifndef __section_maps
# define __section_maps							\
	__section(ELF_SECTION_MAPS)
#endif
/** Declaration helper macros. */

/* Emit the program license string into the license section; the kernel
 * restricts some helpers to GPL-compatible programs.
 */
#ifndef BPF_LICENSE
# define BPF_LICENSE(NAME)						\
	char ____license[] __section_license = NAME
#endif
/** Classifier helper */

/* Default classifier return value: -1 means "use the default action". */
#ifndef BPF_H_DEFAULT
# define BPF_H_DEFAULT	-1
#endif
/** BPF helper functions for tc. Individual flags are in linux/bpf.h */

/* Declare NAME as a function pointer with the given prototype. */
#ifndef __BPF_FUNC
# define __BPF_FUNC(NAME, ...)						\
	(* NAME)(__VA_ARGS__) __maybe_unused
#endif

/* Declare a typed helper bound to its kernel helper id BPF_FUNC_<NAME>
 * (enum from linux/bpf.h); the verifier resolves the call by that id.
 */
#ifndef BPF_FUNC
# define BPF_FUNC(NAME, ...)						\
	__BPF_FUNC(NAME, __VA_ARGS__) = (void *) BPF_FUNC_##NAME
#endif
144 /* Map access/manipulation */
145 static void *BPF_FUNC(map_lookup_elem
, void *map
, const void *key
);
146 static int BPF_FUNC(map_update_elem
, void *map
, const void *key
,
147 const void *value
, uint32_t flags
);
148 static int BPF_FUNC(map_delete_elem
, void *map
, const void *key
);
151 static uint64_t BPF_FUNC(ktime_get_ns
);
155 /* FIXME: __attribute__ ((format(printf, 1, 3))) not possible unless
156 * llvm bug https://llvm.org/bugs/show_bug.cgi?id=26243 gets resolved.
157 * It would require ____fmt to be made const, which generates a reloc
160 static void BPF_FUNC(trace_printk
, const char *fmt
, int fmt_size
, ...);
163 # define printt(fmt, ...) \
165 char ____fmt[] = fmt; \
166 trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \
171 static uint32_t BPF_FUNC(get_prandom_u32
);
174 static void BPF_FUNC(tail_call
, struct __sk_buff
*skb
, void *map
,
178 static uint32_t BPF_FUNC(get_smp_processor_id
);
179 static uint32_t BPF_FUNC(get_numa_node_id
);
181 /* Packet misc meta data */
182 static uint32_t BPF_FUNC(get_cgroup_classid
, struct __sk_buff
*skb
);
183 static int BPF_FUNC(skb_under_cgroup
, void *map
, uint32_t index
);
185 static uint32_t BPF_FUNC(get_route_realm
, struct __sk_buff
*skb
);
186 static uint32_t BPF_FUNC(get_hash_recalc
, struct __sk_buff
*skb
);
187 static uint32_t BPF_FUNC(set_hash_invalid
, struct __sk_buff
*skb
);
189 /* Packet redirection */
190 static int BPF_FUNC(redirect
, int ifindex
, uint32_t flags
);
191 static int BPF_FUNC(clone_redirect
, struct __sk_buff
*skb
, int ifindex
,
194 /* Packet manipulation */
195 static int BPF_FUNC(skb_load_bytes
, struct __sk_buff
*skb
, uint32_t off
,
196 void *to
, uint32_t len
);
197 static int BPF_FUNC(skb_store_bytes
, struct __sk_buff
*skb
, uint32_t off
,
198 const void *from
, uint32_t len
, uint32_t flags
);
200 static int BPF_FUNC(l3_csum_replace
, struct __sk_buff
*skb
, uint32_t off
,
201 uint32_t from
, uint32_t to
, uint32_t flags
);
202 static int BPF_FUNC(l4_csum_replace
, struct __sk_buff
*skb
, uint32_t off
,
203 uint32_t from
, uint32_t to
, uint32_t flags
);
204 static int BPF_FUNC(csum_diff
, const void *from
, uint32_t from_size
,
205 const void *to
, uint32_t to_size
, uint32_t seed
);
206 static int BPF_FUNC(csum_update
, struct __sk_buff
*skb
, uint32_t wsum
);
208 static int BPF_FUNC(skb_change_type
, struct __sk_buff
*skb
, uint32_t type
);
209 static int BPF_FUNC(skb_change_proto
, struct __sk_buff
*skb
, uint32_t proto
,
211 static int BPF_FUNC(skb_change_tail
, struct __sk_buff
*skb
, uint32_t nlen
,
214 static int BPF_FUNC(skb_pull_data
, struct __sk_buff
*skb
, uint32_t len
);
216 /* Event notification */
217 static int __BPF_FUNC(skb_event_output
, struct __sk_buff
*skb
, void *map
,
218 uint64_t index
, const void *data
, uint32_t size
) =
219 (void *) BPF_FUNC_perf_event_output
;
221 /* Packet vlan encap/decap */
222 static int BPF_FUNC(skb_vlan_push
, struct __sk_buff
*skb
, uint16_t proto
,
224 static int BPF_FUNC(skb_vlan_pop
, struct __sk_buff
*skb
);
226 /* Packet tunnel encap/decap */
227 static int BPF_FUNC(skb_get_tunnel_key
, struct __sk_buff
*skb
,
228 struct bpf_tunnel_key
*to
, uint32_t size
, uint32_t flags
);
229 static int BPF_FUNC(skb_set_tunnel_key
, struct __sk_buff
*skb
,
230 const struct bpf_tunnel_key
*from
, uint32_t size
,
233 static int BPF_FUNC(skb_get_tunnel_opt
, struct __sk_buff
*skb
,
234 void *to
, uint32_t size
);
235 static int BPF_FUNC(skb_set_tunnel_opt
, struct __sk_buff
*skb
,
236 const void *from
, uint32_t size
);
/** LLVM built-ins, mem*() routines work for constant size */

/* Atomic fetch-and-add, result discarded (maps to BPF XADD). */
#ifndef lock_xadd
# define lock_xadd(ptr, val)	((void) __sync_fetch_and_add(ptr, val))
#endif

#ifndef memset
# define memset(s, c, n)	__builtin_memset((s), (c), (n))
#endif

#ifndef memcpy
# define memcpy(d, s, n)	__builtin_memcpy((d), (s), (n))
#endif

#ifndef memmove
# define memmove(d, s, n)	__builtin_memmove((d), (s), (n))
#endif

/* FIXME: __builtin_memcmp() is not yet fully useable unless llvm bug
 * https://llvm.org/bugs/show_bug.cgi?id=26218 gets resolved. Also
 * this one would generate a reloc entry (non-map), otherwise.
 */
#ifndef memcmp
# define memcmp(a, b, n)	__builtin_memcmp((a), (b), (n))
#endif
266 unsigned long long load_byte(void *skb
, unsigned long long off
)
267 asm ("llvm.bpf.load.byte");
269 unsigned long long load_half(void *skb
, unsigned long long off
)
270 asm ("llvm.bpf.load.half");
272 unsigned long long load_word(void *skb
, unsigned long long off
)
273 asm ("llvm.bpf.load.word");
275 #endif /* __BPF_API__ */