/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file can be included into eBPF kernel programs. It contains
 * a couple of useful helper functions, map/section ABI (bpf_elf.h),
 * misc macros and some eBPF specific LLVM built-ins.
 */
14 #include <linux/pkt_cls.h>
15 #include <linux/bpf.h>
16 #include <linux/filter.h>
18 #include <asm/byteorder.h>
/** Misc macros — each wrapped in #ifndef so an including program can
 *  provide its own definition first (matches the rest of this header).
 */

#ifndef __stringify
# define __stringify(X)		#X
#endif

#ifndef __maybe_unused
# define __maybe_unused		__attribute__((__unused__))
#endif

#ifndef offsetof
# define offsetof(TYPE, MEMBER)	__builtin_offsetof(TYPE, MEMBER)
#endif

/* Branch-prediction hints for the compiler. */
#ifndef likely
# define likely(X)		__builtin_expect(!!(X), 1)
#endif

#ifndef unlikely
# define unlikely(X)		__builtin_expect(!!(X), 0)
#endif

/* Byte-order conversion; __constant_* variants fold at compile time
 * (from asm/byteorder.h).
 */
#ifndef htons
# define htons(X)		__constant_htons((X))
#endif

#ifndef ntohs
# define ntohs(X)		__constant_ntohs((X))
#endif

#ifndef htonl
# define htonl(X)		__constant_htonl((X))
#endif

#ifndef ntohl
# define ntohl(X)		__constant_ntohl((X))
#endif

/* eBPF has no real function calls in older LLVM; force inlining. */
#ifndef __inline__
# define __inline__		__attribute__((always_inline))
#endif
/** Section helper macros. Place a symbol into a named ELF section so the
 *  tc/iproute2 ELF loader can find programs, maps and the license string.
 */

#ifndef __section
# define __section(NAME)						\
	__attribute__((section(NAME), used))
#endif

/* Tail-call program slot: section name "<map-id>/<slot-key>". */
#ifndef __section_tail
# define __section_tail(ID, KEY)					\
	__section(__stringify(ID) "/" __stringify(KEY))
#endif

#ifndef __section_xdp_entry
# define __section_xdp_entry						\
	__section(ELF_SECTION_PROG)
#endif

#ifndef __section_cls_entry
# define __section_cls_entry						\
	__section(ELF_SECTION_CLASSIFIER)
#endif

#ifndef __section_act_entry
# define __section_act_entry						\
	__section(ELF_SECTION_ACTION)
#endif

#ifndef __section_lwt_entry
# define __section_lwt_entry						\
	__section(ELF_SECTION_PROG)
#endif

#ifndef __section_license
# define __section_license						\
	__section(ELF_SECTION_LICENSE)
#endif

#ifndef __section_maps
# define __section_maps						\
	__section(ELF_SECTION_MAPS)
#endif
/** Declaration helper macros. */

/* Emits the program license into its own ELF section; GPL-compatible
 * licenses unlock GPL-only kernel helpers.
 */
#ifndef BPF_LICENSE
# define BPF_LICENSE(NAME)						\
	char ____license[] __section_license = NAME
#endif

/** Classifier helper */

/* Default classid handle returned by classifiers. */
#ifndef BPF_H_DEFAULT
# define BPF_H_DEFAULT	-1
#endif
/** BPF helper functions for tc. Individual flags are in linux/bpf.h */

/* Declares NAME as a pointer-to-function of the given prototype. */
#ifndef __BPF_FUNC
# define __BPF_FUNC(NAME, ...)						\
	(* NAME)(__VA_ARGS__) __maybe_unused
#endif

/* Binds the pointer to the in-kernel helper id BPF_FUNC_<NAME>
 * (enum bpf_func_id from linux/bpf.h).
 */
#ifndef BPF_FUNC
# define BPF_FUNC(NAME, ...)						\
	__BPF_FUNC(NAME, __VA_ARGS__) = (void *) BPF_FUNC_##NAME
#endif
131 /* Map access/manipulation */
132 static void *BPF_FUNC(map_lookup_elem
, void *map
, const void *key
);
133 static int BPF_FUNC(map_update_elem
, void *map
, const void *key
,
134 const void *value
, uint32_t flags
);
135 static int BPF_FUNC(map_delete_elem
, void *map
, const void *key
);
138 static uint64_t BPF_FUNC(ktime_get_ns
);
142 /* FIXME: __attribute__ ((format(printf, 1, 3))) not possible unless
143 * llvm bug https://llvm.org/bugs/show_bug.cgi?id=26243 gets resolved.
144 * It would require ____fmt to be made const, which generates a reloc
147 static void BPF_FUNC(trace_printk
, const char *fmt
, int fmt_size
, ...);
150 # define printt(fmt, ...) \
152 char ____fmt[] = fmt; \
153 trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \
158 static uint32_t BPF_FUNC(get_prandom_u32
);
161 static void BPF_FUNC(tail_call
, struct __sk_buff
*skb
, void *map
,
165 static uint32_t BPF_FUNC(get_smp_processor_id
);
166 static uint32_t BPF_FUNC(get_numa_node_id
);
168 /* Packet misc meta data */
169 static uint32_t BPF_FUNC(get_cgroup_classid
, struct __sk_buff
*skb
);
170 static int BPF_FUNC(skb_under_cgroup
, void *map
, uint32_t index
);
172 static uint32_t BPF_FUNC(get_route_realm
, struct __sk_buff
*skb
);
173 static uint32_t BPF_FUNC(get_hash_recalc
, struct __sk_buff
*skb
);
174 static uint32_t BPF_FUNC(set_hash_invalid
, struct __sk_buff
*skb
);
176 /* Packet redirection */
177 static int BPF_FUNC(redirect
, int ifindex
, uint32_t flags
);
178 static int BPF_FUNC(clone_redirect
, struct __sk_buff
*skb
, int ifindex
,
181 /* Packet manipulation */
182 static int BPF_FUNC(skb_load_bytes
, struct __sk_buff
*skb
, uint32_t off
,
183 void *to
, uint32_t len
);
184 static int BPF_FUNC(skb_store_bytes
, struct __sk_buff
*skb
, uint32_t off
,
185 const void *from
, uint32_t len
, uint32_t flags
);
187 static int BPF_FUNC(l3_csum_replace
, struct __sk_buff
*skb
, uint32_t off
,
188 uint32_t from
, uint32_t to
, uint32_t flags
);
189 static int BPF_FUNC(l4_csum_replace
, struct __sk_buff
*skb
, uint32_t off
,
190 uint32_t from
, uint32_t to
, uint32_t flags
);
191 static int BPF_FUNC(csum_diff
, const void *from
, uint32_t from_size
,
192 const void *to
, uint32_t to_size
, uint32_t seed
);
193 static int BPF_FUNC(csum_update
, struct __sk_buff
*skb
, uint32_t wsum
);
195 static int BPF_FUNC(skb_change_type
, struct __sk_buff
*skb
, uint32_t type
);
196 static int BPF_FUNC(skb_change_proto
, struct __sk_buff
*skb
, uint32_t proto
,
198 static int BPF_FUNC(skb_change_tail
, struct __sk_buff
*skb
, uint32_t nlen
,
201 static int BPF_FUNC(skb_pull_data
, struct __sk_buff
*skb
, uint32_t len
);
203 /* Event notification */
204 static int __BPF_FUNC(skb_event_output
, struct __sk_buff
*skb
, void *map
,
205 uint64_t index
, const void *data
, uint32_t size
) =
206 (void *) BPF_FUNC_perf_event_output
;
208 /* Packet vlan encap/decap */
209 static int BPF_FUNC(skb_vlan_push
, struct __sk_buff
*skb
, uint16_t proto
,
211 static int BPF_FUNC(skb_vlan_pop
, struct __sk_buff
*skb
);
213 /* Packet tunnel encap/decap */
214 static int BPF_FUNC(skb_get_tunnel_key
, struct __sk_buff
*skb
,
215 struct bpf_tunnel_key
*to
, uint32_t size
, uint32_t flags
);
216 static int BPF_FUNC(skb_set_tunnel_key
, struct __sk_buff
*skb
,
217 const struct bpf_tunnel_key
*from
, uint32_t size
,
220 static int BPF_FUNC(skb_get_tunnel_opt
, struct __sk_buff
*skb
,
221 void *to
, uint32_t size
);
222 static int BPF_FUNC(skb_set_tunnel_opt
, struct __sk_buff
*skb
,
223 const void *from
, uint32_t size
);
/** LLVM built-ins, mem*() routines work for constant size */

/* Atomic add; result discarded since eBPF cannot use the fetched value. */
#ifndef lock_xadd
# define lock_xadd(ptr, val)	((void) __sync_fetch_and_add(ptr, val))
#endif

#ifndef memset
# define memset(s, c, n)	__builtin_memset((s), (c), (n))
#endif

#ifndef memcpy
# define memcpy(d, s, n)	__builtin_memcpy((d), (s), (n))
#endif

#ifndef memmove
# define memmove(d, s, n)	__builtin_memmove((d), (s), (n))
#endif

/* FIXME: __builtin_memcmp() is not yet fully useable unless llvm bug
 * https://llvm.org/bugs/show_bug.cgi?id=26218 gets resolved. Also
 * this one would generate a reloc entry (non-map), otherwise.
 */
#ifndef memcmp
# define memcmp(a, b, n)	__builtin_memcmp((a), (b), (n))
#endif
253 unsigned long long load_byte(void *skb
, unsigned long long off
)
254 asm ("llvm.bpf.load.byte");
256 unsigned long long load_half(void *skb
, unsigned long long off
)
257 asm ("llvm.bpf.load.half");
259 unsigned long long load_word(void *skb
, unsigned long long off
)
260 asm ("llvm.bpf.load.word");
262 #endif /* __BPF_API__ */