/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>

#include <net/sch_generic.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
struct sock_reuseport;

/* ArgX, context and stack frame pointer register positions. Note that
 * Arg1, Arg2, Arg3, etc. are used as argument mappings for function
 * calls made via the BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_2	/* scratch reg */
#define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
#define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */

/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX		MAX_BPF_REG
#define MAX_BPF_EXT_REG		(MAX_BPF_REG + 1)
#define MAX_BPF_JIT_REG		MAX_BPF_EXT_REG

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL	0xf0

/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS	0xe0

/* As per nm, we expose JITed images as text (code) section for
 * kallsyms. That way, tools like perf can find it to match
 * addresses.
 */
#define BPF_SYM_ELF_TYPE	't'

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
115 | ||
116 | #define BPF_ENDIAN(TYPE, DST, LEN) \ | |
117 | ((struct bpf_insn) { \ | |
118 | .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \ | |
119 | .dst_reg = DST, \ | |
120 | .src_reg = 0, \ | |
121 | .off = 0, \ | |
122 | .imm = LEN }) | |
123 | ||
124 | /* Short form of mov, dst_reg = src_reg */ | |
125 | ||
126 | #define BPF_MOV64_REG(DST, SRC) \ | |
127 | ((struct bpf_insn) { \ | |
128 | .code = BPF_ALU64 | BPF_MOV | BPF_X, \ | |
129 | .dst_reg = DST, \ | |
130 | .src_reg = SRC, \ | |
131 | .off = 0, \ | |
132 | .imm = 0 }) | |
133 | ||
134 | #define BPF_MOV32_REG(DST, SRC) \ | |
135 | ((struct bpf_insn) { \ | |
136 | .code = BPF_ALU | BPF_MOV | BPF_X, \ | |
137 | .dst_reg = DST, \ | |
138 | .src_reg = SRC, \ | |
139 | .off = 0, \ | |
140 | .imm = 0 }) | |
141 | ||
142 | /* Short form of mov, dst_reg = imm32 */ | |
143 | ||
144 | #define BPF_MOV64_IMM(DST, IMM) \ | |
145 | ((struct bpf_insn) { \ | |
146 | .code = BPF_ALU64 | BPF_MOV | BPF_K, \ | |
147 | .dst_reg = DST, \ | |
148 | .src_reg = 0, \ | |
149 | .off = 0, \ | |
150 | .imm = IMM }) | |
151 | ||
152 | #define BPF_MOV32_IMM(DST, IMM) \ | |
153 | ((struct bpf_insn) { \ | |
154 | .code = BPF_ALU | BPF_MOV | BPF_K, \ | |
155 | .dst_reg = DST, \ | |
156 | .src_reg = 0, \ | |
157 | .off = 0, \ | |
158 | .imm = IMM }) | |
159 | ||
/* The BPF_LD_IMM64 macro encodes a single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
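
/* Usage sketch (illustrative only, not part of the original header): unlike
 * the other macros here, BPF_LD_MAP_FD() expands to *two* struct bpf_insn
 * initializers and therefore occupies two slots in an instruction array.
 * Assuming 'map_fd' is a valid process-local map file descriptor, a program
 * passing a map to a helper might start like this:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_LD_MAP_FD(BPF_REG_1, map_fd),	// takes two insn slots
 *		...
 *	};
 */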
181 | ||
182 | /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ | |
183 | ||
184 | #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ | |
185 | ((struct bpf_insn) { \ | |
186 | .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \ | |
187 | .dst_reg = DST, \ | |
188 | .src_reg = SRC, \ | |
189 | .off = 0, \ | |
190 | .imm = IMM }) | |
191 | ||
192 | #define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \ | |
193 | ((struct bpf_insn) { \ | |
194 | .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \ | |
195 | .dst_reg = DST, \ | |
196 | .src_reg = SRC, \ | |
197 | .off = 0, \ | |
198 | .imm = IMM }) | |
199 | ||
200 | /* Direct packet access, R0 = *(uint *) (skb->data + imm32) */ | |
201 | ||
202 | #define BPF_LD_ABS(SIZE, IMM) \ | |
203 | ((struct bpf_insn) { \ | |
204 | .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ | |
205 | .dst_reg = 0, \ | |
206 | .src_reg = 0, \ | |
207 | .off = 0, \ | |
208 | .imm = IMM }) | |
209 | ||
210 | /* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */ | |
211 | ||
212 | #define BPF_LD_IND(SIZE, SRC, IMM) \ | |
213 | ((struct bpf_insn) { \ | |
214 | .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \ | |
215 | .dst_reg = 0, \ | |
216 | .src_reg = SRC, \ | |
217 | .off = 0, \ | |
218 | .imm = IMM }) | |
219 | ||
220 | /* Memory load, dst_reg = *(uint *) (src_reg + off16) */ | |
221 | ||
222 | #define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ | |
223 | ((struct bpf_insn) { \ | |
224 | .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ | |
225 | .dst_reg = DST, \ | |
226 | .src_reg = SRC, \ | |
227 | .off = OFF, \ | |
228 | .imm = 0 }) | |
229 | ||
230 | /* Memory store, *(uint *) (dst_reg + off16) = src_reg */ | |
231 | ||
232 | #define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ | |
233 | ((struct bpf_insn) { \ | |
234 | .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ | |
235 | .dst_reg = DST, \ | |
236 | .src_reg = SRC, \ | |
237 | .off = OFF, \ | |
238 | .imm = 0 }) | |
239 | ||
240 | /* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */ | |
241 | ||
242 | #define BPF_STX_XADD(SIZE, DST, SRC, OFF) \ | |
243 | ((struct bpf_insn) { \ | |
244 | .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \ | |
245 | .dst_reg = DST, \ | |
246 | .src_reg = SRC, \ | |
247 | .off = OFF, \ | |
248 | .imm = 0 }) | |
249 | ||
250 | /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ | |
251 | ||
252 | #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ | |
253 | ((struct bpf_insn) { \ | |
254 | .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ | |
255 | .dst_reg = DST, \ | |
256 | .src_reg = 0, \ | |
257 | .off = OFF, \ | |
258 | .imm = IMM }) | |
259 | ||
260 | /* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */ | |
261 | ||
262 | #define BPF_JMP_REG(OP, DST, SRC, OFF) \ | |
263 | ((struct bpf_insn) { \ | |
264 | .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ | |
265 | .dst_reg = DST, \ | |
266 | .src_reg = SRC, \ | |
267 | .off = OFF, \ | |
268 | .imm = 0 }) | |
269 | ||
270 | /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ | |
271 | ||
272 | #define BPF_JMP_IMM(OP, DST, IMM, OFF) \ | |
273 | ((struct bpf_insn) { \ | |
274 | .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ | |
275 | .dst_reg = DST, \ | |
276 | .src_reg = 0, \ | |
277 | .off = OFF, \ | |
278 | .imm = IMM }) | |
279 | ||
280 | /* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ | |
281 | ||
282 | #define BPF_JMP32_REG(OP, DST, SRC, OFF) \ | |
283 | ((struct bpf_insn) { \ | |
284 | .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ | |
285 | .dst_reg = DST, \ | |
286 | .src_reg = SRC, \ | |
287 | .off = OFF, \ | |
288 | .imm = 0 }) | |
289 | ||
290 | /* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */ | |
291 | ||
292 | #define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ | |
293 | ((struct bpf_insn) { \ | |
294 | .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ | |
295 | .dst_reg = DST, \ | |
296 | .src_reg = 0, \ | |
297 | .off = OFF, \ | |
298 | .imm = IMM }) | |
299 | ||
300 | /* Unconditional jumps, goto pc + off16 */ | |
301 | ||
302 | #define BPF_JMP_A(OFF) \ | |
303 | ((struct bpf_insn) { \ | |
304 | .code = BPF_JMP | BPF_JA, \ | |
305 | .dst_reg = 0, \ | |
306 | .src_reg = 0, \ | |
307 | .off = OFF, \ | |
308 | .imm = 0 }) | |
309 | ||
310 | /* Relative call */ | |
311 | ||
312 | #define BPF_CALL_REL(TGT) \ | |
313 | ((struct bpf_insn) { \ | |
314 | .code = BPF_JMP | BPF_CALL, \ | |
315 | .dst_reg = 0, \ | |
316 | .src_reg = BPF_PSEUDO_CALL, \ | |
317 | .off = 0, \ | |
318 | .imm = TGT }) | |
319 | ||
320 | /* Function call */ | |
321 | ||
322 | #define BPF_CAST_CALL(x) \ | |
323 | ((u64 (*)(u64, u64, u64, u64, u64))(x)) | |
324 | ||
325 | #define BPF_EMIT_CALL(FUNC) \ | |
326 | ((struct bpf_insn) { \ | |
327 | .code = BPF_JMP | BPF_CALL, \ | |
328 | .dst_reg = 0, \ | |
329 | .src_reg = 0, \ | |
330 | .off = 0, \ | |
331 | .imm = ((FUNC) - __bpf_call_base) }) | |
332 | ||
333 | /* Raw code statement block */ | |
334 | ||
335 | #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ | |
336 | ((struct bpf_insn) { \ | |
337 | .code = CODE, \ | |
338 | .dst_reg = DST, \ | |
339 | .src_reg = SRC, \ | |
340 | .off = OFF, \ | |
341 | .imm = IMM }) | |
342 | ||
343 | /* Program exit */ | |
344 | ||
345 | #define BPF_EXIT_INSN() \ | |
346 | ((struct bpf_insn) { \ | |
347 | .code = BPF_JMP | BPF_EXIT, \ | |
348 | .dst_reg = 0, \ | |
349 | .src_reg = 0, \ | |
350 | .off = 0, \ | |
351 | .imm = 0 }) | |
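
/* Usage sketch (illustrative only): the macros above are designed-initializer
 * building blocks for arrays of struct bpf_insn, i.e. hand-written eBPF
 * programs inside the kernel or its test infrastructure. A tiny program that
 * always returns 3 could be spelled as:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),		// R0 = 1
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),	// R0 += 2
 *		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 1),	// if (R0 == 3) skip next
 *		BPF_MOV64_IMM(BPF_REG_0, 0),		// R0 = 0 (not reached here)
 *		BPF_EXIT_INSN(),			// return R0
 *	};
 */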
352 | ||
353 | /* Internal classic blocks for direct assignment */ | |
354 | ||
355 | #define __BPF_STMT(CODE, K) \ | |
356 | ((struct sock_filter) BPF_STMT(CODE, K)) | |
357 | ||
358 | #define __BPF_JUMP(CODE, K, JT, JF) \ | |
359 | ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF)) | |
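
/* Usage sketch (illustrative only): __BPF_STMT()/__BPF_JUMP() produce classic
 * BPF blocks as compound literals, so fixed classic filters can be assigned
 * directly inside the kernel. The classic "accept the whole packet" filter,
 * for instance, is a single statement (array name is hypothetical):
 *
 *	static struct sock_filter accept_all[] = {
 *		__BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 */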
360 | ||
361 | #define bytes_to_bpf_size(bytes) \ | |
362 | ({ \ | |
363 | int bpf_size = -EINVAL; \ | |
364 | \ | |
365 | if (bytes == sizeof(u8)) \ | |
366 | bpf_size = BPF_B; \ | |
367 | else if (bytes == sizeof(u16)) \ | |
368 | bpf_size = BPF_H; \ | |
369 | else if (bytes == sizeof(u32)) \ | |
370 | bpf_size = BPF_W; \ | |
371 | else if (bytes == sizeof(u64)) \ | |
372 | bpf_size = BPF_DW; \ | |
373 | \ | |
374 | bpf_size; \ | |
375 | }) | |
376 | ||
377 | #define bpf_size_to_bytes(bpf_size) \ | |
378 | ({ \ | |
379 | int bytes = -EINVAL; \ | |
380 | \ | |
381 | if (bpf_size == BPF_B) \ | |
382 | bytes = sizeof(u8); \ | |
383 | else if (bpf_size == BPF_H) \ | |
384 | bytes = sizeof(u16); \ | |
385 | else if (bpf_size == BPF_W) \ | |
386 | bytes = sizeof(u32); \ | |
387 | else if (bpf_size == BPF_DW) \ | |
388 | bytes = sizeof(u64); \ | |
389 | \ | |
390 | bytes; \ | |
391 | }) | |
392 | ||
393 | #define BPF_SIZEOF(type) \ | |
394 | ({ \ | |
395 | const int __size = bytes_to_bpf_size(sizeof(type)); \ | |
396 | BUILD_BUG_ON(__size < 0); \ | |
397 | __size; \ | |
398 | }) | |
399 | ||
400 | #define BPF_FIELD_SIZEOF(type, field) \ | |
401 | ({ \ | |
402 | const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \ | |
403 | BUILD_BUG_ON(__size < 0); \ | |
404 | __size; \ | |
405 | }) | |
406 | ||
407 | #define BPF_LDST_BYTES(insn) \ | |
408 | ({ \ | |
409 | const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \ | |
410 | WARN_ON(__size < 0); \ | |
411 | __size; \ | |
412 | }) | |
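
/* Quick reference (illustrative): bytes_to_bpf_size() maps 1/2/4/8 byte widths
 * to BPF_B/BPF_H/BPF_W/BPF_DW, bpf_size_to_bytes() is its exact inverse, and
 * both yield -EINVAL for any other width. The wrappers turn that into
 * compile-time checked lookups, e.g. (skb->mark being a 32-bit field):
 *
 *	const int sz = BPF_FIELD_SIZEOF(struct sk_buff, mark);	// == BPF_W
 */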
413 | ||
414 | #define __BPF_MAP_0(m, v, ...) v | |
415 | #define __BPF_MAP_1(m, v, t, a, ...) m(t, a) | |
416 | #define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__) | |
417 | #define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__) | |
418 | #define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__) | |
419 | #define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__) | |
420 | ||
421 | #define __BPF_REG_0(...) __BPF_PAD(5) | |
422 | #define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4) | |
423 | #define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3) | |
424 | #define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2) | |
425 | #define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1) | |
426 | #define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__) | |
427 | ||
428 | #define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__) | |
429 | #define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__) | |
430 | ||
431 | #define __BPF_CAST(t, a) \ | |
432 | (__force t) \ | |
433 | (__force \ | |
434 | typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \ | |
435 | (unsigned long)0, (t)0))) a | |
436 | #define __BPF_V void | |
437 | #define __BPF_N | |
438 | ||
439 | #define __BPF_DECL_ARGS(t, a) t a | |
440 | #define __BPF_DECL_REGS(t, a) u64 a | |
441 | ||
442 | #define __BPF_PAD(n) \ | |
443 | __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \ | |
444 | u64, __ur_3, u64, __ur_4, u64, __ur_5) | |
445 | ||
446 | #define BPF_CALL_x(x, name, ...) \ | |
447 | static __always_inline \ | |
448 | u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ | |
449 | u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \ | |
450 | u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \ | |
451 | { \ | |
452 | return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\ | |
453 | } \ | |
454 | static __always_inline \ | |
455 | u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)) | |
456 | ||
457 | #define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__) | |
458 | #define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__) | |
459 | #define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__) | |
460 | #define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__) | |
461 | #define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__) | |
462 | #define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__) | |
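
/* Sketch of defining a helper with the BPF_CALL_x() wrappers (the helper name
 * and body below are purely illustrative): BPF_CALL_2() emits a function
 * 'name' taking five u64 arguments per the BPF calling convention (the real
 * ones plus unused padding), which casts them to the natively typed
 * parameters of an inlined '____name' body:
 *
 *	BPF_CALL_2(bpf_example_sum, u32, a, u32, b)
 *	{
 *		return a + b;
 *	}
 *
 * The resulting 'bpf_example_sum' is what a struct bpf_func_proto's ->func
 * member would point at.
 */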
463 | ||
464 | #define bpf_ctx_range(TYPE, MEMBER) \ | |
465 | offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 | |
466 | #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ | |
467 | offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 | |
468 | #if BITS_PER_LONG == 64 | |
469 | # define bpf_ctx_range_ptr(TYPE, MEMBER) \ | |
470 | offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 | |
471 | #else | |
472 | # define bpf_ctx_range_ptr(TYPE, MEMBER) \ | |
473 | offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1 | |
474 | #endif /* BITS_PER_LONG == 64 */ | |
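
/* Sketch of how the range helpers are used (illustrative): they expand to GCC
 * case ranges, so an is_valid_access() callback can match a whole context
 * field in one switch case, e.g.:
 *
 *	switch (off) {
 *	case bpf_ctx_range(struct __sk_buff, data):
 *		...
 *	case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
 *		...
 *	}
 */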
475 | ||
476 | #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ | |
477 | ({ \ | |
478 | BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \ | |
479 | *(PTR_SIZE) = (SIZE); \ | |
480 | offsetof(TYPE, MEMBER); \ | |
481 | }) | |
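
/* Sketch of a typical caller (names are illustrative; this mirrors the pattern
 * used by convert_ctx_access() implementations): bpf_target_off() both
 * sanity-checks the real field size at build time and reports it back through
 * the size pointer, so the verifier can rewrite narrow loads:
 *
 *	case offsetof(struct __sk_buff, mark):
 *		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
 *				      bpf_target_off(struct sk_buff, mark, 4,
 *						     target_size));
 *		break;
 */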
482 | ||
483 | #ifdef CONFIG_COMPAT | |
484 | /* A struct sock_filter is architecture independent. */ | |
485 | struct compat_sock_fprog { | |
486 | u16 len; | |
487 | compat_uptr_t filter; /* struct sock_filter * */ | |
488 | }; | |
489 | #endif | |
490 | ||
491 | struct sock_fprog_kern { | |
492 | u16 len; | |
493 | struct sock_filter *filter; | |
494 | }; | |
495 | ||
496 | struct bpf_binary_header { | |
497 | u32 pages; | |
498 | /* Some arches need word alignment for their instructions */ | |
499 | u8 image[] __aligned(4); | |
500 | }; | |
501 | ||
502 | struct bpf_prog { | |
503 | u16 pages; /* Number of allocated pages */ | |
504 | u16 jited:1, /* Is our filter JIT'ed? */ | |
505 | jit_requested:1,/* archs need to JIT the prog */ | |
506 | undo_set_mem:1, /* Passed set_memory_ro() checkpoint */ | |
507 | gpl_compatible:1, /* Is filter GPL compatible? */ | |
508 | cb_access:1, /* Is control block accessed? */ | |
509 | dst_needed:1, /* Do we need dst entry? */ | |
510 | blinded:1, /* Was blinded */ | |
511 | is_func:1, /* program is a bpf function */ | |
512 | kprobe_override:1, /* Do we override a kprobe? */ | |
513 | has_callchain_buf:1; /* callchain buffer allocated? */ | |
514 | enum bpf_prog_type type; /* Type of BPF program */ | |
515 | enum bpf_attach_type expected_attach_type; /* For some prog types */ | |
516 | u32 len; /* Number of filter blocks */ | |
517 | u32 jited_len; /* Size of jited insns in bytes */ | |
518 | u8 tag[BPF_TAG_SIZE]; | |
519 | struct bpf_prog_aux *aux; /* Auxiliary fields */ | |
520 | struct sock_fprog_kern *orig_prog; /* Original BPF program */ | |
521 | unsigned int (*bpf_func)(const void *ctx, | |
522 | const struct bpf_insn *insn); | |
523 | /* Instructions for interpreter */ | |
524 | union { | |
525 | struct sock_filter insns[0]; | |
526 | struct bpf_insn insnsi[0]; | |
527 | }; | |
528 | }; | |
529 | ||
530 | struct sk_filter { | |
531 | refcount_t refcnt; | |
532 | struct rcu_head rcu; | |
533 | struct bpf_prog *prog; | |
534 | }; | |
535 | ||
536 | DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); | |
537 | ||
538 | #define BPF_PROG_RUN(prog, ctx) ({ \ | |
539 | u32 ret; \ | |
540 | cant_sleep(); \ | |
541 | if (static_branch_unlikely(&bpf_stats_enabled_key)) { \ | |
542 | struct bpf_prog_stats *stats; \ | |
543 | u64 start = sched_clock(); \ | |
544 | ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \ | |
545 | stats = this_cpu_ptr(prog->aux->stats); \ | |
546 | u64_stats_update_begin(&stats->syncp); \ | |
547 | stats->cnt++; \ | |
548 | stats->nsecs += sched_clock() - start; \ | |
549 | u64_stats_update_end(&stats->syncp); \ | |
550 | } else { \ | |
551 | ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \ | |
552 | } \ | |
553 | ret; }) | |
554 | ||
555 | #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN | |
556 | ||
557 | struct bpf_skb_data_end { | |
558 | struct qdisc_skb_cb qdisc_cb; | |
559 | void *data_meta; | |
560 | void *data_end; | |
561 | }; | |
562 | ||
563 | struct bpf_redirect_info { | |
564 | u32 ifindex; | |
565 | u32 flags; | |
566 | struct bpf_map *map; | |
567 | struct bpf_map *map_to_flush; | |
568 | u32 kern_flags; | |
569 | }; | |
570 | ||
571 | DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); | |
572 | ||
573 | /* flags for bpf_redirect_info kern_flags */ | |
574 | #define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */ | |
575 | ||
576 | /* Compute the linear packet data range [data, data_end) which | |
577 | * will be accessed by various program types (cls_bpf, act_bpf, | |
578 | * lwt, ...). Subsystems allowing direct data access must (!) | |
579 | * ensure that cb[] area can be written to when BPF program is | |
580 | * invoked (otherwise cb[] save/restore is necessary). | |
581 | */ | |
582 | static inline void bpf_compute_data_pointers(struct sk_buff *skb) | |
583 | { | |
584 | struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; | |
585 | ||
586 | BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); | |
587 | cb->data_meta = skb->data - skb_metadata_len(skb); | |
588 | cb->data_end = skb->data + skb_headlen(skb); | |
589 | } | |
590 | ||
/* Similar to bpf_compute_data_pointers(), except that the original
 * cb->data_end is saved in *saved_data_end so that it can be restored
 * later via bpf_restore_data_end().
 */
static inline void bpf_compute_and_save_data_end(
	struct sk_buff *skb, void **saved_data_end)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	*saved_data_end = cb->data_end;
	cb->data_end  = skb->data + skb_headlen(skb);
}

/* Restore the data_end pointer saved by bpf_compute_and_save_data_end(). */
static inline void bpf_restore_data_end(
	struct sk_buff *skb, void *saved_data_end)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	cb->data_end = saved_data_end;
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer meta
	 * data between tail calls. Since this also needs to work with
	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to be
	 * saved/restored so that protocol specific skb->cb[] data won't
	 * be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
		     FIELD_SIZEOF(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}

static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
					 struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = BPF_PROG_RUN(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u32 res;

	preempt_disable();
	res = __bpf_prog_run_save_cb(prog, skb);
	preempt_enable();
	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u32 res;

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	preempt_disable();
	res = BPF_PROG_RUN(prog, skb);
	preempt_enable();
	return res;
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
					    struct xdp_buff *xdp)
{
	/* Caller needs to hold rcu_read_lock() (!), otherwise program
	 * can be released while still running, or map elements could be
	 * freed early while still having concurrent users. XDP fastpath
	 * already takes rcu_read_lock() when fetching the program, so
	 * it's not necessary here anymore.
	 */
	return BPF_PROG_RUN(prog, xdp);
}

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
	return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
	return round_up(bpf_prog_insn_size(prog) +
			sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}
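
/* Sketch of typical use (illustrative): when converting a classic filter of
 * fprog->len blocks, the allocation has to cover the bpf_prog header plus the
 * instruction array, e.g.:
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
 */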
705 | ||
706 | static inline bool bpf_prog_was_classic(const struct bpf_prog *prog) | |
707 | { | |
708 | /* When classic BPF programs have been loaded and the arch | |
709 | * does not have a classic BPF JIT (anymore), they have been | |
710 | * converted via bpf_migrate_filter() to eBPF and thus always | |
711 | * have an unspec program type. | |
712 | */ | |
713 | return prog->type == BPF_PROG_TYPE_UNSPEC; | |
714 | } | |
715 | ||
716 | static inline u32 bpf_ctx_off_adjust_machine(u32 size) | |
717 | { | |
718 | const u32 size_machine = sizeof(unsigned long); | |
719 | ||
720 | if (size > size_machine && size % size_machine == 0) | |
721 | size = size_machine; | |
722 | ||
723 | return size; | |
724 | } | |
725 | ||
726 | static inline bool | |
727 | bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) | |
728 | { | |
729 | return size <= size_default && (size & (size - 1)) == 0; | |
730 | } | |
731 | ||
732 | #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) | |
733 | ||
734 | static inline void bpf_prog_lock_ro(struct bpf_prog *fp) | |
735 | { | |
736 | fp->undo_set_mem = 1; | |
737 | set_memory_ro((unsigned long)fp, fp->pages); | |
738 | } | |
739 | ||
740 | static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) | |
741 | { | |
742 | if (fp->undo_set_mem) | |
743 | set_memory_rw((unsigned long)fp, fp->pages); | |
744 | } | |
745 | ||
746 | static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) | |
747 | { | |
748 | set_memory_ro((unsigned long)hdr, hdr->pages); | |
749 | } | |
750 | ||
751 | static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) | |
752 | { | |
753 | set_memory_rw((unsigned long)hdr, hdr->pages); | |
754 | } | |
755 | ||
756 | static inline struct bpf_binary_header * | |
757 | bpf_jit_binary_hdr(const struct bpf_prog *fp) | |
758 | { | |
759 | unsigned long real_start = (unsigned long)fp->bpf_func; | |
760 | unsigned long addr = real_start & PAGE_MASK; | |
761 | ||
762 | return (void *)addr; | |
763 | } | |
764 | ||
765 | int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); | |
766 | static inline int sk_filter(struct sock *sk, struct sk_buff *skb) | |
767 | { | |
768 | return sk_filter_trim_cap(sk, skb, 1); | |
769 | } | |
770 | ||
771 | struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); | |
772 | void bpf_prog_free(struct bpf_prog *fp); | |
773 | ||
774 | bool bpf_opcode_in_insntable(u8 code); | |
775 | ||
776 | void bpf_prog_free_linfo(struct bpf_prog *prog); | |
777 | void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, | |
778 | const u32 *insn_to_jit_off); | |
779 | int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog); | |
780 | void bpf_prog_free_jited_linfo(struct bpf_prog *prog); | |
781 | void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog); | |
782 | ||
783 | struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); | |
784 | struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags); | |
785 | struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, | |
786 | gfp_t gfp_extra_flags); | |
787 | void __bpf_prog_free(struct bpf_prog *fp); | |
788 | ||
789 | static inline void bpf_prog_unlock_free(struct bpf_prog *fp) | |
790 | { | |
791 | bpf_prog_unlock_ro(fp); | |
792 | __bpf_prog_free(fp); | |
793 | } | |
794 | ||
795 | typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter, | |
796 | unsigned int flen); | |
797 | ||
798 | int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); | |
799 | int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, | |
800 | bpf_aux_classic_check_t trans, bool save_orig); | |
801 | void bpf_prog_destroy(struct bpf_prog *fp); | |
802 | ||
803 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); | |
804 | int sk_attach_bpf(u32 ufd, struct sock *sk); | |
805 | int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); | |
806 | int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); | |
807 | void sk_reuseport_prog_free(struct bpf_prog *prog); | |
808 | int sk_detach_filter(struct sock *sk); | |
809 | int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, | |
810 | unsigned int len); | |
811 | ||
812 | bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); | |
813 | void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); | |
814 | ||
815 | u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | |
816 | #define __bpf_call_base_args \ | |
817 | ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ | |
818 | __bpf_call_base) | |
819 | ||
820 | struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); | |
821 | void bpf_jit_compile(struct bpf_prog *prog); | |
822 | bool bpf_helper_changes_pkt_data(void *func); | |
823 | ||
824 | static inline bool bpf_dump_raw_ok(void) | |
825 | { | |
	/* Reconstruction of call-sites is dependent on kallsyms,
	 * so subject the dump to the same restriction.
	 */
	return kallsyms_show_value() == 1;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);

void bpf_clear_redirect_map(struct bpf_map *map);

static inline bool xdp_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_set_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_clear_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
}

static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
				 unsigned int pktlen)
{
	unsigned int len;

	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (pktlen > len)
		return -EMSGSIZE;

	return 0;
}

/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
 * same cpu context. Further, for best results, no more than a single map
 * should be used for the do_redirect/do_flush pair. This limitation exists
 * because we only track one map and force a flush when the map changes.
 * This does not appear to be a real limitation for existing software.
 */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct xdp_buff *xdp, struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
		    struct xdp_buff *xdp,
		    struct bpf_prog *prog);
void xdp_do_flush_map(void);
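
/* Sketch of the expected driver-side pattern (illustrative, simplified from
 * what XDP-capable drivers do in their NAPI poll loop): run the program, hand
 * XDP_REDIRECT results to xdp_do_redirect(), and flush once per poll cycle so
 * that batched map redirects actually leave the CPU:
 *
 *	act = bpf_prog_run_xdp(prog, &xdp);
 *	switch (act) {
 *	case XDP_REDIRECT:
 *		if (xdp_do_redirect(dev, &xdp, prog))
 *			goto drop;
 *		break;
 *	...
 *	}
 *	...
 *	xdp_do_flush_map();	// once, at the end of the poll cycle
 */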
886 | ||
887 | void bpf_warn_invalid_xdp_action(u32 act); | |
888 | ||
889 | #ifdef CONFIG_INET | |
890 | struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, | |
891 | struct bpf_prog *prog, struct sk_buff *skb, | |
892 | u32 hash); | |
893 | #else | |
894 | static inline struct sock * | |
895 | bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, | |
896 | struct bpf_prog *prog, struct sk_buff *skb, | |
897 | u32 hash) | |
898 | { | |
899 | return NULL; | |
900 | } | |
901 | #endif | |
902 | ||
903 | #ifdef CONFIG_BPF_JIT | |
904 | extern int bpf_jit_enable; | |
905 | extern int bpf_jit_harden; | |
906 | extern int bpf_jit_kallsyms; | |
907 | extern long bpf_jit_limit; | |
908 | ||
909 | typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); | |
910 | ||
911 | struct bpf_binary_header * | |
912 | bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, | |
913 | unsigned int alignment, | |
914 | bpf_jit_fill_hole_t bpf_fill_ill_insns); | |
915 | void bpf_jit_binary_free(struct bpf_binary_header *hdr); | |
916 | u64 bpf_jit_alloc_exec_limit(void); | |
917 | void *bpf_jit_alloc_exec(unsigned long size); | |
918 | void bpf_jit_free_exec(void *addr); | |
919 | void bpf_jit_free(struct bpf_prog *fp); | |
920 | ||
921 | int bpf_jit_get_func_addr(const struct bpf_prog *prog, | |
922 | const struct bpf_insn *insn, bool extra_pass, | |
923 | u64 *func_addr, bool *func_addr_fixed); | |
924 | ||
925 | struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); | |
926 | void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); | |
927 | ||
928 | static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, | |
929 | u32 pass, void *image) | |
930 | { | |
931 | pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen, | |
932 | proglen, pass, image, current->comm, task_pid_nr(current)); | |
933 | ||
934 | if (image) | |
935 | print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, | |
936 | 16, 1, image, proglen, false); | |
937 | } | |
938 | ||
939 | static inline bool bpf_jit_is_ebpf(void) | |
940 | { | |
941 | # ifdef CONFIG_HAVE_EBPF_JIT | |
942 | return true; | |
943 | # else | |
944 | return false; | |
945 | # endif | |
946 | } | |
947 | ||
948 | static inline bool ebpf_jit_enabled(void) | |
949 | { | |
950 | return bpf_jit_enable && bpf_jit_is_ebpf(); | |
951 | } | |
952 | ||
953 | static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) | |
954 | { | |
955 | return fp->jited && bpf_jit_is_ebpf(); | |
956 | } | |
957 | ||
958 | static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) | |
959 | { | |
	/* These are the prerequisites; should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!prog->jit_requested)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
		return false;

	return true;
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	/* There are a couple of corner cases where kallsyms should
	 * not be enabled, e.g. when hardening is active.
	 */
	if (bpf_jit_harden)
		return false;
	if (!bpf_jit_kallsyms)
		return false;
	if (bpf_jit_kallsyms == 1)
		return true;

	return false;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym);

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	const char *ret = __bpf_address_lookup(addr, size, off, sym);

	if (ret && modname)
		*modname = NULL;
	return ret;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);
void bpf_get_prog_name(const struct bpf_prog *prog, char *sym);

#else /* CONFIG_BPF_JIT */

static inline bool ebpf_jit_enabled(void)
{
	return false;
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return false;
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	return false;
}

static inline const char *
__bpf_address_lookup(unsigned long addr, unsigned long *size,
		     unsigned long *off, char *sym)
{
	return NULL;
}

static inline bool is_bpf_text_address(unsigned long addr)
{
	return false;
}

static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
				  char *type, char *sym)
{
	return -ERANGE;
}

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}

static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}

static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}

static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	sym[0] = '\0';
}

#endif /* CONFIG_BPF_JIT */

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
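
/* Sketch of use (illustrative): negative offsets select a special base, so
 * classic-BPF style loads can reach the link-layer or network header
 * regardless of where skb->data currently points, e.g.:
 *
 *	u8 buf;
 *	u8 *p = bpf_load_pointer(skb, SKF_NET_OFF + 9, sizeof(buf), &buf);
 *	// p, if non-NULL, points at the IPv4 protocol field (assuming an
 *	// IPv4 packet with the network header offset set)
 */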
1143 | ||
1144 | static inline int bpf_tell_extensions(void) | |
1145 | { | |
1146 | return SKF_AD_MAX; | |
1147 | } | |
1148 | ||
1149 | struct bpf_sock_addr_kern { | |
1150 | struct sock *sk; | |
1151 | struct sockaddr *uaddr; | |
1152 | /* Temporary "register" to make indirect stores to nested structures | |
1153 | * defined above. We need three registers to make such a store, but | |
1154 | * only two (src and dst) are available at convert_ctx_access time | |
1155 | */ | |
1156 | u64 tmp_reg; | |
1157 | void *t_ctx; /* Attach type specific context. */ | |
1158 | }; | |
1159 | ||
1160 | struct bpf_sock_ops_kern { | |
1161 | struct sock *sk; | |
1162 | u32 op; | |
1163 | union { | |
1164 | u32 args[4]; | |
1165 | u32 reply; | |
1166 | u32 replylong[4]; | |
1167 | }; | |
1168 | u32 is_fullsock; | |
1169 | u64 temp; /* temp and everything after is not | |
1170 | * initialized to 0 before calling | |
1171 | * the BPF program. New fields that | |
1172 | * should be initialized to 0 should | |
1173 | * be inserted before temp. | |
1174 | * temp is scratch storage used by | |
1175 | * sock_ops_convert_ctx_access | |
1176 | * as temporary storage of a register. | |
1177 | */ | |
1178 | }; | |
1179 | ||
1180 | #endif /* __LINUX_FILTER_H__ */ |