/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
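
/* Note: most callers go through the sk_filter() wrapper from <linux/filter.h>,
 * which is simply sk_filter_trim_cap(sk, skb, 1), i.e. (illustrative sketch):
 *
 *	if (sk_filter(sk, skb))
 *		goto drop;
 */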
BPF_CALL_1(__skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}
BPF_CALL_3(__skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}
BPF_CALL_3(__skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}
BPF_CALL_0(__get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= __get_raw_cpu_id,
	.ret_type	= RET_INTEGER,
};
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}
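
/* For reference, the extension path above is what a classic BPF absolute
 * load from the ancillary SKF_AD_OFF range ends up as, e.g. reading the
 * current CPU number (illustrative sketch):
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU)
 */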
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: buffer where converted program will be stored
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_insn *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i;
	struct bpf_insn *new_insn;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = new_prog;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_insn) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourself. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - new_prog;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K is remaped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD  ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - new_prog;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	*new_len = 0;
	return -EINVAL;
}
/*
 * As we dont want to clear mem[] array for each packet going through
 * __bpf_prog_run(), we check that filter loaded by user never try to read
 * a cell if not previously written, and we check all branches to be sure
 * a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
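
/* Example of a filter this check rejects: a classic BPF program that loads
 * a scratch cell no path has stored to, e.g. (illustrative sketch)
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 3),
 *	BPF_STMT(BPF_RET | BPF_A, 0),
 *
 * fails with -EINVAL since mem[3] is read before any BPF_ST/BPF_STX.
 */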
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}
static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}
/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}
static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}
static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}
static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}
/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}
/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}
/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	bool ret = __sk_filter_charge(sk, fp);

	if (ret)
		refcount_inc(&fp->refcnt);
	return ret;
}
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	/* We are guaranteed to never error here with cBPF to eBPF
	 * transitions, since there's no issue with type compatibility
	 * checks on program arrays.
	 */
	fp = bpf_prog_select_runtime(fp, &err);

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}
static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}
/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);
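
/* Illustrative in-kernel usage sketch (the array and variable names below
 * are examples only): build a sock_fprog_kern around a static classic BPF
 * program and hand it to bpf_prog_create():
 *
 *	static struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *	};
 *	static struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(code),
 *		.filter	= code,
 *	};
 *	struct bpf_prog *fp;
 *	int err = bpf_prog_create(&fp, &fprog);
 */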
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}
static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct bpf_prog *old_prog;
	int err;

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		return -ENOMEM;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		err = reuseport_alloc(sk);
		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	old_prog = reuseport_attach_prog(sk, prog);
	if (old_prog)
		bpf_prog_destroy(old_prog);

	return 0;
}
static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
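
/* From user space this path is reached via setsockopt(), e.g. (sketch):
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },
 *	};
 *	struct sock_fprog bpf = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
 */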
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}
struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}
static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_end(skb);
	return err;
}
static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}
static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}
static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}
BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}
static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
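
/* Program-side sketch (assuming the usual uapi helper wrappers): rewrite
 * the destination MAC and invalidate the flow hash, e.g.
 *
 *	__u8 mac[ETH_ALEN] = { 0 };
 *	bpf_skb_store_bytes(skb, offsetof(struct ethhdr, h_dest),
 *			    mac, ETH_ALEN, BPF_F_INVALIDATE_HASH);
 */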
BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}
static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}
static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)
		return 0;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;
	return 0;
}
static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}
static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
	/* The interface is to be used in combination with bpf_csum_diff()
	 * for direct packet writes. csum rotation for alignment as well
	 * as emulating csum_sub() can be done from the eBPF program.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		return (skb->csum = csum_add(skb->csum, csum));

	return -ENOTSUPP;
}
= {
1641 .func
= bpf_csum_update
,
1643 .ret_type
= RET_INTEGER
,
1644 .arg1_type
= ARG_PTR_TO_CTX
,
1645 .arg2_type
= ARG_ANYTHING
,
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
	return dev_forward_skb(dev, skb);
}
static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
				      struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->dev = dev;
		ret = netif_rx(skb);
	}

	return ret;
}
static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->dev = dev;

	__this_cpu_inc(xmit_recursion);
	ret = dev_queue_xmit(skb);
	__this_cpu_dec(xmit_recursion);

	return ret;
}
static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* skb->mac_len is not set on normal egress */
	unsigned int mlen = skb->network_header - skb->mac_header;

	__skb_pull(skb, mlen);

	/* At ingress, the mac header has already been pulled once.
	 * At egress, skb_pospull_rcsum has to be done in case that
	 * the skb is originated from ingress (i.e. a forwarded skb)
	 * to ensure that rcsum starts at net header.
	 */
	if (!skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
	skb_pop_mac_header(skb);
	skb_reset_mac_len(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}
static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* Verify that a link layer header is carried */
	if (unlikely(skb->mac_header >= skb->network_header)) {
		kfree_skb(skb);
		return -ERANGE;
	}

	bpf_push_mac_rcsum(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}
static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
			  u32 flags)
{
	if (dev_is_mac_header_xmit(dev))
		return __bpf_redirect_common(skb, dev, flags);
	else
		return __bpf_redirect_no_mac(skb, dev, flags);
}
BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
	struct net_device *dev;
	struct sk_buff *clone;
	int ret;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return -EINVAL;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!clone))
		return -ENOMEM;

	/* For direct write, we need to keep the invariant that the skbs
	 * we're dealing with need to be uncloned. Should uncloning fail
	 * here, we need to free the just generated clone to unclone once
	 * again.
	 */
	ret = bpf_try_make_head_writable(skb);
	if (unlikely(ret)) {
		kfree_skb(clone);
		return -ENOMEM;
	}

	return __bpf_redirect(clone, dev, flags);
}
static const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func		= bpf_clone_redirect,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
struct redirect_info {
	u32 ifindex;
	u32 flags;
};

static DEFINE_PER_CPU(struct redirect_info, redirect_info);
BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return TC_ACT_SHOT;

	ri->ifindex = ifindex;
	ri->flags = flags;

	return TC_ACT_REDIRECT;
}
int skb_do_redirect(struct sk_buff *skb)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
	ri->ifindex = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return __bpf_redirect(skb, dev, ri->flags);
}
static const struct bpf_func_proto bpf_redirect_proto = {
	.func		= bpf_redirect,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};
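
/* Program-side sketch: a tc/BPF classifier redirects by returning the
 * helper's result as its verdict, e.g.
 *
 *	return bpf_redirect(ifindex, 0);		// egress on ifindex
 *	return bpf_redirect(ifindex, BPF_F_INGRESS);	// ingress on ifindex
 */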
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
	return task_get_classid(skb);
}

static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
	.func		= bpf_get_cgroup_classid,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{
	return dst_tclassid(skb);
}

static const struct bpf_func_proto bpf_get_route_realm_proto = {
	.func		= bpf_get_route_realm,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{
	/* If skb_clear_hash() was called due to mangling, we can
	 * trigger SW recalculation here. Later access to hash
	 * can then use the inline skb->hash via context directly
	 * instead of calling this helper again.
	 */
	return skb_get_hash(skb);
}

static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
	.func		= bpf_get_hash_recalc,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
{
	/* After all direct packet write, this can be used once for
	 * triggering a lazy recalc on next skb_get_hash() invocation.
	 */
	skb_clear_hash(skb);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
	.func		= bpf_set_hash_invalid,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
	   u16, vlan_tci)
{
	int ret;

	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
		     vlan_proto != htons(ETH_P_8021AD)))
		vlan_proto = htons(ETH_P_8021Q);

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_end(skb);
	return ret;
}

const struct bpf_func_proto bpf_skb_vlan_push_proto = {
	.func		= bpf_skb_vlan_push,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
	int ret;

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_pop(skb);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_end(skb);
	return ret;
}

const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
	.func		= bpf_skb_vlan_pop,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{
	/* Caller already did skb_cow() with len as headroom,
	 * so no need to do it here.
	 */
	skb_push(skb, len);
	memmove(skb->data, skb->data + len, off);
	memset(skb->data + off, 0, len);

	/* No skb_postpush_rcsum(skb, skb->data + off, len)
	 * needed here as it does not change the skb->csum
	 * result for checksum complete when summing over
	 * zeroed blocks.
	 */
	return 0;
}
static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
	/* skb_ensure_writable() is not needed here, as we're
	 * already working on an uncloned skb.
	 */
	if (unlikely(!pskb_may_pull(skb, off + len)))
		return -ENOMEM;

	skb_postpull_rcsum(skb, skb->data + off, len);
	memmove(skb->data + len, skb->data, off);
	__skb_pull(skb, len);

	return 0;
}
static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* There's no need for __skb_push()/__skb_pull() pair to
	 * get to the start of the mac header as we're guaranteed
	 * to always start from here under eBPF.
	 */
	ret = bpf_skb_generic_push(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header -= len;
		skb->network_header -= len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}
*skb
, u32 off
, u32 len
)
1970 bool trans_same
= skb
->transport_header
== skb
->network_header
;
1973 /* Same here, __skb_push()/__skb_pull() pair not needed. */
1974 ret
= bpf_skb_generic_pop(skb
, off
, len
);
1976 skb
->mac_header
+= len
;
1977 skb
->network_header
+= len
;
1979 skb
->transport_header
= skb
->network_header
;
1985 static int bpf_skb_proto_4_to_6(struct sk_buff
*skb
)
1987 const u32 len_diff
= sizeof(struct ipv6hdr
) - sizeof(struct iphdr
);
1988 u32 off
= skb
->network_header
- skb
->mac_header
;
1991 ret
= skb_cow(skb
, len_diff
);
1992 if (unlikely(ret
< 0))
1995 ret
= bpf_skb_net_hdr_push(skb
, off
, len_diff
);
1996 if (unlikely(ret
< 0))
1999 if (skb_is_gso(skb
)) {
2000 /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV4 needs to
2001 * be changed into SKB_GSO_TCPV6.
2003 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
) {
2004 skb_shinfo(skb
)->gso_type
&= ~SKB_GSO_TCPV4
;
2005 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCPV6
;
2008 /* Due to IPv6 header, MSS needs to be downgraded. */
2009 skb_shinfo(skb
)->gso_size
-= len_diff
;
2010 /* Header must be checked, and gso_segs recomputed. */
2011 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2012 skb_shinfo(skb
)->gso_segs
= 0;
2015 skb
->protocol
= htons(ETH_P_IPV6
);
2016 skb_clear_hash(skb
);
static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb->network_header - skb->mac_header;
	int ret;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		/* SKB_GSO_UDP stays as is. SKB_GSO_TCPV6 needs to
		 * be changed into SKB_GSO_TCPV4.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6;
			skb_shinfo(skb)->gso_type |=  SKB_GSO_TCPV4;
		}

		/* Due to IPv4 header, MSS can be upgraded. */
		skb_shinfo(skb)->gso_size += len_diff;
		/* Header must be checked, and gso_segs recomputed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IP);
	skb_clear_hash(skb);

	return 0;
}
static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{
	__be16 from_proto = skb->protocol;

	if (from_proto == htons(ETH_P_IP) &&
	      to_proto == htons(ETH_P_IPV6))
		return bpf_skb_proto_4_to_6(skb);

	if (from_proto == htons(ETH_P_IPV6) &&
	      to_proto == htons(ETH_P_IP))
		return bpf_skb_proto_6_to_4(skb);

	return -ENOTSUPP;
}
BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
	   u64, flags)
{
	int ret;

	if (unlikely(flags))
		return -EINVAL;

	/* General idea is that this helper does the basic groundwork
	 * needed for changing the protocol, and eBPF program fills the
	 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
	 * and other helpers, rather than passing a raw buffer here.
	 *
	 * The rationale is to keep this minimal and without a need to
	 * deal with raw packet data. F.e. even if we would pass buffers
	 * here, the program still needs to call the bpf_lX_csum_replace()
	 * helpers anyway. Plus, this way we keep also separation of
	 * concerns, since f.e. bpf_skb_store_bytes() should only take
	 * care of stores.
	 *
	 * Currently, additional options and extension header space are
	 * not supported, but flags register is reserved so we can adapt
	 * that. For offloads, we mark packet as dodgy, so that headers
	 * need to be verified first.
	 */
	ret = bpf_skb_proto_xlat(skb, proto);
	bpf_compute_data_end(skb);
	return ret;
}
static const struct bpf_func_proto bpf_skb_change_proto_proto = {
	.func		= bpf_skb_change_proto,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{
	/* We only allow a restricted subset to be changed for now. */
	if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
		     !skb_pkt_type_ok(pkt_type)))
		return -EINVAL;

	skb->pkt_type = pkt_type;
	return 0;
}
static const struct bpf_func_proto bpf_skb_change_type_proto = {
	.func		= bpf_skb_change_type,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
	u32 min_len = skb_network_offset(skb);

	if (skb_transport_header_was_set(skb))
		min_len = skb_transport_offset(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		min_len = skb_checksum_start_offset(skb) +
			  skb->csum_offset + sizeof(__sum16);
	return min_len;
}
static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
	return skb->dev->mtu + skb->dev->hard_header_len;
}
static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	unsigned int old_len = skb->len;
	int ret;

	ret = __skb_grow_rcsum(skb, new_len);
	if (!ret)
		memset(skb->data + old_len, 0, new_len - old_len);
	return ret;
}
static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	return __skb_trim_rcsum(skb, new_len);
}
BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 min_len = __bpf_skb_min_len(skb);
	int ret;

	if (unlikely(flags || new_len > max_len || new_len < min_len))
		return -EINVAL;
	if (skb->encapsulation)
		return -ENOTSUPP;

	/* The basic idea of this helper is that it's performing the
	 * needed work to either grow or trim an skb, and eBPF program
	 * rewrites the rest via helpers like bpf_skb_store_bytes(),
	 * bpf_lX_csum_replace() and others rather than passing a raw
	 * buffer here. This one is a slow path helper and intended
	 * for replies with control messages.
	 *
	 * Like in bpf_skb_change_proto(), we want to keep this rather
	 * minimal and without protocol specifics so that we are able
	 * to separate concerns as in bpf_skb_store_bytes() should only
	 * be the one responsible for writing buffers.
	 *
	 * It's really expected to be a slow path operation here for
	 * control message replies, so we're implicitly linearizing,
	 * uncloning and drop offloads from the skb by this.
	 */
	ret = __bpf_try_make_writable(skb, skb->len);
	if (!ret) {
		if (new_len > skb->len)
			ret = bpf_skb_grow_rcsum(skb, new_len);
		else if (new_len < skb->len)
			ret = bpf_skb_trim_rcsum(skb, new_len);
		if (!ret && skb_is_gso(skb))
			skb_gso_reset(skb);
	}

	bpf_compute_data_end(skb);
	return ret;
}
static const struct bpf_func_proto bpf_skb_change_tail_proto = {
	.func		= bpf_skb_change_tail,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 new_len = skb->len + head_room;
	int ret;

	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
		     new_len < skb->len))
		return -EINVAL;

	ret = skb_cow(skb, head_room);
	if (likely(!ret)) {
		/* Idea for this helper is that we currently only
		 * allow to expand on mac header. This means that
		 * skb->protocol network header, etc, stay as is.
		 * Compared to bpf_skb_change_tail(), we're more
		 * flexible due to not needing to linearize or
		 * reset GSO. Intention for this helper is to be
		 * used by an L3 skb that needs to push mac header
		 * for redirection into L2 device.
		 */
		__skb_push(skb, head_room);
		memset(skb->data, 0, head_room);
		skb_reset_mac_header(skb);
	}

	bpf_compute_data_end(skb);
	return ret;
}
static const struct bpf_func_proto bpf_skb_change_head_proto = {
	.func		= bpf_skb_change_head,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
	void *data = xdp->data + offset;

	if (unlikely(data < xdp->data_hard_start ||
		     data > xdp->data_end - ETH_HLEN))
		return -EINVAL;

	xdp->data = data;

	return 0;
}
= {
2268 .func
= bpf_xdp_adjust_head
,
2270 .ret_type
= RET_INTEGER
,
2271 .arg1_type
= ARG_PTR_TO_CTX
,
2272 .arg2_type
= ARG_ANYTHING
,
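
/* Program-side sketch: an XDP program reserves headroom before rewriting
 * the frame, e.g.
 *
 *	if (bpf_xdp_adjust_head(ctx, -(int)sizeof(struct ethhdr)))
 *		return XDP_DROP;
 */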
bool bpf_helper_changes_pkt_data(void *func)
{
	if (func == bpf_skb_vlan_push ||
	    func == bpf_skb_vlan_pop ||
	    func == bpf_skb_store_bytes ||
	    func == bpf_skb_change_proto ||
	    func == bpf_skb_change_head ||
	    func == bpf_skb_change_tail ||
	    func == bpf_skb_pull_data ||
	    func == bpf_l3_csum_replace ||
	    func == bpf_l4_csum_replace ||
	    func == bpf_xdp_adjust_head)
		return true;

	return false;
}
, const void *skb
,
2293 unsigned long off
, unsigned long len
)
2295 void *ptr
= skb_header_pointer(skb
, off
, len
, dst_buff
);
2299 if (ptr
!= dst_buff
)
2300 memcpy(dst_buff
, ptr
, len
);
BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(skb_size > skb->len))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
				bpf_skb_copy);
}
static const struct bpf_func_proto bpf_skb_event_output_proto = {
	.func		= bpf_skb_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
static unsigned short bpf_tunnel_key_af(u64 flags)
{
	return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}
BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
	   u32, size, u64, flags)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	void *to_orig = to;
	int err;

	if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
		err = -EINVAL;
		goto err_clear;
	}
	if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
		err = -EPROTO;
		goto err_clear;
	}
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		err = -EINVAL;
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
			goto set_compat;
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			if (ip_tunnel_info_af(info) != AF_INET)
				goto err_clear;
set_compat:
			to = (struct bpf_tunnel_key *)compat;
			break;
		default:
			goto err_clear;
		}
	}

	to->tunnel_id = be64_to_cpu(info->key.tun_id);
	to->tunnel_tos = info->key.tos;
	to->tunnel_ttl = info->key.ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
		       sizeof(to->remote_ipv6));
		to->tunnel_label = be32_to_cpu(info->key.label);
	} else {
		to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
	}

	if (unlikely(size != sizeof(struct bpf_tunnel_key)))
		memcpy(to_orig, to, size);

	return 0;
err_clear:
	memset(to_orig, 0, size);
	return err;
}
static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
	.func		= bpf_skb_get_tunnel_key,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
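/* Illustrative sketch, not part of the kernel sources: a tc program
 * attached to a collect_md tunnel device might read the receive-side
 * tunnel metadata like this.
 *
 *	struct bpf_tunnel_key key = {};
 *
 *	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0))
 *		return TC_ACT_SHOT;
 *	// key.tunnel_id / key.remote_ipv4 now describe the outer header
 */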
BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	int err;

	if (unlikely(!info ||
		     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
		err = -ENOENT;
		goto err_clear;
	}
	if (unlikely(size < info->options_len)) {
		err = -ENOMEM;
		goto err_clear;
	}

	ip_tunnel_info_opts_get(to, info);
	if (size > info->options_len)
		memset(to + info->options_len, 0, size - info->options_len);

	return info->options_len;
err_clear:
	memset(to, 0, size);
	return err;
}
static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
	.func		= bpf_skb_get_tunnel_opt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
static struct metadata_dst __percpu *md_dst;
BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
	   const struct bpf_tunnel_key *, from, u32, size, u64, flags)
{
	struct metadata_dst *md = this_cpu_ptr(md_dst);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	struct ip_tunnel_info *info;

	if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
			       BPF_F_DONT_FRAGMENT)))
		return -EINVAL;
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			memcpy(compat, from, size);
			memset(compat + size, 0, sizeof(compat) - size);
			from = (const struct bpf_tunnel_key *) compat;
			break;
		default:
			return -EINVAL;
		}
	}

	if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
		     from->tunnel_ext))
		return -EINVAL;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) md);
	skb_dst_set(skb, (struct dst_entry *) md);

	info = &md->u.tun_info;
	info->mode = IP_TUNNEL_INFO_TX;

	info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
	if (flags & BPF_F_DONT_FRAGMENT)
		info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;

	info->key.tun_id = cpu_to_be64(from->tunnel_id);
	info->key.tos = from->tunnel_tos;
	info->key.ttl = from->tunnel_ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		info->mode |= IP_TUNNEL_INFO_IPV6;
		memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
		       sizeof(from->remote_ipv6));
		info->key.label = cpu_to_be32(from->tunnel_label) &
				  IPV6_FLOWLABEL_MASK;
	} else {
		info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
		if (flags & BPF_F_ZERO_CSUM_TX)
			info->key.tun_flags &= ~TUNNEL_CSUM;
	}

	return 0;
}
static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
	.func		= bpf_skb_set_tunnel_key,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
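/* Illustrative sketch, not part of the kernel sources: on egress through a
 * collect_md tunnel device, a tc program would populate the per-cpu
 * metadata dst set up by this helper before the tunnel driver builds the
 * outer header. The concrete values below are assumptions for the example;
 * remote_ipv4 is given in host byte order, as the helper converts it.
 *
 *	struct bpf_tunnel_key key = {
 *		.tunnel_id   = 42,
 *		.remote_ipv4 = 0x0a000001,	// 10.0.0.1
 *		.tunnel_ttl  = 64,
 *	};
 *
 *	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX))
 *		return TC_ACT_SHOT;
 */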
BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
	   const u8 *, from, u32, size)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct metadata_dst *md = this_cpu_ptr(md_dst);

	if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
		return -EINVAL;
	if (unlikely(size > IP_TUNNEL_OPTS_MAX))
		return -ENOMEM;

	ip_tunnel_info_opts_set(info, from, size);

	return 0;
}
static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
	.func		= bpf_skb_set_tunnel_opt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{
	if (!md_dst) {
		/* Race is not possible, since it's called from verifier
		 * that is holding verifier mutex.
		 */
		md_dst = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
						   GFP_KERNEL);
		if (!md_dst)
			return NULL;
	}

	switch (which) {
	case BPF_FUNC_skb_set_tunnel_key:
		return &bpf_skb_set_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return &bpf_skb_set_tunnel_opt_proto;
	default:
		return NULL;
	}
}
BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
	   u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;
	struct sock *sk;

	sk = skb_to_full_sk(skb);
	if (!sk || !sk_fullsock(sk))
		return -ENOENT;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return sk_under_cgroup_hierarchy(sk, cgrp);
}
static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
	.func		= bpf_skb_under_cgroup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
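/* Illustrative sketch, not part of the kernel sources: with a
 * BPF_MAP_TYPE_CGROUP_ARRAY populated from user space, a tc program can
 * scope its policy to one cgroup subtree. The map name and index are
 * assumptions for the example.
 *
 *	if (bpf_skb_under_cgroup(skb, &cgroup_map, 0) == 1)
 *		return TC_ACT_SHOT;	// drop only inside that subtree
 */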
static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
				  unsigned long off, unsigned long len)
{
	memcpy(dst_buff, src_buff + off, len);
	return 0;
}
BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, xdp->data,
				xdp_size, bpf_xdp_copy);
}
static const struct bpf_func_proto bpf_xdp_event_output_proto = {
	.func		= bpf_xdp_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
{
	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
}
static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
	.func		= bpf_get_socket_cookie,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
{
	struct sock *sk = sk_to_full_sk(skb->sk);
	kuid_t kuid;

	if (!sk || !sk_fullsock(sk))
		return overflowuid;
	kuid = sock_net_uid(sock_net(sk), sk);
	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
}
static const struct bpf_func_proto bpf_get_socket_uid_proto = {
	.func		= bpf_get_socket_uid,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
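/* Illustrative sketch, not part of the kernel sources: a socket filter or
 * cgroup/skb program can key per-socket accounting on these two helpers.
 * The "flows" map name is an assumption for the example.
 *
 *	u64 cookie = bpf_get_socket_cookie(skb);
 *	u32 uid    = bpf_get_socket_uid(skb);
 *
 *	bpf_map_update_elem(&flows, &cookie, &uid, BPF_ANY);
 */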
static const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
	default:
		return NULL;
	}
}
static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_skb_vlan_push:
		return &bpf_skb_vlan_push_proto;
	case BPF_FUNC_skb_vlan_pop:
		return &bpf_skb_vlan_pop_proto;
	case BPF_FUNC_skb_change_proto:
		return &bpf_skb_change_proto_proto;
	case BPF_FUNC_skb_change_type:
		return &bpf_skb_change_type_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_xdp_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_xdp_adjust_head:
		return &bpf_xdp_adjust_head_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
cg_skb_func_proto(enum bpf_func_id func_id)
{
	return sk_filter_func_proto(func_id);
}
static const struct bpf_func_proto *
lwt_inout_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
lwt_xmit_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	default:
		return lwt_inout_func_proto(func_id);
	}
}
static bool __is_valid_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
		if (off + size >
		    offsetof(struct __sk_buff, cb[4]) + sizeof(__u32))
			return false;
		break;
	default:
		if (size != sizeof(__u32))
			return false;
	}

	return true;
}
static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      enum bpf_reg_type *reg_type)
{
	switch (off) {
	case offsetof(struct __sk_buff, tc_classid):
	case offsetof(struct __sk_buff, data):
	case offsetof(struct __sk_buff, data_end):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
			break;
		default:
			return false;
		}
	}

	return __is_valid_access(off, size);
}
static bool lwt_is_valid_access(int off, int size,
				enum bpf_access_type type,
				enum bpf_reg_type *reg_type)
{
	switch (off) {
	case offsetof(struct __sk_buff, tc_classid):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, mark):
		case offsetof(struct __sk_buff, priority):
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct __sk_buff, data):
		*reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct __sk_buff, data_end):
		*reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_access(off, size);
}
static bool sock_filter_is_valid_access(int off, int size,
					enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sock, bound_dev_if):
			break;
		default:
			return false;
		}
	}

	if (off < 0 || off + size > sizeof(struct bpf_sock))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}
static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	if (!direct_write)
		return 0;

	/* if (!skb->cloned)
	 *       goto start;
	 *
	 * (Fast-path, otherwise approximation that we might be
	 *  a clone, do the rest in helper.)
	 */
	*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
	*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);

	/* ret = bpf_skb_pull_data(skb, 0); */
	*insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	*insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
	*insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			       BPF_FUNC_skb_pull_data);
	/* if (!ret)
	 *      goto restore;
	 * return TC_ACT_SHOT;
	 */
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, TC_ACT_SHOT);
	*insn++ = BPF_EXIT_INSN();

	/* restore: */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
	/* start: */
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}
static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       enum bpf_reg_type *reg_type)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, mark):
		case offsetof(struct __sk_buff, tc_index):
		case offsetof(struct __sk_buff, priority):
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
		case offsetof(struct __sk_buff, tc_classid):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct __sk_buff, data):
		*reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct __sk_buff, data_end):
		*reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_access(off, size);
}
static bool __is_valid_xdp_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct xdp_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}
static bool xdp_is_valid_access(int off, int size,
				enum bpf_access_type type,
				enum bpf_reg_type *reg_type)
{
	if (type == BPF_WRITE)
		return false;

	switch (off) {
	case offsetof(struct xdp_md, data):
		*reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct xdp_md, data_end):
		*reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_xdp_access(off, size);
}
void bpf_warn_invalid_xdp_action(u32 act)
{
	WARN_ONCE(1, "Illegal XDP return value %u, expect packet loss\n", act);
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
static u32 bpf_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, len):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, len));
		break;

	case offsetof(struct __sk_buff, protocol):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, protocol));
		break;

	case offsetof(struct __sk_buff, vlan_proto):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, vlan_proto));
		break;

	case offsetof(struct __sk_buff, priority):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, priority));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, priority));
		break;

	case offsetof(struct __sk_buff, ingress_ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, skb_iif));
		break;

	case offsetof(struct __sk_buff, ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct net_device, ifindex));
		break;

	case offsetof(struct __sk_buff, hash):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, hash));
		break;

	case offsetof(struct __sk_buff, mark):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, mark));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, mark));
		break;

	case offsetof(struct __sk_buff, pkt_type):
		return convert_skb_access(SKF_AD_PKTTYPE, si->dst_reg,
					  si->src_reg, insn);

	case offsetof(struct __sk_buff, queue_mapping):
		return convert_skb_access(SKF_AD_QUEUE, si->dst_reg,
					  si->src_reg, insn);

	case offsetof(struct __sk_buff, vlan_present):
		return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					  si->dst_reg, si->src_reg, insn);

	case offsetof(struct __sk_buff, vlan_tci):
		return convert_skb_access(SKF_AD_VLAN_TAG,
					  si->dst_reg, si->src_reg, insn);

	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
		BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
			      offsetof(struct qdisc_skb_cb, data)) %
			     sizeof(__u64));

		prog->cb_access = 1;
		off  = si->off;
		off -= offsetof(struct __sk_buff, cb[0]);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_classid):
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);

		off  = si->off;
		off -= offsetof(struct __sk_buff, tc_classid);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, tc_classid);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, data));
		break;

	case offsetof(struct __sk_buff, data_end):
		off  = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct bpf_skb_data_end, data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_index):
#ifdef CONFIG_NET_SCHED
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, tc_index));
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, tc_index));
#else
		if (type == BPF_WRITE)
			*insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
		else
			*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct __sk_buff, napi_id):
#if defined(CONFIG_NET_RX_BUSY_POLL)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, napi_id) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, napi_id));
		*insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#else
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
		break;
	}

	return insn - insn_buf;
}
static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
					  const struct bpf_insn *si,
					  struct bpf_insn *insn_buf,
					  struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sock, bound_dev_if):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_bound_dev_if));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_bound_dev_if));
		break;

	case offsetof(struct bpf_sock, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      offsetof(struct sock, sk_family));
		break;

	case offsetof(struct bpf_sock, type):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
		break;

	case offsetof(struct bpf_sock, protocol):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
		break;
	}

	return insn - insn_buf;
}
static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct __sk_buff, ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct net_device, ifindex));
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog);
	}

	return insn - insn_buf;
}
static u32 xdp_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct xdp_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data));
		break;
	case offsetof(struct xdp_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_end));
		break;
	}

	return insn - insn_buf;
}
const struct bpf_verifier_ops sk_filter_prog_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_verifier_ops tc_cls_act_prog_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops xdp_prog_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
	.test_run		= bpf_prog_test_run_xdp,
};

const struct bpf_verifier_ops cg_skb_prog_ops = {
	.get_func_proto		= cg_skb_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_inout_prog_ops = {
	.get_func_proto		= lwt_inout_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_xmit_prog_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops cg_sock_prog_ops = {
	.get_func_proto		= bpf_base_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= sock_filter_convert_ctx_access,
};
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore. eBPF programs that
	 * have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number