/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
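
/* Editor's note, not part of the upstream file: callers on the protocol
 * input paths normally reach this function through the sk_filter()
 * wrapper from <linux/filter.h>, which passes a trim cap of 1:
 *
 *	static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return sk_filter_trim_cap(sk, skb, 1);
 *	}
 *
 * so an accepted packet is never trimmed below a single byte.
 */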
BPF_CALL_1(__skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(__skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(__skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_0(__get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= __get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}
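
/* Editor's illustration (hypothetical example, not upstream code): for
 * SKF_AD_MARK the helper above emits a single load, so the classic
 * ancillary instruction
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_MARK)
 *
 * becomes the eBPF equivalent of
 *
 *	BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
 *		    offsetof(struct sk_buff, mark))
 */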
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}
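
/* Editor's sketch of the conversion result (illustrative only, not
 * upstream code): a classic two-insn filter
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),	// A = skb->len
 *	BPF_STMT(BPF_RET | BPF_A, 0),		// return A
 *
 * is remapped by bpf_convert_filter() to roughly
 *
 *	BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A),	// prologue: A = 0
 *	BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X),	// prologue: X = 0
 *	BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1),	// CTX = arg1
 *	BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
 *		    offsetof(struct sk_buff, len)),
 *	BPF_EXIT_INSN(),
 */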
/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by a user never tries to
 * read a cell if it was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}
static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}
/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}
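
/* Editor's illustration (hypothetical example, not upstream code): the
 * smallest program that passes bpf_check_classic() is a single RET, e.g.
 * one that accepts up to 64 bytes of every packet:
 *
 *	struct sock_filter accept64[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 64),
 *	};
 */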
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	bool ret = __sk_filter_charge(sk, fp);

	if (ret)
		refcount_inc(&fp->refcnt);
	return ret;
}
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	/* We are guaranteed to never error here with cBPF to eBPF
	 * transitions, since there's no issue with type compatibility
	 * checks on program arrays.
	 */
	fp = bpf_prog_select_runtime(fp, &err);

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}
*bpf_prepare_filter(struct bpf_prog
*fp
,
1055 bpf_aux_classic_check_t trans
)
1059 fp
->bpf_func
= NULL
;
1062 err
= bpf_check_classic(fp
->insns
, fp
->len
);
1064 __bpf_prog_release(fp
);
1065 return ERR_PTR(err
);
1068 /* There might be additional checks and transformations
1069 * needed on classic filters, f.e. in case of seccomp.
1072 err
= trans(fp
->insns
, fp
->len
);
1074 __bpf_prog_release(fp
);
1075 return ERR_PTR(err
);
1079 /* Probe if we can JIT compile the filter and if so, do
1080 * the compilation of the filter.
1082 bpf_jit_compile(fp
);
1084 /* JIT compiler couldn't process this filter, so do the
1085 * internal BPF translation for the optimized interpreter.
1088 fp
= bpf_migrate_filter(fp
);
/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);
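
/* Editor's illustration (hypothetical in-kernel caller, not upstream
 * code): building an unattached accept-all filter with bpf_prog_create():
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &fprog);
 */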
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct bpf_prog *old_prog;
	int err;

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		return -ENOMEM;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		err = reuseport_alloc(sk);
		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	old_prog = reuseport_attach_prog(sk, prog);
	if (old_prog)
		bpf_prog_destroy(old_prog);

	return 0;
}
*__get_filter(struct sock_fprog
*fprog
, struct sock
*sk
)
1250 unsigned int fsize
= bpf_classic_proglen(fprog
);
1251 struct bpf_prog
*prog
;
1254 if (sock_flag(sk
, SOCK_FILTER_LOCKED
))
1255 return ERR_PTR(-EPERM
);
1257 /* Make sure new filter is there and in the right amounts. */
1258 if (!bpf_check_basics_ok(fprog
->filter
, fprog
->len
))
1259 return ERR_PTR(-EINVAL
);
1261 prog
= bpf_prog_alloc(bpf_prog_size(fprog
->len
), 0);
1263 return ERR_PTR(-ENOMEM
);
1265 if (copy_from_user(prog
->insns
, fprog
->filter
, fsize
)) {
1266 __bpf_prog_free(prog
);
1267 return ERR_PTR(-EFAULT
);
1270 prog
->len
= fprog
->len
;
1272 err
= bpf_prog_store_orig_filter(prog
, fprog
);
1274 __bpf_prog_free(prog
);
1275 return ERR_PTR(-ENOMEM
);
1278 /* bpf_prepare_filter() already takes care of freeing
1279 * memory in case something goes wrong.
1281 return bpf_prepare_filter(prog
, NULL
);
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
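
/* Editor's note: this is the kernel-side implementation of the
 * SO_ATTACH_FILTER socket option, i.e. the path taken for a userspace
 * call along the lines of
 *
 *	struct sock_fprog fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */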
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}
struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_end(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}
BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
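
/* Editor's illustration (hypothetical eBPF program snippet, not upstream
 * code): overwriting packet bytes at an assumed offset off from an
 * assumed local buffer buf, keeping CHECKSUM_COMPLETE consistent:
 *
 *	bpf_skb_store_bytes(skb, off, buf, sizeof(buf),
 *			    BPF_F_RECOMPUTE_CSUM);
 */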
BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;

err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)
		return 0;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;
	return 0;
}

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i < to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}
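
/* Editor's illustration (hypothetical eBPF program snippet, not upstream
 * code): replacing an IPv4 address while fixing up the L4 checksum;
 * old_ip, new_ip and csum_off are assumed to be prepared by the program:
 *
 *	__wsum diff = bpf_csum_diff(&old_ip, 4, &new_ip, 4, 0);
 *	bpf_l4_csum_replace(skb, csum_off, 0, diff, BPF_F_PSEUDO_HDR);
 */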
static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
	/* The interface is to be used in combination with bpf_csum_diff()
	 * for direct packet writes. csum rotation for alignment as well
	 * as emulating csum_sub() can be done from the eBPF program.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		return (skb->csum = csum_add(skb->csum, csum));

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_csum_update_proto = {
	.func		= bpf_csum_update,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
	return dev_forward_skb(dev, skb);
}

static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
				      struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->dev = dev;
		ret = netif_rx(skb);
	}

	return ret;
}

static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->dev = dev;

	__this_cpu_inc(xmit_recursion);
	ret = dev_queue_xmit(skb);
	__this_cpu_dec(xmit_recursion);

	return ret;
}

static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* skb->mac_len is not set on normal egress */
	unsigned int mlen = skb->network_header - skb->mac_header;

	__skb_pull(skb, mlen);

	/* At ingress, the mac header has already been pulled once.
	 * At egress, skb_postpull_rcsum has to be done in case that
	 * the skb is originated from ingress (i.e. a forwarded skb)
	 * to ensure that rcsum starts at net header.
	 */
	if (!skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
	skb_pop_mac_header(skb);
	skb_reset_mac_len(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}
static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* Verify that a link layer header is carried */
	if (unlikely(skb->mac_header >= skb->network_header)) {
		kfree_skb(skb);
		return -ERANGE;
	}

	bpf_push_mac_rcsum(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
			  u32 flags)
{
	if (dev_is_mac_header_xmit(dev))
		return __bpf_redirect_common(skb, dev, flags);
	else
		return __bpf_redirect_no_mac(skb, dev, flags);
}
BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
	struct net_device *dev;
	struct sk_buff *clone;
	int ret;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return -EINVAL;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!clone))
		return -ENOMEM;

	/* For direct write, we need to keep the invariant that the skbs
	 * we're dealing with need to be uncloned. Should uncloning fail
	 * here, we need to free the just generated clone to unclone once
	 * again.
	 */
	ret = bpf_try_make_head_writable(skb);
	if (unlikely(ret)) {
		kfree_skb(clone);
		return -ENOMEM;
	}

	return __bpf_redirect(clone, dev, flags);
}

static const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func		= bpf_clone_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
struct redirect_info {
	u32 ifindex;
	u32 flags;
};

static DEFINE_PER_CPU(struct redirect_info, redirect_info);

BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return TC_ACT_SHOT;

	ri->ifindex = ifindex;
	ri->flags = flags;

	return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
	ri->ifindex = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return __bpf_redirect(skb, dev, ri->flags);
}

static const struct bpf_func_proto bpf_redirect_proto = {
	.func		= bpf_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};
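
/* Editor's note: from a tc classifier program, redirection is requested
 * simply as
 *
 *	return bpf_redirect(ifindex, 0);		// egress of ifindex
 *	return bpf_redirect(ifindex, BPF_F_INGRESS);	// ingress of ifindex
 *
 * and the TC_ACT_REDIRECT return code makes the caller hand the skb to
 * skb_do_redirect() above, which consumes the per-CPU redirect_info.
 */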
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
	return task_get_classid(skb);
}

static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
	.func		= bpf_get_cgroup_classid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{
	return dst_tclassid(skb);
}

static const struct bpf_func_proto bpf_get_route_realm_proto = {
	.func		= bpf_get_route_realm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{
	/* If skb_clear_hash() was called due to mangling, we can
	 * trigger SW recalculation here. Later access to hash
	 * can then use the inline skb->hash via context directly
	 * instead of calling this helper again.
	 */
	return skb_get_hash(skb);
}

static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
	.func		= bpf_get_hash_recalc,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
{
	/* After all direct packet write, this can be used once for
	 * triggering a lazy recalc on next skb_get_hash() invocation.
	 */
	skb_clear_hash(skb);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
	.func		= bpf_set_hash_invalid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
{
	/* Set user specified hash as L4(+), so that it gets returned
	 * on skb_get_hash() call unless BPF prog later on triggers a
	 * hash recalculation.
	 */
	__skb_set_sw_hash(skb, hash, true);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_proto = {
	.func		= bpf_set_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
	   u16, vlan_tci)
{
	int ret;

	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
		     vlan_proto != htons(ETH_P_8021AD)))
		vlan_proto = htons(ETH_P_8021Q);

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_end(skb);
	return ret;
}

const struct bpf_func_proto bpf_skb_vlan_push_proto = {
	.func		= bpf_skb_vlan_push,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);

BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
	int ret;

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_pop(skb);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_end(skb);
	return ret;
}

const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
	.func		= bpf_skb_vlan_pop,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{
	/* Caller already did skb_cow() with len as headroom,
	 * so no need to do it here.
	 */
	skb_push(skb, len);
	memmove(skb->data, skb->data + len, off);
	memset(skb->data + off, 0, len);

	/* No skb_postpush_rcsum(skb, skb->data + off, len)
	 * needed here as it does not change the skb->csum
	 * result for checksum complete when summing over
	 * zeroed blocks.
	 */
	return 0;
}

static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
	/* skb_ensure_writable() is not needed here, as we're
	 * already working on an uncloned skb.
	 */
	if (unlikely(!pskb_may_pull(skb, off + len)))
		return -ENOMEM;

	skb_postpull_rcsum(skb, skb->data + off, len);
	memmove(skb->data + len, skb->data, off);
	__skb_pull(skb, len);

	return 0;
}

static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* There's no need for __skb_push()/__skb_pull() pair to
	 * get to the start of the mac header as we're guaranteed
	 * to always start from here under eBPF.
	 */
	ret = bpf_skb_generic_push(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header -= len;
		skb->network_header -= len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}

static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* Same here, __skb_push()/__skb_pull() pair not needed. */
	ret = bpf_skb_generic_pop(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header += len;
		skb->network_header += len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}
*skb
)
2014 const u32 len_diff
= sizeof(struct ipv6hdr
) - sizeof(struct iphdr
);
2015 u32 off
= skb_mac_header_len(skb
);
2018 ret
= skb_cow(skb
, len_diff
);
2019 if (unlikely(ret
< 0))
2022 ret
= bpf_skb_net_hdr_push(skb
, off
, len_diff
);
2023 if (unlikely(ret
< 0))
2026 if (skb_is_gso(skb
)) {
2027 /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV4 needs to
2028 * be changed into SKB_GSO_TCPV6.
2030 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
) {
2031 skb_shinfo(skb
)->gso_type
&= ~SKB_GSO_TCPV4
;
2032 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCPV6
;
2035 /* Due to IPv6 header, MSS needs to be downgraded. */
2036 skb_shinfo(skb
)->gso_size
-= len_diff
;
2037 /* Header must be checked, and gso_segs recomputed. */
2038 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2039 skb_shinfo(skb
)->gso_segs
= 0;
2042 skb
->protocol
= htons(ETH_P_IPV6
);
2043 skb_clear_hash(skb
);
2048 static int bpf_skb_proto_6_to_4(struct sk_buff
*skb
)
2050 const u32 len_diff
= sizeof(struct ipv6hdr
) - sizeof(struct iphdr
);
2051 u32 off
= skb_mac_header_len(skb
);
2054 ret
= skb_unclone(skb
, GFP_ATOMIC
);
2055 if (unlikely(ret
< 0))
2058 ret
= bpf_skb_net_hdr_pop(skb
, off
, len_diff
);
2059 if (unlikely(ret
< 0))
2062 if (skb_is_gso(skb
)) {
2063 /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV6 needs to
2064 * be changed into SKB_GSO_TCPV4.
2066 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
) {
2067 skb_shinfo(skb
)->gso_type
&= ~SKB_GSO_TCPV6
;
2068 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCPV4
;
2071 /* Due to IPv4 header, MSS can be upgraded. */
2072 skb_shinfo(skb
)->gso_size
+= len_diff
;
2073 /* Header must be checked, and gso_segs recomputed. */
2074 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2075 skb_shinfo(skb
)->gso_segs
= 0;
2078 skb
->protocol
= htons(ETH_P_IP
);
2079 skb_clear_hash(skb
);
2084 static int bpf_skb_proto_xlat(struct sk_buff
*skb
, __be16 to_proto
)
2086 __be16 from_proto
= skb
->protocol
;
2088 if (from_proto
== htons(ETH_P_IP
) &&
2089 to_proto
== htons(ETH_P_IPV6
))
2090 return bpf_skb_proto_4_to_6(skb
);
2092 if (from_proto
== htons(ETH_P_IPV6
) &&
2093 to_proto
== htons(ETH_P_IP
))
2094 return bpf_skb_proto_6_to_4(skb
);
BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
	   u64, flags)
{
	int ret;

	if (unlikely(flags))
		return -EINVAL;

	/* General idea is that this helper does the basic groundwork
	 * needed for changing the protocol, and eBPF program fills the
	 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
	 * and other helpers, rather than passing a raw buffer here.
	 *
	 * The rationale is to keep this minimal and without a need to
	 * deal with raw packet data. F.e. even if we would pass buffers
	 * here, the program still needs to call the bpf_lX_csum_replace()
	 * helpers anyway. Plus, this way we keep also separation of
	 * concerns, since f.e. bpf_skb_store_bytes() should only take
	 * care of writing.
	 *
	 * Currently, additional options and extension header space are
	 * not supported, but flags register is reserved so we can adapt
	 * that. For offloads, we mark packet as dodgy, so that headers
	 * need to be verified first.
	 */
	ret = bpf_skb_proto_xlat(skb, proto);
	bpf_compute_data_end(skb);

	return ret;
}
= {
2130 .func
= bpf_skb_change_proto
,
2132 .ret_type
= RET_INTEGER
,
2133 .arg1_type
= ARG_PTR_TO_CTX
,
2134 .arg2_type
= ARG_ANYTHING
,
2135 .arg3_type
= ARG_ANYTHING
,
2138 BPF_CALL_2(bpf_skb_change_type
, struct sk_buff
*, skb
, u32
, pkt_type
)
2140 /* We only allow a restricted subset to be changed for now. */
2141 if (unlikely(!skb_pkt_type_ok(skb
->pkt_type
) ||
2142 !skb_pkt_type_ok(pkt_type
)))
2145 skb
->pkt_type
= pkt_type
;
2149 static const struct bpf_func_proto bpf_skb_change_type_proto
= {
2150 .func
= bpf_skb_change_type
,
2152 .ret_type
= RET_INTEGER
,
2153 .arg1_type
= ARG_PTR_TO_CTX
,
2154 .arg2_type
= ARG_ANYTHING
,
static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return sizeof(struct iphdr);
	case htons(ETH_P_IPV6):
		return sizeof(struct ipv6hdr);
	default:
		return ~0U;
	}
}

static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
{
	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
	int ret;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		/* Due to header grow, MSS needs to be downgraded. */
		skb_shinfo(skb)->gso_size -= len_diff;
		/* Header must be checked, and gso_segs recomputed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	return 0;
}

static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
{
	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
	int ret;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		/* Due to header shrink, MSS can be upgraded. */
		skb_shinfo(skb)->gso_size += len_diff;
		/* Header must be checked, and gso_segs recomputed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	return 0;
}

static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
	return skb->dev->mtu + skb->dev->hard_header_len;
}
static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
{
	bool trans_same = skb->transport_header == skb->network_header;
	u32 len_cur, len_diff_abs = abs(len_diff);
	u32 len_min = bpf_skb_net_base_len(skb);
	u32 len_max = __bpf_skb_max_len(skb);
	__be16 proto = skb->protocol;
	bool shrink = len_diff < 0;
	int ret;

	if (unlikely(len_diff_abs > 0xfffU))
		return -EFAULT;
	if (unlikely(proto != htons(ETH_P_IP) &&
		     proto != htons(ETH_P_IPV6)))
		return -ENOTSUPP;

	len_cur = skb->len - skb_network_offset(skb);
	if (skb_transport_header_was_set(skb) && !trans_same)
		len_cur = skb_network_header_len(skb);
	if ((shrink && (len_diff_abs >= len_cur ||
			len_cur - len_diff_abs < len_min)) ||
	    (!shrink && (skb->len + len_diff_abs > len_max &&
			 !skb_is_gso(skb))))
		return -ENOTSUPP;

	ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) :
		       bpf_skb_net_grow(skb, len_diff_abs);

	bpf_compute_data_end(skb);
	return ret;
}

BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
	   u32, mode, u64, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	if (likely(mode == BPF_ADJ_ROOM_NET))
		return bpf_skb_adjust_net(skb, len_diff);

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
	.func		= bpf_skb_adjust_room,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};
__bpf_skb_min_len(const struct sk_buff
*skb
)
2277 u32 min_len
= skb_network_offset(skb
);
2279 if (skb_transport_header_was_set(skb
))
2280 min_len
= skb_transport_offset(skb
);
2281 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2282 min_len
= skb_checksum_start_offset(skb
) +
2283 skb
->csum_offset
+ sizeof(__sum16
);
2287 static int bpf_skb_grow_rcsum(struct sk_buff
*skb
, unsigned int new_len
)
2289 unsigned int old_len
= skb
->len
;
2292 ret
= __skb_grow_rcsum(skb
, new_len
);
2294 memset(skb
->data
+ old_len
, 0, new_len
- old_len
);
2298 static int bpf_skb_trim_rcsum(struct sk_buff
*skb
, unsigned int new_len
)
2300 return __skb_trim_rcsum(skb
, new_len
);
2303 BPF_CALL_3(bpf_skb_change_tail
, struct sk_buff
*, skb
, u32
, new_len
,
2306 u32 max_len
= __bpf_skb_max_len(skb
);
2307 u32 min_len
= __bpf_skb_min_len(skb
);
2310 if (unlikely(flags
|| new_len
> max_len
|| new_len
< min_len
))
2312 if (skb
->encapsulation
)
2315 /* The basic idea of this helper is that it's performing the
2316 * needed work to either grow or trim an skb, and eBPF program
2317 * rewrites the rest via helpers like bpf_skb_store_bytes(),
2318 * bpf_lX_csum_replace() and others rather than passing a raw
2319 * buffer here. This one is a slow path helper and intended
2320 * for replies with control messages.
2322 * Like in bpf_skb_change_proto(), we want to keep this rather
2323 * minimal and without protocol specifics so that we are able
2324 * to separate concerns as in bpf_skb_store_bytes() should only
2325 * be the one responsible for writing buffers.
2327 * It's really expected to be a slow path operation here for
2328 * control message replies, so we're implicitly linearizing,
2329 * uncloning and drop offloads from the skb by this.
2331 ret
= __bpf_try_make_writable(skb
, skb
->len
);
2333 if (new_len
> skb
->len
)
2334 ret
= bpf_skb_grow_rcsum(skb
, new_len
);
2335 else if (new_len
< skb
->len
)
2336 ret
= bpf_skb_trim_rcsum(skb
, new_len
);
2337 if (!ret
&& skb_is_gso(skb
))
2341 bpf_compute_data_end(skb
);
2345 static const struct bpf_func_proto bpf_skb_change_tail_proto
= {
2346 .func
= bpf_skb_change_tail
,
2348 .ret_type
= RET_INTEGER
,
2349 .arg1_type
= ARG_PTR_TO_CTX
,
2350 .arg2_type
= ARG_ANYTHING
,
2351 .arg3_type
= ARG_ANYTHING
,
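/* Usage sketch (illustrative, not part of this file): trimming a reply
 * down to a fixed control-message size; the program then rewrites the
 * payload via bpf_skb_store_bytes() and the csum helpers:
 *
 *	if (bpf_skb_change_tail(skb, 64, 0))
 *		return TC_ACT_SHOT;
 *	// skb is now writable, linear and exactly 64 bytes long
 */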
BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 new_len = skb->len + head_room;
	int ret;

	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
		     new_len < skb->len))
		return -EINVAL;

	ret = skb_cow(skb, head_room);
	if (likely(!ret)) {
		/* Idea for this helper is that we currently only
		 * allow to expand on mac header. This means that
		 * skb->protocol network header, etc, stay as is.
		 * Compared to bpf_skb_change_tail(), we're more
		 * flexible due to not needing to linearize or
		 * reset GSO. Intention for this helper is to be
		 * used by an L3 skb that needs to push mac header
		 * for redirection into L2 device.
		 */
		__skb_push(skb, head_room);
		memset(skb->data, 0, head_room);
		skb_reset_mac_header(skb);
	}

	bpf_compute_data_end(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_head_proto = {
	.func		= bpf_skb_change_head,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
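/* Usage sketch (illustrative): an lwt_xmit program pushing room for a
 * 14 byte Ethernet header in front of an L3 skb before redirecting it
 * into an L2 device; the new bytes are zeroed here, so the program has
 * to fill the header itself, e.g. via bpf_skb_store_bytes():
 *
 *	if (bpf_skb_change_head(skb, ETH_HLEN, 0))
 *		return BPF_DROP;
 */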
BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
	void *data = xdp->data + offset;

	if (unlikely(data < xdp->data_hard_start ||
		     data > xdp->data_end - ETH_HLEN))
		return -EINVAL;

	xdp->data = data;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
	.func		= bpf_xdp_adjust_head,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
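/* Usage sketch (illustrative): an XDP program popping an outer header
 * of encap_len bytes by moving xdp->data forward; a negative offset
 * would instead grow headroom for pushing a new header:
 *
 *	if (bpf_xdp_adjust_head(xdp, encap_len))
 *		return XDP_DROP;
 *	// xdp->data/data_end must be re-read and re-validated afterwards
 */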
bool bpf_helper_changes_pkt_data(void *func)
{
	if (func == bpf_skb_vlan_push ||
	    func == bpf_skb_vlan_pop ||
	    func == bpf_skb_store_bytes ||
	    func == bpf_skb_change_proto ||
	    func == bpf_skb_change_head ||
	    func == bpf_skb_change_tail ||
	    func == bpf_skb_adjust_room ||
	    func == bpf_skb_pull_data ||
	    func == bpf_clone_redirect ||
	    func == bpf_l3_csum_replace ||
	    func == bpf_l4_csum_replace ||
	    func == bpf_xdp_adjust_head)
		return true;

	return false;
}
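/* Note: the verifier consults bpf_helper_changes_pkt_data() to know
 * which helpers may reallocate or move skb data. After calling any of
 * the helpers above, previously derived packet pointers are dead and a
 * program has to reload them, roughly (illustrative):
 *
 *	bpf_skb_pull_data(skb, 0);
 *	data     = (void *)(long)skb->data;
 *	data_end = (void *)(long)skb->data_end;
 *	if (data + sizeof(struct ethhdr) > data_end)
 *		return TC_ACT_SHOT;
 */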
static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
				  unsigned long off, unsigned long len)
{
	void *ptr = skb_header_pointer(skb, off, len, dst_buff);

	if (unlikely(!ptr))
		return len;
	if (ptr != dst_buff)
		memcpy(dst_buff, ptr, len);

	return 0;
}

BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(skb_size > skb->len))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
				bpf_skb_copy);
}

static const struct bpf_func_proto bpf_skb_event_output_proto = {
	.func		= bpf_skb_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
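/* Usage sketch (illustrative): sampling the first 100 bytes of a packet
 * plus a small metadata struct into a BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 * with the capture length encoded in the upper 32 bits of flags:
 *
 *	__u64 flags = BPF_F_CURRENT_CPU | (100ULL << 32);
 *
 *	bpf_perf_event_output(skb, &events, flags, &meta, sizeof(meta));
 */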
static unsigned short bpf_tunnel_key_af(u64 flags)
{
	return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}

BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
	   u32, size, u64, flags)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	void *to_orig = to;
	int err;

	if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
		err = -EINVAL;
		goto err_clear;
	}
	if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
		err = -EPROTO;
		goto err_clear;
	}
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		err = -EINVAL;
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
			goto set_compat;
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			if (ip_tunnel_info_af(info) != AF_INET)
				goto err_clear;
set_compat:
			to = (struct bpf_tunnel_key *)compat;
			break;
		default:
			goto err_clear;
		}
	}

	to->tunnel_id = be64_to_cpu(info->key.tun_id);
	to->tunnel_tos = info->key.tos;
	to->tunnel_ttl = info->key.ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
		       sizeof(to->remote_ipv6));
		to->tunnel_label = be32_to_cpu(info->key.label);
	} else {
		to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
	}

	if (unlikely(size != sizeof(struct bpf_tunnel_key)))
		memcpy(to_orig, to, size);

	return 0;
err_clear:
	memset(to_orig, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
	.func		= bpf_skb_get_tunnel_key,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	int err;

	if (unlikely(!info ||
		     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
		err = -ENOENT;
		goto err_clear;
	}
	if (unlikely(size < info->options_len)) {
		err = -ENOMEM;
		goto err_clear;
	}

	ip_tunnel_info_opts_get(to, info);
	if (size > info->options_len)
		memset(to + info->options_len, 0, size - info->options_len);

	return info->options_len;
err_clear:
	memset(to, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
	.func		= bpf_skb_get_tunnel_opt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static struct metadata_dst __percpu *md_dst;

BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
	   const struct bpf_tunnel_key *, from, u32, size, u64, flags)
{
	struct metadata_dst *md = this_cpu_ptr(md_dst);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	struct ip_tunnel_info *info;

	if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
			       BPF_F_DONT_FRAGMENT)))
		return -EINVAL;
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			memcpy(compat, from, size);
			memset(compat + size, 0, sizeof(compat) - size);
			from = (const struct bpf_tunnel_key *) compat;
			break;
		default:
			return -EINVAL;
		}
	}
	if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
		     from->tunnel_ext))
		return -EINVAL;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) md);
	skb_dst_set(skb, (struct dst_entry *) md);

	info = &md->u.tun_info;
	info->mode = IP_TUNNEL_INFO_TX;

	info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
	if (flags & BPF_F_DONT_FRAGMENT)
		info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;

	info->key.tun_id = cpu_to_be64(from->tunnel_id);
	info->key.tos = from->tunnel_tos;
	info->key.ttl = from->tunnel_ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		info->mode |= IP_TUNNEL_INFO_IPV6;
		memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
		       sizeof(from->remote_ipv6));
		info->key.label = cpu_to_be32(from->tunnel_label) &
				  IPV6_FLOWLABEL_MASK;
	} else {
		info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
		if (flags & BPF_F_ZERO_CSUM_TX)
			info->key.tun_flags &= ~TUNNEL_CSUM;
	}

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
	.func		= bpf_skb_set_tunnel_key,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
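/* Usage sketch (illustrative): setting transmit tunnel metadata on a
 * collect_md tunnel device (e.g. VXLAN external mode) from tc egress:
 *
 *	struct bpf_tunnel_key key = {
 *		.tunnel_id   = 42,
 *		.remote_ipv4 = 0xac100164,	// 172.16.1.100
 *		.tunnel_ttl  = 64,
 *	};
 *
 *	bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX);
 */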
BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
	   const u8 *, from, u32, size)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct metadata_dst *md = this_cpu_ptr(md_dst);

	if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
		return -EINVAL;
	if (unlikely(size > IP_TUNNEL_OPTS_MAX))
		return -ENOMEM;

	ip_tunnel_info_opts_set(info, from, size);

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
	.func		= bpf_skb_set_tunnel_opt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{
	if (!md_dst) {
		/* Race is not possible, since it's called from verifier
		 * that is holding verifier mutex.
		 */
		md_dst = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
						   METADATA_IP_TUNNEL,
						   GFP_KERNEL);
		if (!md_dst)
			return NULL;
	}

	switch (which) {
	case BPF_FUNC_skb_set_tunnel_key:
		return &bpf_skb_set_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return &bpf_skb_set_tunnel_opt_proto;
	default:
		return NULL;
	}
}

BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
	   u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;
	struct sock *sk;

	sk = skb_to_full_sk(skb);
	if (!sk || !sk_fullsock(sk))
		return -ENOENT;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return sk_under_cgroup_hierarchy(sk, cgrp);
}

static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
	.func		= bpf_skb_under_cgroup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
				  unsigned long off, unsigned long len)
{
	memcpy(dst_buff, src_buff + off, len);
	return 0;
}

BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, xdp->data,
				xdp_size, bpf_xdp_copy);
}

static const struct bpf_func_proto bpf_xdp_event_output_proto = {
	.func		= bpf_xdp_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
{
	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
}

static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
	.func		= bpf_get_socket_cookie,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
{
	struct sock *sk = sk_to_full_sk(skb->sk);
	kuid_t kuid;

	if (!sk || !sk_fullsock(sk))
		return overflowuid;
	kuid = sock_net_uid(sock_net(sk), sk);
	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
}

static const struct bpf_func_proto bpf_get_socket_uid_proto = {
	.func		= bpf_get_socket_uid,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
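/* Usage sketch (illustrative): a socket filter or cgroup skb program can
 * combine both helpers for per-socket accounting keyed by cookie, with
 * the owning uid as auxiliary data:
 *
 *	__u64 cookie = bpf_get_socket_cookie(skb);
 *	__u32 uid    = bpf_get_socket_uid(skb);
 *
 *	bpf_map_update_elem(&acct, &cookie, &uid, BPF_ANY);
 */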
BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
	   int, level, int, optname, char *, optval, int, optlen)
{
	struct sock *sk = bpf_sock->sk;
	int ret = 0;
	int val;

	if (!sk_fullsock(sk))
		return -EINVAL;

	if (level == SOL_SOCKET) {
		if (optlen != sizeof(int))
			return -EINVAL;
		val = *((int *)optval);

		/* Only some socketops are supported */
		switch (optname) {
		case SO_RCVBUF:
			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
			sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
			break;
		case SO_SNDBUF:
			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
			sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
			break;
		case SO_MAX_PACING_RATE:
			sk->sk_max_pacing_rate = val;
			sk->sk_pacing_rate = min(sk->sk_pacing_rate,
						 sk->sk_max_pacing_rate);
			break;
		case SO_PRIORITY:
			sk->sk_priority = val;
			break;
		case SO_RCVLOWAT:
			if (val < 0)
				val = INT_MAX;
			sk->sk_rcvlowat = val ? : 1;
			break;
		case SO_MARK:
			sk->sk_mark = val;
			break;
		default:
			ret = -EINVAL;
		}
#ifdef CONFIG_INET
	} else if (level == SOL_TCP &&
		   sk->sk_prot->setsockopt == tcp_setsockopt) {
		if (optname == TCP_CONGESTION) {
			char name[TCP_CA_NAME_MAX];

			strncpy(name, optval, min_t(long, optlen,
						    TCP_CA_NAME_MAX-1));
			name[TCP_CA_NAME_MAX-1] = 0;
			ret = tcp_set_congestion_control(sk, name, false);
			if (!ret && bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN)
				/* replacing an existing ca */
				tcp_reinit_congestion_control(sk,
					inet_csk(sk)->icsk_ca_ops);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);

			if (optlen != sizeof(int))
				return -EINVAL;

			val = *((int *)optval);
			/* Only some options are supported */
			switch (optname) {
			case TCP_BPF_IW:
				if (val <= 0 || tp->data_segs_out > 0)
					ret = -EINVAL;
				else
					tp->snd_cwnd = val;
				break;
			case TCP_BPF_SNDCWND_CLAMP:
				if (val <= 0) {
					ret = -EINVAL;
				} else {
					tp->snd_cwnd_clamp = val;
					tp->snd_ssthresh = val;
				}
				break;
			default:
				ret = -EINVAL;
			}
		}
#endif
	} else {
		ret = -EINVAL;
	}
	return ret;
}

static const struct bpf_func_proto bpf_setsockopt_proto = {
	.func		= bpf_setsockopt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
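/* Usage sketch (illustrative): a BPF_PROG_TYPE_SOCK_OPS program pinning
 * the congestion control and clamping the send window for new flows:
 *
 *	char cc[] = "bbr";	// assumes that module is available
 *	int clamp = 100;
 *
 *	bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION, cc, sizeof(cc));
 *	bpf_setsockopt(skops, SOL_TCP, TCP_BPF_SNDCWND_CLAMP,
 *		       &clamp, sizeof(clamp));
 */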
static const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_skb_vlan_push:
		return &bpf_skb_vlan_push_proto;
	case BPF_FUNC_skb_vlan_pop:
		return &bpf_skb_vlan_pop_proto;
	case BPF_FUNC_skb_change_proto:
		return &bpf_skb_change_proto_proto;
	case BPF_FUNC_skb_change_type:
		return &bpf_skb_change_type_proto;
	case BPF_FUNC_skb_adjust_room:
		return &bpf_skb_adjust_room_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	case BPF_FUNC_set_hash:
		return &bpf_set_hash_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_xdp_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_xdp_adjust_head:
		return &bpf_xdp_adjust_head_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_inout_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sock_ops_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_setsockopt_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_xmit_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	default:
		return lwt_inout_func_proto(func_id);
	}
}
static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
		if (off + size > offsetofend(struct __sk_buff, cb[4]))
			return false;
		break;
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_end):
		if (size != size_default)
			return false;
		break;
	default:
		/* Only narrow read access allowed for now. */
		if (type == BPF_WRITE) {
			if (size != size_default)
				return false;
		} else {
			bpf_ctx_record_field_size(info, size_default);
			if (!bpf_ctx_narrow_access_ok(off, size, size_default))
				return false;
		}
	}

	return true;
}

static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_end):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	return bpf_skb_is_valid_access(off, size, type, info);
}

static bool lwt_is_valid_access(int off, int size,
				enum bpf_access_type type,
				struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return bpf_skb_is_valid_access(off, size, type, info);
}

static bool sock_filter_is_valid_access(int off, int size,
					enum bpf_access_type type,
					struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sock, bound_dev_if):
			break;
		default:
			return false;
		}
	}

	if (off < 0 || off + size > sizeof(struct bpf_sock))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	if (!direct_write)
		return 0;

	/* if (!skb->cloned)
	 *       goto start;
	 *
	 * (Fast-path, otherwise approximation that we might be
	 *  a clone, do the rest in helper.)
	 */
	*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
	*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);

	/* ret = bpf_skb_pull_data(skb, 0); */
	*insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	*insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
	*insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			       BPF_FUNC_skb_pull_data);
	/* if (!ret)
	 *      goto restore;
	 * return TC_ACT_SHOT;
	 */
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, TC_ACT_SHOT);
	*insn++ = BPF_EXIT_INSN();

	/* restore: */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
	/* start: */
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}
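/* The generated prologue above corresponds roughly to this C sketch
 * (illustrative): for programs that write to the packet directly, a
 * possibly-cloned skb is unshared before the first instruction runs:
 *
 *	if (skb->cloned) {
 *		if (bpf_skb_pull_data(skb, 0))
 *			return TC_ACT_SHOT;
 *	}
 *	// fall through into the original program
 */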
static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, tc_index):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range(struct __sk_buff, tc_classid):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return bpf_skb_is_valid_access(off, size, type, info);
}

static bool __is_valid_xdp_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct xdp_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static bool xdp_is_valid_access(int off, int size,
				enum bpf_access_type type,
				struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE)
		return false;

	switch (off) {
	case offsetof(struct xdp_md, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct xdp_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_xdp_access(off, size);
}

void bpf_warn_invalid_xdp_action(u32 act)
{
	WARN_ONCE(1, "Illegal XDP return value %u, expect packet loss\n", act);
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);

static bool __is_valid_sock_ops_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct bpf_sock_ops))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static bool sock_ops_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sock_ops, op) ...
		     offsetof(struct bpf_sock_ops, replylong[3]):
			break;
		default:
			return false;
		}
	}

	return __is_valid_sock_ops_access(off, size);
}
static u32 bpf_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, len):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, len, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, protocol):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, protocol, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, vlan_proto):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, vlan_proto, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, priority):
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, priority, 4,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, priority, 4,
							     target_size));
		break;

	case offsetof(struct __sk_buff, ingress_ifindex):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, skb_iif, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct net_device, ifindex, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, hash):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, hash, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, mark):
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, mark, 4,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, mark, 4,
							     target_size));
		break;

	case offsetof(struct __sk_buff, pkt_type):
		*target_size = 1;
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
				      PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
#endif
		break;

	case offsetof(struct __sk_buff, queue_mapping):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, queue_mapping, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, vlan_present):
	case offsetof(struct __sk_buff, vlan_tci):
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, vlan_tci, 2,
						     target_size));
		if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
		}
		break;

	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetofend(struct __sk_buff, cb[4]) - 1:
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
		BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
			      offsetof(struct qdisc_skb_cb, data)) %
			     sizeof(__u64));

		prog->cb_access = 1;
		off  = si->off;
		off -= offsetof(struct __sk_buff, cb[0]);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_classid):
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);

		off  = si->off;
		off -= offsetof(struct __sk_buff, tc_classid);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, tc_classid);
		*target_size = 2;
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, data));
		break;

	case offsetof(struct __sk_buff, data_end):
		off  = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct bpf_skb_data_end, data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_index):
#ifdef CONFIG_NET_SCHED
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, tc_index, 2,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, tc_index, 2,
							     target_size));
#else
		*target_size = 2;
		if (type == BPF_WRITE)
			*insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
		else
			*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct __sk_buff, napi_id):
#if defined(CONFIG_NET_RX_BUSY_POLL)
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, napi_id, 4,
						     target_size));
		*insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#else
		*target_size = 4;
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
		break;
	}

	return insn - insn_buf;
}
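/* Example of the rewrite performed above (illustrative): a program load
 * of __sk_buff::len such as
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
 *
 * is converted into a direct load from the in-kernel socket buffer,
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len))
 *
 * so no stable UAPI offsets ever leak into the running program.
 */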
static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
					  const struct bpf_insn *si,
					  struct bpf_insn *insn_buf,
					  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sock, bound_dev_if):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					offsetof(struct sock, sk_bound_dev_if));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					offsetof(struct sock, sk_bound_dev_if));
		break;

	case offsetof(struct bpf_sock, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      offsetof(struct sock, sk_family));
		break;

	case offsetof(struct bpf_sock, type):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
		break;

	case offsetof(struct bpf_sock, protocol):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
		break;
	}

	return insn - insn_buf;
}

static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct __sk_buff, ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct net_device, ifindex, 4,
						     target_size));
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}

static u32 xdp_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct xdp_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data));
		break;
	case offsetof(struct xdp_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_end));
		break;
	}

	return insn - insn_buf;
}
static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
				       const struct bpf_insn *si,
				       struct bpf_insn *insn_buf,
				       struct bpf_prog *prog,
				       u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock_ops, op) ...
	     offsetof(struct bpf_sock_ops, replylong[3]):
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
		off = si->off;
		off -= offsetof(struct bpf_sock_ops, op);
		off += offsetof(struct bpf_sock_ops_kern, op);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		break;

	case offsetof(struct bpf_sock_ops, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;

	case offsetof(struct bpf_sock_ops, local_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, remote_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;
	}

	return insn - insn_buf;
}
const struct bpf_verifier_ops sk_filter_prog_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_verifier_ops tc_cls_act_prog_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops xdp_prog_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
	.test_run		= bpf_prog_test_run_xdp,
};

const struct bpf_verifier_ops cg_skb_prog_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_inout_prog_ops = {
	.get_func_proto		= lwt_inout_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_xmit_prog_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops cg_sock_prog_ops = {
	.get_func_proto		= bpf_base_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= sock_filter_convert_ctx_access,
};

const struct bpf_verifier_ops sock_ops_prog_ops = {
	.get_func_proto		= sock_ops_func_proto,
	.is_valid_access	= sock_ops_is_valid_access,
	.convert_ctx_access	= sock_ops_convert_ctx_access,
};

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore. eBPF programs that
	 * have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number