2 * Linux Socket Filter - Kernel level socket filtering
4 * Based on the design of the Berkeley Packet Filter. The new
5 * internal format has been designed by PLUMgrid:
7 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
11 * Jay Schulist <jschlst@samba.org>
12 * Alexei Starovoitov <ast@plumgrid.com>
13 * Daniel Borkmann <dborkman@redhat.com>
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
20 * Andi Kleen - Fix a few bad bugs and races.
21 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
24 #include <linux/module.h>
25 #include <linux/types.h>
27 #include <linux/fcntl.h>
28 #include <linux/socket.h>
29 #include <linux/sock_diag.h>
31 #include <linux/inet.h>
32 #include <linux/netdevice.h>
33 #include <linux/if_packet.h>
34 #include <linux/if_arp.h>
35 #include <linux/gfp.h>
37 #include <net/protocol.h>
38 #include <net/netlink.h>
39 #include <linux/skbuff.h>
41 #include <net/flow_dissector.h>
42 #include <linux/errno.h>
43 #include <linux/timer.h>
44 #include <linux/uaccess.h>
45 #include <asm/unaligned.h>
46 #include <asm/cmpxchg.h>
47 #include <linux/filter.h>
48 #include <linux/ratelimit.h>
49 #include <linux/seccomp.h>
50 #include <linux/if_vlan.h>
51 #include <linux/bpf.h>
52 #include <net/sch_generic.h>
53 #include <net/cls_cgroup.h>
54 #include <net/dst_metadata.h>
56 #include <net/sock_reuseport.h>
57 #include <net/busy_poll.h>
59 #include <linux/bpf_trace.h>
62 * sk_filter_trim_cap - run a packet through a socket filter
63 * @sk: sock associated with &sk_buff
64 * @skb: buffer to filter
65 * @cap: limit on how short the eBPF program may trim the packet
67 * Run the eBPF program and then cut skb->data to correct size returned by
68 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
69 * than pkt_len we keep whole skb->data. This is the socket level
70 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
71 * be accepted or -EPERM if the packet should be tossed.
74 int sk_filter_trim_cap(struct sock
*sk
, struct sk_buff
*skb
, unsigned int cap
)
77 struct sk_filter
*filter
;
80 * If the skb was allocated from pfmemalloc reserves, only
81 * allow SOCK_MEMALLOC sockets to use it as this socket is
84 if (skb_pfmemalloc(skb
) && !sock_flag(sk
, SOCK_MEMALLOC
)) {
85 NET_INC_STATS(sock_net(sk
), LINUX_MIB_PFMEMALLOCDROP
);
88 err
= BPF_CGROUP_RUN_PROG_INET_INGRESS(sk
, skb
);
92 err
= security_sock_rcv_skb(sk
, skb
);
97 filter
= rcu_dereference(sk
->sk_filter
);
99 struct sock
*save_sk
= skb
->sk
;
100 unsigned int pkt_len
;
103 pkt_len
= bpf_prog_run_save_cb(filter
->prog
, skb
);
105 err
= pkt_len
? pskb_trim(skb
, max(cap
, pkt_len
)) : -EPERM
;
111 EXPORT_SYMBOL(sk_filter_trim_cap
);
113 BPF_CALL_1(__skb_get_pay_offset
, struct sk_buff
*, skb
)
115 return skb_get_poff(skb
);
118 BPF_CALL_3(__skb_get_nlattr
, struct sk_buff
*, skb
, u32
, a
, u32
, x
)
122 if (skb_is_nonlinear(skb
))
125 if (skb
->len
< sizeof(struct nlattr
))
128 if (a
> skb
->len
- sizeof(struct nlattr
))
131 nla
= nla_find((struct nlattr
*) &skb
->data
[a
], skb
->len
- a
, x
);
133 return (void *) nla
- (void *) skb
->data
;
138 BPF_CALL_3(__skb_get_nlattr_nest
, struct sk_buff
*, skb
, u32
, a
, u32
, x
)
142 if (skb_is_nonlinear(skb
))
145 if (skb
->len
< sizeof(struct nlattr
))
148 if (a
> skb
->len
- sizeof(struct nlattr
))
151 nla
= (struct nlattr
*) &skb
->data
[a
];
152 if (nla
->nla_len
> skb
->len
- a
)
155 nla
= nla_find_nested(nla
, x
);
157 return (void *) nla
- (void *) skb
->data
;
162 BPF_CALL_0(__get_raw_cpu_id
)
164 return raw_smp_processor_id();
167 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto
= {
168 .func
= __get_raw_cpu_id
,
170 .ret_type
= RET_INTEGER
,
173 static u32
convert_skb_access(int skb_field
, int dst_reg
, int src_reg
,
174 struct bpf_insn
*insn_buf
)
176 struct bpf_insn
*insn
= insn_buf
;
180 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, mark
) != 4);
182 *insn
++ = BPF_LDX_MEM(BPF_W
, dst_reg
, src_reg
,
183 offsetof(struct sk_buff
, mark
));
187 *insn
++ = BPF_LDX_MEM(BPF_B
, dst_reg
, src_reg
, PKT_TYPE_OFFSET());
188 *insn
++ = BPF_ALU32_IMM(BPF_AND
, dst_reg
, PKT_TYPE_MAX
);
189 #ifdef __BIG_ENDIAN_BITFIELD
190 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, dst_reg
, 5);
195 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, queue_mapping
) != 2);
197 *insn
++ = BPF_LDX_MEM(BPF_H
, dst_reg
, src_reg
,
198 offsetof(struct sk_buff
, queue_mapping
));
201 case SKF_AD_VLAN_TAG
:
202 case SKF_AD_VLAN_TAG_PRESENT
:
203 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, vlan_tci
) != 2);
204 BUILD_BUG_ON(VLAN_TAG_PRESENT
!= 0x1000);
206 /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
207 *insn
++ = BPF_LDX_MEM(BPF_H
, dst_reg
, src_reg
,
208 offsetof(struct sk_buff
, vlan_tci
));
209 if (skb_field
== SKF_AD_VLAN_TAG
) {
210 *insn
++ = BPF_ALU32_IMM(BPF_AND
, dst_reg
,
214 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, dst_reg
, 12);
216 *insn
++ = BPF_ALU32_IMM(BPF_AND
, dst_reg
, 1);
221 return insn
- insn_buf
;
224 static bool convert_bpf_extensions(struct sock_filter
*fp
,
225 struct bpf_insn
**insnp
)
227 struct bpf_insn
*insn
= *insnp
;
231 case SKF_AD_OFF
+ SKF_AD_PROTOCOL
:
232 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, protocol
) != 2);
234 /* A = *(u16 *) (CTX + offsetof(protocol)) */
235 *insn
++ = BPF_LDX_MEM(BPF_H
, BPF_REG_A
, BPF_REG_CTX
,
236 offsetof(struct sk_buff
, protocol
));
237 /* A = ntohs(A) [emitting a nop or swap16] */
238 *insn
= BPF_ENDIAN(BPF_FROM_BE
, BPF_REG_A
, 16);
241 case SKF_AD_OFF
+ SKF_AD_PKTTYPE
:
242 cnt
= convert_skb_access(SKF_AD_PKTTYPE
, BPF_REG_A
, BPF_REG_CTX
, insn
);
246 case SKF_AD_OFF
+ SKF_AD_IFINDEX
:
247 case SKF_AD_OFF
+ SKF_AD_HATYPE
:
248 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device
, ifindex
) != 4);
249 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device
, type
) != 2);
251 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, dev
),
252 BPF_REG_TMP
, BPF_REG_CTX
,
253 offsetof(struct sk_buff
, dev
));
254 /* if (tmp != 0) goto pc + 1 */
255 *insn
++ = BPF_JMP_IMM(BPF_JNE
, BPF_REG_TMP
, 0, 1);
256 *insn
++ = BPF_EXIT_INSN();
257 if (fp
->k
== SKF_AD_OFF
+ SKF_AD_IFINDEX
)
258 *insn
= BPF_LDX_MEM(BPF_W
, BPF_REG_A
, BPF_REG_TMP
,
259 offsetof(struct net_device
, ifindex
));
261 *insn
= BPF_LDX_MEM(BPF_H
, BPF_REG_A
, BPF_REG_TMP
,
262 offsetof(struct net_device
, type
));
265 case SKF_AD_OFF
+ SKF_AD_MARK
:
266 cnt
= convert_skb_access(SKF_AD_MARK
, BPF_REG_A
, BPF_REG_CTX
, insn
);
270 case SKF_AD_OFF
+ SKF_AD_RXHASH
:
271 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, hash
) != 4);
273 *insn
= BPF_LDX_MEM(BPF_W
, BPF_REG_A
, BPF_REG_CTX
,
274 offsetof(struct sk_buff
, hash
));
277 case SKF_AD_OFF
+ SKF_AD_QUEUE
:
278 cnt
= convert_skb_access(SKF_AD_QUEUE
, BPF_REG_A
, BPF_REG_CTX
, insn
);
282 case SKF_AD_OFF
+ SKF_AD_VLAN_TAG
:
283 cnt
= convert_skb_access(SKF_AD_VLAN_TAG
,
284 BPF_REG_A
, BPF_REG_CTX
, insn
);
288 case SKF_AD_OFF
+ SKF_AD_VLAN_TAG_PRESENT
:
289 cnt
= convert_skb_access(SKF_AD_VLAN_TAG_PRESENT
,
290 BPF_REG_A
, BPF_REG_CTX
, insn
);
294 case SKF_AD_OFF
+ SKF_AD_VLAN_TPID
:
295 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, vlan_proto
) != 2);
297 /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
298 *insn
++ = BPF_LDX_MEM(BPF_H
, BPF_REG_A
, BPF_REG_CTX
,
299 offsetof(struct sk_buff
, vlan_proto
));
300 /* A = ntohs(A) [emitting a nop or swap16] */
301 *insn
= BPF_ENDIAN(BPF_FROM_BE
, BPF_REG_A
, 16);
304 case SKF_AD_OFF
+ SKF_AD_PAY_OFFSET
:
305 case SKF_AD_OFF
+ SKF_AD_NLATTR
:
306 case SKF_AD_OFF
+ SKF_AD_NLATTR_NEST
:
307 case SKF_AD_OFF
+ SKF_AD_CPU
:
308 case SKF_AD_OFF
+ SKF_AD_RANDOM
:
310 *insn
++ = BPF_MOV64_REG(BPF_REG_ARG1
, BPF_REG_CTX
);
312 *insn
++ = BPF_MOV64_REG(BPF_REG_ARG2
, BPF_REG_A
);
314 *insn
++ = BPF_MOV64_REG(BPF_REG_ARG3
, BPF_REG_X
);
315 /* Emit call(arg1=CTX, arg2=A, arg3=X) */
317 case SKF_AD_OFF
+ SKF_AD_PAY_OFFSET
:
318 *insn
= BPF_EMIT_CALL(__skb_get_pay_offset
);
320 case SKF_AD_OFF
+ SKF_AD_NLATTR
:
321 *insn
= BPF_EMIT_CALL(__skb_get_nlattr
);
323 case SKF_AD_OFF
+ SKF_AD_NLATTR_NEST
:
324 *insn
= BPF_EMIT_CALL(__skb_get_nlattr_nest
);
326 case SKF_AD_OFF
+ SKF_AD_CPU
:
327 *insn
= BPF_EMIT_CALL(__get_raw_cpu_id
);
329 case SKF_AD_OFF
+ SKF_AD_RANDOM
:
330 *insn
= BPF_EMIT_CALL(bpf_user_rnd_u32
);
331 bpf_user_rnd_init_once();
336 case SKF_AD_OFF
+ SKF_AD_ALU_XOR_X
:
338 *insn
= BPF_ALU32_REG(BPF_XOR
, BPF_REG_A
, BPF_REG_X
);
342 /* This is just a dummy call to avoid letting the compiler
343 * evict __bpf_call_base() as an optimization. Placed here
344 * where no-one bothers.
346 BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
355 * bpf_convert_filter - convert filter program
356 * @prog: the user passed filter program
357 * @len: the length of the user passed filter program
358 * @new_prog: allocated 'struct bpf_prog' or NULL
359 * @new_len: pointer to store length of converted program
361 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
362 * style extended BPF (eBPF).
363 * Conversion workflow:
365 * 1) First pass for calculating the new program length:
366 * bpf_convert_filter(old_prog, old_len, NULL, &new_len)
368 * 2) 2nd pass to remap in two passes: 1st pass finds new
369 * jump offsets, 2nd pass remapping:
370 * bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
372 static int bpf_convert_filter(struct sock_filter
*prog
, int len
,
373 struct bpf_prog
*new_prog
, int *new_len
)
375 int new_flen
= 0, pass
= 0, target
, i
, stack_off
;
376 struct bpf_insn
*new_insn
, *first_insn
= NULL
;
377 struct sock_filter
*fp
;
381 BUILD_BUG_ON(BPF_MEMWORDS
* sizeof(u32
) > MAX_BPF_STACK
);
382 BUILD_BUG_ON(BPF_REG_FP
+ 1 != MAX_BPF_REG
);
384 if (len
<= 0 || len
> BPF_MAXINSNS
)
388 first_insn
= new_prog
->insnsi
;
389 addrs
= kcalloc(len
, sizeof(*addrs
),
390 GFP_KERNEL
| __GFP_NOWARN
);
396 new_insn
= first_insn
;
399 /* Classic BPF related prologue emission. */
401 /* Classic BPF expects A and X to be reset first. These need
402 * to be guaranteed to be the first two instructions.
404 *new_insn
++ = BPF_ALU64_REG(BPF_XOR
, BPF_REG_A
, BPF_REG_A
);
405 *new_insn
++ = BPF_ALU64_REG(BPF_XOR
, BPF_REG_X
, BPF_REG_X
);
407 /* All programs must keep CTX in callee saved BPF_REG_CTX.
408 * In eBPF case it's done by the compiler, here we need to
409 * do this ourself. Initial CTX is present in BPF_REG_ARG1.
411 *new_insn
++ = BPF_MOV64_REG(BPF_REG_CTX
, BPF_REG_ARG1
);
416 for (i
= 0; i
< len
; fp
++, i
++) {
417 struct bpf_insn tmp_insns
[6] = { };
418 struct bpf_insn
*insn
= tmp_insns
;
421 addrs
[i
] = new_insn
- first_insn
;
424 /* All arithmetic insns and skb loads map as-is. */
425 case BPF_ALU
| BPF_ADD
| BPF_X
:
426 case BPF_ALU
| BPF_ADD
| BPF_K
:
427 case BPF_ALU
| BPF_SUB
| BPF_X
:
428 case BPF_ALU
| BPF_SUB
| BPF_K
:
429 case BPF_ALU
| BPF_AND
| BPF_X
:
430 case BPF_ALU
| BPF_AND
| BPF_K
:
431 case BPF_ALU
| BPF_OR
| BPF_X
:
432 case BPF_ALU
| BPF_OR
| BPF_K
:
433 case BPF_ALU
| BPF_LSH
| BPF_X
:
434 case BPF_ALU
| BPF_LSH
| BPF_K
:
435 case BPF_ALU
| BPF_RSH
| BPF_X
:
436 case BPF_ALU
| BPF_RSH
| BPF_K
:
437 case BPF_ALU
| BPF_XOR
| BPF_X
:
438 case BPF_ALU
| BPF_XOR
| BPF_K
:
439 case BPF_ALU
| BPF_MUL
| BPF_X
:
440 case BPF_ALU
| BPF_MUL
| BPF_K
:
441 case BPF_ALU
| BPF_DIV
| BPF_X
:
442 case BPF_ALU
| BPF_DIV
| BPF_K
:
443 case BPF_ALU
| BPF_MOD
| BPF_X
:
444 case BPF_ALU
| BPF_MOD
| BPF_K
:
445 case BPF_ALU
| BPF_NEG
:
446 case BPF_LD
| BPF_ABS
| BPF_W
:
447 case BPF_LD
| BPF_ABS
| BPF_H
:
448 case BPF_LD
| BPF_ABS
| BPF_B
:
449 case BPF_LD
| BPF_IND
| BPF_W
:
450 case BPF_LD
| BPF_IND
| BPF_H
:
451 case BPF_LD
| BPF_IND
| BPF_B
:
452 /* Check for overloaded BPF extension and
453 * directly convert it if found, otherwise
454 * just move on with mapping.
456 if (BPF_CLASS(fp
->code
) == BPF_LD
&&
457 BPF_MODE(fp
->code
) == BPF_ABS
&&
458 convert_bpf_extensions(fp
, &insn
))
461 *insn
= BPF_RAW_INSN(fp
->code
, BPF_REG_A
, BPF_REG_X
, 0, fp
->k
);
464 /* Jump transformation cannot use BPF block macros
465 * everywhere as offset calculation and target updates
466 * require a bit more work than the rest, i.e. jump
467 * opcodes map as-is, but offsets need adjustment.
470 #define BPF_EMIT_JMP \
472 if (target >= len || target < 0) \
474 insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
475 /* Adjust pc relative offset for 2nd or 3rd insn. */ \
476 insn->off -= insn - tmp_insns; \
479 case BPF_JMP
| BPF_JA
:
480 target
= i
+ fp
->k
+ 1;
481 insn
->code
= fp
->code
;
485 case BPF_JMP
| BPF_JEQ
| BPF_K
:
486 case BPF_JMP
| BPF_JEQ
| BPF_X
:
487 case BPF_JMP
| BPF_JSET
| BPF_K
:
488 case BPF_JMP
| BPF_JSET
| BPF_X
:
489 case BPF_JMP
| BPF_JGT
| BPF_K
:
490 case BPF_JMP
| BPF_JGT
| BPF_X
:
491 case BPF_JMP
| BPF_JGE
| BPF_K
:
492 case BPF_JMP
| BPF_JGE
| BPF_X
:
493 if (BPF_SRC(fp
->code
) == BPF_K
&& (int) fp
->k
< 0) {
494 /* BPF immediates are signed, zero extend
495 * immediate into tmp register and use it
498 *insn
++ = BPF_MOV32_IMM(BPF_REG_TMP
, fp
->k
);
500 insn
->dst_reg
= BPF_REG_A
;
501 insn
->src_reg
= BPF_REG_TMP
;
504 insn
->dst_reg
= BPF_REG_A
;
506 bpf_src
= BPF_SRC(fp
->code
);
507 insn
->src_reg
= bpf_src
== BPF_X
? BPF_REG_X
: 0;
510 /* Common case where 'jump_false' is next insn. */
512 insn
->code
= BPF_JMP
| BPF_OP(fp
->code
) | bpf_src
;
513 target
= i
+ fp
->jt
+ 1;
518 /* Convert some jumps when 'jump_true' is next insn. */
520 switch (BPF_OP(fp
->code
)) {
522 insn
->code
= BPF_JMP
| BPF_JNE
| bpf_src
;
525 insn
->code
= BPF_JMP
| BPF_JLE
| bpf_src
;
528 insn
->code
= BPF_JMP
| BPF_JLT
| bpf_src
;
534 target
= i
+ fp
->jf
+ 1;
539 /* Other jumps are mapped into two insns: Jxx and JA. */
540 target
= i
+ fp
->jt
+ 1;
541 insn
->code
= BPF_JMP
| BPF_OP(fp
->code
) | bpf_src
;
545 insn
->code
= BPF_JMP
| BPF_JA
;
546 target
= i
+ fp
->jf
+ 1;
550 /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
551 case BPF_LDX
| BPF_MSH
| BPF_B
:
553 *insn
++ = BPF_MOV64_REG(BPF_REG_TMP
, BPF_REG_A
);
554 /* A = BPF_R0 = *(u8 *) (skb->data + K) */
555 *insn
++ = BPF_LD_ABS(BPF_B
, fp
->k
);
557 *insn
++ = BPF_ALU32_IMM(BPF_AND
, BPF_REG_A
, 0xf);
559 *insn
++ = BPF_ALU32_IMM(BPF_LSH
, BPF_REG_A
, 2);
561 *insn
++ = BPF_MOV64_REG(BPF_REG_X
, BPF_REG_A
);
563 *insn
= BPF_MOV64_REG(BPF_REG_A
, BPF_REG_TMP
);
566 /* RET_K is remaped into 2 insns. RET_A case doesn't need an
567 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
569 case BPF_RET
| BPF_A
:
570 case BPF_RET
| BPF_K
:
571 if (BPF_RVAL(fp
->code
) == BPF_K
)
572 *insn
++ = BPF_MOV32_RAW(BPF_K
, BPF_REG_0
,
574 *insn
= BPF_EXIT_INSN();
577 /* Store to stack. */
580 stack_off
= fp
->k
* 4 + 4;
581 *insn
= BPF_STX_MEM(BPF_W
, BPF_REG_FP
, BPF_CLASS(fp
->code
) ==
582 BPF_ST
? BPF_REG_A
: BPF_REG_X
,
584 /* check_load_and_stores() verifies that classic BPF can
585 * load from stack only after write, so tracking
586 * stack_depth for ST|STX insns is enough
588 if (new_prog
&& new_prog
->aux
->stack_depth
< stack_off
)
589 new_prog
->aux
->stack_depth
= stack_off
;
592 /* Load from stack. */
593 case BPF_LD
| BPF_MEM
:
594 case BPF_LDX
| BPF_MEM
:
595 stack_off
= fp
->k
* 4 + 4;
596 *insn
= BPF_LDX_MEM(BPF_W
, BPF_CLASS(fp
->code
) == BPF_LD
?
597 BPF_REG_A
: BPF_REG_X
, BPF_REG_FP
,
602 case BPF_LD
| BPF_IMM
:
603 case BPF_LDX
| BPF_IMM
:
604 *insn
= BPF_MOV32_IMM(BPF_CLASS(fp
->code
) == BPF_LD
?
605 BPF_REG_A
: BPF_REG_X
, fp
->k
);
609 case BPF_MISC
| BPF_TAX
:
610 *insn
= BPF_MOV64_REG(BPF_REG_X
, BPF_REG_A
);
614 case BPF_MISC
| BPF_TXA
:
615 *insn
= BPF_MOV64_REG(BPF_REG_A
, BPF_REG_X
);
618 /* A = skb->len or X = skb->len */
619 case BPF_LD
| BPF_W
| BPF_LEN
:
620 case BPF_LDX
| BPF_W
| BPF_LEN
:
621 *insn
= BPF_LDX_MEM(BPF_W
, BPF_CLASS(fp
->code
) == BPF_LD
?
622 BPF_REG_A
: BPF_REG_X
, BPF_REG_CTX
,
623 offsetof(struct sk_buff
, len
));
626 /* Access seccomp_data fields. */
627 case BPF_LDX
| BPF_ABS
| BPF_W
:
628 /* A = *(u32 *) (ctx + K) */
629 *insn
= BPF_LDX_MEM(BPF_W
, BPF_REG_A
, BPF_REG_CTX
, fp
->k
);
632 /* Unknown instruction. */
639 memcpy(new_insn
, tmp_insns
,
640 sizeof(*insn
) * (insn
- tmp_insns
));
641 new_insn
+= insn
- tmp_insns
;
645 /* Only calculating new length. */
646 *new_len
= new_insn
- first_insn
;
651 if (new_flen
!= new_insn
- first_insn
) {
652 new_flen
= new_insn
- first_insn
;
659 BUG_ON(*new_len
!= new_flen
);
668 * As we dont want to clear mem[] array for each packet going through
669 * __bpf_prog_run(), we check that filter loaded by user never try to read
670 * a cell if not previously written, and we check all branches to be sure
671 * a malicious user doesn't try to abuse us.
673 static int check_load_and_stores(const struct sock_filter
*filter
, int flen
)
675 u16
*masks
, memvalid
= 0; /* One bit per cell, 16 cells */
678 BUILD_BUG_ON(BPF_MEMWORDS
> 16);
680 masks
= kmalloc_array(flen
, sizeof(*masks
), GFP_KERNEL
);
684 memset(masks
, 0xff, flen
* sizeof(*masks
));
686 for (pc
= 0; pc
< flen
; pc
++) {
687 memvalid
&= masks
[pc
];
689 switch (filter
[pc
].code
) {
692 memvalid
|= (1 << filter
[pc
].k
);
694 case BPF_LD
| BPF_MEM
:
695 case BPF_LDX
| BPF_MEM
:
696 if (!(memvalid
& (1 << filter
[pc
].k
))) {
701 case BPF_JMP
| BPF_JA
:
702 /* A jump must set masks on target */
703 masks
[pc
+ 1 + filter
[pc
].k
] &= memvalid
;
706 case BPF_JMP
| BPF_JEQ
| BPF_K
:
707 case BPF_JMP
| BPF_JEQ
| BPF_X
:
708 case BPF_JMP
| BPF_JGE
| BPF_K
:
709 case BPF_JMP
| BPF_JGE
| BPF_X
:
710 case BPF_JMP
| BPF_JGT
| BPF_K
:
711 case BPF_JMP
| BPF_JGT
| BPF_X
:
712 case BPF_JMP
| BPF_JSET
| BPF_K
:
713 case BPF_JMP
| BPF_JSET
| BPF_X
:
714 /* A jump must set masks on targets */
715 masks
[pc
+ 1 + filter
[pc
].jt
] &= memvalid
;
716 masks
[pc
+ 1 + filter
[pc
].jf
] &= memvalid
;
726 static bool chk_code_allowed(u16 code_to_probe
)
728 static const bool codes
[] = {
729 /* 32 bit ALU operations */
730 [BPF_ALU
| BPF_ADD
| BPF_K
] = true,
731 [BPF_ALU
| BPF_ADD
| BPF_X
] = true,
732 [BPF_ALU
| BPF_SUB
| BPF_K
] = true,
733 [BPF_ALU
| BPF_SUB
| BPF_X
] = true,
734 [BPF_ALU
| BPF_MUL
| BPF_K
] = true,
735 [BPF_ALU
| BPF_MUL
| BPF_X
] = true,
736 [BPF_ALU
| BPF_DIV
| BPF_K
] = true,
737 [BPF_ALU
| BPF_DIV
| BPF_X
] = true,
738 [BPF_ALU
| BPF_MOD
| BPF_K
] = true,
739 [BPF_ALU
| BPF_MOD
| BPF_X
] = true,
740 [BPF_ALU
| BPF_AND
| BPF_K
] = true,
741 [BPF_ALU
| BPF_AND
| BPF_X
] = true,
742 [BPF_ALU
| BPF_OR
| BPF_K
] = true,
743 [BPF_ALU
| BPF_OR
| BPF_X
] = true,
744 [BPF_ALU
| BPF_XOR
| BPF_K
] = true,
745 [BPF_ALU
| BPF_XOR
| BPF_X
] = true,
746 [BPF_ALU
| BPF_LSH
| BPF_K
] = true,
747 [BPF_ALU
| BPF_LSH
| BPF_X
] = true,
748 [BPF_ALU
| BPF_RSH
| BPF_K
] = true,
749 [BPF_ALU
| BPF_RSH
| BPF_X
] = true,
750 [BPF_ALU
| BPF_NEG
] = true,
751 /* Load instructions */
752 [BPF_LD
| BPF_W
| BPF_ABS
] = true,
753 [BPF_LD
| BPF_H
| BPF_ABS
] = true,
754 [BPF_LD
| BPF_B
| BPF_ABS
] = true,
755 [BPF_LD
| BPF_W
| BPF_LEN
] = true,
756 [BPF_LD
| BPF_W
| BPF_IND
] = true,
757 [BPF_LD
| BPF_H
| BPF_IND
] = true,
758 [BPF_LD
| BPF_B
| BPF_IND
] = true,
759 [BPF_LD
| BPF_IMM
] = true,
760 [BPF_LD
| BPF_MEM
] = true,
761 [BPF_LDX
| BPF_W
| BPF_LEN
] = true,
762 [BPF_LDX
| BPF_B
| BPF_MSH
] = true,
763 [BPF_LDX
| BPF_IMM
] = true,
764 [BPF_LDX
| BPF_MEM
] = true,
765 /* Store instructions */
768 /* Misc instructions */
769 [BPF_MISC
| BPF_TAX
] = true,
770 [BPF_MISC
| BPF_TXA
] = true,
771 /* Return instructions */
772 [BPF_RET
| BPF_K
] = true,
773 [BPF_RET
| BPF_A
] = true,
774 /* Jump instructions */
775 [BPF_JMP
| BPF_JA
] = true,
776 [BPF_JMP
| BPF_JEQ
| BPF_K
] = true,
777 [BPF_JMP
| BPF_JEQ
| BPF_X
] = true,
778 [BPF_JMP
| BPF_JGE
| BPF_K
] = true,
779 [BPF_JMP
| BPF_JGE
| BPF_X
] = true,
780 [BPF_JMP
| BPF_JGT
| BPF_K
] = true,
781 [BPF_JMP
| BPF_JGT
| BPF_X
] = true,
782 [BPF_JMP
| BPF_JSET
| BPF_K
] = true,
783 [BPF_JMP
| BPF_JSET
| BPF_X
] = true,
786 if (code_to_probe
>= ARRAY_SIZE(codes
))
789 return codes
[code_to_probe
];
792 static bool bpf_check_basics_ok(const struct sock_filter
*filter
,
797 if (flen
== 0 || flen
> BPF_MAXINSNS
)
804 * bpf_check_classic - verify socket filter code
805 * @filter: filter to verify
806 * @flen: length of filter
808 * Check the user's filter code. If we let some ugly
809 * filter code slip through kaboom! The filter must contain
810 * no references or jumps that are out of range, no illegal
811 * instructions, and must end with a RET instruction.
813 * All jumps are forward as they are not signed.
815 * Returns 0 if the rule set is legal or -EINVAL if not.
817 static int bpf_check_classic(const struct sock_filter
*filter
,
823 /* Check the filter code now */
824 for (pc
= 0; pc
< flen
; pc
++) {
825 const struct sock_filter
*ftest
= &filter
[pc
];
827 /* May we actually operate on this code? */
828 if (!chk_code_allowed(ftest
->code
))
831 /* Some instructions need special checks */
832 switch (ftest
->code
) {
833 case BPF_ALU
| BPF_DIV
| BPF_K
:
834 case BPF_ALU
| BPF_MOD
| BPF_K
:
835 /* Check for division by zero */
839 case BPF_ALU
| BPF_LSH
| BPF_K
:
840 case BPF_ALU
| BPF_RSH
| BPF_K
:
844 case BPF_LD
| BPF_MEM
:
845 case BPF_LDX
| BPF_MEM
:
848 /* Check for invalid memory addresses */
849 if (ftest
->k
>= BPF_MEMWORDS
)
852 case BPF_JMP
| BPF_JA
:
853 /* Note, the large ftest->k might cause loops.
854 * Compare this with conditional jumps below,
855 * where offsets are limited. --ANK (981016)
857 if (ftest
->k
>= (unsigned int)(flen
- pc
- 1))
860 case BPF_JMP
| BPF_JEQ
| BPF_K
:
861 case BPF_JMP
| BPF_JEQ
| BPF_X
:
862 case BPF_JMP
| BPF_JGE
| BPF_K
:
863 case BPF_JMP
| BPF_JGE
| BPF_X
:
864 case BPF_JMP
| BPF_JGT
| BPF_K
:
865 case BPF_JMP
| BPF_JGT
| BPF_X
:
866 case BPF_JMP
| BPF_JSET
| BPF_K
:
867 case BPF_JMP
| BPF_JSET
| BPF_X
:
868 /* Both conditionals must be safe */
869 if (pc
+ ftest
->jt
+ 1 >= flen
||
870 pc
+ ftest
->jf
+ 1 >= flen
)
873 case BPF_LD
| BPF_W
| BPF_ABS
:
874 case BPF_LD
| BPF_H
| BPF_ABS
:
875 case BPF_LD
| BPF_B
| BPF_ABS
:
877 if (bpf_anc_helper(ftest
) & BPF_ANC
)
879 /* Ancillary operation unknown or unsupported */
880 if (anc_found
== false && ftest
->k
>= SKF_AD_OFF
)
885 /* Last instruction must be a RET code */
886 switch (filter
[flen
- 1].code
) {
887 case BPF_RET
| BPF_K
:
888 case BPF_RET
| BPF_A
:
889 return check_load_and_stores(filter
, flen
);
895 static int bpf_prog_store_orig_filter(struct bpf_prog
*fp
,
896 const struct sock_fprog
*fprog
)
898 unsigned int fsize
= bpf_classic_proglen(fprog
);
899 struct sock_fprog_kern
*fkprog
;
901 fp
->orig_prog
= kmalloc(sizeof(*fkprog
), GFP_KERNEL
);
905 fkprog
= fp
->orig_prog
;
906 fkprog
->len
= fprog
->len
;
908 fkprog
->filter
= kmemdup(fp
->insns
, fsize
,
909 GFP_KERNEL
| __GFP_NOWARN
);
910 if (!fkprog
->filter
) {
911 kfree(fp
->orig_prog
);
918 static void bpf_release_orig_filter(struct bpf_prog
*fp
)
920 struct sock_fprog_kern
*fprog
= fp
->orig_prog
;
923 kfree(fprog
->filter
);
928 static void __bpf_prog_release(struct bpf_prog
*prog
)
930 if (prog
->type
== BPF_PROG_TYPE_SOCKET_FILTER
) {
933 bpf_release_orig_filter(prog
);
938 static void __sk_filter_release(struct sk_filter
*fp
)
940 __bpf_prog_release(fp
->prog
);
945 * sk_filter_release_rcu - Release a socket filter by rcu_head
946 * @rcu: rcu_head that contains the sk_filter to free
948 static void sk_filter_release_rcu(struct rcu_head
*rcu
)
950 struct sk_filter
*fp
= container_of(rcu
, struct sk_filter
, rcu
);
952 __sk_filter_release(fp
);
956 * sk_filter_release - release a socket filter
957 * @fp: filter to remove
959 * Remove a filter from a socket and release its resources.
961 static void sk_filter_release(struct sk_filter
*fp
)
963 if (refcount_dec_and_test(&fp
->refcnt
))
964 call_rcu(&fp
->rcu
, sk_filter_release_rcu
);
967 void sk_filter_uncharge(struct sock
*sk
, struct sk_filter
*fp
)
969 u32 filter_size
= bpf_prog_size(fp
->prog
->len
);
971 atomic_sub(filter_size
, &sk
->sk_omem_alloc
);
972 sk_filter_release(fp
);
975 /* try to charge the socket memory if there is space available
976 * return true on success
978 static bool __sk_filter_charge(struct sock
*sk
, struct sk_filter
*fp
)
980 u32 filter_size
= bpf_prog_size(fp
->prog
->len
);
982 /* same check as in sock_kmalloc() */
983 if (filter_size
<= sysctl_optmem_max
&&
984 atomic_read(&sk
->sk_omem_alloc
) + filter_size
< sysctl_optmem_max
) {
985 atomic_add(filter_size
, &sk
->sk_omem_alloc
);
991 bool sk_filter_charge(struct sock
*sk
, struct sk_filter
*fp
)
993 if (!refcount_inc_not_zero(&fp
->refcnt
))
996 if (!__sk_filter_charge(sk
, fp
)) {
997 sk_filter_release(fp
);
1003 static struct bpf_prog
*bpf_migrate_filter(struct bpf_prog
*fp
)
1005 struct sock_filter
*old_prog
;
1006 struct bpf_prog
*old_fp
;
1007 int err
, new_len
, old_len
= fp
->len
;
1009 /* We are free to overwrite insns et al right here as it
1010 * won't be used at this point in time anymore internally
1011 * after the migration to the internal BPF instruction
1014 BUILD_BUG_ON(sizeof(struct sock_filter
) !=
1015 sizeof(struct bpf_insn
));
1017 /* Conversion cannot happen on overlapping memory areas,
1018 * so we need to keep the user BPF around until the 2nd
1019 * pass. At this time, the user BPF is stored in fp->insns.
1021 old_prog
= kmemdup(fp
->insns
, old_len
* sizeof(struct sock_filter
),
1022 GFP_KERNEL
| __GFP_NOWARN
);
1028 /* 1st pass: calculate the new program length. */
1029 err
= bpf_convert_filter(old_prog
, old_len
, NULL
, &new_len
);
1033 /* Expand fp for appending the new filter representation. */
1035 fp
= bpf_prog_realloc(old_fp
, bpf_prog_size(new_len
), 0);
1037 /* The old_fp is still around in case we couldn't
1038 * allocate new memory, so uncharge on that one.
1047 /* 2nd pass: remap sock_filter insns into bpf_insn insns. */
1048 err
= bpf_convert_filter(old_prog
, old_len
, fp
, &new_len
);
1050 /* 2nd bpf_convert_filter() can fail only if it fails
1051 * to allocate memory, remapping must succeed. Note,
1052 * that at this time old_fp has already been released
1057 /* We are guaranteed to never error here with cBPF to eBPF
1058 * transitions, since there's no issue with type compatibility
1059 * checks on program arrays.
1061 fp
= bpf_prog_select_runtime(fp
, &err
);
1069 __bpf_prog_release(fp
);
1070 return ERR_PTR(err
);
1073 static struct bpf_prog
*bpf_prepare_filter(struct bpf_prog
*fp
,
1074 bpf_aux_classic_check_t trans
)
1078 fp
->bpf_func
= NULL
;
1081 err
= bpf_check_classic(fp
->insns
, fp
->len
);
1083 __bpf_prog_release(fp
);
1084 return ERR_PTR(err
);
1087 /* There might be additional checks and transformations
1088 * needed on classic filters, f.e. in case of seccomp.
1091 err
= trans(fp
->insns
, fp
->len
);
1093 __bpf_prog_release(fp
);
1094 return ERR_PTR(err
);
1098 /* Probe if we can JIT compile the filter and if so, do
1099 * the compilation of the filter.
1101 bpf_jit_compile(fp
);
1103 /* JIT compiler couldn't process this filter, so do the
1104 * internal BPF translation for the optimized interpreter.
1107 fp
= bpf_migrate_filter(fp
);
1113 * bpf_prog_create - create an unattached filter
1114 * @pfp: the unattached filter that is created
1115 * @fprog: the filter program
1117 * Create a filter independent of any socket. We first run some
1118 * sanity checks on it to make sure it does not explode on us later.
1119 * If an error occurs or there is insufficient memory for the filter
1120 * a negative errno code is returned. On success the return is zero.
1122 int bpf_prog_create(struct bpf_prog
**pfp
, struct sock_fprog_kern
*fprog
)
1124 unsigned int fsize
= bpf_classic_proglen(fprog
);
1125 struct bpf_prog
*fp
;
1127 /* Make sure new filter is there and in the right amounts. */
1128 if (!bpf_check_basics_ok(fprog
->filter
, fprog
->len
))
1131 fp
= bpf_prog_alloc(bpf_prog_size(fprog
->len
), 0);
1135 memcpy(fp
->insns
, fprog
->filter
, fsize
);
1137 fp
->len
= fprog
->len
;
1138 /* Since unattached filters are not copied back to user
1139 * space through sk_get_filter(), we do not need to hold
1140 * a copy here, and can spare us the work.
1142 fp
->orig_prog
= NULL
;
1144 /* bpf_prepare_filter() already takes care of freeing
1145 * memory in case something goes wrong.
1147 fp
= bpf_prepare_filter(fp
, NULL
);
1154 EXPORT_SYMBOL_GPL(bpf_prog_create
);
1157 * bpf_prog_create_from_user - create an unattached filter from user buffer
1158 * @pfp: the unattached filter that is created
1159 * @fprog: the filter program
1160 * @trans: post-classic verifier transformation handler
1161 * @save_orig: save classic BPF program
1163 * This function effectively does the same as bpf_prog_create(), only
1164 * that it builds up its insns buffer from user space provided buffer.
1165 * It also allows for passing a bpf_aux_classic_check_t handler.
1167 int bpf_prog_create_from_user(struct bpf_prog
**pfp
, struct sock_fprog
*fprog
,
1168 bpf_aux_classic_check_t trans
, bool save_orig
)
1170 unsigned int fsize
= bpf_classic_proglen(fprog
);
1171 struct bpf_prog
*fp
;
1174 /* Make sure new filter is there and in the right amounts. */
1175 if (!bpf_check_basics_ok(fprog
->filter
, fprog
->len
))
1178 fp
= bpf_prog_alloc(bpf_prog_size(fprog
->len
), 0);
1182 if (copy_from_user(fp
->insns
, fprog
->filter
, fsize
)) {
1183 __bpf_prog_free(fp
);
1187 fp
->len
= fprog
->len
;
1188 fp
->orig_prog
= NULL
;
1191 err
= bpf_prog_store_orig_filter(fp
, fprog
);
1193 __bpf_prog_free(fp
);
1198 /* bpf_prepare_filter() already takes care of freeing
1199 * memory in case something goes wrong.
1201 fp
= bpf_prepare_filter(fp
, trans
);
1208 EXPORT_SYMBOL_GPL(bpf_prog_create_from_user
);
1210 void bpf_prog_destroy(struct bpf_prog
*fp
)
1212 __bpf_prog_release(fp
);
1214 EXPORT_SYMBOL_GPL(bpf_prog_destroy
);
1216 static int __sk_attach_prog(struct bpf_prog
*prog
, struct sock
*sk
)
1218 struct sk_filter
*fp
, *old_fp
;
1220 fp
= kmalloc(sizeof(*fp
), GFP_KERNEL
);
1226 if (!__sk_filter_charge(sk
, fp
)) {
1230 refcount_set(&fp
->refcnt
, 1);
1232 old_fp
= rcu_dereference_protected(sk
->sk_filter
,
1233 lockdep_sock_is_held(sk
));
1234 rcu_assign_pointer(sk
->sk_filter
, fp
);
1237 sk_filter_uncharge(sk
, old_fp
);
1242 static int __reuseport_attach_prog(struct bpf_prog
*prog
, struct sock
*sk
)
1244 struct bpf_prog
*old_prog
;
1247 if (bpf_prog_size(prog
->len
) > sysctl_optmem_max
)
1250 if (sk_unhashed(sk
) && sk
->sk_reuseport
) {
1251 err
= reuseport_alloc(sk
);
1254 } else if (!rcu_access_pointer(sk
->sk_reuseport_cb
)) {
1255 /* The socket wasn't bound with SO_REUSEPORT */
1259 old_prog
= reuseport_attach_prog(sk
, prog
);
1261 bpf_prog_destroy(old_prog
);
1267 struct bpf_prog
*__get_filter(struct sock_fprog
*fprog
, struct sock
*sk
)
1269 unsigned int fsize
= bpf_classic_proglen(fprog
);
1270 struct bpf_prog
*prog
;
1273 if (sock_flag(sk
, SOCK_FILTER_LOCKED
))
1274 return ERR_PTR(-EPERM
);
1276 /* Make sure new filter is there and in the right amounts. */
1277 if (!bpf_check_basics_ok(fprog
->filter
, fprog
->len
))
1278 return ERR_PTR(-EINVAL
);
1280 prog
= bpf_prog_alloc(bpf_prog_size(fprog
->len
), 0);
1282 return ERR_PTR(-ENOMEM
);
1284 if (copy_from_user(prog
->insns
, fprog
->filter
, fsize
)) {
1285 __bpf_prog_free(prog
);
1286 return ERR_PTR(-EFAULT
);
1289 prog
->len
= fprog
->len
;
1291 err
= bpf_prog_store_orig_filter(prog
, fprog
);
1293 __bpf_prog_free(prog
);
1294 return ERR_PTR(-ENOMEM
);
1297 /* bpf_prepare_filter() already takes care of freeing
1298 * memory in case something goes wrong.
1300 return bpf_prepare_filter(prog
, NULL
);
1304 * sk_attach_filter - attach a socket filter
1305 * @fprog: the filter program
1306 * @sk: the socket to use
1308 * Attach the user's filter code. We first run some sanity checks on
1309 * it to make sure it does not explode on us later. If an error
1310 * occurs or there is insufficient memory for the filter a negative
1311 * errno code is returned. On success the return is zero.
1313 int sk_attach_filter(struct sock_fprog
*fprog
, struct sock
*sk
)
1315 struct bpf_prog
*prog
= __get_filter(fprog
, sk
);
1319 return PTR_ERR(prog
);
1321 err
= __sk_attach_prog(prog
, sk
);
1323 __bpf_prog_release(prog
);
1329 EXPORT_SYMBOL_GPL(sk_attach_filter
);
1331 int sk_reuseport_attach_filter(struct sock_fprog
*fprog
, struct sock
*sk
)
1333 struct bpf_prog
*prog
= __get_filter(fprog
, sk
);
1337 return PTR_ERR(prog
);
1339 err
= __reuseport_attach_prog(prog
, sk
);
1341 __bpf_prog_release(prog
);
1348 static struct bpf_prog
*__get_bpf(u32 ufd
, struct sock
*sk
)
1350 if (sock_flag(sk
, SOCK_FILTER_LOCKED
))
1351 return ERR_PTR(-EPERM
);
1353 return bpf_prog_get_type(ufd
, BPF_PROG_TYPE_SOCKET_FILTER
);
1356 int sk_attach_bpf(u32 ufd
, struct sock
*sk
)
1358 struct bpf_prog
*prog
= __get_bpf(ufd
, sk
);
1362 return PTR_ERR(prog
);
1364 err
= __sk_attach_prog(prog
, sk
);
1373 int sk_reuseport_attach_bpf(u32 ufd
, struct sock
*sk
)
1375 struct bpf_prog
*prog
= __get_bpf(ufd
, sk
);
1379 return PTR_ERR(prog
);
1381 err
= __reuseport_attach_prog(prog
, sk
);
1390 struct bpf_scratchpad
{
1392 __be32 diff
[MAX_BPF_STACK
/ sizeof(__be32
)];
1393 u8 buff
[MAX_BPF_STACK
];
1397 static DEFINE_PER_CPU(struct bpf_scratchpad
, bpf_sp
);
1399 static inline int __bpf_try_make_writable(struct sk_buff
*skb
,
1400 unsigned int write_len
)
1402 return skb_ensure_writable(skb
, write_len
);
1405 static inline int bpf_try_make_writable(struct sk_buff
*skb
,
1406 unsigned int write_len
)
1408 int err
= __bpf_try_make_writable(skb
, write_len
);
1410 bpf_compute_data_pointers(skb
);
1414 static int bpf_try_make_head_writable(struct sk_buff
*skb
)
1416 return bpf_try_make_writable(skb
, skb_headlen(skb
));
1419 static inline void bpf_push_mac_rcsum(struct sk_buff
*skb
)
1421 if (skb_at_tc_ingress(skb
))
1422 skb_postpush_rcsum(skb
, skb_mac_header(skb
), skb
->mac_len
);
1425 static inline void bpf_pull_mac_rcsum(struct sk_buff
*skb
)
1427 if (skb_at_tc_ingress(skb
))
1428 skb_postpull_rcsum(skb
, skb_mac_header(skb
), skb
->mac_len
);
1431 BPF_CALL_5(bpf_skb_store_bytes
, struct sk_buff
*, skb
, u32
, offset
,
1432 const void *, from
, u32
, len
, u64
, flags
)
1436 if (unlikely(flags
& ~(BPF_F_RECOMPUTE_CSUM
| BPF_F_INVALIDATE_HASH
)))
1438 if (unlikely(offset
> 0xffff))
1440 if (unlikely(bpf_try_make_writable(skb
, offset
+ len
)))
1443 ptr
= skb
->data
+ offset
;
1444 if (flags
& BPF_F_RECOMPUTE_CSUM
)
1445 __skb_postpull_rcsum(skb
, ptr
, len
, offset
);
1447 memcpy(ptr
, from
, len
);
1449 if (flags
& BPF_F_RECOMPUTE_CSUM
)
1450 __skb_postpush_rcsum(skb
, ptr
, len
, offset
);
1451 if (flags
& BPF_F_INVALIDATE_HASH
)
1452 skb_clear_hash(skb
);
1457 static const struct bpf_func_proto bpf_skb_store_bytes_proto
= {
1458 .func
= bpf_skb_store_bytes
,
1460 .ret_type
= RET_INTEGER
,
1461 .arg1_type
= ARG_PTR_TO_CTX
,
1462 .arg2_type
= ARG_ANYTHING
,
1463 .arg3_type
= ARG_PTR_TO_MEM
,
1464 .arg4_type
= ARG_CONST_SIZE
,
1465 .arg5_type
= ARG_ANYTHING
,
1468 BPF_CALL_4(bpf_skb_load_bytes
, const struct sk_buff
*, skb
, u32
, offset
,
1469 void *, to
, u32
, len
)
1473 if (unlikely(offset
> 0xffff))
1476 ptr
= skb_header_pointer(skb
, offset
, len
, to
);
1480 memcpy(to
, ptr
, len
);
1488 static const struct bpf_func_proto bpf_skb_load_bytes_proto
= {
1489 .func
= bpf_skb_load_bytes
,
1491 .ret_type
= RET_INTEGER
,
1492 .arg1_type
= ARG_PTR_TO_CTX
,
1493 .arg2_type
= ARG_ANYTHING
,
1494 .arg3_type
= ARG_PTR_TO_UNINIT_MEM
,
1495 .arg4_type
= ARG_CONST_SIZE
,
1498 BPF_CALL_2(bpf_skb_pull_data
, struct sk_buff
*, skb
, u32
, len
)
1500 /* Idea is the following: should the needed direct read/write
1501 * test fail during runtime, we can pull in more data and redo
1502 * again, since implicitly, we invalidate previous checks here.
1504 * Or, since we know how much we need to make read/writeable,
1505 * this can be done once at the program beginning for direct
1506 * access case. By this we overcome limitations of only current
1507 * headroom being accessible.
1509 return bpf_try_make_writable(skb
, len
? : skb_headlen(skb
));
1512 static const struct bpf_func_proto bpf_skb_pull_data_proto
= {
1513 .func
= bpf_skb_pull_data
,
1515 .ret_type
= RET_INTEGER
,
1516 .arg1_type
= ARG_PTR_TO_CTX
,
1517 .arg2_type
= ARG_ANYTHING
,
1520 BPF_CALL_5(bpf_l3_csum_replace
, struct sk_buff
*, skb
, u32
, offset
,
1521 u64
, from
, u64
, to
, u64
, flags
)
1525 if (unlikely(flags
& ~(BPF_F_HDR_FIELD_MASK
)))
1527 if (unlikely(offset
> 0xffff || offset
& 1))
1529 if (unlikely(bpf_try_make_writable(skb
, offset
+ sizeof(*ptr
))))
1532 ptr
= (__sum16
*)(skb
->data
+ offset
);
1533 switch (flags
& BPF_F_HDR_FIELD_MASK
) {
1535 if (unlikely(from
!= 0))
1538 csum_replace_by_diff(ptr
, to
);
1541 csum_replace2(ptr
, from
, to
);
1544 csum_replace4(ptr
, from
, to
);
1553 static const struct bpf_func_proto bpf_l3_csum_replace_proto
= {
1554 .func
= bpf_l3_csum_replace
,
1556 .ret_type
= RET_INTEGER
,
1557 .arg1_type
= ARG_PTR_TO_CTX
,
1558 .arg2_type
= ARG_ANYTHING
,
1559 .arg3_type
= ARG_ANYTHING
,
1560 .arg4_type
= ARG_ANYTHING
,
1561 .arg5_type
= ARG_ANYTHING
,
1564 BPF_CALL_5(bpf_l4_csum_replace
, struct sk_buff
*, skb
, u32
, offset
,
1565 u64
, from
, u64
, to
, u64
, flags
)
1567 bool is_pseudo
= flags
& BPF_F_PSEUDO_HDR
;
1568 bool is_mmzero
= flags
& BPF_F_MARK_MANGLED_0
;
1569 bool do_mforce
= flags
& BPF_F_MARK_ENFORCE
;
1572 if (unlikely(flags
& ~(BPF_F_MARK_MANGLED_0
| BPF_F_MARK_ENFORCE
|
1573 BPF_F_PSEUDO_HDR
| BPF_F_HDR_FIELD_MASK
)))
1575 if (unlikely(offset
> 0xffff || offset
& 1))
1577 if (unlikely(bpf_try_make_writable(skb
, offset
+ sizeof(*ptr
))))
1580 ptr
= (__sum16
*)(skb
->data
+ offset
);
1581 if (is_mmzero
&& !do_mforce
&& !*ptr
)
1584 switch (flags
& BPF_F_HDR_FIELD_MASK
) {
1586 if (unlikely(from
!= 0))
1589 inet_proto_csum_replace_by_diff(ptr
, skb
, to
, is_pseudo
);
1592 inet_proto_csum_replace2(ptr
, skb
, from
, to
, is_pseudo
);
1595 inet_proto_csum_replace4(ptr
, skb
, from
, to
, is_pseudo
);
1601 if (is_mmzero
&& !*ptr
)
1602 *ptr
= CSUM_MANGLED_0
;
1606 static const struct bpf_func_proto bpf_l4_csum_replace_proto
= {
1607 .func
= bpf_l4_csum_replace
,
1609 .ret_type
= RET_INTEGER
,
1610 .arg1_type
= ARG_PTR_TO_CTX
,
1611 .arg2_type
= ARG_ANYTHING
,
1612 .arg3_type
= ARG_ANYTHING
,
1613 .arg4_type
= ARG_ANYTHING
,
1614 .arg5_type
= ARG_ANYTHING
,
1617 BPF_CALL_5(bpf_csum_diff
, __be32
*, from
, u32
, from_size
,
1618 __be32
*, to
, u32
, to_size
, __wsum
, seed
)
1620 struct bpf_scratchpad
*sp
= this_cpu_ptr(&bpf_sp
);
1621 u32 diff_size
= from_size
+ to_size
;
1624 /* This is quite flexible, some examples:
1626 * from_size == 0, to_size > 0, seed := csum --> pushing data
1627 * from_size > 0, to_size == 0, seed := csum --> pulling data
1628 * from_size > 0, to_size > 0, seed := 0 --> diffing data
1630 * Even for diffing, from_size and to_size don't need to be equal.
1632 if (unlikely(((from_size
| to_size
) & (sizeof(__be32
) - 1)) ||
1633 diff_size
> sizeof(sp
->diff
)))
1636 for (i
= 0; i
< from_size
/ sizeof(__be32
); i
++, j
++)
1637 sp
->diff
[j
] = ~from
[i
];
1638 for (i
= 0; i
< to_size
/ sizeof(__be32
); i
++, j
++)
1639 sp
->diff
[j
] = to
[i
];
1641 return csum_partial(sp
->diff
, diff_size
, seed
);
1644 static const struct bpf_func_proto bpf_csum_diff_proto
= {
1645 .func
= bpf_csum_diff
,
1648 .ret_type
= RET_INTEGER
,
1649 .arg1_type
= ARG_PTR_TO_MEM_OR_NULL
,
1650 .arg2_type
= ARG_CONST_SIZE_OR_ZERO
,
1651 .arg3_type
= ARG_PTR_TO_MEM_OR_NULL
,
1652 .arg4_type
= ARG_CONST_SIZE_OR_ZERO
,
1653 .arg5_type
= ARG_ANYTHING
,
1656 BPF_CALL_2(bpf_csum_update
, struct sk_buff
*, skb
, __wsum
, csum
)
1658 /* The interface is to be used in combination with bpf_csum_diff()
1659 * for direct packet writes. csum rotation for alignment as well
1660 * as emulating csum_sub() can be done from the eBPF program.
1662 if (skb
->ip_summed
== CHECKSUM_COMPLETE
)
1663 return (skb
->csum
= csum_add(skb
->csum
, csum
));
1668 static const struct bpf_func_proto bpf_csum_update_proto
= {
1669 .func
= bpf_csum_update
,
1671 .ret_type
= RET_INTEGER
,
1672 .arg1_type
= ARG_PTR_TO_CTX
,
1673 .arg2_type
= ARG_ANYTHING
,
1676 static inline int __bpf_rx_skb(struct net_device
*dev
, struct sk_buff
*skb
)
1678 return dev_forward_skb(dev
, skb
);
1681 static inline int __bpf_rx_skb_no_mac(struct net_device
*dev
,
1682 struct sk_buff
*skb
)
1684 int ret
= ____dev_forward_skb(dev
, skb
);
1688 ret
= netif_rx(skb
);
1694 static inline int __bpf_tx_skb(struct net_device
*dev
, struct sk_buff
*skb
)
1698 if (unlikely(__this_cpu_read(xmit_recursion
) > XMIT_RECURSION_LIMIT
)) {
1699 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
1706 __this_cpu_inc(xmit_recursion
);
1707 ret
= dev_queue_xmit(skb
);
1708 __this_cpu_dec(xmit_recursion
);
1713 static int __bpf_redirect_no_mac(struct sk_buff
*skb
, struct net_device
*dev
,
1716 /* skb->mac_len is not set on normal egress */
1717 unsigned int mlen
= skb
->network_header
- skb
->mac_header
;
1719 __skb_pull(skb
, mlen
);
1721 /* At ingress, the mac header has already been pulled once.
1722 * At egress, skb_pospull_rcsum has to be done in case that
1723 * the skb is originated from ingress (i.e. a forwarded skb)
1724 * to ensure that rcsum starts at net header.
1726 if (!skb_at_tc_ingress(skb
))
1727 skb_postpull_rcsum(skb
, skb_mac_header(skb
), mlen
);
1728 skb_pop_mac_header(skb
);
1729 skb_reset_mac_len(skb
);
1730 return flags
& BPF_F_INGRESS
?
1731 __bpf_rx_skb_no_mac(dev
, skb
) : __bpf_tx_skb(dev
, skb
);
1734 static int __bpf_redirect_common(struct sk_buff
*skb
, struct net_device
*dev
,
1737 /* Verify that a link layer header is carried */
1738 if (unlikely(skb
->mac_header
>= skb
->network_header
)) {
1743 bpf_push_mac_rcsum(skb
);
1744 return flags
& BPF_F_INGRESS
?
1745 __bpf_rx_skb(dev
, skb
) : __bpf_tx_skb(dev
, skb
);
1748 static int __bpf_redirect(struct sk_buff
*skb
, struct net_device
*dev
,
1751 if (dev_is_mac_header_xmit(dev
))
1752 return __bpf_redirect_common(skb
, dev
, flags
);
1754 return __bpf_redirect_no_mac(skb
, dev
, flags
);
1757 BPF_CALL_3(bpf_clone_redirect
, struct sk_buff
*, skb
, u32
, ifindex
, u64
, flags
)
1759 struct net_device
*dev
;
1760 struct sk_buff
*clone
;
1763 if (unlikely(flags
& ~(BPF_F_INGRESS
)))
1766 dev
= dev_get_by_index_rcu(dev_net(skb
->dev
), ifindex
);
1770 clone
= skb_clone(skb
, GFP_ATOMIC
);
1771 if (unlikely(!clone
))
1774 /* For direct write, we need to keep the invariant that the skbs
1775 * we're dealing with need to be uncloned. Should uncloning fail
1776 * here, we need to free the just generated clone to unclone once
1779 ret
= bpf_try_make_head_writable(skb
);
1780 if (unlikely(ret
)) {
1785 return __bpf_redirect(clone
, dev
, flags
);
1788 static const struct bpf_func_proto bpf_clone_redirect_proto
= {
1789 .func
= bpf_clone_redirect
,
1791 .ret_type
= RET_INTEGER
,
1792 .arg1_type
= ARG_PTR_TO_CTX
,
1793 .arg2_type
= ARG_ANYTHING
,
1794 .arg3_type
= ARG_ANYTHING
,
1797 struct redirect_info
{
1800 struct bpf_map
*map
;
1801 struct bpf_map
*map_to_flush
;
1802 unsigned long map_owner
;
1805 static DEFINE_PER_CPU(struct redirect_info
, redirect_info
);
1807 BPF_CALL_2(bpf_redirect
, u32
, ifindex
, u64
, flags
)
1809 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
1811 if (unlikely(flags
& ~(BPF_F_INGRESS
)))
1814 ri
->ifindex
= ifindex
;
1817 return TC_ACT_REDIRECT
;
1820 int skb_do_redirect(struct sk_buff
*skb
)
1822 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
1823 struct net_device
*dev
;
1825 dev
= dev_get_by_index_rcu(dev_net(skb
->dev
), ri
->ifindex
);
1827 if (unlikely(!dev
)) {
1832 return __bpf_redirect(skb
, dev
, ri
->flags
);
1835 static const struct bpf_func_proto bpf_redirect_proto
= {
1836 .func
= bpf_redirect
,
1838 .ret_type
= RET_INTEGER
,
1839 .arg1_type
= ARG_ANYTHING
,
1840 .arg2_type
= ARG_ANYTHING
,
1843 BPF_CALL_4(bpf_sk_redirect_map
, struct sk_buff
*, skb
,
1844 struct bpf_map
*, map
, u32
, key
, u64
, flags
)
1846 struct tcp_skb_cb
*tcb
= TCP_SKB_CB(skb
);
1848 /* If user passes invalid input drop the packet. */
1849 if (unlikely(flags
))
1853 tcb
->bpf
.flags
= flags
;
1859 struct sock
*do_sk_redirect_map(struct sk_buff
*skb
)
1861 struct tcp_skb_cb
*tcb
= TCP_SKB_CB(skb
);
1862 struct sock
*sk
= NULL
;
1865 sk
= __sock_map_lookup_elem(tcb
->bpf
.map
, tcb
->bpf
.key
);
1868 tcb
->bpf
.map
= NULL
;
1874 static const struct bpf_func_proto bpf_sk_redirect_map_proto
= {
1875 .func
= bpf_sk_redirect_map
,
1877 .ret_type
= RET_INTEGER
,
1878 .arg1_type
= ARG_PTR_TO_CTX
,
1879 .arg2_type
= ARG_CONST_MAP_PTR
,
1880 .arg3_type
= ARG_ANYTHING
,
1881 .arg4_type
= ARG_ANYTHING
,
1884 BPF_CALL_1(bpf_get_cgroup_classid
, const struct sk_buff
*, skb
)
1886 return task_get_classid(skb
);
1889 static const struct bpf_func_proto bpf_get_cgroup_classid_proto
= {
1890 .func
= bpf_get_cgroup_classid
,
1892 .ret_type
= RET_INTEGER
,
1893 .arg1_type
= ARG_PTR_TO_CTX
,
1896 BPF_CALL_1(bpf_get_route_realm
, const struct sk_buff
*, skb
)
1898 return dst_tclassid(skb
);
1901 static const struct bpf_func_proto bpf_get_route_realm_proto
= {
1902 .func
= bpf_get_route_realm
,
1904 .ret_type
= RET_INTEGER
,
1905 .arg1_type
= ARG_PTR_TO_CTX
,
1908 BPF_CALL_1(bpf_get_hash_recalc
, struct sk_buff
*, skb
)
1910 /* If skb_clear_hash() was called due to mangling, we can
1911 * trigger SW recalculation here. Later access to hash
1912 * can then use the inline skb->hash via context directly
1913 * instead of calling this helper again.
1915 return skb_get_hash(skb
);
1918 static const struct bpf_func_proto bpf_get_hash_recalc_proto
= {
1919 .func
= bpf_get_hash_recalc
,
1921 .ret_type
= RET_INTEGER
,
1922 .arg1_type
= ARG_PTR_TO_CTX
,
1925 BPF_CALL_1(bpf_set_hash_invalid
, struct sk_buff
*, skb
)
1927 /* After all direct packet write, this can be used once for
1928 * triggering a lazy recalc on next skb_get_hash() invocation.
1930 skb_clear_hash(skb
);
1934 static const struct bpf_func_proto bpf_set_hash_invalid_proto
= {
1935 .func
= bpf_set_hash_invalid
,
1937 .ret_type
= RET_INTEGER
,
1938 .arg1_type
= ARG_PTR_TO_CTX
,
1941 BPF_CALL_2(bpf_set_hash
, struct sk_buff
*, skb
, u32
, hash
)
1943 /* Set user specified hash as L4(+), so that it gets returned
1944 * on skb_get_hash() call unless BPF prog later on triggers a
1947 __skb_set_sw_hash(skb
, hash
, true);
1951 static const struct bpf_func_proto bpf_set_hash_proto
= {
1952 .func
= bpf_set_hash
,
1954 .ret_type
= RET_INTEGER
,
1955 .arg1_type
= ARG_PTR_TO_CTX
,
1956 .arg2_type
= ARG_ANYTHING
,
1959 BPF_CALL_3(bpf_skb_vlan_push
, struct sk_buff
*, skb
, __be16
, vlan_proto
,
1964 if (unlikely(vlan_proto
!= htons(ETH_P_8021Q
) &&
1965 vlan_proto
!= htons(ETH_P_8021AD
)))
1966 vlan_proto
= htons(ETH_P_8021Q
);
1968 bpf_push_mac_rcsum(skb
);
1969 ret
= skb_vlan_push(skb
, vlan_proto
, vlan_tci
);
1970 bpf_pull_mac_rcsum(skb
);
1972 bpf_compute_data_pointers(skb
);
1976 const struct bpf_func_proto bpf_skb_vlan_push_proto
= {
1977 .func
= bpf_skb_vlan_push
,
1979 .ret_type
= RET_INTEGER
,
1980 .arg1_type
= ARG_PTR_TO_CTX
,
1981 .arg2_type
= ARG_ANYTHING
,
1982 .arg3_type
= ARG_ANYTHING
,
1984 EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto
);
1986 BPF_CALL_1(bpf_skb_vlan_pop
, struct sk_buff
*, skb
)
1990 bpf_push_mac_rcsum(skb
);
1991 ret
= skb_vlan_pop(skb
);
1992 bpf_pull_mac_rcsum(skb
);
1994 bpf_compute_data_pointers(skb
);
1998 const struct bpf_func_proto bpf_skb_vlan_pop_proto
= {
1999 .func
= bpf_skb_vlan_pop
,
2001 .ret_type
= RET_INTEGER
,
2002 .arg1_type
= ARG_PTR_TO_CTX
,
2004 EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto
);
2006 static int bpf_skb_generic_push(struct sk_buff
*skb
, u32 off
, u32 len
)
2008 /* Caller already did skb_cow() with len as headroom,
2009 * so no need to do it here.
2012 memmove(skb
->data
, skb
->data
+ len
, off
);
2013 memset(skb
->data
+ off
, 0, len
);
2015 /* No skb_postpush_rcsum(skb, skb->data + off, len)
2016 * needed here as it does not change the skb->csum
2017 * result for checksum complete when summing over
2023 static int bpf_skb_generic_pop(struct sk_buff
*skb
, u32 off
, u32 len
)
2025 /* skb_ensure_writable() is not needed here, as we're
2026 * already working on an uncloned skb.
2028 if (unlikely(!pskb_may_pull(skb
, off
+ len
)))
2031 skb_postpull_rcsum(skb
, skb
->data
+ off
, len
);
2032 memmove(skb
->data
+ len
, skb
->data
, off
);
2033 __skb_pull(skb
, len
);
2038 static int bpf_skb_net_hdr_push(struct sk_buff
*skb
, u32 off
, u32 len
)
2040 bool trans_same
= skb
->transport_header
== skb
->network_header
;
2043 /* There's no need for __skb_push()/__skb_pull() pair to
2044 * get to the start of the mac header as we're guaranteed
2045 * to always start from here under eBPF.
2047 ret
= bpf_skb_generic_push(skb
, off
, len
);
2049 skb
->mac_header
-= len
;
2050 skb
->network_header
-= len
;
2052 skb
->transport_header
= skb
->network_header
;
2058 static int bpf_skb_net_hdr_pop(struct sk_buff
*skb
, u32 off
, u32 len
)
2060 bool trans_same
= skb
->transport_header
== skb
->network_header
;
2063 /* Same here, __skb_push()/__skb_pull() pair not needed. */
2064 ret
= bpf_skb_generic_pop(skb
, off
, len
);
2066 skb
->mac_header
+= len
;
2067 skb
->network_header
+= len
;
2069 skb
->transport_header
= skb
->network_header
;
2075 static int bpf_skb_proto_4_to_6(struct sk_buff
*skb
)
2077 const u32 len_diff
= sizeof(struct ipv6hdr
) - sizeof(struct iphdr
);
2078 u32 off
= skb_mac_header_len(skb
);
2081 ret
= skb_cow(skb
, len_diff
);
2082 if (unlikely(ret
< 0))
2085 ret
= bpf_skb_net_hdr_push(skb
, off
, len_diff
);
2086 if (unlikely(ret
< 0))
2089 if (skb_is_gso(skb
)) {
2090 /* SKB_GSO_TCPV4 needs to be changed into
2093 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
) {
2094 skb_shinfo(skb
)->gso_type
&= ~SKB_GSO_TCPV4
;
2095 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCPV6
;
2098 /* Due to IPv6 header, MSS needs to be downgraded. */
2099 skb_shinfo(skb
)->gso_size
-= len_diff
;
2100 /* Header must be checked, and gso_segs recomputed. */
2101 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2102 skb_shinfo(skb
)->gso_segs
= 0;
2105 skb
->protocol
= htons(ETH_P_IPV6
);
2106 skb_clear_hash(skb
);
2111 static int bpf_skb_proto_6_to_4(struct sk_buff
*skb
)
2113 const u32 len_diff
= sizeof(struct ipv6hdr
) - sizeof(struct iphdr
);
2114 u32 off
= skb_mac_header_len(skb
);
2117 ret
= skb_unclone(skb
, GFP_ATOMIC
);
2118 if (unlikely(ret
< 0))
2121 ret
= bpf_skb_net_hdr_pop(skb
, off
, len_diff
);
2122 if (unlikely(ret
< 0))
2125 if (skb_is_gso(skb
)) {
2126 /* SKB_GSO_TCPV6 needs to be changed into
2129 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
) {
2130 skb_shinfo(skb
)->gso_type
&= ~SKB_GSO_TCPV6
;
2131 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCPV4
;
2134 /* Due to IPv4 header, MSS can be upgraded. */
2135 skb_shinfo(skb
)->gso_size
+= len_diff
;
2136 /* Header must be checked, and gso_segs recomputed. */
2137 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2138 skb_shinfo(skb
)->gso_segs
= 0;
2141 skb
->protocol
= htons(ETH_P_IP
);
2142 skb_clear_hash(skb
);
2147 static int bpf_skb_proto_xlat(struct sk_buff
*skb
, __be16 to_proto
)
2149 __be16 from_proto
= skb
->protocol
;
2151 if (from_proto
== htons(ETH_P_IP
) &&
2152 to_proto
== htons(ETH_P_IPV6
))
2153 return bpf_skb_proto_4_to_6(skb
);
2155 if (from_proto
== htons(ETH_P_IPV6
) &&
2156 to_proto
== htons(ETH_P_IP
))
2157 return bpf_skb_proto_6_to_4(skb
);
2162 BPF_CALL_3(bpf_skb_change_proto
, struct sk_buff
*, skb
, __be16
, proto
,
2167 if (unlikely(flags
))
2170 /* General idea is that this helper does the basic groundwork
2171 * needed for changing the protocol, and eBPF program fills the
2172 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
2173 * and other helpers, rather than passing a raw buffer here.
2175 * The rationale is to keep this minimal and without a need to
2176 * deal with raw packet data. F.e. even if we would pass buffers
2177 * here, the program still needs to call the bpf_lX_csum_replace()
2178 * helpers anyway. Plus, this way we keep also separation of
2179 * concerns, since f.e. bpf_skb_store_bytes() should only take
2182 * Currently, additional options and extension header space are
2183 * not supported, but flags register is reserved so we can adapt
2184 * that. For offloads, we mark packet as dodgy, so that headers
2185 * need to be verified first.
2187 ret
= bpf_skb_proto_xlat(skb
, proto
);
2188 bpf_compute_data_pointers(skb
);
2192 static const struct bpf_func_proto bpf_skb_change_proto_proto
= {
2193 .func
= bpf_skb_change_proto
,
2195 .ret_type
= RET_INTEGER
,
2196 .arg1_type
= ARG_PTR_TO_CTX
,
2197 .arg2_type
= ARG_ANYTHING
,
2198 .arg3_type
= ARG_ANYTHING
,
2201 BPF_CALL_2(bpf_skb_change_type
, struct sk_buff
*, skb
, u32
, pkt_type
)
2203 /* We only allow a restricted subset to be changed for now. */
2204 if (unlikely(!skb_pkt_type_ok(skb
->pkt_type
) ||
2205 !skb_pkt_type_ok(pkt_type
)))
2208 skb
->pkt_type
= pkt_type
;
2212 static const struct bpf_func_proto bpf_skb_change_type_proto
= {
2213 .func
= bpf_skb_change_type
,
2215 .ret_type
= RET_INTEGER
,
2216 .arg1_type
= ARG_PTR_TO_CTX
,
2217 .arg2_type
= ARG_ANYTHING
,
2220 static u32
bpf_skb_net_base_len(const struct sk_buff
*skb
)
2222 switch (skb
->protocol
) {
2223 case htons(ETH_P_IP
):
2224 return sizeof(struct iphdr
);
2225 case htons(ETH_P_IPV6
):
2226 return sizeof(struct ipv6hdr
);
2232 static int bpf_skb_net_grow(struct sk_buff
*skb
, u32 len_diff
)
2234 u32 off
= skb_mac_header_len(skb
) + bpf_skb_net_base_len(skb
);
2237 ret
= skb_cow(skb
, len_diff
);
2238 if (unlikely(ret
< 0))
2241 ret
= bpf_skb_net_hdr_push(skb
, off
, len_diff
);
2242 if (unlikely(ret
< 0))
2245 if (skb_is_gso(skb
)) {
2246 /* Due to header grow, MSS needs to be downgraded. */
2247 skb_shinfo(skb
)->gso_size
-= len_diff
;
2248 /* Header must be checked, and gso_segs recomputed. */
2249 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2250 skb_shinfo(skb
)->gso_segs
= 0;
2256 static int bpf_skb_net_shrink(struct sk_buff
*skb
, u32 len_diff
)
2258 u32 off
= skb_mac_header_len(skb
) + bpf_skb_net_base_len(skb
);
2261 ret
= skb_unclone(skb
, GFP_ATOMIC
);
2262 if (unlikely(ret
< 0))
2265 ret
= bpf_skb_net_hdr_pop(skb
, off
, len_diff
);
2266 if (unlikely(ret
< 0))
2269 if (skb_is_gso(skb
)) {
2270 /* Due to header shrink, MSS can be upgraded. */
2271 skb_shinfo(skb
)->gso_size
+= len_diff
;
2272 /* Header must be checked, and gso_segs recomputed. */
2273 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2274 skb_shinfo(skb
)->gso_segs
= 0;
2280 static u32
__bpf_skb_max_len(const struct sk_buff
*skb
)
2282 return skb
->dev
->mtu
+ skb
->dev
->hard_header_len
;
2285 static int bpf_skb_adjust_net(struct sk_buff
*skb
, s32 len_diff
)
2287 bool trans_same
= skb
->transport_header
== skb
->network_header
;
2288 u32 len_cur
, len_diff_abs
= abs(len_diff
);
2289 u32 len_min
= bpf_skb_net_base_len(skb
);
2290 u32 len_max
= __bpf_skb_max_len(skb
);
2291 __be16 proto
= skb
->protocol
;
2292 bool shrink
= len_diff
< 0;
2295 if (unlikely(len_diff_abs
> 0xfffU
))
2297 if (unlikely(proto
!= htons(ETH_P_IP
) &&
2298 proto
!= htons(ETH_P_IPV6
)))
2301 len_cur
= skb
->len
- skb_network_offset(skb
);
2302 if (skb_transport_header_was_set(skb
) && !trans_same
)
2303 len_cur
= skb_network_header_len(skb
);
2304 if ((shrink
&& (len_diff_abs
>= len_cur
||
2305 len_cur
- len_diff_abs
< len_min
)) ||
2306 (!shrink
&& (skb
->len
+ len_diff_abs
> len_max
&&
2310 ret
= shrink
? bpf_skb_net_shrink(skb
, len_diff_abs
) :
2311 bpf_skb_net_grow(skb
, len_diff_abs
);
2313 bpf_compute_data_pointers(skb
);
2317 BPF_CALL_4(bpf_skb_adjust_room
, struct sk_buff
*, skb
, s32
, len_diff
,
2318 u32
, mode
, u64
, flags
)
2320 if (unlikely(flags
))
2322 if (likely(mode
== BPF_ADJ_ROOM_NET
))
2323 return bpf_skb_adjust_net(skb
, len_diff
);
2328 static const struct bpf_func_proto bpf_skb_adjust_room_proto
= {
2329 .func
= bpf_skb_adjust_room
,
2331 .ret_type
= RET_INTEGER
,
2332 .arg1_type
= ARG_PTR_TO_CTX
,
2333 .arg2_type
= ARG_ANYTHING
,
2334 .arg3_type
= ARG_ANYTHING
,
2335 .arg4_type
= ARG_ANYTHING
,
2338 static u32
__bpf_skb_min_len(const struct sk_buff
*skb
)
2340 u32 min_len
= skb_network_offset(skb
);
2342 if (skb_transport_header_was_set(skb
))
2343 min_len
= skb_transport_offset(skb
);
2344 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2345 min_len
= skb_checksum_start_offset(skb
) +
2346 skb
->csum_offset
+ sizeof(__sum16
);
2350 static int bpf_skb_grow_rcsum(struct sk_buff
*skb
, unsigned int new_len
)
2352 unsigned int old_len
= skb
->len
;
2355 ret
= __skb_grow_rcsum(skb
, new_len
);
2357 memset(skb
->data
+ old_len
, 0, new_len
- old_len
);
2361 static int bpf_skb_trim_rcsum(struct sk_buff
*skb
, unsigned int new_len
)
2363 return __skb_trim_rcsum(skb
, new_len
);
BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 min_len = __bpf_skb_min_len(skb);
	int ret;

	if (unlikely(flags || new_len > max_len || new_len < min_len))
		return -EINVAL;
	if (skb->encapsulation)
		return -ENOTSUPP;

	/* The basic idea of this helper is that it's performing the
	 * needed work to either grow or trim an skb, and eBPF program
	 * rewrites the rest via helpers like bpf_skb_store_bytes(),
	 * bpf_lX_csum_replace() and others rather than passing a raw
	 * buffer here. This one is a slow path helper and intended
	 * for replies with control messages.
	 *
	 * Like in bpf_skb_change_proto(), we want to keep this rather
	 * minimal and without protocol specifics so that we are able
	 * to separate concerns as in bpf_skb_store_bytes() should only
	 * be the one responsible for writing buffers.
	 *
	 * It's really expected to be a slow path operation here for
	 * control message replies, so we're implicitly linearizing,
	 * uncloning and drop offloads from the skb by this.
	 */
	ret = __bpf_try_make_writable(skb, skb->len);
	if (!ret) {
		if (new_len > skb->len)
			ret = bpf_skb_grow_rcsum(skb, new_len);
		else if (new_len < skb->len)
			ret = bpf_skb_trim_rcsum(skb, new_len);
		if (!ret && skb_is_gso(skb))
			skb_gso_reset(skb);
	}

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_tail_proto = {
	.func		= bpf_skb_change_tail,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

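/* Illustrative use (sketch, not from this file): trimming a packet down to
 * a fixed-size control reply from a tc program and then rewriting it with
 * the store/csum helpers, as the comment above describes.
 *
 *   if (bpf_skb_change_tail(skb, 64, 0))
 *           return TC_ACT_SHOT;
 *   // now rewrite headers/payload via bpf_skb_store_bytes(),
 *   // bpf_l3_csum_replace(), bpf_l4_csum_replace(), ...
 */
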
BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 new_len = skb->len + head_room;
	int ret;

	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
		     new_len < skb->len))
		return -EINVAL;

	ret = skb_cow(skb, head_room);
	if (likely(!ret)) {
		/* Idea for this helper is that we currently only
		 * allow to expand on mac header. This means that
		 * skb->protocol network header, etc, stay as is.
		 * Compared to bpf_skb_change_tail(), we're more
		 * flexible due to not needing to linearize or
		 * reset GSO. Intention for this helper is to be
		 * used by an L3 skb that needs to push mac header
		 * for redirection into L2 device.
		 */
		__skb_push(skb, head_room);
		memset(skb->data, 0, head_room);
		skb_reset_mac_header(skb);
	}

	bpf_compute_data_pointers(skb);
	return 0;
}

static const struct bpf_func_proto bpf_skb_change_head_proto = {
	.func		= bpf_skb_change_head,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

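/* Illustrative use (sketch): an lwt_xmit or sk_skb program pushing room for
 * a MAC header in front of an L3-only skb before redirecting it into an L2
 * device, matching the intention spelled out in the comment above. Return
 * codes depend on the program type; BPF_DROP/bpf_redirect() are shown for
 * the lwt case.
 *
 *   if (bpf_skb_change_head(skb, ETH_HLEN, 0))
 *           return BPF_DROP;
 *   // fill in the Ethernet header via bpf_skb_store_bytes(skb, 0, ...)
 *   return bpf_redirect(ifindex, 0);
 */
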
static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
	return xdp_data_meta_unsupported(xdp) ? 0 :
	       xdp->data - xdp->data_meta;
}

BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
	unsigned long metalen = xdp_get_metalen(xdp);
	void *data_start = xdp->data_hard_start + metalen;
	void *data = xdp->data + offset;

	if (unlikely(data < data_start ||
		     data > xdp->data_end - ETH_HLEN))
		return -EINVAL;

	if (metalen)
		memmove(xdp->data_meta + offset,
			xdp->data_meta, metalen);
	xdp->data_meta += offset;
	xdp->data = data;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
	.func		= bpf_xdp_adjust_head,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

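/* Illustrative use (sketch): an XDP program reserving headroom for an outer
 * header by moving xdp->data backwards, then re-checking bounds before
 * touching the packet (the verifier requires the re-check after any adjust).
 *
 *   if (bpf_xdp_adjust_head(ctx, -(int)sizeof(struct iphdr)))
 *           return XDP_DROP;
 *   void *data = (void *)(long)ctx->data;
 *   void *data_end = (void *)(long)ctx->data_end;
 *   if (data + sizeof(struct iphdr) > data_end)
 *           return XDP_DROP;
 */
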
BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
{
	void *meta = xdp->data_meta + offset;
	unsigned long metalen = xdp->data - meta;

	if (xdp_data_meta_unsupported(xdp))
		return -ENOTSUPP;
	if (unlikely(meta < xdp->data_hard_start ||
		     meta > xdp->data))
		return -EINVAL;
	if (unlikely((metalen & (sizeof(__u32) - 1)) ||
		     (metalen > 32)))
		return -EACCES;

	xdp->data_meta = meta;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
	.func		= bpf_xdp_adjust_meta,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

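/* Illustrative use (sketch): carving out 4 bytes of metadata in front of the
 * frame so that a later tc program can read it via skb->data_meta. The size
 * must be a multiple of 4, as enforced above.
 *
 *   if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(__u32)))
 *           return XDP_PASS;
 *   __u32 *meta = (void *)(long)ctx->data_meta;
 *   void *data = (void *)(long)ctx->data;
 *   if ((void *)(meta + 1) > data)
 *           return XDP_PASS;
 *   *meta = 0xcafe;
 */
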
static int __bpf_tx_xdp(struct net_device *dev,
			struct bpf_map *map,
			struct xdp_buff *xdp,
			u32 index)
{
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit) {
		return -EOPNOTSUPP;
	}

	err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
	if (err)
		return err;
	dev->netdev_ops->ndo_xdp_flush(dev);
	return 0;
}

static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
			    struct bpf_map *map,
			    struct xdp_buff *xdp,
			    u32 index)
{
	int err;

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		struct net_device *dev = fwd;

		if (!dev->netdev_ops->ndo_xdp_xmit)
			return -EOPNOTSUPP;

		err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
		if (err)
			return err;
		__dev_map_insert_ctx(map, index);

	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
		struct bpf_cpu_map_entry *rcpu = fwd;

		err = cpu_map_enqueue(rcpu, xdp, dev_rx);
		if (err)
			return err;
		__cpu_map_insert_ctx(map, index);
	}

	return 0;
}

void xdp_do_flush_map(void)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct bpf_map *map = ri->map_to_flush;

	ri->map_to_flush = NULL;
	if (map) {
		switch (map->map_type) {
		case BPF_MAP_TYPE_DEVMAP:
			__dev_map_flush(map);
			break;
		case BPF_MAP_TYPE_CPUMAP:
			__cpu_map_flush(map);
			break;
		default:
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(xdp_do_flush_map);

static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_DEVMAP:
		return __dev_map_lookup_elem(map, index);
	case BPF_MAP_TYPE_CPUMAP:
		return __cpu_map_lookup_elem(map, index);
	default:
		return NULL;
	}
}

static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
				   unsigned long aux)
{
	return (unsigned long)xdp_prog->aux != aux;
}

static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
			       struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	unsigned long map_owner = ri->map_owner;
	struct bpf_map *map = ri->map;
	u32 index = ri->ifindex;
	void *fwd = NULL;
	int err;

	ri->ifindex = 0;
	ri->map = NULL;
	ri->map_owner = 0;

	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
		err = -EFAULT;
		map = NULL;
		goto err;
	}

	fwd = __xdp_map_lookup_elem(map, index);
	if (!fwd) {
		err = -EINVAL;
		goto err;
	}
	if (ri->map_to_flush && ri->map_to_flush != map)
		xdp_do_flush_map();

	err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
	if (unlikely(err))
		goto err;

	ri->map_to_flush = map;
	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
	return 0;
err:
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
	return err;
}

int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
		    struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *fwd;
	u32 index = ri->ifindex;
	int err;

	if (ri->map)
		return xdp_do_redirect_map(dev, xdp, xdp_prog);

	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	ri->ifindex = 0;
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
	if (unlikely(err))
		goto err;

	_trace_xdp_redirect(dev, xdp_prog, index);
	return 0;
err:
	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
	return err;
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);

static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
{
	unsigned int len;

	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (skb->len > len)
		return -EMSGSIZE;

	return 0;
}

int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb,
				struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	unsigned long map_owner = ri->map_owner;
	struct bpf_map *map = ri->map;
	struct net_device *fwd = NULL;
	u32 index = ri->ifindex;
	int err = 0;

	ri->ifindex = 0;
	ri->map = NULL;
	ri->map_owner = 0;

	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
		err = -EFAULT;
		map = NULL;
		goto err;
	}
	fwd = __xdp_map_lookup_elem(map, index);
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
			goto err;
		skb->dev = fwd;
	} else {
		/* TODO: Handle BPF_MAP_TYPE_CPUMAP */
		err = -EBADRQC;
		goto err;
	}

	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
	return 0;
err:
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
	return err;
}

int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	u32 index = ri->ifindex;
	struct net_device *fwd;
	int err = 0;

	if (ri->map)
		return xdp_do_generic_redirect_map(dev, skb, xdp_prog);

	ri->ifindex = 0;
	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
		goto err;

	skb->dev = fwd;
	_trace_xdp_redirect(dev, xdp_prog, index);
	return 0;
err:
	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
	return err;
}
EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);

BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	ri->map = NULL;
	ri->map_owner = 0;

	return XDP_REDIRECT;
}

static const struct bpf_func_proto bpf_xdp_redirect_proto = {
	.func		= bpf_xdp_redirect,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
	   unsigned long, map_owner)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	ri->map = map;
	ri->map_owner = map_owner;

	return XDP_REDIRECT;
}

/* Note, arg4 is hidden from users and populated by the verifier
 * with the right pointer.
 */
static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
	.func		= bpf_xdp_redirect_map,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

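/* Illustrative use (sketch): redirecting through a devmap from XDP. The
 * map_owner argument is filled in by the verifier as noted above, so the
 * program only passes map, key and flags; "tx_ports" is a hypothetical
 * DEVMAP defined by the program.
 *
 *   struct bpf_map_def SEC("maps") tx_ports = {
 *           .type = BPF_MAP_TYPE_DEVMAP,
 *           .key_size = sizeof(__u32),
 *           .value_size = sizeof(__u32),
 *           .max_entries = 64,
 *   };
 *
 *   return bpf_redirect_map(&tx_ports, port_idx, 0);
 */
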
bool bpf_helper_changes_pkt_data(void *func)
{
	if (func == bpf_skb_vlan_push ||
	    func == bpf_skb_vlan_pop ||
	    func == bpf_skb_store_bytes ||
	    func == bpf_skb_change_proto ||
	    func == bpf_skb_change_head ||
	    func == bpf_skb_change_tail ||
	    func == bpf_skb_adjust_room ||
	    func == bpf_skb_pull_data ||
	    func == bpf_clone_redirect ||
	    func == bpf_l3_csum_replace ||
	    func == bpf_l4_csum_replace ||
	    func == bpf_xdp_adjust_head ||
	    func == bpf_xdp_adjust_meta)
		return true;

	return false;
}

static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
				  unsigned long off, unsigned long len)
{
	void *ptr = skb_header_pointer(skb, off, len, dst_buff);

	if (unlikely(!ptr))
		return len;
	if (ptr != dst_buff)
		memcpy(dst_buff, ptr, len);

	return 0;
}

BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(skb_size > skb->len))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
				bpf_skb_copy);
}

static const struct bpf_func_proto bpf_skb_event_output_proto = {
	.func		= bpf_skb_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

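/* Illustrative use (sketch): emitting an event plus the first 64 bytes of
 * packet payload to a hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY map named
 * "events". The payload length is encoded in the upper 32 bits of the flags
 * argument, which is exactly what BPF_F_CTXLEN_MASK above extracts.
 *
 *   __u64 flags = BPF_F_CURRENT_CPU | ((__u64)64 << 32);
 *   bpf_perf_event_output(skb, &events, flags, &meta, sizeof(meta));
 */
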
static unsigned short bpf_tunnel_key_af(u64 flags)
{
	return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}

BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
	   u32, size, u64, flags)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	void *to_orig = to;
	int err;

	if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
		err = -EINVAL;
		goto err_clear;
	}
	if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
		err = -EPROTO;
		goto err_clear;
	}
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		err = -EINVAL;
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
			goto set_compat;
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			if (ip_tunnel_info_af(info) != AF_INET)
				goto err_clear;
set_compat:
			to = (struct bpf_tunnel_key *)compat;
			break;
		default:
			goto err_clear;
		}
	}

	to->tunnel_id = be64_to_cpu(info->key.tun_id);
	to->tunnel_tos = info->key.tos;
	to->tunnel_ttl = info->key.ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
		       sizeof(to->remote_ipv6));
		to->tunnel_label = be32_to_cpu(info->key.label);
	} else {
		to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
	}

	if (unlikely(size != sizeof(struct bpf_tunnel_key)))
		memcpy(to_orig, to, size);

	return 0;
err_clear:
	memset(to_orig, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
	.func		= bpf_skb_get_tunnel_key,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

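/* Illustrative use (sketch): a tc program on a collect_md tunnel device
 * reading the receive-side tunnel key; the helper clears the buffer on
 * error, so the caller can rely on defined contents either way.
 *
 *   struct bpf_tunnel_key key = {};
 *
 *   if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0))
 *           return TC_ACT_OK;
 *   if (key.tunnel_id == 42)
 *           return TC_ACT_SHOT;
 */
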
BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	int err;

	if (unlikely(!info ||
		     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
		err = -ENOENT;
		goto err_clear;
	}
	if (unlikely(size < info->options_len)) {
		err = -ENOMEM;
		goto err_clear;
	}

	ip_tunnel_info_opts_get(to, info);
	if (size > info->options_len)
		memset(to + info->options_len, 0, size - info->options_len);

	return info->options_len;
err_clear:
	memset(to, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
	.func		= bpf_skb_get_tunnel_opt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static struct metadata_dst __percpu *md_dst;

BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
	   const struct bpf_tunnel_key *, from, u32, size, u64, flags)
{
	struct metadata_dst *md = this_cpu_ptr(md_dst);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	struct ip_tunnel_info *info;

	if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
			       BPF_F_DONT_FRAGMENT)))
		return -EINVAL;
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			memcpy(compat, from, size);
			memset(compat + size, 0, sizeof(compat) - size);
			from = (const struct bpf_tunnel_key *) compat;
			break;
		default:
			return -EINVAL;
		}
	}
	if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
		     from->tunnel_ext))
		return -EINVAL;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) md);
	skb_dst_set(skb, (struct dst_entry *) md);

	info = &md->u.tun_info;
	info->mode = IP_TUNNEL_INFO_TX;

	info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
	if (flags & BPF_F_DONT_FRAGMENT)
		info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;

	info->key.tun_id = cpu_to_be64(from->tunnel_id);
	info->key.tos = from->tunnel_tos;
	info->key.ttl = from->tunnel_ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		info->mode |= IP_TUNNEL_INFO_IPV6;
		memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
		       sizeof(from->remote_ipv6));
		info->key.label = cpu_to_be32(from->tunnel_label) &
				  IPV6_FLOWLABEL_MASK;
	} else {
		info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
		if (flags & BPF_F_ZERO_CSUM_TX)
			info->key.tun_flags &= ~TUNNEL_CSUM;
	}

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
	.func		= bpf_skb_set_tunnel_key,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
	   const u8 *, from, u32, size)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct metadata_dst *md = this_cpu_ptr(md_dst);

	if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
		return -EINVAL;
	if (unlikely(size > IP_TUNNEL_OPTS_MAX))
		return -ENOMEM;

	ip_tunnel_info_opts_set(info, from, size);

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
	.func		= bpf_skb_set_tunnel_opt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{
	if (!md_dst) {
		struct metadata_dst __percpu *tmp;

		tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
						METADATA_IP_TUNNEL,
						GFP_KERNEL);
		if (!tmp)
			return NULL;
		if (cmpxchg(&md_dst, NULL, tmp))
			metadata_dst_free_percpu(tmp);
	}

	switch (which) {
	case BPF_FUNC_skb_set_tunnel_key:
		return &bpf_skb_set_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return &bpf_skb_set_tunnel_opt_proto;
	default:
		return NULL;
	}
}

BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
	   u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;
	struct sock *sk;

	sk = skb_to_full_sk(skb);
	if (!sk || !sk_fullsock(sk))
		return 0;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return sk_under_cgroup_hierarchy(sk, cgrp);
}

static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
	.func		= bpf_skb_under_cgroup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
				  unsigned long off, unsigned long len)
{
	memcpy(dst_buff, src_buff + off, len);
	return 0;
}

BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, xdp->data,
				xdp_size, bpf_xdp_copy);
}

static const struct bpf_func_proto bpf_xdp_event_output_proto = {
	.func		= bpf_xdp_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
{
	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
}

static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
	.func		= bpf_get_socket_cookie,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
{
	struct sock *sk = sk_to_full_sk(skb->sk);
	kuid_t kuid;

	if (!sk || !sk_fullsock(sk))
		return overflowuid;
	kuid = sock_net_uid(sock_net(sk), sk);
	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
}

static const struct bpf_func_proto bpf_get_socket_uid_proto = {
	.func		= bpf_get_socket_uid,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
	   int, level, int, optname, char *, optval, int, optlen)
{
	struct sock *sk = bpf_sock->sk;
	int ret = 0;
	int val;

	if (!sk_fullsock(sk))
		return -EINVAL;

	if (level == SOL_SOCKET) {
		if (optlen != sizeof(int))
			return -EINVAL;
		val = *((int *)optval);

		/* Only some socketops are supported */
		switch (optname) {
		case SO_RCVBUF:
			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
			sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
			break;
		case SO_SNDBUF:
			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
			sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
			break;
		case SO_MAX_PACING_RATE:
			sk->sk_max_pacing_rate = val;
			sk->sk_pacing_rate = min(sk->sk_pacing_rate,
						 sk->sk_max_pacing_rate);
			break;
		case SO_PRIORITY:
			sk->sk_priority = val;
			break;
		case SO_RCVLOWAT:
			if (val < 0)
				val = INT_MAX;
			sk->sk_rcvlowat = val ? : 1;
			break;
		default:
			ret = -EINVAL;
		}
#ifdef CONFIG_INET
	} else if (level == SOL_TCP &&
		   sk->sk_prot->setsockopt == tcp_setsockopt) {
		if (optname == TCP_CONGESTION) {
			char name[TCP_CA_NAME_MAX];
			bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;

			strncpy(name, optval, min_t(long, optlen,
						    TCP_CA_NAME_MAX-1));
			name[TCP_CA_NAME_MAX-1] = 0;
			ret = tcp_set_congestion_control(sk, name, false, reinit);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);

			if (optlen != sizeof(int))
				return -EINVAL;

			val = *((int *)optval);
			/* Only some options are supported */
			switch (optname) {
			case TCP_BPF_IW:
				if (val <= 0 || tp->data_segs_out > 0)
					ret = -EINVAL;
				else
					tp->snd_cwnd = val;
				break;
			case TCP_BPF_SNDCWND_CLAMP:
				if (val <= 0) {
					ret = -EINVAL;
				} else {
					tp->snd_cwnd_clamp = val;
					tp->snd_ssthresh = val;
				}
				break;
			default:
				ret = -EINVAL;
			}
		}
#endif
	} else {
		ret = -EINVAL;
	}
	return ret;
}

static const struct bpf_func_proto bpf_setsockopt_proto = {
	.func		= bpf_setsockopt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

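/* Illustrative use (sketch): a BPF_PROG_TYPE_SOCK_OPS program switching the
 * congestion control algorithm for freshly established connections, which
 * ends up in the TCP_CONGESTION branch above.
 *
 *   SEC("sockops")
 *   int set_cc(struct bpf_sock_ops *skops)
 *   {
 *           char cc[] = "cubic";
 *
 *           if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *                   bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
 *                                  cc, sizeof(cc));
 *           return 1;
 *   }
 */
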
BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
	   int, level, int, optname, char *, optval, int, optlen)
{
	struct sock *sk = bpf_sock->sk;

	if (!sk_fullsock(sk))
		goto err_clear;

#ifdef CONFIG_INET
	if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
		if (optname == TCP_CONGESTION) {
			struct inet_connection_sock *icsk = inet_csk(sk);

			if (!icsk->icsk_ca_ops || optlen <= 1)
				goto err_clear;
			strncpy(optval, icsk->icsk_ca_ops->name, optlen);
			optval[optlen - 1] = 0;
		} else {
			goto err_clear;
		}
	} else {
		goto err_clear;
	}
	return 0;
#else
	goto err_clear;
#endif
err_clear:
	memset(optval, 0, optlen);
	return -EINVAL;
}

static const struct bpf_func_proto bpf_getsockopt_proto = {
	.func		= bpf_getsockopt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
sock_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	/* inet and inet6 sockets are created in a process
	 * context so there is always a valid uid/gid
	 */
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_skb_vlan_push:
		return &bpf_skb_vlan_push_proto;
	case BPF_FUNC_skb_vlan_pop:
		return &bpf_skb_vlan_pop_proto;
	case BPF_FUNC_skb_change_proto:
		return &bpf_skb_change_proto_proto;
	case BPF_FUNC_skb_change_type:
		return &bpf_skb_change_type_proto;
	case BPF_FUNC_skb_adjust_room:
		return &bpf_skb_adjust_room_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	case BPF_FUNC_set_hash:
		return &bpf_set_hash_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_xdp_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_xdp_adjust_head:
		return &bpf_xdp_adjust_head_proto;
	case BPF_FUNC_xdp_adjust_meta:
		return &bpf_xdp_adjust_meta_proto;
	case BPF_FUNC_redirect:
		return &bpf_xdp_redirect_proto;
	case BPF_FUNC_redirect_map:
		return &bpf_xdp_redirect_map_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_inout_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sock_ops_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_getsockopt_proto;
	case BPF_FUNC_sock_map_update:
		return &bpf_sock_map_update_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *sk_skb_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	case BPF_FUNC_sk_redirect_map:
		return &bpf_sk_redirect_map_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_xmit_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	default:
		return lwt_inout_func_proto(func_id);
	}
}

3574 static bool bpf_skb_is_valid_access(int off
, int size
, enum bpf_access_type type
,
3575 struct bpf_insn_access_aux
*info
)
3577 const int size_default
= sizeof(__u32
);
3579 if (off
< 0 || off
>= sizeof(struct __sk_buff
))
3582 /* The verifier guarantees that size > 0. */
3583 if (off
% size
!= 0)
3587 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3588 if (off
+ size
> offsetofend(struct __sk_buff
, cb
[4]))
3591 case bpf_ctx_range_till(struct __sk_buff
, remote_ip6
[0], remote_ip6
[3]):
3592 case bpf_ctx_range_till(struct __sk_buff
, local_ip6
[0], local_ip6
[3]):
3593 case bpf_ctx_range_till(struct __sk_buff
, remote_ip4
, remote_ip4
):
3594 case bpf_ctx_range_till(struct __sk_buff
, local_ip4
, local_ip4
):
3595 case bpf_ctx_range(struct __sk_buff
, data
):
3596 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3597 case bpf_ctx_range(struct __sk_buff
, data_end
):
3598 if (size
!= size_default
)
3602 /* Only narrow read access allowed for now. */
3603 if (type
== BPF_WRITE
) {
3604 if (size
!= size_default
)
3607 bpf_ctx_record_field_size(info
, size_default
);
3608 if (!bpf_ctx_narrow_access_ok(off
, size
, size_default
))
3616 static bool sk_filter_is_valid_access(int off
, int size
,
3617 enum bpf_access_type type
,
3618 struct bpf_insn_access_aux
*info
)
3621 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3622 case bpf_ctx_range(struct __sk_buff
, data
):
3623 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3624 case bpf_ctx_range(struct __sk_buff
, data_end
):
3625 case bpf_ctx_range_till(struct __sk_buff
, family
, local_port
):
3629 if (type
== BPF_WRITE
) {
3631 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3638 return bpf_skb_is_valid_access(off
, size
, type
, info
);
3641 static bool lwt_is_valid_access(int off
, int size
,
3642 enum bpf_access_type type
,
3643 struct bpf_insn_access_aux
*info
)
3646 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3647 case bpf_ctx_range_till(struct __sk_buff
, family
, local_port
):
3648 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3652 if (type
== BPF_WRITE
) {
3654 case bpf_ctx_range(struct __sk_buff
, mark
):
3655 case bpf_ctx_range(struct __sk_buff
, priority
):
3656 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3664 case bpf_ctx_range(struct __sk_buff
, data
):
3665 info
->reg_type
= PTR_TO_PACKET
;
3667 case bpf_ctx_range(struct __sk_buff
, data_end
):
3668 info
->reg_type
= PTR_TO_PACKET_END
;
3672 return bpf_skb_is_valid_access(off
, size
, type
, info
);
3675 static bool sock_filter_is_valid_access(int off
, int size
,
3676 enum bpf_access_type type
,
3677 struct bpf_insn_access_aux
*info
)
3679 if (type
== BPF_WRITE
) {
3681 case offsetof(struct bpf_sock
, bound_dev_if
):
3682 case offsetof(struct bpf_sock
, mark
):
3683 case offsetof(struct bpf_sock
, priority
):
3690 if (off
< 0 || off
+ size
> sizeof(struct bpf_sock
))
3692 /* The verifier guarantees that size > 0. */
3693 if (off
% size
!= 0)
3695 if (size
!= sizeof(__u32
))
static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
				const struct bpf_prog *prog, int drop_verdict)
{
	struct bpf_insn *insn = insn_buf;

	if (!direct_write)
		return 0;

	/* if (!skb->cloned)
	 *       goto start;
	 *
	 * (Fast-path, otherwise approximation that we might be
	 *  a clone, do the rest in helper.)
	 */
	*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
	*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);

	/* ret = bpf_skb_pull_data(skb, 0); */
	*insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	*insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
	*insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			       BPF_FUNC_skb_pull_data);
	/* if (!ret)
	 *      goto restore;
	 * return TC_ACT_SHOT;
	 */
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
	*insn++ = BPF_EXIT_INSN();

	/* restore: */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
	/* start: */
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
}

3746 static bool tc_cls_act_is_valid_access(int off
, int size
,
3747 enum bpf_access_type type
,
3748 struct bpf_insn_access_aux
*info
)
3750 if (type
== BPF_WRITE
) {
3752 case bpf_ctx_range(struct __sk_buff
, mark
):
3753 case bpf_ctx_range(struct __sk_buff
, tc_index
):
3754 case bpf_ctx_range(struct __sk_buff
, priority
):
3755 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3756 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3764 case bpf_ctx_range(struct __sk_buff
, data
):
3765 info
->reg_type
= PTR_TO_PACKET
;
3767 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3768 info
->reg_type
= PTR_TO_PACKET_META
;
3770 case bpf_ctx_range(struct __sk_buff
, data_end
):
3771 info
->reg_type
= PTR_TO_PACKET_END
;
3773 case bpf_ctx_range_till(struct __sk_buff
, family
, local_port
):
3777 return bpf_skb_is_valid_access(off
, size
, type
, info
);
static bool __is_valid_xdp_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct xdp_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static bool xdp_is_valid_access(int off, int size,
				enum bpf_access_type type,
				struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE)
		return false;

	switch (off) {
	case offsetof(struct xdp_md, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct xdp_md, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case offsetof(struct xdp_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_xdp_access(off, size);
}

void bpf_warn_invalid_xdp_action(u32 act)
{
	const u32 act_max = XDP_REDIRECT;

	WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
		  act > act_max ? "Illegal" : "Driver unsupported",
		  act);
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);

3824 static bool __is_valid_sock_ops_access(int off
, int size
)
3826 if (off
< 0 || off
>= sizeof(struct bpf_sock_ops
))
3828 /* The verifier guarantees that size > 0. */
3829 if (off
% size
!= 0)
3831 if (size
!= sizeof(__u32
))
3837 static bool sock_ops_is_valid_access(int off
, int size
,
3838 enum bpf_access_type type
,
3839 struct bpf_insn_access_aux
*info
)
3841 if (type
== BPF_WRITE
) {
3843 case offsetof(struct bpf_sock_ops
, op
) ...
3844 offsetof(struct bpf_sock_ops
, replylong
[3]):
3851 return __is_valid_sock_ops_access(off
, size
);
3854 static int sk_skb_prologue(struct bpf_insn
*insn_buf
, bool direct_write
,
3855 const struct bpf_prog
*prog
)
3857 return bpf_unclone_prologue(insn_buf
, direct_write
, prog
, SK_DROP
);
3860 static bool sk_skb_is_valid_access(int off
, int size
,
3861 enum bpf_access_type type
,
3862 struct bpf_insn_access_aux
*info
)
3865 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3866 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3870 if (type
== BPF_WRITE
) {
3872 case bpf_ctx_range(struct __sk_buff
, tc_index
):
3873 case bpf_ctx_range(struct __sk_buff
, priority
):
3881 case bpf_ctx_range(struct __sk_buff
, mark
):
3883 case bpf_ctx_range(struct __sk_buff
, data
):
3884 info
->reg_type
= PTR_TO_PACKET
;
3886 case bpf_ctx_range(struct __sk_buff
, data_end
):
3887 info
->reg_type
= PTR_TO_PACKET_END
;
3891 return bpf_skb_is_valid_access(off
, size
, type
, info
);
3894 static u32
bpf_convert_ctx_access(enum bpf_access_type type
,
3895 const struct bpf_insn
*si
,
3896 struct bpf_insn
*insn_buf
,
3897 struct bpf_prog
*prog
, u32
*target_size
)
3899 struct bpf_insn
*insn
= insn_buf
;
3903 case offsetof(struct __sk_buff
, len
):
3904 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3905 bpf_target_off(struct sk_buff
, len
, 4,
3909 case offsetof(struct __sk_buff
, protocol
):
3910 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3911 bpf_target_off(struct sk_buff
, protocol
, 2,
3915 case offsetof(struct __sk_buff
, vlan_proto
):
3916 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3917 bpf_target_off(struct sk_buff
, vlan_proto
, 2,
3921 case offsetof(struct __sk_buff
, priority
):
3922 if (type
== BPF_WRITE
)
3923 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3924 bpf_target_off(struct sk_buff
, priority
, 4,
3927 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3928 bpf_target_off(struct sk_buff
, priority
, 4,
3932 case offsetof(struct __sk_buff
, ingress_ifindex
):
3933 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3934 bpf_target_off(struct sk_buff
, skb_iif
, 4,
3938 case offsetof(struct __sk_buff
, ifindex
):
3939 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, dev
),
3940 si
->dst_reg
, si
->src_reg
,
3941 offsetof(struct sk_buff
, dev
));
3942 *insn
++ = BPF_JMP_IMM(BPF_JEQ
, si
->dst_reg
, 0, 1);
3943 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
3944 bpf_target_off(struct net_device
, ifindex
, 4,
3948 case offsetof(struct __sk_buff
, hash
):
3949 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3950 bpf_target_off(struct sk_buff
, hash
, 4,
3954 case offsetof(struct __sk_buff
, mark
):
3955 if (type
== BPF_WRITE
)
3956 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3957 bpf_target_off(struct sk_buff
, mark
, 4,
3960 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3961 bpf_target_off(struct sk_buff
, mark
, 4,
3965 case offsetof(struct __sk_buff
, pkt_type
):
3967 *insn
++ = BPF_LDX_MEM(BPF_B
, si
->dst_reg
, si
->src_reg
,
3969 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, PKT_TYPE_MAX
);
3970 #ifdef __BIG_ENDIAN_BITFIELD
3971 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, 5);
3975 case offsetof(struct __sk_buff
, queue_mapping
):
3976 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3977 bpf_target_off(struct sk_buff
, queue_mapping
, 2,
3981 case offsetof(struct __sk_buff
, vlan_present
):
3982 case offsetof(struct __sk_buff
, vlan_tci
):
3983 BUILD_BUG_ON(VLAN_TAG_PRESENT
!= 0x1000);
3985 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3986 bpf_target_off(struct sk_buff
, vlan_tci
, 2,
3988 if (si
->off
== offsetof(struct __sk_buff
, vlan_tci
)) {
3989 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
,
3992 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, 12);
3993 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, 1);
3997 case offsetof(struct __sk_buff
, cb
[0]) ...
3998 offsetofend(struct __sk_buff
, cb
[4]) - 1:
3999 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb
, data
) < 20);
4000 BUILD_BUG_ON((offsetof(struct sk_buff
, cb
) +
4001 offsetof(struct qdisc_skb_cb
, data
)) %
4004 prog
->cb_access
= 1;
4006 off
-= offsetof(struct __sk_buff
, cb
[0]);
4007 off
+= offsetof(struct sk_buff
, cb
);
4008 off
+= offsetof(struct qdisc_skb_cb
, data
);
4009 if (type
== BPF_WRITE
)
4010 *insn
++ = BPF_STX_MEM(BPF_SIZE(si
->code
), si
->dst_reg
,
4013 *insn
++ = BPF_LDX_MEM(BPF_SIZE(si
->code
), si
->dst_reg
,
4017 case offsetof(struct __sk_buff
, tc_classid
):
4018 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb
, tc_classid
) != 2);
4021 off
-= offsetof(struct __sk_buff
, tc_classid
);
4022 off
+= offsetof(struct sk_buff
, cb
);
4023 off
+= offsetof(struct qdisc_skb_cb
, tc_classid
);
4025 if (type
== BPF_WRITE
)
4026 *insn
++ = BPF_STX_MEM(BPF_H
, si
->dst_reg
,
4029 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
,
4033 case offsetof(struct __sk_buff
, data
):
4034 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, data
),
4035 si
->dst_reg
, si
->src_reg
,
4036 offsetof(struct sk_buff
, data
));
4039 case offsetof(struct __sk_buff
, data_meta
):
4041 off
-= offsetof(struct __sk_buff
, data_meta
);
4042 off
+= offsetof(struct sk_buff
, cb
);
4043 off
+= offsetof(struct bpf_skb_data_end
, data_meta
);
4044 *insn
++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si
->dst_reg
,
4048 case offsetof(struct __sk_buff
, data_end
):
4050 off
-= offsetof(struct __sk_buff
, data_end
);
4051 off
+= offsetof(struct sk_buff
, cb
);
4052 off
+= offsetof(struct bpf_skb_data_end
, data_end
);
4053 *insn
++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si
->dst_reg
,
4057 case offsetof(struct __sk_buff
, tc_index
):
4058 #ifdef CONFIG_NET_SCHED
4059 if (type
== BPF_WRITE
)
4060 *insn
++ = BPF_STX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
4061 bpf_target_off(struct sk_buff
, tc_index
, 2,
4064 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
4065 bpf_target_off(struct sk_buff
, tc_index
, 2,
4069 if (type
== BPF_WRITE
)
4070 *insn
++ = BPF_MOV64_REG(si
->dst_reg
, si
->dst_reg
);
4072 *insn
++ = BPF_MOV64_IMM(si
->dst_reg
, 0);
4076 case offsetof(struct __sk_buff
, napi_id
):
4077 #if defined(CONFIG_NET_RX_BUSY_POLL)
4078 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4079 bpf_target_off(struct sk_buff
, napi_id
, 4,
4081 *insn
++ = BPF_JMP_IMM(BPF_JGE
, si
->dst_reg
, MIN_NAPI_ID
, 1);
4082 *insn
++ = BPF_MOV64_IMM(si
->dst_reg
, 0);
4085 *insn
++ = BPF_MOV64_IMM(si
->dst_reg
, 0);
4088 case offsetof(struct __sk_buff
, family
):
4089 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_family
) != 2);
4091 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4092 si
->dst_reg
, si
->src_reg
,
4093 offsetof(struct sk_buff
, sk
));
4094 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4095 bpf_target_off(struct sock_common
,
4099 case offsetof(struct __sk_buff
, remote_ip4
):
4100 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_daddr
) != 4);
4102 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4103 si
->dst_reg
, si
->src_reg
,
4104 offsetof(struct sk_buff
, sk
));
4105 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4106 bpf_target_off(struct sock_common
,
4110 case offsetof(struct __sk_buff
, local_ip4
):
4111 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4112 skc_rcv_saddr
) != 4);
4114 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4115 si
->dst_reg
, si
->src_reg
,
4116 offsetof(struct sk_buff
, sk
));
4117 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4118 bpf_target_off(struct sock_common
,
4122 case offsetof(struct __sk_buff
, remote_ip6
[0]) ...
4123 offsetof(struct __sk_buff
, remote_ip6
[3]):
4124 #if IS_ENABLED(CONFIG_IPV6)
4125 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4126 skc_v6_daddr
.s6_addr32
[0]) != 4);
4129 off
-= offsetof(struct __sk_buff
, remote_ip6
[0]);
4131 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4132 si
->dst_reg
, si
->src_reg
,
4133 offsetof(struct sk_buff
, sk
));
4134 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4135 offsetof(struct sock_common
,
4136 skc_v6_daddr
.s6_addr32
[0]) +
4139 *insn
++ = BPF_MOV32_IMM(si
->dst_reg
, 0);
4142 case offsetof(struct __sk_buff
, local_ip6
[0]) ...
4143 offsetof(struct __sk_buff
, local_ip6
[3]):
4144 #if IS_ENABLED(CONFIG_IPV6)
4145 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4146 skc_v6_rcv_saddr
.s6_addr32
[0]) != 4);
4149 off
-= offsetof(struct __sk_buff
, local_ip6
[0]);
4151 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4152 si
->dst_reg
, si
->src_reg
,
4153 offsetof(struct sk_buff
, sk
));
4154 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4155 offsetof(struct sock_common
,
4156 skc_v6_rcv_saddr
.s6_addr32
[0]) +
4159 *insn
++ = BPF_MOV32_IMM(si
->dst_reg
, 0);
4163 case offsetof(struct __sk_buff
, remote_port
):
4164 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_dport
) != 2);
4166 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4167 si
->dst_reg
, si
->src_reg
,
4168 offsetof(struct sk_buff
, sk
));
4169 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4170 bpf_target_off(struct sock_common
,
4173 #ifndef __BIG_ENDIAN_BITFIELD
4174 *insn
++ = BPF_ALU32_IMM(BPF_LSH
, si
->dst_reg
, 16);
4178 case offsetof(struct __sk_buff
, local_port
):
4179 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_num
) != 2);
4181 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4182 si
->dst_reg
, si
->src_reg
,
4183 offsetof(struct sk_buff
, sk
));
4184 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4185 bpf_target_off(struct sock_common
,
4186 skc_num
, 2, target_size
));
4190 return insn
- insn_buf
;
4193 static u32
sock_filter_convert_ctx_access(enum bpf_access_type type
,
4194 const struct bpf_insn
*si
,
4195 struct bpf_insn
*insn_buf
,
4196 struct bpf_prog
*prog
, u32
*target_size
)
4198 struct bpf_insn
*insn
= insn_buf
;
4201 case offsetof(struct bpf_sock
, bound_dev_if
):
4202 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_bound_dev_if
) != 4);
4204 if (type
== BPF_WRITE
)
4205 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4206 offsetof(struct sock
, sk_bound_dev_if
));
4208 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4209 offsetof(struct sock
, sk_bound_dev_if
));
4212 case offsetof(struct bpf_sock
, mark
):
4213 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_mark
) != 4);
4215 if (type
== BPF_WRITE
)
4216 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4217 offsetof(struct sock
, sk_mark
));
4219 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4220 offsetof(struct sock
, sk_mark
));
4223 case offsetof(struct bpf_sock
, priority
):
4224 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_priority
) != 4);
4226 if (type
== BPF_WRITE
)
4227 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4228 offsetof(struct sock
, sk_priority
));
4230 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4231 offsetof(struct sock
, sk_priority
));
4234 case offsetof(struct bpf_sock
, family
):
4235 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_family
) != 2);
4237 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
4238 offsetof(struct sock
, sk_family
));
4241 case offsetof(struct bpf_sock
, type
):
4242 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4243 offsetof(struct sock
, __sk_flags_offset
));
4244 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, SK_FL_TYPE_MASK
);
4245 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, SK_FL_TYPE_SHIFT
);
4248 case offsetof(struct bpf_sock
, protocol
):
4249 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4250 offsetof(struct sock
, __sk_flags_offset
));
4251 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, SK_FL_PROTO_MASK
);
4252 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, SK_FL_PROTO_SHIFT
);
4256 return insn
- insn_buf
;
4259 static u32
tc_cls_act_convert_ctx_access(enum bpf_access_type type
,
4260 const struct bpf_insn
*si
,
4261 struct bpf_insn
*insn_buf
,
4262 struct bpf_prog
*prog
, u32
*target_size
)
4264 struct bpf_insn
*insn
= insn_buf
;
4267 case offsetof(struct __sk_buff
, ifindex
):
4268 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, dev
),
4269 si
->dst_reg
, si
->src_reg
,
4270 offsetof(struct sk_buff
, dev
));
4271 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4272 bpf_target_off(struct net_device
, ifindex
, 4,
4276 return bpf_convert_ctx_access(type
, si
, insn_buf
, prog
,
4280 return insn
- insn_buf
;
4283 static u32
xdp_convert_ctx_access(enum bpf_access_type type
,
4284 const struct bpf_insn
*si
,
4285 struct bpf_insn
*insn_buf
,
4286 struct bpf_prog
*prog
, u32
*target_size
)
4288 struct bpf_insn
*insn
= insn_buf
;
4291 case offsetof(struct xdp_md
, data
):
4292 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff
, data
),
4293 si
->dst_reg
, si
->src_reg
,
4294 offsetof(struct xdp_buff
, data
));
4296 case offsetof(struct xdp_md
, data_meta
):
4297 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff
, data_meta
),
4298 si
->dst_reg
, si
->src_reg
,
4299 offsetof(struct xdp_buff
, data_meta
));
4301 case offsetof(struct xdp_md
, data_end
):
4302 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff
, data_end
),
4303 si
->dst_reg
, si
->src_reg
,
4304 offsetof(struct xdp_buff
, data_end
));
4308 return insn
- insn_buf
;
4311 static u32
sock_ops_convert_ctx_access(enum bpf_access_type type
,
4312 const struct bpf_insn
*si
,
4313 struct bpf_insn
*insn_buf
,
4314 struct bpf_prog
*prog
,
4317 struct bpf_insn
*insn
= insn_buf
;
4321 case offsetof(struct bpf_sock_ops
, op
) ...
4322 offsetof(struct bpf_sock_ops
, replylong
[3]):
4323 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops
, op
) !=
4324 FIELD_SIZEOF(struct bpf_sock_ops_kern
, op
));
4325 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops
, reply
) !=
4326 FIELD_SIZEOF(struct bpf_sock_ops_kern
, reply
));
4327 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops
, replylong
) !=
4328 FIELD_SIZEOF(struct bpf_sock_ops_kern
, replylong
));
4330 off
-= offsetof(struct bpf_sock_ops
, op
);
4331 off
+= offsetof(struct bpf_sock_ops_kern
, op
);
4332 if (type
== BPF_WRITE
)
4333 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4336 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4340 case offsetof(struct bpf_sock_ops
, family
):
4341 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_family
) != 2);
4343 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4344 struct bpf_sock_ops_kern
, sk
),
4345 si
->dst_reg
, si
->src_reg
,
4346 offsetof(struct bpf_sock_ops_kern
, sk
));
4347 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4348 offsetof(struct sock_common
, skc_family
));
4351 case offsetof(struct bpf_sock_ops
, remote_ip4
):
4352 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_daddr
) != 4);
4354 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4355 struct bpf_sock_ops_kern
, sk
),
4356 si
->dst_reg
, si
->src_reg
,
4357 offsetof(struct bpf_sock_ops_kern
, sk
));
4358 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4359 offsetof(struct sock_common
, skc_daddr
));
4362 case offsetof(struct bpf_sock_ops
, local_ip4
):
4363 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_rcv_saddr
) != 4);
4365 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4366 struct bpf_sock_ops_kern
, sk
),
4367 si
->dst_reg
, si
->src_reg
,
4368 offsetof(struct bpf_sock_ops_kern
, sk
));
4369 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4370 offsetof(struct sock_common
,
4374 case offsetof(struct bpf_sock_ops
, remote_ip6
[0]) ...
4375 offsetof(struct bpf_sock_ops
, remote_ip6
[3]):
4376 #if IS_ENABLED(CONFIG_IPV6)
4377 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4378 skc_v6_daddr
.s6_addr32
[0]) != 4);
4381 off
-= offsetof(struct bpf_sock_ops
, remote_ip6
[0]);
4382 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4383 struct bpf_sock_ops_kern
, sk
),
4384 si
->dst_reg
, si
->src_reg
,
4385 offsetof(struct bpf_sock_ops_kern
, sk
));
4386 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4387 offsetof(struct sock_common
,
4388 skc_v6_daddr
.s6_addr32
[0]) +
4391 *insn
++ = BPF_MOV32_IMM(si
->dst_reg
, 0);
4395 case offsetof(struct bpf_sock_ops
, local_ip6
[0]) ...
4396 offsetof(struct bpf_sock_ops
, local_ip6
[3]):
4397 #if IS_ENABLED(CONFIG_IPV6)
4398 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4399 skc_v6_rcv_saddr
.s6_addr32
[0]) != 4);
4402 off
-= offsetof(struct bpf_sock_ops
, local_ip6
[0]);
4403 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4404 struct bpf_sock_ops_kern
, sk
),
4405 si
->dst_reg
, si
->src_reg
,
4406 offsetof(struct bpf_sock_ops_kern
, sk
));
4407 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4408 offsetof(struct sock_common
,
4409 skc_v6_rcv_saddr
.s6_addr32
[0]) +
4412 *insn
++ = BPF_MOV32_IMM(si
->dst_reg
, 0);
4416 case offsetof(struct bpf_sock_ops
, remote_port
):
4417 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_dport
) != 2);
4419 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4420 struct bpf_sock_ops_kern
, sk
),
4421 si
->dst_reg
, si
->src_reg
,
4422 offsetof(struct bpf_sock_ops_kern
, sk
));
4423 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4424 offsetof(struct sock_common
, skc_dport
));
4425 #ifndef __BIG_ENDIAN_BITFIELD
4426 *insn
++ = BPF_ALU32_IMM(BPF_LSH
, si
->dst_reg
, 16);
4430 case offsetof(struct bpf_sock_ops
, local_port
):
4431 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_num
) != 2);
4433 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4434 struct bpf_sock_ops_kern
, sk
),
4435 si
->dst_reg
, si
->src_reg
,
4436 offsetof(struct bpf_sock_ops_kern
, sk
));
4437 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4438 offsetof(struct sock_common
, skc_num
));
4441 return insn
- insn_buf
;
4444 static u32
sk_skb_convert_ctx_access(enum bpf_access_type type
,
4445 const struct bpf_insn
*si
,
4446 struct bpf_insn
*insn_buf
,
4447 struct bpf_prog
*prog
, u32
*target_size
)
4449 struct bpf_insn
*insn
= insn_buf
;
4453 case offsetof(struct __sk_buff
, data_end
):
4455 off
-= offsetof(struct __sk_buff
, data_end
);
4456 off
+= offsetof(struct sk_buff
, cb
);
4457 off
+= offsetof(struct tcp_skb_cb
, bpf
.data_end
);
4458 *insn
++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si
->dst_reg
,
4462 return bpf_convert_ctx_access(type
, si
, insn_buf
, prog
,
4466 return insn
- insn_buf
;
4469 const struct bpf_verifier_ops sk_filter_verifier_ops
= {
4470 .get_func_proto
= sk_filter_func_proto
,
4471 .is_valid_access
= sk_filter_is_valid_access
,
4472 .convert_ctx_access
= bpf_convert_ctx_access
,
4475 const struct bpf_prog_ops sk_filter_prog_ops
= {
4478 const struct bpf_verifier_ops tc_cls_act_verifier_ops
= {
4479 .get_func_proto
= tc_cls_act_func_proto
,
4480 .is_valid_access
= tc_cls_act_is_valid_access
,
4481 .convert_ctx_access
= tc_cls_act_convert_ctx_access
,
4482 .gen_prologue
= tc_cls_act_prologue
,
4485 const struct bpf_prog_ops tc_cls_act_prog_ops
= {
4486 .test_run
= bpf_prog_test_run_skb
,
4489 const struct bpf_verifier_ops xdp_verifier_ops
= {
4490 .get_func_proto
= xdp_func_proto
,
4491 .is_valid_access
= xdp_is_valid_access
,
4492 .convert_ctx_access
= xdp_convert_ctx_access
,
4495 const struct bpf_prog_ops xdp_prog_ops
= {
4496 .test_run
= bpf_prog_test_run_xdp
,
4499 const struct bpf_verifier_ops cg_skb_verifier_ops
= {
4500 .get_func_proto
= sk_filter_func_proto
,
4501 .is_valid_access
= sk_filter_is_valid_access
,
4502 .convert_ctx_access
= bpf_convert_ctx_access
,
4505 const struct bpf_prog_ops cg_skb_prog_ops
= {
4506 .test_run
= bpf_prog_test_run_skb
,
4509 const struct bpf_verifier_ops lwt_inout_verifier_ops
= {
4510 .get_func_proto
= lwt_inout_func_proto
,
4511 .is_valid_access
= lwt_is_valid_access
,
4512 .convert_ctx_access
= bpf_convert_ctx_access
,
4515 const struct bpf_prog_ops lwt_inout_prog_ops
= {
4516 .test_run
= bpf_prog_test_run_skb
,
4519 const struct bpf_verifier_ops lwt_xmit_verifier_ops
= {
4520 .get_func_proto
= lwt_xmit_func_proto
,
4521 .is_valid_access
= lwt_is_valid_access
,
4522 .convert_ctx_access
= bpf_convert_ctx_access
,
4523 .gen_prologue
= tc_cls_act_prologue
,
4526 const struct bpf_prog_ops lwt_xmit_prog_ops
= {
4527 .test_run
= bpf_prog_test_run_skb
,
4530 const struct bpf_verifier_ops cg_sock_verifier_ops
= {
4531 .get_func_proto
= sock_filter_func_proto
,
4532 .is_valid_access
= sock_filter_is_valid_access
,
4533 .convert_ctx_access
= sock_filter_convert_ctx_access
,
4536 const struct bpf_prog_ops cg_sock_prog_ops
= {
4539 const struct bpf_verifier_ops sock_ops_verifier_ops
= {
4540 .get_func_proto
= sock_ops_func_proto
,
4541 .is_valid_access
= sock_ops_is_valid_access
,
4542 .convert_ctx_access
= sock_ops_convert_ctx_access
,
4545 const struct bpf_prog_ops sock_ops_prog_ops
= {
4548 const struct bpf_verifier_ops sk_skb_verifier_ops
= {
4549 .get_func_proto
= sk_skb_func_proto
,
4550 .is_valid_access
= sk_skb_is_valid_access
,
4551 .convert_ctx_access
= sk_skb_convert_ctx_access
,
4552 .gen_prologue
= sk_skb_prologue
,
4555 const struct bpf_prog_ops sk_skb_prog_ops
= {
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore. eBPF programs that
	 * have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number