/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory.
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
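
/* Callers rarely invoke sk_filter_trim_cap() directly. A minimal sketch of
 * the usual wrapper (defined in include/linux/filter.h in kernels of this
 * vintage -- shown here purely for illustration) caps trimming at one byte:
 *
 *	static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return sk_filter_trim_cap(sk, skb, 1);
 *	}
 */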
BPF_CALL_1(__skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(__skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;
	if (skb->len < sizeof(struct nlattr))
		return 0;
	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(__skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;
	if (skb->len < sizeof(struct nlattr))
		return 0;
	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_0(__get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= __get_raw_cpu_id,
	.ret_type	= RET_INTEGER,
};
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}
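
/* For illustration only (not part of the original file): an ancillary load
 * such as the classic insn BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 * SKF_AD_OFF + SKF_AD_CPU) is expanded by the code above into an eBPF helper
 * call sequence roughly equivalent to:
 *
 *	BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX),
 *	BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A),
 *	BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X),
 *	BPF_EMIT_CALL(__get_raw_cpu_id),
 */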
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *	bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *	bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourself. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X))
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K is remaped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD  | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD  | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	if (new_prog)
		memset(new_prog, 0, sizeof(*new_prog));
	return -EINVAL;
}
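
/* Illustrative sketch of the two-pass calling convention documented above
 * (bpf_migrate_filter() below is the in-tree user); the variable names here
 * are hypothetical and the snippet is not part of this file:
 *
 *	int new_len, err;
 *
 *	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
 *	if (err)
 *		return err;
 *	new_prog = bpf_prog_alloc(bpf_prog_size(new_len), 0);
 *	if (!new_prog)
 *		return -ENOMEM;
 *	new_prog->len = new_len;
 *	err = bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 */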
/* As we don't want to clear mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by a user never tries to
 * read a cell if not previously written, and we check all branches to be sure
 * a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
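
/* Example (illustrative only, not from this file): the following two-insn
 * classic filter would be rejected by check_load_and_stores(), because
 * scratch cell M[0] is read before any store has written it:
 *
 *	struct sock_filter bad[] = {
 *		BPF_STMT(BPF_LD | BPF_MEM, 0),	// A = M[0], never written
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 */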
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}
static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}
/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}
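
/* Example (illustrative only): bpf_check_classic() would reject a filter
 * such as the one below, both because the BPF_DIV immediate is zero and
 * because the last instruction is not a RET:
 *
 *	struct sock_filter bad[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),
 *		BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),	// division by zero
 *	};
 */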
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}
/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);
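
/* Minimal usage sketch (illustrative, not from this file): an in-kernel user
 * wraps a static classic filter in a sock_fprog_kern, hands it to
 * bpf_prog_create(), and later frees the program with bpf_prog_destroy():
 *
 *	static struct sock_filter accept_all[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 *	static struct sock_fprog_kern accept_all_fprog = {
 *		.len	= ARRAY_SIZE(accept_all),
 *		.filter	= accept_all,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &accept_all_fprog);
 */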
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct bpf_prog *old_prog;
	int err;

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		return -ENOMEM;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		err = reuseport_alloc(sk);
		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	old_prog = reuseport_attach_prog(sk, prog);
	if (old_prog)
		bpf_prog_destroy(old_prog);

	return 0;
}
*__get_filter(struct sock_fprog
*fprog
, struct sock
*sk
)
1271 unsigned int fsize
= bpf_classic_proglen(fprog
);
1272 struct bpf_prog
*prog
;
1275 if (sock_flag(sk
, SOCK_FILTER_LOCKED
))
1276 return ERR_PTR(-EPERM
);
1278 /* Make sure new filter is there and in the right amounts. */
1279 if (!bpf_check_basics_ok(fprog
->filter
, fprog
->len
))
1280 return ERR_PTR(-EINVAL
);
1282 prog
= bpf_prog_alloc(bpf_prog_size(fprog
->len
), 0);
1284 return ERR_PTR(-ENOMEM
);
1286 if (copy_from_user(prog
->insns
, fprog
->filter
, fsize
)) {
1287 __bpf_prog_free(prog
);
1288 return ERR_PTR(-EFAULT
);
1291 prog
->len
= fprog
->len
;
1293 err
= bpf_prog_store_orig_filter(prog
, fprog
);
1295 __bpf_prog_free(prog
);
1296 return ERR_PTR(-ENOMEM
);
1299 /* bpf_prepare_filter() already takes care of freeing
1300 * memory in case something goes wrong.
1302 return bpf_prepare_filter(prog
, NULL
);
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
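
/* Userspace reaches sk_attach_filter() through setsockopt(SO_ATTACH_FILTER).
 * A hedged sketch of that side (not part of this file) looks like:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	// accept everything
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */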
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}
struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}
BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)
		return 0;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;

	return 0;
}

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}

static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};
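
/* Typical (illustrative) pairing with bpf_l4_csum_replace() as seen from a
 * BPF program after rewriting an IPv4 address that belongs to the pseudo
 * header; csum_off and the address variables are assumptions of the sketch:
 *
 *	__wsum diff = bpf_csum_diff(&old_addr, sizeof(old_addr),
 *				    &new_addr, sizeof(new_addr), 0);
 *	bpf_l4_csum_replace(skb, csum_off, 0, diff, BPF_F_PSEUDO_HDR);
 */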
BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
	/* The interface is to be used in combination with bpf_csum_diff()
	 * for direct packet writes. csum rotation for alignment as well
	 * as emulating csum_sub() can be done from the eBPF program.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		return (skb->csum = csum_add(skb->csum, csum));

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_csum_update_proto = {
	.func		= bpf_csum_update,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
	return dev_forward_skb(dev, skb);
}

static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
				      struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->dev = dev;
		ret = netif_rx(skb);
	}

	return ret;
}

static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->dev = dev;

	__this_cpu_inc(xmit_recursion);
	ret = dev_queue_xmit(skb);
	__this_cpu_dec(xmit_recursion);

	return ret;
}

static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* skb->mac_len is not set on normal egress */
	unsigned int mlen = skb->network_header - skb->mac_header;

	__skb_pull(skb, mlen);

	/* At ingress, the mac header has already been pulled once.
	 * At egress, skb_pospull_rcsum has to be done in case that
	 * the skb is originated from ingress (i.e. a forwarded skb)
	 * to ensure that rcsum starts at net header.
	 */
	if (!skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
	skb_pop_mac_header(skb);
	skb_reset_mac_len(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* Verify that a link layer header is carried */
	if (unlikely(skb->mac_header >= skb->network_header)) {
		kfree_skb(skb);
		return -ERANGE;
	}

	bpf_push_mac_rcsum(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
			  u32 flags)
{
	if (dev_is_mac_header_xmit(dev))
		return __bpf_redirect_common(skb, dev, flags);
	else
		return __bpf_redirect_no_mac(skb, dev, flags);
}
, struct sk_buff
*, skb
, u32
, ifindex
, u64
, flags
)
1761 struct net_device
*dev
;
1762 struct sk_buff
*clone
;
1765 if (unlikely(flags
& ~(BPF_F_INGRESS
)))
1768 dev
= dev_get_by_index_rcu(dev_net(skb
->dev
), ifindex
);
1772 clone
= skb_clone(skb
, GFP_ATOMIC
);
1773 if (unlikely(!clone
))
1776 /* For direct write, we need to keep the invariant that the skbs
1777 * we're dealing with need to be uncloned. Should uncloning fail
1778 * here, we need to free the just generated clone to unclone once
1781 ret
= bpf_try_make_head_writable(skb
);
1782 if (unlikely(ret
)) {
1787 return __bpf_redirect(clone
, dev
, flags
);
1790 static const struct bpf_func_proto bpf_clone_redirect_proto
= {
1791 .func
= bpf_clone_redirect
,
1793 .ret_type
= RET_INTEGER
,
1794 .arg1_type
= ARG_PTR_TO_CTX
,
1795 .arg2_type
= ARG_ANYTHING
,
1796 .arg3_type
= ARG_ANYTHING
,
struct redirect_info {
	u32 ifindex;
	u32 flags;
	struct bpf_map *map;
	struct bpf_map *map_to_flush;
	unsigned long   map_owner;
};

static DEFINE_PER_CPU(struct redirect_info, redirect_info);

BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return TC_ACT_SHOT;

	ri->ifindex = ifindex;
	ri->flags = flags;

	return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
	ri->ifindex = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return __bpf_redirect(skb, dev, ri->flags);
}

static const struct bpf_func_proto bpf_redirect_proto = {
	.func		= bpf_redirect,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	/* If user passes invalid input drop the packet. */
	if (unlikely(flags))
		return SK_DROP;

	tcb->bpf.key = key;
	tcb->bpf.flags = flags;
	tcb->bpf.map = map;

	return SK_PASS;
}

struct sock *do_sk_redirect_map(struct sk_buff *skb)
{
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
	struct sock *sk = NULL;

	if (tcb->bpf.map) {
		sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key);

		tcb->bpf.key = 0;
		tcb->bpf.map = NULL;
	}

	return sk;
}

static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func		= bpf_sk_redirect_map,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
	return task_get_classid(skb);
}

static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
	.func		= bpf_get_cgroup_classid,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{
	return dst_tclassid(skb);
}

static const struct bpf_func_proto bpf_get_route_realm_proto = {
	.func		= bpf_get_route_realm,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{
	/* If skb_clear_hash() was called due to mangling, we can
	 * trigger SW recalculation here. Later access to hash
	 * can then use the inline skb->hash via context directly
	 * instead of calling this helper again.
	 */
	return skb_get_hash(skb);
}

static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
	.func		= bpf_get_hash_recalc,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
{
	/* After all direct packet write, this can be used once for
	 * triggering a lazy recalc on next skb_get_hash() invocation.
	 */
	skb_clear_hash(skb);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
	.func		= bpf_set_hash_invalid,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
{
	/* Set user specified hash as L4(+), so that it gets returned
	 * on skb_get_hash() call unless BPF prog later on triggers a
	 * skb_clear_hash().
	 */
	__skb_set_sw_hash(skb, hash, true);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_proto = {
	.func		= bpf_set_hash,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
	   u16, vlan_tci)
{
	int ret;

	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
		     vlan_proto != htons(ETH_P_8021AD)))
		vlan_proto = htons(ETH_P_8021Q);

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_pointers(skb);
	return ret;
}

const struct bpf_func_proto bpf_skb_vlan_push_proto = {
	.func		= bpf_skb_vlan_push,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);

BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
	int ret;

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_pop(skb);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_pointers(skb);
	return ret;
}

const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
	.func		= bpf_skb_vlan_pop,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{
	/* Caller already did skb_cow() with len as headroom,
	 * so no need to do it here.
	 */
	skb_push(skb, len);
	memmove(skb->data, skb->data + len, off);
	memset(skb->data + off, 0, len);

	/* No skb_postpush_rcsum(skb, skb->data + off, len)
	 * needed here as it does not change the skb->csum
	 * result for checksum complete when summing over
	 * zeroed blocks.
	 */
	return 0;
}

static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
	/* skb_ensure_writable() is not needed here, as we're
	 * already working on an uncloned skb.
	 */
	if (unlikely(!pskb_may_pull(skb, off + len)))
		return -ENOMEM;

	skb_postpull_rcsum(skb, skb->data + off, len);
	memmove(skb->data + len, skb->data, off);
	__skb_pull(skb, len);

	return 0;
}

static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* There's no need for __skb_push()/__skb_pull() pair to
	 * get to the start of the mac header as we're guaranteed
	 * to always start from here under eBPF.
	 */
	ret = bpf_skb_generic_push(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header -= len;
		skb->network_header -= len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}

static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* Same here, __skb_push()/__skb_pull() pair not needed. */
	ret = bpf_skb_generic_pop(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header += len;
		skb->network_header += len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}
static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb_mac_header_len(skb);
	int ret;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		/* SKB_GSO_TCPV4 needs to be changed into
		 * SKB_GSO_TCPV6.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4;
			skb_shinfo(skb)->gso_type |=  SKB_GSO_TCPV6;
		}

		/* Due to IPv6 header, MSS needs to be downgraded. */
		skb_shinfo(skb)->gso_size -= len_diff;
		/* Header must be checked, and gso_segs recomputed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IPV6);
	skb_clear_hash(skb);

	return 0;
}

static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb_mac_header_len(skb);
	int ret;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		/* SKB_GSO_TCPV6 needs to be changed into
		 * SKB_GSO_TCPV4.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6;
			skb_shinfo(skb)->gso_type |=  SKB_GSO_TCPV4;
		}

		/* Due to IPv4 header, MSS can be upgraded. */
		skb_shinfo(skb)->gso_size += len_diff;
		/* Header must be checked, and gso_segs recomputed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IP);
	skb_clear_hash(skb);

	return 0;
}

static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{
	__be16 from_proto = skb->protocol;

	if (from_proto == htons(ETH_P_IP) &&
	      to_proto == htons(ETH_P_IPV6))
		return bpf_skb_proto_4_to_6(skb);

	if (from_proto == htons(ETH_P_IPV6) &&
	      to_proto == htons(ETH_P_IP))
		return bpf_skb_proto_6_to_4(skb);

	return -ENOTSUPP;
}
, struct sk_buff
*, skb
, __be16
, proto
,
2169 if (unlikely(flags
))
2172 /* General idea is that this helper does the basic groundwork
2173 * needed for changing the protocol, and eBPF program fills the
2174 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
2175 * and other helpers, rather than passing a raw buffer here.
2177 * The rationale is to keep this minimal and without a need to
2178 * deal with raw packet data. F.e. even if we would pass buffers
2179 * here, the program still needs to call the bpf_lX_csum_replace()
2180 * helpers anyway. Plus, this way we keep also separation of
2181 * concerns, since f.e. bpf_skb_store_bytes() should only take
2184 * Currently, additional options and extension header space are
2185 * not supported, but flags register is reserved so we can adapt
2186 * that. For offloads, we mark packet as dodgy, so that headers
2187 * need to be verified first.
2189 ret
= bpf_skb_proto_xlat(skb
, proto
);
2190 bpf_compute_data_pointers(skb
);
2194 static const struct bpf_func_proto bpf_skb_change_proto_proto
= {
2195 .func
= bpf_skb_change_proto
,
2197 .ret_type
= RET_INTEGER
,
2198 .arg1_type
= ARG_PTR_TO_CTX
,
2199 .arg2_type
= ARG_ANYTHING
,
2200 .arg3_type
= ARG_ANYTHING
,
BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{
	/* We only allow a restricted subset to be changed for now. */
	if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
		     !skb_pkt_type_ok(pkt_type)))
		return -EINVAL;

	skb->pkt_type = pkt_type;
	return 0;
}

static const struct bpf_func_proto bpf_skb_change_type_proto = {
	.func		= bpf_skb_change_type,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return sizeof(struct iphdr);
	case htons(ETH_P_IPV6):
		return sizeof(struct ipv6hdr);
	default:
		return ~0U;
	}
}

static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
{
	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
	int ret;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		/* Due to header grow, MSS needs to be downgraded. */
		skb_shinfo(skb)->gso_size -= len_diff;
		/* Header must be checked, and gso_segs recomputed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	return 0;
}

static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
{
	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
	int ret;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		/* Due to header shrink, MSS can be upgraded. */
		skb_shinfo(skb)->gso_size += len_diff;
		/* Header must be checked, and gso_segs recomputed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	return 0;
}

static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
	return skb->dev->mtu + skb->dev->hard_header_len;
}

static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
{
	bool trans_same = skb->transport_header == skb->network_header;
	u32 len_cur, len_diff_abs = abs(len_diff);
	u32 len_min = bpf_skb_net_base_len(skb);
	u32 len_max = __bpf_skb_max_len(skb);
	__be16 proto = skb->protocol;
	bool shrink = len_diff < 0;
	int ret;

	if (unlikely(len_diff_abs > 0xfffU))
		return -EFAULT;
	if (unlikely(proto != htons(ETH_P_IP) &&
		     proto != htons(ETH_P_IPV6)))
		return -ENOTSUPP;

	len_cur = skb->len - skb_network_offset(skb);
	if (skb_transport_header_was_set(skb) && !trans_same)
		len_cur = skb_network_header_len(skb);
	if ((shrink && (len_diff_abs >= len_cur ||
			len_cur - len_diff_abs < len_min)) ||
	    (!shrink && (skb->len + len_diff_abs > len_max &&
			 !skb_is_gso(skb))))
		return -ENOTSUPP;

	ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) :
		       bpf_skb_net_grow(skb, len_diff_abs);

	bpf_compute_data_pointers(skb);
	return ret;
}

BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
	   u32, mode, u64, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	if (likely(mode == BPF_ADJ_ROOM_NET))
		return bpf_skb_adjust_net(skb, len_diff);

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
	.func		= bpf_skb_adjust_room,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};
2340 static u32
__bpf_skb_min_len(const struct sk_buff
*skb
)
2342 u32 min_len
= skb_network_offset(skb
);
2344 if (skb_transport_header_was_set(skb
))
2345 min_len
= skb_transport_offset(skb
);
2346 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2347 min_len
= skb_checksum_start_offset(skb
) +
2348 skb
->csum_offset
+ sizeof(__sum16
);
2352 static int bpf_skb_grow_rcsum(struct sk_buff
*skb
, unsigned int new_len
)
2354 unsigned int old_len
= skb
->len
;
2357 ret
= __skb_grow_rcsum(skb
, new_len
);
2359 memset(skb
->data
+ old_len
, 0, new_len
- old_len
);
2363 static int bpf_skb_trim_rcsum(struct sk_buff
*skb
, unsigned int new_len
)
2365 return __skb_trim_rcsum(skb
, new_len
);
BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 min_len = __bpf_skb_min_len(skb);
	int ret;

	if (unlikely(flags || new_len > max_len || new_len < min_len))
		return -EINVAL;
	if (skb->encapsulation)
		return -ENOTSUPP;

	/* The basic idea of this helper is that it's performing the
	 * needed work to either grow or trim an skb, and eBPF program
	 * rewrites the rest via helpers like bpf_skb_store_bytes(),
	 * bpf_lX_csum_replace() and others rather than passing a raw
	 * buffer here. This one is a slow path helper and intended
	 * for replies with control messages.
	 *
	 * Like in bpf_skb_change_proto(), we want to keep this rather
	 * minimal and without protocol specifics so that we are able
	 * to separate concerns as in bpf_skb_store_bytes() should only
	 * be the one responsible for writing buffers.
	 *
	 * It's really expected to be a slow path operation here for
	 * control message replies, so we're implicitly linearizing,
	 * uncloning and drop offloads from the skb by this.
	 */
	ret = __bpf_try_make_writable(skb, skb->len);
	if (!ret) {
		if (new_len > skb->len)
			ret = bpf_skb_grow_rcsum(skb, new_len);
		else if (new_len < skb->len)
			ret = bpf_skb_trim_rcsum(skb, new_len);
		if (!ret && skb_is_gso(skb))
			skb_gso_reset(skb);
	}

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_tail_proto = {
	.func		= bpf_skb_change_tail,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

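/* Usage sketch (illustrative, not part of this file): since this is a
 * slow-path helper meant for control message replies, a typical caller
 * first trims or grows the skb and then rewrites the payload with
 * bpf_skb_store_bytes(). Assuming clang/libbpf conventions:
 *
 *	SEC("tc")
 *	int reply_trim(struct __sk_buff *skb)
 *	{
 *		// cut the reply down to 64 bytes; the checksum is kept
 *		// in sync by __skb_trim_rcsum()/__skb_grow_rcsum() above
 *		if (bpf_skb_change_tail(skb, 64, 0))
 *			return TC_ACT_SHOT;
 *		return TC_ACT_OK;
 *	}
 */
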
BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 new_len = skb->len + head_room;
	int ret;

	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
		     new_len < skb->len))
		return -EINVAL;

	ret = skb_cow(skb, head_room);
	if (likely(!ret)) {
		/* Idea for this helper is that we currently only
		 * allow to expand on mac header. This means that
		 * skb->protocol network header, etc, stay as is.
		 * Compared to bpf_skb_change_tail(), we're more
		 * flexible due to not needing to linearize or
		 * reset GSO. Intention for this helper is to be
		 * used by an L3 skb that needs to push mac header
		 * for redirection into L2 device.
		 */
		__skb_push(skb, head_room);
		memset(skb->data, 0, head_room);
		skb_reset_mac_header(skb);
	}

	bpf_compute_data_pointers(skb);
	return 0;
}

static const struct bpf_func_proto bpf_skb_change_head_proto = {
	.func		= bpf_skb_change_head,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
	return xdp_data_meta_unsupported(xdp) ? 0 :
	       xdp->data - xdp->data_meta;
}

BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
	unsigned long metalen = xdp_get_metalen(xdp);
	void *data_start = xdp->data_hard_start + metalen;
	void *data = xdp->data + offset;

	if (unlikely(data < data_start ||
		     data > xdp->data_end - ETH_HLEN))
		return -EINVAL;

	if (metalen)
		memmove(xdp->data_meta + offset,
			xdp->data_meta, metalen);
	xdp->data_meta += offset;
	xdp->data = data;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
	.func		= bpf_xdp_adjust_head,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

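/* Usage sketch (illustrative, not part of this file): an XDP program
 * can use bpf_xdp_adjust_head() to pop or push bytes at the start of
 * the frame, e.g. to strip an assumed 8-byte custom pre-header; the
 * bounds check above guarantees at least ETH_HLEN bytes remain.
 * Assuming clang/libbpf conventions:
 *
 *	SEC("xdp")
 *	int strip_outer(struct xdp_md *ctx)
 *	{
 *		if (bpf_xdp_adjust_head(ctx, 8))
 *			return XDP_DROP;
 *		// data/data_end must be re-loaded after the call
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *		if (data + sizeof(struct ethhdr) > data_end)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */
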
BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
{
	void *meta = xdp->data_meta + offset;
	unsigned long metalen = xdp->data - meta;

	if (xdp_data_meta_unsupported(xdp))
		return -ENOTSUPP;
	if (unlikely(meta < xdp->data_hard_start ||
		     meta > xdp->data))
		return -EINVAL;
	if (unlikely((metalen & (sizeof(__u32) - 1)) ||
		     (metalen > 32)))
		return -EACCES;

	xdp->data_meta = meta;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
	.func		= bpf_xdp_adjust_meta,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

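/* Usage sketch (illustrative, not part of this file): data_meta gives
 * an XDP program a small scratch area in front of the packet that a
 * later tc/BPF program can read via __sk_buff->data_meta. The area
 * must be u32-aligned and at most 32 bytes, as checked above.
 * Assuming clang/libbpf conventions:
 *
 *	SEC("xdp")
 *	int mark_flow(struct xdp_md *ctx)
 *	{
 *		__u32 *meta;
 *
 *		if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
 *			return XDP_PASS;
 *		meta = (void *)(long)ctx->data_meta;
 *		if ((void *)(meta + 1) > (void *)(long)ctx->data)
 *			return XDP_PASS;
 *		*meta = 0xcafe;	// picked up later at the tc layer
 *		return XDP_PASS;
 *	}
 */
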
2519 static int __bpf_tx_xdp(struct net_device
*dev
,
2520 struct bpf_map
*map
,
2521 struct xdp_buff
*xdp
,
2526 if (!dev
->netdev_ops
->ndo_xdp_xmit
) {
2530 err
= dev
->netdev_ops
->ndo_xdp_xmit(dev
, xdp
);
2533 dev
->netdev_ops
->ndo_xdp_flush(dev
);
2537 static int __bpf_tx_xdp_map(struct net_device
*dev_rx
, void *fwd
,
2538 struct bpf_map
*map
,
2539 struct xdp_buff
*xdp
,
2544 if (map
->map_type
== BPF_MAP_TYPE_DEVMAP
) {
2545 struct net_device
*dev
= fwd
;
2547 if (!dev
->netdev_ops
->ndo_xdp_xmit
)
2550 err
= dev
->netdev_ops
->ndo_xdp_xmit(dev
, xdp
);
2553 __dev_map_insert_ctx(map
, index
);
2555 } else if (map
->map_type
== BPF_MAP_TYPE_CPUMAP
) {
2556 struct bpf_cpu_map_entry
*rcpu
= fwd
;
2558 err
= cpu_map_enqueue(rcpu
, xdp
, dev_rx
);
2561 __cpu_map_insert_ctx(map
, index
);
void xdp_do_flush_map(void)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct bpf_map *map = ri->map_to_flush;

	ri->map_to_flush = NULL;
	if (map) {
		switch (map->map_type) {
		case BPF_MAP_TYPE_DEVMAP:
			__dev_map_flush(map);
			break;
		case BPF_MAP_TYPE_CPUMAP:
			__cpu_map_flush(map);
			break;
		default:
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(xdp_do_flush_map);

static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_DEVMAP:
		return __dev_map_lookup_elem(map, index);
	case BPF_MAP_TYPE_CPUMAP:
		return __cpu_map_lookup_elem(map, index);
	default:
		return NULL;
	}
}

static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
				   unsigned long aux)
{
	return (unsigned long)xdp_prog->aux != aux;
}

2605 static int xdp_do_redirect_map(struct net_device
*dev
, struct xdp_buff
*xdp
,
2606 struct bpf_prog
*xdp_prog
)
2608 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
2609 unsigned long map_owner
= ri
->map_owner
;
2610 struct bpf_map
*map
= ri
->map
;
2611 u32 index
= ri
->ifindex
;
2619 if (unlikely(xdp_map_invalid(xdp_prog
, map_owner
))) {
2625 fwd
= __xdp_map_lookup_elem(map
, index
);
2630 if (ri
->map_to_flush
&& ri
->map_to_flush
!= map
)
2633 err
= __bpf_tx_xdp_map(dev
, fwd
, map
, xdp
, index
);
2637 ri
->map_to_flush
= map
;
2638 _trace_xdp_redirect_map(dev
, xdp_prog
, fwd
, map
, index
);
2641 _trace_xdp_redirect_map_err(dev
, xdp_prog
, fwd
, map
, index
, err
);
2645 int xdp_do_redirect(struct net_device
*dev
, struct xdp_buff
*xdp
,
2646 struct bpf_prog
*xdp_prog
)
2648 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
2649 struct net_device
*fwd
;
2650 u32 index
= ri
->ifindex
;
2654 return xdp_do_redirect_map(dev
, xdp
, xdp_prog
);
2656 fwd
= dev_get_by_index_rcu(dev_net(dev
), index
);
2658 if (unlikely(!fwd
)) {
2663 err
= __bpf_tx_xdp(fwd
, NULL
, xdp
, 0);
2667 _trace_xdp_redirect(dev
, xdp_prog
, index
);
2670 _trace_xdp_redirect_err(dev
, xdp_prog
, index
, err
);
2673 EXPORT_SYMBOL_GPL(xdp_do_redirect
);
2675 static int __xdp_generic_ok_fwd_dev(struct sk_buff
*skb
, struct net_device
*fwd
)
2679 if (unlikely(!(fwd
->flags
& IFF_UP
)))
2682 len
= fwd
->mtu
+ fwd
->hard_header_len
+ VLAN_HLEN
;
2689 int xdp_do_generic_redirect_map(struct net_device
*dev
, struct sk_buff
*skb
,
2690 struct bpf_prog
*xdp_prog
)
2692 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
2693 unsigned long map_owner
= ri
->map_owner
;
2694 struct bpf_map
*map
= ri
->map
;
2695 struct net_device
*fwd
= NULL
;
2696 u32 index
= ri
->ifindex
;
2703 if (unlikely(xdp_map_invalid(xdp_prog
, map_owner
))) {
2708 fwd
= __xdp_map_lookup_elem(map
, index
);
2709 if (unlikely(!fwd
)) {
2714 if (map
->map_type
== BPF_MAP_TYPE_DEVMAP
) {
2715 if (unlikely((err
= __xdp_generic_ok_fwd_dev(skb
, fwd
))))
2719 /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
2724 _trace_xdp_redirect_map(dev
, xdp_prog
, fwd
, map
, index
);
2727 _trace_xdp_redirect_map_err(dev
, xdp_prog
, fwd
, map
, index
, err
);
2731 int xdp_do_generic_redirect(struct net_device
*dev
, struct sk_buff
*skb
,
2732 struct bpf_prog
*xdp_prog
)
2734 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
2735 u32 index
= ri
->ifindex
;
2736 struct net_device
*fwd
;
2740 return xdp_do_generic_redirect_map(dev
, skb
, xdp_prog
);
2743 fwd
= dev_get_by_index_rcu(dev_net(dev
), index
);
2744 if (unlikely(!fwd
)) {
2749 if (unlikely((err
= __xdp_generic_ok_fwd_dev(skb
, fwd
))))
2753 _trace_xdp_redirect(dev
, xdp_prog
, index
);
2756 _trace_xdp_redirect_err(dev
, xdp_prog
, index
, err
);
2759 EXPORT_SYMBOL_GPL(xdp_do_generic_redirect
);
BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	ri->map = NULL;
	ri->map_owner = 0;

	return XDP_REDIRECT;
}

static const struct bpf_func_proto bpf_xdp_redirect_proto = {
	.func		= bpf_xdp_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
	   unsigned long, map_owner)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	ri->map = map;
	ri->map_owner = map_owner;

	return XDP_REDIRECT;
}

/* Note, arg4 is hidden from users and populated by the verifier
 * with the right pointer.
 */
static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
	.func		= bpf_xdp_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

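/* Usage sketch (illustrative, not part of this file): the map based
 * redirect variant is the fast path; the program only records the
 * forwarding decision in redirect_info here and the driver acts on the
 * XDP_REDIRECT return code. Assuming a BPF_MAP_TYPE_DEVMAP named
 * tx_ports (a hypothetical name) defined with libbpf-style map
 * definitions:
 *
 *	SEC("xdp")
 *	int xdp_fwd(struct xdp_md *ctx)
 *	{
 *		// forward every frame to the device stored at index 0
 *		return bpf_redirect_map(&tx_ports, 0, 0);
 *	}
 *
 * The hidden map_owner argument is filled in by the verifier, as the
 * comment above notes, so programs only pass map, index and flags.
 */
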
bool bpf_helper_changes_pkt_data(void *func)
{
	if (func == bpf_skb_vlan_push ||
	    func == bpf_skb_vlan_pop ||
	    func == bpf_skb_store_bytes ||
	    func == bpf_skb_change_proto ||
	    func == bpf_skb_change_head ||
	    func == bpf_skb_change_tail ||
	    func == bpf_skb_adjust_room ||
	    func == bpf_skb_pull_data ||
	    func == bpf_clone_redirect ||
	    func == bpf_l3_csum_replace ||
	    func == bpf_l4_csum_replace ||
	    func == bpf_xdp_adjust_head ||
	    func == bpf_xdp_adjust_meta)
		return true;

	return false;
}

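/* Note for program authors (illustrative): every helper listed above
 * invalidates previously derived packet pointers, so the verifier
 * forces a re-load of data/data_end after calling one of them. A
 * minimal sketch of the required pattern, assuming clang/libbpf
 * conventions:
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *
 *	bpf_skb_pull_data(skb, 0);
 *	// old data/data_end are now unusable; re-load and re-check
 *	data = (void *)(long)skb->data;
 *	data_end = (void *)(long)skb->data_end;
 *	if (data + sizeof(struct ethhdr) > data_end)
 *		return TC_ACT_SHOT;
 */
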
static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
				  unsigned long off, unsigned long len)
{
	void *ptr = skb_header_pointer(skb, off, len, dst_buff);

	if (unlikely(!ptr))
		return len;
	if (ptr != dst_buff)
		memcpy(dst_buff, ptr, len);

	return 0;
}

BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(skb_size > skb->len))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
				bpf_skb_copy);
}

static const struct bpf_func_proto bpf_skb_event_output_proto = {
	.func		= bpf_skb_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

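/* Usage sketch (illustrative, not part of this file): perf event output
 * from an skb program attaches user metadata plus an optional packet
 * prefix; the packet length is encoded in the upper 32 bits of flags
 * via BPF_F_CTXLEN_MASK, as decoded above. Assuming a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY named events (a hypothetical name):
 *
 *	struct event_meta { __u32 ifindex; } meta = { skb->ifindex };
 *	__u64 flags = BPF_F_CURRENT_CPU |
 *		      ((__u64)64 << 32);	// also copy first 64 bytes
 *
 *	bpf_perf_event_output(skb, &events, flags, &meta, sizeof(meta));
 */
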
static unsigned short bpf_tunnel_key_af(u64 flags)
{
	return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}

2875 BPF_CALL_4(bpf_skb_get_tunnel_key
, struct sk_buff
*, skb
, struct bpf_tunnel_key
*, to
,
2876 u32
, size
, u64
, flags
)
2878 const struct ip_tunnel_info
*info
= skb_tunnel_info(skb
);
2879 u8 compat
[sizeof(struct bpf_tunnel_key
)];
2883 if (unlikely(!info
|| (flags
& ~(BPF_F_TUNINFO_IPV6
)))) {
2887 if (ip_tunnel_info_af(info
) != bpf_tunnel_key_af(flags
)) {
2891 if (unlikely(size
!= sizeof(struct bpf_tunnel_key
))) {
2894 case offsetof(struct bpf_tunnel_key
, tunnel_label
):
2895 case offsetof(struct bpf_tunnel_key
, tunnel_ext
):
2897 case offsetof(struct bpf_tunnel_key
, remote_ipv6
[1]):
2898 /* Fixup deprecated structure layouts here, so we have
2899 * a common path later on.
2901 if (ip_tunnel_info_af(info
) != AF_INET
)
2904 to
= (struct bpf_tunnel_key
*)compat
;
2911 to
->tunnel_id
= be64_to_cpu(info
->key
.tun_id
);
2912 to
->tunnel_tos
= info
->key
.tos
;
2913 to
->tunnel_ttl
= info
->key
.ttl
;
2915 if (flags
& BPF_F_TUNINFO_IPV6
) {
2916 memcpy(to
->remote_ipv6
, &info
->key
.u
.ipv6
.src
,
2917 sizeof(to
->remote_ipv6
));
2918 to
->tunnel_label
= be32_to_cpu(info
->key
.label
);
2920 to
->remote_ipv4
= be32_to_cpu(info
->key
.u
.ipv4
.src
);
2923 if (unlikely(size
!= sizeof(struct bpf_tunnel_key
)))
2924 memcpy(to_orig
, to
, size
);
2928 memset(to_orig
, 0, size
);
2932 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto
= {
2933 .func
= bpf_skb_get_tunnel_key
,
2935 .ret_type
= RET_INTEGER
,
2936 .arg1_type
= ARG_PTR_TO_CTX
,
2937 .arg2_type
= ARG_PTR_TO_UNINIT_MEM
,
2938 .arg3_type
= ARG_CONST_SIZE
,
2939 .arg4_type
= ARG_ANYTHING
,
2942 BPF_CALL_3(bpf_skb_get_tunnel_opt
, struct sk_buff
*, skb
, u8
*, to
, u32
, size
)
2944 const struct ip_tunnel_info
*info
= skb_tunnel_info(skb
);
2947 if (unlikely(!info
||
2948 !(info
->key
.tun_flags
& TUNNEL_OPTIONS_PRESENT
))) {
2952 if (unlikely(size
< info
->options_len
)) {
2957 ip_tunnel_info_opts_get(to
, info
);
2958 if (size
> info
->options_len
)
2959 memset(to
+ info
->options_len
, 0, size
- info
->options_len
);
2961 return info
->options_len
;
2963 memset(to
, 0, size
);
2967 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto
= {
2968 .func
= bpf_skb_get_tunnel_opt
,
2970 .ret_type
= RET_INTEGER
,
2971 .arg1_type
= ARG_PTR_TO_CTX
,
2972 .arg2_type
= ARG_PTR_TO_UNINIT_MEM
,
2973 .arg3_type
= ARG_CONST_SIZE
,
2976 static struct metadata_dst __percpu
*md_dst
;
2978 BPF_CALL_4(bpf_skb_set_tunnel_key
, struct sk_buff
*, skb
,
2979 const struct bpf_tunnel_key
*, from
, u32
, size
, u64
, flags
)
2981 struct metadata_dst
*md
= this_cpu_ptr(md_dst
);
2982 u8 compat
[sizeof(struct bpf_tunnel_key
)];
2983 struct ip_tunnel_info
*info
;
2985 if (unlikely(flags
& ~(BPF_F_TUNINFO_IPV6
| BPF_F_ZERO_CSUM_TX
|
2986 BPF_F_DONT_FRAGMENT
)))
2988 if (unlikely(size
!= sizeof(struct bpf_tunnel_key
))) {
2990 case offsetof(struct bpf_tunnel_key
, tunnel_label
):
2991 case offsetof(struct bpf_tunnel_key
, tunnel_ext
):
2992 case offsetof(struct bpf_tunnel_key
, remote_ipv6
[1]):
2993 /* Fixup deprecated structure layouts here, so we have
2994 * a common path later on.
2996 memcpy(compat
, from
, size
);
2997 memset(compat
+ size
, 0, sizeof(compat
) - size
);
2998 from
= (const struct bpf_tunnel_key
*) compat
;
3004 if (unlikely((!(flags
& BPF_F_TUNINFO_IPV6
) && from
->tunnel_label
) ||
3009 dst_hold((struct dst_entry
*) md
);
3010 skb_dst_set(skb
, (struct dst_entry
*) md
);
3012 info
= &md
->u
.tun_info
;
3013 info
->mode
= IP_TUNNEL_INFO_TX
;
3015 info
->key
.tun_flags
= TUNNEL_KEY
| TUNNEL_CSUM
| TUNNEL_NOCACHE
;
3016 if (flags
& BPF_F_DONT_FRAGMENT
)
3017 info
->key
.tun_flags
|= TUNNEL_DONT_FRAGMENT
;
3019 info
->key
.tun_id
= cpu_to_be64(from
->tunnel_id
);
3020 info
->key
.tos
= from
->tunnel_tos
;
3021 info
->key
.ttl
= from
->tunnel_ttl
;
3023 if (flags
& BPF_F_TUNINFO_IPV6
) {
3024 info
->mode
|= IP_TUNNEL_INFO_IPV6
;
3025 memcpy(&info
->key
.u
.ipv6
.dst
, from
->remote_ipv6
,
3026 sizeof(from
->remote_ipv6
));
3027 info
->key
.label
= cpu_to_be32(from
->tunnel_label
) &
3028 IPV6_FLOWLABEL_MASK
;
3030 info
->key
.u
.ipv4
.dst
= cpu_to_be32(from
->remote_ipv4
);
3031 if (flags
& BPF_F_ZERO_CSUM_TX
)
3032 info
->key
.tun_flags
&= ~TUNNEL_CSUM
;
3038 static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto
= {
3039 .func
= bpf_skb_set_tunnel_key
,
3041 .ret_type
= RET_INTEGER
,
3042 .arg1_type
= ARG_PTR_TO_CTX
,
3043 .arg2_type
= ARG_PTR_TO_MEM
,
3044 .arg3_type
= ARG_CONST_SIZE
,
3045 .arg4_type
= ARG_ANYTHING
,
3048 BPF_CALL_3(bpf_skb_set_tunnel_opt
, struct sk_buff
*, skb
,
3049 const u8
*, from
, u32
, size
)
3051 struct ip_tunnel_info
*info
= skb_tunnel_info(skb
);
3052 const struct metadata_dst
*md
= this_cpu_ptr(md_dst
);
3054 if (unlikely(info
!= &md
->u
.tun_info
|| (size
& (sizeof(u32
) - 1))))
3056 if (unlikely(size
> IP_TUNNEL_OPTS_MAX
))
3059 ip_tunnel_info_opts_set(info
, from
, size
);
3064 static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto
= {
3065 .func
= bpf_skb_set_tunnel_opt
,
3067 .ret_type
= RET_INTEGER
,
3068 .arg1_type
= ARG_PTR_TO_CTX
,
3069 .arg2_type
= ARG_PTR_TO_MEM
,
3070 .arg3_type
= ARG_CONST_SIZE
,
3073 static const struct bpf_func_proto
*
3074 bpf_get_skb_set_tunnel_proto(enum bpf_func_id which
)
3077 struct metadata_dst __percpu
*tmp
;
3079 tmp
= metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX
,
3084 if (cmpxchg(&md_dst
, NULL
, tmp
))
3085 metadata_dst_free_percpu(tmp
);
3089 case BPF_FUNC_skb_set_tunnel_key
:
3090 return &bpf_skb_set_tunnel_key_proto
;
3091 case BPF_FUNC_skb_set_tunnel_opt
:
3092 return &bpf_skb_set_tunnel_opt_proto
;
3098 BPF_CALL_3(bpf_skb_under_cgroup
, struct sk_buff
*, skb
, struct bpf_map
*, map
,
3101 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
3102 struct cgroup
*cgrp
;
3105 sk
= skb_to_full_sk(skb
);
3106 if (!sk
|| !sk_fullsock(sk
))
3108 if (unlikely(idx
>= array
->map
.max_entries
))
3111 cgrp
= READ_ONCE(array
->ptrs
[idx
]);
3112 if (unlikely(!cgrp
))
3115 return sk_under_cgroup_hierarchy(sk
, cgrp
);
3118 static const struct bpf_func_proto bpf_skb_under_cgroup_proto
= {
3119 .func
= bpf_skb_under_cgroup
,
3121 .ret_type
= RET_INTEGER
,
3122 .arg1_type
= ARG_PTR_TO_CTX
,
3123 .arg2_type
= ARG_CONST_MAP_PTR
,
3124 .arg3_type
= ARG_ANYTHING
,
static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
				  unsigned long off, unsigned long len)
{
	memcpy(dst_buff, src_buff + off, len);
	return 0;
}

BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, xdp->data,
				xdp_size, bpf_xdp_copy);
}

static const struct bpf_func_proto bpf_xdp_event_output_proto = {
	.func		= bpf_xdp_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
{
	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
}

static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
	.func		= bpf_get_socket_cookie,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
{
	struct sock *sk = sk_to_full_sk(skb->sk);
	kuid_t kuid;

	if (!sk || !sk_fullsock(sk))
		return overflowuid;
	kuid = sock_net_uid(sock_net(sk), sk);
	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
}

static const struct bpf_func_proto bpf_get_socket_uid_proto = {
	.func		= bpf_get_socket_uid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

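/* Usage sketch (illustrative, not part of this file): cgroup skb and
 * socket filter programs often key per-flow accounting maps by the
 * socket cookie, which is stable for the lifetime of the socket.
 * Assuming clang/libbpf conventions:
 *
 *	SEC("cgroup_skb/egress")
 *	int count_egress(struct __sk_buff *skb)
 *	{
 *		__u64 cookie = bpf_get_socket_cookie(skb);
 *		__u32 uid = bpf_get_socket_uid(skb);
 *
 *		// update a hash map keyed by cookie/uid here
 *		return 1;	// 1 == allow for cgroup skb programs
 *	}
 */
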
3189 BPF_CALL_5(bpf_setsockopt
, struct bpf_sock_ops_kern
*, bpf_sock
,
3190 int, level
, int, optname
, char *, optval
, int, optlen
)
3192 struct sock
*sk
= bpf_sock
->sk
;
3196 if (!sk_fullsock(sk
))
3199 if (level
== SOL_SOCKET
) {
3200 if (optlen
!= sizeof(int))
3202 val
= *((int *)optval
);
3204 /* Only some socketops are supported */
3207 sk
->sk_userlocks
|= SOCK_RCVBUF_LOCK
;
3208 sk
->sk_rcvbuf
= max_t(int, val
* 2, SOCK_MIN_RCVBUF
);
3211 sk
->sk_userlocks
|= SOCK_SNDBUF_LOCK
;
3212 sk
->sk_sndbuf
= max_t(int, val
* 2, SOCK_MIN_SNDBUF
);
3214 case SO_MAX_PACING_RATE
:
3215 sk
->sk_max_pacing_rate
= val
;
3216 sk
->sk_pacing_rate
= min(sk
->sk_pacing_rate
,
3217 sk
->sk_max_pacing_rate
);
3220 sk
->sk_priority
= val
;
3225 sk
->sk_rcvlowat
= val
? : 1;
3234 } else if (level
== SOL_TCP
&&
3235 sk
->sk_prot
->setsockopt
== tcp_setsockopt
) {
3236 if (optname
== TCP_CONGESTION
) {
3237 char name
[TCP_CA_NAME_MAX
];
3238 bool reinit
= bpf_sock
->op
> BPF_SOCK_OPS_NEEDS_ECN
;
3240 strncpy(name
, optval
, min_t(long, optlen
,
3241 TCP_CA_NAME_MAX
-1));
3242 name
[TCP_CA_NAME_MAX
-1] = 0;
3243 ret
= tcp_set_congestion_control(sk
, name
, false, reinit
);
3245 struct tcp_sock
*tp
= tcp_sk(sk
);
3247 if (optlen
!= sizeof(int))
3250 val
= *((int *)optval
);
3251 /* Only some options are supported */
3254 if (val
<= 0 || tp
->data_segs_out
> 0)
3259 case TCP_BPF_SNDCWND_CLAMP
:
3263 tp
->snd_cwnd_clamp
= val
;
3264 tp
->snd_ssthresh
= val
;
3278 static const struct bpf_func_proto bpf_setsockopt_proto
= {
3279 .func
= bpf_setsockopt
,
3281 .ret_type
= RET_INTEGER
,
3282 .arg1_type
= ARG_PTR_TO_CTX
,
3283 .arg2_type
= ARG_ANYTHING
,
3284 .arg3_type
= ARG_ANYTHING
,
3285 .arg4_type
= ARG_PTR_TO_MEM
,
3286 .arg5_type
= ARG_CONST_SIZE
,
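/* Usage sketch (illustrative, not part of this file): a
 * BPF_PROG_TYPE_SOCK_OPS program can call this helper to tune a TCP
 * socket when a connection event fires, e.g. switching congestion
 * control on passive open. Assuming clang/libbpf conventions:
 *
 *	SEC("sockops")
 *	int set_cc(struct bpf_sock_ops *skops)
 *	{
 *		char cc[] = "cubic";
 *		int bufsize = 150000;
 *
 *		if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) {
 *			bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
 *				       cc, sizeof(cc));
 *			bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
 *				       &bufsize, sizeof(bufsize));
 *		}
 *		return 1;
 *	}
 */
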
3289 BPF_CALL_5(bpf_getsockopt
, struct bpf_sock_ops_kern
*, bpf_sock
,
3290 int, level
, int, optname
, char *, optval
, int, optlen
)
3292 struct sock
*sk
= bpf_sock
->sk
;
3294 if (!sk_fullsock(sk
))
3298 if (level
== SOL_TCP
&& sk
->sk_prot
->getsockopt
== tcp_getsockopt
) {
3299 if (optname
== TCP_CONGESTION
) {
3300 struct inet_connection_sock
*icsk
= inet_csk(sk
);
3302 if (!icsk
->icsk_ca_ops
|| optlen
<= 1)
3304 strncpy(optval
, icsk
->icsk_ca_ops
->name
, optlen
);
3305 optval
[optlen
- 1] = 0;
3315 memset(optval
, 0, optlen
);
3319 static const struct bpf_func_proto bpf_getsockopt_proto
= {
3320 .func
= bpf_getsockopt
,
3322 .ret_type
= RET_INTEGER
,
3323 .arg1_type
= ARG_PTR_TO_CTX
,
3324 .arg2_type
= ARG_ANYTHING
,
3325 .arg3_type
= ARG_ANYTHING
,
3326 .arg4_type
= ARG_PTR_TO_UNINIT_MEM
,
3327 .arg5_type
= ARG_CONST_SIZE
,
static const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
sock_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	/* inet and inet6 sockets are created in a process
	 * context so there is always a valid uid/gid
	 */
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

3387 static const struct bpf_func_proto
*
3388 tc_cls_act_func_proto(enum bpf_func_id func_id
)
3391 case BPF_FUNC_skb_store_bytes
:
3392 return &bpf_skb_store_bytes_proto
;
3393 case BPF_FUNC_skb_load_bytes
:
3394 return &bpf_skb_load_bytes_proto
;
3395 case BPF_FUNC_skb_pull_data
:
3396 return &bpf_skb_pull_data_proto
;
3397 case BPF_FUNC_csum_diff
:
3398 return &bpf_csum_diff_proto
;
3399 case BPF_FUNC_csum_update
:
3400 return &bpf_csum_update_proto
;
3401 case BPF_FUNC_l3_csum_replace
:
3402 return &bpf_l3_csum_replace_proto
;
3403 case BPF_FUNC_l4_csum_replace
:
3404 return &bpf_l4_csum_replace_proto
;
3405 case BPF_FUNC_clone_redirect
:
3406 return &bpf_clone_redirect_proto
;
3407 case BPF_FUNC_get_cgroup_classid
:
3408 return &bpf_get_cgroup_classid_proto
;
3409 case BPF_FUNC_skb_vlan_push
:
3410 return &bpf_skb_vlan_push_proto
;
3411 case BPF_FUNC_skb_vlan_pop
:
3412 return &bpf_skb_vlan_pop_proto
;
3413 case BPF_FUNC_skb_change_proto
:
3414 return &bpf_skb_change_proto_proto
;
3415 case BPF_FUNC_skb_change_type
:
3416 return &bpf_skb_change_type_proto
;
3417 case BPF_FUNC_skb_adjust_room
:
3418 return &bpf_skb_adjust_room_proto
;
3419 case BPF_FUNC_skb_change_tail
:
3420 return &bpf_skb_change_tail_proto
;
3421 case BPF_FUNC_skb_get_tunnel_key
:
3422 return &bpf_skb_get_tunnel_key_proto
;
3423 case BPF_FUNC_skb_set_tunnel_key
:
3424 return bpf_get_skb_set_tunnel_proto(func_id
);
3425 case BPF_FUNC_skb_get_tunnel_opt
:
3426 return &bpf_skb_get_tunnel_opt_proto
;
3427 case BPF_FUNC_skb_set_tunnel_opt
:
3428 return bpf_get_skb_set_tunnel_proto(func_id
);
3429 case BPF_FUNC_redirect
:
3430 return &bpf_redirect_proto
;
3431 case BPF_FUNC_get_route_realm
:
3432 return &bpf_get_route_realm_proto
;
3433 case BPF_FUNC_get_hash_recalc
:
3434 return &bpf_get_hash_recalc_proto
;
3435 case BPF_FUNC_set_hash_invalid
:
3436 return &bpf_set_hash_invalid_proto
;
3437 case BPF_FUNC_set_hash
:
3438 return &bpf_set_hash_proto
;
3439 case BPF_FUNC_perf_event_output
:
3440 return &bpf_skb_event_output_proto
;
3441 case BPF_FUNC_get_smp_processor_id
:
3442 return &bpf_get_smp_processor_id_proto
;
3443 case BPF_FUNC_skb_under_cgroup
:
3444 return &bpf_skb_under_cgroup_proto
;
3445 case BPF_FUNC_get_socket_cookie
:
3446 return &bpf_get_socket_cookie_proto
;
3447 case BPF_FUNC_get_socket_uid
:
3448 return &bpf_get_socket_uid_proto
;
3450 return bpf_base_func_proto(func_id
);
3454 static const struct bpf_func_proto
*
3455 xdp_func_proto(enum bpf_func_id func_id
)
3458 case BPF_FUNC_perf_event_output
:
3459 return &bpf_xdp_event_output_proto
;
3460 case BPF_FUNC_get_smp_processor_id
:
3461 return &bpf_get_smp_processor_id_proto
;
3462 case BPF_FUNC_xdp_adjust_head
:
3463 return &bpf_xdp_adjust_head_proto
;
3464 case BPF_FUNC_xdp_adjust_meta
:
3465 return &bpf_xdp_adjust_meta_proto
;
3466 case BPF_FUNC_redirect
:
3467 return &bpf_xdp_redirect_proto
;
3468 case BPF_FUNC_redirect_map
:
3469 return &bpf_xdp_redirect_map_proto
;
3471 return bpf_base_func_proto(func_id
);
3475 static const struct bpf_func_proto
*
3476 lwt_inout_func_proto(enum bpf_func_id func_id
)
3479 case BPF_FUNC_skb_load_bytes
:
3480 return &bpf_skb_load_bytes_proto
;
3481 case BPF_FUNC_skb_pull_data
:
3482 return &bpf_skb_pull_data_proto
;
3483 case BPF_FUNC_csum_diff
:
3484 return &bpf_csum_diff_proto
;
3485 case BPF_FUNC_get_cgroup_classid
:
3486 return &bpf_get_cgroup_classid_proto
;
3487 case BPF_FUNC_get_route_realm
:
3488 return &bpf_get_route_realm_proto
;
3489 case BPF_FUNC_get_hash_recalc
:
3490 return &bpf_get_hash_recalc_proto
;
3491 case BPF_FUNC_perf_event_output
:
3492 return &bpf_skb_event_output_proto
;
3493 case BPF_FUNC_get_smp_processor_id
:
3494 return &bpf_get_smp_processor_id_proto
;
3495 case BPF_FUNC_skb_under_cgroup
:
3496 return &bpf_skb_under_cgroup_proto
;
3498 return bpf_base_func_proto(func_id
);
3502 static const struct bpf_func_proto
*
3503 sock_ops_func_proto(enum bpf_func_id func_id
)
3506 case BPF_FUNC_setsockopt
:
3507 return &bpf_setsockopt_proto
;
3508 case BPF_FUNC_getsockopt
:
3509 return &bpf_getsockopt_proto
;
3510 case BPF_FUNC_sock_map_update
:
3511 return &bpf_sock_map_update_proto
;
3513 return bpf_base_func_proto(func_id
);
3517 static const struct bpf_func_proto
*sk_skb_func_proto(enum bpf_func_id func_id
)
3520 case BPF_FUNC_skb_store_bytes
:
3521 return &bpf_skb_store_bytes_proto
;
3522 case BPF_FUNC_skb_load_bytes
:
3523 return &bpf_skb_load_bytes_proto
;
3524 case BPF_FUNC_skb_pull_data
:
3525 return &bpf_skb_pull_data_proto
;
3526 case BPF_FUNC_skb_change_tail
:
3527 return &bpf_skb_change_tail_proto
;
3528 case BPF_FUNC_skb_change_head
:
3529 return &bpf_skb_change_head_proto
;
3530 case BPF_FUNC_get_socket_cookie
:
3531 return &bpf_get_socket_cookie_proto
;
3532 case BPF_FUNC_get_socket_uid
:
3533 return &bpf_get_socket_uid_proto
;
3534 case BPF_FUNC_sk_redirect_map
:
3535 return &bpf_sk_redirect_map_proto
;
3537 return bpf_base_func_proto(func_id
);
3541 static const struct bpf_func_proto
*
3542 lwt_xmit_func_proto(enum bpf_func_id func_id
)
3545 case BPF_FUNC_skb_get_tunnel_key
:
3546 return &bpf_skb_get_tunnel_key_proto
;
3547 case BPF_FUNC_skb_set_tunnel_key
:
3548 return bpf_get_skb_set_tunnel_proto(func_id
);
3549 case BPF_FUNC_skb_get_tunnel_opt
:
3550 return &bpf_skb_get_tunnel_opt_proto
;
3551 case BPF_FUNC_skb_set_tunnel_opt
:
3552 return bpf_get_skb_set_tunnel_proto(func_id
);
3553 case BPF_FUNC_redirect
:
3554 return &bpf_redirect_proto
;
3555 case BPF_FUNC_clone_redirect
:
3556 return &bpf_clone_redirect_proto
;
3557 case BPF_FUNC_skb_change_tail
:
3558 return &bpf_skb_change_tail_proto
;
3559 case BPF_FUNC_skb_change_head
:
3560 return &bpf_skb_change_head_proto
;
3561 case BPF_FUNC_skb_store_bytes
:
3562 return &bpf_skb_store_bytes_proto
;
3563 case BPF_FUNC_csum_update
:
3564 return &bpf_csum_update_proto
;
3565 case BPF_FUNC_l3_csum_replace
:
3566 return &bpf_l3_csum_replace_proto
;
3567 case BPF_FUNC_l4_csum_replace
:
3568 return &bpf_l4_csum_replace_proto
;
3569 case BPF_FUNC_set_hash_invalid
:
3570 return &bpf_set_hash_invalid_proto
;
3572 return lwt_inout_func_proto(func_id
);
3576 static bool bpf_skb_is_valid_access(int off
, int size
, enum bpf_access_type type
,
3577 struct bpf_insn_access_aux
*info
)
3579 const int size_default
= sizeof(__u32
);
3581 if (off
< 0 || off
>= sizeof(struct __sk_buff
))
3584 /* The verifier guarantees that size > 0. */
3585 if (off
% size
!= 0)
3589 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3590 if (off
+ size
> offsetofend(struct __sk_buff
, cb
[4]))
3593 case bpf_ctx_range_till(struct __sk_buff
, remote_ip6
[0], remote_ip6
[3]):
3594 case bpf_ctx_range_till(struct __sk_buff
, local_ip6
[0], local_ip6
[3]):
3595 case bpf_ctx_range_till(struct __sk_buff
, remote_ip4
, remote_ip4
):
3596 case bpf_ctx_range_till(struct __sk_buff
, local_ip4
, local_ip4
):
3597 case bpf_ctx_range(struct __sk_buff
, data
):
3598 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3599 case bpf_ctx_range(struct __sk_buff
, data_end
):
3600 if (size
!= size_default
)
3604 /* Only narrow read access allowed for now. */
3605 if (type
== BPF_WRITE
) {
3606 if (size
!= size_default
)
3609 bpf_ctx_record_field_size(info
, size_default
);
3610 if (!bpf_ctx_narrow_access_ok(off
, size
, size_default
))
3618 static bool sk_filter_is_valid_access(int off
, int size
,
3619 enum bpf_access_type type
,
3620 struct bpf_insn_access_aux
*info
)
3623 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3624 case bpf_ctx_range(struct __sk_buff
, data
):
3625 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3626 case bpf_ctx_range(struct __sk_buff
, data_end
):
3627 case bpf_ctx_range_till(struct __sk_buff
, family
, local_port
):
3631 if (type
== BPF_WRITE
) {
3633 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3640 return bpf_skb_is_valid_access(off
, size
, type
, info
);
3643 static bool lwt_is_valid_access(int off
, int size
,
3644 enum bpf_access_type type
,
3645 struct bpf_insn_access_aux
*info
)
3648 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3649 case bpf_ctx_range_till(struct __sk_buff
, family
, local_port
):
3650 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3654 if (type
== BPF_WRITE
) {
3656 case bpf_ctx_range(struct __sk_buff
, mark
):
3657 case bpf_ctx_range(struct __sk_buff
, priority
):
3658 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3666 case bpf_ctx_range(struct __sk_buff
, data
):
3667 info
->reg_type
= PTR_TO_PACKET
;
3669 case bpf_ctx_range(struct __sk_buff
, data_end
):
3670 info
->reg_type
= PTR_TO_PACKET_END
;
3674 return bpf_skb_is_valid_access(off
, size
, type
, info
);
3677 static bool sock_filter_is_valid_access(int off
, int size
,
3678 enum bpf_access_type type
,
3679 struct bpf_insn_access_aux
*info
)
3681 if (type
== BPF_WRITE
) {
3683 case offsetof(struct bpf_sock
, bound_dev_if
):
3684 case offsetof(struct bpf_sock
, mark
):
3685 case offsetof(struct bpf_sock
, priority
):
3692 if (off
< 0 || off
+ size
> sizeof(struct bpf_sock
))
3694 /* The verifier guarantees that size > 0. */
3695 if (off
% size
!= 0)
3697 if (size
!= sizeof(__u32
))
3703 static int bpf_unclone_prologue(struct bpf_insn
*insn_buf
, bool direct_write
,
3704 const struct bpf_prog
*prog
, int drop_verdict
)
3706 struct bpf_insn
*insn
= insn_buf
;
3711 /* if (!skb->cloned)
3714 * (Fast-path, otherwise approximation that we might be
3715 * a clone, do the rest in helper.)
3717 *insn
++ = BPF_LDX_MEM(BPF_B
, BPF_REG_6
, BPF_REG_1
, CLONED_OFFSET());
3718 *insn
++ = BPF_ALU32_IMM(BPF_AND
, BPF_REG_6
, CLONED_MASK
);
3719 *insn
++ = BPF_JMP_IMM(BPF_JEQ
, BPF_REG_6
, 0, 7);
3721 /* ret = bpf_skb_pull_data(skb, 0); */
3722 *insn
++ = BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
);
3723 *insn
++ = BPF_ALU64_REG(BPF_XOR
, BPF_REG_2
, BPF_REG_2
);
3724 *insn
++ = BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3725 BPF_FUNC_skb_pull_data
);
3728 * return TC_ACT_SHOT;
3730 *insn
++ = BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2);
3731 *insn
++ = BPF_ALU32_IMM(BPF_MOV
, BPF_REG_0
, drop_verdict
);
3732 *insn
++ = BPF_EXIT_INSN();
3735 *insn
++ = BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
);
3737 *insn
++ = prog
->insnsi
[0];
3739 return insn
- insn_buf
;
3742 static int tc_cls_act_prologue(struct bpf_insn
*insn_buf
, bool direct_write
,
3743 const struct bpf_prog
*prog
)
3745 return bpf_unclone_prologue(insn_buf
, direct_write
, prog
, TC_ACT_SHOT
);
3748 static bool tc_cls_act_is_valid_access(int off
, int size
,
3749 enum bpf_access_type type
,
3750 struct bpf_insn_access_aux
*info
)
3752 if (type
== BPF_WRITE
) {
3754 case bpf_ctx_range(struct __sk_buff
, mark
):
3755 case bpf_ctx_range(struct __sk_buff
, tc_index
):
3756 case bpf_ctx_range(struct __sk_buff
, priority
):
3757 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3758 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3766 case bpf_ctx_range(struct __sk_buff
, data
):
3767 info
->reg_type
= PTR_TO_PACKET
;
3769 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3770 info
->reg_type
= PTR_TO_PACKET_META
;
3772 case bpf_ctx_range(struct __sk_buff
, data_end
):
3773 info
->reg_type
= PTR_TO_PACKET_END
;
3775 case bpf_ctx_range_till(struct __sk_buff
, family
, local_port
):
3779 return bpf_skb_is_valid_access(off
, size
, type
, info
);
static bool __is_valid_xdp_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct xdp_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static bool xdp_is_valid_access(int off, int size,
				enum bpf_access_type type,
				struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE)
		return false;

	switch (off) {
	case offsetof(struct xdp_md, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct xdp_md, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case offsetof(struct xdp_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_xdp_access(off, size);
}

void bpf_warn_invalid_xdp_action(u32 act)
{
	const u32 act_max = XDP_REDIRECT;

	WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
		  act > act_max ? "Illegal" : "Driver unsupported",
		  act);
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);

3826 static bool __is_valid_sock_ops_access(int off
, int size
)
3828 if (off
< 0 || off
>= sizeof(struct bpf_sock_ops
))
3830 /* The verifier guarantees that size > 0. */
3831 if (off
% size
!= 0)
3833 if (size
!= sizeof(__u32
))
3839 static bool sock_ops_is_valid_access(int off
, int size
,
3840 enum bpf_access_type type
,
3841 struct bpf_insn_access_aux
*info
)
3843 if (type
== BPF_WRITE
) {
3845 case offsetof(struct bpf_sock_ops
, op
) ...
3846 offsetof(struct bpf_sock_ops
, replylong
[3]):
3853 return __is_valid_sock_ops_access(off
, size
);
3856 static int sk_skb_prologue(struct bpf_insn
*insn_buf
, bool direct_write
,
3857 const struct bpf_prog
*prog
)
3859 return bpf_unclone_prologue(insn_buf
, direct_write
, prog
, SK_DROP
);
3862 static bool sk_skb_is_valid_access(int off
, int size
,
3863 enum bpf_access_type type
,
3864 struct bpf_insn_access_aux
*info
)
3867 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3868 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3872 if (type
== BPF_WRITE
) {
3874 case bpf_ctx_range(struct __sk_buff
, tc_index
):
3875 case bpf_ctx_range(struct __sk_buff
, priority
):
3883 case bpf_ctx_range(struct __sk_buff
, mark
):
3885 case bpf_ctx_range(struct __sk_buff
, data
):
3886 info
->reg_type
= PTR_TO_PACKET
;
3888 case bpf_ctx_range(struct __sk_buff
, data_end
):
3889 info
->reg_type
= PTR_TO_PACKET_END
;
3893 return bpf_skb_is_valid_access(off
, size
, type
, info
);
3896 static u32
bpf_convert_ctx_access(enum bpf_access_type type
,
3897 const struct bpf_insn
*si
,
3898 struct bpf_insn
*insn_buf
,
3899 struct bpf_prog
*prog
, u32
*target_size
)
3901 struct bpf_insn
*insn
= insn_buf
;
3905 case offsetof(struct __sk_buff
, len
):
3906 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3907 bpf_target_off(struct sk_buff
, len
, 4,
3911 case offsetof(struct __sk_buff
, protocol
):
3912 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3913 bpf_target_off(struct sk_buff
, protocol
, 2,
3917 case offsetof(struct __sk_buff
, vlan_proto
):
3918 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3919 bpf_target_off(struct sk_buff
, vlan_proto
, 2,
3923 case offsetof(struct __sk_buff
, priority
):
3924 if (type
== BPF_WRITE
)
3925 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3926 bpf_target_off(struct sk_buff
, priority
, 4,
3929 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3930 bpf_target_off(struct sk_buff
, priority
, 4,
3934 case offsetof(struct __sk_buff
, ingress_ifindex
):
3935 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3936 bpf_target_off(struct sk_buff
, skb_iif
, 4,
3940 case offsetof(struct __sk_buff
, ifindex
):
3941 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, dev
),
3942 si
->dst_reg
, si
->src_reg
,
3943 offsetof(struct sk_buff
, dev
));
3944 *insn
++ = BPF_JMP_IMM(BPF_JEQ
, si
->dst_reg
, 0, 1);
3945 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
3946 bpf_target_off(struct net_device
, ifindex
, 4,
3950 case offsetof(struct __sk_buff
, hash
):
3951 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3952 bpf_target_off(struct sk_buff
, hash
, 4,
3956 case offsetof(struct __sk_buff
, mark
):
3957 if (type
== BPF_WRITE
)
3958 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3959 bpf_target_off(struct sk_buff
, mark
, 4,
3962 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3963 bpf_target_off(struct sk_buff
, mark
, 4,
3967 case offsetof(struct __sk_buff
, pkt_type
):
3969 *insn
++ = BPF_LDX_MEM(BPF_B
, si
->dst_reg
, si
->src_reg
,
3971 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, PKT_TYPE_MAX
);
3972 #ifdef __BIG_ENDIAN_BITFIELD
3973 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, 5);
3977 case offsetof(struct __sk_buff
, queue_mapping
):
3978 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3979 bpf_target_off(struct sk_buff
, queue_mapping
, 2,
3983 case offsetof(struct __sk_buff
, vlan_present
):
3984 case offsetof(struct __sk_buff
, vlan_tci
):
3985 BUILD_BUG_ON(VLAN_TAG_PRESENT
!= 0x1000);
3987 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3988 bpf_target_off(struct sk_buff
, vlan_tci
, 2,
3990 if (si
->off
== offsetof(struct __sk_buff
, vlan_tci
)) {
3991 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
,
3994 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, 12);
3995 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, 1);
3999 case offsetof(struct __sk_buff
, cb
[0]) ...
4000 offsetofend(struct __sk_buff
, cb
[4]) - 1:
4001 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb
, data
) < 20);
4002 BUILD_BUG_ON((offsetof(struct sk_buff
, cb
) +
4003 offsetof(struct qdisc_skb_cb
, data
)) %
4006 prog
->cb_access
= 1;
4008 off
-= offsetof(struct __sk_buff
, cb
[0]);
4009 off
+= offsetof(struct sk_buff
, cb
);
4010 off
+= offsetof(struct qdisc_skb_cb
, data
);
4011 if (type
== BPF_WRITE
)
4012 *insn
++ = BPF_STX_MEM(BPF_SIZE(si
->code
), si
->dst_reg
,
4015 *insn
++ = BPF_LDX_MEM(BPF_SIZE(si
->code
), si
->dst_reg
,
4019 case offsetof(struct __sk_buff
, tc_classid
):
4020 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb
, tc_classid
) != 2);
4023 off
-= offsetof(struct __sk_buff
, tc_classid
);
4024 off
+= offsetof(struct sk_buff
, cb
);
4025 off
+= offsetof(struct qdisc_skb_cb
, tc_classid
);
4027 if (type
== BPF_WRITE
)
4028 *insn
++ = BPF_STX_MEM(BPF_H
, si
->dst_reg
,
4031 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
,
4035 case offsetof(struct __sk_buff
, data
):
4036 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, data
),
4037 si
->dst_reg
, si
->src_reg
,
4038 offsetof(struct sk_buff
, data
));
4041 case offsetof(struct __sk_buff
, data_meta
):
4043 off
-= offsetof(struct __sk_buff
, data_meta
);
4044 off
+= offsetof(struct sk_buff
, cb
);
4045 off
+= offsetof(struct bpf_skb_data_end
, data_meta
);
4046 *insn
++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si
->dst_reg
,
4050 case offsetof(struct __sk_buff
, data_end
):
4052 off
-= offsetof(struct __sk_buff
, data_end
);
4053 off
+= offsetof(struct sk_buff
, cb
);
4054 off
+= offsetof(struct bpf_skb_data_end
, data_end
);
4055 *insn
++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si
->dst_reg
,
4059 case offsetof(struct __sk_buff
, tc_index
):
4060 #ifdef CONFIG_NET_SCHED
4061 if (type
== BPF_WRITE
)
4062 *insn
++ = BPF_STX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
4063 bpf_target_off(struct sk_buff
, tc_index
, 2,
4066 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
4067 bpf_target_off(struct sk_buff
, tc_index
, 2,
4071 if (type
== BPF_WRITE
)
4072 *insn
++ = BPF_MOV64_REG(si
->dst_reg
, si
->dst_reg
);
4074 *insn
++ = BPF_MOV64_IMM(si
->dst_reg
, 0);
4078 case offsetof(struct __sk_buff
, napi_id
):
4079 #if defined(CONFIG_NET_RX_BUSY_POLL)
4080 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4081 bpf_target_off(struct sk_buff
, napi_id
, 4,
4083 *insn
++ = BPF_JMP_IMM(BPF_JGE
, si
->dst_reg
, MIN_NAPI_ID
, 1);
4084 *insn
++ = BPF_MOV64_IMM(si
->dst_reg
, 0);
4087 *insn
++ = BPF_MOV64_IMM(si
->dst_reg
, 0);
4090 case offsetof(struct __sk_buff
, family
):
4091 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_family
) != 2);
4093 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4094 si
->dst_reg
, si
->src_reg
,
4095 offsetof(struct sk_buff
, sk
));
4096 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4097 bpf_target_off(struct sock_common
,
4101 case offsetof(struct __sk_buff
, remote_ip4
):
4102 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_daddr
) != 4);
4104 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4105 si
->dst_reg
, si
->src_reg
,
4106 offsetof(struct sk_buff
, sk
));
4107 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4108 bpf_target_off(struct sock_common
,
4112 case offsetof(struct __sk_buff
, local_ip4
):
4113 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4114 skc_rcv_saddr
) != 4);
4116 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4117 si
->dst_reg
, si
->src_reg
,
4118 offsetof(struct sk_buff
, sk
));
4119 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4120 bpf_target_off(struct sock_common
,
4124 case offsetof(struct __sk_buff
, remote_ip6
[0]) ...
4125 offsetof(struct __sk_buff
, remote_ip6
[3]):
4126 #if IS_ENABLED(CONFIG_IPV6)
4127 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4128 skc_v6_daddr
.s6_addr32
[0]) != 4);
4131 off
-= offsetof(struct __sk_buff
, remote_ip6
[0]);
4133 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4134 si
->dst_reg
, si
->src_reg
,
4135 offsetof(struct sk_buff
, sk
));
4136 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4137 offsetof(struct sock_common
,
4138 skc_v6_daddr
.s6_addr32
[0]) +
4141 *insn
++ = BPF_MOV32_IMM(si
->dst_reg
, 0);
4144 case offsetof(struct __sk_buff
, local_ip6
[0]) ...
4145 offsetof(struct __sk_buff
, local_ip6
[3]):
4146 #if IS_ENABLED(CONFIG_IPV6)
4147 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4148 skc_v6_rcv_saddr
.s6_addr32
[0]) != 4);
4151 off
-= offsetof(struct __sk_buff
, local_ip6
[0]);
4153 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4154 si
->dst_reg
, si
->src_reg
,
4155 offsetof(struct sk_buff
, sk
));
4156 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4157 offsetof(struct sock_common
,
4158 skc_v6_rcv_saddr
.s6_addr32
[0]) +
4161 *insn
++ = BPF_MOV32_IMM(si
->dst_reg
, 0);
4165 case offsetof(struct __sk_buff
, remote_port
):
4166 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_dport
) != 2);
4168 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4169 si
->dst_reg
, si
->src_reg
,
4170 offsetof(struct sk_buff
, sk
));
4171 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4172 bpf_target_off(struct sock_common
,
4175 #ifndef __BIG_ENDIAN_BITFIELD
4176 *insn
++ = BPF_ALU32_IMM(BPF_LSH
, si
->dst_reg
, 16);
4180 case offsetof(struct __sk_buff
, local_port
):
4181 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_num
) != 2);
4183 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4184 si
->dst_reg
, si
->src_reg
,
4185 offsetof(struct sk_buff
, sk
));
4186 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4187 bpf_target_off(struct sock_common
,
4188 skc_num
, 2, target_size
));
4192 return insn
- insn_buf
;
4195 static u32
sock_filter_convert_ctx_access(enum bpf_access_type type
,
4196 const struct bpf_insn
*si
,
4197 struct bpf_insn
*insn_buf
,
4198 struct bpf_prog
*prog
, u32
*target_size
)
4200 struct bpf_insn
*insn
= insn_buf
;
4203 case offsetof(struct bpf_sock
, bound_dev_if
):
4204 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_bound_dev_if
) != 4);
4206 if (type
== BPF_WRITE
)
4207 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4208 offsetof(struct sock
, sk_bound_dev_if
));
4210 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4211 offsetof(struct sock
, sk_bound_dev_if
));
4214 case offsetof(struct bpf_sock
, mark
):
4215 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_mark
) != 4);
4217 if (type
== BPF_WRITE
)
4218 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4219 offsetof(struct sock
, sk_mark
));
4221 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4222 offsetof(struct sock
, sk_mark
));
4225 case offsetof(struct bpf_sock
, priority
):
4226 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_priority
) != 4);
4228 if (type
== BPF_WRITE
)
4229 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4230 offsetof(struct sock
, sk_priority
));
4232 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4233 offsetof(struct sock
, sk_priority
));
4236 case offsetof(struct bpf_sock
, family
):
4237 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_family
) != 2);
4239 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
4240 offsetof(struct sock
, sk_family
));
4243 case offsetof(struct bpf_sock
, type
):
4244 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4245 offsetof(struct sock
, __sk_flags_offset
));
4246 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, SK_FL_TYPE_MASK
);
4247 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, SK_FL_TYPE_SHIFT
);
4250 case offsetof(struct bpf_sock
, protocol
):
4251 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4252 offsetof(struct sock
, __sk_flags_offset
));
4253 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, SK_FL_PROTO_MASK
);
4254 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, SK_FL_PROTO_SHIFT
);
4258 return insn
- insn_buf
;
4261 static u32
tc_cls_act_convert_ctx_access(enum bpf_access_type type
,
4262 const struct bpf_insn
*si
,
4263 struct bpf_insn
*insn_buf
,
4264 struct bpf_prog
*prog
, u32
*target_size
)
4266 struct bpf_insn
*insn
= insn_buf
;
4269 case offsetof(struct __sk_buff
, ifindex
):
4270 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, dev
),
4271 si
->dst_reg
, si
->src_reg
,
4272 offsetof(struct sk_buff
, dev
));
4273 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4274 bpf_target_off(struct net_device
, ifindex
, 4,
4278 return bpf_convert_ctx_access(type
, si
, insn_buf
, prog
,
4282 return insn
- insn_buf
;
4285 static u32
xdp_convert_ctx_access(enum bpf_access_type type
,
4286 const struct bpf_insn
*si
,
4287 struct bpf_insn
*insn_buf
,
4288 struct bpf_prog
*prog
, u32
*target_size
)
4290 struct bpf_insn
*insn
= insn_buf
;
4293 case offsetof(struct xdp_md
, data
):
4294 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff
, data
),
4295 si
->dst_reg
, si
->src_reg
,
4296 offsetof(struct xdp_buff
, data
));
4298 case offsetof(struct xdp_md
, data_meta
):
4299 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff
, data_meta
),
4300 si
->dst_reg
, si
->src_reg
,
4301 offsetof(struct xdp_buff
, data_meta
));
4303 case offsetof(struct xdp_md
, data_end
):
4304 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff
, data_end
),
4305 si
->dst_reg
, si
->src_reg
,
4306 offsetof(struct xdp_buff
, data_end
));
4310 return insn
- insn_buf
;
4313 static u32
sock_ops_convert_ctx_access(enum bpf_access_type type
,
4314 const struct bpf_insn
*si
,
4315 struct bpf_insn
*insn_buf
,
4316 struct bpf_prog
*prog
,
4319 struct bpf_insn
*insn
= insn_buf
;
4323 case offsetof(struct bpf_sock_ops
, op
) ...
4324 offsetof(struct bpf_sock_ops
, replylong
[3]):
4325 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops
, op
) !=
4326 FIELD_SIZEOF(struct bpf_sock_ops_kern
, op
));
4327 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops
, reply
) !=
4328 FIELD_SIZEOF(struct bpf_sock_ops_kern
, reply
));
4329 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops
, replylong
) !=
4330 FIELD_SIZEOF(struct bpf_sock_ops_kern
, replylong
));
4332 off
-= offsetof(struct bpf_sock_ops
, op
);
4333 off
+= offsetof(struct bpf_sock_ops_kern
, op
);
4334 if (type
== BPF_WRITE
)
4335 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4338 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4342 case offsetof(struct bpf_sock_ops
, family
):
4343 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_family
) != 2);
4345 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4346 struct bpf_sock_ops_kern
, sk
),
4347 si
->dst_reg
, si
->src_reg
,
4348 offsetof(struct bpf_sock_ops_kern
, sk
));
4349 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4350 offsetof(struct sock_common
, skc_family
));
4353 case offsetof(struct bpf_sock_ops
, remote_ip4
):
4354 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_daddr
) != 4);
4356 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4357 struct bpf_sock_ops_kern
, sk
),
4358 si
->dst_reg
, si
->src_reg
,
4359 offsetof(struct bpf_sock_ops_kern
, sk
));
4360 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4361 offsetof(struct sock_common
, skc_daddr
));
4364 case offsetof(struct bpf_sock_ops
, local_ip4
):
4365 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_rcv_saddr
) != 4);
4367 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4368 struct bpf_sock_ops_kern
, sk
),
4369 si
->dst_reg
, si
->src_reg
,
4370 offsetof(struct bpf_sock_ops_kern
, sk
));
4371 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4372 offsetof(struct sock_common
,
4376 case offsetof(struct bpf_sock_ops
, remote_ip6
[0]) ...
4377 offsetof(struct bpf_sock_ops
, remote_ip6
[3]):
4378 #if IS_ENABLED(CONFIG_IPV6)
4379 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4380 skc_v6_daddr
.s6_addr32
[0]) != 4);
4383 off
-= offsetof(struct bpf_sock_ops
, remote_ip6
[0]);
4384 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4385 struct bpf_sock_ops_kern
, sk
),
4386 si
->dst_reg
, si
->src_reg
,
4387 offsetof(struct bpf_sock_ops_kern
, sk
));
4388 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4389 offsetof(struct sock_common
,
4390 skc_v6_daddr
.s6_addr32
[0]) +
4393 *insn
++ = BPF_MOV32_IMM(si
->dst_reg
, 0);
4397 case offsetof(struct bpf_sock_ops
, local_ip6
[0]) ...
4398 offsetof(struct bpf_sock_ops
, local_ip6
[3]):
4399 #if IS_ENABLED(CONFIG_IPV6)
4400 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4401 skc_v6_rcv_saddr
.s6_addr32
[0]) != 4);
4404 off
-= offsetof(struct bpf_sock_ops
, local_ip6
[0]);
4405 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4406 struct bpf_sock_ops_kern
, sk
),
4407 si
->dst_reg
, si
->src_reg
,
4408 offsetof(struct bpf_sock_ops_kern
, sk
));
4409 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4410 offsetof(struct sock_common
,
4411 skc_v6_rcv_saddr
.s6_addr32
[0]) +
4414 *insn
++ = BPF_MOV32_IMM(si
->dst_reg
, 0);
4418 case offsetof(struct bpf_sock_ops
, remote_port
):
4419 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_dport
) != 2);
4421 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4422 struct bpf_sock_ops_kern
, sk
),
4423 si
->dst_reg
, si
->src_reg
,
4424 offsetof(struct bpf_sock_ops_kern
, sk
));
4425 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4426 offsetof(struct sock_common
, skc_dport
));
4427 #ifndef __BIG_ENDIAN_BITFIELD
4428 *insn
++ = BPF_ALU32_IMM(BPF_LSH
, si
->dst_reg
, 16);
4432 case offsetof(struct bpf_sock_ops
, local_port
):
4433 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_num
) != 2);
4435 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
4436 struct bpf_sock_ops_kern
, sk
),
4437 si
->dst_reg
, si
->src_reg
,
4438 offsetof(struct bpf_sock_ops_kern
, sk
));
4439 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4440 offsetof(struct sock_common
, skc_num
));
4443 return insn
- insn_buf
;
4446 static u32
sk_skb_convert_ctx_access(enum bpf_access_type type
,
4447 const struct bpf_insn
*si
,
4448 struct bpf_insn
*insn_buf
,
4449 struct bpf_prog
*prog
, u32
*target_size
)
4451 struct bpf_insn
*insn
= insn_buf
;
4455 case offsetof(struct __sk_buff
, data_end
):
4457 off
-= offsetof(struct __sk_buff
, data_end
);
4458 off
+= offsetof(struct sk_buff
, cb
);
4459 off
+= offsetof(struct tcp_skb_cb
, bpf
.data_end
);
4460 *insn
++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si
->dst_reg
,
4464 return bpf_convert_ctx_access(type
, si
, insn_buf
, prog
,
4468 return insn
- insn_buf
;
const struct bpf_verifier_ops sk_filter_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops sk_filter_prog_ops = {
};

const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops tc_cls_act_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops xdp_verifier_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
};

const struct bpf_prog_ops xdp_prog_ops = {
	.test_run		= bpf_prog_test_run_xdp,
};

const struct bpf_verifier_ops cg_skb_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops cg_skb_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_inout_verifier_ops = {
	.get_func_proto		= lwt_inout_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_inout_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops lwt_xmit_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops cg_sock_verifier_ops = {
	.get_func_proto		= sock_filter_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= sock_filter_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_prog_ops = {
};

const struct bpf_verifier_ops sock_ops_verifier_ops = {
	.get_func_proto		= sock_ops_func_proto,
	.is_valid_access	= sock_ops_is_valid_access,
	.convert_ctx_access	= sock_ops_convert_ctx_access,
};

const struct bpf_prog_ops sock_ops_prog_ops = {
};

const struct bpf_verifier_ops sk_skb_verifier_ops = {
	.get_func_proto		= sk_skb_func_proto,
	.is_valid_access	= sk_skb_is_valid_access,
	.convert_ctx_access	= sk_skb_convert_ctx_access,
	.gen_prologue		= sk_skb_prologue,
};

const struct bpf_prog_ops sk_skb_prog_ops = {
};

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

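/* Usage sketch (illustrative, not part of this file): from user space
 * the classic or eBPF socket filter attached with SO_ATTACH_FILTER or
 * SO_ATTACH_BPF is removed via SO_DETACH_FILTER, which ends up here.
 * The option value is ignored by the kernel; passing a dummy int is
 * the conventional pattern:
 *
 *	int dummy = 1;
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
 *		       &dummy, sizeof(dummy)) < 0)
 *		perror("SO_DETACH_FILTER");
 */
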
4580 int sk_get_filter(struct sock
*sk
, struct sock_filter __user
*ubuf
,
4583 struct sock_fprog_kern
*fprog
;
4584 struct sk_filter
*filter
;
4588 filter
= rcu_dereference_protected(sk
->sk_filter
,
4589 lockdep_sock_is_held(sk
));
4593 /* We're copying the filter that has been originally attached,
4594 * so no conversion/decode needed anymore. eBPF programs that
4595 * have no original program cannot be dumped through this.
4598 fprog
= filter
->prog
->orig_prog
;
4604 /* User space only enquires number of filter blocks. */
4608 if (len
< fprog
->len
)
4612 if (copy_to_user(ubuf
, fprog
->filter
, bpf_classic_proglen(fprog
)))
4615 /* Instead of bytes, the API requests to return the number