/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
24 #include <linux/module.h>
25 #include <linux/types.h>
27 #include <linux/fcntl.h>
28 #include <linux/socket.h>
29 #include <linux/sock_diag.h>
31 #include <linux/inet.h>
32 #include <linux/netdevice.h>
33 #include <linux/if_packet.h>
34 #include <linux/if_arp.h>
35 #include <linux/gfp.h>
37 #include <net/protocol.h>
38 #include <net/netlink.h>
39 #include <linux/skbuff.h>
41 #include <net/flow_dissector.h>
42 #include <linux/errno.h>
43 #include <linux/timer.h>
44 #include <linux/uaccess.h>
45 #include <asm/unaligned.h>
46 #include <asm/cmpxchg.h>
47 #include <linux/filter.h>
48 #include <linux/ratelimit.h>
49 #include <linux/seccomp.h>
50 #include <linux/if_vlan.h>
51 #include <linux/bpf.h>
52 #include <net/sch_generic.h>
53 #include <net/cls_cgroup.h>
54 #include <net/dst_metadata.h>
56 #include <net/sock_reuseport.h>
57 #include <net/busy_poll.h>
59 #include <linux/bpf_trace.h>
62 * sk_filter_trim_cap - run a packet through a socket filter
63 * @sk: sock associated with &sk_buff
64 * @skb: buffer to filter
65 * @cap: limit on how short the eBPF program may trim the packet
67 * Run the eBPF program and then cut skb->data to correct size returned by
68 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
69 * than pkt_len we keep whole skb->data. This is the socket level
70 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
71 * be accepted or -EPERM if the packet should be tossed.
74 int sk_filter_trim_cap(struct sock
*sk
, struct sk_buff
*skb
, unsigned int cap
)
77 struct sk_filter
*filter
;
80 * If the skb was allocated from pfmemalloc reserves, only
81 * allow SOCK_MEMALLOC sockets to use it as this socket is
84 if (skb_pfmemalloc(skb
) && !sock_flag(sk
, SOCK_MEMALLOC
)) {
85 NET_INC_STATS(sock_net(sk
), LINUX_MIB_PFMEMALLOCDROP
);
88 err
= BPF_CGROUP_RUN_PROG_INET_INGRESS(sk
, skb
);
92 err
= security_sock_rcv_skb(sk
, skb
);
97 filter
= rcu_dereference(sk
->sk_filter
);
99 struct sock
*save_sk
= skb
->sk
;
100 unsigned int pkt_len
;
103 pkt_len
= bpf_prog_run_save_cb(filter
->prog
, skb
);
105 err
= pkt_len
? pskb_trim(skb
, max(cap
, pkt_len
)) : -EPERM
;
111 EXPORT_SYMBOL(sk_filter_trim_cap
);
113 BPF_CALL_1(__skb_get_pay_offset
, struct sk_buff
*, skb
)
115 return skb_get_poff(skb
);
118 BPF_CALL_3(__skb_get_nlattr
, struct sk_buff
*, skb
, u32
, a
, u32
, x
)
122 if (skb_is_nonlinear(skb
))
125 if (skb
->len
< sizeof(struct nlattr
))
128 if (a
> skb
->len
- sizeof(struct nlattr
))
131 nla
= nla_find((struct nlattr
*) &skb
->data
[a
], skb
->len
- a
, x
);
133 return (void *) nla
- (void *) skb
->data
;
138 BPF_CALL_3(__skb_get_nlattr_nest
, struct sk_buff
*, skb
, u32
, a
, u32
, x
)
142 if (skb_is_nonlinear(skb
))
145 if (skb
->len
< sizeof(struct nlattr
))
148 if (a
> skb
->len
- sizeof(struct nlattr
))
151 nla
= (struct nlattr
*) &skb
->data
[a
];
152 if (nla
->nla_len
> skb
->len
- a
)
155 nla
= nla_find_nested(nla
, x
);
157 return (void *) nla
- (void *) skb
->data
;
/* BPF helper backing the classic SKF_AD_CPU extension: current CPU id. */
BPF_CALL_0(__get_raw_cpu_id)
{
	return raw_smp_processor_id();
}
167 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto
= {
168 .func
= __get_raw_cpu_id
,
170 .ret_type
= RET_INTEGER
,
173 static u32
convert_skb_access(int skb_field
, int dst_reg
, int src_reg
,
174 struct bpf_insn
*insn_buf
)
176 struct bpf_insn
*insn
= insn_buf
;
180 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, mark
) != 4);
182 *insn
++ = BPF_LDX_MEM(BPF_W
, dst_reg
, src_reg
,
183 offsetof(struct sk_buff
, mark
));
187 *insn
++ = BPF_LDX_MEM(BPF_B
, dst_reg
, src_reg
, PKT_TYPE_OFFSET());
188 *insn
++ = BPF_ALU32_IMM(BPF_AND
, dst_reg
, PKT_TYPE_MAX
);
189 #ifdef __BIG_ENDIAN_BITFIELD
190 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, dst_reg
, 5);
195 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, queue_mapping
) != 2);
197 *insn
++ = BPF_LDX_MEM(BPF_H
, dst_reg
, src_reg
,
198 offsetof(struct sk_buff
, queue_mapping
));
201 case SKF_AD_VLAN_TAG
:
202 case SKF_AD_VLAN_TAG_PRESENT
:
203 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, vlan_tci
) != 2);
204 BUILD_BUG_ON(VLAN_TAG_PRESENT
!= 0x1000);
206 /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
207 *insn
++ = BPF_LDX_MEM(BPF_H
, dst_reg
, src_reg
,
208 offsetof(struct sk_buff
, vlan_tci
));
209 if (skb_field
== SKF_AD_VLAN_TAG
) {
210 *insn
++ = BPF_ALU32_IMM(BPF_AND
, dst_reg
,
214 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, dst_reg
, 12);
216 *insn
++ = BPF_ALU32_IMM(BPF_AND
, dst_reg
, 1);
221 return insn
- insn_buf
;
224 static bool convert_bpf_extensions(struct sock_filter
*fp
,
225 struct bpf_insn
**insnp
)
227 struct bpf_insn
*insn
= *insnp
;
231 case SKF_AD_OFF
+ SKF_AD_PROTOCOL
:
232 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, protocol
) != 2);
234 /* A = *(u16 *) (CTX + offsetof(protocol)) */
235 *insn
++ = BPF_LDX_MEM(BPF_H
, BPF_REG_A
, BPF_REG_CTX
,
236 offsetof(struct sk_buff
, protocol
));
237 /* A = ntohs(A) [emitting a nop or swap16] */
238 *insn
= BPF_ENDIAN(BPF_FROM_BE
, BPF_REG_A
, 16);
241 case SKF_AD_OFF
+ SKF_AD_PKTTYPE
:
242 cnt
= convert_skb_access(SKF_AD_PKTTYPE
, BPF_REG_A
, BPF_REG_CTX
, insn
);
246 case SKF_AD_OFF
+ SKF_AD_IFINDEX
:
247 case SKF_AD_OFF
+ SKF_AD_HATYPE
:
248 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device
, ifindex
) != 4);
249 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device
, type
) != 2);
251 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, dev
),
252 BPF_REG_TMP
, BPF_REG_CTX
,
253 offsetof(struct sk_buff
, dev
));
254 /* if (tmp != 0) goto pc + 1 */
255 *insn
++ = BPF_JMP_IMM(BPF_JNE
, BPF_REG_TMP
, 0, 1);
256 *insn
++ = BPF_EXIT_INSN();
257 if (fp
->k
== SKF_AD_OFF
+ SKF_AD_IFINDEX
)
258 *insn
= BPF_LDX_MEM(BPF_W
, BPF_REG_A
, BPF_REG_TMP
,
259 offsetof(struct net_device
, ifindex
));
261 *insn
= BPF_LDX_MEM(BPF_H
, BPF_REG_A
, BPF_REG_TMP
,
262 offsetof(struct net_device
, type
));
265 case SKF_AD_OFF
+ SKF_AD_MARK
:
266 cnt
= convert_skb_access(SKF_AD_MARK
, BPF_REG_A
, BPF_REG_CTX
, insn
);
270 case SKF_AD_OFF
+ SKF_AD_RXHASH
:
271 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, hash
) != 4);
273 *insn
= BPF_LDX_MEM(BPF_W
, BPF_REG_A
, BPF_REG_CTX
,
274 offsetof(struct sk_buff
, hash
));
277 case SKF_AD_OFF
+ SKF_AD_QUEUE
:
278 cnt
= convert_skb_access(SKF_AD_QUEUE
, BPF_REG_A
, BPF_REG_CTX
, insn
);
282 case SKF_AD_OFF
+ SKF_AD_VLAN_TAG
:
283 cnt
= convert_skb_access(SKF_AD_VLAN_TAG
,
284 BPF_REG_A
, BPF_REG_CTX
, insn
);
288 case SKF_AD_OFF
+ SKF_AD_VLAN_TAG_PRESENT
:
289 cnt
= convert_skb_access(SKF_AD_VLAN_TAG_PRESENT
,
290 BPF_REG_A
, BPF_REG_CTX
, insn
);
294 case SKF_AD_OFF
+ SKF_AD_VLAN_TPID
:
295 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff
, vlan_proto
) != 2);
297 /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
298 *insn
++ = BPF_LDX_MEM(BPF_H
, BPF_REG_A
, BPF_REG_CTX
,
299 offsetof(struct sk_buff
, vlan_proto
));
300 /* A = ntohs(A) [emitting a nop or swap16] */
301 *insn
= BPF_ENDIAN(BPF_FROM_BE
, BPF_REG_A
, 16);
304 case SKF_AD_OFF
+ SKF_AD_PAY_OFFSET
:
305 case SKF_AD_OFF
+ SKF_AD_NLATTR
:
306 case SKF_AD_OFF
+ SKF_AD_NLATTR_NEST
:
307 case SKF_AD_OFF
+ SKF_AD_CPU
:
308 case SKF_AD_OFF
+ SKF_AD_RANDOM
:
310 *insn
++ = BPF_MOV64_REG(BPF_REG_ARG1
, BPF_REG_CTX
);
312 *insn
++ = BPF_MOV64_REG(BPF_REG_ARG2
, BPF_REG_A
);
314 *insn
++ = BPF_MOV64_REG(BPF_REG_ARG3
, BPF_REG_X
);
315 /* Emit call(arg1=CTX, arg2=A, arg3=X) */
317 case SKF_AD_OFF
+ SKF_AD_PAY_OFFSET
:
318 *insn
= BPF_EMIT_CALL(__skb_get_pay_offset
);
320 case SKF_AD_OFF
+ SKF_AD_NLATTR
:
321 *insn
= BPF_EMIT_CALL(__skb_get_nlattr
);
323 case SKF_AD_OFF
+ SKF_AD_NLATTR_NEST
:
324 *insn
= BPF_EMIT_CALL(__skb_get_nlattr_nest
);
326 case SKF_AD_OFF
+ SKF_AD_CPU
:
327 *insn
= BPF_EMIT_CALL(__get_raw_cpu_id
);
329 case SKF_AD_OFF
+ SKF_AD_RANDOM
:
330 *insn
= BPF_EMIT_CALL(bpf_user_rnd_u32
);
331 bpf_user_rnd_init_once();
336 case SKF_AD_OFF
+ SKF_AD_ALU_XOR_X
:
338 *insn
= BPF_ALU32_REG(BPF_XOR
, BPF_REG_A
, BPF_REG_X
);
342 /* This is just a dummy call to avoid letting the compiler
343 * evict __bpf_call_base() as an optimization. Placed here
344 * where no-one bothers.
346 BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
355 * bpf_convert_filter - convert filter program
356 * @prog: the user passed filter program
357 * @len: the length of the user passed filter program
358 * @new_prog: allocated 'struct bpf_prog' or NULL
359 * @new_len: pointer to store length of converted program
361 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
362 * style extended BPF (eBPF).
363 * Conversion workflow:
365 * 1) First pass for calculating the new program length:
366 * bpf_convert_filter(old_prog, old_len, NULL, &new_len)
368 * 2) 2nd pass to remap in two passes: 1st pass finds new
369 * jump offsets, 2nd pass remapping:
370 * bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
372 static int bpf_convert_filter(struct sock_filter
*prog
, int len
,
373 struct bpf_prog
*new_prog
, int *new_len
)
375 int new_flen
= 0, pass
= 0, target
, i
, stack_off
;
376 struct bpf_insn
*new_insn
, *first_insn
= NULL
;
377 struct sock_filter
*fp
;
381 BUILD_BUG_ON(BPF_MEMWORDS
* sizeof(u32
) > MAX_BPF_STACK
);
382 BUILD_BUG_ON(BPF_REG_FP
+ 1 != MAX_BPF_REG
);
384 if (len
<= 0 || len
> BPF_MAXINSNS
)
388 first_insn
= new_prog
->insnsi
;
389 addrs
= kcalloc(len
, sizeof(*addrs
),
390 GFP_KERNEL
| __GFP_NOWARN
);
396 new_insn
= first_insn
;
399 /* Classic BPF related prologue emission. */
401 /* Classic BPF expects A and X to be reset first. These need
402 * to be guaranteed to be the first two instructions.
404 *new_insn
++ = BPF_ALU64_REG(BPF_XOR
, BPF_REG_A
, BPF_REG_A
);
405 *new_insn
++ = BPF_ALU64_REG(BPF_XOR
, BPF_REG_X
, BPF_REG_X
);
407 /* All programs must keep CTX in callee saved BPF_REG_CTX.
408 * In eBPF case it's done by the compiler, here we need to
409 * do this ourself. Initial CTX is present in BPF_REG_ARG1.
411 *new_insn
++ = BPF_MOV64_REG(BPF_REG_CTX
, BPF_REG_ARG1
);
416 for (i
= 0; i
< len
; fp
++, i
++) {
417 struct bpf_insn tmp_insns
[6] = { };
418 struct bpf_insn
*insn
= tmp_insns
;
421 addrs
[i
] = new_insn
- first_insn
;
424 /* All arithmetic insns and skb loads map as-is. */
425 case BPF_ALU
| BPF_ADD
| BPF_X
:
426 case BPF_ALU
| BPF_ADD
| BPF_K
:
427 case BPF_ALU
| BPF_SUB
| BPF_X
:
428 case BPF_ALU
| BPF_SUB
| BPF_K
:
429 case BPF_ALU
| BPF_AND
| BPF_X
:
430 case BPF_ALU
| BPF_AND
| BPF_K
:
431 case BPF_ALU
| BPF_OR
| BPF_X
:
432 case BPF_ALU
| BPF_OR
| BPF_K
:
433 case BPF_ALU
| BPF_LSH
| BPF_X
:
434 case BPF_ALU
| BPF_LSH
| BPF_K
:
435 case BPF_ALU
| BPF_RSH
| BPF_X
:
436 case BPF_ALU
| BPF_RSH
| BPF_K
:
437 case BPF_ALU
| BPF_XOR
| BPF_X
:
438 case BPF_ALU
| BPF_XOR
| BPF_K
:
439 case BPF_ALU
| BPF_MUL
| BPF_X
:
440 case BPF_ALU
| BPF_MUL
| BPF_K
:
441 case BPF_ALU
| BPF_DIV
| BPF_X
:
442 case BPF_ALU
| BPF_DIV
| BPF_K
:
443 case BPF_ALU
| BPF_MOD
| BPF_X
:
444 case BPF_ALU
| BPF_MOD
| BPF_K
:
445 case BPF_ALU
| BPF_NEG
:
446 case BPF_LD
| BPF_ABS
| BPF_W
:
447 case BPF_LD
| BPF_ABS
| BPF_H
:
448 case BPF_LD
| BPF_ABS
| BPF_B
:
449 case BPF_LD
| BPF_IND
| BPF_W
:
450 case BPF_LD
| BPF_IND
| BPF_H
:
451 case BPF_LD
| BPF_IND
| BPF_B
:
452 /* Check for overloaded BPF extension and
453 * directly convert it if found, otherwise
454 * just move on with mapping.
456 if (BPF_CLASS(fp
->code
) == BPF_LD
&&
457 BPF_MODE(fp
->code
) == BPF_ABS
&&
458 convert_bpf_extensions(fp
, &insn
))
461 if (fp
->code
== (BPF_ALU
| BPF_DIV
| BPF_X
) ||
462 fp
->code
== (BPF_ALU
| BPF_MOD
| BPF_X
))
463 *insn
++ = BPF_MOV32_REG(BPF_REG_X
, BPF_REG_X
);
465 *insn
= BPF_RAW_INSN(fp
->code
, BPF_REG_A
, BPF_REG_X
, 0, fp
->k
);
468 /* Jump transformation cannot use BPF block macros
469 * everywhere as offset calculation and target updates
470 * require a bit more work than the rest, i.e. jump
471 * opcodes map as-is, but offsets need adjustment.
474 #define BPF_EMIT_JMP \
476 if (target >= len || target < 0) \
478 insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
479 /* Adjust pc relative offset for 2nd or 3rd insn. */ \
480 insn->off -= insn - tmp_insns; \
483 case BPF_JMP
| BPF_JA
:
484 target
= i
+ fp
->k
+ 1;
485 insn
->code
= fp
->code
;
489 case BPF_JMP
| BPF_JEQ
| BPF_K
:
490 case BPF_JMP
| BPF_JEQ
| BPF_X
:
491 case BPF_JMP
| BPF_JSET
| BPF_K
:
492 case BPF_JMP
| BPF_JSET
| BPF_X
:
493 case BPF_JMP
| BPF_JGT
| BPF_K
:
494 case BPF_JMP
| BPF_JGT
| BPF_X
:
495 case BPF_JMP
| BPF_JGE
| BPF_K
:
496 case BPF_JMP
| BPF_JGE
| BPF_X
:
497 if (BPF_SRC(fp
->code
) == BPF_K
&& (int) fp
->k
< 0) {
498 /* BPF immediates are signed, zero extend
499 * immediate into tmp register and use it
502 *insn
++ = BPF_MOV32_IMM(BPF_REG_TMP
, fp
->k
);
504 insn
->dst_reg
= BPF_REG_A
;
505 insn
->src_reg
= BPF_REG_TMP
;
508 insn
->dst_reg
= BPF_REG_A
;
510 bpf_src
= BPF_SRC(fp
->code
);
511 insn
->src_reg
= bpf_src
== BPF_X
? BPF_REG_X
: 0;
514 /* Common case where 'jump_false' is next insn. */
516 insn
->code
= BPF_JMP
| BPF_OP(fp
->code
) | bpf_src
;
517 target
= i
+ fp
->jt
+ 1;
522 /* Convert some jumps when 'jump_true' is next insn. */
524 switch (BPF_OP(fp
->code
)) {
526 insn
->code
= BPF_JMP
| BPF_JNE
| bpf_src
;
529 insn
->code
= BPF_JMP
| BPF_JLE
| bpf_src
;
532 insn
->code
= BPF_JMP
| BPF_JLT
| bpf_src
;
538 target
= i
+ fp
->jf
+ 1;
543 /* Other jumps are mapped into two insns: Jxx and JA. */
544 target
= i
+ fp
->jt
+ 1;
545 insn
->code
= BPF_JMP
| BPF_OP(fp
->code
) | bpf_src
;
549 insn
->code
= BPF_JMP
| BPF_JA
;
550 target
= i
+ fp
->jf
+ 1;
554 /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
555 case BPF_LDX
| BPF_MSH
| BPF_B
:
557 *insn
++ = BPF_MOV64_REG(BPF_REG_TMP
, BPF_REG_A
);
558 /* A = BPF_R0 = *(u8 *) (skb->data + K) */
559 *insn
++ = BPF_LD_ABS(BPF_B
, fp
->k
);
561 *insn
++ = BPF_ALU32_IMM(BPF_AND
, BPF_REG_A
, 0xf);
563 *insn
++ = BPF_ALU32_IMM(BPF_LSH
, BPF_REG_A
, 2);
565 *insn
++ = BPF_MOV64_REG(BPF_REG_X
, BPF_REG_A
);
567 *insn
= BPF_MOV64_REG(BPF_REG_A
, BPF_REG_TMP
);
570 /* RET_K is remaped into 2 insns. RET_A case doesn't need an
571 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
573 case BPF_RET
| BPF_A
:
574 case BPF_RET
| BPF_K
:
575 if (BPF_RVAL(fp
->code
) == BPF_K
)
576 *insn
++ = BPF_MOV32_RAW(BPF_K
, BPF_REG_0
,
578 *insn
= BPF_EXIT_INSN();
581 /* Store to stack. */
584 stack_off
= fp
->k
* 4 + 4;
585 *insn
= BPF_STX_MEM(BPF_W
, BPF_REG_FP
, BPF_CLASS(fp
->code
) ==
586 BPF_ST
? BPF_REG_A
: BPF_REG_X
,
588 /* check_load_and_stores() verifies that classic BPF can
589 * load from stack only after write, so tracking
590 * stack_depth for ST|STX insns is enough
592 if (new_prog
&& new_prog
->aux
->stack_depth
< stack_off
)
593 new_prog
->aux
->stack_depth
= stack_off
;
596 /* Load from stack. */
597 case BPF_LD
| BPF_MEM
:
598 case BPF_LDX
| BPF_MEM
:
599 stack_off
= fp
->k
* 4 + 4;
600 *insn
= BPF_LDX_MEM(BPF_W
, BPF_CLASS(fp
->code
) == BPF_LD
?
601 BPF_REG_A
: BPF_REG_X
, BPF_REG_FP
,
606 case BPF_LD
| BPF_IMM
:
607 case BPF_LDX
| BPF_IMM
:
608 *insn
= BPF_MOV32_IMM(BPF_CLASS(fp
->code
) == BPF_LD
?
609 BPF_REG_A
: BPF_REG_X
, fp
->k
);
613 case BPF_MISC
| BPF_TAX
:
614 *insn
= BPF_MOV64_REG(BPF_REG_X
, BPF_REG_A
);
618 case BPF_MISC
| BPF_TXA
:
619 *insn
= BPF_MOV64_REG(BPF_REG_A
, BPF_REG_X
);
622 /* A = skb->len or X = skb->len */
623 case BPF_LD
| BPF_W
| BPF_LEN
:
624 case BPF_LDX
| BPF_W
| BPF_LEN
:
625 *insn
= BPF_LDX_MEM(BPF_W
, BPF_CLASS(fp
->code
) == BPF_LD
?
626 BPF_REG_A
: BPF_REG_X
, BPF_REG_CTX
,
627 offsetof(struct sk_buff
, len
));
630 /* Access seccomp_data fields. */
631 case BPF_LDX
| BPF_ABS
| BPF_W
:
632 /* A = *(u32 *) (ctx + K) */
633 *insn
= BPF_LDX_MEM(BPF_W
, BPF_REG_A
, BPF_REG_CTX
, fp
->k
);
636 /* Unknown instruction. */
643 memcpy(new_insn
, tmp_insns
,
644 sizeof(*insn
) * (insn
- tmp_insns
));
645 new_insn
+= insn
- tmp_insns
;
649 /* Only calculating new length. */
650 *new_len
= new_insn
- first_insn
;
655 if (new_flen
!= new_insn
- first_insn
) {
656 new_flen
= new_insn
- first_insn
;
663 BUG_ON(*new_len
!= new_flen
);
672 * As we dont want to clear mem[] array for each packet going through
673 * __bpf_prog_run(), we check that filter loaded by user never try to read
674 * a cell if not previously written, and we check all branches to be sure
675 * a malicious user doesn't try to abuse us.
677 static int check_load_and_stores(const struct sock_filter
*filter
, int flen
)
679 u16
*masks
, memvalid
= 0; /* One bit per cell, 16 cells */
682 BUILD_BUG_ON(BPF_MEMWORDS
> 16);
684 masks
= kmalloc_array(flen
, sizeof(*masks
), GFP_KERNEL
);
688 memset(masks
, 0xff, flen
* sizeof(*masks
));
690 for (pc
= 0; pc
< flen
; pc
++) {
691 memvalid
&= masks
[pc
];
693 switch (filter
[pc
].code
) {
696 memvalid
|= (1 << filter
[pc
].k
);
698 case BPF_LD
| BPF_MEM
:
699 case BPF_LDX
| BPF_MEM
:
700 if (!(memvalid
& (1 << filter
[pc
].k
))) {
705 case BPF_JMP
| BPF_JA
:
706 /* A jump must set masks on target */
707 masks
[pc
+ 1 + filter
[pc
].k
] &= memvalid
;
710 case BPF_JMP
| BPF_JEQ
| BPF_K
:
711 case BPF_JMP
| BPF_JEQ
| BPF_X
:
712 case BPF_JMP
| BPF_JGE
| BPF_K
:
713 case BPF_JMP
| BPF_JGE
| BPF_X
:
714 case BPF_JMP
| BPF_JGT
| BPF_K
:
715 case BPF_JMP
| BPF_JGT
| BPF_X
:
716 case BPF_JMP
| BPF_JSET
| BPF_K
:
717 case BPF_JMP
| BPF_JSET
| BPF_X
:
718 /* A jump must set masks on targets */
719 masks
[pc
+ 1 + filter
[pc
].jt
] &= memvalid
;
720 masks
[pc
+ 1 + filter
[pc
].jf
] &= memvalid
;
730 static bool chk_code_allowed(u16 code_to_probe
)
732 static const bool codes
[] = {
733 /* 32 bit ALU operations */
734 [BPF_ALU
| BPF_ADD
| BPF_K
] = true,
735 [BPF_ALU
| BPF_ADD
| BPF_X
] = true,
736 [BPF_ALU
| BPF_SUB
| BPF_K
] = true,
737 [BPF_ALU
| BPF_SUB
| BPF_X
] = true,
738 [BPF_ALU
| BPF_MUL
| BPF_K
] = true,
739 [BPF_ALU
| BPF_MUL
| BPF_X
] = true,
740 [BPF_ALU
| BPF_DIV
| BPF_K
] = true,
741 [BPF_ALU
| BPF_DIV
| BPF_X
] = true,
742 [BPF_ALU
| BPF_MOD
| BPF_K
] = true,
743 [BPF_ALU
| BPF_MOD
| BPF_X
] = true,
744 [BPF_ALU
| BPF_AND
| BPF_K
] = true,
745 [BPF_ALU
| BPF_AND
| BPF_X
] = true,
746 [BPF_ALU
| BPF_OR
| BPF_K
] = true,
747 [BPF_ALU
| BPF_OR
| BPF_X
] = true,
748 [BPF_ALU
| BPF_XOR
| BPF_K
] = true,
749 [BPF_ALU
| BPF_XOR
| BPF_X
] = true,
750 [BPF_ALU
| BPF_LSH
| BPF_K
] = true,
751 [BPF_ALU
| BPF_LSH
| BPF_X
] = true,
752 [BPF_ALU
| BPF_RSH
| BPF_K
] = true,
753 [BPF_ALU
| BPF_RSH
| BPF_X
] = true,
754 [BPF_ALU
| BPF_NEG
] = true,
755 /* Load instructions */
756 [BPF_LD
| BPF_W
| BPF_ABS
] = true,
757 [BPF_LD
| BPF_H
| BPF_ABS
] = true,
758 [BPF_LD
| BPF_B
| BPF_ABS
] = true,
759 [BPF_LD
| BPF_W
| BPF_LEN
] = true,
760 [BPF_LD
| BPF_W
| BPF_IND
] = true,
761 [BPF_LD
| BPF_H
| BPF_IND
] = true,
762 [BPF_LD
| BPF_B
| BPF_IND
] = true,
763 [BPF_LD
| BPF_IMM
] = true,
764 [BPF_LD
| BPF_MEM
] = true,
765 [BPF_LDX
| BPF_W
| BPF_LEN
] = true,
766 [BPF_LDX
| BPF_B
| BPF_MSH
] = true,
767 [BPF_LDX
| BPF_IMM
] = true,
768 [BPF_LDX
| BPF_MEM
] = true,
769 /* Store instructions */
772 /* Misc instructions */
773 [BPF_MISC
| BPF_TAX
] = true,
774 [BPF_MISC
| BPF_TXA
] = true,
775 /* Return instructions */
776 [BPF_RET
| BPF_K
] = true,
777 [BPF_RET
| BPF_A
] = true,
778 /* Jump instructions */
779 [BPF_JMP
| BPF_JA
] = true,
780 [BPF_JMP
| BPF_JEQ
| BPF_K
] = true,
781 [BPF_JMP
| BPF_JEQ
| BPF_X
] = true,
782 [BPF_JMP
| BPF_JGE
| BPF_K
] = true,
783 [BPF_JMP
| BPF_JGE
| BPF_X
] = true,
784 [BPF_JMP
| BPF_JGT
| BPF_K
] = true,
785 [BPF_JMP
| BPF_JGT
| BPF_X
] = true,
786 [BPF_JMP
| BPF_JSET
| BPF_K
] = true,
787 [BPF_JMP
| BPF_JSET
| BPF_X
] = true,
790 if (code_to_probe
>= ARRAY_SIZE(codes
))
793 return codes
[code_to_probe
];
796 static bool bpf_check_basics_ok(const struct sock_filter
*filter
,
801 if (flen
== 0 || flen
> BPF_MAXINSNS
)
808 * bpf_check_classic - verify socket filter code
809 * @filter: filter to verify
810 * @flen: length of filter
812 * Check the user's filter code. If we let some ugly
813 * filter code slip through kaboom! The filter must contain
814 * no references or jumps that are out of range, no illegal
815 * instructions, and must end with a RET instruction.
817 * All jumps are forward as they are not signed.
819 * Returns 0 if the rule set is legal or -EINVAL if not.
821 static int bpf_check_classic(const struct sock_filter
*filter
,
827 /* Check the filter code now */
828 for (pc
= 0; pc
< flen
; pc
++) {
829 const struct sock_filter
*ftest
= &filter
[pc
];
831 /* May we actually operate on this code? */
832 if (!chk_code_allowed(ftest
->code
))
835 /* Some instructions need special checks */
836 switch (ftest
->code
) {
837 case BPF_ALU
| BPF_DIV
| BPF_K
:
838 case BPF_ALU
| BPF_MOD
| BPF_K
:
839 /* Check for division by zero */
843 case BPF_ALU
| BPF_LSH
| BPF_K
:
844 case BPF_ALU
| BPF_RSH
| BPF_K
:
848 case BPF_LD
| BPF_MEM
:
849 case BPF_LDX
| BPF_MEM
:
852 /* Check for invalid memory addresses */
853 if (ftest
->k
>= BPF_MEMWORDS
)
856 case BPF_JMP
| BPF_JA
:
857 /* Note, the large ftest->k might cause loops.
858 * Compare this with conditional jumps below,
859 * where offsets are limited. --ANK (981016)
861 if (ftest
->k
>= (unsigned int)(flen
- pc
- 1))
864 case BPF_JMP
| BPF_JEQ
| BPF_K
:
865 case BPF_JMP
| BPF_JEQ
| BPF_X
:
866 case BPF_JMP
| BPF_JGE
| BPF_K
:
867 case BPF_JMP
| BPF_JGE
| BPF_X
:
868 case BPF_JMP
| BPF_JGT
| BPF_K
:
869 case BPF_JMP
| BPF_JGT
| BPF_X
:
870 case BPF_JMP
| BPF_JSET
| BPF_K
:
871 case BPF_JMP
| BPF_JSET
| BPF_X
:
872 /* Both conditionals must be safe */
873 if (pc
+ ftest
->jt
+ 1 >= flen
||
874 pc
+ ftest
->jf
+ 1 >= flen
)
877 case BPF_LD
| BPF_W
| BPF_ABS
:
878 case BPF_LD
| BPF_H
| BPF_ABS
:
879 case BPF_LD
| BPF_B
| BPF_ABS
:
881 if (bpf_anc_helper(ftest
) & BPF_ANC
)
883 /* Ancillary operation unknown or unsupported */
884 if (anc_found
== false && ftest
->k
>= SKF_AD_OFF
)
889 /* Last instruction must be a RET code */
890 switch (filter
[flen
- 1].code
) {
891 case BPF_RET
| BPF_K
:
892 case BPF_RET
| BPF_A
:
893 return check_load_and_stores(filter
, flen
);
899 static int bpf_prog_store_orig_filter(struct bpf_prog
*fp
,
900 const struct sock_fprog
*fprog
)
902 unsigned int fsize
= bpf_classic_proglen(fprog
);
903 struct sock_fprog_kern
*fkprog
;
905 fp
->orig_prog
= kmalloc(sizeof(*fkprog
), GFP_KERNEL
);
909 fkprog
= fp
->orig_prog
;
910 fkprog
->len
= fprog
->len
;
912 fkprog
->filter
= kmemdup(fp
->insns
, fsize
,
913 GFP_KERNEL
| __GFP_NOWARN
);
914 if (!fkprog
->filter
) {
915 kfree(fp
->orig_prog
);
922 static void bpf_release_orig_filter(struct bpf_prog
*fp
)
924 struct sock_fprog_kern
*fprog
= fp
->orig_prog
;
927 kfree(fprog
->filter
);
932 static void __bpf_prog_release(struct bpf_prog
*prog
)
934 if (prog
->type
== BPF_PROG_TYPE_SOCKET_FILTER
) {
937 bpf_release_orig_filter(prog
);
942 static void __sk_filter_release(struct sk_filter
*fp
)
944 __bpf_prog_release(fp
->prog
);
949 * sk_filter_release_rcu - Release a socket filter by rcu_head
950 * @rcu: rcu_head that contains the sk_filter to free
952 static void sk_filter_release_rcu(struct rcu_head
*rcu
)
954 struct sk_filter
*fp
= container_of(rcu
, struct sk_filter
, rcu
);
956 __sk_filter_release(fp
);
960 * sk_filter_release - release a socket filter
961 * @fp: filter to remove
963 * Remove a filter from a socket and release its resources.
965 static void sk_filter_release(struct sk_filter
*fp
)
967 if (refcount_dec_and_test(&fp
->refcnt
))
968 call_rcu(&fp
->rcu
, sk_filter_release_rcu
);
971 void sk_filter_uncharge(struct sock
*sk
, struct sk_filter
*fp
)
973 u32 filter_size
= bpf_prog_size(fp
->prog
->len
);
975 atomic_sub(filter_size
, &sk
->sk_omem_alloc
);
976 sk_filter_release(fp
);
979 /* try to charge the socket memory if there is space available
980 * return true on success
982 static bool __sk_filter_charge(struct sock
*sk
, struct sk_filter
*fp
)
984 u32 filter_size
= bpf_prog_size(fp
->prog
->len
);
986 /* same check as in sock_kmalloc() */
987 if (filter_size
<= sysctl_optmem_max
&&
988 atomic_read(&sk
->sk_omem_alloc
) + filter_size
< sysctl_optmem_max
) {
989 atomic_add(filter_size
, &sk
->sk_omem_alloc
);
995 bool sk_filter_charge(struct sock
*sk
, struct sk_filter
*fp
)
997 if (!refcount_inc_not_zero(&fp
->refcnt
))
1000 if (!__sk_filter_charge(sk
, fp
)) {
1001 sk_filter_release(fp
);
1007 static struct bpf_prog
*bpf_migrate_filter(struct bpf_prog
*fp
)
1009 struct sock_filter
*old_prog
;
1010 struct bpf_prog
*old_fp
;
1011 int err
, new_len
, old_len
= fp
->len
;
1013 /* We are free to overwrite insns et al right here as it
1014 * won't be used at this point in time anymore internally
1015 * after the migration to the internal BPF instruction
1018 BUILD_BUG_ON(sizeof(struct sock_filter
) !=
1019 sizeof(struct bpf_insn
));
1021 /* Conversion cannot happen on overlapping memory areas,
1022 * so we need to keep the user BPF around until the 2nd
1023 * pass. At this time, the user BPF is stored in fp->insns.
1025 old_prog
= kmemdup(fp
->insns
, old_len
* sizeof(struct sock_filter
),
1026 GFP_KERNEL
| __GFP_NOWARN
);
1032 /* 1st pass: calculate the new program length. */
1033 err
= bpf_convert_filter(old_prog
, old_len
, NULL
, &new_len
);
1037 /* Expand fp for appending the new filter representation. */
1039 fp
= bpf_prog_realloc(old_fp
, bpf_prog_size(new_len
), 0);
1041 /* The old_fp is still around in case we couldn't
1042 * allocate new memory, so uncharge on that one.
1051 /* 2nd pass: remap sock_filter insns into bpf_insn insns. */
1052 err
= bpf_convert_filter(old_prog
, old_len
, fp
, &new_len
);
1054 /* 2nd bpf_convert_filter() can fail only if it fails
1055 * to allocate memory, remapping must succeed. Note,
1056 * that at this time old_fp has already been released
1061 fp
= bpf_prog_select_runtime(fp
, &err
);
1071 __bpf_prog_release(fp
);
1072 return ERR_PTR(err
);
1075 static struct bpf_prog
*bpf_prepare_filter(struct bpf_prog
*fp
,
1076 bpf_aux_classic_check_t trans
)
1080 fp
->bpf_func
= NULL
;
1083 err
= bpf_check_classic(fp
->insns
, fp
->len
);
1085 __bpf_prog_release(fp
);
1086 return ERR_PTR(err
);
1089 /* There might be additional checks and transformations
1090 * needed on classic filters, f.e. in case of seccomp.
1093 err
= trans(fp
->insns
, fp
->len
);
1095 __bpf_prog_release(fp
);
1096 return ERR_PTR(err
);
1100 /* Probe if we can JIT compile the filter and if so, do
1101 * the compilation of the filter.
1103 bpf_jit_compile(fp
);
1105 /* JIT compiler couldn't process this filter, so do the
1106 * internal BPF translation for the optimized interpreter.
1109 fp
= bpf_migrate_filter(fp
);
1115 * bpf_prog_create - create an unattached filter
1116 * @pfp: the unattached filter that is created
1117 * @fprog: the filter program
1119 * Create a filter independent of any socket. We first run some
1120 * sanity checks on it to make sure it does not explode on us later.
1121 * If an error occurs or there is insufficient memory for the filter
1122 * a negative errno code is returned. On success the return is zero.
1124 int bpf_prog_create(struct bpf_prog
**pfp
, struct sock_fprog_kern
*fprog
)
1126 unsigned int fsize
= bpf_classic_proglen(fprog
);
1127 struct bpf_prog
*fp
;
1129 /* Make sure new filter is there and in the right amounts. */
1130 if (!bpf_check_basics_ok(fprog
->filter
, fprog
->len
))
1133 fp
= bpf_prog_alloc(bpf_prog_size(fprog
->len
), 0);
1137 memcpy(fp
->insns
, fprog
->filter
, fsize
);
1139 fp
->len
= fprog
->len
;
1140 /* Since unattached filters are not copied back to user
1141 * space through sk_get_filter(), we do not need to hold
1142 * a copy here, and can spare us the work.
1144 fp
->orig_prog
= NULL
;
1146 /* bpf_prepare_filter() already takes care of freeing
1147 * memory in case something goes wrong.
1149 fp
= bpf_prepare_filter(fp
, NULL
);
1156 EXPORT_SYMBOL_GPL(bpf_prog_create
);
1159 * bpf_prog_create_from_user - create an unattached filter from user buffer
1160 * @pfp: the unattached filter that is created
1161 * @fprog: the filter program
1162 * @trans: post-classic verifier transformation handler
1163 * @save_orig: save classic BPF program
1165 * This function effectively does the same as bpf_prog_create(), only
1166 * that it builds up its insns buffer from user space provided buffer.
1167 * It also allows for passing a bpf_aux_classic_check_t handler.
1169 int bpf_prog_create_from_user(struct bpf_prog
**pfp
, struct sock_fprog
*fprog
,
1170 bpf_aux_classic_check_t trans
, bool save_orig
)
1172 unsigned int fsize
= bpf_classic_proglen(fprog
);
1173 struct bpf_prog
*fp
;
1176 /* Make sure new filter is there and in the right amounts. */
1177 if (!bpf_check_basics_ok(fprog
->filter
, fprog
->len
))
1180 fp
= bpf_prog_alloc(bpf_prog_size(fprog
->len
), 0);
1184 if (copy_from_user(fp
->insns
, fprog
->filter
, fsize
)) {
1185 __bpf_prog_free(fp
);
1189 fp
->len
= fprog
->len
;
1190 fp
->orig_prog
= NULL
;
1193 err
= bpf_prog_store_orig_filter(fp
, fprog
);
1195 __bpf_prog_free(fp
);
1200 /* bpf_prepare_filter() already takes care of freeing
1201 * memory in case something goes wrong.
1203 fp
= bpf_prepare_filter(fp
, trans
);
1210 EXPORT_SYMBOL_GPL(bpf_prog_create_from_user
);
/* Destroy an unattached program created by bpf_prog_create*(). */
void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
1218 static int __sk_attach_prog(struct bpf_prog
*prog
, struct sock
*sk
)
1220 struct sk_filter
*fp
, *old_fp
;
1222 fp
= kmalloc(sizeof(*fp
), GFP_KERNEL
);
1228 if (!__sk_filter_charge(sk
, fp
)) {
1232 refcount_set(&fp
->refcnt
, 1);
1234 old_fp
= rcu_dereference_protected(sk
->sk_filter
,
1235 lockdep_sock_is_held(sk
));
1236 rcu_assign_pointer(sk
->sk_filter
, fp
);
1239 sk_filter_uncharge(sk
, old_fp
);
1244 static int __reuseport_attach_prog(struct bpf_prog
*prog
, struct sock
*sk
)
1246 struct bpf_prog
*old_prog
;
1249 if (bpf_prog_size(prog
->len
) > sysctl_optmem_max
)
1252 if (sk_unhashed(sk
) && sk
->sk_reuseport
) {
1253 err
= reuseport_alloc(sk
);
1256 } else if (!rcu_access_pointer(sk
->sk_reuseport_cb
)) {
1257 /* The socket wasn't bound with SO_REUSEPORT */
1261 old_prog
= reuseport_attach_prog(sk
, prog
);
1263 bpf_prog_destroy(old_prog
);
1269 struct bpf_prog
*__get_filter(struct sock_fprog
*fprog
, struct sock
*sk
)
1271 unsigned int fsize
= bpf_classic_proglen(fprog
);
1272 struct bpf_prog
*prog
;
1275 if (sock_flag(sk
, SOCK_FILTER_LOCKED
))
1276 return ERR_PTR(-EPERM
);
1278 /* Make sure new filter is there and in the right amounts. */
1279 if (!bpf_check_basics_ok(fprog
->filter
, fprog
->len
))
1280 return ERR_PTR(-EINVAL
);
1282 prog
= bpf_prog_alloc(bpf_prog_size(fprog
->len
), 0);
1284 return ERR_PTR(-ENOMEM
);
1286 if (copy_from_user(prog
->insns
, fprog
->filter
, fsize
)) {
1287 __bpf_prog_free(prog
);
1288 return ERR_PTR(-EFAULT
);
1291 prog
->len
= fprog
->len
;
1293 err
= bpf_prog_store_orig_filter(prog
, fprog
);
1295 __bpf_prog_free(prog
);
1296 return ERR_PTR(-ENOMEM
);
1299 /* bpf_prepare_filter() already takes care of freeing
1300 * memory in case something goes wrong.
1302 return bpf_prepare_filter(prog
, NULL
);
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
1333 int sk_reuseport_attach_filter(struct sock_fprog
*fprog
, struct sock
*sk
)
1335 struct bpf_prog
*prog
= __get_filter(fprog
, sk
);
1339 return PTR_ERR(prog
);
1341 err
= __reuseport_attach_prog(prog
, sk
);
1343 __bpf_prog_release(prog
);
1350 static struct bpf_prog
*__get_bpf(u32 ufd
, struct sock
*sk
)
1352 if (sock_flag(sk
, SOCK_FILTER_LOCKED
))
1353 return ERR_PTR(-EPERM
);
1355 return bpf_prog_get_type(ufd
, BPF_PROG_TYPE_SOCKET_FILTER
);
1358 int sk_attach_bpf(u32 ufd
, struct sock
*sk
)
1360 struct bpf_prog
*prog
= __get_bpf(ufd
, sk
);
1364 return PTR_ERR(prog
);
1366 err
= __sk_attach_prog(prog
, sk
);
1375 int sk_reuseport_attach_bpf(u32 ufd
, struct sock
*sk
)
1377 struct bpf_prog
*prog
= __get_bpf(ufd
, sk
);
1381 return PTR_ERR(prog
);
1383 err
= __reuseport_attach_prog(prog
, sk
);
1392 struct bpf_scratchpad
{
1394 __be32 diff
[MAX_BPF_STACK
/ sizeof(__be32
)];
1395 u8 buff
[MAX_BPF_STACK
];
1399 static DEFINE_PER_CPU(struct bpf_scratchpad
, bpf_sp
);
1401 static inline int __bpf_try_make_writable(struct sk_buff
*skb
,
1402 unsigned int write_len
)
1404 return skb_ensure_writable(skb
, write_len
);
1407 static inline int bpf_try_make_writable(struct sk_buff
*skb
,
1408 unsigned int write_len
)
1410 int err
= __bpf_try_make_writable(skb
, write_len
);
1412 bpf_compute_data_pointers(skb
);
1416 static int bpf_try_make_head_writable(struct sk_buff
*skb
)
1418 return bpf_try_make_writable(skb
, skb_headlen(skb
));
1421 static inline void bpf_push_mac_rcsum(struct sk_buff
*skb
)
1423 if (skb_at_tc_ingress(skb
))
1424 skb_postpush_rcsum(skb
, skb_mac_header(skb
), skb
->mac_len
);
1427 static inline void bpf_pull_mac_rcsum(struct sk_buff
*skb
)
1429 if (skb_at_tc_ingress(skb
))
1430 skb_postpull_rcsum(skb
, skb_mac_header(skb
), skb
->mac_len
);
1433 BPF_CALL_5(bpf_skb_store_bytes
, struct sk_buff
*, skb
, u32
, offset
,
1434 const void *, from
, u32
, len
, u64
, flags
)
1438 if (unlikely(flags
& ~(BPF_F_RECOMPUTE_CSUM
| BPF_F_INVALIDATE_HASH
)))
1440 if (unlikely(offset
> 0xffff))
1442 if (unlikely(bpf_try_make_writable(skb
, offset
+ len
)))
1445 ptr
= skb
->data
+ offset
;
1446 if (flags
& BPF_F_RECOMPUTE_CSUM
)
1447 __skb_postpull_rcsum(skb
, ptr
, len
, offset
);
1449 memcpy(ptr
, from
, len
);
1451 if (flags
& BPF_F_RECOMPUTE_CSUM
)
1452 __skb_postpush_rcsum(skb
, ptr
, len
, offset
);
1453 if (flags
& BPF_F_INVALIDATE_HASH
)
1454 skb_clear_hash(skb
);
1459 static const struct bpf_func_proto bpf_skb_store_bytes_proto
= {
1460 .func
= bpf_skb_store_bytes
,
1462 .ret_type
= RET_INTEGER
,
1463 .arg1_type
= ARG_PTR_TO_CTX
,
1464 .arg2_type
= ARG_ANYTHING
,
1465 .arg3_type
= ARG_PTR_TO_MEM
,
1466 .arg4_type
= ARG_CONST_SIZE
,
1467 .arg5_type
= ARG_ANYTHING
,
1470 BPF_CALL_4(bpf_skb_load_bytes
, const struct sk_buff
*, skb
, u32
, offset
,
1471 void *, to
, u32
, len
)
1475 if (unlikely(offset
> 0xffff))
1478 ptr
= skb_header_pointer(skb
, offset
, len
, to
);
1482 memcpy(to
, ptr
, len
);
1490 static const struct bpf_func_proto bpf_skb_load_bytes_proto
= {
1491 .func
= bpf_skb_load_bytes
,
1493 .ret_type
= RET_INTEGER
,
1494 .arg1_type
= ARG_PTR_TO_CTX
,
1495 .arg2_type
= ARG_ANYTHING
,
1496 .arg3_type
= ARG_PTR_TO_UNINIT_MEM
,
1497 .arg4_type
= ARG_CONST_SIZE
,
1500 BPF_CALL_2(bpf_skb_pull_data
, struct sk_buff
*, skb
, u32
, len
)
1502 /* Idea is the following: should the needed direct read/write
1503 * test fail during runtime, we can pull in more data and redo
1504 * again, since implicitly, we invalidate previous checks here.
1506 * Or, since we know how much we need to make read/writeable,
1507 * this can be done once at the program beginning for direct
1508 * access case. By this we overcome limitations of only current
1509 * headroom being accessible.
1511 return bpf_try_make_writable(skb
, len
? : skb_headlen(skb
));
1514 static const struct bpf_func_proto bpf_skb_pull_data_proto
= {
1515 .func
= bpf_skb_pull_data
,
1517 .ret_type
= RET_INTEGER
,
1518 .arg1_type
= ARG_PTR_TO_CTX
,
1519 .arg2_type
= ARG_ANYTHING
,
1522 BPF_CALL_5(bpf_l3_csum_replace
, struct sk_buff
*, skb
, u32
, offset
,
1523 u64
, from
, u64
, to
, u64
, flags
)
1527 if (unlikely(flags
& ~(BPF_F_HDR_FIELD_MASK
)))
1529 if (unlikely(offset
> 0xffff || offset
& 1))
1531 if (unlikely(bpf_try_make_writable(skb
, offset
+ sizeof(*ptr
))))
1534 ptr
= (__sum16
*)(skb
->data
+ offset
);
1535 switch (flags
& BPF_F_HDR_FIELD_MASK
) {
1537 if (unlikely(from
!= 0))
1540 csum_replace_by_diff(ptr
, to
);
1543 csum_replace2(ptr
, from
, to
);
1546 csum_replace4(ptr
, from
, to
);
1555 static const struct bpf_func_proto bpf_l3_csum_replace_proto
= {
1556 .func
= bpf_l3_csum_replace
,
1558 .ret_type
= RET_INTEGER
,
1559 .arg1_type
= ARG_PTR_TO_CTX
,
1560 .arg2_type
= ARG_ANYTHING
,
1561 .arg3_type
= ARG_ANYTHING
,
1562 .arg4_type
= ARG_ANYTHING
,
1563 .arg5_type
= ARG_ANYTHING
,
1566 BPF_CALL_5(bpf_l4_csum_replace
, struct sk_buff
*, skb
, u32
, offset
,
1567 u64
, from
, u64
, to
, u64
, flags
)
1569 bool is_pseudo
= flags
& BPF_F_PSEUDO_HDR
;
1570 bool is_mmzero
= flags
& BPF_F_MARK_MANGLED_0
;
1571 bool do_mforce
= flags
& BPF_F_MARK_ENFORCE
;
1574 if (unlikely(flags
& ~(BPF_F_MARK_MANGLED_0
| BPF_F_MARK_ENFORCE
|
1575 BPF_F_PSEUDO_HDR
| BPF_F_HDR_FIELD_MASK
)))
1577 if (unlikely(offset
> 0xffff || offset
& 1))
1579 if (unlikely(bpf_try_make_writable(skb
, offset
+ sizeof(*ptr
))))
1582 ptr
= (__sum16
*)(skb
->data
+ offset
);
1583 if (is_mmzero
&& !do_mforce
&& !*ptr
)
1586 switch (flags
& BPF_F_HDR_FIELD_MASK
) {
1588 if (unlikely(from
!= 0))
1591 inet_proto_csum_replace_by_diff(ptr
, skb
, to
, is_pseudo
);
1594 inet_proto_csum_replace2(ptr
, skb
, from
, to
, is_pseudo
);
1597 inet_proto_csum_replace4(ptr
, skb
, from
, to
, is_pseudo
);
1603 if (is_mmzero
&& !*ptr
)
1604 *ptr
= CSUM_MANGLED_0
;
1608 static const struct bpf_func_proto bpf_l4_csum_replace_proto
= {
1609 .func
= bpf_l4_csum_replace
,
1611 .ret_type
= RET_INTEGER
,
1612 .arg1_type
= ARG_PTR_TO_CTX
,
1613 .arg2_type
= ARG_ANYTHING
,
1614 .arg3_type
= ARG_ANYTHING
,
1615 .arg4_type
= ARG_ANYTHING
,
1616 .arg5_type
= ARG_ANYTHING
,
1619 BPF_CALL_5(bpf_csum_diff
, __be32
*, from
, u32
, from_size
,
1620 __be32
*, to
, u32
, to_size
, __wsum
, seed
)
1622 struct bpf_scratchpad
*sp
= this_cpu_ptr(&bpf_sp
);
1623 u32 diff_size
= from_size
+ to_size
;
1626 /* This is quite flexible, some examples:
1628 * from_size == 0, to_size > 0, seed := csum --> pushing data
1629 * from_size > 0, to_size == 0, seed := csum --> pulling data
1630 * from_size > 0, to_size > 0, seed := 0 --> diffing data
1632 * Even for diffing, from_size and to_size don't need to be equal.
1634 if (unlikely(((from_size
| to_size
) & (sizeof(__be32
) - 1)) ||
1635 diff_size
> sizeof(sp
->diff
)))
1638 for (i
= 0; i
< from_size
/ sizeof(__be32
); i
++, j
++)
1639 sp
->diff
[j
] = ~from
[i
];
1640 for (i
= 0; i
< to_size
/ sizeof(__be32
); i
++, j
++)
1641 sp
->diff
[j
] = to
[i
];
1643 return csum_partial(sp
->diff
, diff_size
, seed
);
1646 static const struct bpf_func_proto bpf_csum_diff_proto
= {
1647 .func
= bpf_csum_diff
,
1650 .ret_type
= RET_INTEGER
,
1651 .arg1_type
= ARG_PTR_TO_MEM_OR_NULL
,
1652 .arg2_type
= ARG_CONST_SIZE_OR_ZERO
,
1653 .arg3_type
= ARG_PTR_TO_MEM_OR_NULL
,
1654 .arg4_type
= ARG_CONST_SIZE_OR_ZERO
,
1655 .arg5_type
= ARG_ANYTHING
,
1658 BPF_CALL_2(bpf_csum_update
, struct sk_buff
*, skb
, __wsum
, csum
)
1660 /* The interface is to be used in combination with bpf_csum_diff()
1661 * for direct packet writes. csum rotation for alignment as well
1662 * as emulating csum_sub() can be done from the eBPF program.
1664 if (skb
->ip_summed
== CHECKSUM_COMPLETE
)
1665 return (skb
->csum
= csum_add(skb
->csum
, csum
));
1670 static const struct bpf_func_proto bpf_csum_update_proto
= {
1671 .func
= bpf_csum_update
,
1673 .ret_type
= RET_INTEGER
,
1674 .arg1_type
= ARG_PTR_TO_CTX
,
1675 .arg2_type
= ARG_ANYTHING
,
1678 static inline int __bpf_rx_skb(struct net_device
*dev
, struct sk_buff
*skb
)
1680 return dev_forward_skb(dev
, skb
);
1683 static inline int __bpf_rx_skb_no_mac(struct net_device
*dev
,
1684 struct sk_buff
*skb
)
1686 int ret
= ____dev_forward_skb(dev
, skb
);
1690 ret
= netif_rx(skb
);
1696 static inline int __bpf_tx_skb(struct net_device
*dev
, struct sk_buff
*skb
)
1700 if (unlikely(__this_cpu_read(xmit_recursion
) > XMIT_RECURSION_LIMIT
)) {
1701 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
1708 __this_cpu_inc(xmit_recursion
);
1709 ret
= dev_queue_xmit(skb
);
1710 __this_cpu_dec(xmit_recursion
);
1715 static int __bpf_redirect_no_mac(struct sk_buff
*skb
, struct net_device
*dev
,
1718 unsigned int mlen
= skb_network_offset(skb
);
1721 __skb_pull(skb
, mlen
);
1723 /* At ingress, the mac header has already been pulled once.
1724 * At egress, skb_pospull_rcsum has to be done in case that
1725 * the skb is originated from ingress (i.e. a forwarded skb)
1726 * to ensure that rcsum starts at net header.
1728 if (!skb_at_tc_ingress(skb
))
1729 skb_postpull_rcsum(skb
, skb_mac_header(skb
), mlen
);
1731 skb_pop_mac_header(skb
);
1732 skb_reset_mac_len(skb
);
1733 return flags
& BPF_F_INGRESS
?
1734 __bpf_rx_skb_no_mac(dev
, skb
) : __bpf_tx_skb(dev
, skb
);
1737 static int __bpf_redirect_common(struct sk_buff
*skb
, struct net_device
*dev
,
1740 /* Verify that a link layer header is carried */
1741 if (unlikely(skb
->mac_header
>= skb
->network_header
)) {
1746 bpf_push_mac_rcsum(skb
);
1747 return flags
& BPF_F_INGRESS
?
1748 __bpf_rx_skb(dev
, skb
) : __bpf_tx_skb(dev
, skb
);
1751 static int __bpf_redirect(struct sk_buff
*skb
, struct net_device
*dev
,
1754 if (dev_is_mac_header_xmit(dev
))
1755 return __bpf_redirect_common(skb
, dev
, flags
);
1757 return __bpf_redirect_no_mac(skb
, dev
, flags
);
1760 BPF_CALL_3(bpf_clone_redirect
, struct sk_buff
*, skb
, u32
, ifindex
, u64
, flags
)
1762 struct net_device
*dev
;
1763 struct sk_buff
*clone
;
1766 if (unlikely(flags
& ~(BPF_F_INGRESS
)))
1769 dev
= dev_get_by_index_rcu(dev_net(skb
->dev
), ifindex
);
1773 clone
= skb_clone(skb
, GFP_ATOMIC
);
1774 if (unlikely(!clone
))
1777 /* For direct write, we need to keep the invariant that the skbs
1778 * we're dealing with need to be uncloned. Should uncloning fail
1779 * here, we need to free the just generated clone to unclone once
1782 ret
= bpf_try_make_head_writable(skb
);
1783 if (unlikely(ret
)) {
1788 return __bpf_redirect(clone
, dev
, flags
);
1791 static const struct bpf_func_proto bpf_clone_redirect_proto
= {
1792 .func
= bpf_clone_redirect
,
1794 .ret_type
= RET_INTEGER
,
1795 .arg1_type
= ARG_PTR_TO_CTX
,
1796 .arg2_type
= ARG_ANYTHING
,
1797 .arg3_type
= ARG_ANYTHING
,
1800 struct redirect_info
{
1803 struct bpf_map
*map
;
1804 struct bpf_map
*map_to_flush
;
1805 unsigned long map_owner
;
1808 static DEFINE_PER_CPU(struct redirect_info
, redirect_info
);
1810 BPF_CALL_2(bpf_redirect
, u32
, ifindex
, u64
, flags
)
1812 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
1814 if (unlikely(flags
& ~(BPF_F_INGRESS
)))
1817 ri
->ifindex
= ifindex
;
1820 return TC_ACT_REDIRECT
;
1823 int skb_do_redirect(struct sk_buff
*skb
)
1825 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
1826 struct net_device
*dev
;
1828 dev
= dev_get_by_index_rcu(dev_net(skb
->dev
), ri
->ifindex
);
1830 if (unlikely(!dev
)) {
1835 return __bpf_redirect(skb
, dev
, ri
->flags
);
1838 static const struct bpf_func_proto bpf_redirect_proto
= {
1839 .func
= bpf_redirect
,
1841 .ret_type
= RET_INTEGER
,
1842 .arg1_type
= ARG_ANYTHING
,
1843 .arg2_type
= ARG_ANYTHING
,
1846 BPF_CALL_4(bpf_sk_redirect_map
, struct sk_buff
*, skb
,
1847 struct bpf_map
*, map
, u32
, key
, u64
, flags
)
1849 struct tcp_skb_cb
*tcb
= TCP_SKB_CB(skb
);
1851 /* If user passes invalid input drop the packet. */
1852 if (unlikely(flags
))
1856 tcb
->bpf
.flags
= flags
;
1862 struct sock
*do_sk_redirect_map(struct sk_buff
*skb
)
1864 struct tcp_skb_cb
*tcb
= TCP_SKB_CB(skb
);
1865 struct sock
*sk
= NULL
;
1868 sk
= __sock_map_lookup_elem(tcb
->bpf
.map
, tcb
->bpf
.key
);
1871 tcb
->bpf
.map
= NULL
;
1877 static const struct bpf_func_proto bpf_sk_redirect_map_proto
= {
1878 .func
= bpf_sk_redirect_map
,
1880 .ret_type
= RET_INTEGER
,
1881 .arg1_type
= ARG_PTR_TO_CTX
,
1882 .arg2_type
= ARG_CONST_MAP_PTR
,
1883 .arg3_type
= ARG_ANYTHING
,
1884 .arg4_type
= ARG_ANYTHING
,
1887 BPF_CALL_1(bpf_get_cgroup_classid
, const struct sk_buff
*, skb
)
1889 return task_get_classid(skb
);
1892 static const struct bpf_func_proto bpf_get_cgroup_classid_proto
= {
1893 .func
= bpf_get_cgroup_classid
,
1895 .ret_type
= RET_INTEGER
,
1896 .arg1_type
= ARG_PTR_TO_CTX
,
1899 BPF_CALL_1(bpf_get_route_realm
, const struct sk_buff
*, skb
)
1901 return dst_tclassid(skb
);
1904 static const struct bpf_func_proto bpf_get_route_realm_proto
= {
1905 .func
= bpf_get_route_realm
,
1907 .ret_type
= RET_INTEGER
,
1908 .arg1_type
= ARG_PTR_TO_CTX
,
1911 BPF_CALL_1(bpf_get_hash_recalc
, struct sk_buff
*, skb
)
1913 /* If skb_clear_hash() was called due to mangling, we can
1914 * trigger SW recalculation here. Later access to hash
1915 * can then use the inline skb->hash via context directly
1916 * instead of calling this helper again.
1918 return skb_get_hash(skb
);
1921 static const struct bpf_func_proto bpf_get_hash_recalc_proto
= {
1922 .func
= bpf_get_hash_recalc
,
1924 .ret_type
= RET_INTEGER
,
1925 .arg1_type
= ARG_PTR_TO_CTX
,
1928 BPF_CALL_1(bpf_set_hash_invalid
, struct sk_buff
*, skb
)
1930 /* After all direct packet write, this can be used once for
1931 * triggering a lazy recalc on next skb_get_hash() invocation.
1933 skb_clear_hash(skb
);
1937 static const struct bpf_func_proto bpf_set_hash_invalid_proto
= {
1938 .func
= bpf_set_hash_invalid
,
1940 .ret_type
= RET_INTEGER
,
1941 .arg1_type
= ARG_PTR_TO_CTX
,
1944 BPF_CALL_2(bpf_set_hash
, struct sk_buff
*, skb
, u32
, hash
)
1946 /* Set user specified hash as L4(+), so that it gets returned
1947 * on skb_get_hash() call unless BPF prog later on triggers a
1950 __skb_set_sw_hash(skb
, hash
, true);
1954 static const struct bpf_func_proto bpf_set_hash_proto
= {
1955 .func
= bpf_set_hash
,
1957 .ret_type
= RET_INTEGER
,
1958 .arg1_type
= ARG_PTR_TO_CTX
,
1959 .arg2_type
= ARG_ANYTHING
,
1962 BPF_CALL_3(bpf_skb_vlan_push
, struct sk_buff
*, skb
, __be16
, vlan_proto
,
1967 if (unlikely(vlan_proto
!= htons(ETH_P_8021Q
) &&
1968 vlan_proto
!= htons(ETH_P_8021AD
)))
1969 vlan_proto
= htons(ETH_P_8021Q
);
1971 bpf_push_mac_rcsum(skb
);
1972 ret
= skb_vlan_push(skb
, vlan_proto
, vlan_tci
);
1973 bpf_pull_mac_rcsum(skb
);
1975 bpf_compute_data_pointers(skb
);
1979 const struct bpf_func_proto bpf_skb_vlan_push_proto
= {
1980 .func
= bpf_skb_vlan_push
,
1982 .ret_type
= RET_INTEGER
,
1983 .arg1_type
= ARG_PTR_TO_CTX
,
1984 .arg2_type
= ARG_ANYTHING
,
1985 .arg3_type
= ARG_ANYTHING
,
1987 EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto
);
1989 BPF_CALL_1(bpf_skb_vlan_pop
, struct sk_buff
*, skb
)
1993 bpf_push_mac_rcsum(skb
);
1994 ret
= skb_vlan_pop(skb
);
1995 bpf_pull_mac_rcsum(skb
);
1997 bpf_compute_data_pointers(skb
);
2001 const struct bpf_func_proto bpf_skb_vlan_pop_proto
= {
2002 .func
= bpf_skb_vlan_pop
,
2004 .ret_type
= RET_INTEGER
,
2005 .arg1_type
= ARG_PTR_TO_CTX
,
2007 EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto
);
2009 static int bpf_skb_generic_push(struct sk_buff
*skb
, u32 off
, u32 len
)
2011 /* Caller already did skb_cow() with len as headroom,
2012 * so no need to do it here.
2015 memmove(skb
->data
, skb
->data
+ len
, off
);
2016 memset(skb
->data
+ off
, 0, len
);
2018 /* No skb_postpush_rcsum(skb, skb->data + off, len)
2019 * needed here as it does not change the skb->csum
2020 * result for checksum complete when summing over
2026 static int bpf_skb_generic_pop(struct sk_buff
*skb
, u32 off
, u32 len
)
2028 /* skb_ensure_writable() is not needed here, as we're
2029 * already working on an uncloned skb.
2031 if (unlikely(!pskb_may_pull(skb
, off
+ len
)))
2034 skb_postpull_rcsum(skb
, skb
->data
+ off
, len
);
2035 memmove(skb
->data
+ len
, skb
->data
, off
);
2036 __skb_pull(skb
, len
);
2041 static int bpf_skb_net_hdr_push(struct sk_buff
*skb
, u32 off
, u32 len
)
2043 bool trans_same
= skb
->transport_header
== skb
->network_header
;
2046 /* There's no need for __skb_push()/__skb_pull() pair to
2047 * get to the start of the mac header as we're guaranteed
2048 * to always start from here under eBPF.
2050 ret
= bpf_skb_generic_push(skb
, off
, len
);
2052 skb
->mac_header
-= len
;
2053 skb
->network_header
-= len
;
2055 skb
->transport_header
= skb
->network_header
;
2061 static int bpf_skb_net_hdr_pop(struct sk_buff
*skb
, u32 off
, u32 len
)
2063 bool trans_same
= skb
->transport_header
== skb
->network_header
;
2066 /* Same here, __skb_push()/__skb_pull() pair not needed. */
2067 ret
= bpf_skb_generic_pop(skb
, off
, len
);
2069 skb
->mac_header
+= len
;
2070 skb
->network_header
+= len
;
2072 skb
->transport_header
= skb
->network_header
;
2078 static int bpf_skb_proto_4_to_6(struct sk_buff
*skb
)
2080 const u32 len_diff
= sizeof(struct ipv6hdr
) - sizeof(struct iphdr
);
2081 u32 off
= skb_mac_header_len(skb
);
2084 ret
= skb_cow(skb
, len_diff
);
2085 if (unlikely(ret
< 0))
2088 ret
= bpf_skb_net_hdr_push(skb
, off
, len_diff
);
2089 if (unlikely(ret
< 0))
2092 if (skb_is_gso(skb
)) {
2093 /* SKB_GSO_TCPV4 needs to be changed into
2096 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
) {
2097 skb_shinfo(skb
)->gso_type
&= ~SKB_GSO_TCPV4
;
2098 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCPV6
;
2101 /* Due to IPv6 header, MSS needs to be downgraded. */
2102 skb_shinfo(skb
)->gso_size
-= len_diff
;
2103 /* Header must be checked, and gso_segs recomputed. */
2104 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2105 skb_shinfo(skb
)->gso_segs
= 0;
2108 skb
->protocol
= htons(ETH_P_IPV6
);
2109 skb_clear_hash(skb
);
2114 static int bpf_skb_proto_6_to_4(struct sk_buff
*skb
)
2116 const u32 len_diff
= sizeof(struct ipv6hdr
) - sizeof(struct iphdr
);
2117 u32 off
= skb_mac_header_len(skb
);
2120 ret
= skb_unclone(skb
, GFP_ATOMIC
);
2121 if (unlikely(ret
< 0))
2124 ret
= bpf_skb_net_hdr_pop(skb
, off
, len_diff
);
2125 if (unlikely(ret
< 0))
2128 if (skb_is_gso(skb
)) {
2129 /* SKB_GSO_TCPV6 needs to be changed into
2132 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
) {
2133 skb_shinfo(skb
)->gso_type
&= ~SKB_GSO_TCPV6
;
2134 skb_shinfo(skb
)->gso_type
|= SKB_GSO_TCPV4
;
2137 /* Due to IPv4 header, MSS can be upgraded. */
2138 skb_shinfo(skb
)->gso_size
+= len_diff
;
2139 /* Header must be checked, and gso_segs recomputed. */
2140 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2141 skb_shinfo(skb
)->gso_segs
= 0;
2144 skb
->protocol
= htons(ETH_P_IP
);
2145 skb_clear_hash(skb
);
2150 static int bpf_skb_proto_xlat(struct sk_buff
*skb
, __be16 to_proto
)
2152 __be16 from_proto
= skb
->protocol
;
2154 if (from_proto
== htons(ETH_P_IP
) &&
2155 to_proto
== htons(ETH_P_IPV6
))
2156 return bpf_skb_proto_4_to_6(skb
);
2158 if (from_proto
== htons(ETH_P_IPV6
) &&
2159 to_proto
== htons(ETH_P_IP
))
2160 return bpf_skb_proto_6_to_4(skb
);
2165 BPF_CALL_3(bpf_skb_change_proto
, struct sk_buff
*, skb
, __be16
, proto
,
2170 if (unlikely(flags
))
2173 /* General idea is that this helper does the basic groundwork
2174 * needed for changing the protocol, and eBPF program fills the
2175 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
2176 * and other helpers, rather than passing a raw buffer here.
2178 * The rationale is to keep this minimal and without a need to
2179 * deal with raw packet data. F.e. even if we would pass buffers
2180 * here, the program still needs to call the bpf_lX_csum_replace()
2181 * helpers anyway. Plus, this way we keep also separation of
2182 * concerns, since f.e. bpf_skb_store_bytes() should only take
2185 * Currently, additional options and extension header space are
2186 * not supported, but flags register is reserved so we can adapt
2187 * that. For offloads, we mark packet as dodgy, so that headers
2188 * need to be verified first.
2190 ret
= bpf_skb_proto_xlat(skb
, proto
);
2191 bpf_compute_data_pointers(skb
);
2195 static const struct bpf_func_proto bpf_skb_change_proto_proto
= {
2196 .func
= bpf_skb_change_proto
,
2198 .ret_type
= RET_INTEGER
,
2199 .arg1_type
= ARG_PTR_TO_CTX
,
2200 .arg2_type
= ARG_ANYTHING
,
2201 .arg3_type
= ARG_ANYTHING
,
2204 BPF_CALL_2(bpf_skb_change_type
, struct sk_buff
*, skb
, u32
, pkt_type
)
2206 /* We only allow a restricted subset to be changed for now. */
2207 if (unlikely(!skb_pkt_type_ok(skb
->pkt_type
) ||
2208 !skb_pkt_type_ok(pkt_type
)))
2211 skb
->pkt_type
= pkt_type
;
2215 static const struct bpf_func_proto bpf_skb_change_type_proto
= {
2216 .func
= bpf_skb_change_type
,
2218 .ret_type
= RET_INTEGER
,
2219 .arg1_type
= ARG_PTR_TO_CTX
,
2220 .arg2_type
= ARG_ANYTHING
,
2223 static u32
bpf_skb_net_base_len(const struct sk_buff
*skb
)
2225 switch (skb
->protocol
) {
2226 case htons(ETH_P_IP
):
2227 return sizeof(struct iphdr
);
2228 case htons(ETH_P_IPV6
):
2229 return sizeof(struct ipv6hdr
);
2235 static int bpf_skb_net_grow(struct sk_buff
*skb
, u32 len_diff
)
2237 u32 off
= skb_mac_header_len(skb
) + bpf_skb_net_base_len(skb
);
2240 ret
= skb_cow(skb
, len_diff
);
2241 if (unlikely(ret
< 0))
2244 ret
= bpf_skb_net_hdr_push(skb
, off
, len_diff
);
2245 if (unlikely(ret
< 0))
2248 if (skb_is_gso(skb
)) {
2249 /* Due to header grow, MSS needs to be downgraded. */
2250 skb_shinfo(skb
)->gso_size
-= len_diff
;
2251 /* Header must be checked, and gso_segs recomputed. */
2252 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2253 skb_shinfo(skb
)->gso_segs
= 0;
2259 static int bpf_skb_net_shrink(struct sk_buff
*skb
, u32 len_diff
)
2261 u32 off
= skb_mac_header_len(skb
) + bpf_skb_net_base_len(skb
);
2264 ret
= skb_unclone(skb
, GFP_ATOMIC
);
2265 if (unlikely(ret
< 0))
2268 ret
= bpf_skb_net_hdr_pop(skb
, off
, len_diff
);
2269 if (unlikely(ret
< 0))
2272 if (skb_is_gso(skb
)) {
2273 /* Due to header shrink, MSS can be upgraded. */
2274 skb_shinfo(skb
)->gso_size
+= len_diff
;
2275 /* Header must be checked, and gso_segs recomputed. */
2276 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2277 skb_shinfo(skb
)->gso_segs
= 0;
2283 static u32
__bpf_skb_max_len(const struct sk_buff
*skb
)
2285 return skb
->dev
? skb
->dev
->mtu
+ skb
->dev
->hard_header_len
:
2289 static int bpf_skb_adjust_net(struct sk_buff
*skb
, s32 len_diff
)
2291 bool trans_same
= skb
->transport_header
== skb
->network_header
;
2292 u32 len_cur
, len_diff_abs
= abs(len_diff
);
2293 u32 len_min
= bpf_skb_net_base_len(skb
);
2294 u32 len_max
= __bpf_skb_max_len(skb
);
2295 __be16 proto
= skb
->protocol
;
2296 bool shrink
= len_diff
< 0;
2299 if (unlikely(len_diff_abs
> 0xfffU
))
2301 if (unlikely(proto
!= htons(ETH_P_IP
) &&
2302 proto
!= htons(ETH_P_IPV6
)))
2305 len_cur
= skb
->len
- skb_network_offset(skb
);
2306 if (skb_transport_header_was_set(skb
) && !trans_same
)
2307 len_cur
= skb_network_header_len(skb
);
2308 if ((shrink
&& (len_diff_abs
>= len_cur
||
2309 len_cur
- len_diff_abs
< len_min
)) ||
2310 (!shrink
&& (skb
->len
+ len_diff_abs
> len_max
&&
2314 ret
= shrink
? bpf_skb_net_shrink(skb
, len_diff_abs
) :
2315 bpf_skb_net_grow(skb
, len_diff_abs
);
2317 bpf_compute_data_pointers(skb
);
2321 BPF_CALL_4(bpf_skb_adjust_room
, struct sk_buff
*, skb
, s32
, len_diff
,
2322 u32
, mode
, u64
, flags
)
2324 if (unlikely(flags
))
2326 if (likely(mode
== BPF_ADJ_ROOM_NET
))
2327 return bpf_skb_adjust_net(skb
, len_diff
);
2332 static const struct bpf_func_proto bpf_skb_adjust_room_proto
= {
2333 .func
= bpf_skb_adjust_room
,
2335 .ret_type
= RET_INTEGER
,
2336 .arg1_type
= ARG_PTR_TO_CTX
,
2337 .arg2_type
= ARG_ANYTHING
,
2338 .arg3_type
= ARG_ANYTHING
,
2339 .arg4_type
= ARG_ANYTHING
,
2342 static u32
__bpf_skb_min_len(const struct sk_buff
*skb
)
2344 u32 min_len
= skb_network_offset(skb
);
2346 if (skb_transport_header_was_set(skb
))
2347 min_len
= skb_transport_offset(skb
);
2348 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2349 min_len
= skb_checksum_start_offset(skb
) +
2350 skb
->csum_offset
+ sizeof(__sum16
);
2354 static int bpf_skb_grow_rcsum(struct sk_buff
*skb
, unsigned int new_len
)
2356 unsigned int old_len
= skb
->len
;
2359 ret
= __skb_grow_rcsum(skb
, new_len
);
2361 memset(skb
->data
+ old_len
, 0, new_len
- old_len
);
2365 static int bpf_skb_trim_rcsum(struct sk_buff
*skb
, unsigned int new_len
)
2367 return __skb_trim_rcsum(skb
, new_len
);
2370 BPF_CALL_3(bpf_skb_change_tail
, struct sk_buff
*, skb
, u32
, new_len
,
2373 u32 max_len
= __bpf_skb_max_len(skb
);
2374 u32 min_len
= __bpf_skb_min_len(skb
);
2377 if (unlikely(flags
|| new_len
> max_len
|| new_len
< min_len
))
2379 if (skb
->encapsulation
)
2382 /* The basic idea of this helper is that it's performing the
2383 * needed work to either grow or trim an skb, and eBPF program
2384 * rewrites the rest via helpers like bpf_skb_store_bytes(),
2385 * bpf_lX_csum_replace() and others rather than passing a raw
2386 * buffer here. This one is a slow path helper and intended
2387 * for replies with control messages.
2389 * Like in bpf_skb_change_proto(), we want to keep this rather
2390 * minimal and without protocol specifics so that we are able
2391 * to separate concerns as in bpf_skb_store_bytes() should only
2392 * be the one responsible for writing buffers.
2394 * It's really expected to be a slow path operation here for
2395 * control message replies, so we're implicitly linearizing,
2396 * uncloning and drop offloads from the skb by this.
2398 ret
= __bpf_try_make_writable(skb
, skb
->len
);
2400 if (new_len
> skb
->len
)
2401 ret
= bpf_skb_grow_rcsum(skb
, new_len
);
2402 else if (new_len
< skb
->len
)
2403 ret
= bpf_skb_trim_rcsum(skb
, new_len
);
2404 if (!ret
&& skb_is_gso(skb
))
2408 bpf_compute_data_pointers(skb
);
2412 static const struct bpf_func_proto bpf_skb_change_tail_proto
= {
2413 .func
= bpf_skb_change_tail
,
2415 .ret_type
= RET_INTEGER
,
2416 .arg1_type
= ARG_PTR_TO_CTX
,
2417 .arg2_type
= ARG_ANYTHING
,
2418 .arg3_type
= ARG_ANYTHING
,
2421 BPF_CALL_3(bpf_skb_change_head
, struct sk_buff
*, skb
, u32
, head_room
,
2424 u32 max_len
= __bpf_skb_max_len(skb
);
2425 u32 new_len
= skb
->len
+ head_room
;
2428 if (unlikely(flags
|| (!skb_is_gso(skb
) && new_len
> max_len
) ||
2429 new_len
< skb
->len
))
2432 ret
= skb_cow(skb
, head_room
);
2434 /* Idea for this helper is that we currently only
2435 * allow to expand on mac header. This means that
2436 * skb->protocol network header, etc, stay as is.
2437 * Compared to bpf_skb_change_tail(), we're more
2438 * flexible due to not needing to linearize or
2439 * reset GSO. Intention for this helper is to be
2440 * used by an L3 skb that needs to push mac header
2441 * for redirection into L2 device.
2443 __skb_push(skb
, head_room
);
2444 memset(skb
->data
, 0, head_room
);
2445 skb_reset_mac_header(skb
);
2448 bpf_compute_data_pointers(skb
);
2452 static const struct bpf_func_proto bpf_skb_change_head_proto
= {
2453 .func
= bpf_skb_change_head
,
2455 .ret_type
= RET_INTEGER
,
2456 .arg1_type
= ARG_PTR_TO_CTX
,
2457 .arg2_type
= ARG_ANYTHING
,
2458 .arg3_type
= ARG_ANYTHING
,
2461 static unsigned long xdp_get_metalen(const struct xdp_buff
*xdp
)
2463 return xdp_data_meta_unsupported(xdp
) ? 0 :
2464 xdp
->data
- xdp
->data_meta
;
2467 BPF_CALL_2(bpf_xdp_adjust_head
, struct xdp_buff
*, xdp
, int, offset
)
2469 unsigned long metalen
= xdp_get_metalen(xdp
);
2470 void *data_start
= xdp
->data_hard_start
+ metalen
;
2471 void *data
= xdp
->data
+ offset
;
2473 if (unlikely(data
< data_start
||
2474 data
> xdp
->data_end
- ETH_HLEN
))
2478 memmove(xdp
->data_meta
+ offset
,
2479 xdp
->data_meta
, metalen
);
2480 xdp
->data_meta
+= offset
;
2486 static const struct bpf_func_proto bpf_xdp_adjust_head_proto
= {
2487 .func
= bpf_xdp_adjust_head
,
2489 .ret_type
= RET_INTEGER
,
2490 .arg1_type
= ARG_PTR_TO_CTX
,
2491 .arg2_type
= ARG_ANYTHING
,
2494 BPF_CALL_2(bpf_xdp_adjust_meta
, struct xdp_buff
*, xdp
, int, offset
)
2496 void *meta
= xdp
->data_meta
+ offset
;
2497 unsigned long metalen
= xdp
->data
- meta
;
2499 if (xdp_data_meta_unsupported(xdp
))
2501 if (unlikely(meta
< xdp
->data_hard_start
||
2504 if (unlikely((metalen
& (sizeof(__u32
) - 1)) ||
2508 xdp
->data_meta
= meta
;
2513 static const struct bpf_func_proto bpf_xdp_adjust_meta_proto
= {
2514 .func
= bpf_xdp_adjust_meta
,
2516 .ret_type
= RET_INTEGER
,
2517 .arg1_type
= ARG_PTR_TO_CTX
,
2518 .arg2_type
= ARG_ANYTHING
,
2521 static int __bpf_tx_xdp(struct net_device
*dev
,
2522 struct bpf_map
*map
,
2523 struct xdp_buff
*xdp
,
2528 if (!dev
->netdev_ops
->ndo_xdp_xmit
) {
2532 err
= dev
->netdev_ops
->ndo_xdp_xmit(dev
, xdp
);
2535 dev
->netdev_ops
->ndo_xdp_flush(dev
);
2539 static int __bpf_tx_xdp_map(struct net_device
*dev_rx
, void *fwd
,
2540 struct bpf_map
*map
,
2541 struct xdp_buff
*xdp
,
2546 if (map
->map_type
== BPF_MAP_TYPE_DEVMAP
) {
2547 struct net_device
*dev
= fwd
;
2549 if (!dev
->netdev_ops
->ndo_xdp_xmit
)
2552 err
= dev
->netdev_ops
->ndo_xdp_xmit(dev
, xdp
);
2555 __dev_map_insert_ctx(map
, index
);
2557 } else if (map
->map_type
== BPF_MAP_TYPE_CPUMAP
) {
2558 struct bpf_cpu_map_entry
*rcpu
= fwd
;
2560 err
= cpu_map_enqueue(rcpu
, xdp
, dev_rx
);
2563 __cpu_map_insert_ctx(map
, index
);
2568 void xdp_do_flush_map(void)
2570 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
2571 struct bpf_map
*map
= ri
->map_to_flush
;
2573 ri
->map_to_flush
= NULL
;
2575 switch (map
->map_type
) {
2576 case BPF_MAP_TYPE_DEVMAP
:
2577 __dev_map_flush(map
);
2579 case BPF_MAP_TYPE_CPUMAP
:
2580 __cpu_map_flush(map
);
2587 EXPORT_SYMBOL_GPL(xdp_do_flush_map
);
2589 static void *__xdp_map_lookup_elem(struct bpf_map
*map
, u32 index
)
2591 switch (map
->map_type
) {
2592 case BPF_MAP_TYPE_DEVMAP
:
2593 return __dev_map_lookup_elem(map
, index
);
2594 case BPF_MAP_TYPE_CPUMAP
:
2595 return __cpu_map_lookup_elem(map
, index
);
2601 static inline bool xdp_map_invalid(const struct bpf_prog
*xdp_prog
,
2604 return (unsigned long)xdp_prog
->aux
!= aux
;
2607 static int xdp_do_redirect_map(struct net_device
*dev
, struct xdp_buff
*xdp
,
2608 struct bpf_prog
*xdp_prog
)
2610 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
2611 unsigned long map_owner
= ri
->map_owner
;
2612 struct bpf_map
*map
= ri
->map
;
2613 u32 index
= ri
->ifindex
;
2621 if (unlikely(xdp_map_invalid(xdp_prog
, map_owner
))) {
2627 fwd
= __xdp_map_lookup_elem(map
, index
);
2632 if (ri
->map_to_flush
&& ri
->map_to_flush
!= map
)
2635 err
= __bpf_tx_xdp_map(dev
, fwd
, map
, xdp
, index
);
2639 ri
->map_to_flush
= map
;
2640 _trace_xdp_redirect_map(dev
, xdp_prog
, fwd
, map
, index
);
2643 _trace_xdp_redirect_map_err(dev
, xdp_prog
, fwd
, map
, index
, err
);
2647 int xdp_do_redirect(struct net_device
*dev
, struct xdp_buff
*xdp
,
2648 struct bpf_prog
*xdp_prog
)
2650 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
2651 struct net_device
*fwd
;
2652 u32 index
= ri
->ifindex
;
2656 return xdp_do_redirect_map(dev
, xdp
, xdp_prog
);
2658 fwd
= dev_get_by_index_rcu(dev_net(dev
), index
);
2660 if (unlikely(!fwd
)) {
2665 err
= __bpf_tx_xdp(fwd
, NULL
, xdp
, 0);
2669 _trace_xdp_redirect(dev
, xdp_prog
, index
);
2672 _trace_xdp_redirect_err(dev
, xdp_prog
, index
, err
);
2675 EXPORT_SYMBOL_GPL(xdp_do_redirect
);
2677 static int __xdp_generic_ok_fwd_dev(struct sk_buff
*skb
, struct net_device
*fwd
)
2681 if (unlikely(!(fwd
->flags
& IFF_UP
)))
2684 len
= fwd
->mtu
+ fwd
->hard_header_len
+ VLAN_HLEN
;
2691 int xdp_do_generic_redirect_map(struct net_device
*dev
, struct sk_buff
*skb
,
2692 struct bpf_prog
*xdp_prog
)
2694 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
2695 unsigned long map_owner
= ri
->map_owner
;
2696 struct bpf_map
*map
= ri
->map
;
2697 struct net_device
*fwd
= NULL
;
2698 u32 index
= ri
->ifindex
;
2705 if (unlikely(xdp_map_invalid(xdp_prog
, map_owner
))) {
2710 fwd
= __xdp_map_lookup_elem(map
, index
);
2711 if (unlikely(!fwd
)) {
2716 if (map
->map_type
== BPF_MAP_TYPE_DEVMAP
) {
2717 if (unlikely((err
= __xdp_generic_ok_fwd_dev(skb
, fwd
))))
2721 /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
2726 _trace_xdp_redirect_map(dev
, xdp_prog
, fwd
, map
, index
);
2729 _trace_xdp_redirect_map_err(dev
, xdp_prog
, fwd
, map
, index
, err
);
2733 int xdp_do_generic_redirect(struct net_device
*dev
, struct sk_buff
*skb
,
2734 struct bpf_prog
*xdp_prog
)
2736 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
2737 u32 index
= ri
->ifindex
;
2738 struct net_device
*fwd
;
2742 return xdp_do_generic_redirect_map(dev
, skb
, xdp_prog
);
2745 fwd
= dev_get_by_index_rcu(dev_net(dev
), index
);
2746 if (unlikely(!fwd
)) {
2751 if (unlikely((err
= __xdp_generic_ok_fwd_dev(skb
, fwd
))))
2755 _trace_xdp_redirect(dev
, xdp_prog
, index
);
2758 _trace_xdp_redirect_err(dev
, xdp_prog
, index
, err
);
2761 EXPORT_SYMBOL_GPL(xdp_do_generic_redirect
);
2763 BPF_CALL_2(bpf_xdp_redirect
, u32
, ifindex
, u64
, flags
)
2765 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
2767 if (unlikely(flags
))
2770 ri
->ifindex
= ifindex
;
2775 return XDP_REDIRECT
;
2778 static const struct bpf_func_proto bpf_xdp_redirect_proto
= {
2779 .func
= bpf_xdp_redirect
,
2781 .ret_type
= RET_INTEGER
,
2782 .arg1_type
= ARG_ANYTHING
,
2783 .arg2_type
= ARG_ANYTHING
,
2786 BPF_CALL_4(bpf_xdp_redirect_map
, struct bpf_map
*, map
, u32
, ifindex
, u64
, flags
,
2787 unsigned long, map_owner
)
2789 struct redirect_info
*ri
= this_cpu_ptr(&redirect_info
);
2791 if (unlikely(flags
))
2794 ri
->ifindex
= ifindex
;
2797 ri
->map_owner
= map_owner
;
2799 return XDP_REDIRECT
;
2802 /* Note, arg4 is hidden from users and populated by the verifier
2803 * with the right pointer.
2805 static const struct bpf_func_proto bpf_xdp_redirect_map_proto
= {
2806 .func
= bpf_xdp_redirect_map
,
2808 .ret_type
= RET_INTEGER
,
2809 .arg1_type
= ARG_CONST_MAP_PTR
,
2810 .arg2_type
= ARG_ANYTHING
,
2811 .arg3_type
= ARG_ANYTHING
,
2814 bool bpf_helper_changes_pkt_data(void *func
)
2816 if (func
== bpf_skb_vlan_push
||
2817 func
== bpf_skb_vlan_pop
||
2818 func
== bpf_skb_store_bytes
||
2819 func
== bpf_skb_change_proto
||
2820 func
== bpf_skb_change_head
||
2821 func
== bpf_skb_change_tail
||
2822 func
== bpf_skb_adjust_room
||
2823 func
== bpf_skb_pull_data
||
2824 func
== bpf_clone_redirect
||
2825 func
== bpf_l3_csum_replace
||
2826 func
== bpf_l4_csum_replace
||
2827 func
== bpf_xdp_adjust_head
||
2828 func
== bpf_xdp_adjust_meta
)
/* Copy callback for bpf_event_output() on skbs: linearize via
 * skb_header_pointer(); only memcpy when the data was not already staged
 * into dst_buff. Returns 0 on success, len when the range is unavailable.
 */
static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
				  unsigned long off, unsigned long len)
{
	void *ptr = skb_header_pointer(skb, off, len, dst_buff);

	if (unlikely(!ptr))
		return len;
	if (ptr != dst_buff)
		memcpy(dst_buff, ptr, len);

	return 0;
}
2847 BPF_CALL_5(bpf_skb_event_output
, struct sk_buff
*, skb
, struct bpf_map
*, map
,
2848 u64
, flags
, void *, meta
, u64
, meta_size
)
2850 u64 skb_size
= (flags
& BPF_F_CTXLEN_MASK
) >> 32;
2852 if (unlikely(flags
& ~(BPF_F_CTXLEN_MASK
| BPF_F_INDEX_MASK
)))
2854 if (unlikely(skb_size
> skb
->len
))
2857 return bpf_event_output(map
, flags
, meta
, meta_size
, skb
, skb_size
,
2861 static const struct bpf_func_proto bpf_skb_event_output_proto
= {
2862 .func
= bpf_skb_event_output
,
2864 .ret_type
= RET_INTEGER
,
2865 .arg1_type
= ARG_PTR_TO_CTX
,
2866 .arg2_type
= ARG_CONST_MAP_PTR
,
2867 .arg3_type
= ARG_ANYTHING
,
2868 .arg4_type
= ARG_PTR_TO_MEM
,
2869 .arg5_type
= ARG_CONST_SIZE
,
2872 static unsigned short bpf_tunnel_key_af(u64 flags
)
2874 return flags
& BPF_F_TUNINFO_IPV6
? AF_INET6
: AF_INET
;
2877 BPF_CALL_4(bpf_skb_get_tunnel_key
, struct sk_buff
*, skb
, struct bpf_tunnel_key
*, to
,
2878 u32
, size
, u64
, flags
)
2880 const struct ip_tunnel_info
*info
= skb_tunnel_info(skb
);
2881 u8 compat
[sizeof(struct bpf_tunnel_key
)];
2885 if (unlikely(!info
|| (flags
& ~(BPF_F_TUNINFO_IPV6
)))) {
2889 if (ip_tunnel_info_af(info
) != bpf_tunnel_key_af(flags
)) {
2893 if (unlikely(size
!= sizeof(struct bpf_tunnel_key
))) {
2896 case offsetof(struct bpf_tunnel_key
, tunnel_label
):
2897 case offsetof(struct bpf_tunnel_key
, tunnel_ext
):
2899 case offsetof(struct bpf_tunnel_key
, remote_ipv6
[1]):
2900 /* Fixup deprecated structure layouts here, so we have
2901 * a common path later on.
2903 if (ip_tunnel_info_af(info
) != AF_INET
)
2906 to
= (struct bpf_tunnel_key
*)compat
;
2913 to
->tunnel_id
= be64_to_cpu(info
->key
.tun_id
);
2914 to
->tunnel_tos
= info
->key
.tos
;
2915 to
->tunnel_ttl
= info
->key
.ttl
;
2917 if (flags
& BPF_F_TUNINFO_IPV6
) {
2918 memcpy(to
->remote_ipv6
, &info
->key
.u
.ipv6
.src
,
2919 sizeof(to
->remote_ipv6
));
2920 to
->tunnel_label
= be32_to_cpu(info
->key
.label
);
2922 to
->remote_ipv4
= be32_to_cpu(info
->key
.u
.ipv4
.src
);
2925 if (unlikely(size
!= sizeof(struct bpf_tunnel_key
)))
2926 memcpy(to_orig
, to
, size
);
2930 memset(to_orig
, 0, size
);
2934 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto
= {
2935 .func
= bpf_skb_get_tunnel_key
,
2937 .ret_type
= RET_INTEGER
,
2938 .arg1_type
= ARG_PTR_TO_CTX
,
2939 .arg2_type
= ARG_PTR_TO_UNINIT_MEM
,
2940 .arg3_type
= ARG_CONST_SIZE
,
2941 .arg4_type
= ARG_ANYTHING
,
2944 BPF_CALL_3(bpf_skb_get_tunnel_opt
, struct sk_buff
*, skb
, u8
*, to
, u32
, size
)
2946 const struct ip_tunnel_info
*info
= skb_tunnel_info(skb
);
2949 if (unlikely(!info
||
2950 !(info
->key
.tun_flags
& TUNNEL_OPTIONS_PRESENT
))) {
2954 if (unlikely(size
< info
->options_len
)) {
2959 ip_tunnel_info_opts_get(to
, info
);
2960 if (size
> info
->options_len
)
2961 memset(to
+ info
->options_len
, 0, size
- info
->options_len
);
2963 return info
->options_len
;
2965 memset(to
, 0, size
);
2969 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto
= {
2970 .func
= bpf_skb_get_tunnel_opt
,
2972 .ret_type
= RET_INTEGER
,
2973 .arg1_type
= ARG_PTR_TO_CTX
,
2974 .arg2_type
= ARG_PTR_TO_UNINIT_MEM
,
2975 .arg3_type
= ARG_CONST_SIZE
,
2978 static struct metadata_dst __percpu
*md_dst
;
2980 BPF_CALL_4(bpf_skb_set_tunnel_key
, struct sk_buff
*, skb
,
2981 const struct bpf_tunnel_key
*, from
, u32
, size
, u64
, flags
)
2983 struct metadata_dst
*md
= this_cpu_ptr(md_dst
);
2984 u8 compat
[sizeof(struct bpf_tunnel_key
)];
2985 struct ip_tunnel_info
*info
;
2987 if (unlikely(flags
& ~(BPF_F_TUNINFO_IPV6
| BPF_F_ZERO_CSUM_TX
|
2988 BPF_F_DONT_FRAGMENT
)))
2990 if (unlikely(size
!= sizeof(struct bpf_tunnel_key
))) {
2992 case offsetof(struct bpf_tunnel_key
, tunnel_label
):
2993 case offsetof(struct bpf_tunnel_key
, tunnel_ext
):
2994 case offsetof(struct bpf_tunnel_key
, remote_ipv6
[1]):
2995 /* Fixup deprecated structure layouts here, so we have
2996 * a common path later on.
2998 memcpy(compat
, from
, size
);
2999 memset(compat
+ size
, 0, sizeof(compat
) - size
);
3000 from
= (const struct bpf_tunnel_key
*) compat
;
3006 if (unlikely((!(flags
& BPF_F_TUNINFO_IPV6
) && from
->tunnel_label
) ||
3011 dst_hold((struct dst_entry
*) md
);
3012 skb_dst_set(skb
, (struct dst_entry
*) md
);
3014 info
= &md
->u
.tun_info
;
3015 info
->mode
= IP_TUNNEL_INFO_TX
;
3017 info
->key
.tun_flags
= TUNNEL_KEY
| TUNNEL_CSUM
| TUNNEL_NOCACHE
;
3018 if (flags
& BPF_F_DONT_FRAGMENT
)
3019 info
->key
.tun_flags
|= TUNNEL_DONT_FRAGMENT
;
3021 info
->key
.tun_id
= cpu_to_be64(from
->tunnel_id
);
3022 info
->key
.tos
= from
->tunnel_tos
;
3023 info
->key
.ttl
= from
->tunnel_ttl
;
3025 if (flags
& BPF_F_TUNINFO_IPV6
) {
3026 info
->mode
|= IP_TUNNEL_INFO_IPV6
;
3027 memcpy(&info
->key
.u
.ipv6
.dst
, from
->remote_ipv6
,
3028 sizeof(from
->remote_ipv6
));
3029 info
->key
.label
= cpu_to_be32(from
->tunnel_label
) &
3030 IPV6_FLOWLABEL_MASK
;
3032 info
->key
.u
.ipv4
.dst
= cpu_to_be32(from
->remote_ipv4
);
3033 if (flags
& BPF_F_ZERO_CSUM_TX
)
3034 info
->key
.tun_flags
&= ~TUNNEL_CSUM
;
3040 static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto
= {
3041 .func
= bpf_skb_set_tunnel_key
,
3043 .ret_type
= RET_INTEGER
,
3044 .arg1_type
= ARG_PTR_TO_CTX
,
3045 .arg2_type
= ARG_PTR_TO_MEM
,
3046 .arg3_type
= ARG_CONST_SIZE
,
3047 .arg4_type
= ARG_ANYTHING
,
3050 BPF_CALL_3(bpf_skb_set_tunnel_opt
, struct sk_buff
*, skb
,
3051 const u8
*, from
, u32
, size
)
3053 struct ip_tunnel_info
*info
= skb_tunnel_info(skb
);
3054 const struct metadata_dst
*md
= this_cpu_ptr(md_dst
);
3056 if (unlikely(info
!= &md
->u
.tun_info
|| (size
& (sizeof(u32
) - 1))))
3058 if (unlikely(size
> IP_TUNNEL_OPTS_MAX
))
3061 ip_tunnel_info_opts_set(info
, from
, size
);
3066 static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto
= {
3067 .func
= bpf_skb_set_tunnel_opt
,
3069 .ret_type
= RET_INTEGER
,
3070 .arg1_type
= ARG_PTR_TO_CTX
,
3071 .arg2_type
= ARG_PTR_TO_MEM
,
3072 .arg3_type
= ARG_CONST_SIZE
,
3075 static const struct bpf_func_proto
*
3076 bpf_get_skb_set_tunnel_proto(enum bpf_func_id which
)
3079 struct metadata_dst __percpu
*tmp
;
3081 tmp
= metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX
,
3086 if (cmpxchg(&md_dst
, NULL
, tmp
))
3087 metadata_dst_free_percpu(tmp
);
3091 case BPF_FUNC_skb_set_tunnel_key
:
3092 return &bpf_skb_set_tunnel_key_proto
;
3093 case BPF_FUNC_skb_set_tunnel_opt
:
3094 return &bpf_skb_set_tunnel_opt_proto
;
3100 BPF_CALL_3(bpf_skb_under_cgroup
, struct sk_buff
*, skb
, struct bpf_map
*, map
,
3103 struct bpf_array
*array
= container_of(map
, struct bpf_array
, map
);
3104 struct cgroup
*cgrp
;
3107 sk
= skb_to_full_sk(skb
);
3108 if (!sk
|| !sk_fullsock(sk
))
3110 if (unlikely(idx
>= array
->map
.max_entries
))
3113 cgrp
= READ_ONCE(array
->ptrs
[idx
]);
3114 if (unlikely(!cgrp
))
3117 return sk_under_cgroup_hierarchy(sk
, cgrp
);
3120 static const struct bpf_func_proto bpf_skb_under_cgroup_proto
= {
3121 .func
= bpf_skb_under_cgroup
,
3123 .ret_type
= RET_INTEGER
,
3124 .arg1_type
= ARG_PTR_TO_CTX
,
3125 .arg2_type
= ARG_CONST_MAP_PTR
,
3126 .arg3_type
= ARG_ANYTHING
,
/* Copy callback for bpf_event_output() on XDP buffers: packet data is
 * linear, so a plain memcpy from the given offset suffices. Always 0.
 */
static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
				  unsigned long off, unsigned long len)
{
	memcpy(dst_buff, src_buff + off, len);
	return 0;
}
3136 BPF_CALL_5(bpf_xdp_event_output
, struct xdp_buff
*, xdp
, struct bpf_map
*, map
,
3137 u64
, flags
, void *, meta
, u64
, meta_size
)
3139 u64 xdp_size
= (flags
& BPF_F_CTXLEN_MASK
) >> 32;
3141 if (unlikely(flags
& ~(BPF_F_CTXLEN_MASK
| BPF_F_INDEX_MASK
)))
3143 if (unlikely(xdp_size
> (unsigned long)(xdp
->data_end
- xdp
->data
)))
3146 return bpf_event_output(map
, flags
, meta
, meta_size
, xdp
->data
,
3147 xdp_size
, bpf_xdp_copy
);
3150 static const struct bpf_func_proto bpf_xdp_event_output_proto
= {
3151 .func
= bpf_xdp_event_output
,
3153 .ret_type
= RET_INTEGER
,
3154 .arg1_type
= ARG_PTR_TO_CTX
,
3155 .arg2_type
= ARG_CONST_MAP_PTR
,
3156 .arg3_type
= ARG_ANYTHING
,
3157 .arg4_type
= ARG_PTR_TO_MEM
,
3158 .arg5_type
= ARG_CONST_SIZE
,
3161 BPF_CALL_1(bpf_get_socket_cookie
, struct sk_buff
*, skb
)
3163 return skb
->sk
? sock_gen_cookie(skb
->sk
) : 0;
3166 static const struct bpf_func_proto bpf_get_socket_cookie_proto
= {
3167 .func
= bpf_get_socket_cookie
,
3169 .ret_type
= RET_INTEGER
,
3170 .arg1_type
= ARG_PTR_TO_CTX
,
3173 BPF_CALL_1(bpf_get_socket_uid
, struct sk_buff
*, skb
)
3175 struct sock
*sk
= sk_to_full_sk(skb
->sk
);
3178 if (!sk
|| !sk_fullsock(sk
))
3180 kuid
= sock_net_uid(sock_net(sk
), sk
);
3181 return from_kuid_munged(sock_net(sk
)->user_ns
, kuid
);
3184 static const struct bpf_func_proto bpf_get_socket_uid_proto
= {
3185 .func
= bpf_get_socket_uid
,
3187 .ret_type
= RET_INTEGER
,
3188 .arg1_type
= ARG_PTR_TO_CTX
,
3191 BPF_CALL_5(bpf_setsockopt
, struct bpf_sock_ops_kern
*, bpf_sock
,
3192 int, level
, int, optname
, char *, optval
, int, optlen
)
3194 struct sock
*sk
= bpf_sock
->sk
;
3198 if (!sk_fullsock(sk
))
3201 if (level
== SOL_SOCKET
) {
3202 if (optlen
!= sizeof(int))
3204 val
= *((int *)optval
);
3206 /* Only some socketops are supported */
3209 val
= min_t(u32
, val
, sysctl_rmem_max
);
3210 sk
->sk_userlocks
|= SOCK_RCVBUF_LOCK
;
3211 sk
->sk_rcvbuf
= max_t(int, val
* 2, SOCK_MIN_RCVBUF
);
3214 val
= min_t(u32
, val
, sysctl_wmem_max
);
3215 sk
->sk_userlocks
|= SOCK_SNDBUF_LOCK
;
3216 sk
->sk_sndbuf
= max_t(int, val
* 2, SOCK_MIN_SNDBUF
);
3218 case SO_MAX_PACING_RATE
:
3219 sk
->sk_max_pacing_rate
= val
;
3220 sk
->sk_pacing_rate
= min(sk
->sk_pacing_rate
,
3221 sk
->sk_max_pacing_rate
);
3224 sk
->sk_priority
= val
;
3229 sk
->sk_rcvlowat
= val
? : 1;
3232 if (sk
->sk_mark
!= val
) {
3241 } else if (level
== SOL_TCP
&&
3242 sk
->sk_prot
->setsockopt
== tcp_setsockopt
) {
3243 if (optname
== TCP_CONGESTION
) {
3244 char name
[TCP_CA_NAME_MAX
];
3245 bool reinit
= bpf_sock
->op
> BPF_SOCK_OPS_NEEDS_ECN
;
3247 strncpy(name
, optval
, min_t(long, optlen
,
3248 TCP_CA_NAME_MAX
-1));
3249 name
[TCP_CA_NAME_MAX
-1] = 0;
3250 ret
= tcp_set_congestion_control(sk
, name
, false,
3253 struct tcp_sock
*tp
= tcp_sk(sk
);
3255 if (optlen
!= sizeof(int))
3258 val
= *((int *)optval
);
3259 /* Only some options are supported */
3262 if (val
<= 0 || tp
->data_segs_out
> tp
->syn_data
)
3267 case TCP_BPF_SNDCWND_CLAMP
:
3271 tp
->snd_cwnd_clamp
= val
;
3272 tp
->snd_ssthresh
= val
;
3286 static const struct bpf_func_proto bpf_setsockopt_proto
= {
3287 .func
= bpf_setsockopt
,
3289 .ret_type
= RET_INTEGER
,
3290 .arg1_type
= ARG_PTR_TO_CTX
,
3291 .arg2_type
= ARG_ANYTHING
,
3292 .arg3_type
= ARG_ANYTHING
,
3293 .arg4_type
= ARG_PTR_TO_MEM
,
3294 .arg5_type
= ARG_CONST_SIZE
,
3297 BPF_CALL_5(bpf_getsockopt
, struct bpf_sock_ops_kern
*, bpf_sock
,
3298 int, level
, int, optname
, char *, optval
, int, optlen
)
3300 struct sock
*sk
= bpf_sock
->sk
;
3302 if (!sk_fullsock(sk
))
3306 if (level
== SOL_TCP
&& sk
->sk_prot
->getsockopt
== tcp_getsockopt
) {
3307 if (optname
== TCP_CONGESTION
) {
3308 struct inet_connection_sock
*icsk
= inet_csk(sk
);
3310 if (!icsk
->icsk_ca_ops
|| optlen
<= 1)
3312 strncpy(optval
, icsk
->icsk_ca_ops
->name
, optlen
);
3313 optval
[optlen
- 1] = 0;
3323 memset(optval
, 0, optlen
);
3327 static const struct bpf_func_proto bpf_getsockopt_proto
= {
3328 .func
= bpf_getsockopt
,
3330 .ret_type
= RET_INTEGER
,
3331 .arg1_type
= ARG_PTR_TO_CTX
,
3332 .arg2_type
= ARG_ANYTHING
,
3333 .arg3_type
= ARG_ANYTHING
,
3334 .arg4_type
= ARG_PTR_TO_UNINIT_MEM
,
3335 .arg5_type
= ARG_CONST_SIZE
,
3338 static const struct bpf_func_proto
*
3339 bpf_base_func_proto(enum bpf_func_id func_id
)
3342 case BPF_FUNC_map_lookup_elem
:
3343 return &bpf_map_lookup_elem_proto
;
3344 case BPF_FUNC_map_update_elem
:
3345 return &bpf_map_update_elem_proto
;
3346 case BPF_FUNC_map_delete_elem
:
3347 return &bpf_map_delete_elem_proto
;
3348 case BPF_FUNC_get_prandom_u32
:
3349 return &bpf_get_prandom_u32_proto
;
3350 case BPF_FUNC_get_smp_processor_id
:
3351 return &bpf_get_raw_smp_processor_id_proto
;
3352 case BPF_FUNC_get_numa_node_id
:
3353 return &bpf_get_numa_node_id_proto
;
3354 case BPF_FUNC_tail_call
:
3355 return &bpf_tail_call_proto
;
3356 case BPF_FUNC_ktime_get_ns
:
3357 return &bpf_ktime_get_ns_proto
;
3358 case BPF_FUNC_trace_printk
:
3359 if (capable(CAP_SYS_ADMIN
))
3360 return bpf_get_trace_printk_proto();
3366 static const struct bpf_func_proto
*
3367 sock_filter_func_proto(enum bpf_func_id func_id
)
3370 /* inet and inet6 sockets are created in a process
3371 * context so there is always a valid uid/gid
3373 case BPF_FUNC_get_current_uid_gid
:
3374 return &bpf_get_current_uid_gid_proto
;
3376 return bpf_base_func_proto(func_id
);
3380 static const struct bpf_func_proto
*
3381 sk_filter_func_proto(enum bpf_func_id func_id
)
3384 case BPF_FUNC_skb_load_bytes
:
3385 return &bpf_skb_load_bytes_proto
;
3386 case BPF_FUNC_get_socket_cookie
:
3387 return &bpf_get_socket_cookie_proto
;
3388 case BPF_FUNC_get_socket_uid
:
3389 return &bpf_get_socket_uid_proto
;
3391 return bpf_base_func_proto(func_id
);
3395 static const struct bpf_func_proto
*
3396 tc_cls_act_func_proto(enum bpf_func_id func_id
)
3399 case BPF_FUNC_skb_store_bytes
:
3400 return &bpf_skb_store_bytes_proto
;
3401 case BPF_FUNC_skb_load_bytes
:
3402 return &bpf_skb_load_bytes_proto
;
3403 case BPF_FUNC_skb_pull_data
:
3404 return &bpf_skb_pull_data_proto
;
3405 case BPF_FUNC_csum_diff
:
3406 return &bpf_csum_diff_proto
;
3407 case BPF_FUNC_csum_update
:
3408 return &bpf_csum_update_proto
;
3409 case BPF_FUNC_l3_csum_replace
:
3410 return &bpf_l3_csum_replace_proto
;
3411 case BPF_FUNC_l4_csum_replace
:
3412 return &bpf_l4_csum_replace_proto
;
3413 case BPF_FUNC_clone_redirect
:
3414 return &bpf_clone_redirect_proto
;
3415 case BPF_FUNC_get_cgroup_classid
:
3416 return &bpf_get_cgroup_classid_proto
;
3417 case BPF_FUNC_skb_vlan_push
:
3418 return &bpf_skb_vlan_push_proto
;
3419 case BPF_FUNC_skb_vlan_pop
:
3420 return &bpf_skb_vlan_pop_proto
;
3421 case BPF_FUNC_skb_change_proto
:
3422 return &bpf_skb_change_proto_proto
;
3423 case BPF_FUNC_skb_change_type
:
3424 return &bpf_skb_change_type_proto
;
3425 case BPF_FUNC_skb_adjust_room
:
3426 return &bpf_skb_adjust_room_proto
;
3427 case BPF_FUNC_skb_change_tail
:
3428 return &bpf_skb_change_tail_proto
;
3429 case BPF_FUNC_skb_get_tunnel_key
:
3430 return &bpf_skb_get_tunnel_key_proto
;
3431 case BPF_FUNC_skb_set_tunnel_key
:
3432 return bpf_get_skb_set_tunnel_proto(func_id
);
3433 case BPF_FUNC_skb_get_tunnel_opt
:
3434 return &bpf_skb_get_tunnel_opt_proto
;
3435 case BPF_FUNC_skb_set_tunnel_opt
:
3436 return bpf_get_skb_set_tunnel_proto(func_id
);
3437 case BPF_FUNC_redirect
:
3438 return &bpf_redirect_proto
;
3439 case BPF_FUNC_get_route_realm
:
3440 return &bpf_get_route_realm_proto
;
3441 case BPF_FUNC_get_hash_recalc
:
3442 return &bpf_get_hash_recalc_proto
;
3443 case BPF_FUNC_set_hash_invalid
:
3444 return &bpf_set_hash_invalid_proto
;
3445 case BPF_FUNC_set_hash
:
3446 return &bpf_set_hash_proto
;
3447 case BPF_FUNC_perf_event_output
:
3448 return &bpf_skb_event_output_proto
;
3449 case BPF_FUNC_get_smp_processor_id
:
3450 return &bpf_get_smp_processor_id_proto
;
3451 case BPF_FUNC_skb_under_cgroup
:
3452 return &bpf_skb_under_cgroup_proto
;
3453 case BPF_FUNC_get_socket_cookie
:
3454 return &bpf_get_socket_cookie_proto
;
3455 case BPF_FUNC_get_socket_uid
:
3456 return &bpf_get_socket_uid_proto
;
3458 return bpf_base_func_proto(func_id
);
3462 static const struct bpf_func_proto
*
3463 xdp_func_proto(enum bpf_func_id func_id
)
3466 case BPF_FUNC_perf_event_output
:
3467 return &bpf_xdp_event_output_proto
;
3468 case BPF_FUNC_get_smp_processor_id
:
3469 return &bpf_get_smp_processor_id_proto
;
3470 case BPF_FUNC_xdp_adjust_head
:
3471 return &bpf_xdp_adjust_head_proto
;
3472 case BPF_FUNC_xdp_adjust_meta
:
3473 return &bpf_xdp_adjust_meta_proto
;
3474 case BPF_FUNC_redirect
:
3475 return &bpf_xdp_redirect_proto
;
3476 case BPF_FUNC_redirect_map
:
3477 return &bpf_xdp_redirect_map_proto
;
3479 return bpf_base_func_proto(func_id
);
3483 static const struct bpf_func_proto
*
3484 lwt_inout_func_proto(enum bpf_func_id func_id
)
3487 case BPF_FUNC_skb_load_bytes
:
3488 return &bpf_skb_load_bytes_proto
;
3489 case BPF_FUNC_skb_pull_data
:
3490 return &bpf_skb_pull_data_proto
;
3491 case BPF_FUNC_csum_diff
:
3492 return &bpf_csum_diff_proto
;
3493 case BPF_FUNC_get_cgroup_classid
:
3494 return &bpf_get_cgroup_classid_proto
;
3495 case BPF_FUNC_get_route_realm
:
3496 return &bpf_get_route_realm_proto
;
3497 case BPF_FUNC_get_hash_recalc
:
3498 return &bpf_get_hash_recalc_proto
;
3499 case BPF_FUNC_perf_event_output
:
3500 return &bpf_skb_event_output_proto
;
3501 case BPF_FUNC_get_smp_processor_id
:
3502 return &bpf_get_smp_processor_id_proto
;
3503 case BPF_FUNC_skb_under_cgroup
:
3504 return &bpf_skb_under_cgroup_proto
;
3506 return bpf_base_func_proto(func_id
);
3510 static const struct bpf_func_proto
*
3511 sock_ops_func_proto(enum bpf_func_id func_id
)
3514 case BPF_FUNC_setsockopt
:
3515 return &bpf_setsockopt_proto
;
3516 case BPF_FUNC_getsockopt
:
3517 return &bpf_getsockopt_proto
;
3518 case BPF_FUNC_sock_map_update
:
3519 return &bpf_sock_map_update_proto
;
3521 return bpf_base_func_proto(func_id
);
3525 static const struct bpf_func_proto
*sk_skb_func_proto(enum bpf_func_id func_id
)
3528 case BPF_FUNC_skb_store_bytes
:
3529 return &bpf_skb_store_bytes_proto
;
3530 case BPF_FUNC_skb_load_bytes
:
3531 return &bpf_skb_load_bytes_proto
;
3532 case BPF_FUNC_skb_pull_data
:
3533 return &bpf_skb_pull_data_proto
;
3534 case BPF_FUNC_skb_change_tail
:
3535 return &bpf_skb_change_tail_proto
;
3536 case BPF_FUNC_skb_change_head
:
3537 return &bpf_skb_change_head_proto
;
3538 case BPF_FUNC_get_socket_cookie
:
3539 return &bpf_get_socket_cookie_proto
;
3540 case BPF_FUNC_get_socket_uid
:
3541 return &bpf_get_socket_uid_proto
;
3542 case BPF_FUNC_sk_redirect_map
:
3543 return &bpf_sk_redirect_map_proto
;
3545 return bpf_base_func_proto(func_id
);
3549 static const struct bpf_func_proto
*
3550 lwt_xmit_func_proto(enum bpf_func_id func_id
)
3553 case BPF_FUNC_skb_get_tunnel_key
:
3554 return &bpf_skb_get_tunnel_key_proto
;
3555 case BPF_FUNC_skb_set_tunnel_key
:
3556 return bpf_get_skb_set_tunnel_proto(func_id
);
3557 case BPF_FUNC_skb_get_tunnel_opt
:
3558 return &bpf_skb_get_tunnel_opt_proto
;
3559 case BPF_FUNC_skb_set_tunnel_opt
:
3560 return bpf_get_skb_set_tunnel_proto(func_id
);
3561 case BPF_FUNC_redirect
:
3562 return &bpf_redirect_proto
;
3563 case BPF_FUNC_clone_redirect
:
3564 return &bpf_clone_redirect_proto
;
3565 case BPF_FUNC_skb_change_tail
:
3566 return &bpf_skb_change_tail_proto
;
3567 case BPF_FUNC_skb_change_head
:
3568 return &bpf_skb_change_head_proto
;
3569 case BPF_FUNC_skb_store_bytes
:
3570 return &bpf_skb_store_bytes_proto
;
3571 case BPF_FUNC_csum_update
:
3572 return &bpf_csum_update_proto
;
3573 case BPF_FUNC_l3_csum_replace
:
3574 return &bpf_l3_csum_replace_proto
;
3575 case BPF_FUNC_l4_csum_replace
:
3576 return &bpf_l4_csum_replace_proto
;
3577 case BPF_FUNC_set_hash_invalid
:
3578 return &bpf_set_hash_invalid_proto
;
3580 return lwt_inout_func_proto(func_id
);
3584 static bool bpf_skb_is_valid_access(int off
, int size
, enum bpf_access_type type
,
3585 struct bpf_insn_access_aux
*info
)
3587 const int size_default
= sizeof(__u32
);
3589 if (off
< 0 || off
>= sizeof(struct __sk_buff
))
3592 /* The verifier guarantees that size > 0. */
3593 if (off
% size
!= 0)
3597 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3598 if (off
+ size
> offsetofend(struct __sk_buff
, cb
[4]))
3601 case bpf_ctx_range_till(struct __sk_buff
, remote_ip6
[0], remote_ip6
[3]):
3602 case bpf_ctx_range_till(struct __sk_buff
, local_ip6
[0], local_ip6
[3]):
3603 case bpf_ctx_range_till(struct __sk_buff
, remote_ip4
, remote_ip4
):
3604 case bpf_ctx_range_till(struct __sk_buff
, local_ip4
, local_ip4
):
3605 case bpf_ctx_range(struct __sk_buff
, data
):
3606 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3607 case bpf_ctx_range(struct __sk_buff
, data_end
):
3608 if (size
!= size_default
)
3612 /* Only narrow read access allowed for now. */
3613 if (type
== BPF_WRITE
) {
3614 if (size
!= size_default
)
3617 bpf_ctx_record_field_size(info
, size_default
);
3618 if (!bpf_ctx_narrow_access_ok(off
, size
, size_default
))
3626 static bool sk_filter_is_valid_access(int off
, int size
,
3627 enum bpf_access_type type
,
3628 struct bpf_insn_access_aux
*info
)
3631 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3632 case bpf_ctx_range(struct __sk_buff
, data
):
3633 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3634 case bpf_ctx_range(struct __sk_buff
, data_end
):
3635 case bpf_ctx_range_till(struct __sk_buff
, family
, local_port
):
3639 if (type
== BPF_WRITE
) {
3641 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3648 return bpf_skb_is_valid_access(off
, size
, type
, info
);
3651 static bool lwt_is_valid_access(int off
, int size
,
3652 enum bpf_access_type type
,
3653 struct bpf_insn_access_aux
*info
)
3656 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3657 case bpf_ctx_range_till(struct __sk_buff
, family
, local_port
):
3658 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3662 if (type
== BPF_WRITE
) {
3664 case bpf_ctx_range(struct __sk_buff
, mark
):
3665 case bpf_ctx_range(struct __sk_buff
, priority
):
3666 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3674 case bpf_ctx_range(struct __sk_buff
, data
):
3675 info
->reg_type
= PTR_TO_PACKET
;
3677 case bpf_ctx_range(struct __sk_buff
, data_end
):
3678 info
->reg_type
= PTR_TO_PACKET_END
;
3682 return bpf_skb_is_valid_access(off
, size
, type
, info
);
3685 static bool sock_filter_is_valid_access(int off
, int size
,
3686 enum bpf_access_type type
,
3687 struct bpf_insn_access_aux
*info
)
3689 if (type
== BPF_WRITE
) {
3691 case offsetof(struct bpf_sock
, bound_dev_if
):
3692 case offsetof(struct bpf_sock
, mark
):
3693 case offsetof(struct bpf_sock
, priority
):
3700 if (off
< 0 || off
+ size
> sizeof(struct bpf_sock
))
3702 /* The verifier guarantees that size > 0. */
3703 if (off
% size
!= 0)
3705 if (size
!= sizeof(__u32
))
3711 static int bpf_unclone_prologue(struct bpf_insn
*insn_buf
, bool direct_write
,
3712 const struct bpf_prog
*prog
, int drop_verdict
)
3714 struct bpf_insn
*insn
= insn_buf
;
3719 /* if (!skb->cloned)
3722 * (Fast-path, otherwise approximation that we might be
3723 * a clone, do the rest in helper.)
3725 *insn
++ = BPF_LDX_MEM(BPF_B
, BPF_REG_6
, BPF_REG_1
, CLONED_OFFSET());
3726 *insn
++ = BPF_ALU32_IMM(BPF_AND
, BPF_REG_6
, CLONED_MASK
);
3727 *insn
++ = BPF_JMP_IMM(BPF_JEQ
, BPF_REG_6
, 0, 7);
3729 /* ret = bpf_skb_pull_data(skb, 0); */
3730 *insn
++ = BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
);
3731 *insn
++ = BPF_ALU64_REG(BPF_XOR
, BPF_REG_2
, BPF_REG_2
);
3732 *insn
++ = BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3733 BPF_FUNC_skb_pull_data
);
3736 * return TC_ACT_SHOT;
3738 *insn
++ = BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2);
3739 *insn
++ = BPF_ALU32_IMM(BPF_MOV
, BPF_REG_0
, drop_verdict
);
3740 *insn
++ = BPF_EXIT_INSN();
3743 *insn
++ = BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
);
3745 *insn
++ = prog
->insnsi
[0];
3747 return insn
- insn_buf
;
3750 static int tc_cls_act_prologue(struct bpf_insn
*insn_buf
, bool direct_write
,
3751 const struct bpf_prog
*prog
)
3753 return bpf_unclone_prologue(insn_buf
, direct_write
, prog
, TC_ACT_SHOT
);
3756 static bool tc_cls_act_is_valid_access(int off
, int size
,
3757 enum bpf_access_type type
,
3758 struct bpf_insn_access_aux
*info
)
3760 if (type
== BPF_WRITE
) {
3762 case bpf_ctx_range(struct __sk_buff
, mark
):
3763 case bpf_ctx_range(struct __sk_buff
, tc_index
):
3764 case bpf_ctx_range(struct __sk_buff
, priority
):
3765 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3766 case bpf_ctx_range_till(struct __sk_buff
, cb
[0], cb
[4]):
3774 case bpf_ctx_range(struct __sk_buff
, data
):
3775 info
->reg_type
= PTR_TO_PACKET
;
3777 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3778 info
->reg_type
= PTR_TO_PACKET_META
;
3780 case bpf_ctx_range(struct __sk_buff
, data_end
):
3781 info
->reg_type
= PTR_TO_PACKET_END
;
3783 case bpf_ctx_range_till(struct __sk_buff
, family
, local_port
):
3787 return bpf_skb_is_valid_access(off
, size
, type
, info
);
3790 static bool __is_valid_xdp_access(int off
, int size
)
3792 if (off
< 0 || off
>= sizeof(struct xdp_md
))
3794 if (off
% size
!= 0)
3796 if (size
!= sizeof(__u32
))
3802 static bool xdp_is_valid_access(int off
, int size
,
3803 enum bpf_access_type type
,
3804 struct bpf_insn_access_aux
*info
)
3806 if (type
== BPF_WRITE
)
3810 case offsetof(struct xdp_md
, data
):
3811 info
->reg_type
= PTR_TO_PACKET
;
3813 case offsetof(struct xdp_md
, data_meta
):
3814 info
->reg_type
= PTR_TO_PACKET_META
;
3816 case offsetof(struct xdp_md
, data_end
):
3817 info
->reg_type
= PTR_TO_PACKET_END
;
3821 return __is_valid_xdp_access(off
, size
);
3824 void bpf_warn_invalid_xdp_action(u32 act
)
3826 const u32 act_max
= XDP_REDIRECT
;
3828 WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
3829 act
> act_max
? "Illegal" : "Driver unsupported",
3832 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action
);
3834 static bool __is_valid_sock_ops_access(int off
, int size
)
3836 if (off
< 0 || off
>= sizeof(struct bpf_sock_ops
))
3838 /* The verifier guarantees that size > 0. */
3839 if (off
% size
!= 0)
3841 if (size
!= sizeof(__u32
))
3847 static bool sock_ops_is_valid_access(int off
, int size
,
3848 enum bpf_access_type type
,
3849 struct bpf_insn_access_aux
*info
)
3851 if (type
== BPF_WRITE
) {
3853 case offsetof(struct bpf_sock_ops
, op
) ...
3854 offsetof(struct bpf_sock_ops
, replylong
[3]):
3861 return __is_valid_sock_ops_access(off
, size
);
3864 static int sk_skb_prologue(struct bpf_insn
*insn_buf
, bool direct_write
,
3865 const struct bpf_prog
*prog
)
3867 return bpf_unclone_prologue(insn_buf
, direct_write
, prog
, SK_DROP
);
3870 static bool sk_skb_is_valid_access(int off
, int size
,
3871 enum bpf_access_type type
,
3872 struct bpf_insn_access_aux
*info
)
3875 case bpf_ctx_range(struct __sk_buff
, tc_classid
):
3876 case bpf_ctx_range(struct __sk_buff
, data_meta
):
3880 if (type
== BPF_WRITE
) {
3882 case bpf_ctx_range(struct __sk_buff
, tc_index
):
3883 case bpf_ctx_range(struct __sk_buff
, priority
):
3891 case bpf_ctx_range(struct __sk_buff
, mark
):
3893 case bpf_ctx_range(struct __sk_buff
, data
):
3894 info
->reg_type
= PTR_TO_PACKET
;
3896 case bpf_ctx_range(struct __sk_buff
, data_end
):
3897 info
->reg_type
= PTR_TO_PACKET_END
;
3901 return bpf_skb_is_valid_access(off
, size
, type
, info
);
3904 static u32
bpf_convert_ctx_access(enum bpf_access_type type
,
3905 const struct bpf_insn
*si
,
3906 struct bpf_insn
*insn_buf
,
3907 struct bpf_prog
*prog
, u32
*target_size
)
3909 struct bpf_insn
*insn
= insn_buf
;
3913 case offsetof(struct __sk_buff
, len
):
3914 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3915 bpf_target_off(struct sk_buff
, len
, 4,
3919 case offsetof(struct __sk_buff
, protocol
):
3920 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3921 bpf_target_off(struct sk_buff
, protocol
, 2,
3925 case offsetof(struct __sk_buff
, vlan_proto
):
3926 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3927 bpf_target_off(struct sk_buff
, vlan_proto
, 2,
3931 case offsetof(struct __sk_buff
, priority
):
3932 if (type
== BPF_WRITE
)
3933 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3934 bpf_target_off(struct sk_buff
, priority
, 4,
3937 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3938 bpf_target_off(struct sk_buff
, priority
, 4,
3942 case offsetof(struct __sk_buff
, ingress_ifindex
):
3943 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3944 bpf_target_off(struct sk_buff
, skb_iif
, 4,
3948 case offsetof(struct __sk_buff
, ifindex
):
3949 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, dev
),
3950 si
->dst_reg
, si
->src_reg
,
3951 offsetof(struct sk_buff
, dev
));
3952 *insn
++ = BPF_JMP_IMM(BPF_JEQ
, si
->dst_reg
, 0, 1);
3953 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
3954 bpf_target_off(struct net_device
, ifindex
, 4,
3958 case offsetof(struct __sk_buff
, hash
):
3959 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3960 bpf_target_off(struct sk_buff
, hash
, 4,
3964 case offsetof(struct __sk_buff
, mark
):
3965 if (type
== BPF_WRITE
)
3966 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3967 bpf_target_off(struct sk_buff
, mark
, 4,
3970 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
3971 bpf_target_off(struct sk_buff
, mark
, 4,
3975 case offsetof(struct __sk_buff
, pkt_type
):
3977 *insn
++ = BPF_LDX_MEM(BPF_B
, si
->dst_reg
, si
->src_reg
,
3979 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, PKT_TYPE_MAX
);
3980 #ifdef __BIG_ENDIAN_BITFIELD
3981 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, 5);
3985 case offsetof(struct __sk_buff
, queue_mapping
):
3986 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3987 bpf_target_off(struct sk_buff
, queue_mapping
, 2,
3991 case offsetof(struct __sk_buff
, vlan_present
):
3992 case offsetof(struct __sk_buff
, vlan_tci
):
3993 BUILD_BUG_ON(VLAN_TAG_PRESENT
!= 0x1000);
3995 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
3996 bpf_target_off(struct sk_buff
, vlan_tci
, 2,
3998 if (si
->off
== offsetof(struct __sk_buff
, vlan_tci
)) {
3999 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
,
4002 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, 12);
4003 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, 1);
4007 case offsetof(struct __sk_buff
, cb
[0]) ...
4008 offsetofend(struct __sk_buff
, cb
[4]) - 1:
4009 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb
, data
) < 20);
4010 BUILD_BUG_ON((offsetof(struct sk_buff
, cb
) +
4011 offsetof(struct qdisc_skb_cb
, data
)) %
4014 prog
->cb_access
= 1;
4016 off
-= offsetof(struct __sk_buff
, cb
[0]);
4017 off
+= offsetof(struct sk_buff
, cb
);
4018 off
+= offsetof(struct qdisc_skb_cb
, data
);
4019 if (type
== BPF_WRITE
)
4020 *insn
++ = BPF_STX_MEM(BPF_SIZE(si
->code
), si
->dst_reg
,
4023 *insn
++ = BPF_LDX_MEM(BPF_SIZE(si
->code
), si
->dst_reg
,
4027 case offsetof(struct __sk_buff
, tc_classid
):
4028 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb
, tc_classid
) != 2);
4031 off
-= offsetof(struct __sk_buff
, tc_classid
);
4032 off
+= offsetof(struct sk_buff
, cb
);
4033 off
+= offsetof(struct qdisc_skb_cb
, tc_classid
);
4035 if (type
== BPF_WRITE
)
4036 *insn
++ = BPF_STX_MEM(BPF_H
, si
->dst_reg
,
4039 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
,
4043 case offsetof(struct __sk_buff
, data
):
4044 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, data
),
4045 si
->dst_reg
, si
->src_reg
,
4046 offsetof(struct sk_buff
, data
));
4049 case offsetof(struct __sk_buff
, data_meta
):
4051 off
-= offsetof(struct __sk_buff
, data_meta
);
4052 off
+= offsetof(struct sk_buff
, cb
);
4053 off
+= offsetof(struct bpf_skb_data_end
, data_meta
);
4054 *insn
++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si
->dst_reg
,
4058 case offsetof(struct __sk_buff
, data_end
):
4060 off
-= offsetof(struct __sk_buff
, data_end
);
4061 off
+= offsetof(struct sk_buff
, cb
);
4062 off
+= offsetof(struct bpf_skb_data_end
, data_end
);
4063 *insn
++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si
->dst_reg
,
4067 case offsetof(struct __sk_buff
, tc_index
):
4068 #ifdef CONFIG_NET_SCHED
4069 if (type
== BPF_WRITE
)
4070 *insn
++ = BPF_STX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
4071 bpf_target_off(struct sk_buff
, tc_index
, 2,
4074 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
4075 bpf_target_off(struct sk_buff
, tc_index
, 2,
4079 if (type
== BPF_WRITE
)
4080 *insn
++ = BPF_MOV64_REG(si
->dst_reg
, si
->dst_reg
);
4082 *insn
++ = BPF_MOV64_IMM(si
->dst_reg
, 0);
4086 case offsetof(struct __sk_buff
, napi_id
):
4087 #if defined(CONFIG_NET_RX_BUSY_POLL)
4088 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4089 bpf_target_off(struct sk_buff
, napi_id
, 4,
4091 *insn
++ = BPF_JMP_IMM(BPF_JGE
, si
->dst_reg
, MIN_NAPI_ID
, 1);
4092 *insn
++ = BPF_MOV64_IMM(si
->dst_reg
, 0);
4095 *insn
++ = BPF_MOV64_IMM(si
->dst_reg
, 0);
4098 case offsetof(struct __sk_buff
, family
):
4099 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_family
) != 2);
4101 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4102 si
->dst_reg
, si
->src_reg
,
4103 offsetof(struct sk_buff
, sk
));
4104 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4105 bpf_target_off(struct sock_common
,
4109 case offsetof(struct __sk_buff
, remote_ip4
):
4110 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_daddr
) != 4);
4112 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4113 si
->dst_reg
, si
->src_reg
,
4114 offsetof(struct sk_buff
, sk
));
4115 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4116 bpf_target_off(struct sock_common
,
4120 case offsetof(struct __sk_buff
, local_ip4
):
4121 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4122 skc_rcv_saddr
) != 4);
4124 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4125 si
->dst_reg
, si
->src_reg
,
4126 offsetof(struct sk_buff
, sk
));
4127 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4128 bpf_target_off(struct sock_common
,
4132 case offsetof(struct __sk_buff
, remote_ip6
[0]) ...
4133 offsetof(struct __sk_buff
, remote_ip6
[3]):
4134 #if IS_ENABLED(CONFIG_IPV6)
4135 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4136 skc_v6_daddr
.s6_addr32
[0]) != 4);
4139 off
-= offsetof(struct __sk_buff
, remote_ip6
[0]);
4141 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4142 si
->dst_reg
, si
->src_reg
,
4143 offsetof(struct sk_buff
, sk
));
4144 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4145 offsetof(struct sock_common
,
4146 skc_v6_daddr
.s6_addr32
[0]) +
4149 *insn
++ = BPF_MOV32_IMM(si
->dst_reg
, 0);
4152 case offsetof(struct __sk_buff
, local_ip6
[0]) ...
4153 offsetof(struct __sk_buff
, local_ip6
[3]):
4154 #if IS_ENABLED(CONFIG_IPV6)
4155 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
,
4156 skc_v6_rcv_saddr
.s6_addr32
[0]) != 4);
4159 off
-= offsetof(struct __sk_buff
, local_ip6
[0]);
4161 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4162 si
->dst_reg
, si
->src_reg
,
4163 offsetof(struct sk_buff
, sk
));
4164 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->dst_reg
,
4165 offsetof(struct sock_common
,
4166 skc_v6_rcv_saddr
.s6_addr32
[0]) +
4169 *insn
++ = BPF_MOV32_IMM(si
->dst_reg
, 0);
4173 case offsetof(struct __sk_buff
, remote_port
):
4174 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_dport
) != 2);
4176 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4177 si
->dst_reg
, si
->src_reg
,
4178 offsetof(struct sk_buff
, sk
));
4179 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4180 bpf_target_off(struct sock_common
,
4183 #ifndef __BIG_ENDIAN_BITFIELD
4184 *insn
++ = BPF_ALU32_IMM(BPF_LSH
, si
->dst_reg
, 16);
4188 case offsetof(struct __sk_buff
, local_port
):
4189 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common
, skc_num
) != 2);
4191 *insn
++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff
, sk
),
4192 si
->dst_reg
, si
->src_reg
,
4193 offsetof(struct sk_buff
, sk
));
4194 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->dst_reg
,
4195 bpf_target_off(struct sock_common
,
4196 skc_num
, 2, target_size
));
4200 return insn
- insn_buf
;
4203 static u32
sock_filter_convert_ctx_access(enum bpf_access_type type
,
4204 const struct bpf_insn
*si
,
4205 struct bpf_insn
*insn_buf
,
4206 struct bpf_prog
*prog
, u32
*target_size
)
4208 struct bpf_insn
*insn
= insn_buf
;
4211 case offsetof(struct bpf_sock
, bound_dev_if
):
4212 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_bound_dev_if
) != 4);
4214 if (type
== BPF_WRITE
)
4215 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4216 offsetof(struct sock
, sk_bound_dev_if
));
4218 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4219 offsetof(struct sock
, sk_bound_dev_if
));
4222 case offsetof(struct bpf_sock
, mark
):
4223 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_mark
) != 4);
4225 if (type
== BPF_WRITE
)
4226 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4227 offsetof(struct sock
, sk_mark
));
4229 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4230 offsetof(struct sock
, sk_mark
));
4233 case offsetof(struct bpf_sock
, priority
):
4234 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_priority
) != 4);
4236 if (type
== BPF_WRITE
)
4237 *insn
++ = BPF_STX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4238 offsetof(struct sock
, sk_priority
));
4240 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4241 offsetof(struct sock
, sk_priority
));
4244 case offsetof(struct bpf_sock
, family
):
4245 BUILD_BUG_ON(FIELD_SIZEOF(struct sock
, sk_family
) != 2);
4247 *insn
++ = BPF_LDX_MEM(BPF_H
, si
->dst_reg
, si
->src_reg
,
4248 offsetof(struct sock
, sk_family
));
4251 case offsetof(struct bpf_sock
, type
):
4252 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4253 offsetof(struct sock
, __sk_flags_offset
));
4254 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, SK_FL_TYPE_MASK
);
4255 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, SK_FL_TYPE_SHIFT
);
4258 case offsetof(struct bpf_sock
, protocol
):
4259 *insn
++ = BPF_LDX_MEM(BPF_W
, si
->dst_reg
, si
->src_reg
,
4260 offsetof(struct sock
, __sk_flags_offset
));
4261 *insn
++ = BPF_ALU32_IMM(BPF_AND
, si
->dst_reg
, SK_FL_PROTO_MASK
);
4262 *insn
++ = BPF_ALU32_IMM(BPF_RSH
, si
->dst_reg
, SK_FL_PROTO_SHIFT
);
4266 return insn
- insn_buf
;
/* tc_cls_act_convert_ctx_access - __sk_buff access conversion for tc
 * cls/act programs.
 *
 * Only ifindex is special-cased here: on the tc path skb->dev is valid,
 * so no NULL check is emitted (unlike the generic converter). Everything
 * else falls through to bpf_convert_ctx_access().
 *
 * NOTE(review): dropped structural lines (switch/break/braces) restored
 * to match upstream — verify against the original file.
 */
static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct __sk_buff, ifindex):
		/* dst = skb->dev; dst = dev->ifindex */
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct net_device, ifindex, 4,
						     target_size));
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}
/* xdp_convert_ctx_access - map struct xdp_md fields onto struct xdp_buff.
 *
 * Each UAPI field (data, data_meta, data_end) is a straight pointer-sized
 * load from the corresponding xdp_buff member; no masking or fixups are
 * needed. Returns the number of instructions emitted.
 *
 * NOTE(review): dropped structural lines (switch/break/braces) restored
 * to match upstream — verify against the original file.
 */
static u32 xdp_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct xdp_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data));
		break;
	case offsetof(struct xdp_md, data_meta):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_meta));
		break;
	case offsetof(struct xdp_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_end));
		break;
	}

	return insn - insn_buf;
}
/* sock_ops_convert_ctx_access - rewrite struct bpf_sock_ops accesses for
 * BPF_PROG_TYPE_SOCK_OPS programs.
 *
 * op/reply/replylong live directly in struct bpf_sock_ops_kern and are
 * simply re-offset (read/write). The address/port fields require an
 * extra indirection: first load bpf_sock_ops_kern->sk, then read from
 * struct sock_common. IPv6 fields read 0 when CONFIG_IPV6 is off.
 * skc_dport is stored in network byte order; on little-endian the value
 * is shifted left 16 so the UAPI sees it in the upper half-word.
 *
 * NOTE(review): structural lines (braces, switch header, break, #else/
 * #endif, `off = si->off;` assignments) were missing from the extracted
 * source and have been restored to match upstream — verify against the
 * original file.
 */
static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
				       const struct bpf_insn *si,
				       struct bpf_insn *insn_buf,
				       struct bpf_prog *prog,
				       u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock_ops, op) ...
	     offsetof(struct bpf_sock_ops, replylong[3]):
		/* These layouts must stay in sync for the re-offsetting
		 * below to be valid.
		 */
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
		off = si->off;
		off -= offsetof(struct bpf_sock_ops, op);
		off += offsetof(struct bpf_sock_ops_kern, op);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		break;

	case offsetof(struct bpf_sock_ops, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);

		/* dst = ops->sk; dst = sk->skc_family */
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;

	case offsetof(struct bpf_sock_ops, local_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		/* off selects which of the four 32-bit words is read. */
		off = si->off;
		off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, remote_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		/* skc_dport is __be16; move it to the expected position
		 * on little-endian hosts.
		 */
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		/* skc_num is host byte order; no shift needed. */
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;
	}
	return insn - insn_buf;
}
/* sk_skb_convert_ctx_access - __sk_buff access conversion for SK_SKB
 * (sockmap) programs.
 *
 * data_end is special: for this program type it is stashed in the skb
 * control block (struct tcp_skb_cb, bpf.data_end), so the access is
 * re-offset into skb->cb[]. All other fields use the generic converter.
 *
 * NOTE(review): dropped structural lines (switch/default/break/braces,
 * `off = si->off;`) restored to match upstream — verify against the
 * original file.
 */
static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, data_end):
		off  = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct tcp_skb_cb, bpf.data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}
/* BPF_PROG_TYPE_SOCKET_FILTER: verifier callbacks; no extra prog ops. */
const struct bpf_verifier_ops sk_filter_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops sk_filter_prog_ops = {
};
/* BPF_PROG_TYPE_SCHED_CLS / SCHED_ACT: tc-specific ctx conversion and a
 * prologue that preloads data pointers; skb-based test_run support.
 */
const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops tc_cls_act_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};
/* BPF_PROG_TYPE_XDP: xdp_buff-based ctx conversion and test_run. */
const struct bpf_verifier_ops xdp_verifier_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
};

const struct bpf_prog_ops xdp_prog_ops = {
	.test_run		= bpf_prog_test_run_xdp,
};
/* BPF_PROG_TYPE_CGROUP_SKB: reuses the socket-filter helper set and the
 * generic __sk_buff converter.
 */
const struct bpf_verifier_ops cg_skb_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops cg_skb_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};
/* BPF_PROG_TYPE_LWT_IN / LWT_OUT: lightweight-tunnel input/output hooks. */
const struct bpf_verifier_ops lwt_inout_verifier_ops = {
	.get_func_proto		= lwt_inout_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_inout_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};
/* BPF_PROG_TYPE_LWT_XMIT: xmit hook gets the wider helper set and the
 * same data-pointer prologue as tc programs.
 */
const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops lwt_xmit_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};
/* BPF_PROG_TYPE_CGROUP_SOCK: struct bpf_sock ctx; no prog ops. */
const struct bpf_verifier_ops cg_sock_verifier_ops = {
	.get_func_proto		= sock_filter_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= sock_filter_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_prog_ops = {
};
/* BPF_PROG_TYPE_SOCK_OPS: struct bpf_sock_ops ctx; no prog ops. */
const struct bpf_verifier_ops sock_ops_verifier_ops = {
	.get_func_proto		= sock_ops_func_proto,
	.is_valid_access	= sock_ops_is_valid_access,
	.convert_ctx_access	= sock_ops_convert_ctx_access,
};

const struct bpf_prog_ops sock_ops_prog_ops = {
};
/* BPF_PROG_TYPE_SK_SKB (sockmap): own converter/prologue; no prog ops. */
const struct bpf_verifier_ops sk_skb_verifier_ops = {
	.get_func_proto		= sk_skb_func_proto,
	.is_valid_access	= sk_skb_is_valid_access,
	.convert_ctx_access	= sk_skb_convert_ctx_access,
	.gen_prologue		= sk_skb_prologue,
};

const struct bpf_prog_ops sk_skb_prog_ops = {
};
/* sk_detach_filter - remove the socket filter attached to @sk.
 *
 * Caller must hold the socket lock (asserted via lockdep_sock_is_held).
 * Returns 0 on success, -EPERM if filtering is locked on the socket,
 * -ENOENT if no filter was attached.
 *
 * NOTE(review): several body lines (ret variable, -EPERM return, the
 * if (filter) guard) were missing from the extracted source and were
 * reconstructed from upstream net/core/filter.c — verify against the
 * original file.
 */
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
4588 int sk_get_filter(struct sock
*sk
, struct sock_filter __user
*ubuf
,
4591 struct sock_fprog_kern
*fprog
;
4592 struct sk_filter
*filter
;
4596 filter
= rcu_dereference_protected(sk
->sk_filter
,
4597 lockdep_sock_is_held(sk
));
4601 /* We're copying the filter that has been originally attached,
4602 * so no conversion/decode needed anymore. eBPF programs that
4603 * have no original program cannot be dumped through this.
4606 fprog
= filter
->prog
->orig_prog
;
4612 /* User space only enquires number of filter blocks. */
4616 if (len
< fprog
->len
)
4620 if (copy_to_user(ubuf
, fprog
->filter
, bpf_classic_proglen(fprog
)))
4623 /* Instead of bytes, the API requests to return the number