/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6_local.h>

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);

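/* Illustrative sketch (outside this file): the common sk_filter() entry
 * point in <linux/filter.h> is a thin wrapper that caps trimming at one
 * byte, roughly:
 *
 *	static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return sk_filter_trim_cap(sk, skb, 1);
 *	}
 *
 * so a program returning 0 drops the packet, while any non-zero return
 * trims it to at most the returned length.
 */
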
BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return *(u8 *)(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return tmp;
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return *(u8 *)ptr;
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
					 offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u16 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be16(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be16_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be16(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u32 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be32(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be32_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be32(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}

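/* Example (illustrative): for SKF_AD_PKTTYPE on a little-endian build,
 * the function above emits two insns into insn_buf and returns 2:
 *
 *	insn_buf[0] = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
 *	insn_buf[1] = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
 *
 * i.e. dst_reg = skb->pkt_type, masked down to the bitfield width.
 */
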
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
	const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
	int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
	bool endian = BPF_SIZE(fp->code) == BPF_H ||
		      BPF_SIZE(fp->code) == BPF_W;
	bool indirect = BPF_MODE(fp->code) == BPF_IND;
	const int ip_align = NET_IP_ALIGN;
	struct bpf_insn *insn = *insnp;
	int offset = fp->k;

	if (!indirect &&
	    ((unaligned_ok && offset >= 0) ||
	     (!unaligned_ok && offset >= 0 &&
	      offset + ip_align >= 0 &&
	      offset + ip_align % size == 0))) {
		bool ldx_off_ok = offset <= S16_MAX;

		*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
		*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
				      size, 2 + endian + (!ldx_off_ok * 2));
		if (ldx_off_ok) {
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_D, offset);
		} else {
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_TMP, 0);
		}
		if (endian)
			*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
		*insn++ = BPF_JMP_A(8);
	}

	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
		if (fp->k)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
	}

	switch (BPF_SIZE(fp->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
		break;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
	*insn   = BPF_EXIT_INSN();

	*insnp = insn;
	return true;
}

/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *	@seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourself. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    convert_bpf_ld_abs(fp, &insn)) {
				*seen_ld_abs = true;
				break;
			}

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
		s32 off;						\
									\
		if (target >= len || target < 0)			\
			goto err;					\
		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		off -= insn - tmp_insns;				\
		/* Reject anything not fitting into insn->off. */	\
		if (off < off_min || off > off_max)			\
			goto err;					\
									\
		insn->off = off;					\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}
		/* RET_K is remaped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4  + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD  | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4  + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD  ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD  | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD  | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	if (new_prog)
		memset(new_prog, 0, sizeof(*new_prog));
	return -EINVAL;
}

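/* Worked example (illustrative): the classic two-insn filter
 *
 *	{ BPF_LD | BPF_W | BPF_LEN, 0, 0, 0 },		A = skb->len
 *	{ BPF_RET | BPF_A,          0, 0, 0 },		return A
 *
 * is remapped by the loop above into roughly
 *
 *	BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
 *		    offsetof(struct sk_buff, len)),
 *	BPF_EXIT_INSN(),
 *
 * preceded by the A/X clearing and CTX move prologue emitted at the top.
 */
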
/*
 * As we dont want to clear mem[] array for each packet going through
 * __bpf_prog_run(), we check that filter loaded by user never try to read
 * a cell if not previously written, and we check all branches to be sure
 * a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

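/* Example (illustrative): this filter is rejected because mem[1] is read
 * before any store wrote it:
 *
 *	{ BPF_LD  | BPF_MEM, 0, 0, 1 },		A = mem[1]	rejected
 *	{ BPF_RET | BPF_A,   0, 0, 0 },
 *
 * whereas prefixing it with { BPF_ST, 0, 0, 1 } (mem[1] = A) would make
 * it pass.
 */
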
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

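/* Example (illustrative): the smallest program accepted here is a single
 * "return K" instruction, e.g. the drop-all filter
 *
 *	struct sock_filter prog[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0 },
 *	};
 *
 * It ends in BPF_RET, contains no jumps and touches no memory cells, so
 * bpf_check_classic() hands it straight to check_load_and_stores().
 */
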
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;
	bool seen_ld_abs = false;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

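/* Usage sketch (illustrative): an in-kernel user builds an unattached
 * filter roughly like so, runs it via BPF_PROG_RUN() and frees it with
 * bpf_prog_destroy():
 *
 *	struct sock_filter insns[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },	accept packet
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &fprog);
 */
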
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

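/* Userspace view (illustrative): this path is reached via
 * setsockopt(SO_ATTACH_FILTER), roughly
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },
 *	};
 *	struct sock_fprog bpf = {
 *		.len	= ARRAY_SIZE(code),
 *		.filter	= code,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
 *
 * sock_setsockopt() then calls sk_attach_filter() with the copied
 * sock_fprog.
 */
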
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		err = -ENOMEM;
	else
		err = reuseport_attach_prog(sk, prog);

	if (err)
		__bpf_prog_release(prog);

	return err;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL)
		prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
		/* Like other non BPF_PROG_TYPE_SOCKET_FILTER
		 * bpf prog (e.g. sockmap).  It depends on the
		 * limitation imposed by bpf_prog_load().
		 * Hence, sysctl_optmem_max is not checked.
		 */
		if ((sk->sk_type != SOCK_STREAM &&
		     sk->sk_type != SOCK_DGRAM) ||
		    (sk->sk_protocol != IPPROTO_UDP &&
		     sk->sk_protocol != IPPROTO_TCP) ||
		    (sk->sk_family != AF_INET &&
		     sk->sk_family != AF_INET6)) {
			err = -ENOTSUPP;
			goto err_prog_put;
		}
	} else {
		/* BPF_PROG_TYPE_SOCKET_FILTER */
		if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
			err = -ENOMEM;
			goto err_prog_put;
		}
	}

	err = reuseport_attach_prog(sk, prog);
err_prog_put:
	if (err)
		bpf_prog_put(prog);

	return err;
}

void sk_reuseport_prog_free(struct bpf_prog *prog)
{
	if (!prog)
		return;

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
		bpf_prog_put(prog);
	else
		bpf_prog_destroy(prog);
}

struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
	   u32, offset, void *, to, u32, len, u32, start_header)
{
	u8 *end = skb_tail_pointer(skb);
	u8 *net = skb_network_header(skb);
	u8 *mac = skb_mac_header(skb);
	u8 *ptr;

	if (unlikely(offset > 0xffff || len > (end - mac)))
		goto err_clear;

	switch (start_header) {
	case BPF_HDR_START_MAC:
		ptr = mac + offset;
		break;
	case BPF_HDR_START_NET:
		ptr = net + offset;
		break;
	default:
		goto err_clear;
	}

	if (likely(ptr >= mac && ptr + len <= end)) {
		memcpy(to, ptr, len);
		return 0;
	}

err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
	.func		= bpf_skb_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

static inline int sk_skb_try_make_writable(struct sk_buff *skb,
					   unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_end_sk_skb(skb);
	return err;
}

BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto sk_skb_pull_data_proto = {
	.func		= sk_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)
		return 0;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;
	return 0;
}

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i < to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}

static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};

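/* BPF program side sketch (illustrative; the csum offsets are values the
 * program would compute itself): a typical direct-write flow pairs
 * bpf_csum_diff() with one of the csum_replace helpers, e.g. after
 * rewriting an IPv4 address in place:
 *
 *	__be32 old_ip = ..., new_ip = ...;
 *	__s64 diff = bpf_csum_diff(&old_ip, 4, &new_ip, 4, 0);
 *
 *	bpf_l4_csum_replace(skb, tcp_csum_off, 0, diff, BPF_F_PSEUDO_HDR);
 *	bpf_l3_csum_replace(skb, ip_csum_off, 0, diff, 0);
 *
 * Passing from == 0 selects the "replace by diff" case handled above.
 */
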
BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
	/* The interface is to be used in combination with bpf_csum_diff()
	 * for direct packet writes. csum rotation for alignment as well
	 * as emulating csum_sub() can be done from the eBPF program.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		return (skb->csum = csum_add(skb->csum, csum));

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_csum_update_proto = {
	.func		= bpf_csum_update,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
	return dev_forward_skb(dev, skb);
}

static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
				      struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->dev = dev;
		ret = netif_rx(skb);
	}

	return ret;
}

static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->dev = dev;

	__this_cpu_inc(xmit_recursion);
	ret = dev_queue_xmit(skb);
	__this_cpu_dec(xmit_recursion);

	return ret;
}

static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* skb->mac_len is not set on normal egress */
	unsigned int mlen = skb->network_header - skb->mac_header;

	__skb_pull(skb, mlen);

	/* At ingress, the mac header has already been pulled once.
	 * At egress, skb_pospull_rcsum has to be done in case that
	 * the skb is originated from ingress (i.e. a forwarded skb)
	 * to ensure that rcsum starts at net header.
	 */
	if (!skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
	skb_pop_mac_header(skb);
	skb_reset_mac_len(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* Verify that a link layer header is carried */
	if (unlikely(skb->mac_header >= skb->network_header)) {
		kfree_skb(skb);
		return -ERANGE;
	}

	bpf_push_mac_rcsum(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
			  u32 flags)
{
	if (dev_is_mac_header_xmit(dev))
		return __bpf_redirect_common(skb, dev, flags);
	else
		return __bpf_redirect_no_mac(skb, dev, flags);
}

BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
	struct net_device *dev;
	struct sk_buff *clone;
	int ret;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return -EINVAL;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!clone))
		return -ENOMEM;

	/* For direct write, we need to keep the invariant that the skbs
	 * we're dealing with need to be uncloned. Should uncloning fail
	 * here, we need to free the just generated clone to unclone once
	 * again.
	 */
	ret = bpf_try_make_head_writable(skb);
	if (unlikely(ret)) {
		kfree_skb(clone);
		return -ENOMEM;
	}

	return __bpf_redirect(clone, dev, flags);
}

static const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func           = bpf_clone_redirect,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
	.arg2_type      = ARG_ANYTHING,
	.arg3_type      = ARG_ANYTHING,
};

DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);

BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return TC_ACT_SHOT;

	ri->ifindex = ifindex;
	ri->flags = flags;

	return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
	ri->ifindex = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return __bpf_redirect(skb, dev, ri->flags);
}

static const struct bpf_func_proto bpf_redirect_proto = {
	.func           = bpf_redirect,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_ANYTHING,
	.arg2_type      = ARG_ANYTHING,
};

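/* Program-side sketch (illustrative, libbpf-style; IFINDEX_TARGET is a
 * placeholder): a cls_act program redirects a packet simply via
 *
 *	SEC("tc")
 *	int tc_redir(struct __sk_buff *skb)
 *	{
 *		return bpf_redirect(IFINDEX_TARGET, 0);
 *	}
 *
 * bpf_redirect() only records ifindex/flags in bpf_redirect_info and
 * returns TC_ACT_REDIRECT; the actual work happens when the caller of
 * the program invokes skb_do_redirect().
 */
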
BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
{
	msg->apply_bytes = bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
	.func           = bpf_msg_apply_bytes,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
{
	msg->cork_bytes = bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
	.func           = bpf_msg_cork_bytes,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
	   u32, end, u64, flags)
{
	u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
	u32 first_sge, last_sge, i, shift, bytes_sg_total;
	struct scatterlist *sge;
	u8 *raw, *to, *from;
	struct page *page;

	if (unlikely(flags || end <= start))
		return -EINVAL;

	/* First find the starting scatterlist element */
	i = msg->sg.start;
	do {
		len = sk_msg_elem(msg, i)->length;
		if (start < offset + len)
			break;
		offset += len;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);

	if (unlikely(start >= offset + len))
		return -EINVAL;

	first_sge = i;
	/* The start may point into the sg element so we need to also
	 * account for the headroom.
	 */
	bytes_sg_total = start - offset + bytes;
	if (!msg->sg.copy[i] && bytes_sg_total <= len)
		goto out;

	/* At this point we need to linearize multiple scatterlist
	 * elements or a single shared page. Either way we need to
	 * copy into a linear buffer exclusively owned by BPF. Then
	 * place the buffer in the scatterlist and fixup the original
	 * entries by removing the entries now in the linear buffer
	 * and shifting the remaining entries. For now we do not try
	 * to copy partial entries to avoid complexity of running out
	 * of sg_entry slots. The downside is reading a single byte
	 * will copy the entire sg entry.
	 */
	do {
		copy += sk_msg_elem(msg, i)->length;
		sk_msg_iter_var_next(i);
		if (bytes_sg_total <= copy)
			break;
	} while (i != msg->sg.end);
	last_sge = i;

	if (unlikely(bytes_sg_total > copy))
		return -EINVAL;

	page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
			   get_order(copy));
	if (unlikely(!page))
		return -ENOMEM;

	raw = page_address(page);
	i = first_sge;
	do {
		sge = sk_msg_elem(msg, i);
		from = sg_virt(sge);
		len = sge->length;
		to = raw + poffset;

		memcpy(to, from, len);
		poffset += len;
		sge->length = 0;
		put_page(sg_page(sge));

		sk_msg_iter_var_next(i);
	} while (i != last_sge);

	sg_set_page(&msg->sg.data[first_sge], page, copy, 0);

	/* To repair sg ring we need to shift entries. If we only
	 * had a single entry though we can just replace it and
	 * be done. Otherwise walk the ring and shift the entries.
	 */
	WARN_ON_ONCE(last_sge == first_sge);
	shift = last_sge > first_sge ?
		last_sge - first_sge - 1 :
		MAX_SKB_FRAGS - first_sge + last_sge - 1;
	if (!shift)
		goto out;

	i = first_sge;
	sk_msg_iter_var_next(i);
	do {
		u32 move_from;

		if (i + shift >= MAX_MSG_FRAGS)
			move_from = i + shift - MAX_MSG_FRAGS;
		else
			move_from = i + shift;
		if (move_from == msg->sg.end)
			break;

		msg->sg.data[i] = msg->sg.data[move_from];
		msg->sg.data[move_from].length = 0;
		msg->sg.data[move_from].page_link = 0;
		msg->sg.data[move_from].offset = 0;
		sk_msg_iter_var_next(i);
	} while (1);

	msg->sg.end = msg->sg.end - shift > msg->sg.end ?
		      msg->sg.end - shift + MAX_MSG_FRAGS :
		      msg->sg.end - shift;
out:
	msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
	msg->data_end = msg->data + bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_pull_data_proto = {
	.func		= bpf_msg_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

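/* SK_MSG program sketch (illustrative; N is whatever length the program
 * needs): before parsing application data, a sockmap verdict program
 * makes the first N bytes linear and directly addressable:
 *
 *	if (msg->data + N > msg->data_end) {
 *		if (bpf_msg_pull_data(msg, 0, N, 0))
 *			return SK_DROP;
 *	}
 *
 * After the call, msg->data/msg->data_end cover [start, end) as one
 * contiguous buffer, at the cost of copying whole sg entries.
 */
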
2300 BPF_CALL_1(bpf_get_cgroup_classid
, const struct sk_buff
*, skb
)
2302 return task_get_classid(skb
);
2305 static const struct bpf_func_proto bpf_get_cgroup_classid_proto
= {
2306 .func
= bpf_get_cgroup_classid
,
2308 .ret_type
= RET_INTEGER
,
2309 .arg1_type
= ARG_PTR_TO_CTX
,
2312 BPF_CALL_1(bpf_get_route_realm
, const struct sk_buff
*, skb
)
2314 return dst_tclassid(skb
);
2317 static const struct bpf_func_proto bpf_get_route_realm_proto
= {
2318 .func
= bpf_get_route_realm
,
2320 .ret_type
= RET_INTEGER
,
2321 .arg1_type
= ARG_PTR_TO_CTX
,
2324 BPF_CALL_1(bpf_get_hash_recalc
, struct sk_buff
*, skb
)
2326 /* If skb_clear_hash() was called due to mangling, we can
2327 * trigger SW recalculation here. Later access to hash
2328 * can then use the inline skb->hash via context directly
2329 * instead of calling this helper again.
2331 return skb_get_hash(skb
);
2334 static const struct bpf_func_proto bpf_get_hash_recalc_proto
= {
2335 .func
= bpf_get_hash_recalc
,
2337 .ret_type
= RET_INTEGER
,
2338 .arg1_type
= ARG_PTR_TO_CTX
,
2341 BPF_CALL_1(bpf_set_hash_invalid
, struct sk_buff
*, skb
)
2343 /* After all direct packet write, this can be used once for
2344 * triggering a lazy recalc on next skb_get_hash() invocation.
2346 skb_clear_hash(skb
);
2350 static const struct bpf_func_proto bpf_set_hash_invalid_proto
= {
2351 .func
= bpf_set_hash_invalid
,
2353 .ret_type
= RET_INTEGER
,
2354 .arg1_type
= ARG_PTR_TO_CTX
,
BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
{
	/* Set user specified hash as L4(+), so that it gets returned
	 * on skb_get_hash() call unless BPF prog later on triggers a
	 * skb_clear_hash().
	 */
	__skb_set_sw_hash(skb, hash, true);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_proto = {
	.func		= bpf_set_hash,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
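
/* Illustrative usage sketch (not part of the original file): after
 * mangling headers, a tc program can either force a lazy software
 * recalculation or pin its own L4 hash:
 *
 *	bpf_set_hash_invalid(skb);
 *	...
 *	bpf_set_hash(skb, my_hash);
 *
 * The last writer wins; skb_get_hash() then returns the pinned value
 * until the hash is invalidated again.
 */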
BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
	   u16, vlan_tci)
{
	int ret;

	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
		     vlan_proto != htons(ETH_P_8021AD)))
		vlan_proto = htons(ETH_P_8021Q);

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
	.func		= bpf_skb_vlan_push,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
	int ret;

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_pop(skb);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
	.func		= bpf_skb_vlan_pop,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
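
/* Illustrative usage sketch (not part of the original file): a
 * tc/cls_bpf program re-tagging traffic might do:
 *
 *	bpf_skb_vlan_pop(skb);
 *	bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 42);
 *
 * Both helpers call bpf_compute_data_pointers() as seen above, so any
 * previously loaded skb->data/data_end pointers must be reloaded by the
 * program afterwards; the verifier enforces this.
 */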
static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{
	/* Caller already did skb_cow() with len as headroom,
	 * so no need to do it here.
	 */
	skb_push(skb, len);
	memmove(skb->data, skb->data + len, off);
	memset(skb->data + off, 0, len);

	/* No skb_postpush_rcsum(skb, skb->data + off, len)
	 * needed here as it does not change the skb->csum
	 * result for checksum complete when summing over
	 * zeroed blocks.
	 */
	return 0;
}

static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
	/* skb_ensure_writable() is not needed here, as we're
	 * already working on an uncloned skb.
	 */
	if (unlikely(!pskb_may_pull(skb, off + len)))
		return -ENOMEM;

	skb_postpull_rcsum(skb, skb->data + off, len);
	memmove(skb->data + len, skb->data, off);
	__skb_pull(skb, len);

	return 0;
}

static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* There's no need for __skb_push()/__skb_pull() pair to
	 * get to the start of the mac header as we're guaranteed
	 * to always start from here under eBPF.
	 */
	ret = bpf_skb_generic_push(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header -= len;
		skb->network_header -= len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}

static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* Same here, __skb_push()/__skb_pull() pair not needed. */
	ret = bpf_skb_generic_pop(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header += len;
		skb->network_header += len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}
static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb_mac_header_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* SKB_GSO_TCPV4 needs to be changed into
		 * SKB_GSO_TCPV6.
		 */
		if (shinfo->gso_type & SKB_GSO_TCPV4) {
			shinfo->gso_type &= ~SKB_GSO_TCPV4;
			shinfo->gso_type |=  SKB_GSO_TCPV6;
		}

		/* Due to IPv6 header, MSS needs to be downgraded. */
		skb_decrease_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IPV6);
	skb_clear_hash(skb);

	return 0;
}

static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb_mac_header_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* SKB_GSO_TCPV6 needs to be changed into
		 * SKB_GSO_TCPV4.
		 */
		if (shinfo->gso_type & SKB_GSO_TCPV6) {
			shinfo->gso_type &= ~SKB_GSO_TCPV6;
			shinfo->gso_type |=  SKB_GSO_TCPV4;
		}

		/* Due to IPv4 header, MSS can be upgraded. */
		skb_increase_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IP);
	skb_clear_hash(skb);

	return 0;
}

static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{
	__be16 from_proto = skb->protocol;

	if (from_proto == htons(ETH_P_IP) &&
	      to_proto == htons(ETH_P_IPV6))
		return bpf_skb_proto_4_to_6(skb);

	if (from_proto == htons(ETH_P_IPV6) &&
	      to_proto == htons(ETH_P_IP))
		return bpf_skb_proto_6_to_4(skb);

	return -ENOTSUPP;
}
BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
	   u64, flags)
{
	int ret;

	if (unlikely(flags))
		return -EINVAL;

	/* General idea is that this helper does the basic groundwork
	 * needed for changing the protocol, and eBPF program fills the
	 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
	 * and other helpers, rather than passing a raw buffer here.
	 *
	 * The rationale is to keep this minimal and without a need to
	 * deal with raw packet data. F.e. even if we would pass buffers
	 * here, the program still needs to call the bpf_lX_csum_replace()
	 * helpers anyway. Plus, this way we keep also separation of
	 * concerns, since f.e. bpf_skb_store_bytes() should only take
	 * care of writing.
	 *
	 * Currently, additional options and extension header space are
	 * not supported, but flags register is reserved so we can adapt
	 * that. For offloads, we mark packet as dodgy, so that headers
	 * need to be verified first.
	 */
	ret = bpf_skb_proto_xlat(skb, proto);
	bpf_compute_data_pointers(skb);

	return ret;
}

static const struct bpf_func_proto bpf_skb_change_proto_proto = {
	.func		= bpf_skb_change_proto,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
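
/* Illustrative usage sketch (not part of the original file): a
 * NAT64-style translator would call this helper first and then fill in
 * the headers itself, roughly:
 *
 *	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
 *		return TC_ACT_SHOT;
 *	bpf_skb_store_bytes(skb, nh_off, &ip6h, sizeof(ip6h), 0);
 *	bpf_l4_csum_replace(skb, csum_off, old, new,
 *			    BPF_F_PSEUDO_HDR | sizeof(new));
 *
 * mirroring the groundwork/fill-in split described in the comment
 * above; nh_off/csum_off/ip6h are hypothetical program variables.
 */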
BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{
	/* We only allow a restricted subset to be changed for now. */
	if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
		     !skb_pkt_type_ok(pkt_type)))
		return -EINVAL;

	skb->pkt_type = pkt_type;
	return 0;
}

static const struct bpf_func_proto bpf_skb_change_type_proto = {
	.func		= bpf_skb_change_type,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return sizeof(struct iphdr);
	case htons(ETH_P_IPV6):
		return sizeof(struct ipv6hdr);
	default:
		return ~0U;
	}
}
static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
{
	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* Due to header grow, MSS needs to be downgraded. */
		skb_decrease_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	return 0;
}

static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
{
	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* Due to header shrink, MSS can be upgraded. */
		skb_increase_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	return 0;
}

static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
	return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
			  SKB_MAX_ALLOC;
}
static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
{
	bool trans_same = skb->transport_header == skb->network_header;
	u32 len_cur, len_diff_abs = abs(len_diff);
	u32 len_min = bpf_skb_net_base_len(skb);
	u32 len_max = __bpf_skb_max_len(skb);
	__be16 proto = skb->protocol;
	bool shrink = len_diff < 0;
	int ret;

	if (unlikely(len_diff_abs > 0xfffU))
		return -EFAULT;
	if (unlikely(proto != htons(ETH_P_IP) &&
		     proto != htons(ETH_P_IPV6)))
		return -ENOTSUPP;

	len_cur = skb->len - skb_network_offset(skb);
	if (skb_transport_header_was_set(skb) && !trans_same)
		len_cur = skb_network_header_len(skb);
	if ((shrink && (len_diff_abs >= len_cur ||
			len_cur - len_diff_abs < len_min)) ||
	    (!shrink && (skb->len + len_diff_abs > len_max &&
			 !skb_is_gso(skb))))
		return -ENOTSUPP;

	ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) :
		       bpf_skb_net_grow(skb, len_diff_abs);

	bpf_compute_data_pointers(skb);
	return ret;
}

BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
	   u32, mode, u64, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	if (likely(mode == BPF_ADJ_ROOM_NET))
		return bpf_skb_adjust_net(skb, len_diff);

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
	.func		= bpf_skb_adjust_room,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};
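
/* Illustrative usage sketch (not part of the original file): growing
 * room at the network layer, e.g. to make space for an outer header in
 * front of the inner IPv4 header:
 *
 *	if (bpf_skb_adjust_room(skb, sizeof(struct iphdr),
 *				BPF_ADJ_ROOM_NET, 0))
 *		return TC_ACT_SHOT;
 *
 * The new room is zeroed by bpf_skb_generic_push() above; the program
 * then fills the outer header via bpf_skb_store_bytes() and fixes
 * checksums itself.
 */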
static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
	u32 min_len = skb_network_offset(skb);

	if (skb_transport_header_was_set(skb))
		min_len = skb_transport_offset(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		min_len = skb_checksum_start_offset(skb) +
			  skb->csum_offset + sizeof(__sum16);
	return min_len;
}

static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	unsigned int old_len = skb->len;
	int ret;

	ret = __skb_grow_rcsum(skb, new_len);
	if (!ret)
		memset(skb->data + old_len, 0, new_len - old_len);
	return ret;
}

static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	return __skb_trim_rcsum(skb, new_len);
}

static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
					u64 flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 min_len = __bpf_skb_min_len(skb);
	int ret;

	if (unlikely(flags || new_len > max_len || new_len < min_len))
		return -EINVAL;
	if (skb->encapsulation)
		return -ENOTSUPP;

	/* The basic idea of this helper is that it's performing the
	 * needed work to either grow or trim an skb, and eBPF program
	 * rewrites the rest via helpers like bpf_skb_store_bytes(),
	 * bpf_lX_csum_replace() and others rather than passing a raw
	 * buffer here. This one is a slow path helper and intended
	 * for replies with control messages.
	 *
	 * Like in bpf_skb_change_proto(), we want to keep this rather
	 * minimal and without protocol specifics so that we are able
	 * to separate concerns as in bpf_skb_store_bytes() should only
	 * be the one responsible for writing buffers.
	 *
	 * It's really expected to be a slow path operation here for
	 * control message replies, so we're implicitly linearizing,
	 * uncloning and drop offloads from the skb by this.
	 */
	ret = __bpf_try_make_writable(skb, skb->len);
	if (!ret) {
		if (new_len > skb->len)
			ret = bpf_skb_grow_rcsum(skb, new_len);
		else if (new_len < skb->len)
			ret = bpf_skb_trim_rcsum(skb, new_len);
		if (!ret && skb_is_gso(skb))
			skb_gso_reset(skb);
	}
	return ret;
}
BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
	   u64, flags)
{
	int ret = __bpf_skb_change_tail(skb, new_len, flags);

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_tail_proto = {
	.func		= bpf_skb_change_tail,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
	   u64, flags)
{
	int ret = __bpf_skb_change_tail(skb, new_len, flags);

	bpf_compute_data_end_sk_skb(skb);
	return ret;
}

static const struct bpf_func_proto sk_skb_change_tail_proto = {
	.func		= sk_skb_change_tail,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
					u64 flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 new_len = skb->len + head_room;
	int ret;

	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
		     new_len < skb->len))
		return -EINVAL;

	ret = skb_cow(skb, head_room);
	if (likely(!ret)) {
		/* Idea for this helper is that we currently only
		 * allow to expand on mac header. This means that
		 * skb->protocol network header, etc, stay as is.
		 * Compared to bpf_skb_change_tail(), we're more
		 * flexible due to not needing to linearize or
		 * reset GSO. Intention for this helper is to be
		 * used by an L3 skb that needs to push mac header
		 * for redirection into L2 device.
		 */
		__skb_push(skb, head_room);
		memset(skb->data, 0, head_room);
		skb_reset_mac_header(skb);
	}

	return ret;
}

BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
	   u64, flags)
{
	int ret = __bpf_skb_change_head(skb, head_room, flags);

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_head_proto = {
	.func		= bpf_skb_change_head,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
	   u64, flags)
{
	int ret = __bpf_skb_change_head(skb, head_room, flags);

	bpf_compute_data_end_sk_skb(skb);
	return ret;
}

static const struct bpf_func_proto sk_skb_change_head_proto = {
	.func		= sk_skb_change_head,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
	return xdp_data_meta_unsupported(xdp) ? 0 :
	       xdp->data - xdp->data_meta;
}

BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
	void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
	unsigned long metalen = xdp_get_metalen(xdp);
	void *data_start = xdp_frame_end + metalen;
	void *data = xdp->data + offset;

	if (unlikely(data < data_start ||
		     data > xdp->data_end - ETH_HLEN))
		return -EINVAL;

	if (metalen)
		memmove(xdp->data_meta + offset,
			xdp->data_meta, metalen);
	xdp->data_meta += offset;
	xdp->data = data;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
	.func		= bpf_xdp_adjust_head,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
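
/* Illustrative usage sketch (not part of the original file): an XDP
 * program prepending a custom 8 byte header reserves room and then
 * bounds-checks the new area before writing:
 *
 *	if (bpf_xdp_adjust_head(ctx, -8))
 *		return XDP_DROP;
 *	data = (void *)(long)ctx->data;
 *	data_end = (void *)(long)ctx->data_end;
 *	if (data + 8 > data_end)
 *		return XDP_DROP;
 *
 * A negative offset grows headroom, a positive one pops bytes; the
 * checks above guarantee at least ETH_HLEN of packet remains.
 */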
BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
{
	void *data_end = xdp->data_end + offset;

	/* only shrinking is allowed for now. */
	if (unlikely(offset >= 0))
		return -EINVAL;

	if (unlikely(data_end < xdp->data + ETH_HLEN))
		return -EINVAL;

	xdp->data_end = data_end;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
	.func		= bpf_xdp_adjust_tail,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
{
	void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
	void *meta = xdp->data_meta + offset;
	unsigned long metalen = xdp->data - meta;

	if (xdp_data_meta_unsupported(xdp))
		return -ENOTSUPP;
	if (unlikely(meta < xdp_frame_end ||
		     meta > xdp->data))
		return -EINVAL;
	if (unlikely((metalen & (sizeof(__u32) - 1)) ||
		     (metalen > 32)))
		return -EACCES;

	xdp->data_meta = meta;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
	.func		= bpf_xdp_adjust_meta,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
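
/* Illustrative usage sketch (not part of the original file): XDP can
 * stash a small amount of u32-aligned metadata in front of the packet,
 * which a tc program on the same device can later read back:
 *
 *	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(__u32)))
 *		return XDP_PASS;
 *	meta = (void *)(long)ctx->data_meta;
 *	if (meta + sizeof(__u32) > (void *)(long)ctx->data)
 *		return XDP_PASS;
 *	*(__u32 *)meta = mark;
 *
 * The size and alignment limits correspond to the -EACCES checks above.
 */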
static int __bpf_tx_xdp(struct net_device *dev,
			struct bpf_map *map,
			struct xdp_buff *xdp,
			u32 index)
{
	struct xdp_frame *xdpf;
	int err, sent;

	if (!dev->netdev_ops->ndo_xdp_xmit) {
		return -EOPNOTSUPP;
	}

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, XDP_XMIT_FLUSH);
	if (sent <= 0)
		return sent;
	return 0;
}

static int
xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
		     struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
{
	struct net_device *fwd;
	u32 index = ri->ifindex;
	int err;

	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	ri->ifindex = 0;
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
	if (unlikely(err))
		goto err;

	_trace_xdp_redirect(dev, xdp_prog, index);
	return 0;
err:
	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
	return err;
}

static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
			    struct bpf_map *map,
			    struct xdp_buff *xdp,
			    u32 index)
{
	int err;

	switch (map->map_type) {
	case BPF_MAP_TYPE_DEVMAP: {
		struct bpf_dtab_netdev *dst = fwd;

		err = dev_map_enqueue(dst, xdp, dev_rx);
		if (unlikely(err))
			return err;
		__dev_map_insert_ctx(map, index);
		break;
	}
	case BPF_MAP_TYPE_CPUMAP: {
		struct bpf_cpu_map_entry *rcpu = fwd;

		err = cpu_map_enqueue(rcpu, xdp, dev_rx);
		if (unlikely(err))
			return err;
		__cpu_map_insert_ctx(map, index);
		break;
	}
	case BPF_MAP_TYPE_XSKMAP: {
		struct xdp_sock *xs = fwd;

		err = __xsk_map_redirect(map, xdp, xs);
		return err;
	}
	default:
		break;
	}
	return 0;
}
void xdp_do_flush_map(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	struct bpf_map *map = ri->map_to_flush;

	ri->map_to_flush = NULL;
	if (map) {
		switch (map->map_type) {
		case BPF_MAP_TYPE_DEVMAP:
			__dev_map_flush(map);
			break;
		case BPF_MAP_TYPE_CPUMAP:
			__cpu_map_flush(map);
			break;
		case BPF_MAP_TYPE_XSKMAP:
			__xsk_map_flush(map);
			break;
		default:
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(xdp_do_flush_map);

static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_DEVMAP:
		return __dev_map_lookup_elem(map, index);
	case BPF_MAP_TYPE_CPUMAP:
		return __cpu_map_lookup_elem(map, index);
	case BPF_MAP_TYPE_XSKMAP:
		return __xsk_map_lookup_elem(map, index);
	default:
		return NULL;
	}
}

void bpf_clear_redirect_map(struct bpf_map *map)
{
	struct bpf_redirect_info *ri;
	int cpu;

	for_each_possible_cpu(cpu) {
		ri = per_cpu_ptr(&bpf_redirect_info, cpu);
		/* Avoid polluting remote cacheline due to writes if
		 * not needed. Once we pass this test, we need the
		 * cmpxchg() to make sure it hasn't been changed in
		 * the meantime by remote CPU.
		 */
		if (unlikely(READ_ONCE(ri->map) == map))
			cmpxchg(&ri->map, map, NULL);
	}
}
static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
			       struct bpf_prog *xdp_prog, struct bpf_map *map,
			       struct bpf_redirect_info *ri)
{
	u32 index = ri->ifindex;
	void *fwd = NULL;
	int err;

	ri->ifindex = 0;
	WRITE_ONCE(ri->map, NULL);

	fwd = __xdp_map_lookup_elem(map, index);
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}
	if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
		xdp_do_flush_map();

	err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
	if (unlikely(err))
		goto err;

	ri->map_to_flush = map;
	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
	return 0;
err:
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
	return err;
}

int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
		    struct bpf_prog *xdp_prog)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	struct bpf_map *map = READ_ONCE(ri->map);

	if (likely(map))
		return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);

	return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);
static int xdp_do_generic_redirect_map(struct net_device *dev,
				       struct sk_buff *skb,
				       struct xdp_buff *xdp,
				       struct bpf_prog *xdp_prog,
				       struct bpf_map *map)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	u32 index = ri->ifindex;
	void *fwd = NULL;
	int err = 0;

	ri->ifindex = 0;
	WRITE_ONCE(ri->map, NULL);

	fwd = __xdp_map_lookup_elem(map, index);
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		struct bpf_dtab_netdev *dst = fwd;

		err = dev_map_generic_redirect(dst, skb, xdp_prog);
		if (unlikely(err))
			goto err;
	} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
		struct xdp_sock *xs = fwd;

		err = xsk_generic_rcv(xs, xdp);
		if (err)
			goto err;
		consume_skb(skb);
	} else {
		/* TODO: Handle BPF_MAP_TYPE_CPUMAP */
		err = -EBADRQC;
		goto err;
	}

	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
	return 0;
err:
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
	return err;
}

int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	struct bpf_map *map = READ_ONCE(ri->map);
	u32 index = ri->ifindex;
	struct net_device *fwd;
	int err = 0;

	if (map)
		return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
						   map);
	ri->ifindex = 0;
	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	err = xdp_ok_fwd_dev(fwd, skb->len);
	if (unlikely(err))
		goto err;

	skb->dev = fwd;
	_trace_xdp_redirect(dev, xdp_prog, index);
	generic_xdp_tx(skb, xdp_prog);
	return 0;
err:
	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
	return err;
}
EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	WRITE_ONCE(ri->map, NULL);

	return XDP_REDIRECT;
}

static const struct bpf_func_proto bpf_xdp_redirect_proto = {
	.func		= bpf_xdp_redirect,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
	   u64, flags)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	WRITE_ONCE(ri->map, map);

	return XDP_REDIRECT;
}

static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
	.func		= bpf_xdp_redirect_map,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
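
/* Illustrative usage sketch (not part of the original file): the map
 * based variant is preferred over plain bpf_redirect(), since it allows
 * batched transmit. With a hypothetical devmap named tx_port:
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(int),
 *		.max_entries	= 64,
 *	};
 *
 *	return bpf_redirect_map(&tx_port, slot, 0);
 *
 * The actual transmit happens later in xdp_do_redirect(), with flushes
 * amortized via ri->map_to_flush as implemented above.
 */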
static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
				  unsigned long off, unsigned long len)
{
	void *ptr = skb_header_pointer(skb, off, len, dst_buff);

	if (unlikely(!ptr))
		return len;
	if (ptr != dst_buff)
		memcpy(dst_buff, ptr, len);

	return 0;
}

BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(skb_size > skb->len))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
				bpf_skb_copy);
}

static const struct bpf_func_proto bpf_skb_event_output_proto = {
	.func		= bpf_skb_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
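
/* Illustrative usage sketch (not part of the original file): sampling
 * the first 64 packet bytes plus a small meta struct into a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY (hypothetically named "events"):
 *
 *	__u64 flags = BPF_F_CURRENT_CPU | (64ULL << 32);
 *
 *	bpf_perf_event_output(skb, &events, flags, &meta, sizeof(meta));
 *
 * The upper 32 bits of flags carry the requested ctx length
 * (BPF_F_CTXLEN_MASK), matching the checks above.
 */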
static unsigned short bpf_tunnel_key_af(u64 flags)
{
	return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}

BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
	   u32, size, u64, flags)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	void *to_orig = to;
	int err;

	if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
		err = -EINVAL;
		goto err_clear;
	}
	if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
		err = -EPROTO;
		goto err_clear;
	}
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		err = -EINVAL;
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
			goto set_compat;
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			if (ip_tunnel_info_af(info) != AF_INET)
				goto err_clear;
set_compat:
			to = (struct bpf_tunnel_key *)compat;
			break;
		default:
			goto err_clear;
		}
	}

	to->tunnel_id = be64_to_cpu(info->key.tun_id);
	to->tunnel_tos = info->key.tos;
	to->tunnel_ttl = info->key.ttl;
	to->tunnel_ext = 0;

	if (flags & BPF_F_TUNINFO_IPV6) {
		memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
		       sizeof(to->remote_ipv6));
		to->tunnel_label = be32_to_cpu(info->key.label);
	} else {
		to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
		memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
		to->tunnel_label = 0;
	}

	if (unlikely(size != sizeof(struct bpf_tunnel_key)))
		memcpy(to_orig, to, size);

	return 0;
err_clear:
	memset(to_orig, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
	.func		= bpf_skb_get_tunnel_key,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	int err;

	if (unlikely(!info ||
		     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
		err = -ENOENT;
		goto err_clear;
	}
	if (unlikely(size < info->options_len)) {
		err = -ENOMEM;
		goto err_clear;
	}

	ip_tunnel_info_opts_get(to, info);
	if (size > info->options_len)
		memset(to + info->options_len, 0, size - info->options_len);

	return info->options_len;
err_clear:
	memset(to, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
	.func		= bpf_skb_get_tunnel_opt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
static struct metadata_dst __percpu *md_dst;

BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
	   const struct bpf_tunnel_key *, from, u32, size, u64, flags)
{
	struct metadata_dst *md = this_cpu_ptr(md_dst);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	struct ip_tunnel_info *info;

	if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
			       BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
		return -EINVAL;
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			memcpy(compat, from, size);
			memset(compat + size, 0, sizeof(compat) - size);
			from = (const struct bpf_tunnel_key *) compat;
			break;
		default:
			return -EINVAL;
		}
	}
	if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
		     from->tunnel_ext))
		return -EINVAL;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) md);
	skb_dst_set(skb, (struct dst_entry *) md);

	info = &md->u.tun_info;
	memset(info, 0, sizeof(*info));
	info->mode = IP_TUNNEL_INFO_TX;

	info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
	if (flags & BPF_F_DONT_FRAGMENT)
		info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
	if (flags & BPF_F_ZERO_CSUM_TX)
		info->key.tun_flags &= ~TUNNEL_CSUM;
	if (flags & BPF_F_SEQ_NUMBER)
		info->key.tun_flags |= TUNNEL_SEQ;

	info->key.tun_id = cpu_to_be64(from->tunnel_id);
	info->key.tos = from->tunnel_tos;
	info->key.ttl = from->tunnel_ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		info->mode |= IP_TUNNEL_INFO_IPV6;
		memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
		       sizeof(from->remote_ipv6));
		info->key.label = cpu_to_be32(from->tunnel_label) &
				  IPV6_FLOWLABEL_MASK;
	} else {
		info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
	}

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
	.func		= bpf_skb_set_tunnel_key,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
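
/* Illustrative usage sketch (not part of the original file): for a
 * collect_md tunnel device, an egress tc program picks the remote
 * endpoint per packet:
 *
 *	struct bpf_tunnel_key key = {};
 *
 *	key.tunnel_id = 42;
 *	key.remote_ipv4 = 0xac100164;	(172.16.1.100)
 *	key.tunnel_ttl = 64;
 *	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *				   BPF_F_ZERO_CSUM_TX))
 *		return TC_ACT_SHOT;
 *
 * The tunnel driver then reads the key back from the metadata dst that
 * was attached to the skb above.
 */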
BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
	   const u8 *, from, u32, size)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct metadata_dst *md = this_cpu_ptr(md_dst);

	if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
		return -EINVAL;
	if (unlikely(size > IP_TUNNEL_OPTS_MAX))
		return -ENOMEM;

	ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
	.func		= bpf_skb_set_tunnel_opt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{
	if (!md_dst) {
		struct metadata_dst __percpu *tmp;

		tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
						METADATA_IP_TUNNEL,
						GFP_KERNEL);
		if (!tmp)
			return NULL;
		if (cmpxchg(&md_dst, NULL, tmp))
			metadata_dst_free_percpu(tmp);
	}

	switch (which) {
	case BPF_FUNC_skb_set_tunnel_key:
		return &bpf_skb_set_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return &bpf_skb_set_tunnel_opt_proto;
	default:
		return NULL;
	}
}
BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
	   u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;
	struct sock *sk;

	sk = skb_to_full_sk(skb);
	if (!sk || !sk_fullsock(sk))
		return -ENOENT;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return sk_under_cgroup_hierarchy(sk, cgrp);
}

static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
	.func		= bpf_skb_under_cgroup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
#ifdef CONFIG_SOCK_CGROUP_DATA
BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
{
	struct sock *sk = skb_to_full_sk(skb);
	struct cgroup *cgrp;

	if (!sk || !sk_fullsock(sk))
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	return cgrp->kn->id.id;
}

static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
	.func		= bpf_skb_cgroup_id,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
	   ancestor_level)
{
	struct sock *sk = skb_to_full_sk(skb);
	struct cgroup *ancestor;
	struct cgroup *cgrp;

	if (!sk || !sk_fullsock(sk))
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	if (!ancestor)
		return 0;

	return ancestor->kn->id.id;
}

static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
	.func		= bpf_skb_ancestor_cgroup_id,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif
static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
				  unsigned long off, unsigned long len)
{
	memcpy(dst_buff, src_buff + off, len);
	return 0;
}

BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, xdp->data,
				xdp_size, bpf_xdp_copy);
}

static const struct bpf_func_proto bpf_xdp_event_output_proto = {
	.func		= bpf_xdp_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
{
	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
}

static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
	.func		= bpf_get_socket_cookie,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
{
	return sock_gen_cookie(ctx->sk);
}

static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
	.func		= bpf_get_socket_cookie_sock_addr,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
{
	return sock_gen_cookie(ctx->sk);
}

static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
	.func		= bpf_get_socket_cookie_sock_ops,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
{
	struct sock *sk = sk_to_full_sk(skb->sk);
	kuid_t kuid;

	if (!sk || !sk_fullsock(sk))
		return overflowuid;
	kuid = sock_net_uid(sock_net(sk), sk);
	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
}

static const struct bpf_func_proto bpf_get_socket_uid_proto = {
	.func		= bpf_get_socket_uid,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
	   int, level, int, optname, char *, optval, int, optlen)
{
	struct sock *sk = bpf_sock->sk;
	int ret = 0;
	int val;

	if (!sk_fullsock(sk))
		return -EINVAL;

	if (level == SOL_SOCKET) {
		if (optlen != sizeof(int))
			return -EINVAL;
		val = *((int *)optval);

		/* Only some socketops are supported */
		switch (optname) {
		case SO_RCVBUF:
			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
			sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
			break;
		case SO_SNDBUF:
			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
			sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
			break;
		case SO_MAX_PACING_RATE:
			sk->sk_max_pacing_rate = val;
			sk->sk_pacing_rate = min(sk->sk_pacing_rate,
						 sk->sk_max_pacing_rate);
			break;
		case SO_PRIORITY:
			sk->sk_priority = val;
			break;
		case SO_RCVLOWAT:
			if (val < 0)
				val = INT_MAX;
			sk->sk_rcvlowat = val ? : 1;
			break;
		case SO_MARK:
			sk->sk_mark = val;
			break;
		default:
			ret = -EINVAL;
		}
#ifdef CONFIG_INET
	} else if (level == SOL_IP) {
		if (optlen != sizeof(int) || sk->sk_family != AF_INET)
			return -EINVAL;

		val = *((int *)optval);
		/* Only some options are supported */
		switch (optname) {
		case IP_TOS:
			if (val < -1 || val > 0xff) {
				ret = -EINVAL;
			} else {
				struct inet_sock *inet = inet_sk(sk);

				if (val == -1)
					val = 0;
				inet->tos = val;
			}
			break;
		default:
			ret = -EINVAL;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (level == SOL_IPV6) {
		if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
			return -EINVAL;

		val = *((int *)optval);
		/* Only some options are supported */
		switch (optname) {
		case IPV6_TCLASS:
			if (val < -1 || val > 0xff) {
				ret = -EINVAL;
			} else {
				struct ipv6_pinfo *np = inet6_sk(sk);

				if (val == -1)
					val = 0;
				np->tclass = val;
			}
			break;
		default:
			ret = -EINVAL;
		}
#endif
	} else if (level == SOL_TCP &&
		   sk->sk_prot->setsockopt == tcp_setsockopt) {
		if (optname == TCP_CONGESTION) {
			char name[TCP_CA_NAME_MAX];
			bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;

			strncpy(name, optval, min_t(long, optlen,
						    TCP_CA_NAME_MAX-1));
			name[TCP_CA_NAME_MAX-1] = 0;
			ret = tcp_set_congestion_control(sk, name, false,
							 reinit);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);

			if (optlen != sizeof(int))
				return -EINVAL;

			val = *((int *)optval);
			/* Only some options are supported */
			switch (optname) {
			case TCP_BPF_IW:
				if (val <= 0 || tp->data_segs_out > 0)
					ret = -EINVAL;
				else
					tp->snd_cwnd = val;
				break;
			case TCP_BPF_SNDCWND_CLAMP:
				if (val <= 0) {
					ret = -EINVAL;
				} else {
					tp->snd_cwnd_clamp = val;
					tp->snd_ssthresh = val;
				}
				break;
			case TCP_SAVE_SYN:
				if (val < 0 || val > 1)
					ret = -EINVAL;
				else
					tp->save_syn = val;
				break;
			default:
				ret = -EINVAL;
			}
		}
#endif /* CONFIG_INET */
	} else {
		ret = -EINVAL;
	}
	return ret;
}

static const struct bpf_func_proto bpf_setsockopt_proto = {
	.func		= bpf_setsockopt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
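
/* Illustrative usage sketch (not part of the original file): a sockops
 * program switching newly established connections to a different
 * congestion control:
 *
 *	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *		bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
 *			       "dctcp", 6);
 *
 * Only the option subset whitelisted above is honoured; anything else
 * yields -EINVAL rather than falling through to the generic setsockopt
 * path.
 */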
BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
	   int, level, int, optname, char *, optval, int, optlen)
{
	struct sock *sk = bpf_sock->sk;

	if (!sk_fullsock(sk))
		goto err_clear;
#ifdef CONFIG_INET
	if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
		struct inet_connection_sock *icsk;
		struct tcp_sock *tp;

		switch (optname) {
		case TCP_CONGESTION:
			icsk = inet_csk(sk);

			if (!icsk->icsk_ca_ops || optlen <= 1)
				goto err_clear;
			strncpy(optval, icsk->icsk_ca_ops->name, optlen);
			optval[optlen - 1] = 0;
			break;
		case TCP_SAVED_SYN:
			tp = tcp_sk(sk);

			if (optlen <= 0 || !tp->saved_syn ||
			    optlen > tp->saved_syn[0])
				goto err_clear;
			memcpy(optval, tp->saved_syn + 1, optlen);
			break;
		default:
			goto err_clear;
		}
	} else if (level == SOL_IP) {
		struct inet_sock *inet = inet_sk(sk);

		if (optlen != sizeof(int) || sk->sk_family != AF_INET)
			goto err_clear;

		/* Only some options are supported */
		switch (optname) {
		case IP_TOS:
			*((int *)optval) = (int)inet->tos;
			break;
		default:
			goto err_clear;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (level == SOL_IPV6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
			goto err_clear;

		/* Only some options are supported */
		switch (optname) {
		case IPV6_TCLASS:
			*((int *)optval) = (int)np->tclass;
			break;
		default:
			goto err_clear;
		}
#endif
	} else {
		goto err_clear;
	}
	return 0;
#endif
err_clear:
	memset(optval, 0, optlen);
	return -EINVAL;
}

static const struct bpf_func_proto bpf_getsockopt_proto = {
	.func		= bpf_getsockopt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
	   int, argval)
{
	struct sock *sk = bpf_sock->sk;
	int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;

	if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
		return -EINVAL;

	if (val)
		tcp_sk(sk)->bpf_sock_ops_cb_flags = val;

	return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
}

static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
	.func		= bpf_sock_ops_cb_flags_set,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
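
/* Illustrative usage sketch (not part of the original file):
 * subscribing a sockops program to RTO and retransmit callbacks for
 * the current socket:
 *
 *	bpf_sock_ops_cb_flags_set(skops,
 *				  BPF_SOCK_OPS_RTO_CB_FLAG |
 *				  BPF_SOCK_OPS_RETRANS_CB_FLAG);
 *
 * The helper returns the unsupported bits of argval, so a return of 0
 * means all requested flags were set.
 */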
const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
EXPORT_SYMBOL_GPL(ipv6_bpf_stub);

BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
	   int, addr_len)
{
#ifdef CONFIG_INET
	struct sock *sk = ctx->sk;
	int err;

	/* Binding to port can be expensive so it's prohibited in the helper.
	 * Only binding to IP is supported.
	 */
	err = -EINVAL;
	if (addr->sa_family == AF_INET) {
		if (addr_len < sizeof(struct sockaddr_in))
			return err;
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			return err;
		return __inet_bind(sk, addr, addr_len, true, false);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (addr->sa_family == AF_INET6) {
		if (addr_len < SIN6_LEN_RFC2133)
			return err;
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			return err;
		/* ipv6_bpf_stub cannot be NULL, since it's called from
		 * bpf_cgroup_inet6_connect hook and ipv6 is already loaded
		 */
		return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
#endif /* CONFIG_IPV6 */
	}
#endif /* CONFIG_INET */

	return -EAFNOSUPPORT;
}

static const struct bpf_func_proto bpf_bind_proto = {
	.func		= bpf_bind,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
#ifdef CONFIG_XFRM
BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
	   struct bpf_xfrm_state *, to, u32, size, u64, flags)
{
	const struct sec_path *sp = skb_sec_path(skb);
	const struct xfrm_state *x;

	if (!sp || unlikely(index >= sp->len || flags))
		goto err_clear;

	x = sp->xvec[index];

	if (unlikely(size != sizeof(struct bpf_xfrm_state)))
		goto err_clear;

	to->reqid = x->props.reqid;
	to->spi = x->id.spi;
	to->family = x->props.family;
	to->ext = 0;

	if (to->family == AF_INET6) {
		memcpy(to->remote_ipv6, x->props.saddr.a6,
		       sizeof(to->remote_ipv6));
	} else {
		to->remote_ipv4 = x->props.saddr.a4;
		memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
	}

	return 0;
err_clear:
	memset(to, 0, size);
	return -EINVAL;
}

static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
	.func		= bpf_skb_get_xfrm_state,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
#endif
#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
				  const struct neighbour *neigh,
				  const struct net_device *dev)
{
	memcpy(params->dmac, neigh->ha, ETH_ALEN);
	memcpy(params->smac, dev->dev_addr, ETH_ALEN);
	params->h_vlan_TCI = 0;
	params->h_vlan_proto = 0;
	params->ifindex = dev->ifindex;

	return 0;
}
#endif

#if IS_ENABLED(CONFIG_INET)
static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
			       u32 flags, bool check_mtu)
{
	struct in_device *in_dev;
	struct neighbour *neigh;
	struct net_device *dev;
	struct fib_result res;
	struct fib_nh *nh;
	struct flowi4 fl4;
	int err;
	u32 mtu;

	dev = dev_get_by_index_rcu(net, params->ifindex);
	if (unlikely(!dev))
		return -ENODEV;

	/* verify forwarding is enabled on this interface */
	in_dev = __in_dev_get_rcu(dev);
	if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
		return BPF_FIB_LKUP_RET_FWD_DISABLED;

	if (flags & BPF_FIB_LOOKUP_OUTPUT) {
		fl4.flowi4_iif = 1;
		fl4.flowi4_oif = params->ifindex;
	} else {
		fl4.flowi4_iif = params->ifindex;
		fl4.flowi4_oif = 0;
	}
	fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;

	fl4.flowi4_proto = params->l4_protocol;
	fl4.daddr = params->ipv4_dst;
	fl4.saddr = params->ipv4_src;
	fl4.fl4_sport = params->sport;
	fl4.fl4_dport = params->dport;

	if (flags & BPF_FIB_LOOKUP_DIRECT) {
		u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
		struct fib_table *tb;

		tb = fib_get_table(net, tbid);
		if (unlikely(!tb))
			return BPF_FIB_LKUP_RET_NOT_FWDED;

		err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
	} else {
		fl4.flowi4_mark = 0;
		fl4.flowi4_secid = 0;
		fl4.flowi4_tun_key.tun_id = 0;
		fl4.flowi4_uid = sock_net_uid(net, NULL);

		err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
	}

	if (err) {
		/* map fib lookup errors to RTN_ type */
		if (err == -EINVAL)
			return BPF_FIB_LKUP_RET_BLACKHOLE;
		if (err == -EHOSTUNREACH)
			return BPF_FIB_LKUP_RET_UNREACHABLE;
		if (err == -EACCES)
			return BPF_FIB_LKUP_RET_PROHIBIT;

		return BPF_FIB_LKUP_RET_NOT_FWDED;
	}

	if (res.type != RTN_UNICAST)
		return BPF_FIB_LKUP_RET_NOT_FWDED;

	if (res.fi->fib_nhs > 1)
		fib_select_path(net, &res, &fl4, NULL);

	if (check_mtu) {
		mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
		if (params->tot_len > mtu)
			return BPF_FIB_LKUP_RET_FRAG_NEEDED;
	}

	nh = &res.fi->fib_nh[res.nh_sel];

	/* do not handle lwt encaps right now */
	if (nh->nh_lwtstate)
		return BPF_FIB_LKUP_RET_UNSUPP_LWT;

	dev = nh->nh_dev;
	if (nh->nh_gw)
		params->ipv4_dst = nh->nh_gw;

	params->rt_metric = res.fi->fib_priority;

	/* xdp and cls_bpf programs are run in RCU-bh so
	 * rcu_read_lock_bh is not needed here
	 */
	neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
	if (!neigh)
		return BPF_FIB_LKUP_RET_NO_NEIGH;

	return bpf_fib_set_fwd_params(params, neigh, dev);
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
			       u32 flags, bool check_mtu)
{
	struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
	struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
	struct neighbour *neigh;
	struct net_device *dev;
	struct inet6_dev *idev;
	struct fib6_info *f6i;
	struct flowi6 fl6;
	int strict = 0;
	int oif;
	u32 mtu;

	/* link local addresses are never forwarded */
	if (rt6_need_strict(dst) || rt6_need_strict(src))
		return BPF_FIB_LKUP_RET_NOT_FWDED;

	dev = dev_get_by_index_rcu(net, params->ifindex);
	if (unlikely(!dev))
		return -ENODEV;

	idev = __in6_dev_get_safely(dev);
	if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
		return BPF_FIB_LKUP_RET_FWD_DISABLED;

	if (flags & BPF_FIB_LOOKUP_OUTPUT) {
		fl6.flowi6_iif = 1;
		oif = fl6.flowi6_oif = params->ifindex;
	} else {
		oif = fl6.flowi6_iif = params->ifindex;
		fl6.flowi6_oif = 0;
		strict = RT6_LOOKUP_F_HAS_SADDR;
	}
	fl6.flowlabel = params->flowinfo;
	fl6.flowi6_scope = 0;
	fl6.flowi6_flags = 0;
	fl6.mp_hash = 0;

	fl6.flowi6_proto = params->l4_protocol;
	fl6.daddr = *dst;
	fl6.saddr = *src;
	fl6.fl6_sport = params->sport;
	fl6.fl6_dport = params->dport;

	if (flags & BPF_FIB_LOOKUP_DIRECT) {
		u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
		struct fib6_table *tb;

		tb = ipv6_stub->fib6_get_table(net, tbid);
		if (unlikely(!tb))
			return BPF_FIB_LKUP_RET_NOT_FWDED;

		f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
	} else {
		fl6.flowi6_mark = 0;
		fl6.flowi6_secid = 0;
		fl6.flowi6_tun_key.tun_id = 0;
		fl6.flowi6_uid = sock_net_uid(net, NULL);

		f6i = ipv6_stub->fib6_lookup(net, oif, &fl6, strict);
	}

	if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
		return BPF_FIB_LKUP_RET_NOT_FWDED;

	if (unlikely(f6i->fib6_flags & RTF_REJECT)) {
		switch (f6i->fib6_type) {
		case RTN_BLACKHOLE:
			return BPF_FIB_LKUP_RET_BLACKHOLE;
		case RTN_UNREACHABLE:
			return BPF_FIB_LKUP_RET_UNREACHABLE;
		case RTN_PROHIBIT:
			return BPF_FIB_LKUP_RET_PROHIBIT;
		default:
			return BPF_FIB_LKUP_RET_NOT_FWDED;
		}
	}

	if (f6i->fib6_type != RTN_UNICAST)
		return BPF_FIB_LKUP_RET_NOT_FWDED;

	if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
		f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
						       fl6.flowi6_oif, NULL,
						       strict);

	if (check_mtu) {
		mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src);
		if (params->tot_len > mtu)
			return BPF_FIB_LKUP_RET_FRAG_NEEDED;
	}

	if (f6i->fib6_nh.nh_lwtstate)
		return BPF_FIB_LKUP_RET_UNSUPP_LWT;

	if (f6i->fib6_flags & RTF_GATEWAY)
		*dst = f6i->fib6_nh.nh_gw;

	dev = f6i->fib6_nh.nh_dev;
	params->rt_metric = f6i->fib6_metric;

	/* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
	 * not needed here. Can not use __ipv6_neigh_lookup_noref here
	 * because we need to get nd_tbl via the stub
	 */
	neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
				      ndisc_hashfn, dst, dev);
	if (!neigh)
		return BPF_FIB_LKUP_RET_NO_NEIGH;

	return bpf_fib_set_fwd_params(params, neigh, dev);
}
#endif
BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
	   struct bpf_fib_lookup *, params, int, plen, u32, flags)
{
	if (plen < sizeof(*params))
		return -EINVAL;

	if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
		return -EINVAL;

	switch (params->family) {
#if IS_ENABLED(CONFIG_INET)
	case AF_INET:
		return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
					   flags, true);
#endif
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
					   flags, true);
#endif
	}
	return -EAFNOSUPPORT;
}

static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
	.func		= bpf_xdp_fib_lookup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
	   struct bpf_fib_lookup *, params, int, plen, u32, flags)
{
	struct net *net = dev_net(skb->dev);
	int rc = -EAFNOSUPPORT;

	if (plen < sizeof(*params))
		return -EINVAL;

	if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
		return -EINVAL;

	switch (params->family) {
#if IS_ENABLED(CONFIG_INET)
	case AF_INET:
		rc = bpf_ipv4_fib_lookup(net, params, flags, false);
		break;
#endif
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		rc = bpf_ipv6_fib_lookup(net, params, flags, false);
		break;
#endif
	}

	if (!rc) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, params->ifindex);
		if (!is_skb_forwardable(dev, skb))
			rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
	}

	return rc;
}

static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
	.func		= bpf_skb_fib_lookup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
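
/* Illustrative usage sketch (not part of the original file): a minimal
 * XDP forwarder combines this helper with redirect after filling the
 * lookup tuple from the parsed packet:
 *
 *	struct bpf_fib_lookup fib = {};
 *
 *	fib.family = AF_INET;
 *	fib.ifindex = ctx->ingress_ifindex;
 *	(fill tos, l4_protocol, saddr/daddr, tot_len from the headers)
 *	if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) ==
 *	    BPF_FIB_LKUP_RET_SUCCESS) {
 *		(write fib.dmac/fib.smac into the ethernet header)
 *		return bpf_redirect(fib.ifindex, 0);
 *	}
 *	return XDP_PASS;
 *
 * On success the dmac/smac/ifindex fields were filled in by
 * bpf_fib_set_fwd_params() above.
 */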
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
{
	int err;
	struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;

	if (!seg6_validate_srh(srh, len))
		return -EINVAL;

	switch (type) {
	case BPF_LWT_ENCAP_SEG6_INLINE:
		if (skb->protocol != htons(ETH_P_IPV6))
			return -EBADMSG;

		err = seg6_do_srh_inline(skb, srh);
		break;
	case BPF_LWT_ENCAP_SEG6:
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
		err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
		break;
	default:
		return -EINVAL;
	}

	bpf_compute_data_pointers(skb);
	if (err)
		return err;

	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	return seg6_lookup_nexthop(skb, NULL, 0);
}
#endif /* CONFIG_IPV6_SEG6_BPF */

BPF_CALL_4(bpf_lwt_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
	   u32, len)
{
	switch (type) {
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
	case BPF_LWT_ENCAP_SEG6:
	case BPF_LWT_ENCAP_SEG6_INLINE:
		return bpf_push_seg6_encap(skb, type, hdr, len);
#endif
	default:
		return -EINVAL;
	}
}

static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
	.func		= bpf_lwt_push_encap,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE
};
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len)
{
	struct seg6_bpf_srh_state *srh_state =
		this_cpu_ptr(&seg6_bpf_srh_states);
	struct ipv6_sr_hdr *srh = srh_state->srh;
	void *srh_tlvs, *srh_end, *ptr;
	int srhoff = 0;

	if (srh == NULL)
		return -EINVAL;

	srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
	srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);

	ptr = skb->data + offset;
	if (ptr >= srh_tlvs && ptr + len <= srh_end)
		srh_state->valid = false;
	else if (ptr < (void *)&srh->flags ||
		 ptr + len > (void *)&srh->segments)
		return -EFAULT;

	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;
	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
		return -EINVAL;
	srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);

	memcpy(skb->data + offset, from, len);
	return 0;
}

static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
	.func		= bpf_lwt_seg6_store_bytes,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE
};

static void bpf_update_srh_state(struct sk_buff *skb)
{
	struct seg6_bpf_srh_state *srh_state =
		this_cpu_ptr(&seg6_bpf_srh_states);
	int srhoff = 0;

	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
		srh_state->srh = NULL;
	} else {
		srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
		srh_state->hdrlen = srh_state->srh->hdrlen << 3;
		srh_state->valid = true;
	}
}
BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
	   u32, action, void *, param, u32, param_len)
{
	struct seg6_bpf_srh_state *srh_state =
		this_cpu_ptr(&seg6_bpf_srh_states);
	int hdroff = 0;
	int err;

	switch (action) {
	case SEG6_LOCAL_ACTION_END_X:
		if (!seg6_bpf_has_valid_srh(skb))
			return -EBADMSG;
		if (param_len != sizeof(struct in6_addr))
			return -EINVAL;
		return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
	case SEG6_LOCAL_ACTION_END_T:
		if (!seg6_bpf_has_valid_srh(skb))
			return -EBADMSG;
		if (param_len != sizeof(int))
			return -EINVAL;
		return seg6_lookup_nexthop(skb, NULL, *(int *)param);
	case SEG6_LOCAL_ACTION_END_DT6:
		if (!seg6_bpf_has_valid_srh(skb))
			return -EBADMSG;
		if (param_len != sizeof(int))
			return -EINVAL;

		if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
			return -EBADMSG;
		if (!pskb_pull(skb, hdroff))
			return -EBADMSG;

		skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		skb->encapsulation = 0;

		bpf_compute_data_pointers(skb);
		bpf_update_srh_state(skb);
		return seg6_lookup_nexthop(skb, NULL, *(int *)param);
	case SEG6_LOCAL_ACTION_END_B6:
		if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
			return -EBADMSG;
		err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
					  param, param_len);
		if (!err)
			bpf_update_srh_state(skb);

		return err;
	case SEG6_LOCAL_ACTION_END_B6_ENCAP:
		if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
			return -EBADMSG;
		err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
					  param, param_len);
		if (!err)
			bpf_update_srh_state(skb);

		return err;
	default:
		return -EINVAL;
	}
}

static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
	.func		= bpf_lwt_seg6_action,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE
};
BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
	   s32, len)
{
	struct seg6_bpf_srh_state *srh_state =
		this_cpu_ptr(&seg6_bpf_srh_states);
	struct ipv6_sr_hdr *srh = srh_state->srh;
	void *srh_end, *srh_tlvs, *ptr;
	struct ipv6hdr *hdr;
	int srhoff = 0;
	int ret;

	if (unlikely(srh == NULL))
		return -EINVAL;

	srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
			((srh->first_segment + 1) << 4));
	srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
			srh_state->hdrlen);
	ptr = skb->data + offset;

	if (unlikely(ptr < srh_tlvs || ptr > srh_end))
		return -EFAULT;
	if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
		return -EFAULT;

	if (len > 0) {
		ret = skb_cow_head(skb, len);
		if (unlikely(ret < 0))
			return ret;

		ret = bpf_skb_net_hdr_push(skb, offset, len);
	} else {
		ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
	}

	bpf_compute_data_pointers(skb);
	if (unlikely(ret < 0))
		return ret;

	hdr = (struct ipv6hdr *)skb->data;
	hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));

	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
		return -EINVAL;
	srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
	srh_state->hdrlen += len;
	srh_state->valid = false;
	return 0;
}

static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
	.func		= bpf_lwt_seg6_adjust_srh,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_IPV6_SEG6_BPF */
#ifdef CONFIG_INET
static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
			      struct sk_buff *skb, u8 family, u8 proto)
{
	bool refcounted = false;
	struct sock *sk = NULL;
	int dif = 0;

	if (skb->dev)
		dif = skb->dev->ifindex;

	if (family == AF_INET) {
		__be32 src4 = tuple->ipv4.saddr;
		__be32 dst4 = tuple->ipv4.daddr;
		int sdif = inet_sdif(skb);

		if (proto == IPPROTO_TCP)
			sk = __inet_lookup(net, &tcp_hashinfo, skb, 0,
					   src4, tuple->ipv4.sport,
					   dst4, tuple->ipv4.dport,
					   dif, sdif, &refcounted);
		else
			sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
					       dst4, tuple->ipv4.dport,
					       dif, sdif, &udp_table, skb);
#if IS_REACHABLE(CONFIG_IPV6)
	} else {
		struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
		struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
		int sdif = inet6_sdif(skb);

		if (proto == IPPROTO_TCP)
			sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
					    src6, tuple->ipv6.sport,
					    dst6, tuple->ipv6.dport,
					    dif, sdif, &refcounted);
		else
			sk = __udp6_lib_lookup(net, src6, tuple->ipv6.sport,
					       dst6, tuple->ipv6.dport,
					       dif, sdif, &udp_table, skb);
#endif
	}

	if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
		WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
		sk = NULL;
	}
	return sk;
}

/* bpf_sk_lookup performs the core lookup for different types of sockets,
 * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
 * Returns the socket as an 'unsigned long' to simplify the casting in the
 * callers to satisfy BPF_CALL declarations.
 */
static unsigned long
bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
	      u8 proto, u64 netns_id, u64 flags)
{
	struct net *caller_net;
	struct sock *sk = NULL;
	u8 family = AF_UNSPEC;
	struct net *net;

	family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
	if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
		goto out;

	if (skb->dev)
		caller_net = dev_net(skb->dev);
	else
		caller_net = sock_net(skb->sk);
	if (netns_id) {
		net = get_net_ns_by_id(caller_net, netns_id);
		if (unlikely(!net))
			goto out;
		sk = sk_lookup(net, tuple, skb, family, proto);
		put_net(net);
	} else {
		net = caller_net;
		sk = sk_lookup(net, tuple, skb, family, proto);
	}

	if (sk)
		sk = sk_to_full_sk(sk);
out:
	return (unsigned long) sk;
}
BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
	return bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags);
}

static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
	.func		= bpf_sk_lookup_tcp,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
	return bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, netns_id, flags);
}

static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
	.func		= bpf_sk_lookup_udp,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
BPF_CALL_1(bpf_sk_release, struct sock *, sk)
{
	if (!sock_flag(sk, SOCK_RCU_FREE))
		sock_gen_put(sk);
	return 0;
}

static const struct bpf_func_proto bpf_sk_release_proto = {
	.func		= bpf_sk_release,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_SOCKET,
};
#endif /* CONFIG_INET */
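
/* Usage sketch only, not part of this file's logic: from the BPF program
 * side the two helpers above are expected to be paired, since the verifier
 * tracks the returned socket as a reference that must be released. Roughly,
 * assuming the uapi helper wrappers and an IPv4 tuple filled from the packet:
 *
 *	struct bpf_sock_tuple tuple = {};
 *	struct bpf_sock *sk;
 *
 *	tuple.ipv4.daddr = iph->daddr;
 *	tuple.ipv4.dport = tcph->dest;
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
 *	if (sk)
 *		bpf_sk_release(sk);
 *
 * netns_id == 0 selects the caller's network namespace, matching the
 * bpf_sk_lookup() dispatch above.
 */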
bool bpf_helper_changes_pkt_data(void *func)
{
	if (func == bpf_skb_vlan_push ||
	    func == bpf_skb_vlan_pop ||
	    func == bpf_skb_store_bytes ||
	    func == bpf_skb_change_proto ||
	    func == bpf_skb_change_head ||
	    func == sk_skb_change_head ||
	    func == bpf_skb_change_tail ||
	    func == sk_skb_change_tail ||
	    func == bpf_skb_adjust_room ||
	    func == bpf_skb_pull_data ||
	    func == sk_skb_pull_data ||
	    func == bpf_clone_redirect ||
	    func == bpf_l3_csum_replace ||
	    func == bpf_l4_csum_replace ||
	    func == bpf_xdp_adjust_head ||
	    func == bpf_xdp_adjust_meta ||
	    func == bpf_msg_pull_data ||
	    func == bpf_xdp_adjust_tail ||
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
	    func == bpf_lwt_seg6_store_bytes ||
	    func == bpf_lwt_seg6_adjust_srh ||
	    func == bpf_lwt_seg6_action ||
#endif
	    func == bpf_lwt_push_encap)
		return true;

	return false;
}
static const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* else: fall through */
	default:
		return NULL;
	}
}
static const struct bpf_func_proto *
sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	/* inet and inet6 sockets are created in a process
	 * context so there is always a valid uid/gid
	 */
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	/* inet and inet6 sockets are created in a process
	 * context so there is always a valid uid/gid
	 */
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_bind:
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
			return &bpf_bind_proto;
		default:
			return NULL;
		}
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_sock_addr_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_load_bytes_relative:
		return &bpf_skb_load_bytes_relative_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	default:
		return sk_filter_func_proto(func_id, prog);
	}
}
static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_load_bytes_relative:
		return &bpf_skb_load_bytes_relative_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_skb_vlan_push:
		return &bpf_skb_vlan_push_proto;
	case BPF_FUNC_skb_vlan_pop:
		return &bpf_skb_vlan_pop_proto;
	case BPF_FUNC_skb_change_proto:
		return &bpf_skb_change_proto_proto;
	case BPF_FUNC_skb_change_type:
		return &bpf_skb_change_type_proto;
	case BPF_FUNC_skb_adjust_room:
		return &bpf_skb_adjust_room_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	case BPF_FUNC_set_hash:
		return &bpf_set_hash_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	case BPF_FUNC_fib_lookup:
		return &bpf_skb_fib_lookup_proto;
#ifdef CONFIG_XFRM
	case BPF_FUNC_skb_get_xfrm_state:
		return &bpf_skb_get_xfrm_state_proto;
#endif
#ifdef CONFIG_SOCK_CGROUP_DATA
	case BPF_FUNC_skb_cgroup_id:
		return &bpf_skb_cgroup_id_proto;
	case BPF_FUNC_skb_ancestor_cgroup_id:
		return &bpf_skb_ancestor_cgroup_id_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_sk_lookup_tcp:
		return &bpf_sk_lookup_tcp_proto;
	case BPF_FUNC_sk_lookup_udp:
		return &bpf_sk_lookup_udp_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
#endif
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_xdp_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_xdp_adjust_head:
		return &bpf_xdp_adjust_head_proto;
	case BPF_FUNC_xdp_adjust_meta:
		return &bpf_xdp_adjust_meta_proto;
	case BPF_FUNC_redirect:
		return &bpf_xdp_redirect_proto;
	case BPF_FUNC_redirect_map:
		return &bpf_xdp_redirect_map_proto;
	case BPF_FUNC_xdp_adjust_tail:
		return &bpf_xdp_adjust_tail_proto;
	case BPF_FUNC_fib_lookup:
		return &bpf_xdp_fib_lookup_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
const struct bpf_func_proto bpf_sock_map_update_proto __weak;
const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
static const struct bpf_func_proto *
sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_getsockopt_proto;
	case BPF_FUNC_sock_ops_cb_flags_set:
		return &bpf_sock_ops_cb_flags_set_proto;
	case BPF_FUNC_sock_map_update:
		return &bpf_sock_map_update_proto;
	case BPF_FUNC_sock_hash_update:
		return &bpf_sock_hash_update_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_sock_ops_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;
static const struct bpf_func_proto *
sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_msg_redirect_map:
		return &bpf_msg_redirect_map_proto;
	case BPF_FUNC_msg_redirect_hash:
		return &bpf_msg_redirect_hash_proto;
	case BPF_FUNC_msg_apply_bytes:
		return &bpf_msg_apply_bytes_proto;
	case BPF_FUNC_msg_cork_bytes:
		return &bpf_msg_cork_bytes_proto;
	case BPF_FUNC_msg_pull_data:
		return &bpf_msg_pull_data_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;
static const struct bpf_func_proto *
sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &sk_skb_pull_data_proto;
	case BPF_FUNC_skb_change_tail:
		return &sk_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &sk_skb_change_head_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	case BPF_FUNC_sk_redirect_map:
		return &bpf_sk_redirect_map_proto;
	case BPF_FUNC_sk_redirect_hash:
		return &bpf_sk_redirect_hash_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
#ifdef CONFIG_INET
	case BPF_FUNC_sk_lookup_tcp:
		return &bpf_sk_lookup_tcp_proto;
	case BPF_FUNC_sk_lookup_udp:
		return &bpf_sk_lookup_udp_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
#endif
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_lwt_push_encap:
		return &bpf_lwt_push_encap_proto;
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}
static const struct bpf_func_proto *
lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}
static const struct bpf_func_proto *
lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
	case BPF_FUNC_lwt_seg6_store_bytes:
		return &bpf_lwt_seg6_store_bytes_proto;
	case BPF_FUNC_lwt_seg6_action:
		return &bpf_lwt_seg6_action_proto;
	case BPF_FUNC_lwt_seg6_adjust_srh:
		return &bpf_lwt_seg6_adjust_srh_proto;
#endif
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}
static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
		if (off + size > offsetofend(struct __sk_buff, cb[4]))
			return false;
		break;
	case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
	case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
	case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
	case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, data_end):
		if (size != size_default)
			return false;
		break;
	case bpf_ctx_range(struct __sk_buff, flow_keys):
		if (size != sizeof(struct bpf_flow_keys *))
			return false;
		break;
	default:
		/* Only narrow read access allowed for now. */
		if (type == BPF_WRITE) {
			if (size != size_default)
				return false;
		} else {
			bpf_ctx_record_field_size(info, size_default);
			if (!bpf_ctx_narrow_access_ok(off, size, size_default))
				return false;
		}
	}

	return true;
}
static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, data_end):
	case bpf_ctx_range(struct __sk_buff, flow_keys):
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}
static bool lwt_is_valid_access(int off, int size,
				enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, flow_keys):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}
/* Attach type specific accesses */
static bool __sock_filter_check_attach_type(int off,
					    enum bpf_access_type access_type,
					    enum bpf_attach_type attach_type)
{
	switch (off) {
	case offsetof(struct bpf_sock, bound_dev_if):
	case offsetof(struct bpf_sock, mark):
	case offsetof(struct bpf_sock, priority):
		switch (attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
			goto full_access;
		default:
			return false;
		}
	case bpf_ctx_range(struct bpf_sock, src_ip4):
		switch (attach_type) {
		case BPF_CGROUP_INET4_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
		switch (attach_type) {
		case BPF_CGROUP_INET6_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	case bpf_ctx_range(struct bpf_sock, src_port):
		switch (attach_type) {
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	default:
		return false;
	}
read_only:
	return access_type == BPF_READ;
full_access:
	return true;
}
static bool __sock_filter_check_size(int off, int size,
				     struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	switch (off) {
	case bpf_ctx_range(struct bpf_sock, src_ip4):
	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	}

	return size == size_default;
}
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct bpf_sock))
		return false;
	if (off % size != 0)
		return false;
	if (!__sock_filter_check_size(off, size, info))
		return false;
	return true;
}
static bool sock_filter_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (!bpf_sock_is_valid_access(off, size, type, info))
		return false;
	return __sock_filter_check_attach_type(off, type,
					       prog->expected_attach_type);
}
static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
				const struct bpf_prog *prog, int drop_verdict)
{
	struct bpf_insn *insn = insn_buf;

	if (!direct_write)
		return 0;

	/* if (!skb->cloned)
	 *       goto start;
	 *
	 * (Fast-path, otherwise approximation that we might be
	 *  a clone, do the rest in helper.)
	 */
	*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
	*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);

	/* ret = bpf_skb_pull_data(skb, 0); */
	*insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	*insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
	*insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			       BPF_FUNC_skb_pull_data);
	/* if (!ret)
	 *      goto restore;
	 * return TC_ACT_SHOT;
	 */
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
	*insn++ = BPF_EXIT_INSN();

	/* restore: */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
	/* start: */
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}
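
/* The prologue emitted above corresponds roughly to this pseudo-C (sketch;
 * drop_verdict is TC_ACT_SHOT or SK_DROP depending on the caller):
 *
 *	if (skb->cloned) {
 *		if (bpf_skb_pull_data(skb, 0))
 *			return drop_verdict;
 *	}
 *	// fall through to the program's original first instruction
 */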
static int bpf_gen_ld_abs(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf)
{
	bool indirect = BPF_MODE(orig->code) == BPF_IND;
	struct bpf_insn *insn = insn_buf;

	/* We're guaranteed here that CTX is in R6. */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
		if (orig->imm)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
	}

	switch (BPF_SIZE(orig->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
		break;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}
static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
}
static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, tc_index):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range(struct __sk_buff, tc_classid):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case bpf_ctx_range(struct __sk_buff, flow_keys):
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
		return false;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}
static bool __is_valid_xdp_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct xdp_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}
static bool xdp_is_valid_access(int off, int size,
				enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		if (bpf_prog_is_dev_bound(prog->aux)) {
			switch (off) {
			case offsetof(struct xdp_md, rx_queue_index):
				return __is_valid_xdp_access(off, size);
			}
		}
		return false;
	}

	switch (off) {
	case offsetof(struct xdp_md, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct xdp_md, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case offsetof(struct xdp_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_xdp_access(off, size);
}
void bpf_warn_invalid_xdp_action(u32 act)
{
	const u32 act_max = XDP_REDIRECT;

	WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
		  act > act_max ? "Illegal" : "Driver unsupported",
		  act);
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
static bool sock_addr_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sock_addr))
		return false;
	if (off % size != 0)
		return false;

	/* Disallow access to IPv6 fields from IPv4 context and vice
	 * versa.
	 */
	switch (off) {
	case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_UDP4_SENDMSG:
			break;
		default:
			return false;
		}
		break;
	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_UDP6_SENDMSG:
			break;
		default:
			return false;
		}
		break;
	case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_UDP4_SENDMSG:
			break;
		default:
			return false;
		}
		break;
	case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
				msg_src_ip6[3]):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_UDP6_SENDMSG:
			break;
		default:
			return false;
		}
		break;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
	case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
	case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
				msg_src_ip6[3]):
		/* Only narrow read access allowed for now. */
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			if (!bpf_ctx_narrow_access_ok(off, size, size_default))
				return false;
		} else {
			if (size != size_default)
				return false;
		}
		break;
	case bpf_ctx_range(struct bpf_sock_addr, user_port):
		if (size != size_default)
			return false;
		break;
	default:
		if (type == BPF_READ) {
			if (size != size_default)
				return false;
		} else {
			return false;
		}
	}

	return true;
}
static bool sock_ops_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     const struct bpf_prog *prog,
				     struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sock_ops))
		return false;

	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sock_ops, reply):
		case offsetof(struct bpf_sock_ops, sk_txhash):
			if (size != size_default)
				return false;
			break;
		default:
			return false;
		}
	} else {
		switch (off) {
		case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
					bytes_acked):
			if (size != sizeof(__u64))
				return false;
			break;
		default:
			if (size != size_default)
				return false;
			break;
		}
	}

	return true;
}
static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
			   const struct bpf_prog *prog)
{
	return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
}
static bool sk_skb_is_valid_access(int off, int size,
				   enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, flow_keys):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, tc_index):
		case bpf_ctx_range(struct __sk_buff, priority):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, mark):
		return false;
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}
static bool sk_msg_is_valid_access(int off, int size,
				   enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE)
		return false;

	switch (off) {
	case offsetof(struct sk_msg_md, data):
		info->reg_type = PTR_TO_PACKET;
		if (size != sizeof(__u64))
			return false;
		break;
	case offsetof(struct sk_msg_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		if (size != sizeof(__u64))
			return false;
		break;
	default:
		if (size != sizeof(__u32))
			return false;
	}

	if (off < 0 || off >= sizeof(struct sk_msg_md))
		return false;
	if (off % size != 0)
		return false;

	return true;
}
static bool flow_dissector_is_valid_access(int off, int size,
					   enum bpf_access_type type,
					   const struct bpf_prog *prog,
					   struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case bpf_ctx_range(struct __sk_buff, flow_keys):
		info->reg_type = PTR_TO_FLOW_KEYS;
		break;
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
		return false;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}
static u32 bpf_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, len):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, len, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, protocol):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, protocol, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, vlan_proto):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, vlan_proto, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, priority):
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, priority, 4,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, priority, 4,
							     target_size));
		break;

	case offsetof(struct __sk_buff, ingress_ifindex):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, skb_iif, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct net_device, ifindex, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, hash):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, hash, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, mark):
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, mark, 4,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, mark, 4,
							     target_size));
		break;

	case offsetof(struct __sk_buff, pkt_type):
		*target_size = 1;
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
				      PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
#endif
		break;

	case offsetof(struct __sk_buff, queue_mapping):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, queue_mapping, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, vlan_present):
	case offsetof(struct __sk_buff, vlan_tci):
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, vlan_tci, 2,
						     target_size));
		if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
		}
		break;

	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetofend(struct __sk_buff, cb[4]) - 1:
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
		BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
			      offsetof(struct qdisc_skb_cb, data)) %
			     sizeof(__u64));

		prog->cb_access = 1;
		off  = si->off;
		off -= offsetof(struct __sk_buff, cb[0]);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_classid):
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);

		off  = si->off;
		off -= offsetof(struct __sk_buff, tc_classid);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, tc_classid);
		*target_size = 2;
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, data));
		break;

	case offsetof(struct __sk_buff, data_meta):
		off  = si->off;
		off -= offsetof(struct __sk_buff, data_meta);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct bpf_skb_data_end, data_meta);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, data_end):
		off  = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct bpf_skb_data_end, data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_index):
#ifdef CONFIG_NET_SCHED
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, tc_index, 2,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, tc_index, 2,
							     target_size));
#else
		*target_size = 2;
		if (type == BPF_WRITE)
			*insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
		else
			*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct __sk_buff, napi_id):
#if defined(CONFIG_NET_RX_BUSY_POLL)
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, napi_id, 4,
						     target_size));
		*insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#else
		*target_size = 4;
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
		break;
	case offsetof(struct __sk_buff, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_family,
						     2, target_size));
		break;
	case offsetof(struct __sk_buff, remote_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_daddr,
						     4, target_size));
		break;
	case offsetof(struct __sk_buff, local_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_rcv_saddr,
						     4, target_size));
		break;
	case offsetof(struct __sk_buff, remote_ip6[0]) ...
	     offsetof(struct __sk_buff, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct __sk_buff, remote_ip6[0]);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;
	case offsetof(struct __sk_buff, local_ip6[0]) ...
	     offsetof(struct __sk_buff, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct __sk_buff, local_ip6[0]);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct __sk_buff, remote_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_dport,
						     2, target_size));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct __sk_buff, local_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_num, 2, target_size));
		break;

	case offsetof(struct __sk_buff, flow_keys):
		off  = si->off;
		off -= offsetof(struct __sk_buff, flow_keys);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, flow_keys);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;
	}

	return insn - insn_buf;
}
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock, bound_dev_if):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					offsetof(struct sock, sk_bound_dev_if));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					offsetof(struct sock, sk_bound_dev_if));
		break;

	case offsetof(struct bpf_sock, mark):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					offsetof(struct sock, sk_mark));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					offsetof(struct sock, sk_mark));
		break;

	case offsetof(struct bpf_sock, priority):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					offsetof(struct sock, sk_priority));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					offsetof(struct sock, sk_priority));
		break;

	case offsetof(struct bpf_sock, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      offsetof(struct sock, sk_family));
		break;

	case offsetof(struct bpf_sock, type):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
		break;

	case offsetof(struct bpf_sock, protocol):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
		break;

	case offsetof(struct bpf_sock, src_ip4):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_rcv_saddr,
				       FIELD_SIZEOF(struct sock_common,
						    skc_rcv_saddr),
				       target_size));
		break;

	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		off = si->off;
		off -= offsetof(struct bpf_sock, src_ip6[0]);
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(
				struct sock_common,
				skc_v6_rcv_saddr.s6_addr32[0],
				FIELD_SIZEOF(struct sock_common,
					     skc_v6_rcv_saddr.s6_addr32[0]),
				target_size) + off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock, src_port):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock_common, skc_num),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_num,
				       FIELD_SIZEOF(struct sock_common,
						    skc_num),
				       target_size));
		break;
	}

	return insn - insn_buf;
}
static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct __sk_buff, ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct net_device, ifindex, 4,
						     target_size));
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}
static u32 xdp_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct xdp_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data));
		break;
	case offsetof(struct xdp_md, data_meta):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_meta));
		break;
	case offsetof(struct xdp_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_end));
		break;
	case offsetof(struct xdp_md, ingress_ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, rxq));
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_rxq_info, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct net_device, ifindex));
		break;
	case offsetof(struct xdp_md, rx_queue_index):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, rxq));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_rxq_info,
					       queue_index));
		break;
	}

	return insn - insn_buf;
}
/* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF where S is type of
 * context Structure, F is Field in context structure that contains a pointer
 * to Nested Structure of type NS that has the field NF.
 *
 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to caller to make
 * sure that SIZE is not greater than actual size of S.F.NF.
 *
 * If offset OFF is provided, the load happens from that offset relative to
 * offset of NF.
 */
#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF)	       \
	do {								       \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg,    \
				      si->src_reg, offsetof(S, F));	       \
		*insn++ = BPF_LDX_MEM(					       \
			SIZE, si->dst_reg, si->dst_reg,			       \
			bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),	       \
				       target_size)			       \
				+ OFF);					       \
	} while (0)

#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF)			       \
	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF,		       \
					     BPF_FIELD_SIZEOF(NS, NF), 0)
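
/* For instance (sketch), SOCK_ADDR_LOAD_NESTED_FIELD(struct
 * bpf_sock_addr_kern, struct sockaddr, uaddr, sa_family) emits two loads:
 *
 *	dst = *(struct sockaddr **)(src + offsetof(..._kern, uaddr));
 *	dst = *(u16 *)(dst + offsetof(struct sockaddr, sa_family));
 */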
/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operation.
 *
 * It doesn't support SIZE argument though since narrow stores are not
 * supported for now.
 *
 * In addition it uses Temporary Field TF (member of struct S) as the 3rd
 * "register" since two registers available in convert_ctx_access are not
 * enough: we can't overwrite either SRC, since it contains the value to
 * store, or DST, since it contains the pointer to context that may be used
 * by later instructions. But we need a temporary place to save the pointer
 * to the nested structure whose field we want to store to.
 */
#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF)		       \
	do {								       \
		int tmp_reg = BPF_REG_9;				       \
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	       \
			--tmp_reg;					       \
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	       \
			--tmp_reg;					       \
		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg,	       \
				      offsetof(S, TF));			       \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,	       \
				      si->dst_reg, offsetof(S, F));	       \
		*insn++ = BPF_STX_MEM(					       \
			BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg,	       \
			bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),	       \
				       target_size)			       \
				+ OFF);					       \
		*insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,	       \
				      offsetof(S, TF));			       \
	} while (0)
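
/* The resulting store sequence (sketch): spill tmp_reg into S.TF, load the
 * nested structure pointer S.F into tmp_reg, store src_reg through it into
 * NS.NF, then restore tmp_reg from S.TF so later instructions still see its
 * original value.
 */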
#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
						      TF)		       \
	do {								       \
		if (type == BPF_WRITE) {				       \
			SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF,   \
							 TF);		       \
		} else {						       \
			SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(		       \
				S, NS, F, NF, SIZE, OFF);		       \
		}							       \
	} while (0)

#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF)		       \
	SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(			       \
		S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
					const struct bpf_insn *si,
					struct bpf_insn *insn_buf,
					struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock_addr, user_family):
		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
					    struct sockaddr, uaddr, sa_family);
		break;

	case offsetof(struct bpf_sock_addr, user_ip4):
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
			sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
		break;

	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
		off = si->off;
		off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
			sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
			tmp_reg);
		break;

	case offsetof(struct bpf_sock_addr, user_port):
		/* To get port we need to know sa_family first and then treat
		 * sockaddr as either sockaddr_in or sockaddr_in6.
		 * Though we can simplify since port field has same offset and
		 * size in both structures.
		 * Here we check this invariant and use just one of the
		 * structures if it's true.
		 */
		BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
			     offsetof(struct sockaddr_in6, sin6_port));
		BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
			     FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
						     struct sockaddr_in6, uaddr,
						     sin6_port, tmp_reg);
		break;

	case offsetof(struct bpf_sock_addr, family):
		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
					    struct sock, sk, sk_family);
		break;

	case offsetof(struct bpf_sock_addr, type):
		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sock, sk,
			__sk_flags_offset, BPF_W, 0);
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
		break;

	case offsetof(struct bpf_sock_addr, protocol):
		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sock, sk,
			__sk_flags_offset, BPF_W, 0);
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
					SK_FL_PROTO_SHIFT);
		break;

	case offsetof(struct bpf_sock_addr, msg_src_ip4):
		/* Treat t_ctx as struct in_addr for msg_src_ip4. */
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct in_addr, t_ctx,
			s_addr, BPF_SIZE(si->code), 0, tmp_reg);
		break;

	case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
				msg_src_ip6[3]):
		off = si->off;
		off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
		/* Treat t_ctx as struct in6_addr for msg_src_ip6. */
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
			s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
		break;
	}

	return insn - insn_buf;
}
static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
				       const struct bpf_insn *si,
				       struct bpf_insn *insn_buf,
				       struct bpf_prog *prog,
				       u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock_ops, op) ...
	     offsetof(struct bpf_sock_ops, replylong[3]):
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
		off = si->off;
		off -= offsetof(struct bpf_sock_ops, op);
		off += offsetof(struct bpf_sock_ops_kern, op);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		break;

	case offsetof(struct bpf_sock_ops, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;

	case offsetof(struct bpf_sock_ops, local_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, remote_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;

	case offsetof(struct bpf_sock_ops, is_fullsock):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern,
						is_fullsock),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       is_fullsock));
		break;

	case offsetof(struct bpf_sock_ops, state):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_state));
		break;

	case offsetof(struct bpf_sock_ops, rtt_min):
		BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
			     sizeof(struct minmax));
		BUILD_BUG_ON(sizeof(struct minmax) <
			     sizeof(struct minmax_sample));

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct tcp_sock, rtt_min) +
				      FIELD_SIZEOF(struct minmax_sample, t));
		break;
/* Helper macro for adding read access to tcp_sock or sock fields. */
#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
	do {								      \
		BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >		      \
			     FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern,     \
						is_fullsock),		      \
				      si->dst_reg, si->src_reg,		      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       is_fullsock));		      \
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);	      \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern, sk),\
				      si->dst_reg, si->src_reg,		      \
				      offsetof(struct bpf_sock_ops_kern, sk));\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,		      \
						       OBJ_FIELD),	      \
				      si->dst_reg, si->dst_reg,		      \
				      offsetof(OBJ, OBJ_FIELD));	      \
	} while (0)
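
/* Reads emitted by SOCK_OPS_GET_FIELD() are guarded by is_fullsock: if the
 * socket is not a full socket, the two loads through sk are skipped and the
 * destination register keeps the zero loaded from is_fullsock, so the BPF
 * program simply observes a zero value. Sketch of the emitted flow:
 *
 *	dst = sock_ops->is_fullsock;
 *	if (dst != 0) {
 *		dst = sock_ops->sk;
 *		dst = dst->OBJ_FIELD;
 *	}
 */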
/* Helper macro for adding write access to tcp_sock or sock fields.
 * The macro is called with two registers, dst_reg which contains a pointer
 * to ctx (context) and src_reg which contains the value that should be
 * stored. However, we need an additional register since we cannot overwrite
 * dst_reg because it may be used later in the program.
 * Instead we "borrow" one of the other registers. We first save its value
 * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
 * it at the end of the macro.
 */
#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
	do {								      \
		int reg = BPF_REG_9;					      \
		BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >		      \
			     FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
		if (si->dst_reg == reg || si->src_reg == reg)		      \
			reg--;						      \
		if (si->dst_reg == reg || si->src_reg == reg)		      \
			reg--;						      \
		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg,		      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       temp));			      \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern,     \
						is_fullsock),		      \
				      reg, si->dst_reg,			      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       is_fullsock));		      \
		*insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);		      \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern, sk),\
				      reg, si->dst_reg,			      \
				      offsetof(struct bpf_sock_ops_kern, sk));\
		*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),	      \
				      reg, si->src_reg,			      \
				      offsetof(OBJ, OBJ_FIELD));	      \
		*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg,		      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       temp));			      \
	} while (0)
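
/* Emitted write path (sketch): the borrowed register is spilled to
 * bpf_sock_ops_kern.temp, is_fullsock is checked as in the read case, the
 * store is performed through the borrowed register, and the register is
 * restored afterwards, making the whole sequence transparent to the rest
 * of the program.
 */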
#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE)	      \
	do {								      \
		if (TYPE == BPF_WRITE)					      \
			SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	      \
		else							      \
			SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	      \
	} while (0)
	case offsetof(struct bpf_sock_ops, snd_cwnd):
		SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, srtt_us):
		SOCK_OPS_GET_FIELD(srtt_us, srtt_us, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
		SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, snd_ssthresh):
		SOCK_OPS_GET_FIELD(snd_ssthresh, snd_ssthresh, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, rcv_nxt):
		SOCK_OPS_GET_FIELD(rcv_nxt, rcv_nxt, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, snd_nxt):
		SOCK_OPS_GET_FIELD(snd_nxt, snd_nxt, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, snd_una):
		SOCK_OPS_GET_FIELD(snd_una, snd_una, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, mss_cache):
		SOCK_OPS_GET_FIELD(mss_cache, mss_cache, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, ecn_flags):
		SOCK_OPS_GET_FIELD(ecn_flags, ecn_flags, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, rate_delivered):
		SOCK_OPS_GET_FIELD(rate_delivered, rate_delivered,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, rate_interval_us):
		SOCK_OPS_GET_FIELD(rate_interval_us, rate_interval_us,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, packets_out):
		SOCK_OPS_GET_FIELD(packets_out, packets_out, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, retrans_out):
		SOCK_OPS_GET_FIELD(retrans_out, retrans_out, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, total_retrans):
		SOCK_OPS_GET_FIELD(total_retrans, total_retrans,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, segs_in):
		SOCK_OPS_GET_FIELD(segs_in, segs_in, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, data_segs_in):
		SOCK_OPS_GET_FIELD(data_segs_in, data_segs_in, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, segs_out):
		SOCK_OPS_GET_FIELD(segs_out, segs_out, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, data_segs_out):
		SOCK_OPS_GET_FIELD(data_segs_out, data_segs_out,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, lost_out):
		SOCK_OPS_GET_FIELD(lost_out, lost_out, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, sacked_out):
		SOCK_OPS_GET_FIELD(sacked_out, sacked_out, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, sk_txhash):
		SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
					  struct sock, type);
		break;

	case offsetof(struct bpf_sock_ops, bytes_received):
		SOCK_OPS_GET_FIELD(bytes_received, bytes_received,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, bytes_acked):
		SOCK_OPS_GET_FIELD(bytes_acked, bytes_acked, struct tcp_sock);
		break;
	}
	return insn - insn_buf;
}
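/* Illustrative user-side sketch (not part of this file): a minimal
 * BPF_PROG_TYPE_SOCK_OPS program whose ctx reads are rewritten by
 * sock_ops_convert_ctx_access() above. Written against the UAPI in
 * include/uapi/linux/bpf.h; the section name follows common libbpf usage.
 *
 *	SEC("sockops")
 *	int log_cwnd(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_STATE_CB)
 *			bpf_printk("state %u cwnd %u\n",
 *				   skops->state, skops->snd_cwnd);
 *		return 1;
 *	}
 */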
static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, data_end):
		off  = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct tcp_skb_cb, bpf.data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}
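/* For SK_SKB programs the data_end pointer is stashed in the skb's
 * control block (tcp_skb_cb.bpf.data_end) by the sk_skb run path before
 * the program executes, so the data_end case above loads it from there;
 * every other __sk_buff field falls back to the generic conversion.
 */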
static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
#if IS_ENABLED(CONFIG_IPV6)
	int off;
#endif

	switch (si->off) {
	case offsetof(struct sk_msg_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data));
		break;
	case offsetof(struct sk_msg_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data_end));
		break;
	case offsetof(struct sk_msg_md, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;

	case offsetof(struct sk_msg_md, remote_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;

	case offsetof(struct sk_msg_md, local_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;

	case offsetof(struct sk_msg_md, remote_ip6[0]) ...
	     offsetof(struct sk_msg_md, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct sk_msg_md, local_ip6[0]) ...
	     offsetof(struct sk_msg_md, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct sk_msg_md, remote_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct sk_msg_md, local_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;
	}

	return insn - insn_buf;
}
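/* Illustrative user-side sketch (not part of this file): a minimal
 * BPF_PROG_TYPE_SK_MSG program exercising the sk_msg_md accesses
 * converted above, following the usual libbpf conventions.
 *
 *	SEC("sk_msg")
 *	int msg_filter(struct sk_msg_md *msg)
 *	{
 *		void *data = msg->data;
 *		void *data_end = msg->data_end;
 *
 *		if (data + 1 > data_end)
 *			return SK_DROP;
 *		return msg->family == AF_INET ? SK_PASS : SK_DROP;
 *	}
 */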
const struct bpf_verifier_ops sk_filter_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops sk_filter_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops tc_cls_act_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops xdp_verifier_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
};

const struct bpf_prog_ops xdp_prog_ops = {
	.test_run		= bpf_prog_test_run_xdp,
};

const struct bpf_verifier_ops cg_skb_verifier_ops = {
	.get_func_proto		= cg_skb_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops cg_skb_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_in_verifier_ops = {
	.get_func_proto		= lwt_in_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_in_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_out_verifier_ops = {
	.get_func_proto		= lwt_out_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_out_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops lwt_xmit_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
	.get_func_proto		= lwt_seg6local_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_seg6local_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops cg_sock_verifier_ops = {
	.get_func_proto		= sock_filter_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= bpf_sock_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_prog_ops = {
};

const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
	.get_func_proto		= sock_addr_func_proto,
	.is_valid_access	= sock_addr_is_valid_access,
	.convert_ctx_access	= sock_addr_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_addr_prog_ops = {
};

const struct bpf_verifier_ops sock_ops_verifier_ops = {
	.get_func_proto		= sock_ops_func_proto,
	.is_valid_access	= sock_ops_is_valid_access,
	.convert_ctx_access	= sock_ops_convert_ctx_access,
};

const struct bpf_prog_ops sock_ops_prog_ops = {
};

const struct bpf_verifier_ops sk_skb_verifier_ops = {
	.get_func_proto		= sk_skb_func_proto,
	.is_valid_access	= sk_skb_is_valid_access,
	.convert_ctx_access	= sk_skb_convert_ctx_access,
	.gen_prologue		= sk_skb_prologue,
};

const struct bpf_prog_ops sk_skb_prog_ops = {
};

const struct bpf_verifier_ops sk_msg_verifier_ops = {
	.get_func_proto		= sk_msg_func_proto,
	.is_valid_access	= sk_msg_is_valid_access,
	.convert_ctx_access	= sk_msg_convert_ctx_access,
};

const struct bpf_prog_ops sk_msg_prog_ops = {
};

const struct bpf_verifier_ops flow_dissector_verifier_ops = {
	.get_func_proto		= flow_dissector_func_proto,
	.is_valid_access	= flow_dissector_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops flow_dissector_prog_ops = {
};
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
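/* Illustrative user-side sketch (not part of this file): detaching a
 * classic socket filter via the setsockopt() path that ends up in
 * sk_detach_filter() above. Error handling elided.
 *
 *	#include <sys/socket.h>
 *
 *	int dummy = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
 */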
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore. eBPF programs that
	 * have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}
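/* Illustrative user-side sketch (not part of this file): the two-step
 * SO_GET_FILTER dance served by sk_get_filter() above. A zero optlen
 * returns the block count; a second call copies the program out.
 * Error handling elided.
 *
 *	struct sock_filter *insns;
 *	socklen_t optlen = 0;
 *
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &optlen);
 *	insns = calloc(optlen, sizeof(*insns));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &optlen);
 */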
#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};

static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
				    struct sock_reuseport *reuse,
				    struct sock *sk, struct sk_buff *skb,
				    u32 hash)
{
	reuse_kern->skb = skb;
	reuse_kern->sk = sk;
	reuse_kern->selected_sk = NULL;
	reuse_kern->data_end = skb->data + skb_headlen(skb);
	reuse_kern->hash = hash;
	reuse_kern->reuseport_id = reuse->reuseport_id;
	reuse_kern->bind_inany = reuse->bind_inany;
}
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
				  struct bpf_prog *prog, struct sk_buff *skb,
				  u32 hash)
{
	struct sk_reuseport_kern reuse_kern;
	enum sk_action action;

	bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
	action = BPF_PROG_RUN(prog, &reuse_kern);

	if (action == SK_PASS)
		return reuse_kern.selected_sk;
	else
		return ERR_PTR(-ECONNREFUSED);
}
BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
	   struct bpf_map *, map, void *, key, u32, flags)
{
	struct sock_reuseport *reuse;
	struct sock *selected_sk;

	selected_sk = map->ops->map_lookup_elem(map, key);
	if (!selected_sk)
		return -ENOENT;

	reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
	if (!reuse)
		/* selected_sk is unhashed (e.g. by close()) after the
		 * above map_lookup_elem(). Treat selected_sk as if it
		 * has already been removed from the map.
		 */
		return -ENOENT;

	if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
		struct sock *sk;

		if (unlikely(!reuse_kern->reuseport_id))
			/* There is a small race between adding the
			 * sk to the map and setting the
			 * reuse_kern->reuseport_id.
			 * Treat it as the sk has not been added to
			 * the bpf map yet.
			 */
			return -ENOENT;

		sk = reuse_kern->sk;
		if (sk->sk_protocol != selected_sk->sk_protocol)
			return -EPROTOTYPE;
		else if (sk->sk_family != selected_sk->sk_family)
			return -EAFNOSUPPORT;

		/* Catch all. Likely bound to a different sockaddr. */
		return -EBADFD;
	}

	reuse_kern->selected_sk = selected_sk;

	return 0;
}
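/* Illustrative user-side sketch (not part of this file): a minimal
 * BPF_PROG_TYPE_SK_REUSEPORT program using the helper above to steer an
 * incoming connection. "sockarray" is assumed to be a
 * BPF_MAP_TYPE_REUSEPORT_SOCKARRAY populated by user space.
 *
 *	SEC("sk_reuseport")
 *	int select_sock(struct sk_reuseport_md *reuse_md)
 *	{
 *		__u32 index = reuse_md->hash % 2;
 *
 *		if (bpf_sk_select_reuseport(reuse_md, &sockarray, &index, 0))
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 */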
static const struct bpf_func_proto sk_select_reuseport_proto = {
	.func		= sk_select_reuseport,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};
BPF_CALL_4(sk_reuseport_load_bytes,
	   const struct sk_reuseport_kern *, reuse_kern, u32, offset,
	   void *, to, u32, len)
{
	return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
}

static const struct bpf_func_proto sk_reuseport_load_bytes_proto = {
	.func		= sk_reuseport_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
BPF_CALL_5(sk_reuseport_load_bytes_relative,
	   const struct sk_reuseport_kern *, reuse_kern, u32, offset,
	   void *, to, u32, len, u32, start_header)
{
	return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
					       len, start_header);
}

static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = {
	.func		= sk_reuseport_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
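/* Illustrative user-side sketch (not part of this file): reading the IP
 * protocol byte relative to the network header from an sk_reuseport
 * program, which lands in sk_reuseport_load_bytes_relative() above.
 *
 *	__u8 proto;
 *
 *	if (bpf_skb_load_bytes_relative(reuse_md,
 *					offsetof(struct iphdr, protocol),
 *					&proto, sizeof(proto),
 *					BPF_HDR_START_NET))
 *		return SK_DROP;
 */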
static const struct bpf_func_proto *
sk_reuseport_func_proto(enum bpf_func_id func_id,
			const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sk_select_reuseport:
		return &sk_select_reuseport_proto;
	case BPF_FUNC_skb_load_bytes:
		return &sk_reuseport_load_bytes_proto;
	case BPF_FUNC_skb_load_bytes_relative:
		return &sk_reuseport_load_bytes_relative_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static bool
sk_reuseport_is_valid_access(int off, int size,
			     enum bpf_access_type type,
			     const struct bpf_prog *prog,
			     struct bpf_insn_access_aux *info)
{
	const u32 size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct sk_reuseport_md) ||
	    off % size || type != BPF_READ)
		return false;

	switch (off) {
	case offsetof(struct sk_reuseport_md, data):
		info->reg_type = PTR_TO_PACKET;
		return size == sizeof(__u64);

	case offsetof(struct sk_reuseport_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		return size == sizeof(__u64);

	case offsetof(struct sk_reuseport_md, hash):
		return size == size_default;

	/* Fields that allow narrowing */
	case offsetof(struct sk_reuseport_md, eth_protocol):
		if (size < FIELD_SIZEOF(struct sk_buff, protocol))
			return false;
		/* fall through */
	case offsetof(struct sk_reuseport_md, ip_protocol):
	case offsetof(struct sk_reuseport_md, bind_inany):
	case offsetof(struct sk_reuseport_md, len):
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);

	default:
		return false;
	}
}
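/* Note on the narrowing cases above: a program may load e.g. a single
 * byte of ip_protocol (a 4-byte context field); bpf_ctx_narrow_access_ok()
 * admits the access and the verifier rewrites the load using the field
 * size recorded here, while data and data_end must always be loaded as
 * full 8-byte pointers.
 */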
#define SK_REUSEPORT_LOAD_FIELD(F) ({					\
	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
			      si->dst_reg, si->src_reg,			\
			      bpf_target_off(struct sk_reuseport_kern, F, \
					     FIELD_SIZEOF(struct sk_reuseport_kern, F), \
					     target_size));		\
	})

#define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD)				\
	SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,		\
				    struct sk_buff,			\
				    skb,				\
				    SKB_FIELD)

#define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \
	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern,	\
					     struct sock,		\
					     sk,			\
					     SK_FIELD, BPF_SIZE, EXTRA_OFF)
static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
					   const struct bpf_insn *si,
					   struct bpf_insn *insn_buf,
					   struct bpf_prog *prog,
					   u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct sk_reuseport_md, data):
		SK_REUSEPORT_LOAD_SKB_FIELD(data);
		break;

	case offsetof(struct sk_reuseport_md, len):
		SK_REUSEPORT_LOAD_SKB_FIELD(len);
		break;

	case offsetof(struct sk_reuseport_md, eth_protocol):
		SK_REUSEPORT_LOAD_SKB_FIELD(protocol);
		break;

	case offsetof(struct sk_reuseport_md, ip_protocol):
		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
		SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
						    BPF_W, 0);
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
					SK_FL_PROTO_SHIFT);
		/* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian
		 * aware. No further narrowing or masking is needed.
		 */
		break;

	case offsetof(struct sk_reuseport_md, data_end):
		SK_REUSEPORT_LOAD_FIELD(data_end);
		break;

	case offsetof(struct sk_reuseport_md, hash):
		SK_REUSEPORT_LOAD_FIELD(hash);
		break;

	case offsetof(struct sk_reuseport_md, bind_inany):
		SK_REUSEPORT_LOAD_FIELD(bind_inany);
		break;
	}

	return insn - insn_buf;
}
const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
	.get_func_proto		= sk_reuseport_func_proto,
	.is_valid_access	= sk_reuseport_is_valid_access,
	.convert_ctx_access	= sk_reuseport_convert_ctx_access,
};

const struct bpf_prog_ops sk_reuseport_prog_ops = {
};
#endif /* CONFIG_INET */