// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>
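
/* Implementation of the BPF_PROG_TEST_RUN command of the bpf(2) syscall:
 * build a fake context for the program under test (skb, xdp_buff, flow
 * keys, sk_lookup, ...), run the program kattr->test.repeat times, and
 * copy the return value, measured duration and any mutated data/context
 * back to user space.
 */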

struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};
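
/* The three helpers below implement a simple measurement loop. The
 * intended usage pattern (see the test runners further down) is:
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		<run the program once>
 *	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
 *	bpf_test_timer_leave(&t);
 *
 * enter/leave take the RCU read lock and disable preemption or migration
 * (depending on t.mode) so timing is not skewed by rescheduling;
 * continue() accounts the elapsed time, aborts on pending signals and
 * drops/retakes the locks to reschedule between rounds when needed.
 */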

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i++;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}
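
/* Note that bpf_test_run() above allocates cgroup storage for the program
 * up front, so that programs using BPF_MAP_TYPE_CGROUP_STORAGE can be
 * exercised by the test runner without being attached to a real cgroup.
 */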

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover variety of
 * architecture dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);
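
/* ALLOW_ERROR_INJECTION() above is what makes bpf_modify_return_test() a
 * valid attach target for BPF_MODIFY_RETURN programs. The BTF ID set below
 * serves as an allow-list for kfunc calls: test programs may only call
 * kernel functions whose BTF IDs are listed here.
 */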

BTF_SET_START(test_sk_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_SET_END(test_sk_kfunc_ids)

bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
{
	return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id);
}
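
/* Layout of the buffer returned by bpf_test_init() below (the caller gets
 * a pointer to the start of the headroom, user data lands at
 * data + headroom):
 *
 *	+----------+------------------+----------+
 *	| headroom | user data (size) | tailroom |
 *	+----------+------------------+----------+
 *
 * mirroring how drivers reserve head and tail room around packet data.
 */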

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}
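
/* The tracing runner below packs two results into the u32 it reports back:
 * the lower 16 bits carry the return value of bpf_modify_return_test(),
 * the upper 16 bits record whether its side effect on *b was observed,
 * which a BPF_MODIFY_RETURN program can prevent by returning non-zero and
 * thereby skipping the function body.
 */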

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}
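
/* The raw_tp runner below executes the program exactly once. With
 * BPF_F_TEST_RUN_ON_CPU set in kattr->test.flags, it runs on the CPU given
 * in kattr->test.cpu (via smp_call_function_single() when that is not the
 * current CPU); otherwise it runs on whichever CPU the syscall landed on.
 */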

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!info.ctx)
			return -ENOMEM;
		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

out:
	kfree(info.ctx);
	return err;
}

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}
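
/* bpf_ctx_init() above and bpf_ctx_finish() below handle the optional
 * ctx_in/ctx_out blobs. The bpf_check_uarg_tail_zero() call provides
 * forward compatibility: user space may pass a context larger than the
 * kernel's view of the struct as long as the unknown tail is all zero.
 */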

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
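
/* convert___skb_to_skb() below alternates "<field> is allowed" markers
 * with range_is_zero() checks: only the whitelisted __sk_buff fields may
 * be non-zero, and every byte between them must be zero, so that use of a
 * field the test runner does not support fails with -EINVAL instead of
 * being silently ignored.
 */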

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, ifindex)))
		return -EINVAL;

	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

static struct proto bpf_dummy_proto = {
	.name		= "bpf_dummy",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};
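
/* For orientation, a rough user-space counterpart of the skb runner below,
 * using libbpf (a hypothetical sketch, not part of this file; names per
 * libbpf's bpf_prog_test_run_opts() API):
 *
 *	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.data_in = pkt,
 *		.data_size_in = sizeof(pkt),
 *		.repeat = 1000,
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// on success: opts.retval, opts.duration, opts.data_out
 */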

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}
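
/* The XDP runner below hands the program a page-backed buffer with
 * XDP_PACKET_HEADROOM of headroom; when user space passes an xdp_md
 * context, metadata space is carved out of that headroom and an ingress
 * device/rx queue can be selected. The converters above pin and unpin the
 * chosen netdevice around the run.
 */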

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	struct xdp_md *ctx;
	u32 max_data_sz;
	void *data;
	int ret = -EINVAL;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user-provided data before the metadata */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)))
			goto free_ctx;
		/* Metadata is allocated from the headroom */
		headroom -= ctx->data;
	}

	/* XDP has extra tailroom as (most) drivers use full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
		      &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	if (xdp.data_meta != data + headroom ||
	    xdp.data_end != xdp.data_meta + size)
		size = xdp.data_end - xdp.data_meta;

	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	bpf_prog_change_xdp(prog, NULL);
free_data:
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}
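
/* The flow dissector runner below uses the NO_PREEMPT timer mode, so the
 * repeat loop executes with preemption disabled and the RCU read lock
 * held (see bpf_test_timer_enter()).
 */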

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}
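
/* The syscall program runner below executes the program once on the
 * current CPU under rcu_read_lock_trace() and, unlike the other runners,
 * copies the (possibly modified) context back into the ctx_in buffer
 * rather than into a separate ctx_out.
 */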

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, or repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!ctx)
			return -ENOMEM;
		if (copy_from_user(ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}