// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include "trace_probe.h"
#include "trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference() which is accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif
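/*
 * Usage sketch (illustrative, from the BPF side): a program attached to a
 * kprobe on an error-injectable function can force an early return value:
 *
 *	bpf_override_return(ctx, -ENOMEM);
 *
 * This is only honoured for functions tagged with ALLOW_ERROR_INJECTION()
 * and only when the kprobe sits on the function entry; see the checks in
 * perf_event_attach_bpf_prog() below.
 */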
BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	int ret = probe_user_read(dst, unsafe_ptr, size);

	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);

	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
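/*
 * BPF-side usage sketch (illustrative, names are made up): copying a
 * NUL-terminated string from user memory and forwarding it:
 *
 *	char path[64];
 *	int len = bpf_probe_read_user_str(path, sizeof(path), user_ptr);
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, path, len);
 *
 * On error the destination has already been zeroed by the helpers above.
 */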
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
			     const bool compat)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto out;
	ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
	      probe_kernel_read_strict(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
}

static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
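/*
 * Note on routing (see tracing_func_proto() below): the legacy
 * bpf_probe_read() helper maps to the "compat" variants here, which fall
 * back to the less strict probe_kernel_read(), while bpf_probe_read_kernel()
 * always uses the strict kernel-only accessor.
 */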
static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
				 const bool compat)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto out;
	/*
	 * The strncpy_from_unsafe_*() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
	      strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
}

static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return probe_user_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
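/*
 * Note: bpf_probe_write_user() is handed out through the wrapper above so
 * that every program load using it leaves a ratelimited hint in the kernel
 * log, e.g. (illustrative):
 *
 *	mytool[1234] is installing a program with bpf_probe_write_user helper
 *	that may corrupt user memory!
 */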
/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(0 /* Fake ip */,					\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
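/*
 * BPF-side usage sketch (illustrative): the format string must live on the
 * BPF stack and be NUL terminated, with at most three arguments and at most
 * one %s:
 *
 *	char fmt[] = "pid %d comm %s\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 */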
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
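/*
 * BPF-side usage sketch (illustrative): reading the counter bound to the
 * current CPU slot of a BPF_MAP_TYPE_PERF_EVENT_ARRAY:
 *
 *	u64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *
 * Because errors share the return value space with counter values (see the
 * uapi caveat above), bpf_perf_event_read_value() below is preferred.
 */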
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}
/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
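/*
 * BPF-side usage sketch (illustrative): emitting a sample to user space via
 * a BPF_MAP_TYPE_PERF_EVENT_ARRAY on the current CPU:
 *
 *	struct event e = { .pid = pid };
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 */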
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}
BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
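/*
 * BPF-side usage sketch (illustrative): checking the current task against the
 * cgroup stored at index 0 of a BPF_MAP_TYPE_CGROUP_ARRAY; the helper simply
 * propagates task_under_cgroup_hierarchy(), i.e. 1 means "in the hierarchy":
 *
 *	if (bpf_current_task_under_cgroup(&cgrp_map, 0) == 1)
 *		...
 */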
struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
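/*
 * BPF-side usage sketch (illustrative): both helpers take only the signal
 * number; the target is always the current task, either the whole thread
 * group or just the calling thread:
 *
 *	bpf_send_signal(SIGUSR1);		// PIDTYPE_TGID
 *	bpf_send_signal_thread(SIGUSR1);	// PIDTYPE_PID
 */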
static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	default:
		return NULL;
	}
}
static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};
BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -ENOENT;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
#endif
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	default:
		return tracing_func_proto(func_id, prog);
	}
}
/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
#endif
	default:
		return raw_tp_prog_func_proto(func_id, prog);
	}
}
static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}
int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto  = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};
static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};
static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if they are on the function entry,
	 * and only if they are on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}
void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) BPF_PROG_RUN(prog, args);
	rcu_read_unlock();
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
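/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands to roughly (illustrative):
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */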
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}
static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);
#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */