// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here.
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we do a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the pointer fetched by bpf_prog_array_valid()
	 * was NULL, we skip the prog_array at the risk of missing events
	 * that were attached in between that check and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

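/*
 * Note: override_function_with_return() only works on functions the kernel
 * has marked via ALLOW_ERROR_INJECTION() (see <linux/error-injection.h>,
 * included above); attaching a program that uses bpf_override_return() to
 * any other kprobe is rejected at attach time.
 */
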
static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If the user takes
	 * `dst` and keys a hash map with it, then semantically identical
	 * strings can occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;
	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;
	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in the error case, so that improper users ignoring the return
	 * code altogether don't copy garbage; otherwise the length of the
	 * string is returned, which can be used for bpf_perf_event_output()
	 * et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;

	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
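
/*
 * The compat helpers above back the legacy bpf_probe_read()/
 * bpf_probe_read_str() helpers, which guess whether a pointer is user or
 * kernel by comparing it against TASK_SIZE. New programs should use the
 * explicit variants instead; a minimal BPF-program-side sketch (the task
 * variable and field access are illustrative only):
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	char comm[16];
 *	bpf_probe_read_kernel(comm, sizeof(comm), &task->comm);
 */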

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				  size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE) {
			strncpy_from_user_nofault(buf, user_ptr, bufsz);
			break;
		}
		fallthrough;
#endif
	case 'k':
		strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
		break;
	case 'u':
		strncpy_from_user_nofault(buf, user_ptr, bufsz);
		break;
	}
}

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define BPF_TRACE_PRINTK_SIZE	1024

static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	va_list ap;
	int ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	va_start(ap, fmt);
	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	/* vsnprintf() will not append a NUL for zero-length strings */
	if (ret == 0)
		buf[0] = '\0';
	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	return ret;
}
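
/*
 * The formatted string is emitted via the bpf_trace/bpf_trace_printk
 * tracepoint (enabled in bpf_get_trace_printk_proto() below), so it ends
 * up in the ftrace ring buffer and is typically read from tracefs, e.g.
 * via trace_pipe.
 */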

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
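	/*
	 * mod[i] tracks the size class of argument i as the format string
	 * is parsed below: 0 = int-sized, 1 = long-sized (a single 'l', or
	 * a pointer via %p/%s), 2 = long long ("ll"). The __BPF_ARG*_TP()
	 * macros at the end use it to cast arg1..arg3 before the varargs
	 * call.
	 */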
	int i, mod[3] = {}, fmt_cnt = 0;
	char buf[64], fmt_ptype;
	void *unsafe_ptr = NULL;
	bool str_seen = false;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p') {
			mod[fmt_cnt]++;
			if ((fmt[i + 1] == 'k' ||
			     fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'B') {
				i++;
				goto fmt_next;
			}

			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			goto fmt_next;
		} else if (fmt[i] == 's') {
			mod[fmt_cnt]++;
			fmt_ptype = fmt[i];
fmt_str:
			if (str_seen)
				/* allow only one '%s' per fmt string */
				return -EINVAL;
			str_seen = true;

			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			switch (fmt_cnt) {
			case 0:
				unsafe_ptr = (void *)(long)arg1;
				arg1 = (long)buf;
				break;
			case 1:
				unsafe_ptr = (void *)(long)arg2;
				arg2 = (long)buf;
				break;
			case 2:
				unsafe_ptr = (void *)(long)arg3;
				arg3 = (long)buf;
				break;
			}

			bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
					sizeof(buf));
			goto fmt_next;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
fmt_next:
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	bpf_do_trace_printk(fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}
558
559static const struct bpf_func_proto bpf_trace_printk_proto = {
560 .func = bpf_trace_printk,
561 .gpl_only = true,
562 .ret_type = RET_INTEGER,
39f19ebb
AS
563 .arg1_type = ARG_PTR_TO_MEM,
564 .arg2_type = ARG_CONST_SIZE,
9c959c86
AS
565};
566
0756ea3e
AS
567const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
568{
569 /*
ac5a72ea
AM
570 * This program might be calling bpf_trace_printk,
571 * so enable the associated bpf_trace/bpf_trace_printk event.
572 * Repeat this each time as it is possible a user has
573 * disabled bpf_trace_printk events. By loading a program
574 * calling bpf_trace_printk() however the user has expressed
575 * the intent to see such events.
0756ea3e 576 */
ac5a72ea
AM
577 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
578 pr_warn_ratelimited("could not enable bpf_trace_printk events");
0756ea3e
AS
579
580 return &bpf_trace_printk_proto;
581}
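
/*
 * A minimal BPF-program-side sketch of the helper above; per the parser
 * in bpf_trace_printk(), the format string must live on the program
 * stack, use at most three arguments, and at most one %s:
 *
 *	char fmt[] = "pid %d comm %s\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 */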

#define MAX_SEQ_PRINTF_VARARGS		12
#define MAX_SEQ_PRINTF_MAX_MEMCPY	6
#define MAX_SEQ_PRINTF_STR_LEN		128

struct bpf_seq_printf_buf {
	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
};
static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
	int i, buf_used, copy_size, num_args;
	u64 params[MAX_SEQ_PRINTF_VARARGS];
	struct bpf_seq_printf_buf *bufs;
	const u64 *args = data;

	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
	if (WARN_ON_ONCE(buf_used > 1)) {
		err = -EBUSY;
		goto out;
	}

	bufs = this_cpu_ptr(&bpf_seq_printf_buf);

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		goto out;

	if (data_len & 7)
		goto out;

	for (i = 0; i < fmt_size; i++) {
		if (fmt[i] == '%') {
			if (fmt[i + 1] == '%')
				i++;
			else if (!data || !data_len)
				goto out;
		}
	}

	num_args = data_len / 8;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		/* only printable ascii for now. */
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
			err = -E2BIG;
			goto out;
		}

		if (fmt_cnt >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 's') {
			void *unsafe_ptr;

			/* try our best to copy */
			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			unsafe_ptr = (void *)(long)args[fmt_cnt];
			err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
					unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
			if (err < 0)
				bufs->buf[memcpy_cnt][0] = '\0';
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'p') {
			if (fmt[i + 1] == 0 ||
			    fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' ||
			    fmt[i + 1] == 'B') {
				/* just kernel pointers */
				params[fmt_cnt] = args[fmt_cnt];
				fmt_cnt++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
				err = -EINVAL;
				goto out;
			}
			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
				err = -EINVAL;
				goto out;
			}

			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			copy_size = (fmt[i + 2] == '4') ? 4 : 16;

			err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
						(void *) (long) args[fmt_cnt],
						copy_size);
			if (err < 0)
				memset(bufs->buf[memcpy_cnt], 0, copy_size);
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			i += 2;
			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'l') {
			i++;
			if (fmt[i] == 'l')
				i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x' &&
		    fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		params[fmt_cnt] = args[fmt_cnt];
		fmt_cnt++;
	}

	/*
	 * The maximum we can have is MAX_SEQ_PRINTF_VARARGS parameters, so
	 * just give all of them to seq_printf().
	 */
	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
		   params[4], params[5], params[6], params[7], params[8],
		   params[9], params[10], params[11]);

	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
out:
	this_cpu_dec(bpf_seq_printf_buf_used);
	return err;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};
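
/*
 * bpf_seq_printf()/bpf_seq_write() are only handed out to programs with
 * expected_attach_type == BPF_TRACE_ITER (see tracing_prog_func_proto()
 * below), i.e. BPF iterators that own a seq_file.
 */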

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * This API is ugly since we cannot distinguish the [-22..-2] range
	 * of valid counter values from error codes, but that's UAPI.
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
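
/*
 * A minimal BPF-program-side sketch for the helper above, assuming
 * "events" is a BPF_MAP_TYPE_PERF_EVENT_ARRAY; with BPF_F_CURRENT_CPU
 * the sample is routed to the perf event of the executing CPU:
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &sample, sizeof(sample));
 */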

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)

static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func		= bpf_get_current_task_btf,
	.gpl_only	= true,
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &bpf_get_current_btf_ids[0],
};
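
/*
 * Unlike bpf_get_current_task() above, which returns the task pointer as
 * a plain integer, this variant returns a BTF-typed pointer
 * (RET_PTR_TO_BTF_ID) that tracing programs can dereference directly
 * under the verifier's BTF-based access checks.
 */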

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, the task needs to be in a sound
	 * condition and kernel memory access must be permitted in order to
	 * send a signal to the current task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending the
		 * signal, to the irq_work. The current task may change when
		 * queued irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
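
/*
 * The two wrappers above differ only in scope: bpf_send_signal() targets
 * the whole thread group of the current task (PIDTYPE_TGID), while
 * bpf_send_signal_thread() delivers to the current thread only
 * (PIDTYPE_PID).
 */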

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};
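
/*
 * Per bpf_d_path_allowed() above, this helper is restricted to
 * BPF_TRACE_ITER programs, sleepable LSM hooks, and the BTF allowlist of
 * VFS/security functions, i.e. attach points where a struct path
 * argument can safely be consumed.
 */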

#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	default:
		return NULL;
	}
}
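
/*
 * bpf_tracing_func_proto() above is the base helper set shared by all
 * tracing program types; the per-type *_prog_func_proto() functions
 * below add their context-specific helpers and fall back to it in their
 * default case.
 */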

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call the raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -EINVAL;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
#endif
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
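
/*
 * A minimal BPF-program-side sketch for the helper above: calling it
 * with BPF_F_GET_BRANCH_RECORDS_SIZE first returns the buffer size
 * needed, after which the records themselves can be copied out ("buf"
 * is an illustrative program-side buffer):
 *
 *	int sz = bpf_read_branch_records(ctx, NULL, 0,
 *					 BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	if (sz > 0)
 *		sz = bpf_read_branch_records(ctx, buf, sizeof(buf), 0);
 */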

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

5e43f899
AI
1725static const struct bpf_func_proto *
1726raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
c4f6699d
AS
1727{
1728 switch (func_id) {
1729 case BPF_FUNC_perf_event_output:
1730 return &bpf_perf_event_output_proto_raw_tp;
1731 case BPF_FUNC_get_stackid:
1732 return &bpf_get_stackid_proto_raw_tp;
c195651e
YS
1733 case BPF_FUNC_get_stack:
1734 return &bpf_get_stack_proto_raw_tp;
c4f6699d 1735 default:
fc611f47 1736 return bpf_tracing_func_proto(func_id, prog);
c4f6699d
AS
1737 }
1738}
1739
958a3f2d 1740const struct bpf_func_proto *
f1b9509c
AS
1741tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1742{
1743 switch (func_id) {
1744#ifdef CONFIG_NET
1745 case BPF_FUNC_skb_output:
1746 return &bpf_skb_output_proto;
d831ee84
EC
1747 case BPF_FUNC_xdp_output:
1748 return &bpf_xdp_output_proto;
af7ec138
YS
1749 case BPF_FUNC_skc_to_tcp6_sock:
1750 return &bpf_skc_to_tcp6_sock_proto;
478cfbdf
YS
1751 case BPF_FUNC_skc_to_tcp_sock:
1752 return &bpf_skc_to_tcp_sock_proto;
1753 case BPF_FUNC_skc_to_tcp_timewait_sock:
1754 return &bpf_skc_to_tcp_timewait_sock_proto;
1755 case BPF_FUNC_skc_to_tcp_request_sock:
1756 return &bpf_skc_to_tcp_request_sock_proto;
0d4fad3e
YS
1757 case BPF_FUNC_skc_to_udp6_sock:
1758 return &bpf_skc_to_udp6_sock_proto;
8e4597c6
MKL
1759 case BPF_FUNC_sk_storage_get:
1760 return &bpf_sk_storage_get_tracing_proto;
1761 case BPF_FUNC_sk_storage_delete:
1762 return &bpf_sk_storage_delete_tracing_proto;
b60da495
FR
1763 case BPF_FUNC_sock_from_file:
1764 return &bpf_sock_from_file_proto;
c5dbb89f
FR
1765 case BPF_FUNC_get_socket_cookie:
1766 return &bpf_get_socket_ptr_cookie_proto;
f1b9509c 1767#endif
492e639f
YS
1768 case BPF_FUNC_seq_printf:
1769 return prog->expected_attach_type == BPF_TRACE_ITER ?
1770 &bpf_seq_printf_proto :
1771 NULL;
1772 case BPF_FUNC_seq_write:
1773 return prog->expected_attach_type == BPF_TRACE_ITER ?
1774 &bpf_seq_write_proto :
1775 NULL;
eb411377
AM
1776 case BPF_FUNC_seq_printf_btf:
1777 return prog->expected_attach_type == BPF_TRACE_ITER ?
1778 &bpf_seq_printf_btf_proto :
1779 NULL;
6e22ab9d
JO
1780 case BPF_FUNC_d_path:
1781 return &bpf_d_path_proto;
f1b9509c
AS
1782 default:
1783 return raw_tp_prog_func_proto(func_id, prog);
1784 }
1785}
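
/*
 * Illustrative sketch, not part of this file: the seq_* helpers above
 * are only handed out when expected_attach_type is BPF_TRACE_ITER, i.e.
 * to BPF iterator programs. A minimal task iterator (libbpf-style BPF C;
 * names are arbitrary):
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		static const char fmt[] = "pid=%llu\n";
 *		__u64 pid;
 *
 *		if (!task)
 *			return 0;
 *		pid = task->pid;
 *		// raw helper signature: fmt size plus an array of u64 args
 *		bpf_seq_printf(seq, fmt, sizeof(fmt), &pid, sizeof(pid));
 *		return 0;
 *	}
 */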

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}
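
/*
 * Illustrative sketch, not part of this file: because the final check
 * above goes through btf_ctx_access(), tracing programs see their ctx
 * words as BTF-typed values and can use typed arguments directly, e.g.
 * (libbpf's BPF_PROG() convenience macro; names are arbitrary):
 *
 *	SEC("fentry/do_unlinkat")
 *	int BPF_PROG(trace_unlink, int dfd, struct filename *name)
 *	{
 *		bpf_printk("unlinkat dfd=%d", dfd);
 *		return 0;
 *	}
 */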

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto  = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
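
/*
 * Illustrative sketch, not part of this file: what the ctx rewrite above
 * means for a perf_event program (names are arbitrary). A load of
 * ctx->sample_period is converted into two loads: first of the kernel's
 * perf_sample_data pointer, then of its period field:
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		__u64 period = ctx->sample_period;
 *
 *		return period > 0;
 *	}
 */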

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the kprobe is on the function
	 * entry, and only if the function is on the error-injection
	 * opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}
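
/*
 * Illustrative sketch, not part of this file: userspace reaches the
 * attach path above through the perf ioctl (raw syscalls; attr setup
 * and error handling elided):
 *
 *	int evt_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *
 *	ioctl(evt_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 */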

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check uquery->prog_cnt.
	 * There is no need to check for it here, since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
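
/*
 * Illustrative sketch, not part of this file: the matching userspace
 * query via PERF_EVENT_IOC_QUERY_BPF (uapi struct perf_event_query_bpf;
 * error handling elided):
 *
 *	struct perf_event_query_bpf *q;
 *
 *	q = calloc(1, sizeof(*q) + 64 * sizeof(__u32));
 *	q->ids_len = 64;
 *	if (!ioctl(evt_fd, PERF_EVENT_IOC_QUERY_BPF, q))
 *		printf("%u prog(s) attached\n", q->prog_cnt);
 */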

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}
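
/*
 * Note (assumption based on the section name used by the vmlinux linker
 * script): __start__bpf_raw_tp/__stop__bpf_raw_tp delimit the
 * "__bpf_raw_tp_map" ELF section into which the tracepoint macros emit
 * one bpf_raw_event_map per tracepoint; tracepoints in modules are
 * found via the list maintained by the notifier at the end of this file.
 */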

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) BPF_PROG_RUN(prog, args);
	rcu_read_unlock();
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
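
/*
 * For reference, BPF_TRACE_DEFN_x(2) expands to (roughly):
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */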

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that the program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}
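
/*
 * Illustrative sketch, not part of this file: the register/unregister
 * pair above sits behind BPF_RAW_TRACEPOINT_OPEN; with libbpf:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
 *	...
 *	bpf_link__destroy(link);	// ends up in bpf_probe_unregister()
 */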

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}
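
/*
 * Illustrative sketch, not part of this file: this is the backend of
 * the BPF_TASK_FD_QUERY command; raw bpf(2) usage (error handling
 * elided, target_pid/perf_event_fd are assumed inputs):
 *
 *	union bpf_attr attr = {};
 *	char buf[256];
 *
 *	attr.task_fd_query.pid = target_pid;
 *	attr.task_fd_query.fd = perf_event_fd;
 *	attr.task_fd_query.buf = (__u64)(unsigned long)buf;
 *	attr.task_fd_query.buf_len = sizeof(buf);
 *	syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
 */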

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);
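
/*
 * Illustrative sketch, not part of this file: the per-cpu irq_work set
 * up above backs the bpf_send_signal() helper, e.g. (names arbitrary):
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int notify(struct pt_regs *ctx)
 *	{
 *		bpf_send_signal(10);	// SIGUSR1 on most architectures
 *		return 0;
 *	}
 */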

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */