// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

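/*
 * Look up a kernel-module tracepoint by name. On success this takes a
 * reference on the owning module so it cannot be unloaded while the
 * tracepoint is in use; the caller is expected to drop that reference
 * (via bpf_put_raw_tracepoint()) once it is done with the returned map.
 */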
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array that bpf_prog_array_valid() fetched was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under the RCU lock.
	 * If it turns out that prog_array is NULL, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
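/*
 * Note: this helper can only change the return value of functions the
 * kernel has explicitly marked as error-injectable (ALLOW_ERROR_INJECTION);
 * whether a given attach point qualifies is checked when the program is
 * attached, not on this fast path.
 */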
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;
	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;
	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;

	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
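/*
 * The legacy bpf_probe_read()/bpf_probe_read_str() helpers are only backed
 * by the compat variants above on architectures where user and kernel
 * address ranges do not overlap, since those variants have to guess which
 * address space a pointer belongs to from its value alone.
 */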

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE) {
			strncpy_from_user_nofault(buf, user_ptr, bufsz);
			break;
		}
		fallthrough;
#endif
	case 'k':
		strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
		break;
	case 'u':
		strncpy_from_user_nofault(buf, user_ptr, bufsz);
		break;
	}
}

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define BPF_TRACE_PRINTK_SIZE	1024

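/*
 * All bpf_trace_printk() output is funneled through this single function and
 * emitted via the bpf_trace/bpf_trace_printk trace event, so it shows up in
 * the ftrace trace buffer only while that event is enabled (see
 * bpf_get_trace_printk_proto() below). The static buffer is protected by
 * trace_printk_lock since this can be called from any context.
 */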
static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	va_list ap;
	int ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	va_start(ap, fmt);
	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	/* vsnprintf() will not append a null byte for zero-length strings */
	if (ret == 0)
		buf[0] = '\0';
	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	return ret;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	int i, mod[3] = {}, fmt_cnt = 0;
	char buf[64], fmt_ptype;
	void *unsafe_ptr = NULL;
	bool str_seen = false;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p') {
			mod[fmt_cnt]++;
			if ((fmt[i + 1] == 'k' ||
			     fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'B') {
				i++;
				goto fmt_next;
			}

			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			goto fmt_next;
		} else if (fmt[i] == 's') {
			mod[fmt_cnt]++;
			fmt_ptype = fmt[i];
fmt_str:
			if (str_seen)
				/* allow only one '%s' per fmt string */
				return -EINVAL;
			str_seen = true;

			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			switch (fmt_cnt) {
			case 0:
				unsafe_ptr = (void *)(long)arg1;
				arg1 = (long)buf;
				break;
			case 1:
				unsafe_ptr = (void *)(long)arg2;
				arg2 = (long)buf;
				break;
			case 2:
				unsafe_ptr = (void *)(long)arg3;
				arg3 = (long)buf;
				break;
			}

			bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
					sizeof(buf));
			goto fmt_next;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
fmt_next:
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	bpf_do_trace_printk(fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

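/*
 * A typical BPF-program-side call of the helper handed out below (this is
 * what libbpf's bpf_printk() convenience macro expands to, shown purely for
 * illustration):
 *
 *	char ____fmt[] = "pid: %d\n";
 *	bpf_trace_printk(____fmt, sizeof(____fmt), pid);
 *
 * The verifier requires the format string to live on the BPF program stack,
 * see the fmt checks in bpf_trace_printk() above.
 */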
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program
	 * calling bpf_trace_printk() the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");

	return &bpf_trace_printk_proto;
}

#define MAX_SEQ_PRINTF_VARARGS		12
#define MAX_SEQ_PRINTF_MAX_MEMCPY	6
#define MAX_SEQ_PRINTF_STR_LEN		128

struct bpf_seq_printf_buf {
	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
};
static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);
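
/*
 * bpf_seq_printf() below can nest on the same CPU (e.g. when interrupted by
 * an NMI whose handler also runs a BPF iterator program); the _used counter
 * rejects such nested use of the per-cpu scratch buffers with -EBUSY instead
 * of corrupting them.
 */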

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
	int i, buf_used, copy_size, num_args;
	u64 params[MAX_SEQ_PRINTF_VARARGS];
	struct bpf_seq_printf_buf *bufs;
	const u64 *args = data;

	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
	if (WARN_ON_ONCE(buf_used > 1)) {
		err = -EBUSY;
		goto out;
	}

	bufs = this_cpu_ptr(&bpf_seq_printf_buf);

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		goto out;

	if (data_len & 7)
		goto out;

	for (i = 0; i < fmt_size; i++) {
		if (fmt[i] == '%') {
			if (fmt[i + 1] == '%')
				i++;
			else if (!data || !data_len)
				goto out;
		}
	}

	num_args = data_len / 8;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		/* only printable ascii for now. */
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
			err = -E2BIG;
			goto out;
		}

		if (fmt_cnt >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 's') {
			void *unsafe_ptr;

			/* try our best to copy */
			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			unsafe_ptr = (void *)(long)args[fmt_cnt];
			err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
					unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
			if (err < 0)
				bufs->buf[memcpy_cnt][0] = '\0';
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'p') {
			if (fmt[i + 1] == 0 ||
			    fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' ||
			    fmt[i + 1] == 'B') {
				/* just kernel pointers */
				params[fmt_cnt] = args[fmt_cnt];
				fmt_cnt++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
				err = -EINVAL;
				goto out;
			}
			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
				err = -EINVAL;
				goto out;
			}

			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			copy_size = (fmt[i + 2] == '4') ? 4 : 16;

			err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
						(void *) (long) args[fmt_cnt],
						copy_size);
			if (err < 0)
				memset(bufs->buf[memcpy_cnt], 0, copy_size);
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			i += 2;
			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'l') {
			i++;
			if (fmt[i] == 'l')
				i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x' &&
		    fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		params[fmt_cnt] = args[fmt_cnt];
		fmt_cnt++;
	}

	/* We can have at most MAX_SEQ_PRINTF_VARARGS parameters, just give
	 * all of them to seq_printf().
	 */
	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
		   params[4], params[5], params[6], params[7], params[8],
		   params[9], params[10], params[11]);

	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
out:
	this_cpu_dec(bpf_seq_printf_buf_used);
	return err;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

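/*
 * The perf event array referenced below must contain events of type
 * PERF_TYPE_SOFTWARE/PERF_COUNT_SW_BPF_OUTPUT, and the selected event must be
 * active on the CPU the program runs on; writing through an event that is
 * bound to a different CPU is rejected with -EOPNOTSUPP.
 */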
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

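/*
 * bpf_d_path() may only be used by programs attached to the small allowlist
 * of functions below, where the struct path argument is known to be valid
 * for the duration of the call; see bpf_d_path_allowed().
 */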
1149BTF_SET_START(btf_allowlist_d_path)
a8a71796
JO
1150#ifdef CONFIG_SECURITY
1151BTF_ID(func, security_file_permission)
1152BTF_ID(func, security_inode_getattr)
1153BTF_ID(func, security_file_open)
1154#endif
1155#ifdef CONFIG_SECURITY_PATH
1156BTF_ID(func, security_path_truncate)
1157#endif
6e22ab9d
JO
1158BTF_ID(func, vfs_truncate)
1159BTF_ID(func, vfs_fallocate)
1160BTF_ID(func, dentry_open)
1161BTF_ID(func, vfs_getattr)
1162BTF_ID(func, filp_close)
1163BTF_SET_END(btf_allowlist_d_path)
1164
1165static bool bpf_d_path_allowed(const struct bpf_prog *prog)
1166{
1167 return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
1168}
1169
9436ef6e 1170BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
6e22ab9d
JO
1171
1172static const struct bpf_func_proto bpf_d_path_proto = {
1173 .func = bpf_d_path,
1174 .gpl_only = false,
1175 .ret_type = RET_INTEGER,
1176 .arg1_type = ARG_PTR_TO_BTF_ID,
9436ef6e 1177 .arg1_btf_id = &bpf_d_path_btf_ids[0],
6e22ab9d
JO
1178 .arg2_type = ARG_PTR_TO_MEM,
1179 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
6e22ab9d
JO
1180 .allowed = bpf_d_path_allowed,
1181};
1182
c4d0bfb4
AM
1183#define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
1184 BTF_F_PTR_RAW | BTF_F_ZERO)
1185
1186static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
1187 u64 flags, const struct btf **btf,
1188 s32 *btf_id)
1189{
1190 const struct btf_type *t;
1191
1192 if (unlikely(flags & ~(BTF_F_ALL)))
1193 return -EINVAL;
1194
1195 if (btf_ptr_size != sizeof(struct btf_ptr))
1196 return -EINVAL;
1197
1198 *btf = bpf_get_btf_vmlinux();
1199
1200 if (IS_ERR_OR_NULL(*btf))
abbaa433 1201 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
c4d0bfb4
AM
1202
1203 if (ptr->type_id > 0)
1204 *btf_id = ptr->type_id;
1205 else
1206 return -EINVAL;
1207
1208 if (*btf_id > 0)
1209 t = btf_type_by_id(*btf, *btf_id);
1210 if (*btf_id <= 0 || !t)
1211 return -ENOENT;
1212
1213 return 0;
1214}
1215
1216BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1217 u32, btf_ptr_size, u64, flags)
1218{
1219 const struct btf *btf;
1220 s32 btf_id;
1221 int ret;
1222
1223 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1224 if (ret)
1225 return ret;
1226
1227 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1228 flags);
1229}
1230
1231const struct bpf_func_proto bpf_snprintf_btf_proto = {
1232 .func = bpf_snprintf_btf,
1233 .gpl_only = false,
1234 .ret_type = RET_INTEGER,
1235 .arg1_type = ARG_PTR_TO_MEM,
1236 .arg2_type = ARG_CONST_SIZE,
1237 .arg3_type = ARG_PTR_TO_MEM,
1238 .arg4_type = ARG_CONST_SIZE,
1239 .arg5_type = ARG_ANYTHING,
1240};
1241
fc611f47
KS
1242const struct bpf_func_proto *
1243bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2541517c
AS
1244{
1245 switch (func_id) {
1246 case BPF_FUNC_map_lookup_elem:
1247 return &bpf_map_lookup_elem_proto;
1248 case BPF_FUNC_map_update_elem:
1249 return &bpf_map_update_elem_proto;
1250 case BPF_FUNC_map_delete_elem:
1251 return &bpf_map_delete_elem_proto;
02a8c817
AC
1252 case BPF_FUNC_map_push_elem:
1253 return &bpf_map_push_elem_proto;
1254 case BPF_FUNC_map_pop_elem:
1255 return &bpf_map_pop_elem_proto;
1256 case BPF_FUNC_map_peek_elem:
1257 return &bpf_map_peek_elem_proto;
d9847d31
AS
1258 case BPF_FUNC_ktime_get_ns:
1259 return &bpf_ktime_get_ns_proto;
71d19214
MÅ»
1260 case BPF_FUNC_ktime_get_boot_ns:
1261 return &bpf_ktime_get_boot_ns_proto;
04fd61ab
AS
1262 case BPF_FUNC_tail_call:
1263 return &bpf_tail_call_proto;
ffeedafb
AS
1264 case BPF_FUNC_get_current_pid_tgid:
1265 return &bpf_get_current_pid_tgid_proto;
606274c5
AS
1266 case BPF_FUNC_get_current_task:
1267 return &bpf_get_current_task_proto;
ffeedafb
AS
1268 case BPF_FUNC_get_current_uid_gid:
1269 return &bpf_get_current_uid_gid_proto;
1270 case BPF_FUNC_get_current_comm:
1271 return &bpf_get_current_comm_proto;
9c959c86 1272 case BPF_FUNC_trace_printk:
0756ea3e 1273 return bpf_get_trace_printk_proto();
ab1973d3
AS
1274 case BPF_FUNC_get_smp_processor_id:
1275 return &bpf_get_smp_processor_id_proto;
2d0e30c3
DB
1276 case BPF_FUNC_get_numa_node_id:
1277 return &bpf_get_numa_node_id_proto;
35578d79
KX
1278 case BPF_FUNC_perf_event_read:
1279 return &bpf_perf_event_read_proto;
96ae5227
SD
1280 case BPF_FUNC_probe_write_user:
1281 return bpf_get_probe_write_proto();
60d20f91
SD
1282 case BPF_FUNC_current_task_under_cgroup:
1283 return &bpf_current_task_under_cgroup_proto;
8937bd80
AS
1284 case BPF_FUNC_get_prandom_u32:
1285 return &bpf_get_prandom_u32_proto;
6ae08ae3
DB
1286 case BPF_FUNC_probe_read_user:
1287 return &bpf_probe_read_user_proto;
1288 case BPF_FUNC_probe_read_kernel:
1289 return &bpf_probe_read_kernel_proto;
6ae08ae3
DB
1290 case BPF_FUNC_probe_read_user_str:
1291 return &bpf_probe_read_user_str_proto;
1292 case BPF_FUNC_probe_read_kernel_str:
1293 return &bpf_probe_read_kernel_str_proto;
0ebeea8c
DB
1294#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1295 case BPF_FUNC_probe_read:
1296 return &bpf_probe_read_compat_proto;
a5e8c070 1297 case BPF_FUNC_probe_read_str:
6ae08ae3 1298 return &bpf_probe_read_compat_str_proto;
0ebeea8c 1299#endif
34ea38ca 1300#ifdef CONFIG_CGROUPS
bf6fa2c8
YS
1301 case BPF_FUNC_get_current_cgroup_id:
1302 return &bpf_get_current_cgroup_id_proto;
34ea38ca 1303#endif
8b401f9e
YS
1304 case BPF_FUNC_send_signal:
1305 return &bpf_send_signal_proto;
8482941f
YS
1306 case BPF_FUNC_send_signal_thread:
1307 return &bpf_send_signal_thread_proto;
b80b033b
SL
1308 case BPF_FUNC_perf_event_read_value:
1309 return &bpf_perf_event_read_value_proto;
b4490c5c
CN
1310 case BPF_FUNC_get_ns_current_pid_tgid:
1311 return &bpf_get_ns_current_pid_tgid_proto;
457f4436
AN
1312 case BPF_FUNC_ringbuf_output:
1313 return &bpf_ringbuf_output_proto;
1314 case BPF_FUNC_ringbuf_reserve:
1315 return &bpf_ringbuf_reserve_proto;
1316 case BPF_FUNC_ringbuf_submit:
1317 return &bpf_ringbuf_submit_proto;
1318 case BPF_FUNC_ringbuf_discard:
1319 return &bpf_ringbuf_discard_proto;
1320 case BPF_FUNC_ringbuf_query:
1321 return &bpf_ringbuf_query_proto;
72e2b2b6
YS
1322 case BPF_FUNC_jiffies64:
1323 return &bpf_jiffies64_proto;
fa28dcb8
SL
1324 case BPF_FUNC_get_task_stack:
1325 return &bpf_get_task_stack_proto;
07be4c4a
AS
1326 case BPF_FUNC_copy_from_user:
1327 return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
c4d0bfb4
AM
1328 case BPF_FUNC_snprintf_btf:
1329 return &bpf_snprintf_btf_proto;
eaa6bcb7
HL
1330 case BPF_FUNC_bpf_per_cpu_ptr:
1331 return &bpf_per_cpu_ptr_proto;
63d9b80d
HL
1332 case BPF_FUNC_bpf_this_cpu_ptr:
1333 return &bpf_this_cpu_ptr_proto;
9fd82b61
AS
1334 default:
1335 return NULL;
1336 }
1337}
1338
5e43f899
AI
1339static const struct bpf_func_proto *
1340kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
9fd82b61
AS
1341{
1342 switch (func_id) {
a43eec30
AS
1343 case BPF_FUNC_perf_event_output:
1344 return &bpf_perf_event_output_proto;
d5a3b1f6
AS
1345 case BPF_FUNC_get_stackid:
1346 return &bpf_get_stackid_proto;
c195651e
YS
1347 case BPF_FUNC_get_stack:
1348 return &bpf_get_stack_proto;
9802d865
JB
1349#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1350 case BPF_FUNC_override_return:
1351 return &bpf_override_return_proto;
1352#endif
2541517c 1353 default:
fc611f47 1354 return bpf_tracing_func_proto(func_id, prog);
2541517c
AS
1355 }
1356}
1357
1358/* bpf+kprobe programs can access fields of 'struct pt_regs' */
19de99f7 1359static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 1360 const struct bpf_prog *prog,
23994631 1361 struct bpf_insn_access_aux *info)
2541517c 1362{
2541517c
AS
1363 if (off < 0 || off >= sizeof(struct pt_regs))
1364 return false;
2541517c
AS
1365 if (type != BPF_READ)
1366 return false;
2541517c
AS
1367 if (off % size != 0)
1368 return false;
2d071c64
DB
1369 /*
1370 * Assertion for 32 bit to make sure last 8 byte access
1371 * (BPF_DW) to the last 4 byte member is disallowed.
1372 */
1373 if (off + size > sizeof(struct pt_regs))
1374 return false;
1375
2541517c
AS
1376 return true;
1377}
1378
7de16e3a 1379const struct bpf_verifier_ops kprobe_verifier_ops = {
2541517c
AS
1380 .get_func_proto = kprobe_prog_func_proto,
1381 .is_valid_access = kprobe_prog_is_valid_access,
1382};
1383
7de16e3a
JK
1384const struct bpf_prog_ops kprobe_prog_ops = {
1385};
1386
f3694e00
DB
1387BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1388 u64, flags, void *, data, u64, size)
9940d67c 1389{
f3694e00
DB
1390 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1391
9940d67c
AS
1392 /*
1393 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1394 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
f3694e00 1395 * from there and call the same bpf_perf_event_output() helper inline.
9940d67c 1396 */
f3694e00 1397 return ____bpf_perf_event_output(regs, map, flags, data, size);
9940d67c
AS
1398}
1399
1400static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1401 .func = bpf_perf_event_output_tp,
1402 .gpl_only = true,
1403 .ret_type = RET_INTEGER,
1404 .arg1_type = ARG_PTR_TO_CTX,
1405 .arg2_type = ARG_CONST_MAP_PTR,
1406 .arg3_type = ARG_ANYTHING,
39f19ebb 1407 .arg4_type = ARG_PTR_TO_MEM,
a60dd35d 1408 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
9940d67c
AS
1409};
1410
f3694e00
DB
1411BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1412 u64, flags)
9940d67c 1413{
f3694e00 1414 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
9940d67c 1415
f3694e00
DB
1416 /*
1417 * Same comment as in bpf_perf_event_output_tp(), only that this time
1418 * the other helper's function body cannot be inlined due to being
1419 * external, thus we need to call raw helper function.
1420 */
1421 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1422 flags, 0, 0);
9940d67c
AS
1423}
1424
1425static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1426 .func = bpf_get_stackid_tp,
1427 .gpl_only = true,
1428 .ret_type = RET_INTEGER,
1429 .arg1_type = ARG_PTR_TO_CTX,
1430 .arg2_type = ARG_CONST_MAP_PTR,
1431 .arg3_type = ARG_ANYTHING,
1432};
1433
c195651e
YS
1434BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1435 u64, flags)
1436{
1437 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1438
1439 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1440 (unsigned long) size, flags, 0);
1441}
1442
1443static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1444 .func = bpf_get_stack_tp,
1445 .gpl_only = true,
1446 .ret_type = RET_INTEGER,
1447 .arg1_type = ARG_PTR_TO_CTX,
1448 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1449 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1450 .arg4_type = ARG_ANYTHING,
1451};
1452
5e43f899
AI
1453static const struct bpf_func_proto *
1454tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
f005afed
YS
1455{
1456 switch (func_id) {
1457 case BPF_FUNC_perf_event_output:
1458 return &bpf_perf_event_output_proto_tp;
1459 case BPF_FUNC_get_stackid:
1460 return &bpf_get_stackid_proto_tp;
c195651e
YS
1461 case BPF_FUNC_get_stack:
1462 return &bpf_get_stack_proto_tp;
f005afed 1463 default:
fc611f47 1464 return bpf_tracing_func_proto(func_id, prog);
f005afed
YS
1465 }
1466}
1467
1468static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 1469 const struct bpf_prog *prog,
f005afed
YS
1470 struct bpf_insn_access_aux *info)
1471{
1472 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1473 return false;
1474 if (type != BPF_READ)
1475 return false;
1476 if (off % size != 0)
1477 return false;
1478
1479 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1480 return true;
1481}
1482
1483const struct bpf_verifier_ops tracepoint_verifier_ops = {
1484 .get_func_proto = tp_prog_func_proto,
1485 .is_valid_access = tp_prog_is_valid_access,
1486};
1487
1488const struct bpf_prog_ops tracepoint_prog_ops = {
1489};
1490
1491BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
4bebdc7a
YS
1492 struct bpf_perf_event_value *, buf, u32, size)
1493{
1494 int err = -EINVAL;
1495
1496 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1497 goto clear;
1498 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1499 &buf->running);
1500 if (unlikely(err))
1501 goto clear;
1502 return 0;
1503clear:
1504 memset(buf, 0, size);
1505 return err;
1506}
1507
f005afed
YS
1508static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1509 .func = bpf_perf_prog_read_value,
4bebdc7a
YS
1510 .gpl_only = true,
1511 .ret_type = RET_INTEGER,
1512 .arg1_type = ARG_PTR_TO_CTX,
1513 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1514 .arg3_type = ARG_CONST_SIZE,
1515};
1516
fff7b643
DX
1517BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1518 void *, buf, u32, size, u64, flags)
1519{
1520#ifndef CONFIG_X86
1521 return -ENOENT;
1522#else
1523 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1524 struct perf_branch_stack *br_stack = ctx->data->br_stack;
1525 u32 to_copy;
1526
1527 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1528 return -EINVAL;
1529
1530 if (unlikely(!br_stack))
1531 return -EINVAL;
1532
1533 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1534 return br_stack->nr * br_entry_size;
1535
1536 if (!buf || (size % br_entry_size != 0))
1537 return -EINVAL;
1538
1539 to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1540 memcpy(buf, br_stack->entries, to_copy);
1541
1542 return to_copy;
1543#endif
1544}
1545
1546static const struct bpf_func_proto bpf_read_branch_records_proto = {
1547 .func = bpf_read_branch_records,
1548 .gpl_only = true,
1549 .ret_type = RET_INTEGER,
1550 .arg1_type = ARG_PTR_TO_CTX,
1551 .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
1552 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1553 .arg4_type = ARG_ANYTHING,
1554};
1555
5e43f899
AI
1556static const struct bpf_func_proto *
1557pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
9fd82b61
AS
1558{
1559 switch (func_id) {
1560 case BPF_FUNC_perf_event_output:
9940d67c 1561 return &bpf_perf_event_output_proto_tp;
9fd82b61 1562 case BPF_FUNC_get_stackid:
7b04d6d6 1563 return &bpf_get_stackid_proto_pe;
c195651e 1564 case BPF_FUNC_get_stack:
7b04d6d6 1565 return &bpf_get_stack_proto_pe;
4bebdc7a 1566 case BPF_FUNC_perf_prog_read_value:
f005afed 1567 return &bpf_perf_prog_read_value_proto;
fff7b643
DX
1568 case BPF_FUNC_read_branch_records:
1569 return &bpf_read_branch_records_proto;
9fd82b61 1570 default:
fc611f47 1571 return bpf_tracing_func_proto(func_id, prog);
9fd82b61
AS
1572 }
1573}
1574
c4f6699d
AS
1575/*
1576 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1577 * to avoid potential recursive reuse issue when/if tracepoints are added
9594dc3c
MM
1578 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1579 *
1580 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1581 * in normal, irq, and nmi context.
c4f6699d 1582 */
9594dc3c
MM
1583struct bpf_raw_tp_regs {
1584 struct pt_regs regs[3];
1585};
1586static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1587static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1588static struct pt_regs *get_bpf_raw_tp_regs(void)
1589{
1590 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1591 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1592
1593 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1594 this_cpu_dec(bpf_raw_tp_nest_level);
1595 return ERR_PTR(-EBUSY);
1596 }
1597
1598 return &tp_regs->regs[nest_level - 1];
1599}
1600
1601static void put_bpf_raw_tp_regs(void)
1602{
1603 this_cpu_dec(bpf_raw_tp_nest_level);
1604}
1605
c4f6699d
AS
1606BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1607 struct bpf_map *, map, u64, flags, void *, data, u64, size)
1608{
9594dc3c
MM
1609 struct pt_regs *regs = get_bpf_raw_tp_regs();
1610 int ret;
1611
1612 if (IS_ERR(regs))
1613 return PTR_ERR(regs);
c4f6699d
AS
1614
1615 perf_fetch_caller_regs(regs);
9594dc3c
MM
1616 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1617
1618 put_bpf_raw_tp_regs();
1619 return ret;
c4f6699d
AS
1620}
1621
1622static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1623 .func = bpf_perf_event_output_raw_tp,
1624 .gpl_only = true,
1625 .ret_type = RET_INTEGER,
1626 .arg1_type = ARG_PTR_TO_CTX,
1627 .arg2_type = ARG_CONST_MAP_PTR,
1628 .arg3_type = ARG_ANYTHING,
1629 .arg4_type = ARG_PTR_TO_MEM,
1630 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1631};
1632
a7658e1a 1633extern const struct bpf_func_proto bpf_skb_output_proto;
d831ee84 1634extern const struct bpf_func_proto bpf_xdp_output_proto;
a7658e1a 1635
c4f6699d
AS
1636BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1637 struct bpf_map *, map, u64, flags)
1638{
9594dc3c
MM
1639 struct pt_regs *regs = get_bpf_raw_tp_regs();
1640 int ret;
1641
1642 if (IS_ERR(regs))
1643 return PTR_ERR(regs);
c4f6699d
AS
1644
1645 perf_fetch_caller_regs(regs);
1646 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
9594dc3c
MM
1647 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1648 flags, 0, 0);
1649 put_bpf_raw_tp_regs();
1650 return ret;
c4f6699d
AS
1651}
1652
1653static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1654 .func = bpf_get_stackid_raw_tp,
1655 .gpl_only = true,
1656 .ret_type = RET_INTEGER,
1657 .arg1_type = ARG_PTR_TO_CTX,
1658 .arg2_type = ARG_CONST_MAP_PTR,
1659 .arg3_type = ARG_ANYTHING,
1660};
1661
c195651e
YS
1662BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1663 void *, buf, u32, size, u64, flags)
1664{
9594dc3c
MM
1665 struct pt_regs *regs = get_bpf_raw_tp_regs();
1666 int ret;
1667
1668 if (IS_ERR(regs))
1669 return PTR_ERR(regs);
c195651e
YS
1670
1671 perf_fetch_caller_regs(regs);
9594dc3c
MM
1672 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1673 (unsigned long) size, flags, 0);
1674 put_bpf_raw_tp_regs();
1675 return ret;
c195651e
YS
1676}
1677
1678static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1679 .func = bpf_get_stack_raw_tp,
1680 .gpl_only = true,
1681 .ret_type = RET_INTEGER,
1682 .arg1_type = ARG_PTR_TO_CTX,
1683 .arg2_type = ARG_PTR_TO_MEM,
1684 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1685 .arg4_type = ARG_ANYTHING,
1686};
1687
5e43f899
AI
1688static const struct bpf_func_proto *
1689raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
c4f6699d
AS
1690{
1691 switch (func_id) {
1692 case BPF_FUNC_perf_event_output:
1693 return &bpf_perf_event_output_proto_raw_tp;
1694 case BPF_FUNC_get_stackid:
1695 return &bpf_get_stackid_proto_raw_tp;
c195651e
YS
1696 case BPF_FUNC_get_stack:
1697 return &bpf_get_stack_proto_raw_tp;
c4f6699d 1698 default:
fc611f47 1699 return bpf_tracing_func_proto(func_id, prog);
c4f6699d
AS
1700 }
1701}
1702
958a3f2d 1703const struct bpf_func_proto *
f1b9509c
AS
1704tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1705{
1706 switch (func_id) {
1707#ifdef CONFIG_NET
1708 case BPF_FUNC_skb_output:
1709 return &bpf_skb_output_proto;
d831ee84
EC
1710 case BPF_FUNC_xdp_output:
1711 return &bpf_xdp_output_proto;
af7ec138
YS
1712 case BPF_FUNC_skc_to_tcp6_sock:
1713 return &bpf_skc_to_tcp6_sock_proto;
478cfbdf
YS
1714 case BPF_FUNC_skc_to_tcp_sock:
1715 return &bpf_skc_to_tcp_sock_proto;
1716 case BPF_FUNC_skc_to_tcp_timewait_sock:
1717 return &bpf_skc_to_tcp_timewait_sock_proto;
1718 case BPF_FUNC_skc_to_tcp_request_sock:
1719 return &bpf_skc_to_tcp_request_sock_proto;
0d4fad3e
YS
1720 case BPF_FUNC_skc_to_udp6_sock:
1721 return &bpf_skc_to_udp6_sock_proto;
f1b9509c 1722#endif
492e639f
YS
1723 case BPF_FUNC_seq_printf:
1724 return prog->expected_attach_type == BPF_TRACE_ITER ?
1725 &bpf_seq_printf_proto :
1726 NULL;
1727 case BPF_FUNC_seq_write:
1728 return prog->expected_attach_type == BPF_TRACE_ITER ?
1729 &bpf_seq_write_proto :
1730 NULL;
eb411377
AM
1731 case BPF_FUNC_seq_printf_btf:
1732 return prog->expected_attach_type == BPF_TRACE_ITER ?
1733 &bpf_seq_printf_btf_proto :
1734 NULL;
6e22ab9d
JO
1735 case BPF_FUNC_d_path:
1736 return &bpf_d_path_proto;
f1b9509c
AS
1737 default:
1738 return raw_tp_prog_func_proto(func_id, prog);
1739 }
1740}
1741
c4f6699d
AS
1742static bool raw_tp_prog_is_valid_access(int off, int size,
1743 enum bpf_access_type type,
5e43f899 1744 const struct bpf_prog *prog,
c4f6699d
AS
1745 struct bpf_insn_access_aux *info)
1746{
f1b9509c
AS
1747 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1748 return false;
1749 if (type != BPF_READ)
1750 return false;
1751 if (off % size != 0)
1752 return false;
1753 return true;
1754}
1755
1756static bool tracing_prog_is_valid_access(int off, int size,
1757 enum bpf_access_type type,
1758 const struct bpf_prog *prog,
1759 struct bpf_insn_access_aux *info)
1760{
1761 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
c4f6699d
AS
1762 return false;
1763 if (type != BPF_READ)
1764 return false;
1765 if (off % size != 0)
1766 return false;
9e15db66 1767 return btf_ctx_access(off, size, type, prog, info);
c4f6699d
AS
1768}
1769
3e7c67d9
KS
1770int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1771 const union bpf_attr *kattr,
1772 union bpf_attr __user *uattr)
1773{
1774 return -ENOTSUPP;
1775}
1776
c4f6699d
AS
1777const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1778 .get_func_proto = raw_tp_prog_func_proto,
1779 .is_valid_access = raw_tp_prog_is_valid_access,
1780};
1781
1782const struct bpf_prog_ops raw_tracepoint_prog_ops = {
ebfb4d40 1783#ifdef CONFIG_NET
1b4d60ec 1784 .test_run = bpf_prog_test_run_raw_tp,
ebfb4d40 1785#endif
c4f6699d
AS
1786};
1787
f1b9509c
AS
1788const struct bpf_verifier_ops tracing_verifier_ops = {
1789 .get_func_proto = tracing_prog_func_proto,
1790 .is_valid_access = tracing_prog_is_valid_access,
1791};
1792
1793const struct bpf_prog_ops tracing_prog_ops = {
da00d2f1 1794 .test_run = bpf_prog_test_run_tracing,
f1b9509c
AS
1795};
1796
9df1c28b
MM
1797static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1798 enum bpf_access_type type,
1799 const struct bpf_prog *prog,
1800 struct bpf_insn_access_aux *info)
1801{
1802 if (off == 0) {
1803 if (size != sizeof(u64) || type != BPF_READ)
1804 return false;
1805 info->reg_type = PTR_TO_TP_BUFFER;
1806 }
1807 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1808}
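
/*
 * For writable raw tracepoints, offset 0 of the context is re-typed:
 * a full u64 load there yields a PTR_TO_TP_BUFFER pointing into the
 * tracepoint's writable buffer, and the verifier bounds stores through
 * it against btp->writable_size (see __bpf_probe_register() below).
 */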

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}
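
/*
 * Example: reading ctx->sample_period as a u32 (a narrow load of the
 * 8-byte field) is permitted by the bpf_ctx_narrow_access_ok() checks.
 * The off % size escape hatch applies only on 32-bit architectures,
 * where the u64 sample_period/addr fields may sit at off % 8 == 4
 * because the preceding pt_regs is not 8-byte padded.
 */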

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
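
/*
 * Illustrative expansion: a program-side load such as
 *
 *	r0 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data, sample_period))
 *
 * is rewritten into two loads that chase the kernel-side layout:
 *
 *	r0 = *(PTR_SIZE *)(r1 + offsetof(struct bpf_perf_event_data_kern, data))
 *	r0 = *(u64 *)(r0 + offsetof(struct perf_sample_data, period))
 *
 * so the program sees a flat context while the kernel keeps its
 * indirect bpf_perf_event_data_kern representation.
 */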

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto = pe_prog_func_proto,
	.is_valid_access = pe_prog_is_valid_access,
	.convert_ctx_access = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the probe is on the function
	 * entry, and only if the probed function is on the error
	 * injection opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}
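
/*
 * Attach and detach are copy-on-write: concurrent tracepoint hits walk
 * the old prog_array under RCU without taking bpf_event_mutex, while
 * bpf_prog_array_copy() builds a new array with the program added (or
 * removed). rcu_assign_pointer() then publishes the new array and only
 * the old array shell, not the programs in it, is freed.
 */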

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0,
	 * which is exactly what we want when the user only asks for
	 * uquery->prog_cnt. There is no need to check for it explicitly
	 * since the case is handled gracefully in
	 * bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
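
/*
 * A minimal userspace sketch of the query path above (assuming an
 * already-opened tracepoint perf event fd with programs attached;
 * perf_fd and the 64-slot sizing are illustrative):
 *
 *	struct perf_event_query_bpf *q;
 *
 *	q = calloc(1, sizeof(*q) + 64 * sizeof(__u32));
 *	q->ids_len = 64;
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q) == 0)
 *		printf("%u programs attached\n", q->prog_cnt);
 */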

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) BPF_PROG_RUN(prog, args);
	rcu_read_unlock();
}
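
/*
 * Raw tracepoint programs inherit the tracepoint's execution
 * constraints: cant_sleep() documents that this path must never block,
 * and the RCU read section pins the program (and the maps it
 * dereferences) for the duration of BPF_PROG_RUN().
 */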

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
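
/*
 * For reference, BPF_TRACE_DEFN_x(2) expands to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * i.e. the scalar tracepoint arguments are marshalled into the u64
 * array that becomes the raw tracepoint program's context.
 */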

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}
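
/*
 * Example: for a tracepoint exporting three arguments, num_args == 3,
 * so a program whose verified max_ctx_offset exceeds 3 * sizeof(u64)
 * == 24 bytes (i.e. one that reads args[3] or beyond) is rejected
 * with -EINVAL before the probe is ever registered.
 */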

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */