/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"
/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
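
/*
 * Note: the typical caller at this point is the kprobe perf handler
 * (e.g. kprobe_perf_func() in kernel/trace/trace_kprobe.c), which passes
 * the saved pt_regs as @ctx and drops the event when this returns 0.
 */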
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
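
/*
 * Illustrative use from a BPF-C program (a sketch only, not part of this
 * file; 'task' and 'comm' are hypothetical program variables):
 *
 *	struct task_struct *task = (void *) bpf_get_current_task();
 *	char comm[16] = {};
 *
 *	bpf_probe_read(comm, sizeof(comm), &task->comm);
 */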
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}
static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
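
/*
 * Sketch of a (hypothetical) BPF-C call site; the helper is only usable
 * from process context, never from kthreads, interrupts or NMIs, and the
 * rate-limited warning above is emitted whenever such a program is loaded:
 *
 *	long err = bpf_probe_write_user(user_ptr, &val, sizeof(val));
 */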
/*
 * limited trace_printk()
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
			      mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
			      mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
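
/*
 * Sketch of a (hypothetical) BPF-C call site; the format string must live
 * on the BPF stack, at most three arguments and a single %s are accepted,
 * and the output lands in the ftrace buffer (e.g. trace_pipe):
 *
 *	char fmt[] = "pid %d comm %s\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 */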
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
		     event->attr.type != PERF_TYPE_RAW))
		return -EINVAL;

	/* make sure event is local and doesn't have pmu::count */
	if (unlikely(event->oncpu != cpu || event->pmu->count))
		return -EINVAL;

	/*
	 * we don't know if the function is run successfully by the
	 * return value. It can be judged in other places, such as
	 * eBPF programs.
	 */
	return perf_event_read_local(event);
}
static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
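
/*
 * Sketch of a (hypothetical) BPF-C call site reading a hardware counter
 * through a BPF_MAP_TYPE_PERF_EVENT_ARRAY map ('counters' is assumed to be
 * such a map populated from user space):
 *
 *	u64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 */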
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct perf_sample_data sample_data;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}
static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
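
/*
 * Sketch of a (hypothetical) kprobe program emitting a sample into a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map ('events' and 'data' are assumed
 * program-side names):
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */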
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}
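
/*
 * bpf_event_output() is the variant used by program types that have no
 * pt_regs in their context (e.g. bpf_skb_event_output() in
 * net/core/filter.c): it fabricates regs from the current call site and
 * chains the ctx payload as an extra raw fragment behind the meta data.
 */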
BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.ret_type	= RET_INTEGER,
};
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(in_interrupt()))
		return -EINVAL;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
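
/*
 * Sketch of a (hypothetical) BPF-C call site testing the current task
 * against slot 0 of a BPF_MAP_TYPE_CGROUP_ARRAY map ('cgrp_map' assumed):
 *
 *	if (bpf_current_task_under_cgroup(&cgrp_map, 0) == 1)
 *		... current task is inside that cgroup hierarchy ...
 */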
BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
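
/*
 * Sketch of a (hypothetical) BPF-C call site; on success the returned
 * length (including the trailing NUL) can be fed to bpf_perf_event_output():
 *
 *	int len = bpf_probe_read_str(buf, sizeof(buf), unsafe_ptr);
 */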
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
	default:
		return NULL;
	}
}
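
/*
 * The per-program-type *_func_proto() callbacks below only add the helpers
 * that need a type-specific context (perf_event_output, get_stackid) and
 * fall back to this common tracing table for everything else.
 */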
static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

static const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};
static struct bpf_prog_type_list kprobe_tl __ro_after_init = {
	.ops	= &kprobe_prog_ops,
	.type	= BPF_PROG_TYPE_KPROBE,
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}
static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}
static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}
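
/*
 * Offsets below sizeof(void *) are rejected above because the first 8 bytes
 * of the tracepoint buffer hold the hidden 'struct pt_regs' pointer used by
 * bpf_perf_event_output_tp()/bpf_get_stackid_tp() and must stay invisible
 * to the program.
 */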
static const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};
static struct bpf_prog_type_list tracepoint_tl __ro_after_init = {
	.ops	= &tracepoint_prog_ops,
	.type	= BPF_PROG_TYPE_TRACEPOINT,
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
		if (size != sizeof(u64))
			return false;
	} else {
		if (size != sizeof(long))
			return false;
	}

	return true;
}
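
/*
 * Context accesses are rewritten below into two loads: first the kernel-side
 * pointer (data or regs) is fetched from 'struct bpf_perf_event_data_kern',
 * then the requested field is loaded through that pointer.
 */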
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      offsetof(struct perf_sample_data, period));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
static const struct bpf_verifier_ops perf_event_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};
static struct bpf_prog_type_list perf_event_tl __ro_after_init = {
	.ops	= &perf_event_prog_ops,
	.type	= BPF_PROG_TYPE_PERF_EVENT,
};
static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	bpf_register_prog_type(&tracepoint_tl);
	bpf_register_prog_type(&perf_event_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);