/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 *  0 - return from kprobe (event is filtered out)
 *  1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	if (kernel_is_locked_down()) {
		memset(dst, 0, size);
		return -EPERM;
	}

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}
static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	if (kernel_is_locked_down())
		return -EPERM;

	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}
static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
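
/*
 * bpf_probe_write_user() can silently corrupt user memory, so handing out
 * its proto is accompanied by the rate-limited warning below naming the
 * task that loads such a program.
 */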
static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	if (kernel_is_locked_down())
		return __trace_printk(1, fmt, 0, 0, 0);

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}
/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)						\
	__trace_printk(1 /* Fake ip will not be printed. */,	\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	u64 value = 0;
	int err;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	err = perf_event_read_local(ee->event, &value);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}
static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
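
/*
 * Scratch perf_sample_data for __bpf_perf_event_output(); kept per cpu so
 * the output path does not have to carry this sizable struct on the stack.
 */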
static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(sd, 0, 0);
	sd->raw = raw;
	perf_event_output(event, sd, regs);
	return 0;
}
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}
static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
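
/*
 * bpf_event_output() serves callers that have no pt_regs of their own;
 * it snapshots the caller's registers into a per-cpu pt_regs via
 * perf_fetch_caller_regs() before reusing __bpf_perf_event_output().
 */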
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}
BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}
static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.ret_type	= RET_INTEGER,
};
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(in_interrupt()))
		return -EINVAL;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}
static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}
static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
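
/*
 * Helpers available to every tracing program type; the kprobe and
 * tracepoint tables below add their own perf_event_output/get_stackid
 * flavours and fall back here for everything else.
 */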
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
	default:
		return NULL;
	}
}
static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}
const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}
static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}
static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}
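
/*
 * Tracepoint programs read the raw tracepoint buffer; the first
 * sizeof(void *) bytes stay hidden (they carry the pt_regs pointer used
 * above), hence the lower bound on the offset check below.
 */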
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}
const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	const int size_sp = FIELD_SIZEOF(struct bpf_perf_event_data,
					 sample_period);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_sp);
		if (!bpf_ctx_narrow_access_ok(off, size, size_sp))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}
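
/*
 * Rewrite loads from struct bpf_perf_event_data into loads through the
 * in-kernel struct bpf_perf_event_data_kern: sample_period is fetched via
 * the data pointer, all other fields via the regs pointer.
 */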
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
const struct bpf_verifier_ops perf_event_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};