/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"
/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
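
/*
 * Illustrative sketch (not part of the original file): a kprobe dispatch
 * path is expected to treat the value returned above as a filter verdict.
 * kprobe_handler_sketch() and its arguments are hypothetical names.
 */
static inline void kprobe_handler_sketch(struct bpf_prog *prog,
					 struct pt_regs *regs)
{
	if (prog && !trace_call_bpf(prog, regs))
		return;	/* 0: event filtered out, skip the ring buffer */
	/* 1 (or any reserved value): go on and store the kprobe event */
}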
static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int ret, size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}
static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
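
/*
 * BPF-program-side sketch (separate object file, not this one); the buffer
 * name and the task pointer are illustrative:
 *
 *	char comm[16] = {};
 *
 *	bpf_probe_read(comm, sizeof(comm), task->comm);
 *
 * On failure the destination is zeroed above, which is what lets the
 * verifier accept an uninitialized buffer via ARG_PTR_TO_RAW_STACK.
 */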
/*
 * limited trace_printk()
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 */
static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
{
	char *fmt = (char *) (long) r1;
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = r3;
					r3 = (long) buf;
					break;
				case 2:
					unsafe_addr = r4;
					r4 = (long) buf;
					break;
				case 3:
					unsafe_addr = r5;
					r5 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};
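
/*
 * Examples of what the checker above accepts and rejects (derived from the
 * loop above, shown here for illustration only):
 *
 *	"pid %d comm %s\n"	accepted: two specifiers, one '%s'
 *	"%llx %lu %u\n"		accepted: three specifiers is the maximum
 *	"%s %s\n"		rejected: only one '%s' per format string
 *	"%d %d %d %d\n"		rejected: more than three specifiers
 *	"%f\n"			rejected: conversion not in the allowed set
 */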
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
		     event->attr.type != PERF_TYPE_RAW))
		return -EINVAL;

	/* make sure event is local and doesn't have pmu::count */
	if (unlikely(event->oncpu != cpu || event->pmu->count))
		return -EINVAL;

	/*
	 * we don't know if the function is run successfully by the
	 * return value. It can be judged in other places, such as
	 * eBPF exit program
	 */
	return perf_event_read_local(event);
}
static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
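
/*
 * BPF-program-side sketch (illustrative, separate object): reading the
 * hardware counter bound to the current CPU's slot of a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map, called "counters" here:
 *
 *	u64 val = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *
 * A negative value is one of the -EINVAL/-E2BIG/-ENOENT errors above
 * rather than a counter reading.
 */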
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct perf_sample_data sample_data;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}
static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = (struct pt_regs *)(long) r1;
	struct bpf_map *map  = (struct bpf_map *)(long) r2;
	void *data = (void *)(long) r4;
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}
static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};
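
/*
 * BPF-program-side sketch (illustrative, separate object): pushing a
 * sample to user space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY map,
 * here called "events"; the struct and its fields are made up:
 *
 *	struct sample { u32 pid; u64 ts; } s = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *		.ts  = bpf_ktime_get_ns(),
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &s, sizeof(s));
 */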
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}
static u64 bpf_get_current_task(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return (long) current;
}
static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	default:
		return NULL;
	}
}
static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}
static const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
	.ops	= &kprobe_prog_ops,
	.type	= BPF_PROG_TYPE_KPROBE,
};
static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper
	 */
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_perf_event_output(ctx, r2, index, r4, size);
}
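
/*
 * Sketch of the raw tracepoint buffer layout implied by the comment above
 * (illustrative, offsets for a 64-bit kernel):
 *
 *	+0	struct pt_regs *	hidden, consumed by this wrapper
 *	+8	tracepoint payload	start of what the bpf program sees
 *
 * tp_prog_is_valid_access() below enforces the same boundary by rejecting
 * offsets below sizeof(void *).
 */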
static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_STACK,
	.arg5_type	= ARG_CONST_STACK_SIZE,
};
static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_get_stackid(ctx, r2, r3, r4, r5);
}
static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}
static const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

static struct bpf_prog_type_list tracepoint_tl = {
	.ops	= &tracepoint_prog_ops,
	.type	= BPF_PROG_TYPE_TRACEPOINT,
};
static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	bpf_register_prog_type(&tracepoint_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);