tracing: Allow BPF programs to call bpf_trace_printk()
author    Alexei Starovoitov <ast@plumgrid.com>
          Wed, 25 Mar 2015 19:49:22 +0000 (12:49 -0700)
committer Ingo Molnar <mingo@kernel.org>
          Thu, 2 Apr 2015 11:25:50 +0000 (13:25 +0200)
Debugging of BPF programs needs some form of printk from the
program, so let programs call a limited trace_printk() with the
%d %u %x %p conversion specifiers only.
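
For illustration only (not part of this patch): a minimal sketch of a
kprobe BPF program using the new helper, assuming the samples/bpf build
environment and its bpf_helpers.h stubs; the probed function and the
printed values are made up.

    #include <linux/ptrace.h>
    #include <uapi/linux/bpf.h>
    #include "bpf_helpers.h"	/* samples/bpf: SEC() macro and helper stubs */

    SEC("kprobe/sys_sync")
    int bpf_prog(struct pt_regs *ctx)
    {
    	/* the format string must live on the BPF program stack and be
    	 * NUL-terminated; only %d %u %x (optionally l/ll-prefixed) and
    	 * %p are accepted, with at most three printed values
    	 */
    	char fmt[] = "sys_sync() entered: ctx %p mark %d\n";

    	bpf_trace_printk(fmt, sizeof(fmt), ctx, 42);
    	return 0;
    }

    /* the helper is gpl_only, so the program must declare a GPL license */
    char _license[] SEC("license") = "GPL";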

Similar to kernel modules, during program load the verifier checks
whether the program calls bpf_trace_printk() and, if so, the kernel
allocates the trace_printk buffers and emits a big 'this is debug
only' banner.

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1427312966-8434-6-git-send-email-ast@plumgrid.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/uapi/linux/bpf.h
kernel/trace/bpf_trace.c

index 238c6883877b4a9a5ca50bf86c6b6293321f16ea..cc47ef41076a04039e60fffad990da3443f77729 100644 (file)
@@ -166,6 +166,7 @@ enum bpf_func_id {
        BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
        BPF_FUNC_probe_read,      /* int bpf_probe_read(void *dst, int size, void *src) */
        BPF_FUNC_ktime_get_ns,    /* u64 bpf_ktime_get_ns(void) */
+       BPF_FUNC_trace_printk,    /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */
        __BPF_FUNC_MAX_ID,
 };
 
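Restricted-C BPF programs typically bind this function ID to a callable
through a stub like the one in samples/bpf/bpf_helpers.h (shown here for
context, not part of this patch):

    static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
    	(void *) BPF_FUNC_trace_printk;
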
index 8f578729497149c4b8864157f9a7e6520fd36906..2d56ce50163264e79fdefc5c2859753c3c0f2bfb 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/bpf.h>
 #include <linux/filter.h>
 #include <linux/uaccess.h>
+#include <linux/ctype.h>
 #include "trace.h"
 
 static DEFINE_PER_CPU(int, bpf_prog_active);
@@ -90,6 +91,74 @@ static const struct bpf_func_proto bpf_ktime_get_ns_proto = {
        .ret_type       = RET_INTEGER,
 };
 
+/*
+ * limited trace_printk()
+ * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed
+ */
+static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
+{
+       char *fmt = (char *) (long) r1;
+       int mod[3] = {};
+       int fmt_cnt = 0;
+       int i;
+
+       /*
+        * bpf_check()->check_func_arg()->check_stack_boundary()
+        * guarantees that fmt points to bpf program stack,
+        * fmt_size bytes of it were initialized and fmt_size > 0
+        */
+       if (fmt[--fmt_size] != 0)
+               return -EINVAL;
+
+       /* check format string for allowed specifiers */
+       for (i = 0; i < fmt_size; i++) {
+               if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
+                       return -EINVAL;
+
+               if (fmt[i] != '%')
+                       continue;
+
+               if (fmt_cnt >= 3)
+                       return -EINVAL;
+
+               /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
+               i++;
+               if (fmt[i] == 'l') {
+                       mod[fmt_cnt]++;
+                       i++;
+               } else if (fmt[i] == 'p') {
+                       mod[fmt_cnt]++;
+                       i++;
+                       if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
+                               return -EINVAL;
+                       fmt_cnt++;
+                       continue;
+               }
+
+               if (fmt[i] == 'l') {
+                       mod[fmt_cnt]++;
+                       i++;
+               }
+
+               if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
+                       return -EINVAL;
+               fmt_cnt++;
+       }
+
+       return __trace_printk(1/* fake ip will not be printed */, fmt,
+                             mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
+                             mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
+                             mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
+}
+
+static const struct bpf_func_proto bpf_trace_printk_proto = {
+       .func           = bpf_trace_printk,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_STACK,
+       .arg2_type      = ARG_CONST_STACK_SIZE,
+};
+
 static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
 {
        switch (func_id) {
@@ -103,6 +172,15 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
                return &bpf_probe_read_proto;
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
+
+       case BPF_FUNC_trace_printk:
+               /*
+                * this program might be calling bpf_trace_printk,
+                * so allocate per-cpu printk buffers
+                */
+               trace_printk_init_buffers();
+
+               return &bpf_trace_printk_proto;
        default:
                return NULL;
        }
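
Like ordinary trace_printk() output, messages printed through this helper
land in the ftrace ring buffer. A minimal sketch of a userspace reader,
assuming debugfs is mounted at the conventional /sys/kernel/debug:

    #include <stdio.h>

    int main(void)
    {
    	/* trace_pipe blocks until trace data arrives and consumes
    	 * entries as they are read
    	 */
    	FILE *f = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
    	char line[4096];

    	if (!f) {
    		perror("trace_pipe");
    		return 1;
    	}
    	while (fgets(line, sizeof(line), f))
    		fputs(line, stdout);
    	fclose(f);
    	return 0;
    }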