bpf: add selftest for stackmap with build_id in NMI context
author	Song Liu <songliubraving@fb.com>
	Mon, 7 May 2018 17:50:49 +0000 (10:50 -0700)
committer	Daniel Borkmann <daniel@iogearbox.net>
	Mon, 14 May 2018 21:29:45 +0000 (23:29 +0200)
This new test captures stack traces with build_id in a stackmap under
the hardware event PERF_COUNT_HW_CPU_CYCLES.

Because we only support one ips-to-build_id lookup per cpu in NMI
context, stack_amap will not be able to do the lookup in this test.
Therefore, we don't call compare_stack_ips(), as it would always fail.
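
As a side note, here is a minimal sketch of how a map reader can tell
the two outcomes apart. The struct and status constants come from the
bpf_stack_build_id UAPI in <linux/bpf.h>; the helper name itself is
made up for illustration:

  #include <linux/bpf.h>

  /* Hypothetical helper: classify one entry of a stackmap created
   * with BPF_F_STACK_BUILD_ID. When the in-NMI lookup fails, the
   * kernel stores BPF_STACK_BUILD_ID_IP and keeps the raw instruction
   * pointer in e->ip instead of a build_id/offset pair.
   */
  static const char *stack_entry_kind(const struct bpf_stack_build_id *e)
  {
          switch (e->status) {
          case BPF_STACK_BUILD_ID_VALID:
                  return "build_id+offset";   /* e->build_id, e->offset */
          case BPF_STACK_BUILD_ID_IP:
                  return "raw ip (fallback)"; /* e->ip */
          default:
                  return "empty";             /* BPF_STACK_BUILD_ID_EMPTY */
          }
  }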

urandom_read.c is extended to run a configurable number of read cycles
so that it can be caught by the perf event.
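
For example, the test below pins it to cpu 0 (where the perf event is
opened) and runs enough cycles to trigger samples:

  $ taskset 0x1 ./urandom_read 100000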

Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/urandom_read.c

index f7731973ec682fbc829e74468978cdbcff3cebba..3ecf733330c109cc099cfba3ded4aabb9d4bf386 100644
@@ -1272,6 +1272,139 @@ out:
        return;
 }
 
+static void test_stacktrace_build_id_nmi(void)
+{
+       int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+       const char *file = "./test_stacktrace_build_id.o";
+       int err, pmu_fd, prog_fd;
+       struct perf_event_attr attr = {
+               .sample_freq = 5000,
+               .freq = 1,
+               .type = PERF_TYPE_HARDWARE,
+               .config = PERF_COUNT_HW_CPU_CYCLES,
+       };
+       __u32 key, previous_key, val, duration = 0;
+       struct bpf_object *obj;
+       char buf[256];
+       int i, j;
+       struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
+       int build_id_matches = 0;
+
+       err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
+       if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+               return;
+
+       pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+                        0 /* cpu 0 */, -1 /* group id */,
+                        0 /* flags */);
+       if (CHECK(pmu_fd < 0, "perf_event_open",
+                 "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
+                 pmu_fd, errno))
+               goto close_prog;
+
+       err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+       if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+                 err, errno))
+               goto close_pmu;
+
+       err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+       if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+                 err, errno))
+               goto disable_pmu;
+
+       /* find map fds */
+       control_map_fd = bpf_find_map(__func__, obj, "control_map");
+       if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+                 "err %d errno %d\n", err, errno))
+               goto disable_pmu;
+
+       stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+       if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+                 "err %d errno %d\n", err, errno))
+               goto disable_pmu;
+
+       stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+       if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+                 err, errno))
+               goto disable_pmu;
+
+       stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+       if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
+                 "err %d errno %d\n", err, errno))
+               goto disable_pmu;
+
+       assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
+              == 0);
+       assert(system("taskset 0x1 ./urandom_read 100000") == 0);
+       /* disable stack trace collection */
+       key = 0;
+       val = 1;
+       bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+       /* for every element in stackid_hmap, we can find a corresponding one
+        * in stackmap, and vice versa.
+        */
+       err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+       if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+                 "err %d errno %d\n", err, errno))
+               goto disable_pmu;
+
+       err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+       if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+                 "err %d errno %d\n", err, errno))
+               goto disable_pmu;
+
+       err = extract_build_id(buf, 256);
+
+       if (CHECK(err, "get build_id with readelf",
+                 "err %d errno %d\n", err, errno))
+               goto disable_pmu;
+
+       err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+       if (CHECK(err, "get_next_key from stackmap",
+                 "err %d, errno %d\n", err, errno))
+               goto disable_pmu;
+
+       do {
+               char build_id[64];
+
+               err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+               if (CHECK(err, "lookup_elem from stackmap",
+                         "err %d, errno %d\n", err, errno))
+                       goto disable_pmu;
+               for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
+                       if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
+                           id_offs[i].offset != 0) {
+                               for (j = 0; j < 20; ++j)
+                                       sprintf(build_id + 2 * j, "%02x",
+                                               id_offs[i].build_id[j] & 0xff);
+                               if (strstr(buf, build_id) != NULL)
+                                       build_id_matches = 1;
+                       }
+               previous_key = key;
+       } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+
+       if (CHECK(build_id_matches < 1, "build id match",
+                 "Didn't find expected build ID from the map\n"))
+               goto disable_pmu;
+
+       /*
+        * We intentionally skip compare_stack_ips(). This is because we
+        * only support one in_nmi() ips-to-build_id translation per cpu
+        * at any time, thus stack_amap here will always fall back to
+        * BPF_STACK_BUILD_ID_IP.
+        */
+
+disable_pmu:
+       ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+       close(pmu_fd);
+
+close_prog:
+       bpf_object__close(obj);
+}
+
 #define MAX_CNT_RAWTP  10ull
 #define MAX_STACK_RAWTP        100
 struct get_stack_trace_t {
@@ -1425,6 +1558,7 @@ int main(void)
        test_tp_attach_query();
        test_stacktrace_map();
        test_stacktrace_build_id();
+       test_stacktrace_build_id_nmi();
        test_stacktrace_map_raw_tp();
        test_get_stack_raw_tp();
 
index 4acfdebf36fad511bea9d90f6f7b62f140f60eb6..9de8b7cb4e6df6b5929c915566dff74163efbcf2 100644
@@ -6,15 +6,21 @@
 #include <stdlib.h>
 
 #define BUF_SIZE 256
-int main(void)
+
+int main(int argc, char *argv[])
 {
        int fd = open("/dev/urandom", O_RDONLY);
        int i;
        char buf[BUF_SIZE];
+       int count = 4;
 
        if (fd < 0)
                return 1;
-       for (i = 0; i < 4; ++i)
+
+       if (argc == 2)
+               count = atoi(argv[1]);
+
+       for (i = 0; i < count; ++i)
                read(fd, buf, BUF_SIZE);
 
        close(fd);