git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
selftests/bpf: mmap: Use runtime page size
author: Yauheni Kaliuta <yauheni.kaliuta@redhat.com>
Thu, 8 Apr 2021 06:13:06 +0000 (09:13 +0300)
committer: Andrii Nakryiko <andrii@kernel.org>
Fri, 9 Apr 2021 06:54:48 +0000 (23:54 -0700)
Replace hardcoded 4096 with runtime value in the userspace part of
the test and set bpf table sizes dynamically according to the value.

Do not switch to ASSERT macros, keep CHECK, for consistency with the
rest of the test. Can be a separate cleanup patch.

Signed-off-by: Yauheni Kaliuta <yauheni.kaliuta@redhat.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210408061310.95877-5-yauheni.kaliuta@redhat.com
tools/testing/selftests/bpf/prog_tests/mmap.c
tools/testing/selftests/bpf/progs/test_mmap.c

index 9c3c5c0f068fb521897794aa050b841df90ff347..37b002ca11671e2186c6426460cfd63d32a0d5ab 100644 (file)
@@ -29,22 +29,36 @@ void test_mmap(void)
        struct test_mmap *skel;
        __u64 val = 0;
 
-       skel = test_mmap__open_and_load();
-       if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
+       skel = test_mmap__open();
+       if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
                return;
 
+       err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size);
+       if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+               goto cleanup;
+
+       /* at least 4 pages of data */
+       err = bpf_map__set_max_entries(skel->maps.data_map,
+                                      4 * (page_size / sizeof(u64)));
+       if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+               goto cleanup;
+
+       err = test_mmap__load(skel);
+       if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+               goto cleanup;
+
        bss_map = skel->maps.bss;
        data_map = skel->maps.data_map;
        data_map_fd = bpf_map__fd(data_map);
 
        rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
-       tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
+       tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
        if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
-               munmap(tmp1, 4096);
+               munmap(tmp1, page_size);
                goto cleanup;
        }
        /* now double-check if it's mmap()'able at all */
-       tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0);
+       tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0);
        if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
                goto cleanup;
 
index 4eb42cff5fe966e8405770672daa6aaaf82457f4..5a5cc19a15bf8a21f72240117cc92d9bafe99808 100644 (file)
@@ -9,7 +9,6 @@ char _license[] SEC("license") = "GPL";
 
 struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
-       __uint(max_entries, 4096);
        __uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG);
        __type(key, __u32);
        __type(value, char);
@@ -17,7 +16,6 @@ struct {
 
 struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
-       __uint(max_entries, 512 * 4); /* at least 4 pages of data */
        __uint(map_flags, BPF_F_MMAPABLE);
        __type(key, __u32);
        __type(value, __u64);