Merge tag 'linux-kselftest-4.13-rc1-update' of git://git.kernel.org/pub/scm/linux...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 7 Jul 2017 21:04:47 +0000 (14:04 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 7 Jul 2017 21:04:47 +0000 (14:04 -0700)
Pull Kselftest updates from Shuah Khan:
 "This update consists of:

   - TAP13 framework and changes to some tests to convert them to TAP13.
     Converting kselftest output to a standard format will help identify
     run-to-run differences and pinpoint failures easily. The TAP13
     format has been in use for several years and its output is human
     friendly (a sample of the format appears after this message).

     The specification can be found at:
       https://testanything.org/tap-version-13-specification.html

     Credit goes to Tim Bird for recommending TAP13 as a suitable
     format, and to Greg KH for kick-starting the work with help from
     Paul Elder and Alice Ferrazzi.

     The first phase of the TAP13 conversion is included in this update.
     Future updates will convert the rest of the tests.

   - Masami Hiramatsu fixed ftrace to run on 4.9 stable kernels.

   - Kselftest documentation has been converted to ReST format. The
     document now has a new home under Documentation/dev-tools.

   - kselftest_harness.h is now available for general use as a result of
     Mickaël Salaün's work.

   - Several fixes to skip and/or fail tests gracefully on older
     releases"

* tag 'linux-kselftest-4.13-rc1-update' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest: (48 commits)
  selftests: membarrier: use ksft_* var arg msg api
  selftests: breakpoints: breakpoint_test_arm64: convert test to use TAP13
  selftests: breakpoints: step_after_suspend_test use ksft_* var arg msg api
  selftests: breakpoint_test: use ksft_* var arg msg api
  kselftest: add ksft_print_msg() function to output general information
  kselftest: make ksft_* output functions variadic
  selftests/capabilities: Fix the test_execve test
  selftests: intel_pstate: add .gitignore
  selftests: fix memory-hotplug test
  selftests: add missing test name in memory-hotplug test
  selftests: check percentage range for memory-hotplug test
  selftests: check hot-pluggagble memory for memory-hotplug test
  selftests: typo correction for memory-hotplug test
  selftests: ftrace: Use md5sum to take less time of checking logs
  tools/testing/selftests/sysctl: Add pre-check to the value of writes_strict
  kselftest.rst: do some adjustments after ReST conversion
  selftest/net/Makefile: Specify output with $(OUTPUT)
  selftest/intel_pstate/aperf: Use LDLIBS instead of LDFLAGS
  selftest/memfd/Makefile: Fix build error
  selftests: lib: Skip tests on missing test modules
  ...

44 files changed:
Documentation/00-INDEX
Documentation/dev-tools/index.rst
Documentation/dev-tools/kselftest.rst [new file with mode: 0644]
Documentation/kselftest.txt [deleted file]
MAINTAINERS
kernel/trace/trace.c
tools/testing/selftests/breakpoints/breakpoint_test.c
tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
tools/testing/selftests/breakpoints/step_after_suspend_test.c
tools/testing/selftests/capabilities/test_execve.c
tools/testing/selftests/ftrace/ftracetest
tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc
tools/testing/selftests/futex/functional/futex_requeue_pi.c
tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
tools/testing/selftests/futex/functional/futex_wait_timeout.c
tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
tools/testing/selftests/futex/include/logging.h
tools/testing/selftests/intel_pstate/.gitignore [new file with mode: 0644]
tools/testing/selftests/intel_pstate/Makefile
tools/testing/selftests/kselftest.h
tools/testing/selftests/kselftest_harness.h [new file with mode: 0644]
tools/testing/selftests/lib/bitmap.sh
tools/testing/selftests/lib/printf.sh
tools/testing/selftests/membarrier/membarrier_test.c
tools/testing/selftests/memfd/Makefile
tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
tools/testing/selftests/net/Makefile
tools/testing/selftests/seccomp/Makefile
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/testing/selftests/seccomp/test_harness.h [deleted file]
tools/testing/selftests/size/get_size.c
tools/testing/selftests/sync/sync_test.c
tools/testing/selftests/sysctl/common_tests
tools/testing/selftests/sysctl/run_numerictests
tools/testing/selftests/sysctl/run_stringtests
tools/testing/selftests/vm/virtual_address_range.c

index f35473f8c630578b76841cf61370b16d1f131ed7..3bec49c33bbb9b71dd3c4f6463d56c32d940a46e 100644 (file)
@@ -242,8 +242,6 @@ kprobes.txt
        - documents the kernel probes debugging feature.
 kref.txt
        - docs on adding reference counters (krefs) to kernel objects.
-kselftest.txt
-       - small unittests for (some) individual codepaths in the kernel.
 laptops/
        - directory with laptop related info and laptop driver documentation.
 ldm.txt
index 4ac991dbddb714219ae5231480784317dbd4aece..a81787cd47d793746f28e1035630b13298dc65f8 100644 (file)
@@ -24,6 +24,7 @@ whole; patches welcome!
    kmemcheck
    gdb-kernel-debugging
    kgdb
+   kselftest
 
 
 .. only::  subproject and html
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
new file mode 100644 (file)
index 0000000..ebd03d1
--- /dev/null
@@ -0,0 +1,155 @@
+======================
+Linux Kernel Selftests
+======================
+
+The kernel contains a set of "self tests" under the tools/testing/selftests/
+directory. These are intended to be small tests to exercise individual code
+paths in the kernel. Tests are intended to be run after building, installing
+and booting a kernel.
+
+On some systems, hot-plug tests could hang forever waiting for cpu and
+memory to be ready to be offlined. A special hot-plug target is created
+to run the full range of hot-plug tests. In default mode, hot-plug tests
+run in safe mode with a limited scope. In limited mode, the cpu-hotplug
+test is run on a single cpu as opposed to all hotplug-capable cpus, and
+the memory-hotplug test is run on 2% of hotplug-capable memory instead
+of 10%.
+
+Running the selftests (hotplug tests are run in limited mode)
+=============================================================
+
+To build the tests::
+
+  $ make -C tools/testing/selftests
+
+To run the tests::
+
+  $ make -C tools/testing/selftests run_tests
+
+To build and run the tests with a single command, use::
+
+  $ make kselftest
+
+Note that some tests will require root privileges.
+
+
+Running a subset of selftests
+=============================
+
+You can use the "TARGETS" variable on the make command line to specify
+single test to run, or a list of tests to run.
+
+To run only tests targeted for a single subsystem::
+
+  $ make -C tools/testing/selftests TARGETS=ptrace run_tests
+
+You can specify multiple tests to build and run::
+
+  $  make TARGETS="size timers" kselftest
+
+See the top-level tools/testing/selftests/Makefile for the list of all
+possible targets.
+
+
+Running the full range hotplug selftests
+========================================
+
+To build the hotplug tests::
+
+  $ make -C tools/testing/selftests hotplug
+
+To run the hotplug tests::
+
+  $ make -C tools/testing/selftests run_hotplug
+
+Note that some tests will require root privileges.
+
+
+Install selftests
+=================
+
+You can use the kselftest_install.sh tool to install selftests in the
+default location, which is tools/testing/selftests/kselftest, or in a
+user-specified location.
+
+To install selftests in default location::
+
+   $ cd tools/testing/selftests
+   $ ./kselftest_install.sh
+
+To install selftests in a user specified location::
+
+   $ cd tools/testing/selftests
+   $ ./kselftest_install.sh install_dir
+
+Running installed selftests
+===========================
+
+Kselftest install as well as the Kselftest tarball provide a script
+named "run_kselftest.sh" to run the tests.
+
+You can simply do the following to run the installed Kselftests. Please
+note some tests will require root privileges::
+
+   $ cd kselftest
+   $ ./run_kselftest.sh
+
+Contributing new tests
+======================
+
+In general, the rules for selftests are
+
+ * Do as much as you can if you're not root;
+
+ * Don't take too long;
+
+ * Don't break the build on any architecture, and
+
+ * Don't cause the top-level "make run_tests" to fail if your feature is
+   unconfigured.
+
+Contributing new tests (details)
+================================
+
+ * Use TEST_GEN_XXX if such binaries or files are generated during
+   compiling.
+
+   TEST_PROGS, TEST_GEN_PROGS mean it is the executable tested by
+   default.
+
+   TEST_PROGS_EXTENDED, TEST_GEN_PROGS_EXTENDED mean it is the
+   executable which is not tested by default.
+
+   TEST_FILES, TEST_GEN_FILES mean it is the file which is used by
+   the test.
+
+Test Harness
+============
+
+The kselftest_harness.h file contains useful helpers to build tests.  The tests
+from tools/testing/selftests/seccomp/seccomp_bpf.c can be used as an example.
+
+Example
+-------
+
+.. kernel-doc:: tools/testing/selftests/kselftest_harness.h
+    :doc: example
+
+
+Helpers
+-------
+
+.. kernel-doc:: tools/testing/selftests/kselftest_harness.h
+    :functions: TH_LOG TEST TEST_SIGNAL FIXTURE FIXTURE_DATA FIXTURE_SETUP
+                FIXTURE_TEARDOWN TEST_F TEST_HARNESS_MAIN
+
+Operators
+---------
+
+.. kernel-doc:: tools/testing/selftests/kselftest_harness.h
+    :doc: operators
+
+.. kernel-doc:: tools/testing/selftests/kselftest_harness.h
+    :functions: ASSERT_EQ ASSERT_NE ASSERT_LT ASSERT_LE ASSERT_GT ASSERT_GE
+                ASSERT_NULL ASSERT_TRUE ASSERT_FALSE
+                ASSERT_STREQ ASSERT_STRNE EXPECT_EQ EXPECT_NE EXPECT_LT
+                EXPECT_LE EXPECT_GT EXPECT_GE EXPECT_NULL EXPECT_TRUE
+                EXPECT_FALSE EXPECT_STREQ EXPECT_STRNE
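
As a companion to the harness documentation added above, here is a minimal,
self-contained test file sketch modeled on the example embedded in
kselftest_harness.h further down in this diff; the test names, the fixture,
and the relative include path are illustrative assumptions, not part of the
commit:

/*
 * Illustrative sketch of a kselftest_harness.h based test.
 * Names and the include path are hypothetical.
 */
#include <stdlib.h>

#include "../kselftest_harness.h"

TEST(arithmetic_sanity)
{
	int x = 2 + 2;

	/* ASSERT_* aborts this test immediately on failure. */
	ASSERT_EQ(4, x);
	/* EXPECT_* records a failure but lets the test continue. */
	EXPECT_NE(0, x) TH_LOG("x is unexpectedly zero");
}

FIXTURE(buffer) {
	char *data;
};

FIXTURE_SETUP(buffer)
{
	self->data = malloc(16);
	ASSERT_NE(NULL, self->data);
}

FIXTURE_TEARDOWN(buffer)
{
	free(self->data);
}

TEST_F(buffer, is_writable)
{
	self->data[0] = 'a';
	EXPECT_EQ('a', self->data[0]);
}

TEST_HARNESS_MAIN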
diff --git a/Documentation/kselftest.txt b/Documentation/kselftest.txt
deleted file mode 100644 (file)
index 5bd5903..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-Linux Kernel Selftests
-
-The kernel contains a set of "self tests" under the tools/testing/selftests/
-directory. These are intended to be small tests to exercise individual code
-paths in the kernel. Tests are intended to be run after building, installing
-and booting a kernel.
-
-On some systems, hot-plug tests could hang forever waiting for cpu and
-memory to be ready to be offlined. A special hot-plug target is created
-to run full range of hot-plug tests. In default mode, hot-plug tests run
-in safe mode with a limited scope. In limited mode, cpu-hotplug test is
-run on a single cpu as opposed to all hotplug capable cpus, and memory
-hotplug test is run on 2% of hotplug capable memory instead of 10%.
-
-Running the selftests (hotplug tests are run in limited mode)
-=============================================================
-
-To build the tests:
-  $ make -C tools/testing/selftests
-
-
-To run the tests:
-  $ make -C tools/testing/selftests run_tests
-
-To build and run the tests with a single command, use:
-  $ make kselftest
-
-- note that some tests will require root privileges.
-
-
-Running a subset of selftests
-========================================
-You can use the "TARGETS" variable on the make command line to specify
-single test to run, or a list of tests to run.
-
-To run only tests targeted for a single subsystem:
-  $  make -C tools/testing/selftests TARGETS=ptrace run_tests
-
-You can specify multiple tests to build and run:
-  $  make TARGETS="size timers" kselftest
-
-See the top-level tools/testing/selftests/Makefile for the list of all
-possible targets.
-
-
-Running the full range hotplug selftests
-========================================
-
-To build the hotplug tests:
-  $ make -C tools/testing/selftests hotplug
-
-To run the hotplug tests:
-  $ make -C tools/testing/selftests run_hotplug
-
-- note that some tests will require root privileges.
-
-
-Install selftests
-=================
-
-You can use kselftest_install.sh tool installs selftests in default
-location which is tools/testing/selftests/kselftest or a user specified
-location.
-
-To install selftests in default location:
-   $ cd tools/testing/selftests
-   $ ./kselftest_install.sh
-
-To install selftests in a user specified location:
-   $ cd tools/testing/selftests
-   $ ./kselftest_install.sh install_dir
-
-Running installed selftests
-===========================
-
-Kselftest install as well as the Kselftest tarball provide a script
-named "run_kselftest.sh" to run the tests.
-
-You can simply do the following to run the installed Kselftests. Please
-note some tests will require root privileges.
-
-cd kselftest
-./run_kselftest.sh
-
-Contributing new tests
-======================
-
-In general, the rules for selftests are
-
- * Do as much as you can if you're not root;
-
- * Don't take too long;
-
- * Don't break the build on any architecture, and
-
- * Don't cause the top-level "make run_tests" to fail if your feature is
-   unconfigured.
-
-Contributing new tests(details)
-===============================
-
- * Use TEST_GEN_XXX if such binaries or files are generated during
-   compiling.
-   TEST_PROGS, TEST_GEN_PROGS mean it is the excutable tested by
-   default.
-   TEST_PROGS_EXTENDED, TEST_GEN_PROGS_EXTENDED mean it is the
-   executable which is not tested by default.
-   TEST_FILES, TEST_GEN_FILES mean it is the file which is used by
-   test.
index a4f37b69a66c5da7149ad510607e2a0b4db617ee..b31be7522e45be65104b030086d51e718fde9be1 100644 (file)
@@ -7337,9 +7337,10 @@ KERNEL SELFTEST FRAMEWORK
 M:     Shuah Khan <shuahkh@osg.samsung.com>
 M:     Shuah Khan <shuah@kernel.org>
 L:     linux-kselftest@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/shuah/linux-kselftest
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
 S:     Maintained
-F:     tools/testing/selftests
+F:     tools/testing/selftests/
+F:     Documentation/dev-tools/kselftest*
 
 KERNEL VIRTUAL MACHINE (KVM)
 M:     Paolo Bonzini <pbonzini@redhat.com>
@@ -11740,6 +11741,7 @@ F:      kernel/seccomp.c
 F:     include/uapi/linux/seccomp.h
 F:     include/linux/seccomp.h
 F:     tools/testing/selftests/seccomp/*
+F:     tools/testing/selftests/kselftest_harness.h
 F:     Documentation/userspace-api/seccomp_filter.rst
 K:     \bsecure_computing
 K:     \bTIF_SECCOMP\b
index 00e2e4169b1e85216b7404b8eff7b1a3b1fc9418..948ec32e0c27f9197fda8ca500d584f259d5c217 100644 (file)
@@ -4564,7 +4564,8 @@ static const char readme_msg[] =
 #endif
 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
        "\t  accepts: event-definitions (one definition per line)\n"
-       "\t   Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
+       "\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
+       "\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
        "\t           -:[<group>/]<event>\n"
 #ifdef CONFIG_KPROBE_EVENTS
        "\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
index 120895ab5505b415dac85026ebf2c6d5d8a89761..f63356151ad41f1a20e9e6a13da6f9fdcb06ce28 100644 (file)
@@ -16,6 +16,8 @@
 #include <signal.h>
 #include <sys/types.h>
 #include <sys/wait.h>
+#include <errno.h>
+#include <string.h>
 
 #include "../kselftest.h"
 
@@ -42,10 +44,9 @@ static void set_breakpoint_addr(void *addr, int n)
 
        ret = ptrace(PTRACE_POKEUSER, child_pid,
                     offsetof(struct user, u_debugreg[n]), addr);
-       if (ret) {
-               perror("Can't set breakpoint addr\n");
-               ksft_exit_fail();
-       }
+       if (ret)
+               ksft_exit_fail_msg("Can't set breakpoint addr: %s\n",
+                       strerror(errno));
 }
 
 static void toggle_breakpoint(int n, int type, int len,
@@ -106,8 +107,8 @@ static void toggle_breakpoint(int n, int type, int len,
        ret = ptrace(PTRACE_POKEUSER, child_pid,
                     offsetof(struct user, u_debugreg[7]), dr7);
        if (ret) {
-               perror("Can't set dr7");
-               ksft_exit_fail();
+               ksft_print_msg("Can't set dr7: %s\n", strerror(errno));
+               exit(-1);
        }
 }
 
@@ -206,7 +207,7 @@ static void trigger_tests(void)
 
        ret = ptrace(PTRACE_TRACEME, 0, NULL, 0);
        if (ret) {
-               perror("Can't be traced?\n");
+               ksft_print_msg("Can't be traced? %s\n", strerror(errno));
                return;
        }
 
@@ -261,29 +262,30 @@ static void trigger_tests(void)
 
 static void check_success(const char *msg)
 {
-       const char *msg2;
        int child_nr_tests;
        int status;
+       int ret;
 
        /* Wait for the child to SIGTRAP */
        wait(&status);
 
-       msg2 = "Failed";
+       ret = 0;
 
        if (WSTOPSIG(status) == SIGTRAP) {
                child_nr_tests = ptrace(PTRACE_PEEKDATA, child_pid,
                                        &nr_tests, 0);
                if (child_nr_tests == nr_tests)
-                       msg2 = "Ok";
-               if (ptrace(PTRACE_POKEDATA, child_pid, &trapped, 1)) {
-                       perror("Can't poke\n");
-                       ksft_exit_fail();
-               }
+                       ret = 1;
+               if (ptrace(PTRACE_POKEDATA, child_pid, &trapped, 1))
+                       ksft_exit_fail_msg("Can't poke: %s\n", strerror(errno));
        }
 
        nr_tests++;
 
-       printf("%s [%s]\n", msg, msg2);
+       if (ret)
+               ksft_test_result_pass(msg);
+       else
+               ksft_test_result_fail(msg);
 }
 
 static void launch_instruction_breakpoints(char *buf, int local, int global)
@@ -294,7 +296,7 @@ static void launch_instruction_breakpoints(char *buf, int local, int global)
                set_breakpoint_addr(dummy_funcs[i], i);
                toggle_breakpoint(i, BP_X, 1, local, global, 1);
                ptrace(PTRACE_CONT, child_pid, NULL, 0);
-               sprintf(buf, "Test breakpoint %d with local: %d global: %d",
+               sprintf(buf, "Test breakpoint %d with local: %d global: %d\n",
                        i, local, global);
                check_success(buf);
                toggle_breakpoint(i, BP_X, 1, local, global, 0);
@@ -316,8 +318,9 @@ static void launch_watchpoints(char *buf, int mode, int len,
                set_breakpoint_addr(&dummy_var[i], i);
                toggle_breakpoint(i, mode, len, local, global, 1);
                ptrace(PTRACE_CONT, child_pid, NULL, 0);
-               sprintf(buf, "Test %s watchpoint %d with len: %d local: "
-                       "%d global: %d", mode_str, i, len, local, global);
+               sprintf(buf,
+                       "Test %s watchpoint %d with len: %d local: %d global: %d\n",
+                       mode_str, i, len, local, global);
                check_success(buf);
                toggle_breakpoint(i, mode, len, local, global, 0);
        }
@@ -378,10 +381,12 @@ int main(int argc, char **argv)
        pid_t pid;
        int ret;
 
+       ksft_print_header();
+
        pid = fork();
        if (!pid) {
                trigger_tests();
-               return 0;
+               exit(0);
        }
 
        child_pid = pid;
@@ -392,5 +397,5 @@ int main(int argc, char **argv)
 
        wait(NULL);
 
-       return ksft_exit_pass();
+       ksft_exit_pass();
 }
index 3897e996541e5d061c07e4b292e7368544fbc1ee..960d02100c26ebbf6214cde9965ba787817fd2c2 100644 (file)
@@ -43,19 +43,25 @@ static void child(int size, int wr)
        volatile uint8_t *addr = &var[32 + wr];
 
        if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) {
-               perror("ptrace(PTRACE_TRACEME) failed");
+               ksft_print_msg(
+                       "ptrace(PTRACE_TRACEME) failed: %s\n",
+                       strerror(errno));
                _exit(1);
        }
 
        if (raise(SIGSTOP) != 0) {
-               perror("raise(SIGSTOP) failed");
+               ksft_print_msg(
+                       "raise(SIGSTOP) failed: %s\n", strerror(errno));
                _exit(1);
        }
 
        if ((uintptr_t) addr % size) {
-               perror("Wrong address write for the given size\n");
+               ksft_print_msg(
+                        "Wrong address write for the given size: %s\n",
+                        strerror(errno));
                _exit(1);
        }
+
        switch (size) {
        case 1:
                *addr = 47;
@@ -100,12 +106,14 @@ static bool set_watchpoint(pid_t pid, int size, int wp)
        if (ptrace(PTRACE_SETREGSET, pid, NT_ARM_HW_WATCH, &iov) == 0)
                return true;
 
-       if (errno == EIO) {
-               printf("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) "
-                       "not supported on this hardware\n");
-               ksft_exit_skip();
-       }
-       perror("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) failed");
+       if (errno == EIO)
+               ksft_print_msg(
+                       "ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) not supported on this hardware: %s\n",
+                       strerror(errno));
+
+       ksft_print_msg(
+               "ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) failed: %s\n",
+               strerror(errno));
        return false;
 }
 
@@ -117,7 +125,8 @@ static bool run_test(int wr_size, int wp_size, int wr, int wp)
        pid_t wpid;
 
        if (pid < 0) {
-               perror("fork() failed");
+               ksft_test_result_fail(
+                       "fork() failed: %s\n", strerror(errno));
                return false;
        }
        if (pid == 0)
@@ -125,15 +134,17 @@ static bool run_test(int wr_size, int wp_size, int wr, int wp)
 
        wpid = waitpid(pid, &status, __WALL);
        if (wpid != pid) {
-               perror("waitpid() failed");
+               ksft_print_msg(
+                       "waitpid() failed: %s\n", strerror(errno));
                return false;
        }
        if (!WIFSTOPPED(status)) {
-               printf("child did not stop\n");
+               ksft_print_msg(
+                       "child did not stop: %s\n", strerror(errno));
                return false;
        }
        if (WSTOPSIG(status) != SIGSTOP) {
-               printf("child did not stop with SIGSTOP\n");
+               ksft_print_msg("child did not stop with SIGSTOP\n");
                return false;
        }
 
@@ -141,42 +152,49 @@ static bool run_test(int wr_size, int wp_size, int wr, int wp)
                return false;
 
        if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) {
-               perror("ptrace(PTRACE_SINGLESTEP) failed");
+               ksft_print_msg(
+                       "ptrace(PTRACE_SINGLESTEP) failed: %s\n",
+                       strerror(errno));
                return false;
        }
 
        alarm(3);
        wpid = waitpid(pid, &status, __WALL);
        if (wpid != pid) {
-               perror("waitpid() failed");
+               ksft_print_msg(
+                       "waitpid() failed: %s\n", strerror(errno));
                return false;
        }
        alarm(0);
        if (WIFEXITED(status)) {
-               printf("child did not single-step\t");
+               ksft_print_msg("child did not single-step\n");
                return false;
        }
        if (!WIFSTOPPED(status)) {
-               printf("child did not stop\n");
+               ksft_print_msg("child did not stop\n");
                return false;
        }
        if (WSTOPSIG(status) != SIGTRAP) {
-               printf("child did not stop with SIGTRAP\n");
+               ksft_print_msg("child did not stop with SIGTRAP\n");
                return false;
        }
        if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0) {
-               perror("ptrace(PTRACE_GETSIGINFO)");
+               ksft_print_msg(
+                       "ptrace(PTRACE_GETSIGINFO): %s\n",
+                       strerror(errno));
                return false;
        }
        if (siginfo.si_code != TRAP_HWBKPT) {
-               printf("Unexpected si_code %d\n", siginfo.si_code);
+               ksft_print_msg(
+                       "Unexpected si_code %d\n", siginfo.si_code);
                return false;
        }
 
        kill(pid, SIGKILL);
        wpid = waitpid(pid, &status, 0);
        if (wpid != pid) {
-               perror("waitpid() failed");
+               ksft_print_msg(
+                       "waitpid() failed: %s\n", strerror(errno));
                return false;
        }
        return true;
@@ -194,6 +212,8 @@ int main(int argc, char **argv)
        int wr, wp, size;
        bool result;
 
+       ksft_print_header();
+
        act.sa_handler = sigalrm;
        sigemptyset(&act.sa_mask);
        act.sa_flags = 0;
@@ -201,14 +221,16 @@ int main(int argc, char **argv)
        for (size = 1; size <= 32; size = size*2) {
                for (wr = 0; wr <= 32; wr = wr + size) {
                        for (wp = wr - size; wp <= wr + size; wp = wp + size) {
-                               printf("Test size = %d write offset = %d watchpoint offset = %d\t", size, wr, wp);
                                result = run_test(size, MIN(size, 8), wr, wp);
-                               if ((result && wr == wp) || (!result && wr != wp)) {
-                                       printf("[OK]\n");
-                                       ksft_inc_pass_cnt();
-                               } else {
-                                       printf("[FAILED]\n");
-                                       ksft_inc_fail_cnt();
+                               if ((result && wr == wp) ||
+                                   (!result && wr != wp))
+                                       ksft_test_result_pass(
+                                               "Test size = %d write offset = %d watchpoint offset = %d\n",
+                                               size, wr, wp);
+                               else {
+                                       ksft_test_result_fail(
+                                               "Test size = %d write offset = %d watchpoint offset = %d\n",
+                                               size, wr, wp);
                                        succeeded = false;
                                }
                        }
@@ -216,19 +238,18 @@ int main(int argc, char **argv)
        }
 
        for (size = 1; size <= 32; size = size*2) {
-               printf("Test size = %d write offset = %d watchpoint offset = -8\t", size, -size);
-
-               if (run_test(size, 8, -size, -8)) {
-                       printf("[OK]\n");
-                       ksft_inc_pass_cnt();
-               } else {
-                       printf("[FAILED]\n");
-                       ksft_inc_fail_cnt();
+               if (run_test(size, 8, -size, -8))
+                       ksft_test_result_pass(
+                               "Test size = %d write offset = %d watchpoint offset = -8\n",
+                               size, -size);
+               else {
+                       ksft_test_result_fail(
+                               "Test size = %d write offset = %d watchpoint offset = -8\n",
+                               size, -size);
                        succeeded = false;
                }
        }
 
-       ksft_print_cnts();
        if (succeeded)
                ksft_exit_pass();
        else
index 60b8a95dac266fc31f8640198e372a267506807d..3fece06e9f6464774f049aea11f65a185a9a8236 100644 (file)
@@ -37,17 +37,19 @@ void child(int cpu)
        CPU_ZERO(&set);
        CPU_SET(cpu, &set);
        if (sched_setaffinity(0, sizeof(set), &set) != 0) {
-               perror("sched_setaffinity() failed");
+               ksft_print_msg("sched_setaffinity() failed: %s\n",
+                       strerror(errno));
                _exit(1);
        }
 
        if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) {
-               perror("ptrace(PTRACE_TRACEME) failed");
+               ksft_print_msg("ptrace(PTRACE_TRACEME) failed: %s\n",
+                       strerror(errno));
                _exit(1);
        }
 
        if (raise(SIGSTOP) != 0) {
-               perror("raise(SIGSTOP) failed");
+               ksft_print_msg("raise(SIGSTOP) failed: %s\n", strerror(errno));
                _exit(1);
        }
 
@@ -61,7 +63,7 @@ bool run_test(int cpu)
        pid_t wpid;
 
        if (pid < 0) {
-               perror("fork() failed");
+               ksft_print_msg("fork() failed: %s\n", strerror(errno));
                return false;
        }
        if (pid == 0)
@@ -69,57 +71,64 @@ bool run_test(int cpu)
 
        wpid = waitpid(pid, &status, __WALL);
        if (wpid != pid) {
-               perror("waitpid() failed");
+               ksft_print_msg("waitpid() failed: %s\n", strerror(errno));
                return false;
        }
        if (!WIFSTOPPED(status)) {
-               printf("child did not stop\n");
+               ksft_print_msg("child did not stop: %s\n", strerror(errno));
                return false;
        }
        if (WSTOPSIG(status) != SIGSTOP) {
-               printf("child did not stop with SIGSTOP\n");
+               ksft_print_msg("child did not stop with SIGSTOP: %s\n",
+                       strerror(errno));
                return false;
        }
 
        if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0) {
                if (errno == EIO) {
-                       printf("ptrace(PTRACE_SINGLESTEP) not supported on this architecture\n");
-                       ksft_exit_skip();
+                       ksft_exit_skip(
+                               "ptrace(PTRACE_SINGLESTEP) not supported on this architecture: %s\n",
+                               strerror(errno));
                }
-               perror("ptrace(PTRACE_SINGLESTEP) failed");
+               ksft_print_msg("ptrace(PTRACE_SINGLESTEP) failed: %s\n",
+                       strerror(errno));
                return false;
        }
 
        wpid = waitpid(pid, &status, __WALL);
        if (wpid != pid) {
-               perror("waitpid() failed");
+               ksft_print_msg("waitpid() failed: $s\n", strerror(errno));
                return false;
        }
        if (WIFEXITED(status)) {
-               printf("child did not single-step\n");
+               ksft_print_msg("child did not single-step: %s\n",
+                       strerror(errno));
                return false;
        }
        if (!WIFSTOPPED(status)) {
-               printf("child did not stop\n");
+               ksft_print_msg("child did not stop: %s\n", strerror(errno));
                return false;
        }
        if (WSTOPSIG(status) != SIGTRAP) {
-               printf("child did not stop with SIGTRAP\n");
+               ksft_print_msg("child did not stop with SIGTRAP: %s\n",
+                       strerror(errno));
                return false;
        }
 
        if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) {
-               perror("ptrace(PTRACE_CONT) failed");
+               ksft_print_msg("ptrace(PTRACE_CONT) failed: %s\n",
+                       strerror(errno));
                return false;
        }
 
        wpid = waitpid(pid, &status, __WALL);
        if (wpid != pid) {
-               perror("waitpid() failed");
+               ksft_print_msg("waitpid() failed: %s\n", strerror(errno));
                return false;
        }
        if (!WIFEXITED(status)) {
-               printf("child did not exit after PTRACE_CONT\n");
+               ksft_print_msg("child did not exit after PTRACE_CONT: %s\n",
+                       strerror(errno));
                return false;
        }
 
@@ -135,28 +144,21 @@ void suspend(void)
        struct itimerspec spec = {};
 
        power_state_fd = open("/sys/power/state", O_RDWR);
-       if (power_state_fd < 0) {
-               perror("open(\"/sys/power/state\") failed (is this test running as root?)");
-               ksft_exit_fail();
-       }
+       if (power_state_fd < 0)
+               ksft_exit_fail_msg(
+                       "open(\"/sys/power/state\") failed (is this test running as root?)\n");
 
        timerfd = timerfd_create(CLOCK_BOOTTIME_ALARM, 0);
-       if (timerfd < 0) {
-               perror("timerfd_create() failed");
-               ksft_exit_fail();
-       }
+       if (timerfd < 0)
+               ksft_exit_fail_msg("timerfd_create() failed\n");
 
        spec.it_value.tv_sec = 5;
        err = timerfd_settime(timerfd, 0, &spec, NULL);
-       if (err < 0) {
-               perror("timerfd_settime() failed");
-               ksft_exit_fail();
-       }
+       if (err < 0)
+               ksft_exit_fail_msg("timerfd_settime() failed\n");
 
-       if (write(power_state_fd, "mem", strlen("mem")) != strlen("mem")) {
-               perror("entering suspend failed");
-               ksft_exit_fail();
-       }
+       if (write(power_state_fd, "mem", strlen("mem")) != strlen("mem"))
+               ksft_exit_fail_msg("Failed to enter Suspend state\n");
 
        close(timerfd);
        close(power_state_fd);
@@ -171,6 +173,8 @@ int main(int argc, char **argv)
        int err;
        int cpu;
 
+       ksft_print_header();
+
        while ((opt = getopt(argc, argv, "n")) != -1) {
                switch (opt) {
                case 'n':
@@ -187,10 +191,8 @@ int main(int argc, char **argv)
                suspend();
 
        err = sched_getaffinity(0, sizeof(available_cpus), &available_cpus);
-       if (err < 0) {
-               perror("sched_getaffinity() failed");
-               ksft_exit_fail();
-       }
+       if (err < 0)
+               ksft_exit_fail_msg("sched_getaffinity() failed\n");
 
        for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
                bool test_success;
@@ -199,18 +201,14 @@ int main(int argc, char **argv)
                        continue;
 
                test_success = run_test(cpu);
-               printf("CPU %d: ", cpu);
                if (test_success) {
-                       printf("[OK]\n");
-                       ksft_inc_pass_cnt();
+                       ksft_test_result_pass("CPU %d\n", cpu);
                } else {
-                       printf("[FAILED]\n");
-                       ksft_inc_fail_cnt();
+                       ksft_test_result_fail("CPU %d\n", cpu);
                        succeeded = false;
                }
        }
 
-       ksft_print_cnts();
        if (succeeded)
                ksft_exit_pass();
        else
index 10a21a958aaf72cc600da3d46bca0102f0c11142..763f37fecfb86b5ba37ee4eae409712939d5edb8 100644 (file)
@@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void)
 
        if (chdir(cwd) != 0)
                err(1, "chdir to private tmpfs");
-
-       if (umount2(".", MNT_DETACH) != 0)
-               err(1, "detach private tmpfs");
 }
 
 static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
@@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path)
                        err(1, "chown");
                if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
                        err(1, "chmod");
-}
+       }
 
        capng_get_caps_process();
 
@@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path)
        } else {
                printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
                exec_other_validate_cap("./validate_cap_sgidnonroot",
-                                               false, false, true, false);
+                                       false, false, true, false);
 
                if (fork_wait()) {
                        printf("[RUN]\tNon-root +ia, sgidroot => i\n");
index 717581145cfcf9dbf8ec129c81c26ce81d4d9ddf..14a03ea1e21d137efcbdc5efaba5d916585eeb2d 100755 (executable)
@@ -250,7 +250,7 @@ run_test() { # testfile
   local testlog=`mktemp $LOG_DIR/${testname}-log.XXXXXX`
   export TMPDIR=`mktemp -d /tmp/ftracetest-dir.XXXXXX`
   testcase $1
-  echo "execute: "$1 > $testlog
+  echo "execute$INSTANCE: "$1 > $testlog
   SIG_RESULT=0
   if [ $VERBOSE -ge 2 ]; then
     __run_test $1 2>> $testlog | tee -a $testlog
index 0bb5df3c00d41701f7392c73c31c95b10867f1d8..15e2d3fe1731788bb4fa66163ff8830d4db2933e 100644 (file)
@@ -28,7 +28,9 @@ echo '*:*' > set_event
 
 yield
 
-count=`cat trace | grep -v ^# | wc -l`
+echo 0 > tracing_on
+
+count=`head -n 128 trace | grep -v ^# | wc -l`
 if [ $count -eq 0 ]; then
     fail "none of events are recorded"
 fi
@@ -36,10 +38,12 @@ fi
 do_reset
 
 echo 1 > events/enable
+echo 1 > tracing_on
 
 yield
 
-count=`cat trace | grep -v ^# | wc -l`
+echo 0 > tracing_on
+count=`head -n 128 trace | grep -v ^# | wc -l`
 if [ $count -eq 0 ]; then
     fail "none of events are recorded"
 fi
index 9dcd0ca1f49cf30557f08e60fea330d2c275456b..8095e122daa9763564654455191ddb9af923aff4 100644 (file)
@@ -11,17 +11,6 @@ fi
 disable_tracing
 clear_trace
 
-# filter by ?, schedule is always good
-if ! echo "sch?dule" > set_ftrace_filter; then
-    # test for powerpc 64
-    if ! echo ".sch?dule" > set_ftrace_filter; then
-       fail "can not enable schedule filter"
-    fi
-    cat set_ftrace_filter | grep '^.schedule$'
-else
-    cat set_ftrace_filter | grep '^schedule$'
-fi
-
 ftrace_filter_check() { # glob grep
   echo "$1" > set_ftrace_filter
   cut -f1 -d" " set_ftrace_filter > $TMPDIR/actual
@@ -39,11 +28,28 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
 # filter by *, end match
 ftrace_filter_check 'schedule*' '^schedule.*$'
 
+# Advanced full-glob matching feature is recently supported.
+# Skip the tests if we are sure the kernel does not support it.
+if grep -q 'accepts: .* glob-matching-pattern' README ; then
+
 # filter by *, both side match
 ftrace_filter_check 'sch*ule' '^sch.*ule$'
 
 # filter by char class.
 ftrace_filter_check '[Ss]y[Ss]_*' '^[Ss]y[Ss]_.*$'
 
+# filter by ?, schedule is always good
+if ! echo "sch?dule" > set_ftrace_filter; then
+    # test for powerpc 64
+    if ! echo ".sch?dule" > set_ftrace_filter; then
+       fail "can not enable schedule filter"
+    fi
+    cat set_ftrace_filter | grep '^.schedule$'
+else
+    cat set_ftrace_filter | grep '^schedule$'
+fi
+
+fi
+
 echo > set_ftrace_filter
 enable_tracing
index aa31368851c952e2f6e9dc78dd4a4915a36450fc..77dfb6b481861cf14cdd722352f7827ae9aaebd3 100644 (file)
@@ -72,6 +72,15 @@ run_enable_disable() {
     test_event_enabled $check_disable
 
     echo "schedule:${enable}_event:$EVENT" > set_ftrace_filter
+    if [ -d ../../instances ]; then # Check instances
+       cur=`cat set_ftrace_filter`
+       top=`cat ../../set_ftrace_filter`
+       if [ "$cur" = "$top" ]; then
+           echo "This kernel is too old to support per instance filter"
+           reset_ftrace_filter
+           exit_unsupported
+       fi
+    fi
 
     echo " make sure it works 5 times"
 
index c8e02ec01eaf25640a4471dc522b4bb8916c2130..7a9ab4ff83b6fc27a78a4fde2b0a2c54855e9ab8 100644 (file)
@@ -63,6 +63,10 @@ fi
 
 # powerpc uses .schedule
 func="schedule"
+available_file=available_filter_functions
+if [ -d ../../instances -a -f ../../available_filter_functions ]; then
+   available_file=../../available_filter_functions
+fi
 x=`grep '^\.schedule$' available_filter_functions | wc -l`
 if [ "$x" -eq 1 ]; then
    func=".schedule"
@@ -71,6 +75,15 @@ fi
 echo '** SET TRACEOFF'
 
 echo "$func:traceoff" > set_ftrace_filter
+if [ -d ../../instances ]; then # Check instances
+    cur=`cat set_ftrace_filter`
+    top=`cat ../../set_ftrace_filter`
+    if [ "$cur" = "$top" ]; then
+       echo "This kernel is too old to support per instance filter"
+       reset_ftrace_filter
+       exit_unsupported
+    fi
+fi
 
 cnt=`grep schedule set_ftrace_filter | wc -l`
 if [ $cnt -ne 1 ]; then
@@ -90,11 +103,11 @@ if [ $on != "0" ]; then
     fail "Tracing is not off"
 fi
 
-line1=`cat trace | tail -1`
+csum1=`md5sum trace`
 sleep $SLEEP_TIME
-line2=`cat trace | tail -1`
+csum2=`md5sum trace`
 
-if [ "$line1" != "$line2" ]; then
+if [ "$csum1" != "$csum2" ]; then
     fail "Tracing file is still changing"
 fi
 
index c73db7863adbf41b51bd6b88ca79881801d59943..8a353314dc9b2a80c4ec28d377d5a9270e977d32 100644 (file)
@@ -82,7 +82,10 @@ rmdir foo
 if [ -d foo ]; then
         fail "foo still exists"
 fi
-
+if grep -q "schedule:enable_event:sched:sched_switch" ../set_ftrace_filter; then
+       echo "Older kernel detected. Cleanup filter"
+       echo '!schedule:enable_event:sched:sched_switch' > ../set_ftrace_filter
+fi
 
 instance_slam() {
     while :; do
index 57abdf1caabf71fb1d6a42cd9403633fe0333729..7ec6f2639ad6e6772bda7c30ee33a174e0fb103d 100644 (file)
@@ -2,6 +2,7 @@
 # description: Kretprobe dynamic event with maxactive
 
 [ -f kprobe_events ] || exit_unsupported # this is configurable
+grep -q 'r\[maxactive\]' README || exit_unsupported # this is older version
 
 echo > kprobe_events
 
index 3da06ad2399610e1d93b7a1cb0abc8b659ce52d5..d24ab7421e739897e248150f3fd82143eb970f14 100644 (file)
@@ -32,6 +32,7 @@
 #include "futextest.h"
 #include "logging.h"
 
+#define TEST_NAME "futex-requeue-pi"
 #define MAX_WAKE_ITERS 1000
 #define THREAD_MAX 10
 #define SIGNAL_PERIOD_US 100
@@ -404,6 +405,6 @@ int main(int argc, char *argv[])
         */
        ret = unit_test(broadcast, locked, owner, timeout_ns);
 
-       print_result(ret);
+       print_result(TEST_NAME, ret);
        return ret;
 }
index d5e4f2c4da2a2d22bc8dc7ced47611b965b6b350..e0a798ad0d21267bee7b2fa6e28810cd90cfa3b0 100644 (file)
@@ -30,6 +30,8 @@
 #include "futextest.h"
 #include "logging.h"
 
+#define TEST_NAME "futex-requeue-pi-mismatched-ops"
+
 futex_t f1 = FUTEX_INITIALIZER;
 futex_t f2 = FUTEX_INITIALIZER;
 int child_ret = 0;
@@ -130,6 +132,6 @@ int main(int argc, char *argv[])
 
  out:
        /* If the kernel crashes, we shouldn't return at all. */
-       print_result(ret);
+       print_result(TEST_NAME, ret);
        return ret;
 }
index 3d7dc6afc3f8f9459ca407ee5a16b1b2c9a8119b..982f83577501e2ab70d18dfefe2c1c0901f35941 100644 (file)
@@ -32,6 +32,7 @@
 #include "futextest.h"
 #include "logging.h"
 
+#define TEST_NAME "futex-requeue-pi-signal-restart"
 #define DELAY_US 100
 
 futex_t f1 = FUTEX_INITIALIZER;
@@ -218,6 +219,6 @@ int main(int argc, char *argv[])
        if (ret == RET_PASS && waiter_ret)
                ret = waiter_ret;
 
-       print_result(ret);
+       print_result(TEST_NAME, ret);
        return ret;
 }
index 5f687f2474546cee528fa65d953a7140b9fa5b63..bdc48dc047e5575214013aeddaac2f3835728e05 100644 (file)
@@ -34,6 +34,7 @@
 #include "logging.h"
 #include "futextest.h"
 
+#define TEST_NAME "futex-wait-private-mapped-file"
 #define PAGE_SZ 4096
 
 char pad[PAGE_SZ] = {1};
@@ -60,7 +61,7 @@ void *thr_futex_wait(void *arg)
        ret = futex_wait(&val, 1, &wait_timeout, 0);
        if (ret && errno != EWOULDBLOCK && errno != ETIMEDOUT) {
                error("futex error.\n", errno);
-               print_result(RET_ERROR);
+               print_result(TEST_NAME, RET_ERROR);
                exit(RET_ERROR);
        }
 
@@ -120,6 +121,6 @@ int main(int argc, char **argv)
        pthread_join(thr, NULL);
 
  out:
-       print_result(ret);
+       print_result(TEST_NAME, ret);
        return ret;
 }
index ab428ca894de00045e4fcc5552c03505e26680a2..6aadd560366e200e1727b82864d2295dd22f70e4 100644 (file)
@@ -27,6 +27,8 @@
 #include "futextest.h"
 #include "logging.h"
 
+#define TEST_NAME "futex-wait-timeout"
+
 static long timeout_ns = 100000;       /* 100us default timeout */
 
 void usage(char *prog)
@@ -81,6 +83,6 @@ int main(int argc, char *argv[])
                ret = RET_FAIL;
        }
 
-       print_result(ret);
+       print_result(TEST_NAME, ret);
        return ret;
 }
index fe7aee96844b81df08441460495b7aab5246ca4d..d237a8b702f0c8bbd21fda343fd9bd04b07054b2 100644 (file)
@@ -36,6 +36,7 @@
 #include "logging.h"
 #include "futextest.h"
 
+#define TEST_NAME "futex-wait-uninitialized-heap"
 #define WAIT_US 5000000
 
 static int child_blocked = 1;
@@ -119,6 +120,6 @@ int main(int argc, char **argv)
        }
 
  out:
-       print_result(ret);
+       print_result(TEST_NAME, ret);
        return ret;
 }
index b6b02744882534b794db600c82216abc65ece40f..9a2c56fa73056e43630aaee632f3d4661f0d7d2a 100644 (file)
@@ -28,6 +28,7 @@
 #include "futextest.h"
 #include "logging.h"
 
+#define TEST_NAME "futex-wait-wouldblock"
 #define timeout_ns 100000
 
 void usage(char *prog)
@@ -74,6 +75,6 @@ int main(int argc, char *argv[])
                ret = RET_FAIL;
        }
 
-       print_result(ret);
+       print_result(TEST_NAME, ret);
        return ret;
 }
index e14469103f073d071f5e4b43c845440865f72287..4e7944984fbbb541b56226a07c1b5a1e0c5ede7a 100644 (file)
@@ -107,7 +107,7 @@ void log_verbosity(int level)
  *
  * print_result() is primarily intended for functional tests.
  */
-void print_result(int ret)
+void print_result(const char *test_name, int ret)
 {
        const char *result = "Unknown return code";
 
@@ -124,7 +124,7 @@ void print_result(int ret)
                result = FAIL;
                break;
        }
-       printf("Result: %s\n", result);
+       printf("selftests: %s [%s]\n", test_name, result);
 }
 
 /* log level macros */
diff --git a/tools/testing/selftests/intel_pstate/.gitignore b/tools/testing/selftests/intel_pstate/.gitignore
new file mode 100644 (file)
index 0000000..3bfcbae
--- /dev/null
@@ -0,0 +1,2 @@
+aperf
+msr
index 19678e90efb25d1231e6bdfd97f8b6ea99a6f6bf..849a90ffe8dd2d311b9b66bed9058fefdf28ba7d 100644 (file)
@@ -1,5 +1,5 @@
 CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
-LDFLAGS := $(LDFLAGS) -lm
+LDLIBS := $(LDLIBS) -lm
 
 TEST_GEN_FILES := msr aperf
 
index ef1c80d67ac73307957e308a879eca1e556874fb..08e90c2cc5cb70be2b2fb1d2b26fc80999e04aef 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <stdlib.h>
 #include <unistd.h>
+#include <stdarg.h>
 
 /* define kselftest exit codes */
 #define KSFT_PASS  0
@@ -31,38 +32,125 @@ struct ksft_count {
 
 static struct ksft_count ksft_cnt;
 
+static inline int ksft_test_num(void)
+{
+       return ksft_cnt.ksft_pass + ksft_cnt.ksft_fail +
+               ksft_cnt.ksft_xfail + ksft_cnt.ksft_xpass +
+               ksft_cnt.ksft_xskip;
+}
+
 static inline void ksft_inc_pass_cnt(void) { ksft_cnt.ksft_pass++; }
 static inline void ksft_inc_fail_cnt(void) { ksft_cnt.ksft_fail++; }
 static inline void ksft_inc_xfail_cnt(void) { ksft_cnt.ksft_xfail++; }
 static inline void ksft_inc_xpass_cnt(void) { ksft_cnt.ksft_xpass++; }
 static inline void ksft_inc_xskip_cnt(void) { ksft_cnt.ksft_xskip++; }
 
+static inline void ksft_print_header(void)
+{
+       printf("TAP version 13\n");
+}
+
 static inline void ksft_print_cnts(void)
 {
-       printf("Pass: %d Fail: %d Xfail: %d Xpass: %d, Xskip: %d\n",
-               ksft_cnt.ksft_pass, ksft_cnt.ksft_fail,
-               ksft_cnt.ksft_xfail, ksft_cnt.ksft_xpass,
-               ksft_cnt.ksft_xskip);
+       printf("1..%d\n", ksft_test_num());
+}
+
+static inline void ksft_print_msg(const char *msg, ...)
+{
+       va_list args;
+
+       va_start(args, msg);
+       printf("# ");
+       vprintf(msg, args);
+       va_end(args);
+}
+
+static inline void ksft_test_result_pass(const char *msg, ...)
+{
+       va_list args;
+
+       ksft_cnt.ksft_pass++;
+
+       va_start(args, msg);
+       printf("ok %d ", ksft_test_num());
+       vprintf(msg, args);
+       va_end(args);
+}
+
+static inline void ksft_test_result_fail(const char *msg, ...)
+{
+       va_list args;
+
+       ksft_cnt.ksft_fail++;
+
+       va_start(args, msg);
+       printf("not ok %d ", ksft_test_num());
+       vprintf(msg, args);
+       va_end(args);
+}
+
+static inline void ksft_test_result_skip(const char *msg, ...)
+{
+       va_list args;
+
+       ksft_cnt.ksft_xskip++;
+
+       va_start(args, msg);
+       printf("ok %d # skip ", ksft_test_num());
+       vprintf(msg, args);
+       va_end(args);
 }
 
 static inline int ksft_exit_pass(void)
 {
+       ksft_print_cnts();
        exit(KSFT_PASS);
 }
+
 static inline int ksft_exit_fail(void)
 {
+       printf("Bail out!\n");
+       ksft_print_cnts();
+       exit(KSFT_FAIL);
+}
+
+static inline int ksft_exit_fail_msg(const char *msg, ...)
+{
+       va_list args;
+
+       va_start(args, msg);
+       printf("Bail out! ");
+       vprintf(msg, args);
+       va_end(args);
+
+       ksft_print_cnts();
        exit(KSFT_FAIL);
 }
+
 static inline int ksft_exit_xfail(void)
 {
+       ksft_print_cnts();
        exit(KSFT_XFAIL);
 }
+
 static inline int ksft_exit_xpass(void)
 {
+       ksft_print_cnts();
        exit(KSFT_XPASS);
 }
-static inline int ksft_exit_skip(void)
+
+static inline int ksft_exit_skip(const char *msg, ...)
 {
+       if (msg) {
+               va_list args;
+
+               va_start(args, msg);
+               printf("1..%d # Skipped: ", ksft_test_num());
+               vprintf(msg, args);
+               va_end(args);
+       } else {
+               ksft_print_cnts();
+       }
        exit(KSFT_SKIP);
 }
 
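
To illustrate how the variadic ksft_* helpers above fit together in a
converted test, here is a hedged, self-contained sketch; the checks and
messages are made up for illustration and do not correspond to any test in
this series:

/* Illustrative sketch of a test using the TAP13 ksft_* API; not from this series. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "kselftest.h"

int main(void)
{
	ksft_print_header();		/* emits "TAP version 13" */

	if (geteuid() != 0)
		ksft_exit_skip("not running as root\n");

	if (access("/proc/self/status", R_OK) == 0)
		ksft_test_result_pass("read access to /proc/self/status\n");
	else
		ksft_test_result_fail("read access to /proc/self/status: %s\n",
				      strerror(errno));

	ksft_exit_pass();		/* prints the "1..N" plan and exits */
}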
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
new file mode 100644 (file)
index 0000000..c56f72e
--- /dev/null
@@ -0,0 +1,726 @@
+/*
+ * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by the GPLv2 license.
+ *
+ * kselftest_harness.h: simple C unit test helper.
+ *
+ * See documentation in Documentation/dev-tools/kselftest.rst
+ *
+ * API inspired by code.google.com/p/googletest
+ */
+
+/**
+ * DOC: example
+ *
+ * .. code-block:: c
+ *
+ *    #include "../kselftest_harness.h"
+ *
+ *    TEST(standalone_test) {
+ *      do_some_stuff;
+ *      EXPECT_GT(10, stuff) {
+ *         stuff_state_t state;
+ *         enumerate_stuff_state(&state);
+ *         TH_LOG("expectation failed with state: %s", state.msg);
+ *      }
+ *      more_stuff;
+ *      ASSERT_NE(some_stuff, NULL) TH_LOG("how did it happen?!");
+ *      last_stuff;
+ *      EXPECT_EQ(0, last_stuff);
+ *    }
+ *
+ *    FIXTURE(my_fixture) {
+ *      mytype_t *data;
+ *      int awesomeness_level;
+ *    };
+ *    FIXTURE_SETUP(my_fixture) {
+ *      self->data = mytype_new();
+ *      ASSERT_NE(NULL, self->data);
+ *    }
+ *    FIXTURE_TEARDOWN(my_fixture) {
+ *      mytype_free(self->data);
+ *    }
+ *    TEST_F(my_fixture, data_is_good) {
+ *      EXPECT_EQ(1, is_my_data_good(self->data));
+ *    }
+ *
+ *    TEST_HARNESS_MAIN
+ */
+
+#ifndef __KSELFTEST_HARNESS_H
+#define __KSELFTEST_HARNESS_H
+
+#define _GNU_SOURCE
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+
+/* Utilities exposed to the test definitions */
+#ifndef TH_LOG_STREAM
+#  define TH_LOG_STREAM stderr
+#endif
+
+#ifndef TH_LOG_ENABLED
+#  define TH_LOG_ENABLED 1
+#endif
+
+/**
+ * TH_LOG(fmt, ...)
+ *
+ * @fmt: format string
+ * @...: optional arguments
+ *
+ * .. code-block:: c
+ *
+ *     TH_LOG(format, ...)
+ *
+ * Optional debug logging function available for use in tests.
+ * Logging may be enabled or disabled by defining TH_LOG_ENABLED.
+ * E.g., #define TH_LOG_ENABLED 1
+ *
+ * If no definition is provided, logging is enabled by default.
+ */
+#define TH_LOG(fmt, ...) do { \
+       if (TH_LOG_ENABLED) \
+               __TH_LOG(fmt, ##__VA_ARGS__); \
+} while (0)
+
+/* Unconditional logger for internal use. */
+#define __TH_LOG(fmt, ...) \
+               fprintf(TH_LOG_STREAM, "%s:%d:%s:" fmt "\n", \
+                       __FILE__, __LINE__, _metadata->name, ##__VA_ARGS__)
+
+/**
+ * TEST(test_name) - Defines the test function and creates the registration
+ * stub
+ *
+ * @test_name: test name
+ *
+ * .. code-block:: c
+ *
+ *     TEST(name) { implementation }
+ *
+ * Defines a test by name.
+ * Names must be unique and tests must not be run in parallel.  The
+ * implementation containing block is a function and scoping should be treated
+ * as such.  Returning early may be performed with a bare "return;" statement.
+ *
+ * EXPECT_* and ASSERT_* are valid in a TEST() { } context.
+ */
+#define TEST(test_name) __TEST_IMPL(test_name, -1)
+
+/**
+ * TEST_SIGNAL(test_name, signal)
+ *
+ * @test_name: test name
+ * @signal: signal number
+ *
+ * .. code-block:: c
+ *
+ *     TEST_SIGNAL(name, signal) { implementation }
+ *
+ * Defines a test by name and the expected term signal.
+ * Names must be unique and tests must not be run in parallel.  The
+ * implementation containing block is a function and scoping should be treated
+ * as such.  Returning early may be performed with a bare "return;" statement.
+ *
+ * EXPECT_* and ASSERT_* are valid in a TEST() { } context.
+ */
+#define TEST_SIGNAL(test_name, signal) __TEST_IMPL(test_name, signal)
+
+#define __TEST_IMPL(test_name, _signal) \
+       static void test_name(struct __test_metadata *_metadata); \
+       static struct __test_metadata _##test_name##_object = \
+               { name: "global." #test_name, \
+                 fn: &test_name, termsig: _signal }; \
+       static void __attribute__((constructor)) _register_##test_name(void) \
+       { \
+               __register_test(&_##test_name##_object); \
+       } \
+       static void test_name( \
+               struct __test_metadata __attribute__((unused)) *_metadata)
+
+/**
+ * FIXTURE_DATA(datatype_name) - Wraps the struct name so we have one less
+ * argument to pass around
+ *
+ * @datatype_name: datatype name
+ *
+ * .. code-block:: c
+ *
+ *     FIXTURE_DATA(datatype name)
+ *
+ * This call may be used when the type of the fixture data
+ * is needed.  In general, this should not be needed unless
+ * the *self* is being passed to a helper directly.
+ */
+#define FIXTURE_DATA(datatype_name) struct _test_data_##datatype_name
+
+/**
+ * FIXTURE(fixture_name) - Called once per fixture to setup the data and
+ * register
+ *
+ * @fixture_name: fixture name
+ *
+ * .. code-block:: c
+ *
+ *     FIXTURE(datatype name) {
+ *       type property1;
+ *       ...
+ *     };
+ *
+ * Defines the data provided to TEST_F()-defined tests as *self*.  It should be
+ * populated and cleaned up using FIXTURE_SETUP() and FIXTURE_TEARDOWN().
+ */
+#define FIXTURE(fixture_name) \
+       static void __attribute__((constructor)) \
+       _register_##fixture_name##_data(void) \
+       { \
+               __fixture_count++; \
+       } \
+       FIXTURE_DATA(fixture_name)
+
+/**
+ * FIXTURE_SETUP(fixture_name) - Prepares the setup function for the fixture.
+ * *_metadata* is included so that ASSERT_* work as a convenience
+ *
+ * @fixture_name: fixture name
+ *
+ * .. code-block:: c
+ *
+ *     FIXTURE_SETUP(fixture name) { implementation }
+ *
+ * Populates the required "setup" function for a fixture.  An instance of the
+ * datatype defined with FIXTURE_DATA() will be exposed as *self* for the
+ * implementation.
+ *
+ * ASSERT_* are valid for use in this context and will preempt the execution
+ * of any dependent fixture tests.
+ *
+ * A bare "return;" statement may be used to return early.
+ */
+#define FIXTURE_SETUP(fixture_name) \
+       void fixture_name##_setup( \
+               struct __test_metadata __attribute__((unused)) *_metadata, \
+               FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
+/**
+ * FIXTURE_TEARDOWN(fixture_name)
+ *
+ * @fixture_name: fixture name
+ *
+ * .. code-block:: c
+ *
+ *     FIXTURE_TEARDOWN(fixture name) { implementation }
+ *
+ * Populates the required "teardown" function for a fixture.  An instance of the
+ * datatype defined with FIXTURE_DATA() will be exposed as *self* for the
+ * implementation to clean up.
+ *
+ * A bare "return;" statement may be used to return early.
+ */
+#define FIXTURE_TEARDOWN(fixture_name) \
+       void fixture_name##_teardown( \
+               struct __test_metadata __attribute__((unused)) *_metadata, \
+               FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
+
+/**
+ * TEST_F(fixture_name, test_name) - Emits test registration and helpers for
+ * fixture-based test cases
+ *
+ * @fixture_name: fixture name
+ * @test_name: test name
+ *
+ * .. code-block:: c
+ *
+ *     TEST_F(fixture, name) { implementation }
+ *
+ * Defines a test that depends on a fixture (e.g., is part of a test case).
+ * Very similar to TEST() except that *self* is the setup instance of fixture's
+ * datatype exposed for use by the implementation.
+ */
+/* TODO(wad) register fixtures on dedicated test lists. */
+#define TEST_F(fixture_name, test_name) \
+       __TEST_F_IMPL(fixture_name, test_name, -1)
+
+#define TEST_F_SIGNAL(fixture_name, test_name, signal) \
+       __TEST_F_IMPL(fixture_name, test_name, signal)
+
+#define __TEST_F_IMPL(fixture_name, test_name, signal) \
+       static void fixture_name##_##test_name( \
+               struct __test_metadata *_metadata, \
+               FIXTURE_DATA(fixture_name) *self); \
+       static inline void wrapper_##fixture_name##_##test_name( \
+               struct __test_metadata *_metadata) \
+       { \
+               /* fixture data is alloced, setup, and torn down per call. */ \
+               FIXTURE_DATA(fixture_name) self; \
+               memset(&self, 0, sizeof(FIXTURE_DATA(fixture_name))); \
+               fixture_name##_setup(_metadata, &self); \
+               /* Let setup failure terminate early. */ \
+               if (!_metadata->passed) \
+                       return; \
+               fixture_name##_##test_name(_metadata, &self); \
+               fixture_name##_teardown(_metadata, &self); \
+       } \
+       static struct __test_metadata \
+                     _##fixture_name##_##test_name##_object = { \
+               name: #fixture_name "." #test_name, \
+               fn: &wrapper_##fixture_name##_##test_name, \
+               termsig: signal, \
+        }; \
+       static void __attribute__((constructor)) \
+                       _register_##fixture_name##_##test_name(void) \
+       { \
+               __register_test(&_##fixture_name##_##test_name##_object); \
+       } \
+       static void fixture_name##_##test_name( \
+               struct __test_metadata __attribute__((unused)) *_metadata, \
+               FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
+
+/**
+ * TEST_HARNESS_MAIN - Simple wrapper to run the test harness
+ *
+ * .. code-block:: c
+ *
+ *     TEST_HARNESS_MAIN
+ *
+ * Use once to append a main() to the test file.
+ */
+#define TEST_HARNESS_MAIN \
+       static void __attribute__((constructor)) \
+       __constructor_order_last(void) \
+       { \
+               if (!__constructor_order) \
+                       __constructor_order = _CONSTRUCTOR_ORDER_BACKWARD; \
+       } \
+       int main(int argc, char **argv) { \
+               return test_harness_run(argc, argv); \
+       }
+
+/**
+ * DOC: operators
+ *
+ * Operators for use in TEST() and TEST_F().
+ * ASSERT_* calls will stop test execution immediately.
+ * EXPECT_* calls will emit a failure warning, note it, and continue.
+ */
+
+/**
+ * ASSERT_EQ(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_EQ(expected, measured): expected == measured
+ */
+#define ASSERT_EQ(expected, seen) \
+       __EXPECT(expected, seen, ==, 1)
+
+/**
+ * ASSERT_NE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_NE(expected, measured): expected != measured
+ */
+#define ASSERT_NE(expected, seen) \
+       __EXPECT(expected, seen, !=, 1)
+
+/**
+ * ASSERT_LT(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_LT(expected, measured): expected < measured
+ */
+#define ASSERT_LT(expected, seen) \
+       __EXPECT(expected, seen, <, 1)
+
+/**
+ * ASSERT_LE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_LE(expected, measured): expected <= measured
+ */
+#define ASSERT_LE(expected, seen) \
+       __EXPECT(expected, seen, <=, 1)
+
+/**
+ * ASSERT_GT(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_GT(expected, measured): expected > measured
+ */
+#define ASSERT_GT(expected, seen) \
+       __EXPECT(expected, seen, >, 1)
+
+/**
+ * ASSERT_GE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_GE(expected, measured): expected >= measured
+ */
+#define ASSERT_GE(expected, seen) \
+       __EXPECT(expected, seen, >=, 1)
+
+/**
+ * ASSERT_NULL(seen)
+ *
+ * @seen: measured value
+ *
+ * ASSERT_NULL(measured): NULL == measured
+ */
+#define ASSERT_NULL(seen) \
+       __EXPECT(NULL, seen, ==, 1)
+
+/**
+ * ASSERT_TRUE(seen)
+ *
+ * @seen: measured value
+ *
+ * ASSERT_TRUE(measured): measured != 0
+ */
+#define ASSERT_TRUE(seen) \
+       ASSERT_NE(0, seen)
+
+/**
+ * ASSERT_FALSE(seen)
+ *
+ * @seen: measured value
+ *
+ * ASSERT_FALSE(measured): measured == 0
+ */
+#define ASSERT_FALSE(seen) \
+       ASSERT_EQ(0, seen)
+
+/**
+ * ASSERT_STREQ(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_STREQ(expected, measured): !strcmp(expected, measured)
+ */
+#define ASSERT_STREQ(expected, seen) \
+       __EXPECT_STR(expected, seen, ==, 1)
+
+/**
+ * ASSERT_STRNE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_STRNE(expected, measured): strcmp(expected, measured)
+ */
+#define ASSERT_STRNE(expected, seen) \
+       __EXPECT_STR(expected, seen, !=, 1)
+
+/**
+ * EXPECT_EQ(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_EQ(expected, measured): expected == measured
+ */
+#define EXPECT_EQ(expected, seen) \
+       __EXPECT(expected, seen, ==, 0)
+
+/**
+ * EXPECT_NE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_NE(expected, measured): expected != measured
+ */
+#define EXPECT_NE(expected, seen) \
+       __EXPECT(expected, seen, !=, 0)
+
+/**
+ * EXPECT_LT(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_LT(expected, measured): expected < measured
+ */
+#define EXPECT_LT(expected, seen) \
+       __EXPECT(expected, seen, <, 0)
+
+/**
+ * EXPECT_LE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_LE(expected, measured): expected <= measured
+ */
+#define EXPECT_LE(expected, seen) \
+       __EXPECT(expected, seen, <=, 0)
+
+/**
+ * EXPECT_GT(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_GT(expected, measured): expected > measured
+ */
+#define EXPECT_GT(expected, seen) \
+       __EXPECT(expected, seen, >, 0)
+
+/**
+ * EXPECT_GE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_GE(expected, measured): expected >= measured
+ */
+#define EXPECT_GE(expected, seen) \
+       __EXPECT(expected, seen, >=, 0)
+
+/**
+ * EXPECT_NULL(seen)
+ *
+ * @seen: measured value
+ *
+ * EXPECT_NULL(measured): NULL == measured
+ */
+#define EXPECT_NULL(seen) \
+       __EXPECT(NULL, seen, ==, 0)
+
+/**
+ * EXPECT_TRUE(seen)
+ *
+ * @seen: measured value
+ *
+ * EXPECT_TRUE(measured): 0 != measured
+ */
+#define EXPECT_TRUE(seen) \
+       EXPECT_NE(0, seen)
+
+/**
+ * EXPECT_FALSE(seen)
+ *
+ * @seen: measured value
+ *
+ * EXPECT_FALSE(measured): 0 == measured
+ */
+#define EXPECT_FALSE(seen) \
+       EXPECT_EQ(0, seen)
+
+/**
+ * EXPECT_STREQ(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_STREQ(expected, measured): !strcmp(expected, measured)
+ */
+#define EXPECT_STREQ(expected, seen) \
+       __EXPECT_STR(expected, seen, ==, 0)
+
+/**
+ * EXPECT_STRNE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_STRNE(expected, measured): strcmp(expected, measured)
+ */
+#define EXPECT_STRNE(expected, seen) \
+       __EXPECT_STR(expected, seen, !=, 0)
+
+#define ARRAY_SIZE(a)  (sizeof(a) / sizeof(a[0]))
+
+/* Support an optional handler after an ASSERT_* or EXPECT_*.  The approach is
+ * not thread-safe, but it should be fine in most sane test scenarios.
+ *
+ * Using __bail(), which optionally abort()s, is the easiest way to early
+ * return while still providing an optional block to the API consumer.
+ */
+#define OPTIONAL_HANDLER(_assert) \
+       for (; _metadata->trigger;  _metadata->trigger = __bail(_assert))
+
+#define __EXPECT(_expected, _seen, _t, _assert) do { \
+       /* Avoid multiple evaluation of the cases */ \
+       __typeof__(_expected) __exp = (_expected); \
+       __typeof__(_seen) __seen = (_seen); \
+       if (!(__exp _t __seen)) { \
+               unsigned long long __exp_print = (uintptr_t)__exp; \
+               unsigned long long __seen_print = (uintptr_t)__seen; \
+               __TH_LOG("Expected %s (%llu) %s %s (%llu)", \
+                        #_expected, __exp_print, #_t, \
+                        #_seen, __seen_print); \
+               _metadata->passed = 0; \
+               /* Ensure the optional handler is triggered */ \
+               _metadata->trigger = 1; \
+       } \
+} while (0); OPTIONAL_HANDLER(_assert)
+
+#define __EXPECT_STR(_expected, _seen, _t, _assert) do { \
+       const char *__exp = (_expected); \
+       const char *__seen = (_seen); \
+       if (!(strcmp(__exp, __seen) _t 0))  { \
+               __TH_LOG("Expected '%s' %s '%s'.", __exp, #_t, __seen); \
+               _metadata->passed = 0; \
+               _metadata->trigger = 1; \
+       } \
+} while (0); OPTIONAL_HANDLER(_assert)
+
+/* Contains all the information for test execution and status checking. */
+struct __test_metadata {
+       const char *name;
+       void (*fn)(struct __test_metadata *);
+       int termsig;
+       int passed;
+       int trigger; /* extra handler after the evaluation */
+       struct __test_metadata *prev, *next;
+};
+
+/* Storage for the (global) tests to be run. */
+static struct __test_metadata *__test_list;
+static unsigned int __test_count;
+static unsigned int __fixture_count;
+static int __constructor_order;
+
+#define _CONSTRUCTOR_ORDER_FORWARD   1
+#define _CONSTRUCTOR_ORDER_BACKWARD -1
+
+/*
+ * Since constructors are called in reverse order, reverse the test
+ * list so tests are run in source declaration order.
+ * https://gcc.gnu.org/onlinedocs/gccint/Initialization.html
+ * However, it seems not all toolchains do this correctly, so use
+ * __constructor_order to detect which direction is called first
+ * and adjust list building logic to get things running in the right
+ * direction.
+ */
+static inline void __register_test(struct __test_metadata *t)
+{
+       __test_count++;
+       /* Circular linked list where only prev is circular. */
+       if (__test_list == NULL) {
+               __test_list = t;
+               t->next = NULL;
+               t->prev = t;
+               return;
+       }
+       if (__constructor_order == _CONSTRUCTOR_ORDER_FORWARD) {
+               t->next = NULL;
+               t->prev = __test_list->prev;
+               t->prev->next = t;
+               __test_list->prev = t;
+       } else {
+               t->next = __test_list;
+               t->next->prev = t;
+               t->prev = t;
+               __test_list = t;
+       }
+}
+
+static inline int __bail(int for_realz)
+{
+       if (for_realz)
+               abort();
+       return 0;
+}
+
+void __run_test(struct __test_metadata *t)
+{
+       pid_t child_pid;
+       int status;
+
+       t->passed = 1;
+       t->trigger = 0;
+       printf("[ RUN      ] %s\n", t->name);
+       child_pid = fork();
+       if (child_pid < 0) {
+               printf("ERROR SPAWNING TEST CHILD\n");
+               t->passed = 0;
+       } else if (child_pid == 0) {
+               t->fn(t);
+               _exit(t->passed);
+       } else {
+               /* TODO(wad) add timeout support. */
+               waitpid(child_pid, &status, 0);
+               if (WIFEXITED(status)) {
+                       t->passed = t->termsig == -1 ? WEXITSTATUS(status) : 0;
+                       if (t->termsig != -1) {
+                               fprintf(TH_LOG_STREAM,
+                                       "%s: Test exited normally "
+                                       "instead of by signal (code: %d)\n",
+                                       t->name,
+                                       WEXITSTATUS(status));
+                       }
+               } else if (WIFSIGNALED(status)) {
+                       t->passed = 0;
+                       if (WTERMSIG(status) == SIGABRT) {
+                               fprintf(TH_LOG_STREAM,
+                                       "%s: Test terminated by assertion\n",
+                                       t->name);
+                       } else if (WTERMSIG(status) == t->termsig) {
+                               t->passed = 1;
+                       } else {
+                               fprintf(TH_LOG_STREAM,
+                                       "%s: Test terminated unexpectedly "
+                                       "by signal %d\n",
+                                       t->name,
+                                       WTERMSIG(status));
+                       }
+               } else {
+                       fprintf(TH_LOG_STREAM,
+                               "%s: Test ended in some other way [%u]\n",
+                               t->name,
+                               status);
+               }
+       }
+       printf("[     %4s ] %s\n", (t->passed ? "OK" : "FAIL"), t->name);
+}
+
+static int test_harness_run(int __attribute__((unused)) argc,
+                           char __attribute__((unused)) **argv)
+{
+       struct __test_metadata *t;
+       int ret = 0;
+       unsigned int count = 0;
+       unsigned int pass_count = 0;
+
+       /* TODO(wad) add optional arguments similar to gtest. */
+       printf("[==========] Running %u tests from %u test cases.\n",
+              __test_count, __fixture_count + 1);
+       for (t = __test_list; t; t = t->next) {
+               count++;
+               __run_test(t);
+               if (t->passed)
+                       pass_count++;
+               else
+                       ret = 1;
+       }
+       printf("[==========] %u / %u tests passed.\n", pass_count, count);
+       printf("[  %s  ]\n", (ret ? "FAILED" : "PASSED"));
+       return ret;
+}
+
+static void __attribute__((constructor)) __constructor_order_first(void)
+{
+       if (!__constructor_order)
+               __constructor_order = _CONSTRUCTOR_ORDER_FORWARD;
+}
+
+#endif  /* __KSELFTEST_HARNESS_H */
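
For reference, a minimal sketch of how a selftest might use the harness defined above. The test and fixture names here are invented for illustration; the header is pulled in the same way seccomp_bpf.c does further down:

	#include <stdlib.h>
	#include "../kselftest_harness.h"

	TEST(standalone_example)
	{
		int x = 2 + 2;

		ASSERT_EQ(4, x);
		EXPECT_NE(0, x) TH_LOG("x unexpectedly ended up zero");
	}

	FIXTURE(buffer) {
		char *data;
	};

	FIXTURE_SETUP(buffer)
	{
		self->data = malloc(16);
		ASSERT_NE(NULL, self->data);
	}

	FIXTURE_TEARDOWN(buffer)
	{
		free(self->data);
	}

	TEST_F(buffer, starts_writable)
	{
		self->data[0] = '\0';
		EXPECT_STREQ("", self->data);
	}

	TEST_HARNESS_MAIN

Each TEST() and TEST_F() body runs in its own forked child (see __run_test() above), so a failed ASSERT_* aborts only that test rather than the whole run.
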
index 2da187b6ddad574374bf46f6ab756b9208e1f297..b073c22a3435a25ab829a9337636f5784174a247 100755 (executable)
@@ -1,5 +1,9 @@
 #!/bin/sh
 # Runs bitmap infrastructure tests using test_bitmap kernel module
+if ! /sbin/modprobe -q -n test_bitmap; then
+       echo "bitmap: [SKIP]"
+       exit 77
+fi
 
 if /sbin/modprobe -q test_bitmap; then
        /sbin/modprobe -q -r test_bitmap
index 4fdc70fe6980273f6058b27c6337dcaf187d47cf..cbf3b124bd94a2af428e13be8365ddb2bd709591 100755 (executable)
@@ -1,5 +1,9 @@
 #!/bin/sh
 # Runs printf infrastructure using test_printf kernel module
+if ! /sbin/modprobe -q -n test_printf; then
+       echo "printf: [SKIP]"
+       exit 77
+fi
 
 if /sbin/modprobe -q test_printf; then
        /sbin/modprobe -q -r test_printf
index 535f0fef4d0bb524f638774ffc199fde10c97497..21399fcf1a59125a613db1c330a8b782790fd4d9 100644 (file)
@@ -7,56 +7,63 @@
 
 #include "../kselftest.h"
 
-enum test_membarrier_status {
-       TEST_MEMBARRIER_PASS = 0,
-       TEST_MEMBARRIER_FAIL,
-       TEST_MEMBARRIER_SKIP,
-};
-
 static int sys_membarrier(int cmd, int flags)
 {
        return syscall(__NR_membarrier, cmd, flags);
 }
 
-static enum test_membarrier_status test_membarrier_cmd_fail(void)
+static int test_membarrier_cmd_fail(void)
 {
        int cmd = -1, flags = 0;
 
        if (sys_membarrier(cmd, flags) != -1) {
-               printf("membarrier: Wrong command should fail but passed.\n");
-               return TEST_MEMBARRIER_FAIL;
+               ksft_exit_fail_msg(
+                       "sys membarrier invalid command test: command = %d, flags = %d. Should fail, but passed\n",
+                       cmd, flags);
        }
-       return TEST_MEMBARRIER_PASS;
+
+       ksft_test_result_pass(
+               "sys membarrier invalid command test: command = %d, flags = %d. Failed as expected\n",
+               cmd, flags);
+       return 0;
 }
 
-static enum test_membarrier_status test_membarrier_flags_fail(void)
+static int test_membarrier_flags_fail(void)
 {
        int cmd = MEMBARRIER_CMD_QUERY, flags = 1;
 
        if (sys_membarrier(cmd, flags) != -1) {
-               printf("membarrier: Wrong flags should fail but passed.\n");
-               return TEST_MEMBARRIER_FAIL;
+               ksft_exit_fail_msg(
+                       "sys membarrier MEMBARRIER_CMD_QUERY invalid flags test: flags = %d. Should fail, but passed\n",
+                       flags);
        }
-       return TEST_MEMBARRIER_PASS;
+
+       ksft_test_result_pass(
+               "sys membarrier MEMBARRIER_CMD_QUERY invalid flags test: flags = %d. Failed as expected\n",
+               flags);
+       return 0;
 }
 
-static enum test_membarrier_status test_membarrier_success(void)
+static int test_membarrier_success(void)
 {
        int cmd = MEMBARRIER_CMD_SHARED, flags = 0;
+       const char *test_name = "sys membarrier MEMBARRIER_CMD_SHARED\n";
 
        if (sys_membarrier(cmd, flags) != 0) {
-               printf("membarrier: Executing MEMBARRIER_CMD_SHARED failed. %s.\n",
-                               strerror(errno));
-               return TEST_MEMBARRIER_FAIL;
+               ksft_exit_fail_msg(
+                       "sys membarrier MEMBARRIER_CMD_SHARED test: flags = %d\n",
+                       flags);
        }
 
-       printf("membarrier: MEMBARRIER_CMD_SHARED success.\n");
-       return TEST_MEMBARRIER_PASS;
+       ksft_test_result_pass(
+               "sys membarrier MEMBARRIER_CMD_SHARED test: flags = %d\n",
+               flags);
+       return 0;
 }
 
-static enum test_membarrier_status test_membarrier(void)
+static int test_membarrier(void)
 {
-       enum test_membarrier_status status;
+       int status;
 
        status = test_membarrier_cmd_fail();
        if (status)
@@ -67,52 +74,38 @@ static enum test_membarrier_status test_membarrier(void)
        status = test_membarrier_success();
        if (status)
                return status;
-       return TEST_MEMBARRIER_PASS;
+       return 0;
 }
 
-static enum test_membarrier_status test_membarrier_query(void)
+static int test_membarrier_query(void)
 {
        int flags = 0, ret;
 
-       printf("membarrier MEMBARRIER_CMD_QUERY ");
        ret = sys_membarrier(MEMBARRIER_CMD_QUERY, flags);
        if (ret < 0) {
-               printf("failed. %s.\n", strerror(errno));
-               switch (errno) {
-               case ENOSYS:
+               if (errno == ENOSYS) {
                        /*
                         * It is valid to build a kernel with
                         * CONFIG_MEMBARRIER=n. However, this skips the tests.
                         */
-                       return TEST_MEMBARRIER_SKIP;
-               case EINVAL:
-               default:
-                       return TEST_MEMBARRIER_FAIL;
+                       ksft_exit_skip(
+                               "sys membarrier (CONFIG_MEMBARRIER) is disabled.\n");
                }
+               ksft_exit_fail_msg("sys_membarrier() failed\n");
        }
-       if (!(ret & MEMBARRIER_CMD_SHARED)) {
-               printf("command MEMBARRIER_CMD_SHARED is not supported.\n");
-               return TEST_MEMBARRIER_FAIL;
-       }
-       printf("syscall available.\n");
-       return TEST_MEMBARRIER_PASS;
+       if (!(ret & MEMBARRIER_CMD_SHARED))
+               ksft_exit_fail_msg("sys_membarrier is not supported.\n");
+
+       ksft_test_result_pass("sys_membarrier available\n");
+       return 0;
 }
 
 int main(int argc, char **argv)
 {
-       switch (test_membarrier_query()) {
-       case TEST_MEMBARRIER_FAIL:
-               return ksft_exit_fail();
-       case TEST_MEMBARRIER_SKIP:
-               return ksft_exit_skip();
-       }
-       switch (test_membarrier()) {
-       case TEST_MEMBARRIER_FAIL:
-               return ksft_exit_fail();
-       case TEST_MEMBARRIER_SKIP:
-               return ksft_exit_skip();
-       }
+       ksft_print_header();
+
+       test_membarrier_query();
+       test_membarrier();
 
-       printf("membarrier: tests done!\n");
-       return ksft_exit_pass();
+       ksft_exit_pass();
 }
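
The ksft_* helpers used in this conversion come from kselftest.h, included at the top of the file. A minimal sketch of the same pattern, with the signatures as the membarrier test uses them and a page-size check standing in for a real test body:

	#include <unistd.h>
	#include "../kselftest.h"

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);

		ksft_print_header();

		if (page < 0)
			ksft_exit_skip("sysconf(_SC_PAGESIZE) is not supported\n");
		if (page == 0)
			ksft_exit_fail_msg("page size reported as %ld\n", page);

		ksft_test_result_pass("page size is %ld bytes\n", page);
		ksft_exit_pass();
	}

The skip and fail helpers exit the process directly, which is why the converted test above no longer needs the old enum-based return plumbing.
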
index 79891d033de162cdcc921f3de7802738fa0e0a50..ad8a0897e47f8f52cd54de74cb1ae6043a6a0fc5 100644 (file)
@@ -7,7 +7,7 @@ TEST_PROGS := run_fuse_test.sh
 TEST_GEN_FILES := memfd_test fuse_mnt fuse_test
 
 fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
-fuse_mnt: LDFLAGS += $(shell pkg-config fuse --libs)
 
 include ../lib.mk
 
+$(OUTPUT)/fuse_mnt: LDLIBS += $(shell pkg-config fuse --libs)
index 6cddde0b96f85bc4e24fee9b679d2daf11c94b81..35025ce9ca665dd9ff061888c71e35be1a5c7332 100755 (executable)
@@ -22,6 +22,11 @@ prerequisite()
                echo $msg memory hotplug is not supported >&2
                exit 0
        fi
+
+       if ! grep -q 1 $SYSFS/devices/system/memory/memory*/removable; then
+               echo $msg no hot-pluggable memory >&2
+               exit 0
+       fi
 }
 
 #
@@ -39,7 +44,7 @@ hotpluggable_memory()
        done
 }
 
-hotplaggable_offline_memory()
+hotpluggable_offline_memory()
 {
        hotpluggable_memory offline
 }
@@ -75,9 +80,12 @@ online_memory_expect_success()
 
        if ! online_memory $memory; then
                echo $FUNCNAME $memory: unexpected fail >&2
+               return 1
        elif ! memory_is_online $memory; then
                echo $FUNCNAME $memory: unexpected offline >&2
+               return 1
        fi
+       return 0
 }
 
 online_memory_expect_fail()
@@ -86,9 +94,12 @@ online_memory_expect_fail()
 
        if online_memory $memory 2> /dev/null; then
                echo $FUNCNAME $memory: unexpected success >&2
+               return 1
        elif ! memory_is_offline $memory; then
                echo $FUNCNAME $memory: unexpected online >&2
+               return 1
        fi
+       return 0
 }
 
 offline_memory_expect_success()
@@ -97,9 +108,12 @@ offline_memory_expect_success()
 
        if ! offline_memory $memory; then
                echo $FUNCNAME $memory: unexpected fail >&2
+               return 1
        elif ! memory_is_offline $memory; then
                echo $FUNCNAME $memory: unexpected offline >&2
+               return 1
        fi
+       return 0
 }
 
 offline_memory_expect_fail()
@@ -108,14 +122,18 @@ offline_memory_expect_fail()
 
        if offline_memory $memory 2> /dev/null; then
                echo $FUNCNAME $memory: unexpected success >&2
+               return 1
        elif ! memory_is_online $memory; then
                echo $FUNCNAME $memory: unexpected offline >&2
+               return 1
        fi
+       return 0
 }
 
 error=-12
 priority=0
 ratio=10
+retval=0
 
 while getopts e:hp:r: opt; do
        case $opt in
@@ -131,6 +149,10 @@ while getopts e:hp:r: opt; do
                ;;
        r)
                ratio=$OPTARG
+               if [ "$ratio" -gt 100 ] || [ "$ratio" -lt 0 ]; then
+			echo "The percentage should be an integer between 0 and 100"
+                       exit 1
+               fi
                ;;
        esac
 done
@@ -143,35 +165,58 @@ fi
 prerequisite
 
 echo "Test scope: $ratio% hotplug memory"
-echo -e "\t online all hotplug memory in offline state"
-echo -e "\t offline $ratio% hotplug memory in online state"
-echo -e "\t online all hotplug memory in offline state"
 
 #
 # Online all hot-pluggable memory
 #
-for memory in `hotplaggable_offline_memory`; do
-       echo offline-online $memory
-       online_memory_expect_success $memory
-done
+hotpluggable_num=`hotpluggable_offline_memory | wc -l`
+echo -e "\t online all hot-pluggable memory in offline state:"
+if [ "$hotpluggable_num" -gt 0 ]; then
+       for memory in `hotpluggable_offline_memory`; do
+               echo "offline->online memory$memory"
+               if ! online_memory_expect_success $memory; then
+                       retval=1
+               fi
+       done
+else
+       echo -e "\t\t SKIPPED - no hot-pluggable memory in offline state"
+fi
 
 #
 # Offline $ratio percent of hot-pluggable memory
 #
+hotpluggable_num=`hotpluggable_online_memory | wc -l`
+target=`echo "a=$hotpluggable_num*$ratio; if ( a%100 ) a/100+1 else a/100" | bc`
+echo -e "\t offline $ratio% hot-pluggable memory in online state"
+echo -e "\t trying to offline $target out of $hotpluggable_num memory block(s):"
 for memory in `hotpluggable_online_memory`; do
-       if [ $((RANDOM % 100)) -lt $ratio ]; then
-               echo online-offline $memory
-               offline_memory_expect_success $memory
+       if [ "$target" -gt 0 ]; then
+               echo "online->offline memory$memory"
+               if offline_memory_expect_success $memory; then
+                       target=$(($target - 1))
+               fi
        fi
 done
+if [ "$target" -gt 0 ]; then
+       retval=1
+       echo -e "\t\t FAILED - unable to offline some memory blocks, device busy?"
+fi
 
 #
 # Online all hot-pluggable memory again
 #
-for memory in `hotplaggable_offline_memory`; do
-       echo offline-online $memory
-       online_memory_expect_success $memory
-done
+hotpluggable_num=`hotpluggable_offline_memory | wc -l`
+echo -e "\t online all hot-pluggable memory in offline state:"
+if [ "$hotpluggable_num" -gt 0 ]; then
+       for memory in `hotpluggable_offline_memory`; do
+               echo "offline->online memory$memory"
+               if ! online_memory_expect_success $memory; then
+                       retval=1
+               fi
+       done
+else
+       echo -e "\t\t SKIPPED - no hot-pluggable memory in offline state"
+fi
 
 #
 # Test with memory notifier error injection
@@ -189,15 +234,16 @@ prerequisite_extra()
 
        if [ ! -d "$DEBUGFS" ]; then
                echo $msg debugfs is not mounted >&2
-               exit 0
+               exit $retval
        fi
 
        if [ ! -d $NOTIFIER_ERR_INJECT_DIR ]; then
                echo $msg memory-notifier-error-inject module is not available >&2
-               exit 0
+               exit $retval
        fi
 }
 
+echo -e "\t Test with memory notifier error injection"
 prerequisite_extra
 
 #
@@ -214,7 +260,7 @@ done
 # Test memory hot-add error handling (offline => online)
 #
 echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_ONLINE/error
-for memory in `hotplaggable_offline_memory`; do
+for memory in `hotpluggable_offline_memory`; do
        online_memory_expect_fail $memory
 done
 
@@ -222,7 +268,7 @@ done
 # Online all hot-pluggable memory
 #
 echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_ONLINE/error
-for memory in `hotplaggable_offline_memory`; do
+for memory in `hotpluggable_offline_memory`; do
        online_memory_expect_success $memory
 done
 
@@ -236,3 +282,5 @@ done
 
 echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error
 /sbin/modprobe -q -r memory-notifier-error-inject
+
+exit $retval
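
As a worked example of the offline-target arithmetic added above: with 7 hot-pluggable blocks online and the default ratio of 10, the bc expression computes a = 70, and since 70 % 100 is non-zero the target becomes 70/100 + 1 = 1. The count is rounded up so that at least one block is exercised whenever any hot-pluggable memory is online.
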
index 35cbb4cba4109551cc14beabcba39dff5d22e85c..f6c9dbf478f877576b0ff32f94d9f7c987cb59b8 100644 (file)
@@ -3,8 +3,6 @@
 CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g
 CFLAGS += -I../../../../usr/include/
 
-reuseport_bpf_numa: LDFLAGS += -lnuma
-
 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh
 TEST_GEN_FILES =  socket
 TEST_GEN_FILES += psock_fanout psock_tpacket
@@ -13,3 +11,4 @@ TEST_GEN_FILES += reuseport_dualstack
 
 include ../lib.mk
 
+$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
index 5fa6fd2246b18a45290efef20789a4948d9425d9..aeb0c805f3ca0f9471ae0f211e5633aa9d7a52f8 100644 (file)
@@ -4,3 +4,5 @@ LDFLAGS += -lpthread
 
 include ../lib.mk
 
+$(TEST_GEN_PROGS): seccomp_bpf.c ../kselftest_harness.h
+       $(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
index 00a928b833d02d3f50dced58005a55d93c5e7e5a..73f5ea6778ceb93420471ed20a6b433781e4b289 100644 (file)
@@ -37,7 +37,7 @@
 #include <unistd.h>
 #include <sys/syscall.h>
 
-#include "test_harness.h"
+#include "../kselftest_harness.h"
 
 #ifndef PR_SET_PTRACER
 # define PR_SET_PTRACER 0x59616d61
@@ -1310,7 +1310,7 @@ void change_syscall(struct __test_metadata *_metadata,
        iov.iov_len = sizeof(regs);
        ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
 #endif
-       EXPECT_EQ(0, ret);
+       EXPECT_EQ(0, ret) {}
 
 #if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
     defined(__s390__) || defined(__hppa__)
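
The added {} after EXPECT_EQ(0, ret) gives the trailing OPTIONAL_HANDLER() loop from the new harness an explicit (empty) body, presumably to keep compilers from warning about an empty loop statement. A sketch of what that handler block is for, using made-up values:

	#include "../kselftest_harness.h"

	TEST(optional_handler_example)
	{
		int ret = -1;	/* stand-in for a syscall return value */

		/* Deliberately unmet so the handler fires: the block runs only
		 * when the expectation fails, and the test then continues. */
		EXPECT_EQ(0, ret) {
			TH_LOG("ret was %d, carrying on", ret);
		}

		/* With ASSERT_*, a trailing handler logs and then aborts the test. */
		ASSERT_EQ(-1, ret) TH_LOG("ret should have stayed -1");
	}

	TEST_HARNESS_MAIN
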
diff --git a/tools/testing/selftests/seccomp/test_harness.h b/tools/testing/selftests/seccomp/test_harness.h
deleted file mode 100644 (file)
index a786c69..0000000
+++ /dev/null
@@ -1,535 +0,0 @@
-/*
- * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by the GPLv2 license.
- *
- * test_harness.h: simple C unit test helper.
- *
- * Usage:
- *   #include "test_harness.h"
- *   TEST(standalone_test) {
- *     do_some_stuff;
- *     EXPECT_GT(10, stuff) {
- *        stuff_state_t state;
- *        enumerate_stuff_state(&state);
- *        TH_LOG("expectation failed with state: %s", state.msg);
- *     }
- *     more_stuff;
- *     ASSERT_NE(some_stuff, NULL) TH_LOG("how did it happen?!");
- *     last_stuff;
- *     EXPECT_EQ(0, last_stuff);
- *   }
- *
- *   FIXTURE(my_fixture) {
- *     mytype_t *data;
- *     int awesomeness_level;
- *   };
- *   FIXTURE_SETUP(my_fixture) {
- *     self->data = mytype_new();
- *     ASSERT_NE(NULL, self->data);
- *   }
- *   FIXTURE_TEARDOWN(my_fixture) {
- *     mytype_free(self->data);
- *   }
- *   TEST_F(my_fixture, data_is_good) {
- *     EXPECT_EQ(1, is_my_data_good(self->data));
- *   }
- *
- *   TEST_HARNESS_MAIN
- *
- * API inspired by code.google.com/p/googletest
- */
-#ifndef TEST_HARNESS_H_
-#define TEST_HARNESS_H_
-
-#define _GNU_SOURCE
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-/* All exported functionality should be declared through this macro. */
-#define TEST_API(x) _##x
-
-/*
- * Exported APIs
- */
-
-/* TEST(name) { implementation }
- * Defines a test by name.
- * Names must be unique and tests must not be run in parallel.  The
- * implementation containing block is a function and scoping should be treated
- * as such.  Returning early may be performed with a bare "return;" statement.
- *
- * EXPECT_* and ASSERT_* are valid in a TEST() { } context.
- */
-#define TEST TEST_API(TEST)
-
-/* TEST_SIGNAL(name, signal) { implementation }
- * Defines a test by name and the expected term signal.
- * Names must be unique and tests must not be run in parallel.  The
- * implementation containing block is a function and scoping should be treated
- * as such.  Returning early may be performed with a bare "return;" statement.
- *
- * EXPECT_* and ASSERT_* are valid in a TEST() { } context.
- */
-#define TEST_SIGNAL TEST_API(TEST_SIGNAL)
-
-/* FIXTURE(datatype name) {
- *   type property1;
- *   ...
- * };
- * Defines the data provided to TEST_F()-defined tests as |self|.  It should be
- * populated and cleaned up using FIXTURE_SETUP and FIXTURE_TEARDOWN.
- */
-#define FIXTURE TEST_API(FIXTURE)
-
-/* FIXTURE_DATA(datatype name)
- * This call may be used when the type of the fixture data
- * is needed.  In general, this should not be needed unless
- * the |self| is being passed to a helper directly.
- */
-#define FIXTURE_DATA TEST_API(FIXTURE_DATA)
-
-/* FIXTURE_SETUP(fixture name) { implementation }
- * Populates the required "setup" function for a fixture.  An instance of the
- * datatype defined with _FIXTURE_DATA will be exposed as |self| for the
- * implementation.
- *
- * ASSERT_* are valid for use in this context and will prempt the execution
- * of any dependent fixture tests.
- *
- * A bare "return;" statement may be used to return early.
- */
-#define FIXTURE_SETUP TEST_API(FIXTURE_SETUP)
-
-/* FIXTURE_TEARDOWN(fixture name) { implementation }
- * Populates the required "teardown" function for a fixture.  An instance of the
- * datatype defined with _FIXTURE_DATA will be exposed as |self| for the
- * implementation to clean up.
- *
- * A bare "return;" statement may be used to return early.
- */
-#define FIXTURE_TEARDOWN TEST_API(FIXTURE_TEARDOWN)
-
-/* TEST_F(fixture, name) { implementation }
- * Defines a test that depends on a fixture (e.g., is part of a test case).
- * Very similar to TEST() except that |self| is the setup instance of fixture's
- * datatype exposed for use by the implementation.
- */
-#define TEST_F TEST_API(TEST_F)
-
-#define TEST_F_SIGNAL TEST_API(TEST_F_SIGNAL)
-
-/* Use once to append a main() to the test file. E.g.,
- *   TEST_HARNESS_MAIN
- */
-#define TEST_HARNESS_MAIN TEST_API(TEST_HARNESS_MAIN)
-
-/*
- * Operators for use in TEST and TEST_F.
- * ASSERT_* calls will stop test execution immediately.
- * EXPECT_* calls will emit a failure warning, note it, and continue.
- */
-
-/* ASSERT_EQ(expected, measured): expected == measured */
-#define ASSERT_EQ TEST_API(ASSERT_EQ)
-/* ASSERT_NE(expected, measured): expected != measured */
-#define ASSERT_NE TEST_API(ASSERT_NE)
-/* ASSERT_LT(expected, measured): expected < measured */
-#define ASSERT_LT TEST_API(ASSERT_LT)
-/* ASSERT_LE(expected, measured): expected <= measured */
-#define ASSERT_LE TEST_API(ASSERT_LE)
-/* ASSERT_GT(expected, measured): expected > measured */
-#define ASSERT_GT TEST_API(ASSERT_GT)
-/* ASSERT_GE(expected, measured): expected >= measured */
-#define ASSERT_GE TEST_API(ASSERT_GE)
-/* ASSERT_NULL(measured): NULL == measured */
-#define ASSERT_NULL TEST_API(ASSERT_NULL)
-/* ASSERT_TRUE(measured): measured != 0 */
-#define ASSERT_TRUE TEST_API(ASSERT_TRUE)
-/* ASSERT_FALSE(measured): measured == 0 */
-#define ASSERT_FALSE TEST_API(ASSERT_FALSE)
-/* ASSERT_STREQ(expected, measured): !strcmp(expected, measured) */
-#define ASSERT_STREQ TEST_API(ASSERT_STREQ)
-/* ASSERT_STRNE(expected, measured): strcmp(expected, measured) */
-#define ASSERT_STRNE TEST_API(ASSERT_STRNE)
-/* EXPECT_EQ(expected, measured): expected == measured */
-#define EXPECT_EQ TEST_API(EXPECT_EQ)
-/* EXPECT_NE(expected, measured): expected != measured */
-#define EXPECT_NE TEST_API(EXPECT_NE)
-/* EXPECT_LT(expected, measured): expected < measured */
-#define EXPECT_LT TEST_API(EXPECT_LT)
-/* EXPECT_LE(expected, measured): expected <= measured */
-#define EXPECT_LE TEST_API(EXPECT_LE)
-/* EXPECT_GT(expected, measured): expected > measured */
-#define EXPECT_GT TEST_API(EXPECT_GT)
-/* EXPECT_GE(expected, measured): expected >= measured */
-#define EXPECT_GE TEST_API(EXPECT_GE)
-/* EXPECT_NULL(measured): NULL == measured */
-#define EXPECT_NULL TEST_API(EXPECT_NULL)
-/* EXPECT_TRUE(measured): 0 != measured */
-#define EXPECT_TRUE TEST_API(EXPECT_TRUE)
-/* EXPECT_FALSE(measured): 0 == measured */
-#define EXPECT_FALSE TEST_API(EXPECT_FALSE)
-/* EXPECT_STREQ(expected, measured): !strcmp(expected, measured) */
-#define EXPECT_STREQ TEST_API(EXPECT_STREQ)
-/* EXPECT_STRNE(expected, measured): strcmp(expected, measured) */
-#define EXPECT_STRNE TEST_API(EXPECT_STRNE)
-
-/* TH_LOG(format, ...)
- * Optional debug logging function available for use in tests.
- * Logging may be enabled or disabled by defining TH_LOG_ENABLED.
- * E.g., #define TH_LOG_ENABLED 1
- * If no definition is provided, logging is enabled by default.
- */
-#define TH_LOG  TEST_API(TH_LOG)
-
-/*
- * Internal implementation.
- *
- */
-
-/* Utilities exposed to the test definitions */
-#ifndef TH_LOG_STREAM
-#  define TH_LOG_STREAM stderr
-#endif
-
-#ifndef TH_LOG_ENABLED
-#  define TH_LOG_ENABLED 1
-#endif
-
-#define _TH_LOG(fmt, ...) do { \
-       if (TH_LOG_ENABLED) \
-               __TH_LOG(fmt, ##__VA_ARGS__); \
-} while (0)
-
-/* Unconditional logger for internal use. */
-#define __TH_LOG(fmt, ...) \
-               fprintf(TH_LOG_STREAM, "%s:%d:%s:" fmt "\n", \
-                       __FILE__, __LINE__, _metadata->name, ##__VA_ARGS__)
-
-/* Defines the test function and creates the registration stub. */
-#define _TEST(test_name) __TEST_IMPL(test_name, -1)
-
-#define _TEST_SIGNAL(test_name, signal) __TEST_IMPL(test_name, signal)
-
-#define __TEST_IMPL(test_name, _signal) \
-       static void test_name(struct __test_metadata *_metadata); \
-       static struct __test_metadata _##test_name##_object = \
-               { name: "global." #test_name, \
-                 fn: &test_name, termsig: _signal }; \
-       static void __attribute__((constructor)) _register_##test_name(void) \
-       { \
-               __register_test(&_##test_name##_object); \
-       } \
-       static void test_name( \
-               struct __test_metadata __attribute__((unused)) *_metadata)
-
-/* Wraps the struct name so we have one less argument to pass around. */
-#define _FIXTURE_DATA(fixture_name) struct _test_data_##fixture_name
-
-/* Called once per fixture to setup the data and register. */
-#define _FIXTURE(fixture_name) \
-       static void __attribute__((constructor)) \
-       _register_##fixture_name##_data(void) \
-       { \
-               __fixture_count++; \
-       } \
-       _FIXTURE_DATA(fixture_name)
-
-/* Prepares the setup function for the fixture.  |_metadata| is included
- * so that ASSERT_* work as a convenience.
- */
-#define _FIXTURE_SETUP(fixture_name) \
-       void fixture_name##_setup( \
-               struct __test_metadata __attribute__((unused)) *_metadata, \
-               _FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
-#define _FIXTURE_TEARDOWN(fixture_name) \
-       void fixture_name##_teardown( \
-               struct __test_metadata __attribute__((unused)) *_metadata, \
-               _FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
-
-/* Emits test registration and helpers for fixture-based test
- * cases.
- * TODO(wad) register fixtures on dedicated test lists.
- */
-#define _TEST_F(fixture_name, test_name) \
-       __TEST_F_IMPL(fixture_name, test_name, -1)
-
-#define _TEST_F_SIGNAL(fixture_name, test_name, signal) \
-       __TEST_F_IMPL(fixture_name, test_name, signal)
-
-#define __TEST_F_IMPL(fixture_name, test_name, signal) \
-       static void fixture_name##_##test_name( \
-               struct __test_metadata *_metadata, \
-               _FIXTURE_DATA(fixture_name) *self); \
-       static inline void wrapper_##fixture_name##_##test_name( \
-               struct __test_metadata *_metadata) \
-       { \
-               /* fixture data is alloced, setup, and torn down per call. */ \
-               _FIXTURE_DATA(fixture_name) self; \
-               memset(&self, 0, sizeof(_FIXTURE_DATA(fixture_name))); \
-               fixture_name##_setup(_metadata, &self); \
-               /* Let setup failure terminate early. */ \
-               if (!_metadata->passed) \
-                       return; \
-               fixture_name##_##test_name(_metadata, &self); \
-               fixture_name##_teardown(_metadata, &self); \
-       } \
-       static struct __test_metadata \
-                     _##fixture_name##_##test_name##_object = { \
-               name: #fixture_name "." #test_name, \
-               fn: &wrapper_##fixture_name##_##test_name, \
-               termsig: signal, \
-        }; \
-       static void __attribute__((constructor)) \
-                       _register_##fixture_name##_##test_name(void) \
-       { \
-               __register_test(&_##fixture_name##_##test_name##_object); \
-       } \
-       static void fixture_name##_##test_name( \
-               struct __test_metadata __attribute__((unused)) *_metadata, \
-               _FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
-
-/* Exports a simple wrapper to run the test harness. */
-#define _TEST_HARNESS_MAIN \
-       static void __attribute__((constructor)) \
-       __constructor_order_last(void) \
-       { \
-               if (!__constructor_order) \
-                       __constructor_order = _CONSTRUCTOR_ORDER_BACKWARD; \
-       } \
-       int main(int argc, char **argv) { \
-               return test_harness_run(argc, argv); \
-       }
-
-#define _ASSERT_EQ(_expected, _seen) \
-       __EXPECT(_expected, _seen, ==, 1)
-#define _ASSERT_NE(_expected, _seen) \
-       __EXPECT(_expected, _seen, !=, 1)
-#define _ASSERT_LT(_expected, _seen) \
-       __EXPECT(_expected, _seen, <, 1)
-#define _ASSERT_LE(_expected, _seen) \
-       __EXPECT(_expected, _seen, <=, 1)
-#define _ASSERT_GT(_expected, _seen) \
-       __EXPECT(_expected, _seen, >, 1)
-#define _ASSERT_GE(_expected, _seen) \
-       __EXPECT(_expected, _seen, >=, 1)
-#define _ASSERT_NULL(_seen) \
-       __EXPECT(NULL, _seen, ==, 1)
-
-#define _ASSERT_TRUE(_seen) \
-       _ASSERT_NE(0, _seen)
-#define _ASSERT_FALSE(_seen) \
-       _ASSERT_EQ(0, _seen)
-#define _ASSERT_STREQ(_expected, _seen) \
-       __EXPECT_STR(_expected, _seen, ==, 1)
-#define _ASSERT_STRNE(_expected, _seen) \
-       __EXPECT_STR(_expected, _seen, !=, 1)
-
-#define _EXPECT_EQ(_expected, _seen) \
-       __EXPECT(_expected, _seen, ==, 0)
-#define _EXPECT_NE(_expected, _seen) \
-       __EXPECT(_expected, _seen, !=, 0)
-#define _EXPECT_LT(_expected, _seen) \
-       __EXPECT(_expected, _seen, <, 0)
-#define _EXPECT_LE(_expected, _seen) \
-       __EXPECT(_expected, _seen, <=, 0)
-#define _EXPECT_GT(_expected, _seen) \
-       __EXPECT(_expected, _seen, >, 0)
-#define _EXPECT_GE(_expected, _seen) \
-       __EXPECT(_expected, _seen, >=, 0)
-
-#define _EXPECT_NULL(_seen) \
-       __EXPECT(NULL, _seen, ==, 0)
-#define _EXPECT_TRUE(_seen) \
-       _EXPECT_NE(0, _seen)
-#define _EXPECT_FALSE(_seen) \
-       _EXPECT_EQ(0, _seen)
-
-#define _EXPECT_STREQ(_expected, _seen) \
-       __EXPECT_STR(_expected, _seen, ==, 0)
-#define _EXPECT_STRNE(_expected, _seen) \
-       __EXPECT_STR(_expected, _seen, !=, 0)
-
-#define ARRAY_SIZE(a)  (sizeof(a) / sizeof(a[0]))
-
-/* Support an optional handler after and ASSERT_* or EXPECT_*.  The approach is
- * not thread-safe, but it should be fine in most sane test scenarios.
- *
- * Using __bail(), which optionally abort()s, is the easiest way to early
- * return while still providing an optional block to the API consumer.
- */
-#define OPTIONAL_HANDLER(_assert) \
-       for (; _metadata->trigger;  _metadata->trigger = __bail(_assert))
-
-#define __EXPECT(_expected, _seen, _t, _assert) do { \
-       /* Avoid multiple evaluation of the cases */ \
-       __typeof__(_expected) __exp = (_expected); \
-       __typeof__(_seen) __seen = (_seen); \
-       if (!(__exp _t __seen)) { \
-               unsigned long long __exp_print = (uintptr_t)__exp; \
-               unsigned long long __seen_print = (uintptr_t)__seen; \
-               __TH_LOG("Expected %s (%llu) %s %s (%llu)", \
-                        #_expected, __exp_print, #_t, \
-                        #_seen, __seen_print); \
-               _metadata->passed = 0; \
-               /* Ensure the optional handler is triggered */ \
-               _metadata->trigger = 1; \
-       } \
-} while (0); OPTIONAL_HANDLER(_assert)
-
-#define __EXPECT_STR(_expected, _seen, _t, _assert) do { \
-       const char *__exp = (_expected); \
-       const char *__seen = (_seen); \
-       if (!(strcmp(__exp, __seen) _t 0))  { \
-               __TH_LOG("Expected '%s' %s '%s'.", __exp, #_t, __seen); \
-               _metadata->passed = 0; \
-               _metadata->trigger = 1; \
-       } \
-} while (0); OPTIONAL_HANDLER(_assert)
-
-/* Contains all the information for test execution and status checking. */
-struct __test_metadata {
-       const char *name;
-       void (*fn)(struct __test_metadata *);
-       int termsig;
-       int passed;
-       int trigger; /* extra handler after the evaluation */
-       struct __test_metadata *prev, *next;
-};
-
-/* Storage for the (global) tests to be run. */
-static struct __test_metadata *__test_list;
-static unsigned int __test_count;
-static unsigned int __fixture_count;
-static int __constructor_order;
-
-#define _CONSTRUCTOR_ORDER_FORWARD   1
-#define _CONSTRUCTOR_ORDER_BACKWARD -1
-
-/*
- * Since constructors are called in reverse order, reverse the test
- * list so tests are run in source declaration order.
- * https://gcc.gnu.org/onlinedocs/gccint/Initialization.html
- * However, it seems not all toolchains do this correctly, so use
- * __constructor_order to detect which direction is called first
- * and adjust list building logic to get things running in the right
- * direction.
- */
-static inline void __register_test(struct __test_metadata *t)
-{
-       __test_count++;
-       /* Circular linked list where only prev is circular. */
-       if (__test_list == NULL) {
-               __test_list = t;
-               t->next = NULL;
-               t->prev = t;
-               return;
-       }
-       if (__constructor_order == _CONSTRUCTOR_ORDER_FORWARD) {
-               t->next = NULL;
-               t->prev = __test_list->prev;
-               t->prev->next = t;
-               __test_list->prev = t;
-       } else {
-               t->next = __test_list;
-               t->next->prev = t;
-               t->prev = t;
-               __test_list = t;
-       }
-}
-
-static inline int __bail(int for_realz)
-{
-       if (for_realz)
-               abort();
-       return 0;
-}
-
-void __run_test(struct __test_metadata *t)
-{
-       pid_t child_pid;
-       int status;
-
-       t->passed = 1;
-       t->trigger = 0;
-       printf("[ RUN      ] %s\n", t->name);
-       child_pid = fork();
-       if (child_pid < 0) {
-               printf("ERROR SPAWNING TEST CHILD\n");
-               t->passed = 0;
-       } else if (child_pid == 0) {
-               t->fn(t);
-               _exit(t->passed);
-       } else {
-               /* TODO(wad) add timeout support. */
-               waitpid(child_pid, &status, 0);
-               if (WIFEXITED(status)) {
-                       t->passed = t->termsig == -1 ? WEXITSTATUS(status) : 0;
-                       if (t->termsig != -1) {
-                               fprintf(TH_LOG_STREAM,
-                                       "%s: Test exited normally "
-                                       "instead of by signal (code: %d)\n",
-                                       t->name,
-                                       WEXITSTATUS(status));
-                       }
-               } else if (WIFSIGNALED(status)) {
-                       t->passed = 0;
-                       if (WTERMSIG(status) == SIGABRT) {
-                               fprintf(TH_LOG_STREAM,
-                                       "%s: Test terminated by assertion\n",
-                                       t->name);
-                       } else if (WTERMSIG(status) == t->termsig) {
-                               t->passed = 1;
-                       } else {
-                               fprintf(TH_LOG_STREAM,
-                                       "%s: Test terminated unexpectedly "
-                                       "by signal %d\n",
-                                       t->name,
-                                       WTERMSIG(status));
-                       }
-               } else {
-                       fprintf(TH_LOG_STREAM,
-                               "%s: Test ended in some other way [%u]\n",
-                               t->name,
-                               status);
-               }
-       }
-       printf("[     %4s ] %s\n", (t->passed ? "OK" : "FAIL"), t->name);
-}
-
-static int test_harness_run(int __attribute__((unused)) argc,
-                           char __attribute__((unused)) **argv)
-{
-       struct __test_metadata *t;
-       int ret = 0;
-       unsigned int count = 0;
-       unsigned int pass_count = 0;
-
-       /* TODO(wad) add optional arguments similar to gtest. */
-       printf("[==========] Running %u tests from %u test cases.\n",
-              __test_count, __fixture_count + 1);
-       for (t = __test_list; t; t = t->next) {
-               count++;
-               __run_test(t);
-               if (t->passed)
-                       pass_count++;
-               else
-                       ret = 1;
-       }
-       printf("[==========] %u / %u tests passed.\n", pass_count, count);
-       printf("[  %s  ]\n", (ret ? "FAILED" : "PASSED"));
-       return ret;
-}
-
-static void __attribute__((constructor)) __constructor_order_first(void)
-{
-       if (!__constructor_order)
-               __constructor_order = _CONSTRUCTOR_ORDER_FORWARD;
-}
-
-#endif  /* TEST_HARNESS_H_ */
index 2d1af7cca4631ff5c582cfe7c2f78a416a1d6c06..d4b59ab979a0939f71684c4fcbfa09d8be7c907e 100644 (file)
@@ -75,26 +75,31 @@ void _start(void)
        int ccode;
        struct sysinfo info;
        unsigned long used;
+       static const char *test_name = " get runtime memory use\n";
 
-       print("Testing system size.\n");
-       print("1..1\n");
+       print("TAP version 13\n");
+       print("# Testing system size.\n");
 
        ccode = sysinfo(&info);
        if (ccode < 0) {
-               print("not ok 1 get runtime memory use\n");
-               print("# could not get sysinfo\n");
+               print("not ok 1");
+               print(test_name);
+               print(" ---\n reason: \"could not get sysinfo\"\n ...\n");
                _exit(ccode);
        }
+       print("ok 1");
+       print(test_name);
+
        /* ignore cache complexities for now */
        used = info.totalram - info.freeram - info.bufferram;
-       print_k_value("ok 1 get runtime memory use # size = ", used,
-               info.mem_unit);
-
        print("# System runtime memory report (units in Kilobytes):\n");
-       print_k_value("#   Total:  ", info.totalram, info.mem_unit);
-       print_k_value("#   Free:   ", info.freeram, info.mem_unit);
-       print_k_value("#   Buffer: ", info.bufferram, info.mem_unit);
-       print_k_value("#   In use: ", used, info.mem_unit);
+       print(" ---\n");
+       print_k_value(" Total:  ", info.totalram, info.mem_unit);
+       print_k_value(" Free:   ", info.freeram, info.mem_unit);
+       print_k_value(" Buffer: ", info.bufferram, info.mem_unit);
+       print_k_value(" In use: ", used, info.mem_unit);
+       print(" ...\n");
+       print("1..1\n");
 
        _exit(0);
 }
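
On success, the converted test should now emit a TAP13 stream along these lines (memory figures are illustrative; print_k_value() controls their exact formatting):

	TAP version 13
	# Testing system size.
	ok 1 get runtime memory use
	# System runtime memory report (units in Kilobytes):
	 ---
	 Total:  2048000
	 Free:   1024000
	 Buffer: 65536
	 In use: 958464
	 ...
	1..1
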
index 9ea08d9f0b139d7a14c50f3bca450cdc17429e44..62fa666e501a4548a7a1b0a3e2ea87e35b94eaeb 100644 (file)
@@ -29,6 +29,7 @@
 #include <unistd.h>
 #include <stdlib.h>
 #include <sys/types.h>
+#include <sys/stat.h>
 #include <sys/wait.h>
 
 #include "synctest.h"
@@ -52,10 +53,22 @@ static int run_test(int (*test)(void), char *name)
        exit(test());
 }
 
+static int sync_api_supported(void)
+{
+       struct stat sbuf;
+
+       return 0 == stat("/sys/kernel/debug/sync/sw_sync", &sbuf);
+}
+
 int main(void)
 {
        int err = 0;
 
+       if (!sync_api_supported()) {
+               printf("SKIP: Sync framework not supported by kernel\n");
+               return 0;
+       }
+
        printf("[RUN]\tTesting sync framework\n");
 
        err += RUN_TEST(test_alloc_timeline);
index 17d534b1b7b4abfc0b4c3564875d9c0e114831ed..b6862322962ff3b1463ab0f23726cfd74ba45bf1 100644 (file)
@@ -24,6 +24,14 @@ verify()
        return 0
 }
 
+exit_test()
+{
+       if [ ! -z ${old_strict} ]; then
+               echo ${old_strict} > ${WRITES_STRICT}
+       fi
+       exit $rc
+}
+
 trap 'set_orig; rm -f "${TEST_FILE}"' EXIT
 
 rc=0
@@ -63,6 +71,20 @@ else
        echo "ok"
 fi
 
+echo -n "Checking write strict setting ... "
+WRITES_STRICT="${SYSCTL}/kernel/sysctl_writes_strict"
+if [ ! -e ${WRITES_STRICT} ]; then
+       echo "FAIL, but skip in case of old kernel" >&2
+else
+       old_strict=$(cat ${WRITES_STRICT})
+       if [ "$old_strict" = "1" ]; then
+               echo "ok"
+       else
+		echo "FAIL, strict value is 0; forcing it to 1 to continue" >&2
+               echo "1" > ${WRITES_STRICT}
+       fi
+fi
+
 # Now that we've validated the sanity of "set_test" and "set_orig",
 # we can use those functions to set starting states before running
 # specific behavioral tests.
index 8510f93f2d149ebb7ec199eab7be8d09bddc3bbf..e6e76c93d94873ec39c49519da05cb11bec5189d 100755 (executable)
@@ -7,4 +7,4 @@ TEST_STR=$(( $ORIG + 1 ))
 
 . ./common_tests
 
-exit $rc
+exit_test
index 90a9293d520cbb624c80a3e3b36e7dc3c1cfac38..857ec667fb02c9138d6ac3f157529a8f2b0d4662 100755 (executable)
@@ -74,4 +74,4 @@ else
        echo "ok"
 fi
 
-exit $rc
+exit_test
index 3b02aa6eb9da7f91d1b8b02bb5f547b5d33bdbcb..1830d66a6f0e835c545404b79e56b8c3de72d9c9 100644 (file)
@@ -10,7 +10,6 @@
 #include <string.h>
 #include <unistd.h>
 #include <errno.h>
-#include <numaif.h>
 #include <sys/mman.h>
 #include <sys/time.h>
 
  * different areas one below 128TB and one above 128TB
  * till it reaches 512TB. One with size 128TB and the
  * other being 384TB.
+ *
+ * On Arm64 the address space is 256TB and no high mappings
+ * are supported so far.
  */
+
 #define NR_CHUNKS_128TB   8192UL /* Number of 16GB chunks for 128TB */
-#define NR_CHUNKS_384TB  24576UL /* Number of 16GB chunks for 384TB */
+#define NR_CHUNKS_256TB   (NR_CHUNKS_128TB * 2UL)
+#define NR_CHUNKS_384TB   (NR_CHUNKS_128TB * 3UL)
 
 #define ADDR_MARK_128TB  (1UL << 47) /* First address beyond 128TB */
+#define ADDR_MARK_256TB  (1UL << 48) /* First address beyond 256TB */
+
+#ifdef __aarch64__
+#define HIGH_ADDR_MARK  ADDR_MARK_256TB
+#define HIGH_ADDR_SHIFT 49
+#define NR_CHUNKS_LOW   NR_CHUNKS_256TB
+#define NR_CHUNKS_HIGH  0
+#else
+#define HIGH_ADDR_MARK  ADDR_MARK_128TB
+#define HIGH_ADDR_SHIFT 48
+#define NR_CHUNKS_LOW   NR_CHUNKS_128TB
+#define NR_CHUNKS_HIGH  NR_CHUNKS_384TB
+#endif
 
 static char *hind_addr(void)
 {
-       int bits = 48 + rand() % 15;
+       int bits = HIGH_ADDR_SHIFT + rand() % (63 - HIGH_ADDR_SHIFT);
 
        return (char *) (1UL << bits);
 }
@@ -50,14 +67,14 @@ static int validate_addr(char *ptr, int high_addr)
        unsigned long addr = (unsigned long) ptr;
 
        if (high_addr) {
-               if (addr < ADDR_MARK_128TB) {
+               if (addr < HIGH_ADDR_MARK) {
                        printf("Bad address %lx\n", addr);
                        return 1;
                }
                return 0;
        }
 
-       if (addr > ADDR_MARK_128TB) {
+       if (addr > HIGH_ADDR_MARK) {
                printf("Bad address %lx\n", addr);
                return 1;
        }
@@ -79,12 +96,12 @@ static int validate_lower_address_hint(void)
 
 int main(int argc, char *argv[])
 {
-       char *ptr[NR_CHUNKS_128TB];
-       char *hptr[NR_CHUNKS_384TB];
+       char *ptr[NR_CHUNKS_LOW];
+       char *hptr[NR_CHUNKS_HIGH];
        char *hint;
        unsigned long i, lchunks, hchunks;
 
-       for (i = 0; i < NR_CHUNKS_128TB; i++) {
+       for (i = 0; i < NR_CHUNKS_LOW; i++) {
                ptr[i] = mmap(NULL, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 
@@ -99,7 +116,7 @@ int main(int argc, char *argv[])
        }
        lchunks = i;
 
-       for (i = 0; i < NR_CHUNKS_384TB; i++) {
+       for (i = 0; i < NR_CHUNKS_HIGH; i++) {
                hint = hind_addr();
                hptr[i] = mmap(hint, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);