From 6bb24f4dc7b7267699e3c3a4ca1ca062fe564b9e Mon Sep 17 00:00:00 2001
From: Brian Behlendorf
Date: Wed, 1 Jul 2015 15:23:09 -0700
Subject: [PATCH] Add the ZFS Test Suite

Add the ZFS Test Suite and test-runner framework from illumos.

This is a continuation of the work done by Turbo Fredriksson to port the
ZFS Test Suite to Linux. While this work was originally conceived as a
stand-alone project, integrating it directly with the ZoL source tree has
several advantages:

* Allows the ZFS Test Suite to be packaged in the zfs-test package.
* Facilitates easy integration with CI testing.
* Users can locally run the ZFS Test Suite to validate ZFS. This testing
  should ONLY be done on a dedicated test system because the ZFS Test
  Suite in its current form is destructive.
* Allows the ZFS Test Suite to be run directly in the ZoL source tree,
  enabling developers to iterate quickly during development.
* Developers can easily add/modify tests in the framework as features are
  added or functionality is changed. The tests will then always be in
  sync with the implementation.

Full documentation for how to run the ZFS Test Suite is available in the
tests/README.md file.

Warning: This test suite is designed to be run on a dedicated test
system. It will make modifications to the system including, but not
limited to, the following.

* Adding new users
* Adding new groups
* Modifying the following /proc files:
  * /proc/sys/kernel/core_pattern
  * /proc/sys/kernel/core_uses_pid
* Creating directories under /

Notes:

* Not all of the test cases are expected to pass, and by default these
  test cases are disabled. The failures are primarily due to assumptions
  made for illumos which are invalid under Linux.

* When updating these test cases it should be done in as generic a way as
  possible so the patch can be submitted back upstream. Most existing
  library functions have been updated to be Linux aware, and the
  following functions and variables have been added.

  * Functions:
    * is_linux          - Used to wrap a Linux specific section.
    * block_device_wait - Waits for block devices to be added to /dev/.

  * Variables:             Linux           Illumos
    * ZVOL_DEVDIR          "/dev/zvol"     "/dev/zvol/dsk"
    * ZVOL_RDEVDIR         "/dev/zvol"     "/dev/zvol/rdsk"
    * DEV_DSKDIR           "/dev"          "/dev/dsk"
    * DEV_RDSKDIR          "/dev"          "/dev/rdsk"
    * NEWFS_DEFAULT_FS     "ext2"          "ufs"

  (A short usage sketch of these helpers appears below.)

* Many of the disabled test cases fail because 'zfs/zpool destroy'
  returns EBUSY. This is largely caused by the asynchronous nature of
  device handling on Linux and is expected; the impacted test cases will
  need to be updated to handle this.

* There are several test cases which have been disabled because they can
  trigger a deadlock. A primary example is recursively creating zpools
  within zpools. These tests have been disabled until the root issue can
  be addressed.

* Illumos specific utilities such as mkfile should be added to the
  tests/zfs-tests/cmd/ directory. Custom programs required by the test
  scripts can also be added here.

* SELinux should be either in permissive mode or disabled when running
  the tests. The test cases should be updated to conform to a standard
  policy.

* Redundant test functionality has been removed (zfault.sh).

* Existing test scripts (zconfig.sh) should be migrated to use the
  framework for consistency and ease of testing.

* The DISKS environment variable currently only supports loopback
  devices because of how the ZFS Test Suite expects partitions to be
  named (p1, p2, etc). Support must be added to generate the correct
  partition name based on the device location and name.
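* The helpers above are intended to keep test scripts platform neutral.
  A minimal ksh sketch of the pattern is shown below; wait_for_zvol_dev
  is a hypothetical example function, not part of the suite itself.

    # Resolve a zvol device path without hard-coding the platform layout.
    function wait_for_zvol_dev # <pool/volume>
    {
            typeset zvol=$1

            if is_linux; then
                    # On Linux, zvol nodes are created asynchronously by
                    # udev under /dev/zvol, so wait for them to appear.
                    block_device_wait
            fi

            # ZVOL_RDEVDIR is "/dev/zvol" on Linux and "/dev/zvol/rdsk"
            # on illumos, so the same expression works on both platforms.
            echo $ZVOL_RDEVDIR/$zvol
    }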
* The ZFS Test Suite is part of the illumos code base at: https://github.com/illumos/illumos-gate/tree/master/usr/src/test Original-patch-by: Turbo Fredriksson Signed-off-by: Brian Behlendorf Signed-off-by: Olaf Faaland Closes #6 Closes #1534 --- Makefile.am | 2 +- TEST | 6 + config/user-commands.m4 | 171 ++ config/user-libattr.m4 | 12 + config/user.m4 | 19 +- config/zfs-build.m4 | 4 +- configure.ac | 141 +- include/libzfs.h | 1 + lib/libzfs/libzfs_util.c | 3 +- rpm/generic/zfs.spec.in | 1 + scripts/Makefile.am | 42 +- scripts/common.sh.in | 11 +- scripts/zfault.sh | 955 ------ scripts/zfs-tests.sh | 343 +++ tests/Makefile.am | 1 + tests/README.md | 133 + tests/runfiles/Makefile.am | 2 + tests/runfiles/linux.run | 647 ++++ tests/test-runner/Makefile.am | 1 + tests/test-runner/cmd/Makefile.am | 3 + tests/test-runner/cmd/test-runner.py | 862 ++++++ tests/test-runner/include/Makefile.am | 4 + tests/test-runner/include/logapi.shlib | 385 +++ tests/test-runner/include/stf.shlib | 57 + tests/test-runner/man/Makefile.am | 4 + tests/test-runner/man/test-runner.1 | 370 +++ tests/zfs-tests/Makefile.am | 1 + tests/zfs-tests/cmd/Makefile.am | 22 + tests/zfs-tests/cmd/chg_usr_exec/.gitignore | 1 + tests/zfs-tests/cmd/chg_usr_exec/Makefile.am | 6 + .../zfs-tests/cmd/chg_usr_exec/chg_usr_exec.c | 77 + tests/zfs-tests/cmd/devname2devid/.gitignore | 1 + tests/zfs-tests/cmd/devname2devid/Makefile.am | 7 + .../cmd/devname2devid/devname2devid.c | 120 + tests/zfs-tests/cmd/dir_rd_update/.gitignore | 1 + tests/zfs-tests/cmd/dir_rd_update/Makefile.am | 6 + .../cmd/dir_rd_update/dir_rd_update.c | 117 + tests/zfs-tests/cmd/file_check/.gitignore | 1 + tests/zfs-tests/cmd/file_check/Makefile.am | 6 + tests/zfs-tests/cmd/file_check/file_check.c | 86 + tests/zfs-tests/cmd/file_common.h | 62 + tests/zfs-tests/cmd/file_trunc/.gitignore | 1 + tests/zfs-tests/cmd/file_trunc/Makefile.am | 6 + tests/zfs-tests/cmd/file_trunc/file_trunc.c | 244 ++ tests/zfs-tests/cmd/file_write/.gitignore | 1 + tests/zfs-tests/cmd/file_write/Makefile.am | 6 + tests/zfs-tests/cmd/file_write/file_write.c | 229 ++ tests/zfs-tests/cmd/largest_file/.gitignore | 1 + tests/zfs-tests/cmd/largest_file/Makefile.am | 6 + .../zfs-tests/cmd/largest_file/largest_file.c | 140 + tests/zfs-tests/cmd/mkbusy/.gitignore | 1 + tests/zfs-tests/cmd/mkbusy/Makefile.am | 6 + tests/zfs-tests/cmd/mkbusy/mkbusy.c | 183 ++ tests/zfs-tests/cmd/mkfile/.gitignore | 1 + tests/zfs-tests/cmd/mkfile/Makefile.am | 6 + tests/zfs-tests/cmd/mkfile/mkfile.c | 275 ++ tests/zfs-tests/cmd/mkfiles/.gitignore | 1 + tests/zfs-tests/cmd/mkfiles/Makefile.am | 6 + tests/zfs-tests/cmd/mkfiles/mkfiles.c | 65 + tests/zfs-tests/cmd/mktree/.gitignore | 1 + tests/zfs-tests/cmd/mktree/Makefile.am | 6 + tests/zfs-tests/cmd/mktree/mktree.c | 183 ++ tests/zfs-tests/cmd/mmap_exec/.gitignore | 1 + tests/zfs-tests/cmd/mmap_exec/Makefile.am | 6 + tests/zfs-tests/cmd/mmap_exec/mmap_exec.c | 69 + tests/zfs-tests/cmd/mmapwrite/.gitignore | 1 + tests/zfs-tests/cmd/mmapwrite/Makefile.am | 7 + tests/zfs-tests/cmd/mmapwrite/mmapwrite.c | 97 + tests/zfs-tests/cmd/randfree_file/.gitignore | 1 + tests/zfs-tests/cmd/randfree_file/Makefile.am | 6 + .../cmd/randfree_file/randfree_file.c | 105 + tests/zfs-tests/cmd/readmmap/.gitignore | 1 + tests/zfs-tests/cmd/readmmap/Makefile.am | 6 + tests/zfs-tests/cmd/readmmap/readmmap.c | 138 + tests/zfs-tests/cmd/rename_dir/.gitignore | 1 + tests/zfs-tests/cmd/rename_dir/Makefile.am | 6 + tests/zfs-tests/cmd/rename_dir/rename_dir.c | 88 + .../cmd/rm_lnkcnt_zero_file/.gitignore | 
1 + .../cmd/rm_lnkcnt_zero_file/Makefile.am | 7 + .../rm_lnkcnt_zero_file/rm_lnkcnt_zero_file.c | 155 + tests/zfs-tests/cmd/threadsappend/.gitignore | 1 + tests/zfs-tests/cmd/threadsappend/Makefile.am | 7 + .../cmd/threadsappend/threadsappend.c | 135 + tests/zfs-tests/cmd/xattrtest/.gitignore | 1 + tests/zfs-tests/cmd/xattrtest/Makefile.am | 6 + tests/zfs-tests/cmd/xattrtest/xattrtest.c | 641 ++++ tests/zfs-tests/include/.gitignore | 2 + tests/zfs-tests/include/Makefile.am | 10 + tests/zfs-tests/include/commands.cfg.in | 119 + tests/zfs-tests/include/default.cfg.in | 200 ++ tests/zfs-tests/include/libtest.shlib | 2648 +++++++++++++++++ tests/zfs-tests/include/math.shlib | 43 + tests/zfs-tests/include/properties.shlib | 63 + tests/zfs-tests/include/zfs_commands.cfg | 1 + tests/zfs-tests/tests/Makefile.am | 1 + tests/zfs-tests/tests/functional/Makefile.am | 55 + .../tests/functional/acl/Makefile.am | 6 + tests/zfs-tests/tests/functional/acl/acl.cfg | 64 + .../tests/functional/acl/acl_common.kshlib | 626 ++++ .../tests/functional/acl/posix/Makefile.am | 6 + .../tests/functional/acl/posix/cleanup.ksh | 33 + .../functional/acl/posix/posix_001_pos.ksh | 68 + .../functional/acl/posix/posix_002_pos.ksh | 62 + .../tests/functional/acl/posix/setup.ksh | 48 + .../tests/functional/atime/Makefile.am | 8 + .../tests/functional/atime/atime.cfg | 30 + .../tests/functional/atime/atime_001_pos.ksh | 66 + .../tests/functional/atime/atime_002_neg.ksh | 67 + .../functional/atime/atime_common.kshlib | 78 + .../tests/functional/atime/cleanup.ksh | 30 + .../tests/functional/atime/setup.ksh | 31 + .../tests/functional/bootfs/Makefile.am | 12 + .../functional/bootfs/bootfs_001_pos.ksh | 78 + .../functional/bootfs/bootfs_002_neg.ksh | 78 + .../functional/bootfs/bootfs_003_pos.ksh | 82 + .../functional/bootfs/bootfs_004_neg.ksh | 91 + .../functional/bootfs/bootfs_005_neg.ksh | 79 + .../functional/bootfs/bootfs_006_pos.ksh | 142 + .../functional/bootfs/bootfs_007_neg.ksh | 67 + .../functional/bootfs/bootfs_007_pos.ksh | 65 + .../functional/bootfs/bootfs_008_neg.ksh | 78 + .../tests/functional/bootfs/cleanup.ksh | 34 + .../tests/functional/bootfs/setup.ksh | 34 + .../tests/functional/cache/Makefile.am | 17 + .../tests/functional/cache/cache.cfg | 70 + .../tests/functional/cache/cache.kshlib | 160 + .../tests/functional/cache/cache_001_pos.ksh | 65 + .../tests/functional/cache/cache_002_pos.ksh | 65 + .../tests/functional/cache/cache_003_pos.ksh | 69 + .../tests/functional/cache/cache_004_neg.ksh | 64 + .../tests/functional/cache/cache_005_neg.ksh | 65 + .../tests/functional/cache/cache_006_pos.ksh | 85 + .../tests/functional/cache/cache_007_neg.ksh | 63 + .../tests/functional/cache/cache_008_neg.ksh | 67 + .../tests/functional/cache/cache_009_pos.ksh | 69 + .../tests/functional/cache/cache_010_neg.ksh | 90 + .../tests/functional/cache/cache_011_pos.ksh | 68 + .../tests/functional/cache/cleanup.ksh | 46 + .../tests/functional/cache/setup.ksh | 45 + .../tests/functional/cachefile/Makefile.am | 8 + .../tests/functional/cachefile/cachefile.cfg | 33 + .../functional/cachefile/cachefile.kshlib | 47 + .../cachefile/cachefile_001_pos.ksh | 94 + .../cachefile/cachefile_002_pos.ksh | 82 + .../cachefile/cachefile_003_pos.ksh | 100 + .../cachefile/cachefile_004_pos.ksh | 124 + .../tests/functional/casenorm/Makefile.am | 22 + .../functional/casenorm/case_all_values.ksh | 42 + .../tests/functional/casenorm/casenorm.cfg | 27 + .../tests/functional/casenorm/casenorm.kshlib | 119 + .../tests/functional/casenorm/cleanup.ksh | 20 + 
.../casenorm/insensitive_formd_delete.ksh | 53 + .../casenorm/insensitive_formd_lookup.ksh | 51 + .../casenorm/insensitive_none_delete.ksh | 71 + .../casenorm/insensitive_none_lookup.ksh | 66 + .../casenorm/mixed_formd_delete.ksh | 59 + .../casenorm/mixed_formd_lookup.ksh | 56 + .../casenorm/mixed_formd_lookup_ci.ksh | 51 + .../functional/casenorm/mixed_none_delete.ksh | 56 + .../functional/casenorm/mixed_none_lookup.ksh | 53 + .../casenorm/mixed_none_lookup_ci.ksh | 66 + .../functional/casenorm/norm_all_values.ksh | 57 + .../casenorm/sensitive_formd_delete.ksh | 59 + .../casenorm/sensitive_formd_lookup.ksh | 56 + .../casenorm/sensitive_none_delete.ksh | 56 + .../casenorm/sensitive_none_lookup.ksh | 53 + .../tests/functional/casenorm/setup.ksh | 23 + .../tests/functional/clean_mirror/Makefile.am | 10 + .../clean_mirror/clean_mirror_001_pos.ksh | 53 + .../clean_mirror/clean_mirror_002_pos.ksh | 53 + .../clean_mirror/clean_mirror_003_pos.ksh | 53 + .../clean_mirror/clean_mirror_004_pos.ksh | 53 + .../clean_mirror/clean_mirror_common.kshlib | 78 + .../tests/functional/clean_mirror/cleanup.ksh | 49 + .../tests/functional/clean_mirror/default.cfg | 72 + .../tests/functional/clean_mirror/setup.ksh | 52 + .../tests/functional/cli_root/Makefile.am | 47 + .../functional/cli_root/cli_common.kshlib | 86 + .../tests/functional/cli_root/zdb/Makefile.am | 3 + .../functional/cli_root/zdb/zdb_001_neg.ksh | 73 + .../tests/functional/cli_root/zfs/Makefile.am | 7 + .../tests/functional/cli_root/zfs/cleanup.ksh | 30 + .../tests/functional/cli_root/zfs/setup.ksh | 32 + .../functional/cli_root/zfs/zfs_001_neg.ksh | 72 + .../functional/cli_root/zfs/zfs_002_pos.ksh | 113 + .../functional/cli_root/zfs/zfs_003_neg.ksh | 58 + .../functional/cli_root/zfs_clone/Makefile.am | 14 + .../functional/cli_root/zfs_clone/cleanup.ksh | 30 + .../functional/cli_root/zfs_clone/setup.ksh | 36 + .../cli_root/zfs_clone/zfs_clone_001_neg.ksh | 126 + .../cli_root/zfs_clone/zfs_clone_002_pos.ksh | 89 + .../cli_root/zfs_clone/zfs_clone_003_pos.ksh | 76 + .../cli_root/zfs_clone/zfs_clone_004_pos.ksh | 87 + .../cli_root/zfs_clone/zfs_clone_005_pos.ksh | 75 + .../cli_root/zfs_clone/zfs_clone_006_pos.ksh | 86 + .../cli_root/zfs_clone/zfs_clone_007_pos.ksh | 85 + .../cli_root/zfs_clone/zfs_clone_008_neg.ksh | 80 + .../cli_root/zfs_clone/zfs_clone_009_neg.ksh | 80 + .../cli_root/zfs_clone/zfs_clone_010_pos.ksh | 233 ++ .../cli_root/zfs_copies/Makefile.am | 12 + .../cli_root/zfs_copies/cleanup.ksh | 44 + .../functional/cli_root/zfs_copies/setup.ksh | 31 + .../cli_root/zfs_copies/zfs_copies.cfg | 37 + .../cli_root/zfs_copies/zfs_copies.kshlib | 151 + .../zfs_copies/zfs_copies_001_pos.ksh | 108 + .../zfs_copies/zfs_copies_002_pos.ksh | 99 + .../zfs_copies/zfs_copies_003_pos.ksh | 64 + .../zfs_copies/zfs_copies_004_neg.ksh | 53 + .../zfs_copies/zfs_copies_005_neg.ksh | 71 + .../zfs_copies/zfs_copies_006_pos.ksh | 73 + .../cli_root/zfs_create/Makefile.am | 20 + .../cli_root/zfs_create/cleanup.ksh | 30 + .../cli_root/zfs_create/properties.kshlib | 69 + .../functional/cli_root/zfs_create/setup.ksh | 32 + .../cli_root/zfs_create/zfs_create.cfg | 53 + .../zfs_create/zfs_create_001_pos.ksh | 66 + .../zfs_create/zfs_create_002_pos.ksh | 78 + .../zfs_create/zfs_create_003_pos.ksh | 69 + .../zfs_create/zfs_create_004_pos.ksh | 69 + .../zfs_create/zfs_create_005_pos.ksh | 81 + .../zfs_create/zfs_create_006_pos.ksh | 84 + .../zfs_create/zfs_create_007_pos.ksh | 93 + .../zfs_create/zfs_create_008_neg.ksh | 101 + .../zfs_create/zfs_create_009_neg.ksh | 127 + 
.../zfs_create/zfs_create_010_neg.ksh | 149 + .../zfs_create/zfs_create_011_pos.ksh | 65 + .../zfs_create/zfs_create_012_pos.ksh | 66 + .../zfs_create/zfs_create_013_pos.ksh | 78 + .../zfs_create/zfs_create_common.kshlib | 51 + .../cli_root/zfs_destroy/Makefile.am | 22 + .../cli_root/zfs_destroy/cleanup.ksh | 32 + .../functional/cli_root/zfs_destroy/setup.ksh | 32 + .../cli_root/zfs_destroy/zfs_destroy.cfg | 39 + .../zfs_destroy/zfs_destroy_001_pos.ksh | 221 ++ .../zfs_destroy/zfs_destroy_002_pos.ksh | 96 + .../zfs_destroy/zfs_destroy_003_pos.ksh | 156 + .../zfs_destroy/zfs_destroy_004_pos.ksh | 120 + .../zfs_destroy/zfs_destroy_005_neg.ksh | 212 ++ .../zfs_destroy/zfs_destroy_006_neg.ksh | 67 + .../zfs_destroy/zfs_destroy_007_neg.ksh | 76 + .../zfs_destroy/zfs_destroy_008_pos.ksh | 51 + .../zfs_destroy/zfs_destroy_009_pos.ksh | 60 + .../zfs_destroy/zfs_destroy_010_pos.ksh | 65 + .../zfs_destroy/zfs_destroy_011_pos.ksh | 50 + .../zfs_destroy/zfs_destroy_012_pos.ksh | 62 + .../zfs_destroy/zfs_destroy_013_neg.ksh | 49 + .../zfs_destroy/zfs_destroy_014_pos.ksh | 79 + .../zfs_destroy/zfs_destroy_015_pos.ksh | 161 + .../zfs_destroy/zfs_destroy_016_pos.ksh | 186 ++ .../zfs_destroy/zfs_destroy_common.kshlib | 147 + .../functional/cli_root/zfs_get/Makefile.am | 16 + .../functional/cli_root/zfs_get/cleanup.ksh | 30 + .../functional/cli_root/zfs_get/setup.ksh | 32 + .../cli_root/zfs_get/zfs_get_001_pos.ksh | 131 + .../cli_root/zfs_get/zfs_get_002_pos.ksh | 87 + .../cli_root/zfs_get/zfs_get_003_pos.ksh | 61 + .../cli_root/zfs_get/zfs_get_004_pos.ksh | 227 ++ .../cli_root/zfs_get/zfs_get_005_neg.ksh | 112 + .../cli_root/zfs_get/zfs_get_006_neg.ksh | 60 + .../cli_root/zfs_get/zfs_get_007_neg.ksh | 60 + .../cli_root/zfs_get/zfs_get_008_pos.ksh | 91 + .../cli_root/zfs_get/zfs_get_009_pos.ksh | 83 + .../cli_root/zfs_get/zfs_get_010_neg.ksh | 55 + .../cli_root/zfs_get/zfs_get_common.kshlib | 97 + .../cli_root/zfs_get/zfs_get_list_d.kshlib | 79 + .../cli_root/zfs_inherit/Makefile.am | 7 + .../cli_root/zfs_inherit/cleanup.ksh | 30 + .../functional/cli_root/zfs_inherit/setup.ksh | 32 + .../zfs_inherit/zfs_inherit_001_neg.ksh | 78 + .../zfs_inherit/zfs_inherit_002_neg.ksh | 102 + .../zfs_inherit/zfs_inherit_003_pos.ksh | 86 + .../functional/cli_root/zfs_mount/Makefile.am | 18 + .../functional/cli_root/zfs_mount/cleanup.ksh | 30 + .../functional/cli_root/zfs_mount/setup.ksh | 32 + .../cli_root/zfs_mount/zfs_mount.cfg | 39 + .../cli_root/zfs_mount/zfs_mount.kshlib | 132 + .../cli_root/zfs_mount/zfs_mount_001_pos.ksh | 63 + .../cli_root/zfs_mount/zfs_mount_002_pos.ksh | 74 + .../cli_root/zfs_mount/zfs_mount_003_pos.ksh | 86 + .../cli_root/zfs_mount/zfs_mount_004_pos.ksh | 79 + .../cli_root/zfs_mount/zfs_mount_005_pos.ksh | 82 + .../cli_root/zfs_mount/zfs_mount_006_pos.ksh | 120 + .../cli_root/zfs_mount/zfs_mount_007_pos.ksh | 142 + .../cli_root/zfs_mount/zfs_mount_008_pos.ksh | 92 + .../cli_root/zfs_mount/zfs_mount_009_neg.ksh | 111 + .../cli_root/zfs_mount/zfs_mount_010_neg.ksh | 67 + .../cli_root/zfs_mount/zfs_mount_011_neg.ksh | 77 + .../zfs_mount/zfs_mount_all_001_pos.ksh | 197 ++ .../cli_root/zfs_promote/Makefile.am | 13 + .../cli_root/zfs_promote/cleanup.ksh | 30 + .../functional/cli_root/zfs_promote/setup.ksh | 32 + .../cli_root/zfs_promote/zfs_promote.cfg | 43 + .../zfs_promote/zfs_promote_001_pos.ksh | 127 + .../zfs_promote/zfs_promote_002_pos.ksh | 102 + .../zfs_promote/zfs_promote_003_pos.ksh | 134 + .../zfs_promote/zfs_promote_004_pos.ksh | 140 + .../zfs_promote/zfs_promote_005_pos.ksh | 69 + 
.../zfs_promote/zfs_promote_006_neg.ksh | 80 + .../zfs_promote/zfs_promote_007_neg.ksh | 78 + .../zfs_promote/zfs_promote_008_pos.ksh | 79 + .../cli_root/zfs_property/Makefile.am | 5 + .../cli_root/zfs_property/cleanup.ksh | 34 + .../cli_root/zfs_property/setup.ksh | 36 + .../zfs_written_property_001_pos.ksh | 236 ++ .../cli_root/zfs_receive/Makefile.am | 13 + .../cli_root/zfs_receive/cleanup.ksh | 33 + .../functional/cli_root/zfs_receive/setup.ksh | 35 + .../zfs_receive/zfs_receive_001_pos.ksh | 176 ++ .../zfs_receive/zfs_receive_002_pos.ksh | 104 + .../zfs_receive/zfs_receive_003_pos.ksh | 89 + .../zfs_receive/zfs_receive_004_neg.ksh | 115 + .../zfs_receive/zfs_receive_005_neg.ksh | 95 + .../zfs_receive/zfs_receive_006_pos.ksh | 104 + .../zfs_receive/zfs_receive_007_neg.ksh | 80 + .../zfs_receive/zfs_receive_008_pos.ksh | 144 + .../zfs_receive/zfs_receive_009_neg.ksh | 114 + .../zfs_receive/zfs_receive_010_pos.ksh | 177 ++ .../zfs_receive/zfs_receive_011_pos.ksh | 84 + .../zfs_receive/zfs_receive_012_pos.ksh | 83 + .../cli_root/zfs_rename/Makefile.am | 19 + .../cli_root/zfs_rename/cleanup.ksh | 39 + .../functional/cli_root/zfs_rename/setup.ksh | 49 + .../cli_root/zfs_rename/zfs_rename.cfg | 38 + .../cli_root/zfs_rename/zfs_rename.kshlib | 123 + .../zfs_rename/zfs_rename_001_pos.ksh | 104 + .../zfs_rename/zfs_rename_002_pos.ksh | 87 + .../zfs_rename/zfs_rename_003_pos.ksh | 64 + .../zfs_rename/zfs_rename_004_neg.ksh | 107 + .../zfs_rename/zfs_rename_005_neg.ksh | 87 + .../zfs_rename/zfs_rename_006_pos.ksh | 75 + .../zfs_rename/zfs_rename_007_pos.ksh | 151 + .../zfs_rename/zfs_rename_008_pos.ksh | 88 + .../zfs_rename/zfs_rename_009_neg.ksh | 101 + .../zfs_rename/zfs_rename_010_neg.ksh | 73 + .../zfs_rename/zfs_rename_011_pos.ksh | 73 + .../zfs_rename/zfs_rename_012_neg.ksh | 67 + .../zfs_rename/zfs_rename_013_pos.ksh | 85 + .../cli_root/zfs_reservation/Makefile.am | 6 + .../cli_root/zfs_reservation/cleanup.ksh | 30 + .../cli_root/zfs_reservation/setup.ksh | 32 + .../zfs_reservation_001_pos.ksh | 59 + .../zfs_reservation_002_pos.ksh | 83 + .../cli_root/zfs_rollback/Makefile.am | 10 + .../cli_root/zfs_rollback/cleanup.ksh | 32 + .../cli_root/zfs_rollback/setup.ksh | 31 + .../cli_root/zfs_rollback/zfs_rollback.cfg | 45 + .../zfs_rollback/zfs_rollback_001_pos.ksh | 163 + .../zfs_rollback/zfs_rollback_002_pos.ksh | 64 + .../zfs_rollback/zfs_rollback_003_neg.ksh | 80 + .../zfs_rollback/zfs_rollback_004_neg.ksh | 82 + .../zfs_rollback/zfs_rollback_common.kshlib | 308 ++ .../functional/cli_root/zfs_send/Makefile.am | 12 + .../functional/cli_root/zfs_send/cleanup.ksh | 33 + .../functional/cli_root/zfs_send/setup.ksh | 32 + .../functional/cli_root/zfs_send/zfs_send.cfg | 32 + .../cli_root/zfs_send/zfs_send_001_pos.ksh | 127 + .../cli_root/zfs_send/zfs_send_002_pos.ksh | 139 + .../cli_root/zfs_send/zfs_send_003_pos.ksh | 65 + .../cli_root/zfs_send/zfs_send_004_neg.ksh | 105 + .../cli_root/zfs_send/zfs_send_005_pos.ksh | 66 + .../cli_root/zfs_send/zfs_send_006_pos.ksh | 194 ++ .../cli_root/zfs_send/zfs_send_007_pos.ksh | 99 + .../functional/cli_root/zfs_set/Makefile.am | 31 + .../cli_root/zfs_set/cache_001_pos.ksh | 63 + .../cli_root/zfs_set/cache_002_neg.ksh | 63 + .../cli_root/zfs_set/canmount_001_pos.ksh | 119 + .../cli_root/zfs_set/canmount_002_pos.ksh | 150 + .../cli_root/zfs_set/canmount_003_pos.ksh | 111 + .../cli_root/zfs_set/canmount_004_pos.ksh | 92 + .../cli_root/zfs_set/checksum_001_pos.ksh | 65 + .../functional/cli_root/zfs_set/cleanup.ksh | 30 + 
.../cli_root/zfs_set/compression_001_pos.ksh | 63 + .../cli_root/zfs_set/mountpoint_001_pos.ksh | 100 + .../cli_root/zfs_set/mountpoint_002_pos.ksh | 98 + .../cli_root/zfs_set/mountpoint_003_pos.ksh | 115 + .../cli_root/zfs_set/onoffs_001_pos.ksh | 99 + .../zfs_set/property_alias_001_pos.ksh | 140 + .../cli_root/zfs_set/readonly_001_pos.ksh | 160 + .../cli_root/zfs_set/reservation_001_neg.ksh | 97 + .../cli_root/zfs_set/ro_props_001_pos.ksh | 113 + .../functional/cli_root/zfs_set/setup.ksh | 31 + .../cli_root/zfs_set/share_mount_001_neg.ksh | 60 + .../cli_root/zfs_set/snapdir_001_pos.ksh | 108 + .../zfs_set/user_property_001_pos.ksh | 65 + .../zfs_set/user_property_002_pos.ksh | 118 + .../zfs_set/user_property_003_neg.ksh | 63 + .../zfs_set/user_property_004_pos.ksh | 97 + .../cli_root/zfs_set/version_001_neg.ksh | 87 + .../cli_root/zfs_set/zfs_set_001_neg.ksh | 82 + .../cli_root/zfs_set/zfs_set_002_neg.ksh | 62 + .../cli_root/zfs_set/zfs_set_003_neg.ksh | 73 + .../cli_root/zfs_set/zfs_set_common.kshlib | 267 ++ .../functional/cli_root/zfs_share/Makefile.am | 16 + .../functional/cli_root/zfs_share/cleanup.ksh | 30 + .../functional/cli_root/zfs_share/setup.ksh | 34 + .../cli_root/zfs_share/zfs_share.cfg | 32 + .../cli_root/zfs_share/zfs_share_001_pos.ksh | 145 + .../cli_root/zfs_share/zfs_share_002_pos.ksh | 73 + .../cli_root/zfs_share/zfs_share_003_pos.ksh | 106 + .../cli_root/zfs_share/zfs_share_004_pos.ksh | 96 + .../cli_root/zfs_share/zfs_share_005_pos.ksh | 79 + .../cli_root/zfs_share/zfs_share_006_pos.ksh | 101 + .../cli_root/zfs_share/zfs_share_007_neg.ksh | 81 + .../cli_root/zfs_share/zfs_share_008_neg.ksh | 70 + .../cli_root/zfs_share/zfs_share_009_neg.ksh | 69 + .../cli_root/zfs_share/zfs_share_010_neg.ksh | 56 + .../cli_root/zfs_share/zfs_share_011_pos.ksh | 85 + .../cli_root/zfs_snapshot/Makefile.am | 14 + .../cli_root/zfs_snapshot/cleanup.ksh | 30 + .../cli_root/zfs_snapshot/setup.ksh | 32 + .../cli_root/zfs_snapshot/zfs_snapshot.cfg | 42 + .../zfs_snapshot/zfs_snapshot_001_neg.ksh | 117 + .../zfs_snapshot/zfs_snapshot_002_neg.ksh | 93 + .../zfs_snapshot/zfs_snapshot_003_neg.ksh | 61 + .../zfs_snapshot/zfs_snapshot_004_neg.ksh | 92 + .../zfs_snapshot/zfs_snapshot_005_neg.ksh | 92 + .../zfs_snapshot/zfs_snapshot_006_pos.ksh | 124 + .../zfs_snapshot/zfs_snapshot_007_neg.ksh | 128 + .../zfs_snapshot/zfs_snapshot_008_neg.ksh | 68 + .../zfs_snapshot/zfs_snapshot_009_pos.ksh | 102 + .../cli_root/zfs_unmount/Makefile.am | 16 + .../cli_root/zfs_unmount/cleanup.ksh | 30 + .../functional/cli_root/zfs_unmount/setup.ksh | 32 + .../cli_root/zfs_unmount/zfs_unmount.cfg | 39 + .../cli_root/zfs_unmount/zfs_unmount.kshlib | 77 + .../zfs_unmount/zfs_unmount_001_pos.ksh | 112 + .../zfs_unmount/zfs_unmount_002_pos.ksh | 94 + .../zfs_unmount/zfs_unmount_003_pos.ksh | 105 + .../zfs_unmount/zfs_unmount_004_pos.ksh | 95 + .../zfs_unmount/zfs_unmount_005_pos.ksh | 109 + .../zfs_unmount/zfs_unmount_006_pos.ksh | 67 + .../zfs_unmount/zfs_unmount_007_neg.ksh | 106 + .../zfs_unmount/zfs_unmount_008_neg.ksh | 143 + .../zfs_unmount/zfs_unmount_009_pos.ksh | 123 + .../zfs_unmount/zfs_unmount_all_001_pos.ksh | 195 ++ .../cli_root/zfs_unshare/Makefile.am | 9 + .../cli_root/zfs_unshare/cleanup.ksh | 30 + .../functional/cli_root/zfs_unshare/setup.ksh | 34 + .../zfs_unshare/zfs_unshare_001_pos.ksh | 173 ++ .../zfs_unshare/zfs_unshare_002_pos.ksh | 177 ++ .../zfs_unshare/zfs_unshare_003_pos.ksh | 90 + .../zfs_unshare/zfs_unshare_004_neg.ksh | 84 + .../zfs_unshare/zfs_unshare_005_neg.ksh | 56 + 
.../cli_root/zfs_upgrade/Makefile.am | 12 + .../cli_root/zfs_upgrade/cleanup.ksh | 33 + .../functional/cli_root/zfs_upgrade/setup.ksh | 42 + .../cli_root/zfs_upgrade/zfs_upgrade.kshlib | 179 ++ .../zfs_upgrade/zfs_upgrade_001_pos.ksh | 136 + .../zfs_upgrade/zfs_upgrade_002_pos.ksh | 63 + .../zfs_upgrade/zfs_upgrade_003_pos.ksh | 100 + .../zfs_upgrade/zfs_upgrade_004_pos.ksh | 104 + .../zfs_upgrade/zfs_upgrade_005_pos.ksh | 104 + .../zfs_upgrade/zfs_upgrade_006_neg.ksh | 54 + .../zfs_upgrade/zfs_upgrade_007_neg.ksh | 55 + .../functional/cli_root/zpool/Makefile.am | 7 + .../functional/cli_root/zpool/cleanup.ksh | 30 + .../tests/functional/cli_root/zpool/setup.ksh | 32 + .../cli_root/zpool/zpool_001_neg.ksh | 65 + .../cli_root/zpool/zpool_002_pos.ksh | 103 + .../cli_root/zpool/zpool_003_pos.ksh | 76 + .../functional/cli_root/zpool_add/Makefile.am | 15 + .../functional/cli_root/zpool_add/cleanup.ksh | 37 + .../functional/cli_root/zpool_add/setup.ksh | 56 + .../cli_root/zpool_add/zpool_add.cfg | 92 + .../cli_root/zpool_add/zpool_add.kshlib | 109 + .../cli_root/zpool_add/zpool_add_001_pos.ksh | 142 + .../cli_root/zpool_add/zpool_add_002_pos.ksh | 72 + .../cli_root/zpool_add/zpool_add_003_pos.ksh | 78 + .../cli_root/zpool_add/zpool_add_004_pos.ksh | 78 + .../cli_root/zpool_add/zpool_add_005_pos.ksh | 84 + .../cli_root/zpool_add/zpool_add_006_pos.ksh | 78 + .../cli_root/zpool_add/zpool_add_007_neg.ksh | 66 + .../cli_root/zpool_add/zpool_add_008_neg.ksh | 67 + .../cli_root/zpool_add/zpool_add_009_neg.ksh | 66 + .../cli_root/zpool_attach/Makefile.am | 5 + .../cli_root/zpool_attach/cleanup.ksh | 32 + .../cli_root/zpool_attach/setup.ksh | 35 + .../zpool_attach/zpool_attach_001_neg.ksh | 78 + .../cli_root/zpool_clear/Makefile.am | 8 + .../cli_root/zpool_clear/cleanup.ksh | 30 + .../functional/cli_root/zpool_clear/setup.ksh | 32 + .../cli_root/zpool_clear/zpool_clear.cfg | 33 + .../zpool_clear/zpool_clear_001_pos.ksh | 217 ++ .../zpool_clear/zpool_clear_002_neg.ksh | 76 + .../zpool_clear/zpool_clear_003_neg.ksh | 73 + .../cli_root/zpool_create/Makefile.am | 32 + .../cli_root/zpool_create/cleanup.ksh | 39 + .../cli_root/zpool_create/setup.ksh | 57 + .../cli_root/zpool_create/zpool_create.cfg | 109 + .../cli_root/zpool_create/zpool_create.shlib | 154 + .../zpool_create/zpool_create_001_pos.ksh | 145 + .../zpool_create/zpool_create_002_pos.ksh | 125 + .../zpool_create/zpool_create_003_pos.ksh | 81 + .../zpool_create/zpool_create_004_pos.ksh | 80 + .../zpool_create/zpool_create_005_pos.ksh | 130 + .../zpool_create/zpool_create_006_pos.ksh | 123 + .../zpool_create/zpool_create_007_neg.ksh | 89 + .../zpool_create/zpool_create_008_pos.ksh | 152 + .../zpool_create/zpool_create_009_neg.ksh | 93 + .../zpool_create/zpool_create_010_neg.ksh | 88 + .../zpool_create/zpool_create_011_neg.ksh | 130 + .../zpool_create/zpool_create_012_neg.ksh | 63 + .../zpool_create/zpool_create_014_neg.ksh | 91 + .../zpool_create/zpool_create_015_neg.ksh | 96 + .../zpool_create/zpool_create_016_pos.ksh | 96 + .../zpool_create/zpool_create_017_neg.ksh | 90 + .../zpool_create/zpool_create_018_pos.ksh | 104 + .../zpool_create/zpool_create_019_pos.ksh | 72 + .../zpool_create/zpool_create_020_pos.ksh | 111 + .../zpool_create/zpool_create_021_pos.ksh | 88 + .../zpool_create/zpool_create_022_pos.ksh | 94 + .../zpool_create/zpool_create_023_neg.ksh | 85 + .../zpool_create_features_001_pos.ksh | 70 + .../zpool_create_features_002_pos.ksh | 69 + .../zpool_create_features_003_pos.ksh | 69 + .../zpool_create_features_004_neg.ksh | 61 + 
.../cli_root/zpool_destroy/Makefile.am | 6 + .../cli_root/zpool_destroy/zpool_destroy.cfg | 43 + .../zpool_destroy/zpool_destroy_001_pos.ksh | 87 + .../zpool_destroy/zpool_destroy_002_pos.ksh | 117 + .../zpool_destroy/zpool_destroy_003_neg.ksh | 52 + .../cli_root/zpool_detach/Makefile.am | 5 + .../cli_root/zpool_detach/cleanup.ksh | 32 + .../cli_root/zpool_detach/setup.ksh | 35 + .../zpool_detach/zpool_detach_001_neg.ksh | 65 + .../cli_root/zpool_expand/Makefile.am | 8 + .../cli_root/zpool_expand/cleanup.ksh | 32 + .../cli_root/zpool_expand/setup.ksh | 34 + .../cli_root/zpool_expand/zpool_expand.cfg | 38 + .../zpool_expand/zpool_expand_001_pos.ksh | 146 + .../zpool_expand/zpool_expand_002_pos.ksh | 148 + .../zpool_expand/zpool_expand_003_neg.ksh | 115 + .../cli_root/zpool_export/Makefile.am | 8 + .../cli_root/zpool_export/cleanup.ksh | 30 + .../cli_root/zpool_export/setup.ksh | 36 + .../zpool_export/zpool_export_001_pos.ksh | 65 + .../zpool_export/zpool_export_002_pos.ksh | 76 + .../zpool_export/zpool_export_003_neg.ksh | 64 + .../zpool_export/zpool_export_004_pos.ksh | 95 + .../functional/cli_root/zpool_get/Makefile.am | 9 + .../functional/cli_root/zpool_get/cleanup.ksh | 36 + .../functional/cli_root/zpool_get/setup.ksh | 37 + .../cli_root/zpool_get/zpool_get.cfg | 49 + .../cli_root/zpool_get/zpool_get_001_pos.ksh | 57 + .../cli_root/zpool_get/zpool_get_002_pos.ksh | 94 + .../cli_root/zpool_get/zpool_get_003_pos.ksh | 67 + .../cli_root/zpool_get/zpool_get_004_neg.ksh | 57 + .../cli_root/zpool_history/Makefile.am | 6 + .../cli_root/zpool_history/cleanup.ksh | 34 + .../cli_root/zpool_history/setup.ksh | 35 + .../zpool_history/zpool_history_001_neg.ksh | 65 + .../zpool_history/zpool_history_002_pos.ksh | 53 + .../cli_root/zpool_import/Makefile.am | 32 + .../cli_root/zpool_import/cleanup.ksh | 63 + .../cli_root/zpool_import/setup.ksh | 87 + .../zpool_import/unclean_export.dat.bz2 | Bin 0 -> 14161 bytes .../cli_root/zpool_import/zpool_import.cfg | 101 + .../zpool_import/zpool_import_001_pos.ksh | 138 + .../zpool_import/zpool_import_002_pos.ksh | 142 + .../zpool_import/zpool_import_003_pos.ksh | 67 + .../zpool_import/zpool_import_004_pos.ksh | 88 + .../zpool_import/zpool_import_005_pos.ksh | 85 + .../zpool_import/zpool_import_006_pos.ksh | 83 + .../zpool_import/zpool_import_007_pos.ksh | 90 + .../zpool_import/zpool_import_008_pos.ksh | 91 + .../zpool_import/zpool_import_009_neg.ksh | 103 + .../zpool_import/zpool_import_010_pos.ksh | 92 + .../zpool_import/zpool_import_011_neg.ksh | 81 + .../zpool_import/zpool_import_012_pos.ksh | 209 ++ .../zpool_import/zpool_import_013_neg.ksh | 75 + .../zpool_import/zpool_import_all_001_pos.ksh | 223 ++ .../zpool_import_features_001_pos.ksh | 71 + .../zpool_import_features_002_neg.ksh | 87 + .../zpool_import_features_003_pos.ksh | 106 + .../zpool_import_missing_001_pos.ksh | 203 ++ .../zpool_import_missing_002_pos.ksh | 197 ++ .../zpool_import_missing_003_pos.ksh | 232 ++ .../zpool_import_rename_001_pos.ksh | 162 + .../cli_root/zpool_offline/Makefile.am | 6 + .../cli_root/zpool_offline/cleanup.ksh | 32 + .../cli_root/zpool_offline/setup.ksh | 35 + .../zpool_offline/zpool_offline_001_pos.ksh | 120 + .../zpool_offline/zpool_offline_002_neg.ksh | 91 + .../cli_root/zpool_online/Makefile.am | 6 + .../cli_root/zpool_online/cleanup.ksh | 32 + .../cli_root/zpool_online/setup.ksh | 35 + .../zpool_online/zpool_online_001_pos.ksh | 108 + .../zpool_online/zpool_online_002_neg.ksh | 70 + .../cli_root/zpool_remove/Makefile.am | 8 + .../cli_root/zpool_remove/cleanup.ksh | 32 
+ .../cli_root/zpool_remove/setup.ksh | 43 + .../cli_root/zpool_remove/zpool_remove.cfg | 54 + .../zpool_remove/zpool_remove_001_neg.ksh | 96 + .../zpool_remove/zpool_remove_002_pos.ksh | 69 + .../zpool_remove/zpool_remove_003_pos.ksh | 73 + .../cli_root/zpool_replace/Makefile.am | 5 + .../cli_root/zpool_replace/cleanup.ksh | 32 + .../cli_root/zpool_replace/setup.ksh | 35 + .../zpool_replace/zpool_replace_001_neg.ksh | 78 + .../cli_root/zpool_scrub/Makefile.am | 10 + .../cli_root/zpool_scrub/cleanup.ksh | 32 + .../functional/cli_root/zpool_scrub/setup.ksh | 47 + .../cli_root/zpool_scrub/zpool_scrub.cfg | 32 + .../zpool_scrub/zpool_scrub_001_neg.ksh | 58 + .../zpool_scrub/zpool_scrub_002_pos.ksh | 48 + .../zpool_scrub/zpool_scrub_003_pos.ksh | 71 + .../zpool_scrub/zpool_scrub_004_pos.ksh | 59 + .../zpool_scrub/zpool_scrub_005_pos.ksh | 61 + .../functional/cli_root/zpool_set/Makefile.am | 5 + .../cli_root/zpool_set/zpool_set_001_pos.ksh | 56 + .../cli_root/zpool_set/zpool_set_002_neg.ksh | 119 + .../cli_root/zpool_set/zpool_set_003_neg.ksh | 72 + .../cli_root/zpool_status/Makefile.am | 6 + .../cli_root/zpool_status/cleanup.ksh | 30 + .../cli_root/zpool_status/setup.ksh | 32 + .../zpool_status/zpool_status_001_pos.ksh | 57 + .../zpool_status/zpool_status_002_pos.ksh | 62 + .../cli_root/zpool_upgrade/Makefile.am | 72 + .../cli_root/zpool_upgrade/cleanup.ksh | 39 + .../cli_root/zpool_upgrade/setup.ksh | 38 + .../zpool_upgrade/zfs-broken-mirror1.dat.bz2 | Bin 0 -> 8871 bytes .../zpool_upgrade/zfs-broken-mirror2.dat.bz2 | Bin 0 -> 29281 bytes .../zpool_upgrade/zfs-pool-v1.dat.bz2 | Bin 0 -> 31464 bytes .../zpool_upgrade/zfs-pool-v10.dat.bz2 | Bin 0 -> 31549 bytes .../zpool_upgrade/zfs-pool-v11.dat.bz2 | Bin 0 -> 29695 bytes .../zpool_upgrade/zfs-pool-v12.dat.bz2 | Bin 0 -> 29786 bytes .../zpool_upgrade/zfs-pool-v13.dat.bz2 | Bin 0 -> 30554 bytes .../zpool_upgrade/zfs-pool-v14.dat.bz2 | Bin 0 -> 30605 bytes .../zpool_upgrade/zfs-pool-v15.dat.bz2 | Bin 0 -> 33172 bytes .../zpool_upgrade/zfs-pool-v1mirror1.dat.bz2 | Bin 0 -> 32989 bytes .../zpool_upgrade/zfs-pool-v1mirror2.dat.bz2 | Bin 0 -> 32965 bytes .../zpool_upgrade/zfs-pool-v1mirror3.dat.bz2 | Bin 0 -> 32956 bytes .../zpool_upgrade/zfs-pool-v1raidz1.dat.bz2 | Bin 0 -> 28792 bytes .../zpool_upgrade/zfs-pool-v1raidz2.dat.bz2 | Bin 0 -> 28480 bytes .../zpool_upgrade/zfs-pool-v1raidz3.dat.bz2 | Bin 0 -> 28779 bytes .../zpool_upgrade/zfs-pool-v1stripe1.dat.bz2 | Bin 0 -> 20361 bytes .../zpool_upgrade/zfs-pool-v1stripe2.dat.bz2 | Bin 0 -> 20251 bytes .../zpool_upgrade/zfs-pool-v1stripe3.dat.bz2 | Bin 0 -> 32498 bytes .../zpool_upgrade/zfs-pool-v2.dat.bz2 | Bin 0 -> 32492 bytes .../zpool_upgrade/zfs-pool-v2mirror1.dat.bz2 | Bin 0 -> 44185 bytes .../zpool_upgrade/zfs-pool-v2mirror2.dat.bz2 | Bin 0 -> 44155 bytes .../zpool_upgrade/zfs-pool-v2mirror3.dat.bz2 | Bin 0 -> 44170 bytes .../zpool_upgrade/zfs-pool-v2raidz1.dat.bz2 | Bin 0 -> 45894 bytes .../zpool_upgrade/zfs-pool-v2raidz2.dat.bz2 | Bin 0 -> 49452 bytes .../zpool_upgrade/zfs-pool-v2raidz3.dat.bz2 | Bin 0 -> 44503 bytes .../zpool_upgrade/zfs-pool-v2stripe1.dat.bz2 | Bin 0 -> 35305 bytes .../zpool_upgrade/zfs-pool-v2stripe2.dat.bz2 | Bin 0 -> 28513 bytes .../zpool_upgrade/zfs-pool-v2stripe3.dat.bz2 | Bin 0 -> 35344 bytes .../zpool_upgrade/zfs-pool-v3.dat.bz2 | Bin 0 -> 27124 bytes .../zfs-pool-v3hotspare1.dat.bz2 | Bin 0 -> 42097 bytes .../zfs-pool-v3hotspare2.dat.bz2 | Bin 0 -> 35584 bytes .../zfs-pool-v3hotspare3.dat.bz2 | Bin 0 -> 19501 bytes .../zpool_upgrade/zfs-pool-v3mirror1.dat.bz2 | Bin 
0 -> 28160 bytes .../zpool_upgrade/zfs-pool-v3mirror2.dat.bz2 | Bin 0 -> 28149 bytes .../zpool_upgrade/zfs-pool-v3mirror3.dat.bz2 | Bin 0 -> 28166 bytes .../zpool_upgrade/zfs-pool-v3raidz1.dat.bz2 | Bin 0 -> 29077 bytes .../zpool_upgrade/zfs-pool-v3raidz2.dat.bz2 | Bin 0 -> 29340 bytes .../zpool_upgrade/zfs-pool-v3raidz21.dat.bz2 | Bin 0 -> 28067 bytes .../zpool_upgrade/zfs-pool-v3raidz22.dat.bz2 | Bin 0 -> 27999 bytes .../zpool_upgrade/zfs-pool-v3raidz23.dat.bz2 | Bin 0 -> 28046 bytes .../zpool_upgrade/zfs-pool-v3raidz3.dat.bz2 | Bin 0 -> 29120 bytes .../zpool_upgrade/zfs-pool-v3stripe1.dat.bz2 | Bin 0 -> 26174 bytes .../zpool_upgrade/zfs-pool-v3stripe2.dat.bz2 | Bin 0 -> 24408 bytes .../zpool_upgrade/zfs-pool-v3stripe3.dat.bz2 | Bin 0 -> 26213 bytes .../zpool_upgrade/zfs-pool-v4.dat.bz2 | Bin 0 -> 39824 bytes .../zpool_upgrade/zfs-pool-v5.dat.bz2 | Bin 0 -> 44358 bytes .../zpool_upgrade/zfs-pool-v6.dat.bz2 | Bin 0 -> 42006 bytes .../zpool_upgrade/zfs-pool-v7.dat.bz2 | Bin 0 -> 38100 bytes .../zpool_upgrade/zfs-pool-v8.dat.bz2 | Bin 0 -> 38287 bytes .../zpool_upgrade/zfs-pool-v9.dat.bz2 | Bin 0 -> 33474 bytes .../zpool_upgrade/zfs-pool-v999.dat.bz2 | Bin 0 -> 31807 bytes .../zpool_upgrade/zfs-pool-vBROKEN.dat.bz2 | Bin 0 -> 26328 bytes .../cli_root/zpool_upgrade/zpool_upgrade.cfg | 161 + .../zpool_upgrade/zpool_upgrade.kshlib | 160 + .../zpool_upgrade/zpool_upgrade_001_pos.ksh | 71 + .../zpool_upgrade/zpool_upgrade_002_pos.ksh | 59 + .../zpool_upgrade/zpool_upgrade_003_pos.ksh | 59 + .../zpool_upgrade/zpool_upgrade_004_pos.ksh | 83 + .../zpool_upgrade/zpool_upgrade_005_neg.ksh | 54 + .../zpool_upgrade/zpool_upgrade_006_neg.ksh | 57 + .../zpool_upgrade/zpool_upgrade_007_pos.ksh | 68 + .../zpool_upgrade/zpool_upgrade_008_pos.ksh | 79 + .../zpool_upgrade/zpool_upgrade_009_neg.ksh | 66 + .../tests/functional/cli_user/Makefile.am | 5 + .../functional/cli_user/misc/Makefile.am | 46 + .../functional/cli_user/misc/cleanup.ksh | 50 + .../tests/functional/cli_user/misc/misc.cfg | 71 + .../tests/functional/cli_user/misc/setup.ksh | 161 + .../functional/cli_user/misc/zdb_001_neg.ksh | 82 + .../functional/cli_user/misc/zfs_001_neg.ksh | 59 + .../cli_user/misc/zfs_allow_001_neg.ksh | 67 + .../cli_user/misc/zfs_clone_001_neg.ksh | 54 + .../cli_user/misc/zfs_create_001_neg.ksh | 61 + .../cli_user/misc/zfs_destroy_001_neg.ksh | 67 + .../cli_user/misc/zfs_get_001_neg.ksh | 65 + .../cli_user/misc/zfs_inherit_001_neg.ksh | 56 + .../cli_user/misc/zfs_mount_001_neg.ksh | 57 + .../cli_user/misc/zfs_promote_001_neg.ksh | 56 + .../cli_user/misc/zfs_receive_001_neg.ksh | 58 + .../cli_user/misc/zfs_rename_001_neg.ksh | 56 + .../cli_user/misc/zfs_rollback_001_neg.ksh | 60 + .../cli_user/misc/zfs_send_001_neg.ksh | 67 + .../cli_user/misc/zfs_set_001_neg.ksh | 70 + .../cli_user/misc/zfs_share_001_neg.ksh | 63 + .../cli_user/misc/zfs_snapshot_001_neg.ksh | 56 + .../cli_user/misc/zfs_unallow_001_neg.ksh | 64 + .../cli_user/misc/zfs_unmount_001_neg.ksh | 65 + .../cli_user/misc/zfs_unshare_001_neg.ksh | 64 + .../cli_user/misc/zfs_upgrade_001_neg.ksh | 67 + .../cli_user/misc/zpool_001_neg.ksh | 60 + .../cli_user/misc/zpool_add_001_neg.ksh | 69 + .../cli_user/misc/zpool_attach_001_neg.ksh | 67 + .../cli_user/misc/zpool_clear_001_neg.ksh | 52 + .../cli_user/misc/zpool_create_001_neg.ksh | 69 + .../cli_user/misc/zpool_destroy_001_neg.ksh | 59 + .../cli_user/misc/zpool_detach_001_neg.ksh | 58 + .../cli_user/misc/zpool_export_001_neg.ksh | 65 + .../cli_user/misc/zpool_get_001_neg.ksh | 67 + 
.../cli_user/misc/zpool_history_001_neg.ksh | 55 + .../cli_user/misc/zpool_import_001_neg.ksh | 66 + .../cli_user/misc/zpool_import_002_neg.ksh | 64 + .../cli_user/misc/zpool_offline_001_neg.ksh | 66 + .../cli_user/misc/zpool_online_001_neg.ksh | 66 + .../cli_user/misc/zpool_remove_001_neg.ksh | 59 + .../cli_user/misc/zpool_replace_001_neg.ksh | 68 + .../cli_user/misc/zpool_scrub_001_neg.ksh | 53 + .../cli_user/misc/zpool_set_001_neg.ksh | 71 + .../cli_user/misc/zpool_status_001_neg.ksh | 76 + .../cli_user/misc/zpool_upgrade_001_neg.ksh | 65 + .../functional/cli_user/zfs_list/Makefile.am | 12 + .../functional/cli_user/zfs_list/cleanup.ksh | 36 + .../functional/cli_user/zfs_list/setup.ksh | 70 + .../functional/cli_user/zfs_list/zfs_list.cfg | 35 + .../cli_user/zfs_list/zfs_list.kshlib | 118 + .../cli_user/zfs_list/zfs_list_001_pos.ksh | 116 + .../cli_user/zfs_list/zfs_list_002_pos.ksh | 176 ++ .../cli_user/zfs_list/zfs_list_003_pos.ksh | 76 + .../cli_user/zfs_list/zfs_list_004_neg.ksh | 63 + .../cli_user/zfs_list/zfs_list_007_pos.ksh | 91 + .../cli_user/zfs_list/zfs_list_008_neg.ksh | 56 + .../cli_user/zpool_iostat/Makefile.am | 7 + .../cli_user/zpool_iostat/cleanup.ksh | 34 + .../cli_user/zpool_iostat/setup.ksh | 36 + .../zpool_iostat/zpool_iostat_001_neg.ksh | 63 + .../zpool_iostat/zpool_iostat_002_pos.ksh | 71 + .../zpool_iostat/zpool_iostat_003_neg.ksh | 65 + .../cli_user/zpool_list/Makefile.am | 6 + .../cli_user/zpool_list/cleanup.ksh | 34 + .../functional/cli_user/zpool_list/setup.ksh | 36 + .../zpool_list/zpool_list_001_pos.ksh | 64 + .../zpool_list/zpool_list_002_neg.ksh | 57 + .../tests/functional/compression/Makefile.am | 9 + .../tests/functional/compression/cleanup.ksh | 34 + .../tests/functional/compression/compress.cfg | 33 + .../compression/compress_001_pos.ksh | 74 + .../compression/compress_002_pos.ksh | 76 + .../compression/compress_003_pos.ksh | 96 + .../compression/compress_004_pos.ksh | 141 + .../tests/functional/compression/setup.ksh | 36 + .../tests/functional/ctime/.gitignore | 1 + .../tests/functional/ctime/Makefile.am | 12 + .../tests/functional/ctime/cleanup.ksh | 34 + .../tests/functional/ctime/ctime_001_pos.c | 347 +++ .../tests/functional/ctime/setup.ksh | 35 + .../tests/functional/delegate/Makefile.am | 26 + .../tests/functional/delegate/cleanup.ksh | 44 + .../tests/functional/delegate/delegate.cfg | 59 + .../delegate/delegate_common.kshlib | 1679 +++++++++++ .../tests/functional/delegate/setup.ksh | 59 + .../functional/delegate/zfs_allow_001_pos.ksh | 98 + .../functional/delegate/zfs_allow_002_pos.ksh | 79 + .../functional/delegate/zfs_allow_003_pos.ksh | 93 + .../functional/delegate/zfs_allow_004_pos.ksh | 96 + .../functional/delegate/zfs_allow_005_pos.ksh | 78 + .../functional/delegate/zfs_allow_006_pos.ksh | 72 + .../functional/delegate/zfs_allow_007_pos.ksh | 103 + .../functional/delegate/zfs_allow_008_pos.ksh | 78 + .../functional/delegate/zfs_allow_009_neg.ksh | 64 + .../functional/delegate/zfs_allow_010_pos.ksh | 113 + .../functional/delegate/zfs_allow_011_neg.ksh | 68 + .../functional/delegate/zfs_allow_012_neg.ksh | 79 + .../delegate/zfs_unallow_001_pos.ksh | 65 + .../delegate/zfs_unallow_002_pos.ksh | 61 + .../delegate/zfs_unallow_003_pos.ksh | 71 + .../delegate/zfs_unallow_004_pos.ksh | 59 + .../delegate/zfs_unallow_005_pos.ksh | 73 + .../delegate/zfs_unallow_006_pos.ksh | 71 + .../delegate/zfs_unallow_007_neg.ksh | 64 + .../delegate/zfs_unallow_008_neg.ksh | 75 + .../tests/functional/devices/Makefile.am | 9 + 
.../tests/functional/devices/cleanup.ksh | 34 + .../tests/functional/devices/devices.cfg | 32 + .../functional/devices/devices_001_pos.ksh | 66 + .../functional/devices/devices_002_neg.ksh | 66 + .../functional/devices/devices_003_pos.ksh | 49 + .../functional/devices/devices_common.kshlib | 126 + .../tests/functional/devices/setup.ksh | 35 + .../tests/functional/exec/.gitignore | 1 + .../tests/functional/exec/Makefile.am | 9 + .../tests/functional/exec/cleanup.ksh | 34 + .../tests/functional/exec/exec_001_pos.ksh | 63 + .../tests/functional/exec/exec_002_neg.ksh | 84 + .../zfs-tests/tests/functional/exec/setup.ksh | 35 + .../tests/functional/features/Makefile.am | 1 + .../features/async_destroy/Makefile.am | 5 + .../async_destroy/async_destroy_001_pos.ksh | 87 + .../features/async_destroy/cleanup.ksh | 34 + .../features/async_destroy/setup.ksh | 36 + .../tests/functional/grow_pool/Makefile.am | 4 + .../tests/functional/grow_pool/grow_pool.cfg | 72 + .../grow_pool/grow_pool_001_pos.ksh | 74 + .../functional/grow_replicas/Makefile.am | 4 + .../grow_replicas/grow_replicas.cfg | 78 + .../grow_replicas/grow_replicas_001_pos.ksh | 114 + .../tests/functional/history/Makefile.am | 24 + .../tests/functional/history/cleanup.ksh | 38 + .../tests/functional/history/history.cfg | 45 + .../functional/history/history_001_pos.ksh | 111 + .../functional/history/history_002_pos.ksh | 169 ++ .../functional/history/history_003_pos.ksh | 103 + .../functional/history/history_004_pos.ksh | 100 + .../functional/history/history_005_neg.ksh | 65 + .../functional/history/history_006_neg.ksh | 88 + .../functional/history/history_007_pos.ksh | 112 + .../functional/history/history_008_pos.ksh | 74 + .../functional/history/history_009_pos.ksh | 114 + .../functional/history/history_010_pos.ksh | 76 + .../functional/history/history_common.kshlib | 416 +++ .../history/i386.migratedpool.DAT.Z | Bin 0 -> 173047 bytes .../functional/history/i386.orig_history.txt | 12 + .../tests/functional/history/setup.ksh | 35 + .../history/sparc.migratedpool.DAT.Z | Bin 0 -> 163879 bytes .../functional/history/sparc.orig_history.txt | 12 + .../functional/history/zfs-pool-v4.dat.Z | Bin 0 -> 73415 bytes .../tests/functional/inheritance/Makefile.am | 53 + .../functional/inheritance/README.config | 67 + .../tests/functional/inheritance/README.state | 109 + .../tests/functional/inheritance/cleanup.ksh | 35 + .../functional/inheritance/config001.cfg | 33 + .../functional/inheritance/config002.cfg | 33 + .../functional/inheritance/config003.cfg | 33 + .../functional/inheritance/config004.cfg | 33 + .../functional/inheritance/config005.cfg | 33 + .../functional/inheritance/config006.cfg | 33 + .../functional/inheritance/config007.cfg | 33 + .../functional/inheritance/config008.cfg | 33 + .../functional/inheritance/config009.cfg | 33 + .../functional/inheritance/config010.cfg | 33 + .../functional/inheritance/config011.cfg | 33 + .../functional/inheritance/config012.cfg | 33 + .../functional/inheritance/config013.cfg | 33 + .../functional/inheritance/config014.cfg | 33 + .../functional/inheritance/config015.cfg | 33 + .../functional/inheritance/config016.cfg | 33 + .../functional/inheritance/config017.cfg | 33 + .../functional/inheritance/config018.cfg | 33 + .../functional/inheritance/config019.cfg | 33 + .../functional/inheritance/config020.cfg | 33 + .../functional/inheritance/config021.cfg | 33 + .../functional/inheritance/config022.cfg | 33 + .../functional/inheritance/config023.cfg | 33 + .../functional/inheritance/config024.cfg | 33 + 
.../functional/inheritance/inherit.kshlib | 114 + .../inheritance/inherit_001_pos.ksh | 456 +++ .../tests/functional/inheritance/state001.cfg | 44 + .../tests/functional/inheritance/state002.cfg | 45 + .../tests/functional/inheritance/state003.cfg | 43 + .../tests/functional/inheritance/state004.cfg | 44 + .../tests/functional/inheritance/state005.cfg | 45 + .../tests/functional/inheritance/state006.cfg | 47 + .../tests/functional/inheritance/state007.cfg | 45 + .../tests/functional/inheritance/state008.cfg | 44 + .../tests/functional/inheritance/state009.cfg | 57 + .../tests/functional/inheritance/state010.cfg | 56 + .../tests/functional/inheritance/state011.cfg | 58 + .../tests/functional/inheritance/state012.cfg | 62 + .../tests/functional/inheritance/state013.cfg | 56 + .../tests/functional/inheritance/state014.cfg | 62 + .../tests/functional/inheritance/state015.cfg | 66 + .../tests/functional/inheritance/state016.cfg | 62 + .../tests/functional/inheritance/state017.cfg | 67 + .../tests/functional/inheritance/state018.cfg | 64 + .../tests/functional/inheritance/state019.cfg | 63 + .../tests/functional/inheritance/state020.cfg | 64 + .../tests/functional/inheritance/state021.cfg | 64 + .../tests/functional/inheritance/state022.cfg | 63 + .../tests/functional/inheritance/state023.cfg | 65 + .../tests/functional/inheritance/state024.cfg | 63 + .../tests/functional/inuse/Makefile.am | 12 + .../tests/functional/inuse/inuse.cfg | 135 + .../tests/functional/inuse/inuse_001_pos.ksh | 90 + .../tests/functional/inuse/inuse_003_pos.ksh | 185 ++ .../tests/functional/inuse/inuse_004_pos.ksh | 94 + .../tests/functional/inuse/inuse_005_pos.ksh | 122 + .../tests/functional/inuse/inuse_006_pos.ksh | 125 + .../tests/functional/inuse/inuse_007_pos.ksh | 134 + .../tests/functional/inuse/inuse_008_pos.ksh | 111 + .../tests/functional/inuse/inuse_009_pos.ksh | 122 + .../tests/functional/inuse/setup.ksh | 36 + .../tests/functional/large_files/Makefile.am | 5 + .../tests/functional/large_files/cleanup.ksh | 34 + .../large_files/large_files_001_pos.ksh | 53 + .../tests/functional/large_files/setup.ksh | 36 + .../tests/functional/largest_pool/Makefile.am | 4 + .../functional/largest_pool/largest_pool.cfg | 43 + .../largest_pool/largest_pool_001_pos.ksh | 160 + .../tests/functional/link_count/Makefile.am | 5 + .../tests/functional/link_count/cleanup.ksh | 34 + .../functional/link_count/link_count_001.ksh | 89 + .../tests/functional/link_count/setup.ksh | 36 + .../tests/functional/migration/Makefile.am | 18 + .../tests/functional/migration/cleanup.ksh | 58 + .../tests/functional/migration/migration.cfg | 76 + .../functional/migration/migration.kshlib | 153 + .../migration/migration_001_pos.ksh | 66 + .../migration/migration_002_pos.ksh | 66 + .../migration/migration_003_pos.ksh | 66 + .../migration/migration_004_pos.ksh | 73 + .../migration/migration_005_pos.ksh | 73 + .../migration/migration_006_pos.ksh | 73 + .../migration/migration_007_pos.ksh | 66 + .../migration/migration_008_pos.ksh | 66 + .../migration/migration_009_pos.ksh | 66 + .../migration/migration_010_pos.ksh | 66 + .../migration/migration_011_pos.ksh | 66 + .../migration/migration_012_pos.ksh | 66 + .../tests/functional/migration/setup.ksh | 73 + .../tests/functional/mmap/Makefile.am | 6 + .../tests/functional/mmap/cleanup.ksh | 34 + .../functional/mmap/mmap_read_001_pos.ksh | 56 + .../functional/mmap/mmap_write_001_pos.ksh | 62 + .../zfs-tests/tests/functional/mmap/setup.ksh | 36 + .../tests/functional/mount/Makefile.am | 6 + 
.../tests/functional/mount/cleanup.ksh | 38 + .../tests/functional/mount/setup.ksh | 48 + .../tests/functional/mount/umount_001.ksh | 54 + .../tests/functional/mount/umountall_001.ksh | 59 + .../tests/functional/mv_files/Makefile.am | 8 + .../tests/functional/mv_files/cleanup.ksh | 48 + .../tests/functional/mv_files/mv_files.cfg | 43 + .../functional/mv_files/mv_files_001_pos.ksh | 69 + .../functional/mv_files/mv_files_002_pos.ksh | 71 + .../mv_files/mv_files_common.kshlib | 217 ++ .../tests/functional/mv_files/setup.ksh | 47 + .../tests/functional/nestedfs/Makefile.am | 5 + .../tests/functional/nestedfs/cleanup.ksh | 34 + .../functional/nestedfs/nestedfs_001_pos.ksh | 59 + .../tests/functional/nestedfs/setup.ksh | 36 + .../tests/functional/no_space/Makefile.am | 7 + .../tests/functional/no_space/cleanup.ksh | 49 + .../tests/functional/no_space/enospc.cfg | 38 + .../functional/no_space/enospc_001_pos.ksh | 76 + .../functional/no_space/enospc_002_pos.ksh | 74 + .../tests/functional/no_space/setup.ksh | 49 + .../tests/functional/nopwrite/Makefile.am | 13 + .../tests/functional/nopwrite/cleanup.ksh | 20 + .../tests/functional/nopwrite/nopwrite.shlib | 68 + .../functional/nopwrite/nopwrite_copies.ksh | 71 + .../functional/nopwrite/nopwrite_mtime.ksh | 81 + .../functional/nopwrite/nopwrite_negative.ksh | 90 + .../nopwrite/nopwrite_promoted_clone.ksh | 58 + .../functional/nopwrite/nopwrite_recsize.ksh | 57 + .../functional/nopwrite/nopwrite_sync.ksh | 55 + .../nopwrite/nopwrite_varying_compression.ksh | 64 + .../functional/nopwrite/nopwrite_volume.ksh | 58 + .../tests/functional/nopwrite/setup.ksh | 23 + .../functional/online_offline/Makefile.am | 8 + .../functional/online_offline/cleanup.ksh | 36 + .../online_offline/online_offline.cfg | 38 + .../online_offline/online_offline_001_pos.ksh | 94 + .../online_offline/online_offline_002_neg.ksh | 132 + .../online_offline/online_offline_003_neg.ksh | 81 + .../tests/functional/online_offline/setup.ksh | 47 + .../tests/functional/pool_names/Makefile.am | 4 + .../pool_names/pool_names_001_pos.ksh | 115 + .../pool_names/pool_names_002_neg.ksh | 131 + .../tests/functional/poolversion/Makefile.am | 6 + .../tests/functional/poolversion/cleanup.ksh | 42 + .../poolversion/poolversion_001_pos.ksh | 58 + .../poolversion/poolversion_002_pos.ksh | 71 + .../tests/functional/poolversion/setup.ksh | 45 + .../tests/functional/privilege/Makefile.am | 6 + .../tests/functional/privilege/cleanup.ksh | 49 + .../privilege/privilege_001_pos.ksh | 91 + .../privilege/privilege_002_pos.ksh | 101 + .../tests/functional/privilege/setup.ksh | 67 + .../tests/functional/quota/Makefile.am | 12 + .../tests/functional/quota/cleanup.ksh | 34 + .../tests/functional/quota/quota.cfg | 35 + .../tests/functional/quota/quota.kshlib | 95 + .../tests/functional/quota/quota_001_pos.ksh | 76 + .../tests/functional/quota/quota_002_pos.ksh | 73 + .../tests/functional/quota/quota_003_pos.ksh | 79 + .../tests/functional/quota/quota_004_pos.ksh | 74 + .../tests/functional/quota/quota_005_pos.ksh | 76 + .../tests/functional/quota/quota_006_neg.ksh | 71 + .../tests/functional/quota/setup.ksh | 36 + .../tests/functional/redundancy/Makefile.am | 10 + .../tests/functional/redundancy/cleanup.ksh | 38 + .../functional/redundancy/redundancy.cfg | 40 + .../functional/redundancy/redundancy.kshlib | 365 +++ .../redundancy/redundancy_001_pos.ksh | 76 + .../redundancy/redundancy_002_pos.ksh | 83 + .../redundancy/redundancy_003_pos.ksh | 93 + .../redundancy/redundancy_004_neg.ksh | 66 + 
.../tests/functional/redundancy/setup.ksh | 36 + .../tests/functional/refquota/Makefile.am | 10 + .../tests/functional/refquota/cleanup.ksh | 35 + .../functional/refquota/refquota_001_pos.ksh | 77 + .../functional/refquota/refquota_002_pos.ksh | 90 + .../functional/refquota/refquota_003_pos.ksh | 83 + .../functional/refquota/refquota_004_pos.ksh | 76 + .../functional/refquota/refquota_005_pos.ksh | 77 + .../functional/refquota/refquota_006_neg.ksh | 70 + .../tests/functional/refquota/setup.ksh | 36 + .../tests/functional/refreserv/Makefile.am | 10 + .../tests/functional/refreserv/cleanup.ksh | 35 + .../tests/functional/refreserv/refreserv.cfg | 31 + .../refreserv/refreserv_001_pos.ksh | 75 + .../refreserv/refreserv_002_pos.ksh | 114 + .../refreserv/refreserv_003_pos.ksh | 77 + .../refreserv/refreserv_004_pos.ksh | 90 + .../refreserv/refreserv_005_pos.ksh | 71 + .../tests/functional/refreserv/setup.ksh | 36 + .../tests/functional/rename_dirs/Makefile.am | 5 + .../tests/functional/rename_dirs/cleanup.ksh | 34 + .../rename_dirs/rename_dirs_001_pos.ksh | 71 + .../tests/functional/rename_dirs/setup.ksh | 35 + .../tests/functional/replacement/Makefile.am | 8 + .../tests/functional/replacement/cleanup.ksh | 36 + .../functional/replacement/replacement.cfg | 38 + .../replacement/replacement_001_pos.ksh | 160 + .../replacement/replacement_002_pos.ksh | 177 ++ .../replacement/replacement_003_pos.ksh | 164 + .../tests/functional/replacement/setup.ksh | 47 + .../tests/functional/reservation/Makefile.am | 24 + .../tests/functional/reservation/cleanup.ksh | 34 + .../functional/reservation/reservation.cfg | 44 + .../functional/reservation/reservation.shlib | 201 ++ .../reservation/reservation_001_pos.sh | 124 + .../reservation/reservation_002_pos.sh | 108 + .../reservation/reservation_003_pos.sh | 134 + .../reservation/reservation_004_pos.sh | 124 + .../reservation/reservation_005_pos.sh | 118 + .../reservation/reservation_006_pos.sh | 81 + .../reservation/reservation_007_pos.sh | 128 + .../reservation/reservation_008_pos.sh | 124 + .../reservation/reservation_009_pos.sh | 100 + .../reservation/reservation_010_pos.sh | 101 + .../reservation/reservation_011_pos.sh | 75 + .../reservation/reservation_012_pos.sh | 88 + .../reservation/reservation_013_pos.sh | 112 + .../reservation/reservation_014_pos.sh | 123 + .../reservation/reservation_015_pos.sh | 99 + .../reservation/reservation_016_pos.sh | 98 + .../reservation/reservation_017_pos.sh | 101 + .../reservation/reservation_018_pos.sh | 72 + .../tests/functional/reservation/setup.ksh | 35 + .../tests/functional/rootpool/Makefile.am | 7 + .../tests/functional/rootpool/cleanup.ksh | 36 + .../functional/rootpool/rootpool_002_neg.ksh | 68 + .../functional/rootpool/rootpool_003_neg.ksh | 61 + .../functional/rootpool/rootpool_007_neg.ksh | 70 + .../tests/functional/rootpool/setup.ksh | 34 + .../tests/functional/rsend/Makefile.am | 25 + .../tests/functional/rsend/cleanup.ksh | 45 + .../tests/functional/rsend/rsend.cfg | 37 + .../tests/functional/rsend/rsend.kshlib | 551 ++++ .../tests/functional/rsend/rsend_001_pos.ksh | 73 + .../tests/functional/rsend/rsend_002_pos.ksh | 93 + .../tests/functional/rsend/rsend_003_pos.ksh | 95 + .../tests/functional/rsend/rsend_004_pos.ksh | 120 + .../tests/functional/rsend/rsend_005_pos.ksh | 104 + .../tests/functional/rsend/rsend_006_pos.ksh | 82 + .../tests/functional/rsend/rsend_007_pos.ksh | 99 + .../tests/functional/rsend/rsend_008_pos.ksh | 128 + .../tests/functional/rsend/rsend_009_pos.ksh | 94 + 
.../tests/functional/rsend/rsend_010_pos.ksh | 77 + .../tests/functional/rsend/rsend_011_pos.ksh | 125 + .../tests/functional/rsend/rsend_012_pos.ksh | 201 ++ .../tests/functional/rsend/rsend_013_pos.ksh | 86 + .../tests/functional/rsend/rsend_014_pos.ksh | 56 + .../tests/functional/rsend/rsend_019_pos.ksh | 51 + .../tests/functional/rsend/rsend_020_pos.ksh | 49 + .../tests/functional/rsend/rsend_021_pos.ksh | 52 + .../tests/functional/rsend/rsend_022_pos.ksh | 60 + .../tests/functional/rsend/rsend_024_pos.ksh | 52 + .../tests/functional/rsend/setup.ksh | 45 + .../tests/functional/scrub_mirror/Makefile.am | 10 + .../tests/functional/scrub_mirror/cleanup.ksh | 50 + .../tests/functional/scrub_mirror/default.cfg | 70 + .../scrub_mirror/scrub_mirror_001_pos.ksh | 53 + .../scrub_mirror/scrub_mirror_002_pos.ksh | 53 + .../scrub_mirror/scrub_mirror_003_pos.ksh | 53 + .../scrub_mirror/scrub_mirror_004_pos.ksh | 53 + .../scrub_mirror/scrub_mirror_common.kshlib | 78 + .../tests/functional/scrub_mirror/setup.ksh | 51 + .../tests/functional/slog/Makefile.am | 20 + .../tests/functional/slog/cleanup.ksh | 54 + .../zfs-tests/tests/functional/slog/setup.ksh | 50 + .../zfs-tests/tests/functional/slog/slog.cfg | 41 + .../tests/functional/slog/slog.kshlib | 157 + .../tests/functional/slog/slog_001_pos.ksh | 68 + .../tests/functional/slog/slog_002_pos.ksh | 67 + .../tests/functional/slog/slog_003_pos.ksh | 74 + .../tests/functional/slog/slog_004_pos.ksh | 73 + .../tests/functional/slog/slog_005_pos.ksh | 65 + .../tests/functional/slog/slog_006_pos.ksh | 72 + .../tests/functional/slog/slog_007_pos.ksh | 93 + .../tests/functional/slog/slog_008_neg.ksh | 64 + .../tests/functional/slog/slog_009_neg.ksh | 69 + .../tests/functional/slog/slog_010_neg.ksh | 64 + .../tests/functional/slog/slog_011_neg.ksh | 70 + .../tests/functional/slog/slog_012_neg.ksh | 73 + .../tests/functional/slog/slog_013_pos.ksh | 94 + .../tests/functional/slog/slog_014_pos.ksh | 85 + .../tests/functional/snapshot/Makefile.am | 26 + .../tests/functional/snapshot/cleanup.ksh | 34 + .../functional/snapshot/clone_001_pos.ksh | 161 + .../functional/snapshot/rollback_001_pos.ksh | 115 + .../functional/snapshot/rollback_002_pos.ksh | 133 + .../functional/snapshot/rollback_003_pos.ksh | 106 + .../tests/functional/snapshot/setup.ksh | 36 + .../tests/functional/snapshot/snapshot.cfg | 53 + .../functional/snapshot/snapshot_001_pos.ksh | 91 + .../functional/snapshot/snapshot_002_pos.ksh | 134 + .../functional/snapshot/snapshot_003_pos.ksh | 103 + .../functional/snapshot/snapshot_004_pos.ksh | 90 + .../functional/snapshot/snapshot_005_pos.ksh | 90 + .../functional/snapshot/snapshot_006_pos.ksh | 132 + .../functional/snapshot/snapshot_007_pos.ksh | 107 + .../functional/snapshot/snapshot_008_pos.ksh | 100 + .../functional/snapshot/snapshot_009_pos.ksh | 119 + .../functional/snapshot/snapshot_010_pos.ksh | 101 + .../functional/snapshot/snapshot_011_pos.ksh | 113 + .../functional/snapshot/snapshot_012_pos.ksh | 104 + .../functional/snapshot/snapshot_013_pos.ksh | 99 + .../functional/snapshot/snapshot_014_pos.ksh | 78 + .../functional/snapshot/snapshot_015_pos.ksh | 121 + .../functional/snapshot/snapshot_016_pos.ksh | 101 + .../functional/snapshot/snapshot_017_pos.ksh | 202 ++ .../tests/functional/snapused/Makefile.am | 10 + .../tests/functional/snapused/cleanup.ksh | 34 + .../tests/functional/snapused/setup.ksh | 36 + .../tests/functional/snapused/snapused.kshlib | 185 ++ .../functional/snapused/snapused_001_pos.ksh | 91 + 
.../functional/snapused/snapused_002_pos.ksh | 82 + .../functional/snapused/snapused_003_pos.ksh | 82 + .../functional/snapused/snapused_004_pos.ksh | 95 + .../functional/snapused/snapused_005_pos.ksh | 73 + .../tests/functional/sparse/Makefile.am | 6 + .../tests/functional/sparse/cleanup.ksh | 34 + .../tests/functional/sparse/setup.ksh | 36 + .../tests/functional/sparse/sparse.cfg | 37 + .../functional/sparse/sparse_001_pos.ksh | 80 + .../tests/functional/threadsappend/.gitignore | 1 + .../functional/threadsappend/Makefile.am | 8 + .../functional/threadsappend/cleanup.ksh | 34 + .../tests/functional/threadsappend/setup.ksh | 36 + .../threadsappend/threadsappend_001_pos.ksh | 80 + .../tests/functional/truncate/Makefile.am | 7 + .../tests/functional/truncate/cleanup.ksh | 30 + .../tests/functional/truncate/setup.ksh | 32 + .../tests/functional/truncate/truncate.cfg | 32 + .../functional/truncate/truncate_001_pos.ksh | 75 + .../functional/truncate/truncate_002_pos.ksh | 63 + .../tests/functional/userquota/Makefile.am | 22 + .../tests/functional/userquota/cleanup.ksh | 41 + .../userquota/groupspace_001_pos.ksh | 79 + .../userquota/groupspace_002_pos.ksh | 79 + .../tests/functional/userquota/setup.ksh | 44 + .../tests/functional/userquota/userquota.cfg | 46 + .../userquota/userquota_001_pos.ksh | 74 + .../userquota/userquota_002_pos.ksh | 89 + .../userquota/userquota_003_pos.ksh | 61 + .../userquota/userquota_004_pos.ksh | 81 + .../userquota/userquota_005_neg.ksh | 94 + .../userquota/userquota_006_pos.ksh | 75 + .../userquota/userquota_007_pos.ksh | 75 + .../userquota/userquota_008_pos.ksh | 60 + .../userquota/userquota_009_pos.ksh | 92 + .../userquota/userquota_010_pos.ksh | 75 + .../userquota/userquota_011_pos.ksh | 127 + .../userquota/userquota_012_neg.ksh | 66 + .../userquota/userquota_common.kshlib | 120 + .../userquota/userspace_001_pos.ksh | 78 + .../userquota/userspace_002_pos.ksh | 81 + .../tests/functional/write_dirs/Makefile.am | 6 + .../tests/functional/write_dirs/cleanup.ksh | 34 + .../tests/functional/write_dirs/setup.ksh | 54 + .../write_dirs/write_dirs_001_pos.ksh | 76 + .../write_dirs/write_dirs_002_pos.ksh | 77 + .../tests/functional/xattr/Makefile.am | 18 + .../tests/functional/xattr/cleanup.ksh | 45 + .../tests/functional/xattr/setup.ksh | 64 + .../tests/functional/xattr/xattr_001_pos.ksh | 63 + .../tests/functional/xattr/xattr_002_neg.ksh | 56 + .../tests/functional/xattr/xattr_003_neg.ksh | 62 + .../tests/functional/xattr/xattr_004_pos.ksh | 87 + .../tests/functional/xattr/xattr_005_pos.ksh | 78 + .../tests/functional/xattr/xattr_006_pos.ksh | 63 + .../tests/functional/xattr/xattr_007_neg.ksh | 81 + .../tests/functional/xattr/xattr_008_pos.ksh | 80 + .../tests/functional/xattr/xattr_009_neg.ksh | 62 + .../tests/functional/xattr/xattr_010_neg.ksh | 66 + .../tests/functional/xattr/xattr_011_pos.ksh | 193 ++ .../tests/functional/xattr/xattr_012_pos.ksh | 103 + .../tests/functional/xattr/xattr_013_pos.ksh | 88 + .../functional/xattr/xattr_common.kshlib | 107 + .../tests/functional/zvol/Makefile.am | 10 + .../zfs-tests/tests/functional/zvol/zvol.cfg | 38 + .../functional/zvol/zvol_ENOSPC/Makefile.am | 5 + .../functional/zvol/zvol_ENOSPC/cleanup.ksh | 44 + .../functional/zvol/zvol_ENOSPC/setup.ksh | 45 + .../zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos.ksh | 80 + .../functional/zvol/zvol_cli/Makefile.am | 7 + .../functional/zvol/zvol_cli/cleanup.ksh | 39 + .../tests/functional/zvol/zvol_cli/setup.ksh | 39 + .../zvol/zvol_cli/zvol_cli_001_pos.ksh | 63 + 
.../zvol/zvol_cli/zvol_cli_002_pos.ksh | 62 + .../zvol/zvol_cli/zvol_cli_003_neg.ksh | 59 + .../tests/functional/zvol/zvol_common.shlib | 144 + .../functional/zvol/zvol_misc/Makefile.am | 10 + .../functional/zvol/zvol_misc/cleanup.ksh | 39 + .../tests/functional/zvol/zvol_misc/setup.ksh | 43 + .../zvol/zvol_misc/zvol_misc_001_neg.ksh | 64 + .../zvol/zvol_misc/zvol_misc_002_pos.ksh | 91 + .../zvol/zvol_misc/zvol_misc_003_neg.ksh | 75 + .../zvol/zvol_misc/zvol_misc_004_pos.ksh | 109 + .../zvol/zvol_misc/zvol_misc_005_neg.ksh | 75 + .../zvol/zvol_misc/zvol_misc_006_pos.ksh | 72 + .../functional/zvol/zvol_swap/Makefile.am | 11 + .../functional/zvol/zvol_swap/cleanup.ksh | 53 + .../tests/functional/zvol/zvol_swap/setup.ksh | 49 + .../functional/zvol/zvol_swap/zvol_swap.cfg | 40 + .../zvol/zvol_swap/zvol_swap_001_pos.ksh | 78 + .../zvol/zvol_swap/zvol_swap_002_pos.ksh | 64 + .../zvol/zvol_swap/zvol_swap_003_pos.ksh | 96 + .../zvol/zvol_swap/zvol_swap_004_pos.ksh | 83 + .../zvol/zvol_swap/zvol_swap_005_pos.ksh | 69 + .../zvol/zvol_swap/zvol_swap_006_pos.ksh | 108 + tests/zfs-tests/tests/stress/Makefile.am | 0 zfs-script-config.sh.in | 99 +- 1243 files changed, 89497 insertions(+), 1042 deletions(-) create mode 100644 config/user-commands.m4 create mode 100644 config/user-libattr.m4 delete mode 100755 scripts/zfault.sh create mode 100755 scripts/zfs-tests.sh create mode 100644 tests/Makefile.am create mode 100644 tests/README.md create mode 100644 tests/runfiles/Makefile.am create mode 100644 tests/runfiles/linux.run create mode 100644 tests/test-runner/Makefile.am create mode 100644 tests/test-runner/cmd/Makefile.am create mode 100755 tests/test-runner/cmd/test-runner.py create mode 100644 tests/test-runner/include/Makefile.am create mode 100644 tests/test-runner/include/logapi.shlib create mode 100644 tests/test-runner/include/stf.shlib create mode 100644 tests/test-runner/man/Makefile.am create mode 100644 tests/test-runner/man/test-runner.1 create mode 100644 tests/zfs-tests/Makefile.am create mode 100644 tests/zfs-tests/cmd/Makefile.am create mode 100644 tests/zfs-tests/cmd/chg_usr_exec/.gitignore create mode 100644 tests/zfs-tests/cmd/chg_usr_exec/Makefile.am create mode 100644 tests/zfs-tests/cmd/chg_usr_exec/chg_usr_exec.c create mode 100644 tests/zfs-tests/cmd/devname2devid/.gitignore create mode 100644 tests/zfs-tests/cmd/devname2devid/Makefile.am create mode 100644 tests/zfs-tests/cmd/devname2devid/devname2devid.c create mode 100644 tests/zfs-tests/cmd/dir_rd_update/.gitignore create mode 100644 tests/zfs-tests/cmd/dir_rd_update/Makefile.am create mode 100644 tests/zfs-tests/cmd/dir_rd_update/dir_rd_update.c create mode 100644 tests/zfs-tests/cmd/file_check/.gitignore create mode 100644 tests/zfs-tests/cmd/file_check/Makefile.am create mode 100644 tests/zfs-tests/cmd/file_check/file_check.c create mode 100644 tests/zfs-tests/cmd/file_common.h create mode 100644 tests/zfs-tests/cmd/file_trunc/.gitignore create mode 100644 tests/zfs-tests/cmd/file_trunc/Makefile.am create mode 100644 tests/zfs-tests/cmd/file_trunc/file_trunc.c create mode 100644 tests/zfs-tests/cmd/file_write/.gitignore create mode 100644 tests/zfs-tests/cmd/file_write/Makefile.am create mode 100644 tests/zfs-tests/cmd/file_write/file_write.c create mode 100644 tests/zfs-tests/cmd/largest_file/.gitignore create mode 100644 tests/zfs-tests/cmd/largest_file/Makefile.am create mode 100644 tests/zfs-tests/cmd/largest_file/largest_file.c create mode 100644 tests/zfs-tests/cmd/mkbusy/.gitignore create mode 100644 
tests/zfs-tests/cmd/mkbusy/Makefile.am create mode 100644 tests/zfs-tests/cmd/mkbusy/mkbusy.c create mode 100644 tests/zfs-tests/cmd/mkfile/.gitignore create mode 100644 tests/zfs-tests/cmd/mkfile/Makefile.am create mode 100644 tests/zfs-tests/cmd/mkfile/mkfile.c create mode 100644 tests/zfs-tests/cmd/mkfiles/.gitignore create mode 100644 tests/zfs-tests/cmd/mkfiles/Makefile.am create mode 100644 tests/zfs-tests/cmd/mkfiles/mkfiles.c create mode 100644 tests/zfs-tests/cmd/mktree/.gitignore create mode 100644 tests/zfs-tests/cmd/mktree/Makefile.am create mode 100644 tests/zfs-tests/cmd/mktree/mktree.c create mode 100644 tests/zfs-tests/cmd/mmap_exec/.gitignore create mode 100644 tests/zfs-tests/cmd/mmap_exec/Makefile.am create mode 100644 tests/zfs-tests/cmd/mmap_exec/mmap_exec.c create mode 100644 tests/zfs-tests/cmd/mmapwrite/.gitignore create mode 100644 tests/zfs-tests/cmd/mmapwrite/Makefile.am create mode 100644 tests/zfs-tests/cmd/mmapwrite/mmapwrite.c create mode 100644 tests/zfs-tests/cmd/randfree_file/.gitignore create mode 100644 tests/zfs-tests/cmd/randfree_file/Makefile.am create mode 100644 tests/zfs-tests/cmd/randfree_file/randfree_file.c create mode 100644 tests/zfs-tests/cmd/readmmap/.gitignore create mode 100644 tests/zfs-tests/cmd/readmmap/Makefile.am create mode 100644 tests/zfs-tests/cmd/readmmap/readmmap.c create mode 100644 tests/zfs-tests/cmd/rename_dir/.gitignore create mode 100644 tests/zfs-tests/cmd/rename_dir/Makefile.am create mode 100644 tests/zfs-tests/cmd/rename_dir/rename_dir.c create mode 100644 tests/zfs-tests/cmd/rm_lnkcnt_zero_file/.gitignore create mode 100644 tests/zfs-tests/cmd/rm_lnkcnt_zero_file/Makefile.am create mode 100644 tests/zfs-tests/cmd/rm_lnkcnt_zero_file/rm_lnkcnt_zero_file.c create mode 100644 tests/zfs-tests/cmd/threadsappend/.gitignore create mode 100644 tests/zfs-tests/cmd/threadsappend/Makefile.am create mode 100644 tests/zfs-tests/cmd/threadsappend/threadsappend.c create mode 100644 tests/zfs-tests/cmd/xattrtest/.gitignore create mode 100644 tests/zfs-tests/cmd/xattrtest/Makefile.am create mode 100644 tests/zfs-tests/cmd/xattrtest/xattrtest.c create mode 100644 tests/zfs-tests/include/.gitignore create mode 100644 tests/zfs-tests/include/Makefile.am create mode 100644 tests/zfs-tests/include/commands.cfg.in create mode 100644 tests/zfs-tests/include/default.cfg.in create mode 100644 tests/zfs-tests/include/libtest.shlib create mode 100644 tests/zfs-tests/include/math.shlib create mode 100644 tests/zfs-tests/include/properties.shlib create mode 120000 tests/zfs-tests/include/zfs_commands.cfg create mode 100644 tests/zfs-tests/tests/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/acl/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/acl/acl.cfg create mode 100644 tests/zfs-tests/tests/functional/acl/acl_common.kshlib create mode 100644 tests/zfs-tests/tests/functional/acl/posix/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/acl/posix/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/acl/posix/posix_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/acl/posix/posix_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/acl/posix/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/atime/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/atime/atime.cfg create mode 100755 tests/zfs-tests/tests/functional/atime/atime_001_pos.ksh create mode 100755 
tests/zfs-tests/tests/functional/atime/atime_002_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/atime/atime_common.kshlib create mode 100755 tests/zfs-tests/tests/functional/atime/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/atime/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/bootfs/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/bootfs/bootfs_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/bootfs/bootfs_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/bootfs/bootfs_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/bootfs/bootfs_004_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/bootfs/bootfs_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/bootfs/bootfs_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/bootfs/bootfs_007_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/bootfs/bootfs_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/bootfs/bootfs_008_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/bootfs/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/bootfs/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cache/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/cache/cache.cfg create mode 100644 tests/zfs-tests/tests/functional/cache/cache.kshlib create mode 100755 tests/zfs-tests/tests/functional/cache/cache_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/cache_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/cache_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/cache_004_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/cache_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/cache_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/cache_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/cache_008_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/cache_009_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/cache_010_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/cache_011_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cache/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cachefile/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/cachefile/cachefile.cfg create mode 100644 tests/zfs-tests/tests/functional/cachefile/cachefile.kshlib create mode 100755 tests/zfs-tests/tests/functional/cachefile/cachefile_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cachefile/cachefile_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cachefile/cachefile_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/casenorm/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/casenorm/case_all_values.ksh create mode 100644 tests/zfs-tests/tests/functional/casenorm/casenorm.cfg create mode 100755 tests/zfs-tests/tests/functional/casenorm/casenorm.kshlib create mode 100755 tests/zfs-tests/tests/functional/casenorm/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/insensitive_formd_delete.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/insensitive_formd_lookup.ksh create mode 100755 
tests/zfs-tests/tests/functional/casenorm/insensitive_none_delete.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/insensitive_none_lookup.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/mixed_formd_delete.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/mixed_formd_lookup.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/mixed_formd_lookup_ci.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/mixed_none_delete.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/mixed_none_lookup.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/mixed_none_lookup_ci.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/norm_all_values.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/sensitive_formd_delete.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/sensitive_formd_lookup.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/sensitive_none_delete.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/sensitive_none_lookup.ksh create mode 100755 tests/zfs-tests/tests/functional/casenorm/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/clean_mirror/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/clean_mirror/clean_mirror_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/clean_mirror/clean_mirror_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/clean_mirror/clean_mirror_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/clean_mirror/clean_mirror_004_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/clean_mirror/clean_mirror_common.kshlib create mode 100755 tests/zfs-tests/tests/functional/clean_mirror/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/clean_mirror/default.cfg create mode 100755 tests/zfs-tests/tests/functional/clean_mirror/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/cli_root/cli_common.kshlib create mode 100644 tests/zfs-tests/tests/functional/cli_root/zdb/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zdb/zdb_001_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs/zfs_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs/zfs_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs/zfs_003_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_clone/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_clone/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_clone/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_006_pos.ksh create mode 100755 
tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_008_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_009_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_010_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_copies/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_copies/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_copies/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies.cfg create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies.kshlib create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_004_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_006_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_create/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_create/properties.kshlib create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_008_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_009_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_010_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_011_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_012_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_013_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_common.kshlib create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_001_pos.ksh create 
mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_006_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_009_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_010_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_011_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_012_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_013_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_014_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_016_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_common.kshlib create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_get/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_006_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_009_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_010_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_common.kshlib create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_list_d.kshlib create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_inherit/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_inherit/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_inherit/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_inherit/zfs_inherit_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_inherit/zfs_inherit_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_inherit/zfs_inherit_003_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_mount/Makefile.am create mode 100755 
tests/zfs-tests/tests/functional/cli_root/zfs_mount/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount.cfg create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount.kshlib create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_009_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_010_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_011_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_all_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_promote/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_promote/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_promote/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_006_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_promote/zfs_promote_008_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_property/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_property/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_property/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_property/zfs_written_property_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_receive/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_003_pos.ksh create mode 100755 
tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_009_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_010_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_011_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_012_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_rename/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename.cfg create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename.kshlib create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_004_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_009_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_010_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_011_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_012_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_013_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_reservation/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_reservation/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_reservation/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_reservation/zfs_reservation_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_reservation/zfs_reservation_002_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_rollback/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rollback/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rollback/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_002_pos.ksh create mode 100755 
tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_003_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_004_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_send/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_send/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_send/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_004_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_007_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_set/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/cache_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/cache_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/canmount_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/canmount_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/canmount_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/canmount_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/checksum_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/compression_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/mountpoint_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/mountpoint_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/mountpoint_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/onoffs_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/property_alias_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/readonly_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/reservation_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/ro_props_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/share_mount_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/snapdir_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/user_property_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/user_property_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/user_property_003_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/user_property_004_pos.ksh create mode 100755 
tests/zfs-tests/tests/functional/cli_root/zfs_set/version_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_003_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_share/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_008_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_009_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_010_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_011_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_003_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_004_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount.cfg create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount.kshlib create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_001_pos.ksh 
create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_008_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_all_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_unshare/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unshare/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unshare/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_004_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_005_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade.kshlib create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade_006_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/zfs_upgrade_007_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool/zpool_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool/zpool_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool/zpool_003_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.cfg create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib 
create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_008_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_009_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_attach/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_attach/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_attach/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_attach/zpool_attach_001_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_clear/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_clear/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_clear/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_clear/zpool_clear.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_clear/zpool_clear_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_clear/zpool_clear_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_clear/zpool_clear_003_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create.cfg create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create.shlib create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_009_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_010_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_011_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_012_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_014_neg.ksh 
create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_015_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_016_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_017_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_018_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_019_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_020_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_021_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_022_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_023_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_features_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_features_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_features_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_features_004_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_destroy/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_destroy/zpool_destroy.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_destroy/zpool_destroy_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_destroy/zpool_destroy_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_destroy/zpool_destroy_003_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_detach/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_detach/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_detach/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_detach/zpool_detach_001_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_expand/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_expand/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_expand/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_003_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_export/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_export/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_export/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_export/zpool_export_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_export/zpool_export_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_export/zpool_export_003_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_export/zpool_export_004_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_get/Makefile.am create mode 
100755 tests/zfs-tests/tests/functional/cli_root/zpool_get/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_get/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get_004_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_history/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_history/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_history/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_history/zpool_history_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_history/zpool_history_002_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_import/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_import/unclean_export.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_009_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_010_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_011_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_012_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_013_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_all_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_features_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_features_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_features_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_missing_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_missing_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_missing_003_pos.ksh 
create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_rename_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_offline/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_offline/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_offline/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_offline/zpool_offline_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_offline/zpool_offline_002_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_online/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_online/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_online/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_online/zpool_online_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_online/zpool_online_002_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_remove/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_remove/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_remove/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_remove/zpool_remove.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_remove/zpool_remove_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_remove/zpool_remove_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_remove/zpool_remove_003_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_replace/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_replace/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_replace/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_replace/zpool_replace_001_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_scrub/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_scrub/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub.cfg create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_005_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_set/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_003_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_status/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_status/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_status/setup.ksh create mode 100755 
tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_status/zpool_status_002_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-broken-mirror1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-broken-mirror2.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v10.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v11.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v12.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v13.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v14.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v15.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v1mirror1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v1mirror2.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v1mirror3.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v1raidz1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v1raidz2.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v1raidz3.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v1stripe1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v1stripe2.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v1stripe3.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v2.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v2mirror1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v2mirror2.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v2mirror3.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v2raidz1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v2raidz2.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v2raidz3.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v2stripe1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v2stripe2.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v2stripe3.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3hotspare1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3hotspare2.dat.bz2 create mode 
100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3hotspare3.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3mirror1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3mirror2.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3mirror3.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3raidz1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3raidz2.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3raidz21.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3raidz22.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3raidz23.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3raidz3.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3stripe1.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3stripe2.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v3stripe3.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v4.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v5.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v6.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v7.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v8.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v9.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-v999.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zfs-pool-vBROKEN.dat.bz2 create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade.cfg create mode 100644 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade.kshlib create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_006_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_009_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_user/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/cli_user/misc/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_user/misc/misc.cfg create mode 100755 
tests/zfs-tests/tests/functional/cli_user/misc/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zdb_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_allow_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_clone_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_create_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_destroy_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_get_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_inherit_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_mount_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_promote_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_receive_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_rename_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_rollback_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_send_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_set_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_share_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_snapshot_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_unallow_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_unmount_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_unshare_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zfs_upgrade_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_add_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_attach_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_clear_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_create_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_destroy_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_detach_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_export_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_get_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_history_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_import_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_import_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_offline_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_online_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_remove_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_replace_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_scrub_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_set_001_neg.ksh create mode 100755 
tests/zfs-tests/tests/functional/cli_user/misc/zpool_status_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/misc/zpool_upgrade_001_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_user/zfs_list/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_user/zfs_list/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zfs_list/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list.cfg create mode 100644 tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list.kshlib create mode 100755 tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_004_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zfs_list/zfs_list_008_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_user/zpool_iostat/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_user/zpool_iostat/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zpool_iostat/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zpool_iostat/zpool_iostat_003_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/cli_user/zpool_list/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/cli_user/zpool_list/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zpool_list/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zpool_list/zpool_list_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/cli_user/zpool_list/zpool_list_002_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/compression/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/compression/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/compression/compress.cfg create mode 100755 tests/zfs-tests/tests/functional/compression/compress_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/compression/compress_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/compression/compress_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/compression/compress_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/compression/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/ctime/.gitignore create mode 100644 tests/zfs-tests/tests/functional/ctime/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/ctime/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/ctime/ctime_001_pos.c create mode 100755 tests/zfs-tests/tests/functional/ctime/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/delegate/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/delegate/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/delegate/delegate.cfg create mode 100644 tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib create mode 100755 tests/zfs-tests/tests/functional/delegate/setup.ksh create mode 100755 
tests/zfs-tests/tests/functional/delegate/zfs_allow_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_allow_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_allow_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_allow_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_allow_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_allow_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_allow_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_allow_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_allow_009_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_allow_010_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_allow_011_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_allow_012_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_unallow_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_unallow_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_unallow_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_unallow_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_unallow_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_unallow_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_unallow_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/delegate/zfs_unallow_008_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/devices/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/devices/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/devices/devices.cfg create mode 100755 tests/zfs-tests/tests/functional/devices/devices_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/devices/devices_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/devices/devices_003_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/devices/devices_common.kshlib create mode 100755 tests/zfs-tests/tests/functional/devices/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/exec/.gitignore create mode 100644 tests/zfs-tests/tests/functional/exec/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/exec/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/exec/exec_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/exec/exec_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/exec/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/features/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/features/async_destroy/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/features/async_destroy/async_destroy_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/features/async_destroy/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/features/async_destroy/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/grow_pool/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/grow_pool/grow_pool.cfg create mode 100755 tests/zfs-tests/tests/functional/grow_pool/grow_pool_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/grow_replicas/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/grow_replicas/grow_replicas.cfg create mode 100755 
tests/zfs-tests/tests/functional/grow_replicas/grow_replicas_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/history/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/history/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/history/history.cfg create mode 100755 tests/zfs-tests/tests/functional/history/history_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/history/history_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/history/history_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/history/history_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/history/history_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/history/history_006_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/history/history_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/history/history_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/history/history_009_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/history/history_010_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/history/history_common.kshlib create mode 100644 tests/zfs-tests/tests/functional/history/i386.migratedpool.DAT.Z create mode 100644 tests/zfs-tests/tests/functional/history/i386.orig_history.txt create mode 100755 tests/zfs-tests/tests/functional/history/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/history/sparc.migratedpool.DAT.Z create mode 100644 tests/zfs-tests/tests/functional/history/sparc.orig_history.txt create mode 100644 tests/zfs-tests/tests/functional/history/zfs-pool-v4.dat.Z create mode 100644 tests/zfs-tests/tests/functional/inheritance/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/inheritance/README.config create mode 100644 tests/zfs-tests/tests/functional/inheritance/README.state create mode 100755 tests/zfs-tests/tests/functional/inheritance/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/inheritance/config001.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config002.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config003.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config004.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config005.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config006.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config007.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config008.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config009.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config010.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config011.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config012.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config013.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config014.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config015.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config016.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config017.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config018.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config019.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config020.cfg create mode 100644 
tests/zfs-tests/tests/functional/inheritance/config021.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config022.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config023.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/config024.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/inherit.kshlib create mode 100755 tests/zfs-tests/tests/functional/inheritance/inherit_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/inheritance/state001.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state002.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state003.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state004.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state005.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state006.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state007.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state008.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state009.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state010.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state011.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state012.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state013.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state014.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state015.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state016.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state017.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state018.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state019.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state020.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state021.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state022.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state023.cfg create mode 100644 tests/zfs-tests/tests/functional/inheritance/state024.cfg create mode 100644 tests/zfs-tests/tests/functional/inuse/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/inuse/inuse.cfg create mode 100755 tests/zfs-tests/tests/functional/inuse/inuse_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/inuse/inuse_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/inuse/inuse_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/inuse/inuse_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/inuse/inuse_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/inuse/inuse_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/inuse/inuse_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/inuse/inuse_009_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/inuse/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/large_files/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/large_files/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/large_files/large_files_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/large_files/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/largest_pool/Makefile.am create mode 100644 
tests/zfs-tests/tests/functional/largest_pool/largest_pool.cfg create mode 100755 tests/zfs-tests/tests/functional/largest_pool/largest_pool_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/link_count/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/link_count/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/link_count/link_count_001.ksh create mode 100755 tests/zfs-tests/tests/functional/link_count/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/migration/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/migration/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/migration/migration.cfg create mode 100644 tests/zfs-tests/tests/functional/migration/migration.kshlib create mode 100755 tests/zfs-tests/tests/functional/migration/migration_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/migration_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/migration_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/migration_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/migration_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/migration_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/migration_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/migration_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/migration_009_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/migration_010_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/migration_011_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/migration_012_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/migration/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/mmap/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/mmap/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/mmap/mmap_read_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/mmap/mmap_write_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/mmap/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/mount/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/mount/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/mount/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/mount/umount_001.ksh create mode 100755 tests/zfs-tests/tests/functional/mount/umountall_001.ksh create mode 100644 tests/zfs-tests/tests/functional/mv_files/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/mv_files/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/mv_files/mv_files.cfg create mode 100755 tests/zfs-tests/tests/functional/mv_files/mv_files_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/mv_files/mv_files_002_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/mv_files/mv_files_common.kshlib create mode 100755 tests/zfs-tests/tests/functional/mv_files/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/nestedfs/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/nestedfs/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/nestedfs/nestedfs_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/nestedfs/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/no_space/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/no_space/cleanup.ksh create 
mode 100644 tests/zfs-tests/tests/functional/no_space/enospc.cfg create mode 100755 tests/zfs-tests/tests/functional/no_space/enospc_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/no_space/enospc_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/no_space/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/nopwrite/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/nopwrite/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/nopwrite/nopwrite.shlib create mode 100755 tests/zfs-tests/tests/functional/nopwrite/nopwrite_copies.ksh create mode 100755 tests/zfs-tests/tests/functional/nopwrite/nopwrite_mtime.ksh create mode 100755 tests/zfs-tests/tests/functional/nopwrite/nopwrite_negative.ksh create mode 100755 tests/zfs-tests/tests/functional/nopwrite/nopwrite_promoted_clone.ksh create mode 100755 tests/zfs-tests/tests/functional/nopwrite/nopwrite_recsize.ksh create mode 100755 tests/zfs-tests/tests/functional/nopwrite/nopwrite_sync.ksh create mode 100755 tests/zfs-tests/tests/functional/nopwrite/nopwrite_varying_compression.ksh create mode 100755 tests/zfs-tests/tests/functional/nopwrite/nopwrite_volume.ksh create mode 100755 tests/zfs-tests/tests/functional/nopwrite/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/online_offline/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/online_offline/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/online_offline/online_offline.cfg create mode 100755 tests/zfs-tests/tests/functional/online_offline/online_offline_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/online_offline/online_offline_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/online_offline/online_offline_003_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/online_offline/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/pool_names/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/pool_names/pool_names_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/pool_names/pool_names_002_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/poolversion/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/poolversion/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/poolversion/poolversion_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/poolversion/poolversion_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/poolversion/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/privilege/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/privilege/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/privilege/privilege_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/privilege/privilege_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/privilege/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/quota/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/quota/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/quota/quota.cfg create mode 100644 tests/zfs-tests/tests/functional/quota/quota.kshlib create mode 100755 tests/zfs-tests/tests/functional/quota/quota_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/quota/quota_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/quota/quota_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/quota/quota_004_pos.ksh create mode 100755 
tests/zfs-tests/tests/functional/quota/quota_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/quota/quota_006_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/quota/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/redundancy/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/redundancy/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/redundancy/redundancy.cfg create mode 100644 tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib create mode 100755 tests/zfs-tests/tests/functional/redundancy/redundancy_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/redundancy/redundancy_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/redundancy/redundancy_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/redundancy/redundancy_004_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/redundancy/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/refquota/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/refquota/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/refquota/refquota_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/refquota/refquota_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/refquota/refquota_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/refquota/refquota_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/refquota/refquota_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/refquota/refquota_006_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/refquota/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/refreserv/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/refreserv/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/refreserv/refreserv.cfg create mode 100755 tests/zfs-tests/tests/functional/refreserv/refreserv_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/refreserv/refreserv_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/refreserv/refreserv_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/refreserv/refreserv_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/refreserv/refreserv_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/refreserv/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/rename_dirs/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/rename_dirs/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/rename_dirs/rename_dirs_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rename_dirs/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/replacement/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/replacement/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/replacement/replacement.cfg create mode 100755 tests/zfs-tests/tests/functional/replacement/replacement_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/replacement/replacement_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/replacement/replacement_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/replacement/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/reservation/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/reservation/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/reservation/reservation.cfg create mode 100644 tests/zfs-tests/tests/functional/reservation/reservation.shlib 
create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_001_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_002_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_003_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_004_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_005_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_006_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_007_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_008_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_009_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_010_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_011_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_012_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_013_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_014_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_015_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_016_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_017_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/reservation_018_pos.sh create mode 100755 tests/zfs-tests/tests/functional/reservation/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/rootpool/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/rootpool/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/rootpool/rootpool_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/rootpool/rootpool_003_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/rootpool/rootpool_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/rootpool/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/rsend/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/rsend/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/rsend/rsend.cfg create mode 100644 tests/zfs-tests/tests/functional/rsend/rsend.kshlib create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_009_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_010_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_011_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_012_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/rsend_013_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/rsend/rsend_014_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/rsend/rsend_019_pos.ksh create mode 100644 
tests/zfs-tests/tests/functional/rsend/rsend_020_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/rsend/rsend_021_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/rsend/rsend_022_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/rsend/rsend_024_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/rsend/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/scrub_mirror/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/scrub_mirror/cleanup.ksh create mode 100644 tests/zfs-tests/tests/functional/scrub_mirror/default.cfg create mode 100755 tests/zfs-tests/tests/functional/scrub_mirror/scrub_mirror_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/scrub_mirror/scrub_mirror_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/scrub_mirror/scrub_mirror_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/scrub_mirror/scrub_mirror_004_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/scrub_mirror/scrub_mirror_common.kshlib create mode 100755 tests/zfs-tests/tests/functional/scrub_mirror/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/slog/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/slog/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/slog/slog.cfg create mode 100644 tests/zfs-tests/tests/functional/slog/slog.kshlib create mode 100755 tests/zfs-tests/tests/functional/slog/slog_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_008_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_009_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_010_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_011_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_012_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_013_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/slog/slog_014_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/snapshot/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/snapshot/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/clone_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/rollback_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/rollback_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/rollback_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/snapshot/snapshot.cfg create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_005_pos.ksh create mode 100755 
tests/zfs-tests/tests/functional/snapshot/snapshot_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_009_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_010_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_011_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_012_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_013_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_014_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_015_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_016_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapshot/snapshot_017_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/snapused/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/snapused/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/snapused/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/snapused/snapused.kshlib create mode 100755 tests/zfs-tests/tests/functional/snapused/snapused_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapused/snapused_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapused/snapused_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapused/snapused_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/snapused/snapused_005_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/sparse/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/sparse/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/sparse/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/sparse/sparse.cfg create mode 100755 tests/zfs-tests/tests/functional/sparse/sparse_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/threadsappend/.gitignore create mode 100644 tests/zfs-tests/tests/functional/threadsappend/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/threadsappend/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/threadsappend/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/threadsappend/threadsappend_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/truncate/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/truncate/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/truncate/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/truncate/truncate.cfg create mode 100755 tests/zfs-tests/tests/functional/truncate/truncate_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/truncate/truncate_002_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/userquota/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/userquota/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/groupspace_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/groupspace_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/userquota/userquota.cfg create mode 100755 tests/zfs-tests/tests/functional/userquota/userquota_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/userquota_002_pos.ksh create mode 100755 
tests/zfs-tests/tests/functional/userquota/userquota_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/userquota_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/userquota_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/userquota_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/userquota_007_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/userquota_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/userquota_009_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/userquota_010_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/userquota_011_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/userquota_012_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/userquota/userquota_common.kshlib create mode 100755 tests/zfs-tests/tests/functional/userquota/userspace_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/userquota/userspace_002_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/write_dirs/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/write_dirs/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/write_dirs/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/write_dirs/write_dirs_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/write_dirs/write_dirs_002_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/xattr/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/xattr/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_002_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_003_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_006_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_007_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_008_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_009_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_010_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_011_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_012_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/xattr/xattr_013_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/xattr/xattr_common.kshlib create mode 100644 tests/zfs-tests/tests/functional/zvol/Makefile.am create mode 100644 tests/zfs-tests/tests/functional/zvol/zvol.cfg create mode 100644 tests/zfs-tests/tests/functional/zvol/zvol_ENOSPC/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_ENOSPC/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_ENOSPC/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/zvol/zvol_cli/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_cli/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_cli/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_cli/zvol_cli_001_pos.ksh create mode 100755 
tests/zfs-tests/tests/functional/zvol/zvol_cli/zvol_cli_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_cli/zvol_cli_003_neg.ksh create mode 100644 tests/zfs-tests/tests/functional/zvol/zvol_common.shlib create mode 100644 tests/zfs-tests/tests/functional/zvol/zvol_misc/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_misc/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_misc/setup.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_001_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_003_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_005_neg.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_006_pos.ksh create mode 100644 tests/zfs-tests/tests/functional/zvol/zvol_swap/Makefile.am create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_swap/cleanup.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_swap/setup.ksh create mode 100644 tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap.cfg create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap_001_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap_002_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap_003_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap_004_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap_005_pos.ksh create mode 100755 tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap_006_pos.ksh create mode 100644 tests/zfs-tests/tests/stress/Makefile.am diff --git a/Makefile.am b/Makefile.am index f8abb5f2c..ca8c8a0aa 100644 --- a/Makefile.am +++ b/Makefile.am @@ -6,7 +6,7 @@ include config/tgz.am SUBDIRS = include rpm if CONFIG_USER -SUBDIRS += udev etc man scripts lib cmd contrib +SUBDIRS += udev etc man scripts tests lib cmd contrib endif if CONFIG_KERNEL SUBDIRS += module diff --git a/TEST b/TEST index dd599d119..1cdbde8aa 100644 --- a/TEST +++ b/TEST @@ -37,6 +37,12 @@ TEST_ZCONFIG_OPTIONS="-c -s10" #TEST_XFSTESTS_VDEV="/var/tmp/vdev" #TEST_XFSTESTS_OPTIONS="" +### zfs-tests.sh +#TEST_ZFSTESTS_SKIP="yes" +#TEST_ZFSTESTS_DISKS="vdb vdc vdd" +#TEST_ZFSTESTS_DISKSIZE="8G" +#TEST_ZFSTESTS_RUNFILE="linux.run" + ### filebench #TEST_FILEBENCH_SKIP="yes" #TEST_FILEBENCH_URL="http://build.zfsonlinux.org/" diff --git a/config/user-commands.m4 b/config/user-commands.m4 new file mode 100644 index 000000000..655b99241 --- /dev/null +++ b/config/user-commands.m4 @@ -0,0 +1,171 @@ +dnl # +dnl # Commands common to multiple platforms. They generally behave +dnl # in the same way and take similar options. 
+dnl # +AC_DEFUN([ZFS_AC_CONFIG_USER_COMMANDS_COMMON], [ + AC_PATH_TOOL(AWK, awk, "") + AC_PATH_TOOL(BASENAME, basename, "") + AC_PATH_TOOL(BC, bc, "") + AC_PATH_TOOL(BUNZIP2, bunzip2, "") + AC_PATH_TOOL(BZCAT, bzcat, "") + AC_PATH_TOOL(CAT, cat, "") + AC_PATH_TOOL(CD, cd, "cd") dnl # Builtin in bash + AC_PATH_TOOL(CHGRP, chgrp, "") + AC_PATH_TOOL(CHMOD, chmod, "") + AC_PATH_TOOL(CHOWN, chown, "") + AC_PATH_TOOL(CKSUM, cksum, "") + AC_PATH_TOOL(CMP, cmp, "") + AC_PATH_TOOL(CP, cp, "") + AC_PATH_TOOL(CPIO, cpio, "") + AC_PATH_TOOL(CUT, cut, "") + AC_PATH_TOOL(DATE, date, "") + AC_PATH_TOOL(DD, dd, "") + AC_PATH_TOOL(DF, df, "") + AC_PATH_TOOL(DIFF, diff, "") + AC_PATH_TOOL(DIRNAME, dirname, "") + AC_PATH_TOOL(DU, du, "") + AC_PATH_TOOL(ECHO, echo, "") + AC_PATH_TOOL(EGREP, egrep, "") + AC_PATH_TOOL(FDISK, fdisk, "") + AC_PATH_TOOL(FGREP, fgrep, "") + AC_PATH_TOOL(FILE, file, "") + AC_PATH_TOOL(FIND, find, "") + AC_PATH_TOOL(FSCK, fsck, "") + AC_PATH_TOOL(GNUDD, dd, "") + AC_PATH_TOOL(GETCONF, getconf, "") + AC_PATH_TOOL(GETENT, getent, "") + AC_PATH_TOOL(GREP, grep, "") + dnl # Due to permissions, unprivileged users may not detect group*. + AC_PATH_TOOL(GROUPADD, groupadd, "/usr/sbin/groupadd") + AC_PATH_TOOL(GROUPDEL, groupdel, "/usr/sbin/groupdel") + AC_PATH_TOOL(GROUPMOD, groupmod, "/usr/sbin/groupmod") + AC_PATH_TOOL(HEAD, head, "") + AC_PATH_TOOL(HOSTNAME, hostname, "") + AC_PATH_TOOL(ID, id, "") + AC_PATH_TOOL(KILL, kill, "") + AC_PATH_TOOL(KSH, ksh, "") + AC_PATH_TOOL(LOGNAME, logname, "") + AC_PATH_TOOL(LS, ls, "") + AC_PATH_TOOL(MD5SUM, md5sum, "") + AC_PATH_TOOL(MKDIR, mkdir, "") + AC_PATH_TOOL(MKNOD, mknod, "") + AC_PATH_TOOL(MKTEMP, mktemp, "") + AC_PATH_TOOL(MODINFO, modinfo, "") + AC_PATH_TOOL(MOUNT, mount, "") + AC_PATH_TOOL(MV, mv, "") + AC_PATH_TOOL(NAWK, nawk, "") + AC_PATH_TOOL(PGREP, pgrep, "") + AC_PATH_TOOL(PING, ping, "") + AC_PATH_TOOL(PKILL, pkill, "") + AC_PATH_TOOL(PRINTF, printf, "") + AC_PATH_TOOL(PS, ps, "") + AC_PATH_TOOL(PYTHON, python, "") + AC_PATH_TOOL(REBOOT, reboot, "") + AC_PATH_TOOL(RMDIR, rmdir, "") + AC_PATH_TOOL(RSH, rsh, "") + AC_PATH_TOOL(SED, sed, "") + AC_PATH_TOOL(SHUF, shuf, "") + AC_PATH_TOOL(SLEEP, sleep, "") + AC_PATH_TOOL(SORT, sort, "") + AC_PATH_TOOL(STRINGS, strings, "") + AC_PATH_TOOL(SU, su, "") + AC_PATH_TOOL(SUM, sum, "") + AC_PATH_TOOL(SYNC, sync, "") + AC_PATH_TOOL(TAIL, tail, "") + AC_PATH_TOOL(TAR, tar, "") + AC_PATH_TOOL(TOUCH, touch, "") + AC_PATH_TOOL(TR, tr, "") + AC_PATH_TOOL(TRUE, true, "") + AC_PATH_TOOL(UMASK, umask, "") + AC_PATH_TOOL(UMOUNT, umount, "") + AC_PATH_TOOL(UNAME, uname, "") + AC_PATH_TOOL(UNIQ, uniq, "") + dnl # Due to permissions, unprivileged users may not detect user*. + AC_PATH_TOOL(USERADD, useradd, "/usr/sbin/useradd") + AC_PATH_TOOL(USERDEL, userdel, "/usr/sbin/userdel") + AC_PATH_TOOL(USERMOD, usermod, "/usr/sbin/usermod") + AC_PATH_TOOL(WAIT, wait, "wait") dnl # Builtin in bash + AC_PATH_TOOL(WC, wc, "") +]) + +dnl # +dnl # Linux commands, used within 'is_linux' blocks of test scripts. +dnl # These commands may take different command line arguments.
+dnl # +AC_DEFUN([ZFS_AC_CONFIG_USER_COMMANDS_LINUX], [ + AC_PATH_TOOL(BLOCKDEV, blockdev, "") + AC_PATH_TOOL(COMPRESS, gzip, "") + AC_PATH_TOOL(FORMAT, parted, "") + AC_PATH_TOOL(LOCKFS, lsof, "") + AC_PATH_TOOL(MODUNLOAD, rmmod, "") + AC_PATH_TOOL(NEWFS, mke2fs, "") + AC_PATH_TOOL(PFEXEC, sudo, "") + AC_PATH_TOOL(SHARE, exportfs, "") + AC_PATH_TOOL(SWAP, swapon, "") + AC_PATH_TOOL(SWAPADD, swapon, "") + AC_PATH_TOOL(TRUNCATE, truncate, "") + AC_PATH_TOOL(UDEVADM, udevadm, "") + AC_PATH_TOOL(UFSDUMP, dump, "") + AC_PATH_TOOL(UFSRESTORE, restore, "") + AC_PATH_TOOL(UNCOMPRESS, gunzip, "") + AC_PATH_TOOL(UNSHARE, exportfs, "") + AC_PATH_TOOL(GETFACL, getfacl, "") + AC_PATH_TOOL(SETFACL, setfacl, "") + AC_PATH_TOOL(CHACL, chacl, "") + AC_PATH_TOOL(NPROC, nproc, "") + + PAGESIZE=$($GETCONF PAGESIZE) + AC_SUBST(PAGESIZE) + + MNTTAB=/etc/mtab + AC_SUBST(MNTTAB) +]) + +dnl # +dnl # BSD style commands; these have been kept in case at some point +dnl # we want to build these packages on BSD style systems. Otherwise +dnl # they are unused and should be treated as such. +dnl # +AC_DEFUN([ZFS_AC_CONFIG_USER_COMMANDS_BSD], [ + AC_PATH_TOOL(COMPRESS, compress, "") + AC_PATH_TOOL(COREADM, coreadm, "") + AC_PATH_TOOL(DIRCMP, dircmp, "") + AC_PATH_TOOL(DUMPADM, dumpadm, "") + AC_PATH_TOOL(FORMAT, format, "") + AC_PATH_TOOL(GETMAJOR, getmajor, "") + AC_PATH_TOOL(ISAINFO, isainfo, "") + AC_PATH_TOOL(KSTAT, kstat, "") + AC_PATH_TOOL(LOCKFS, lockfs, "") + AC_PATH_TOOL(LOFIADM, lofiadm, "") + AC_PATH_TOOL(MODUNLOAD, modunload, "") + AC_PATH_TOOL(NEWFS, newfs, "") + AC_PATH_TOOL(PAGESIZE, pagesize, "") + AC_PATH_TOOL(PFEXEC, pfexec, "") + AC_PATH_TOOL(PKGINFO, pkginfo, "") + AC_PATH_TOOL(PRTVTOC, prtvtoc, "") + AC_PATH_TOOL(PSRINFO, psrinfo, "") + AC_PATH_TOOL(SHARE, share, "") + AC_PATH_TOOL(SVCADM, svcadm, "") + AC_PATH_TOOL(SVCS, svcs, "") + AC_PATH_TOOL(SWAP, swap, "") + AC_PATH_TOOL(SWAPADD, swapadd, "") + AC_PATH_TOOL(UFSDUMP, ufsdump, "") + AC_PATH_TOOL(UFSRESTORE, ufsrestore, "") + AC_PATH_TOOL(UMOUNTALL, umountall, "") + AC_PATH_TOOL(UNCOMPRESS, uncompress, "") + AC_PATH_TOOL(UNSHARE, unshare, "") + AC_PATH_TOOL(ZONEADM, zoneadm, "") + AC_PATH_TOOL(ZONECFG, zonecfg, "") + AC_PATH_TOOL(ZONENAME, zonename, "") +]) + +AC_DEFUN([ZFS_AC_CONFIG_USER_COMMANDS], [ + ZFS_AC_CONFIG_USER_COMMANDS_COMMON + + OS=$($UNAME -o) + AS_IF([test "$OS" == "GNU/Linux"], [ + ZFS_AC_CONFIG_USER_COMMANDS_LINUX + ], [ + ZFS_AC_CONFIG_USER_COMMANDS_BSD + ]) +]) diff --git a/config/user-libattr.m4 b/config/user-libattr.m4 new file mode 100644 index 000000000..3298fd491 --- /dev/null +++ b/config/user-libattr.m4 @@ -0,0 +1,12 @@ +dnl # +dnl # Check for libattr +dnl # +AC_DEFUN([ZFS_AC_CONFIG_USER_LIBATTR], [ + LIBATTR= + + AC_CHECK_HEADER([attr/xattr.h], [], [AC_MSG_FAILURE([ + *** attr/xattr.h missing, libattr-devel package required])]) + + AC_SUBST([LIBATTR], ["-lattr"]) + AC_DEFINE([HAVE_LIBATTR], 1, [Define if you have libattr]) +]) diff --git a/config/user.m4 b/config/user.m4 index 7f7942082..69d103c10 100644 --- a/config/user.m4 +++ b/config/user.m4 @@ -11,9 +11,24 @@ AC_DEFUN([ZFS_AC_CONFIG_USER], [ ZFS_AC_CONFIG_USER_ZLIB ZFS_AC_CONFIG_USER_LIBUUID ZFS_AC_CONFIG_USER_LIBBLKID + ZFS_AC_CONFIG_USER_LIBATTR ZFS_AC_CONFIG_USER_FRAME_LARGER_THAN ZFS_AC_CONFIG_USER_RUNSTATEDIR -dnl # -dnl # Checks for library functions + + ZFS_AC_CONFIG_USER_COMMANDS + ZFS_AC_TEST_FRAMEWORK + AC_CHECK_FUNCS([mlockall]) ]) + +dnl # +dnl # Set up the environment for the ZFS Test Suite.
Currently only +dnl # Linux style systems are supported but this infrastructure can +dnl # be extended to support other platforms if needed. +dnl # +AC_DEFUN([ZFS_AC_TEST_FRAMEWORK], [ + ZONENAME="echo global" + AC_SUBST(ZONENAME) + + AC_SUBST(RM) +]) diff --git a/config/zfs-build.m4 b/config/zfs-build.m4 index 9d3f0a6f5..41431e10a 100644 --- a/config/zfs-build.m4 +++ b/config/zfs-build.m4 @@ -89,8 +89,8 @@ AC_DEFUN([ZFS_AC_CONFIG], [ case "$ZFS_CONFIG" in kernel) ZFS_AC_CONFIG_KERNEL ;; user) ZFS_AC_CONFIG_USER ;; - all) ZFS_AC_CONFIG_KERNEL - ZFS_AC_CONFIG_USER ;; + all) ZFS_AC_CONFIG_USER + ZFS_AC_CONFIG_KERNEL ;; srpm) ;; *) AC_MSG_RESULT([Error!]) diff --git a/configure.ac b/configure.ac index 9907857e2..41cd00758 100644 --- a/configure.ac +++ b/configure.ac @@ -56,7 +56,7 @@ ZFS_AC_CONFIG ZFS_AC_DEBUG ZFS_AC_DEBUG_DMU_TX -AC_CONFIG_FILES([ +AC_CONFIG_FILES([ Makefile udev/Makefile udev/rules.d/Makefile @@ -134,6 +134,145 @@ AC_CONFIG_FILES([ scripts/zpios-test/Makefile scripts/zpool-config/Makefile scripts/common.sh + tests/Makefile + tests/test-runner/Makefile + tests/test-runner/cmd/Makefile + tests/test-runner/include/Makefile + tests/test-runner/man/Makefile + tests/runfiles/Makefile + tests/zfs-tests/Makefile + tests/zfs-tests/cmd/Makefile + tests/zfs-tests/cmd/chg_usr_exec/Makefile + tests/zfs-tests/cmd/devname2devid/Makefile + tests/zfs-tests/cmd/dir_rd_update/Makefile + tests/zfs-tests/cmd/file_check/Makefile + tests/zfs-tests/cmd/file_trunc/Makefile + tests/zfs-tests/cmd/file_write/Makefile + tests/zfs-tests/cmd/largest_file/Makefile + tests/zfs-tests/cmd/mkbusy/Makefile + tests/zfs-tests/cmd/mkfile/Makefile + tests/zfs-tests/cmd/mkfiles/Makefile + tests/zfs-tests/cmd/mktree/Makefile + tests/zfs-tests/cmd/mmap_exec/Makefile + tests/zfs-tests/cmd/mmapwrite/Makefile + tests/zfs-tests/cmd/randfree_file/Makefile + tests/zfs-tests/cmd/readmmap/Makefile + tests/zfs-tests/cmd/rename_dir/Makefile + tests/zfs-tests/cmd/rm_lnkcnt_zero_file/Makefile + tests/zfs-tests/cmd/threadsappend/Makefile + tests/zfs-tests/cmd/xattrtest/Makefile + tests/zfs-tests/include/Makefile + tests/zfs-tests/include/commands.cfg + tests/zfs-tests/include/default.cfg + tests/zfs-tests/tests/Makefile + tests/zfs-tests/tests/functional/Makefile + tests/zfs-tests/tests/functional/acl/Makefile + tests/zfs-tests/tests/functional/acl/posix/Makefile + tests/zfs-tests/tests/functional/atime/Makefile + tests/zfs-tests/tests/functional/bootfs/Makefile + tests/zfs-tests/tests/functional/cache/Makefile + tests/zfs-tests/tests/functional/cachefile/Makefile + tests/zfs-tests/tests/functional/casenorm/Makefile + tests/zfs-tests/tests/functional/clean_mirror/Makefile + tests/zfs-tests/tests/functional/cli_root/Makefile + tests/zfs-tests/tests/functional/cli_root/zdb/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_clone/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_copies/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_create/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_destroy/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_get/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_inherit/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_mount/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_promote/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_property/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_receive/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_rename/Makefile +
tests/zfs-tests/tests/functional/cli_root/zfs_reservation/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_rollback/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_send/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_set/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_share/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_unmount/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_unshare/Makefile + tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_attach/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_clear/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_destroy/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_detach/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_expand/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_export/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_get/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_history/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_import/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_offline/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_online/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_remove/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_replace/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_scrub/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_set/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_status/Makefile + tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/Makefile + tests/zfs-tests/tests/functional/cli_user/Makefile + tests/zfs-tests/tests/functional/cli_user/misc/Makefile + tests/zfs-tests/tests/functional/cli_user/zfs_list/Makefile + tests/zfs-tests/tests/functional/cli_user/zpool_iostat/Makefile + tests/zfs-tests/tests/functional/cli_user/zpool_list/Makefile + tests/zfs-tests/tests/functional/compression/Makefile + tests/zfs-tests/tests/functional/ctime/Makefile + tests/zfs-tests/tests/functional/delegate/Makefile + tests/zfs-tests/tests/functional/devices/Makefile + tests/zfs-tests/tests/functional/exec/Makefile + tests/zfs-tests/tests/functional/features/async_destroy/Makefile + tests/zfs-tests/tests/functional/features/Makefile + tests/zfs-tests/tests/functional/grow_pool/Makefile + tests/zfs-tests/tests/functional/grow_replicas/Makefile + tests/zfs-tests/tests/functional/history/Makefile + tests/zfs-tests/tests/functional/inheritance/Makefile + tests/zfs-tests/tests/functional/inuse/Makefile + tests/zfs-tests/tests/functional/large_files/Makefile + tests/zfs-tests/tests/functional/largest_pool/Makefile + tests/zfs-tests/tests/functional/link_count/Makefile + tests/zfs-tests/tests/functional/migration/Makefile + tests/zfs-tests/tests/functional/mmap/Makefile + tests/zfs-tests/tests/functional/mount/Makefile + tests/zfs-tests/tests/functional/mv_files/Makefile + tests/zfs-tests/tests/functional/nestedfs/Makefile + tests/zfs-tests/tests/functional/no_space/Makefile + tests/zfs-tests/tests/functional/nopwrite/Makefile + tests/zfs-tests/tests/functional/online_offline/Makefile + tests/zfs-tests/tests/functional/pool_names/Makefile + tests/zfs-tests/tests/functional/poolversion/Makefile + 
tests/zfs-tests/tests/functional/privilege/Makefile + tests/zfs-tests/tests/functional/quota/Makefile + tests/zfs-tests/tests/functional/redundancy/Makefile + tests/zfs-tests/tests/functional/refquota/Makefile + tests/zfs-tests/tests/functional/refreserv/Makefile + tests/zfs-tests/tests/functional/rename_dirs/Makefile + tests/zfs-tests/tests/functional/replacement/Makefile + tests/zfs-tests/tests/functional/reservation/Makefile + tests/zfs-tests/tests/functional/rootpool/Makefile + tests/zfs-tests/tests/functional/rsend/Makefile + tests/zfs-tests/tests/functional/scrub_mirror/Makefile + tests/zfs-tests/tests/functional/slog/Makefile + tests/zfs-tests/tests/functional/snapshot/Makefile + tests/zfs-tests/tests/functional/snapused/Makefile + tests/zfs-tests/tests/functional/sparse/Makefile + tests/zfs-tests/tests/functional/threadsappend/Makefile + tests/zfs-tests/tests/functional/truncate/Makefile + tests/zfs-tests/tests/functional/userquota/Makefile + tests/zfs-tests/tests/functional/write_dirs/Makefile + tests/zfs-tests/tests/functional/xattr/Makefile + tests/zfs-tests/tests/functional/zvol/Makefile + tests/zfs-tests/tests/functional/zvol/zvol_cli/Makefile + tests/zfs-tests/tests/functional/zvol/zvol_ENOSPC/Makefile + tests/zfs-tests/tests/functional/zvol/zvol_misc/Makefile + tests/zfs-tests/tests/functional/zvol/zvol_swap/Makefile + tests/zfs-tests/tests/stress/Makefile rpm/Makefile rpm/redhat/Makefile rpm/redhat/zfs.spec diff --git a/include/libzfs.h b/include/libzfs.h index 33c87b441..07a5906c2 100644 --- a/include/libzfs.h +++ b/include/libzfs.h @@ -58,6 +58,7 @@ extern "C" { */ #define DISK_ROOT "/dev" #define UDISK_ROOT "/dev/disk" +#define ZVOL_ROOT "/dev/zvol" /* * Default wait time for a device name to be created. diff --git a/lib/libzfs/libzfs_util.c b/lib/libzfs/libzfs_util.c index 65b04c59a..57c2ac853 100644 --- a/lib/libzfs/libzfs_util.c +++ b/lib/libzfs/libzfs_util.c @@ -908,7 +908,8 @@ zfs_append_partition(char *path, size_t max_len) { int len = strlen(path); - if (strncmp(path, UDISK_ROOT, strlen(UDISK_ROOT)) == 0) { + if ((strncmp(path, UDISK_ROOT, strlen(UDISK_ROOT)) == 0) || + (strncmp(path, ZVOL_ROOT, strlen(ZVOL_ROOT)) == 0)) { if (len + 6 >= max_len) return (-1); diff --git a/rpm/generic/zfs.spec.in b/rpm/generic/zfs.spec.in index cadc08c8a..cc8d02b6d 100644 --- a/rpm/generic/zfs.spec.in +++ b/rpm/generic/zfs.spec.in @@ -171,6 +171,7 @@ Requires: parted Requires: lsscsi Requires: mdadm Requires: bc +Requires: ksh %description test This package contains test infrastructure and support scripts for diff --git a/scripts/Makefile.am b/scripts/Makefile.am index 26f9207e4..32d8911c3 100644 --- a/scripts/Makefile.am +++ b/scripts/Makefile.am @@ -6,52 +6,12 @@ pkgdatadir = $(datadir)/@PACKAGE@ dist_pkgdata_SCRIPTS = \ $(top_builddir)/scripts/common.sh \ $(top_srcdir)/scripts/zconfig.sh \ - $(top_srcdir)/scripts/zfault.sh \ $(top_srcdir)/scripts/ziltest.sh \ $(top_srcdir)/scripts/zimport.sh \ $(top_srcdir)/scripts/zfs.sh \ + $(top_srcdir)/scripts/zfs-tests.sh \ $(top_srcdir)/scripts/zpool-create.sh \ $(top_srcdir)/scripts/zpios.sh \ $(top_srcdir)/scripts/zpios-sanity.sh \ $(top_srcdir)/scripts/zpios-survey.sh \ $(top_srcdir)/scripts/smb.sh - -ZFS=$(top_builddir)/scripts/zfs.sh -ZCONFIG=$(top_builddir)/scripts/zconfig.sh -ZFAULT=$(top_builddir)/scripts/zfault.sh -ZIMPORT=$(top_builddir)/scripts/zimport.sh -ZTEST=$(top_builddir)/cmd/ztest/ztest -ZPIOS_SANITY=$(top_builddir)/scripts/zpios-sanity.sh - -check: - @$(ZFS) -u - @echo - @echo -n "====================================" - 
@echo -n " ZTEST " - @echo "====================================" - @echo - @$(ZFS) - @$(ZTEST) -V - @$(ZFS) -u - @echo - @echo - @echo -n "===================================" - @echo -n " ZCONFIG " - @echo "===================================" - @echo - @$(ZCONFIG) -c - @echo - @echo -n "===================================" - @echo -n " ZFAULT " - @echo "===================================" - @echo - @$(ZFAULT) -c - @echo - @echo -n "====================================" - @echo -n " ZPIOS " - @echo "====================================" - @echo - @$(ZFS) - @$(ZPIOS_SANITY) - @$(ZFS) -u - @echo diff --git a/scripts/common.sh.in b/scripts/common.sh.in index 2fac2a919..f6c6d93fe 100644 --- a/scripts/common.sh.in +++ b/scripts/common.sh.in @@ -45,10 +45,13 @@ DEVDIR=${DEVDIR:-/dev/disk/by-vdev} ZPOOLDIR=${ZPOOLDIR:-${pkgdatadir}/zpool-config} ZPIOSDIR=${ZPIOSDIR:-${pkgdatadir}/zpios-test} ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkgdatadir}/zpios-profile} +TESTSDIR=${TESTSDIR:-${pkgdatadir}/zfs-tests} +RUNFILEDIR=${RUNFILEDIR:-${pkgdatadir}/runfiles} ZDB=${ZDB:-${sbindir}/zdb} ZFS=${ZFS:-${sbindir}/zfs} ZINJECT=${ZINJECT:-${sbindir}/zinject} +ZHACK=${ZHACK:-${sbindir}/zhack} ZPOOL=${ZPOOL:-${sbindir}/zpool} ZTEST=${ZTEST:-${sbindir}/ztest} ZPIOS=${ZPIOS:-${sbindir}/zpios} @@ -58,6 +61,9 @@ ZFS_SH=${ZFS_SH:-${pkgdatadir}/zfs.sh} ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkgdatadir}/zpool-create.sh} ZPIOS_SH=${ZPIOS_SH:-${pkgdatadir}/zpios.sh} ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkgdatadir}/zpios-survey.sh} +TEST_RUNNER=${TEST_RUNNER:-${pkgdatadir}/test-runner/bin/test-runner.py} +STF_TOOLS=${STF_TOOLS:-${pkgdatadir}/test-runner} +STF_SUITE=${STF_SUITE:-${pkgdatadir}/zfs-tests} LDMOD=${LDMOD:-/sbin/modprobe} LSMOD=${LSMOD:-/sbin/lsmod} @@ -65,6 +71,7 @@ RMMOD=${RMMOD:-/sbin/rmmod} INFOMOD=${INFOMOD:-/sbin/modinfo} LOSETUP=${LOSETUP:-/sbin/losetup} MDADM=${MDADM:-/sbin/mdadm} +DMSETUP=${DMSETUP:-/sbin/dmsetup} PARTED=${PARTED:-/sbin/parted} BLOCKDEV=${BLOCKDEV:-/sbin/blockdev} LSSCSI=${LSSCSI:-/usr/bin/lsscsi} @@ -297,7 +304,7 @@ check_loop_utils() { # the minor as long as it's less than /sys/module/loop/parameters/max_loop. # unused_loop_device() { - local DEVICE=`${LOSETUP} -f` + local DEVICE=$(${LOSETUP} -f) local MAX_LOOP_PATH="/sys/module/loop/parameters/max_loop" local MAX_LOOP; @@ -359,7 +366,7 @@ destroy_loop_devices() { local LODEVICES="$1" msg "Destroying ${LODEVICES}" - ${LOSETUP} -d ${LODEVICES} || \ + ${LOSETUP} -d ${LODEVICES} || \ die "Error $? destroying ${FILE} -> ${DEVICE} loopback" rm -f ${FILES} diff --git a/scripts/zfault.sh b/scripts/zfault.sh deleted file mode 100755 index a5f1f3cb1..000000000 --- a/scripts/zfault.sh +++ /dev/null @@ -1,955 +0,0 @@ -#!/bin/bash -# -# ZPOOL fault verification test script. -# -# The current suite of fault tests should not be thought of an exhaustive -# list of failure modes. Rather it is simply an starting point which trys -# to cover the bulk the of the 'easy' and hopefully common, failure modes. -# -# Additional tests should be added but the current suite as new interesting -# failures modes are observed. Additional failure modes I'd like to see -# tests for include, but are not limited too: -# -# * Slow but successful IO. -# * SCSI sense codes generated as zevents. -# * 4k sectors -# * noise -# * medium error -# * recovered error -# -# The current infrastructure using the 'mdadm' faulty device and the -# 'scsi_debug' simulated scsi devices. The idea is to inject the error -# below the zfs stack to validate all the error paths. 
More targeted -# failure testing should be added using the 'zinject' command line util. -# -# Requires the following packages: -# * mdadm -# * lsscsi -# * sg3-utils -# - -basedir="$(dirname $0)" - -SCRIPT_COMMON=common.sh -if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then -. "${basedir}/${SCRIPT_COMMON}" -else -echo "Missing helper script ${SCRIPT_COMMON}" && exit 1 -fi - -PROG=zfault.sh - -usage() { -cat << EOF -USAGE: -$0 [hvcts] - -DESCRIPTION: - ZPOOL fault verification tests - -OPTIONS: - -h Show this message - -v Verbose - -c Cleanup md+lo+file devices at start - -t <#> Run listed tests - -s <#> Skip listed tests - -EOF -} - -while getopts 'hvct:s:?' OPTION; do - case $OPTION in - h) - usage - exit 1 - ;; - v) - VERBOSE=1 - ;; - c) - CLEANUP=1 - ;; - t) - TESTS_RUN=($OPTARG) - ;; - s) - TESTS_SKIP=($OPTARG) - ;; - ?) - usage - exit - ;; - esac -done - -if [ $(id -u) != 0 ]; then - die "Must run as root" -fi - -# Initialize the test suite -init - -# Perform pre-cleanup is requested -if [ ${CLEANUP} ]; then - ${ZFS_SH} -u - cleanup_md_devices - cleanup_loop_devices - rm -f /tmp/zpool.cache.* -fi - -# Check if we need to skip all md based tests. -MD_PARTITIONABLE=0 -check_md_partitionable && MD_PARTITIONABLE=1 -if [ ${MD_PARTITIONABLE} -eq 0 ]; then - echo "Skipping tests 1-7 which require partitionable md devices" -fi - -# Check if we need to skip all the scsi_debug tests. -SCSI_DEBUG=0 -${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1 -if [ ${SCSI_DEBUG} -eq 0 ]; then - echo "Skipping tests 8-9 which require the scsi_debug module" -fi - -if [ ${MD_PARTITIONABLE} -eq 0 ] || [ ${SCSI_DEBUG} -eq 0 ]; then - echo -fi - -printf "%40s%s\t%s\t%s\t%s\t%s\n" "" "raid0" "raid10" "raidz" "raidz2" "raidz3" - -pass_nonewline() { - echo -n -e "${COLOR_GREEN}Pass${COLOR_RESET}\t" -} - -skip_nonewline() { - echo -n -e "${COLOR_BROWN}Skip${COLOR_RESET}\t" -} - -nth_zpool_vdev() { - local POOL_NAME=$1 - local DEVICE_TYPE=$2 - local DEVICE_NTH=$3 - - ${ZPOOL} status ${POOL_NAME} | grep ${DEVICE_TYPE} ${TMP_STATUS} | \ - head -n${DEVICE_NTH} | tail -n1 | ${AWK} "{ print \$1 }" -} - -vdev_status() { - local POOL_NAME=$1 - local VDEV_NAME=$2 - - ${ZPOOL} status ${POOL_NAME} | ${AWK} "/${VDEV_NAME}/ { print \$2 }" -} - -# Required format is x.yz[KMGTP] -expand_numeric_suffix() { - local VALUE=$1 - - VALUE=`echo "${VALUE/%K/*1000}"` - VALUE=`echo "${VALUE/%M/*1000000}"` - VALUE=`echo "${VALUE/%G/*1000000000}"` - VALUE=`echo "${VALUE/%T/*1000000000000}"` - VALUE=`echo "${VALUE/%P/*1000000000000000}"` - VALUE=`echo "${VALUE}" | bc | cut -d'.' 
-f1` - - echo "${VALUE}" -} - -vdev_read_errors() { - local POOL_NAME=$1 - local VDEV_NAME=$2 - local VDEV_ERRORS=`${ZPOOL} status ${POOL_NAME} | - ${AWK} "/${VDEV_NAME}/ { print \\$3 }"` - - expand_numeric_suffix ${VDEV_ERRORS} -} - -vdev_write_errors() { - local POOL_NAME=$1 - local VDEV_NAME=$2 - local VDEV_ERRORS=`${ZPOOL} status ${POOL_NAME} | - ${AWK} "/${VDEV_NAME}/ { print \\$4 }"` - - expand_numeric_suffix ${VDEV_ERRORS} -} - -vdev_cksum_errors() { - local POOL_NAME=$1 - local VDEV_NAME=$2 - local VDEV_ERRORS=`${ZPOOL} status ${POOL_NAME} | - ${AWK} "/${VDEV_NAME}/ { print \\$5 }"` - - expand_numeric_suffix ${VDEV_ERRORS} -} - -zpool_state() { - local POOL_NAME=$1 - - ${ZPOOL} status ${POOL_NAME} | ${AWK} "/state/ { print \$2; exit }" -} - -zpool_event() { - local EVENT_NAME=$1 - local EVENT_KEY=$2 - - SCRIPT1="BEGIN {RS=\"\"; FS=\"\n\"} /${EVENT_NAME}/ { print \$0; exit }" - SCRIPT2="BEGIN {FS=\"=\"} /${EVENT_KEY}/ { print \$2; exit }" - - ${ZPOOL} events -vH | ${AWK} "${SCRIPT1}" | ${AWK} "${SCRIPT2}" -} - -zpool_scan_errors() { - local POOL_NAME=$1 - - ${ZPOOL} status ${POOL_NAME} | ${AWK} "/scan: scrub/ { print \$8 }" - ${ZPOOL} status ${POOL_NAME} | ${AWK} "/scan: resilver/ { print \$7 }" -} - -pattern_create() { - local PATTERN_BLOCK_SIZE=$1 - local PATTERN_BLOCK_COUNT=$2 - local PATTERN_NAME=`mktemp -p /tmp zpool.pattern.XXXXXXXX` - - echo ${PATTERN_NAME} - dd if=/dev/urandom of=${PATTERN_NAME} bs=${PATTERN_BLOCK_SIZE} \ - count=${PATTERN_BLOCK_COUNT} &>/dev/null - return $? -} - -pattern_write() { - local PATTERN_NAME=$1 - local PATTERN_BLOCK_SIZE=$2 - local PATTERN_BLOCK_COUNT=$3 - local DEVICE_NAME=$4 - - dd if=${PATTERN_NAME} of=${DEVICE_NAME} bs=${PATTERN_BLOCK_SIZE} \ - count=${PATTERN_BLOCK_COUNT} oflag=direct &>/dev/null - return $? -} - -pattern_write_bg() { - local PATTERN_NAME=$1 - local PATTERN_BLOCK_SIZE=$2 - local PATTERN_BLOCK_COUNT=$3 - local DEVICE_NAME=$4 - - dd if=${PATTERN_NAME} of=${DEVICE_NAME} bs=${PATTERN_BLOCK_SIZE} \ - count=${PATTERN_BLOCK_COUNT} oflag=direct &>/dev/null & - return $? -} - -pattern_verify() { - local PATTERN_NAME=$1 - local PATTERN_BLOCK_SIZE=$2 - local PATTERN_BLOCK_COUNT=$3 - local DEVICE_NAME=$4 - local DEVICE_FILE=`mktemp -p /tmp zpool.pattern.XXXXXXXX` - - dd if=${DEVICE_NAME} of=${DEVICE_FILE} bs=${PATTERN_BLOCK_SIZE} \ - count=${PATTERN_BLOCK_COUNT} iflag=direct &>/dev/null - cmp -s ${PATTERN_NAME} ${DEVICE_FILE} - RC=$? - rm -f ${DEVICE_FILE} - - return ${RC} -} - -pattern_remove() { - local PATTERN_NAME=$1 - - rm -f ${PATTERN_NAME} - return $? -} - -fault_set_md() { - local VDEV_FAULTY=$1 - local FAULT_TYPE=$2 - - ${MDADM} /dev/${VDEV_FAULTY} --grow --level=faulty \ - --layout=${FAULT_TYPE} >/dev/null - return $? -} - -fault_clear_md() { - local VDEV_FAULTY=$1 - - # Clear all failure injection. - ${MDADM} /dev/${VDEV_FAULTY} --grow --level=faulty \ - --layout=clear >/dev/null || return $? - ${MDADM} /dev/${VDEV_FAULTY} --grow --level=faulty \ - --layout=flush >/dev/null || return $? - return $? 
-} - -fault_set_sd() { - local OPTS=$1 - local NTH=$2 - - echo ${OPTS} >/sys/bus/pseudo/drivers/scsi_debug/opts - echo ${NTH} >/sys/bus/pseudo/drivers/scsi_debug/every_nth -} - -fault_clear_sd() { - echo 0 >/sys/bus/pseudo/drivers/scsi_debug/every_nth - echo 0 >/sys/bus/pseudo/drivers/scsi_debug/opts -} - -test_setup() { - local POOL_NAME=$1 - local POOL_CONFIG=$2 - local ZVOL_NAME=$3 - local TMP_CACHE=$4 - - ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 - ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c ${POOL_CONFIG} || fail 2 - ${ZFS} create -V 64M ${POOL_NAME}/${ZVOL_NAME} || fail 3 - - # Trigger udev and re-read the partition table to ensure all of - # this IO is out of the way before we begin injecting failures. - udev_trigger || fail 4 - ${BLOCKDEV} --rereadpt /dev/${POOL_NAME}/${ZVOL_NAME} || fail 5 -} - -test_cleanup() { - local POOL_NAME=$1 - local POOL_CONFIG=$2 - local ZVOL_NAME=$3 - local TMP_CACHE=$4 - - ${ZFS} destroy ${POOL_NAME}/${ZVOL_NAME} || fail 101 - ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c ${POOL_CONFIG} -d || fail 102 - ${ZFS_SH} -u || fail 103 - rm -f ${TMP_CACHE} || fail 104 -} - -test_write_soft() { - local POOL_NAME=$1 - local POOL_CONFIG=$2 - local POOL_REDUNDANT=$3 - local ZVOL_NAME="zvol" - local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}" - - if [ ${MD_PARTITIONABLE} -eq 0 ]; then - skip_nonewline - return - fi - - local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` - test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - - # Set soft write failure for first vdev device. - local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md 1` - fault_set_md ${VDEV_FAULTY} write-transient - - # The application must not observe an error. - local TMP_PATTERN=`pattern_create 1M 8` || fail 11 - pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12 - fault_clear_md ${VDEV_FAULTY} - - # Soft errors will not be logged to 'zpool status' - local WRITE_ERRORS=`vdev_write_errors ${POOL_NAME} ${VDEV_FAULTY}` - test ${WRITE_ERRORS} -eq 0 || fail 13 - - # Soft errors will still generate an EIO (5) event. - test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 14 - - # Verify the known pattern. - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 15 - pattern_remove ${TMP_PATTERN} || fail 16 - - test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - pass_nonewline -} - -# Soft write error. -test_1() { - test_write_soft tank lo-faulty-raid0 0 - test_write_soft tank lo-faulty-raid10 1 - test_write_soft tank lo-faulty-raidz 1 - test_write_soft tank lo-faulty-raidz2 1 - test_write_soft tank lo-faulty-raidz3 1 - echo -} -run_test 1 "soft write error" - -test_write_hard() { - local POOL_NAME=$1 - local POOL_CONFIG=$2 - local POOL_REDUNDANT=$3 - local ZVOL_NAME="zvol" - local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}" - - if [ ${MD_PARTITIONABLE} -eq 0 ]; then - skip_nonewline - return - fi - - local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` - test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - - # Set hard write failure for first vdev device. - local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md 1` - fault_set_md ${VDEV_FAULTY} write-persistent - - # The application must not observe an error. 
- local TMP_PATTERN=`pattern_create 1M 8` || fail 11 - pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12 - fault_clear_md ${VDEV_FAULTY} - - local WRITE_ERRORS=`vdev_write_errors ${POOL_NAME} ${VDEV_FAULTY}` - if [ ${POOL_REDUNDANT} -eq 1 ]; then - # For redundant configurations hard errors will not be - # logged to 'zpool status' but will generate EIO events. - test ${WRITE_ERRORS} -eq 0 || fail 21 - test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 22 - else - # For non-redundant configurations hard errors will be - # logged to 'zpool status' and generate EIO events. They - # will also trigger a scrub of the impacted sectors. - sleep 10 - test ${WRITE_ERRORS} -gt 0 || fail 31 - test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 32 - test `zpool_event "zfs.resilver.start" "ena"` != "" || fail 33 - test `zpool_event "zfs.resilver.finish" "ena"` != "" || fail 34 - test `zpool_scan_errors ${POOL_NAME}` -eq 0 || fail 35 - fi - - # Verify the known pattern. - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 41 - pattern_remove ${TMP_PATTERN} || fail 42 - - test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - pass_nonewline -} - -# Hard write error. -test_2() { - test_write_hard tank lo-faulty-raid0 0 - test_write_hard tank lo-faulty-raid10 1 - test_write_hard tank lo-faulty-raidz 1 - test_write_hard tank lo-faulty-raidz2 1 - test_write_hard tank lo-faulty-raidz3 1 - echo -} -run_test 2 "hard write error" - -test_write_all() { - local POOL_NAME=$1 - local POOL_CONFIG=$2 - local POOL_REDUNDANT=$3 - local ZVOL_NAME="zvol" - local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}" - - if [ ${MD_PARTITIONABLE} -eq 0 ]; then - skip_nonewline - return - fi - - local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` - test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - - # Set all write failures for first vdev device. - local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md 1` - fault_set_md ${VDEV_FAULTY} write-all - - local TMP_PATTERN=`pattern_create 1M 8` || fail 11 - if [ ${POOL_REDUNDANT} -eq 1 ]; then - # The application must not observe an error. - pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12 - else - # The application is expected to hang in the background until - # the faulty device is repaired and 'zpool clear' is run. - pattern_write_bg ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 13 - sleep 10 - fi - fault_clear_md ${VDEV_FAULTY} - - local WRITE_ERRORS=`vdev_write_errors ${POOL_NAME} ${VDEV_FAULTY}` - local VDEV_STATUS=`vdev_status ${POOL_NAME} ${VDEV_FAULTY}` - local POOL_STATE=`zpool_state ${POOL_NAME}` - # For all configurations write errors are logged to 'zpool status', - # and EIO events are generated. However, only a redundant config - # will cause the vdev to be FAULTED and pool DEGRADED. In a non- - # redundant config the IO will hang until 'zpool clear' is run. - test ${WRITE_ERRORS} -gt 0 || fail 14 - test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 15 - - if [ ${POOL_REDUNDANT} -eq 1 ]; then - test "${VDEV_STATUS}" = "FAULTED" || fail 21 - test "${POOL_STATE}" = "DEGRADED" || fail 22 - else - BLOCKED=`ps a | grep "${ZVOL_DEVICE}" | grep -c -v "grep"` - ${ZPOOL} clear ${POOL_NAME} || fail 31 - test ${BLOCKED} -eq 1 || fail 32 - wait - fi - - # Verify the known pattern. - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 41 - pattern_remove ${TMP_PATTERN} || fail 42 - - test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - pass_nonewline -} - -# All write errors. 
-test_3() { - test_write_all tank lo-faulty-raid0 0 - test_write_all tank lo-faulty-raid10 1 - test_write_all tank lo-faulty-raidz 1 - test_write_all tank lo-faulty-raidz2 1 - test_write_all tank lo-faulty-raidz3 1 - echo -} -run_test 3 "all write errors" - -test_read_soft() { - local POOL_NAME=$1 - local POOL_CONFIG=$2 - local POOL_REDUNDANT=$3 - local ZVOL_NAME="zvol" - local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}" - local READ_ERRORS=0 - - if [ ${MD_PARTITIONABLE} -eq 0 ]; then - skip_nonewline - return - fi - - local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` - test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - - # Create a pattern to be verified during a read error. - local TMP_PATTERN=`pattern_create 1M 8` || fail 11 - pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12 - - # Set soft read failure for all the vdevs to ensure we hit it. - for (( i=1; i<=4; i++ )); do - fault_set_md `nth_zpool_vdev ${POOL_NAME} md $i` read-transient - done - - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 13 - pattern_remove ${TMP_PATTERN} || fail 14 - - # Clear all failure injection and sum read errors. - for (( i=1; i<=4; i++ )); do - local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md $i` - local VDEV_ERRORS=`vdev_read_errors ${POOL_NAME} ${VDEV_FAULTY}` - let READ_ERRORS=${READ_ERRORS}+${VDEV_ERRORS} - fault_clear_md ${VDEV_FAULTY} - done - - # Soft errors will not be logged to 'zpool status'. - test ${READ_ERRORS} -eq 0 || fail 15 - - # Soft errors will still generate an EIO (5) event. - test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 16 - - test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - pass_nonewline -} - -# Soft read error. -test_4() { - test_read_soft tank lo-faulty-raid0 0 - test_read_soft tank lo-faulty-raid10 1 - test_read_soft tank lo-faulty-raidz 1 - test_read_soft tank lo-faulty-raidz2 1 - test_read_soft tank lo-faulty-raidz3 1 - echo -} -run_test 4 "soft read error" - -test_read_hard() { - local POOL_NAME=$1 - local POOL_CONFIG=$2 - local POOL_REDUNDANT=$3 - local ZVOL_NAME="zvol" - local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}" - local READ_ERRORS=0 - - if [ ${MD_PARTITIONABLE} -eq 0 ]; then - skip_nonewline - return - fi - - local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` - test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - - # Create a pattern to be verified during a read error. - local TMP_PATTERN=`pattern_create 1M 8` || fail 11 - pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12 - - # Set hard read failure for the fourth vdev. - local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md 4` - fault_set_md ${VDEV_FAULTY} read-persistent - - # For a redundant pool there must be no IO error, for a non-redundant - # pool we expect permanent damage and an IO error during verify, unless - # we get exceptionally lucky and have just damaged redundant metadata. - if [ ${POOL_REDUNDANT} -eq 1 ]; then - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 21 - local READ_ERRORS=`vdev_read_errors ${POOL_NAME} ${VDEV_FAULTY}` - test ${READ_ERRORS} -eq 0 || fail 22 - else - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} - ${ZPOOL} scrub ${POOL_NAME} || fail 32 - local READ_ERRORS=`vdev_read_errors ${POOL_NAME} ${VDEV_FAULTY}` - test ${READ_ERRORS} -gt 0 || fail 33 - ${ZPOOL} status -v ${POOL_NAME} | \ - grep -A8 "Permanent errors" | \ - grep -q "${POOL_NAME}" || fail 34 - fi - pattern_remove ${TMP_PATTERN} || fail 41 - - # Clear all failure injection and sum read errors. 
- fault_clear_md ${VDEV_FAULTY} - - # Hard errors will generate an EIO (5) event. - test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 42 - - test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - pass_nonewline -} - -# Hard read error. -test_5() { - test_read_hard tank lo-faulty-raid0 0 - test_read_hard tank lo-faulty-raid10 1 - test_read_hard tank lo-faulty-raidz 1 - test_read_hard tank lo-faulty-raidz2 1 - test_read_hard tank lo-faulty-raidz3 1 - echo -} -run_test 5 "hard read error" - -# Fixable read error. -test_read_fixable() { - local POOL_NAME=$1 - local POOL_CONFIG=$2 - local POOL_REDUNDANT=$3 - local ZVOL_NAME="zvol" - local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}" - local READ_ERRORS=0 - - if [ ${MD_PARTITIONABLE} -eq 0 ]; then - skip_nonewline - return - fi - - local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` - test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - - # Create a pattern to be verified during a read error. - local TMP_PATTERN=`pattern_create 1M 8` || fail 11 - pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12 - - # Set hard read failure for the fourth vdev. - local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md 4` - fault_set_md ${VDEV_FAULTY} read-fixable - - # For a redundant pool there must be no IO error, for a non-redundant - # pool we expect permanent damage and an IO error during verify, unless - # we get exceptionally lucky and have just damaged redundant metadata. - if [ ${POOL_REDUNDANT} -eq 1 ]; then - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 21 - local READ_ERRORS=`vdev_read_errors ${POOL_NAME} ${VDEV_FAULTY}` - test ${READ_ERRORS} -eq 0 || fail 22 - else - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} - ${ZPOOL} scrub ${POOL_NAME} || fail 32 - local READ_ERRORS=`vdev_read_errors ${POOL_NAME} ${VDEV_FAULTY}` - test ${READ_ERRORS} -gt 0 || fail 33 - ${ZPOOL} status -v ${POOL_NAME} | \ - grep -A8 "Permanent errors" | \ - grep -q "${POOL_NAME}" || fail 34 - fi - pattern_remove ${TMP_PATTERN} || fail 41 - - # Clear all failure injection and sum read errors. - fault_clear_md ${VDEV_FAULTY} - - # Hard errors will generate an EIO (5) event. - test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 42 - - test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - pass_nonewline -} - -# Read errors fixable with a write. -test_6() { - test_read_fixable tank lo-faulty-raid0 0 - test_read_fixable tank lo-faulty-raid10 1 - test_read_fixable tank lo-faulty-raidz 1 - test_read_fixable tank lo-faulty-raidz2 1 - test_read_fixable tank lo-faulty-raidz3 1 - echo -} -run_test 6 "fixable read error" - -test_cksum() { - local POOL_NAME=$1 - local POOL_CONFIG=$2 - local POOL_REDUNDANT=$3 - local VDEV_DAMAGE="$4" - local ZVOL_NAME="zvol" - local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}" - - if [ ${MD_PARTITIONABLE} -eq 0 ]; then - skip_nonewline - return - fi - - local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` - test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - - # Create a pattern to be verified. - local TMP_PATTERN=`pattern_create 1M 8` || fail 11 - pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12 - - # Verify the pattern and that no vdev has cksum errors. 
- pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 13 - for (( i=1; i<4; i++ )); do - VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md ${i}` - CKSUM_ERRORS=`vdev_cksum_errors ${POOL_NAME} ${VDEV_FAULTY}` - test ${CKSUM_ERRORS} -eq 0 || fail 14 - done - - # Corrupt the bulk of a vdev with random garbage, we damage as many - # vdevs as we have levels of redundancy. For example for a raidz3 - # configuration we can trash 3 vdevs and still expect correct data. - # This improves the odds that we read one of the damaged vdevs. - for VDEV in ${VDEV_DAMAGE}; do - VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md $VDEV` - pattern_write /dev/urandom 1M 64 /dev/${VDEV_FAULTY}p1 - done - - # Verify the pattern is still correct. For non-redundant pools - # expect failure and for redundant pools success due to resilvering. - if [ ${POOL_REDUNDANT} -eq 1 ]; then - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 16 - else - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} && fail 17 - fi - - CKSUM_ERRORS=`vdev_cksum_errors ${POOL_NAME} ${VDEV_FAULTY}` - test ${CKSUM_ERRORS} -gt 0 || fail 18 - STATUS=`vdev_status ${POOL_NAME} ${VDEV_FAULTY}` - test "${STATUS}" = "ONLINE" || fail 19 - - # The checksum errors must be logged as an event. - local CKSUM_ERRORS=`zpool_event "zfs.checksum" "zio_err"` - test ${CKSUM_ERRORS} = "0x34" || test ${CKSUM_ERRORS} = "0x0" || fail 20 - - # Verify permant errors for non-redundant pools, and for redundant - # pools trigger a scrub and check that all checksums have been fixed. - if [ ${POOL_REDUNDANT} -eq 1 ]; then - # Scrub the checksum errors and clear the faults. - ${ZPOOL} scrub ${POOL_NAME} || fail 21 - sleep 3 - ${ZPOOL} clear ${POOL_NAME} || fail 22 - - # Re-verify the pattern for fixed checksums. - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 23 - CKSUM_ERRORS=`vdev_cksum_errors ${POOL_NAME} ${VDEV_FAULTY}` - test ${CKSUM_ERRORS} -eq 0 || fail 24 - - # Re-verify the entire pool for fixed checksums. - ${ZPOOL} scrub ${POOL_NAME} || fail 25 - CKSUM_ERRORS=`vdev_cksum_errors ${POOL_NAME} ${VDEV_FAULTY}` - test ${CKSUM_ERRORS} -eq 0 || fail 26 - else - ${ZPOOL} status -v ${POOL_NAME} | \ - grep -A8 "Permanent errors" | \ - grep -q "${POOL_NAME}/${ZVOL_NAME}" || fail 31 - ${ZPOOL} clear ${POOL_NAME} || fail 32 - fi - pattern_remove ${TMP_PATTERN} || fail 41 - - test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - pass_nonewline -} - -# Silent data corruption -test_7() { - test_cksum tank lo-faulty-raid0 0 "1" - test_cksum tank lo-faulty-raid10 1 "1 3" - test_cksum tank lo-faulty-raidz 1 "4" - test_cksum tank lo-faulty-raidz2 1 "3 4" - test_cksum tank lo-faulty-raidz3 1 "2 3 4" - echo -} -run_test 7 "silent data corruption" - -# Soft write timeout at the scsi device layer. -test_write_timeout_soft() { - local POOL_NAME=$1 - local POOL_CONFIG=$2 - local POOL_REDUNDANT=$3 - local POOL_NTH=$4 - local ZVOL_NAME="zvol" - local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}" - - if [ ${SCSI_DEBUG} -eq 0 ]; then - skip_nonewline - return - fi - - local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` - test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - - # Set timeout(0x4) for every nth command. - fault_set_sd 4 ${POOL_NTH} - - # The application must not observe an error. - local TMP_PATTERN=`pattern_create 1M 8` || fail 11 - pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12 - fault_clear_sd - - # Intermittent write timeouts even with FAILFAST set may not cause - # an EIO (5) event. 
This is because how FAILFAST is handled depends - # a log on the low level driver and the exact nature of the failure. - # We will however see a 'zfs.delay' event logged due to the timeout. - VDEV_DELAY=`zpool_event "zfs.delay" "zio_delay"` - test `printf "%d" ${VDEV_DELAY}` -ge 30000 || fail 13 - - # Verify the known pattern. - pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 14 - pattern_remove ${TMP_PATTERN} || fail 15 - - test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - pass_nonewline -} - -test_8() { - test_write_timeout_soft tank scsi_debug-raid0 0 50 - test_write_timeout_soft tank scsi_debug-raid10 1 100 - test_write_timeout_soft tank scsi_debug-raidz 1 75 - test_write_timeout_soft tank scsi_debug-raidz2 1 150 - test_write_timeout_soft tank scsi_debug-raidz3 1 300 - echo -} -run_test 8 "soft write timeout" - -# Persistent write timeout at the scsi device layer. -test_write_timeout_hard() { - local POOL_NAME=$1 - local POOL_CONFIG=$2 - local POOL_REDUNDANT=$3 - local POOL_NTH=$4 - local ZVOL_NAME="zvol" - local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}" - local RESCAN=1 - - if [ ${SCSI_DEBUG} -eq 0 ]; then - skip_nonewline - return - fi - - local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` - test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - - local TMP_PATTERN1=`pattern_create 1M 8` - local TMP_PATTERN2=`pattern_create 1M 8` - local TMP_PATTERN3=`pattern_create 1M 8` - - # Create three partitions each one gets a unique pattern. The first - # pattern is written before the failure, the second pattern during - # the failure, and the third pattern while the vdev is degraded. - # All three patterns are verified while the vdev is degraded and - # then again once it is brought back online. - ${PARTED} -s ${ZVOL_DEVICE} mklabel gpt || fail 11 - ${PARTED} -s ${ZVOL_DEVICE} mkpart primary 1M 16M || fail 12 - ${PARTED} -s ${ZVOL_DEVICE} mkpart primary 16M 32M || fail 13 - ${PARTED} -s ${ZVOL_DEVICE} mkpart primary 32M 48M || fail 14 - - wait_udev ${ZVOL_DEVICE}1 30 - wait_udev ${ZVOL_DEVICE}2 30 - wait_udev ${ZVOL_DEVICE}3 30 - - # Before the failure. - pattern_write ${TMP_PATTERN1} 1M 8 ${ZVOL_DEVICE}1 || fail 15 - - # Get the faulty vdev name. - local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} sd 1` - - # Set timeout(0x4) for every nth command. - fault_set_sd 4 ${POOL_NTH} - - # During the failure. - pattern_write ${TMP_PATTERN2} 1M 8 ${ZVOL_DEVICE}2 || fail 21 - - # Expect write errors to be logged to 'zpool status' - local WRITE_ERRORS=`vdev_write_errors ${POOL_NAME} ${VDEV_FAULTY}` - test ${WRITE_ERRORS} -gt 0 || fail 22 - - local VDEV_STATUS=`vdev_status ${POOL_NAME} ${VDEV_FAULTY}` - test "${VDEV_STATUS}" = "UNAVAIL" || fail 23 - - # Clear the error and remove it from /dev/. - fault_clear_sd - rm -f /dev/${VDEV_FAULTY}[0-9] - - # Verify the first two patterns and write out the third. - pattern_write ${TMP_PATTERN3} 1M 8 ${ZVOL_DEVICE}3 || fail 31 - pattern_verify ${TMP_PATTERN1} 1M 8 ${ZVOL_DEVICE}1 || fail 32 - pattern_verify ${TMP_PATTERN2} 1M 8 ${ZVOL_DEVICE}2 || fail 33 - pattern_verify ${TMP_PATTERN3} 1M 8 ${ZVOL_DEVICE}3 || fail 34 - - # Bring the device back online by rescanning for it. It must appear - # in lsscsi and be available to dd before allowing ZFS to bring it - # online. This is not required but provides additional sanity. 
- while [ ${RESCAN} -eq 1 ]; do - scsi_rescan - wait_udev /dev/${VDEV_FAULTY} 30 - - if [ `${LSSCSI} | grep -c "/dev/${VDEV_FAULTY}"` -eq 0 ]; then - continue - fi - - dd if=/dev/${VDEV_FAULTY} of=/dev/null bs=8M count=1 &>/dev/null - if [ $? -ne 0 ]; then - continue - fi - - RESCAN=0 - done - - # Bring the device back online. We expect it to be automatically - # resilvered without error and we should see minimally the zfs.io, - # zfs.statechange (VDEV_STATE_HEALTHY (0x7)), and zfs.resilver.* - # events posted. - ${ZPOOL} online ${POOL_NAME} ${VDEV_FAULTY} || fail 51 - sleep 3 - test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 52 - test `zpool_event "zfs.statechange" "vdev_state"` = "0x7" || fail 53 - test `zpool_event "zfs.resilver.start" "ena"` != "" || fail 54 - test `zpool_event "zfs.resilver.finish" "ena"` != "" || fail 55 - test `zpool_scan_errors ${POOL_NAME}` -eq 0 || fail 56 - - local VDEV_STATUS=`vdev_status ${POOL_NAME} ${VDEV_FAULTY}` - test "${VDEV_STATUS}" = "ONLINE" || fail 57 - - # Verify the known pattern. - pattern_verify ${TMP_PATTERN1} 1M 8 ${ZVOL_DEVICE}1 || fail 61 - pattern_verify ${TMP_PATTERN2} 1M 8 ${ZVOL_DEVICE}2 || fail 62 - pattern_verify ${TMP_PATTERN3} 1M 8 ${ZVOL_DEVICE}3 || fail 63 - pattern_remove ${TMP_PATTERN1} || fail 64 - pattern_remove ${TMP_PATTERN2} || fail 65 - pattern_remove ${TMP_PATTERN3} || fail 66 - - test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE} - pass_nonewline -} - -test_9() { - skip_nonewline # Skip non-redundant config - test_write_timeout_hard tank scsi_debug-raid10 1 -50 - test_write_timeout_hard tank scsi_debug-raidz 1 -50 - test_write_timeout_hard tank scsi_debug-raidz2 1 -50 - test_write_timeout_hard tank scsi_debug-raidz3 1 -50 - echo -} -run_test 9 "hard write timeout" - -exit 0 diff --git a/scripts/zfs-tests.sh b/scripts/zfs-tests.sh new file mode 100755 index 000000000..0c8a56c27 --- /dev/null +++ b/scripts/zfs-tests.sh @@ -0,0 +1,343 @@ +#!/bin/bash +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License, Version 1.0 only +# (the "License"). You may not use this file except in compliance +# with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# +basedir="$(dirname $0)" + +SCRIPT_COMMON=common.sh +if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then +. "${basedir}/${SCRIPT_COMMON}" +else +echo "Missing helper script ${SCRIPT_COMMON}" && exit 1 +fi + +. $STF_SUITE/include/default.cfg + +PROG=zfs-tests.sh +SUDO=/usr/bin/sudo +SETENFORCE=/usr/sbin/setenforce +VERBOSE= +QUIET= +CLEANUP=1 +CLEANUPALL=0 +LOOPBACK=1 +FILESIZE="2G" +RUNFILE=${RUNFILE:-"linux.run"} +FILEDIR=${FILEDIR:-/var/tmp} +DISKS=${DISKS:-""} + +# +# Attempt to remove loopback devices and files which where created earlier +# by this script to run the test framework. The '-k' option may be passed +# to the script to suppress cleanup for debugging purposes. 
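+#
+# As a hypothetical example (the exact command sequence below is illustrative
+# only), a debugging workflow using these options might look like:
+#
+#   $ ./scripts/zfs-tests.sh -v -k    # run the suite, keep any leftover state
+#   $ sudo zpool status               # inspect the preserved test pools
+#   $ ./scripts/zfs-tests.sh -x -v    # next run, remove the old state first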
+#
+cleanup() {
+    if [ $CLEANUP -eq 0 ]; then
+        return 0
+    fi
+
+    if [ $LOOPBACK -eq 1 ]; then
+        for TEST_LOOPBACK in ${LOOPBACKS}; do
+            LOOP_DEV=$(basename $TEST_LOOPBACK)
+            DM_DEV=$(${SUDO} ${DMSETUP} ls 2>/dev/null | \
+                grep ${LOOP_DEV} | cut -f1)
+
+            if [ -n "$DM_DEV" ]; then
+                ${SUDO} ${DMSETUP} remove ${DM_DEV} ||
+                    echo "Failed to remove: ${DM_DEV}"
+            fi
+
+            if [ -n "${TEST_LOOPBACK}" ]; then
+                ${SUDO} ${LOSETUP} -d ${TEST_LOOPBACK} ||
+                    echo "Failed to remove: ${TEST_LOOPBACK}"
+            fi
+        done
+    fi
+
+    for TEST_FILE in ${FILES}; do
+        rm -f ${TEST_FILE} &>/dev/null
+    done
+}
+trap cleanup EXIT
+
+#
+# Attempt to remove all testpools (testpool.XXX), unopened dm devices,
+# loopback devices, and files.  This is a useful way to clean up a previous
+# test run failure which has left the system in an unknown state.  This can
+# be dangerous and should only be used in a dedicated test environment.
+#
+cleanup_all() {
+    local TEST_POOLS=$(${SUDO} ${ZPOOL} list -H -o name | grep testpool)
+    local TEST_LOOPBACKS=$(${SUDO} ${LOSETUP} -a|grep file-vdev|cut -f1 -d:)
+    local TEST_FILES=$(ls /var/tmp/file-vdev* 2>/dev/null)
+
+    msg
+    msg "--- Cleanup ---"
+    msg "Removing pool(s): $(echo ${TEST_POOLS} | tr '\n' ' ')"
+    for TEST_POOL in $TEST_POOLS; do
+        ${SUDO} ${ZPOOL} destroy ${TEST_POOL}
+    done
+
+    msg "Removing dm(s): $(${SUDO} ${DMSETUP} ls |
+        grep loop | tr '\n' ' ')"
+    ${SUDO} ${DMSETUP} remove_all
+
+    msg "Removing loopback(s): $(echo ${TEST_LOOPBACKS} | tr '\n' ' ')"
+    for TEST_LOOPBACK in $TEST_LOOPBACKS; do
+        ${SUDO} ${LOSETUP} -d ${TEST_LOOPBACK}
+    done
+
+    msg "Removing file(s): $(echo ${TEST_FILES} | tr '\n' ' ')"
+    for TEST_FILE in $TEST_FILES; do
+        ${SUDO} rm -f ${TEST_FILE}
+    done
+}
+
+#
+# Log a failure message, cleanup, and return an error.
+#
+fail() {
+    echo -e "${PROG}: $1" >&2
+    cleanup
+    exit 1
+}
+
+#
+# Takes a name as the only argument and looks for the following variations
+# on that name.  If one is found it is returned.
+#
+# $RUNFILEDIR/<name>
+# $RUNFILEDIR/<name>.run
+# <name>
+# <name>.run
+#
+find_runfile() {
+    local NAME=$1
+    local RESULT=""
+
+    if [ -f "$RUNFILEDIR/$NAME" ]; then
+        RESULT="$RUNFILEDIR/$NAME"
+    elif [ -f "$RUNFILEDIR/$NAME.run" ]; then
+        RESULT="$RUNFILEDIR/$NAME.run"
+    elif [ -f "$NAME" ]; then
+        RESULT="$NAME"
+    elif [ -f "$NAME.run" ]; then
+        RESULT="$NAME.run"
+    fi
+
+    echo "$RESULT"
+}
+
+#
+# Output a useful usage message.
+#
+usage() {
+cat << EOF
+USAGE:
+$0 [-hvqxkf] [-d DIR] [-s SIZE] [-r RUNFILE]
+
+DESCRIPTION:
+    ZFS Test Suite launch script
+
+OPTIONS:
+    -h          Show this message
+    -v          Verbose zfs-tests.sh output
+    -q          Quiet test-runner output
+    -x          Remove all testpools, dm, lo, and files (unsafe)
+    -k          Disable cleanup after test failure
+    -f          Use files only, disables block device tests
+    -d DIR      Use DIR for files and loopback devices
+    -s SIZE     Use vdevs of SIZE (default: 2G)
+    -r RUNFILE  Run tests in RUNFILE (default: linux.run)
+
+EXAMPLES:
+# Run the default (linux) suite of tests and output the configuration used.
+$0 -v
+
+# Run a smaller suite of tests designed to run more quickly.
+$0 -r linux-fast
+
+# Cleanup a previous run of the test suite prior to testing, run the
+# default (linux) suite of tests and perform no cleanup on exit.
+$0 -x -k
+
+EOF
+}
+
+while getopts 'hvqxkfd:s:r:?' OPTION; do
+    case $OPTION in
+    h)
+        usage
+        exit 1
+        ;;
+    v)
+        VERBOSE=1
+        ;;
+    q)
+        QUIET="-q"
+        ;;
+    x)
+        CLEANUPALL=1
+        ;;
+    k)
+        CLEANUP=0
+        ;;
+    f)
+        LOOPBACK=0
+        ;;
+    d)
+        FILEDIR="$OPTARG"
+        ;;
+    s)
+        FILESIZE="$OPTARG"
+        ;;
+    r)
+        RUNFILE="$OPTARG"
+        ;;
+    ?)
+        usage
+        exit
+        ;;
+    esac
+done
+
+shift $((OPTIND-1))
+
+FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"}
+LOOPBACKS=${LOOPBACKS:-""}
+
+#
+# Attempt to locate the runfile describing the test workload.
+#
+if [ -n "$RUNFILE" ]; then
+    SAVED_RUNFILE="$RUNFILE"
+    RUNFILE=$(find_runfile "$RUNFILE")
+    [ -z "$RUNFILE" ] && fail "Cannot find runfile: $SAVED_RUNFILE"
+fi
+
+if [ ! -r "$RUNFILE" ]; then
+    fail "Cannot read runfile: $RUNFILE"
+fi
+
+#
+# This script should not be run as root.  Instead the test user, which may
+# be a normal user account, needs to be configured such that it can
+# run commands via sudo passwordlessly.
+#
+if [ $(id -u) = "0" ]; then
+    fail "This script must not be run as root."
+fi
+
+if [ $(sudo whoami) != "root" ]; then
+    fail "Passwordless sudo access required."
+fi
+
+#
+# Verify the ZFS module stack is loaded.
+#
+${SUDO} ${ZFS_SH} &>/dev/null
+
+#
+# Attempt to clean up all previous state for a new test run.
+#
+if [ $CLEANUPALL -ne 0 ]; then
+    cleanup_all
+fi
+
+#
+# By default preserve any existing pools.
+#
+if [ -z "${KEEP}" ]; then
+    KEEP=$(${SUDO} ${ZPOOL} list -H -o name)
+    if [ -z "${KEEP}" ]; then
+        KEEP="rpool"
+    fi
+fi
+
+msg
+msg "--- Configuration ---"
+msg "Runfile: $RUNFILE"
+msg "STF_TOOLS: $STF_TOOLS"
+msg "STF_SUITE: $STF_SUITE"
+
+#
+# No DISKS have been provided so basic file or loopback based devices
+# must be created for the test suite to use.
+#
+if [ -z "${DISKS}" ]; then
+    #
+    # Create sparse files for the test suite.  These may be used
+    # directly or have loopback devices layered on them.
+    #
+    for TEST_FILE in ${FILES}; do
+        [ -f "$TEST_FILE" ] && fail "Failed file exists: ${TEST_FILE}"
+        truncate -s ${FILESIZE} ${TEST_FILE} ||
+            fail "Failed creating: ${TEST_FILE} ($?)"
+        DISKS="$DISKS$TEST_FILE "
+    done
+
+    #
+    # If requested, set up loopback devices backed by the sparse files.
+    #
+    if [ $LOOPBACK -eq 1 ]; then
+        DISKS=""
+        check_loop_utils
+
+        for TEST_FILE in ${FILES}; do
+            TEST_LOOPBACK=$(${SUDO} ${LOSETUP} -f)
+            ${SUDO} ${LOSETUP} ${TEST_LOOPBACK} ${TEST_FILE} ||
+                fail "Failed: ${TEST_FILE} -> ${TEST_LOOPBACK}"
+            LOOPBACKS="${LOOPBACKS}${TEST_LOOPBACK} "
+            DISKS="$DISKS$(basename $TEST_LOOPBACK) "
+        done
+    fi
+fi
+
+NUM_DISKS=$(echo ${DISKS} | $AWK '{print NF}')
+[ $NUM_DISKS -lt 3 ] && fail "Not enough disks ($NUM_DISKS/3 minimum)"
+
+#
+# Disable SELinux until the ZFS Test Suite has been updated accordingly.
+#
+if [ -x ${SETENFORCE} ]; then
+    ${SUDO} ${SETENFORCE} permissive &>/dev/null
+fi
+
+msg "FILEDIR: $FILEDIR"
+msg "FILES: $FILES"
+msg "LOOPBACKS: $LOOPBACKS"
+msg "DISKS: $DISKS"
+msg "NUM_DISKS: $NUM_DISKS"
+msg "FILESIZE: $FILESIZE"
+msg "Keep pool(s): $KEEP"
+msg ""
+
+export STF_TOOLS
+export STF_SUITE
+export DISKS
+export KEEP
+
+msg "${TEST_RUNNER} ${QUIET} -c ${RUNFILE} -i ${STF_SUITE}"
+${TEST_RUNNER} ${QUIET} -c ${RUNFILE} -i ${STF_SUITE}
+RESULT=$?
+echo
+
+exit ${RESULT}
diff --git a/tests/Makefile.am b/tests/Makefile.am
new file mode 100644
index 000000000..28d6e95c3
--- /dev/null
+++ b/tests/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = runfiles test-runner zfs-tests
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 000000000..18bdd053a
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,133 @@
+# ZFS Test Suite README
+
+1) Building and installing the ZFS Test Suite
+
+The ZFS Test Suite runs under the test-runner framework.  This framework
+is built alongside the standard ZFS utilities and is included as part of
+the zfs-test package.
The zfs-test package can be built from source as follows:
+
+    $ ./configure
+    $ make pkg-utils
+
+The resulting packages can be installed using the rpm or dpkg command as
+appropriate for your distribution.  Alternately, if you have installed
+ZFS from a distribution's repository (not from source) the zfs-test package
+may be provided for your distribution.
+
+  - Installed from source
+      $ rpm -ivh ./zfs-test*.rpm, or
+      $ dpkg -i ./zfs-test*.deb,
+
+  - Installed from package repository
+      $ yum install zfs-test
+      $ apt-get install zfs-test
+
+2) Running the ZFS Test Suite
+
+The pre-requisites for running the ZFS Test Suite are:
+
+  * Three scratch disks
+  * Specify the disks you wish to use in the $DISKS variable, as a
+    space delimited list like this: DISKS='vdb vdc vdd'.  By default
+    the zfs-tests.sh script will construct three loopback devices to
+    be used for testing: DISKS='loop0 loop1 loop2'.
+  * A non-root user with a full set of basic privileges and the ability
+    to sudo(8) to root without a password to run the test.
+  * Specify any pools you wish to preserve as a space delimited list in
+    the $KEEP variable.  All pools detected at the start of testing are
+    added automatically.
+  * The ZFS Test Suite will add users and groups to the test machine to
+    verify functionality.  Therefore it is strongly advised that a
+    dedicated test machine, which can be a VM, be used for testing.
+
+Once the pre-requisites are satisfied, simply run the zfs-tests.sh script:
+
+    $ /usr/share/zfs/zfs-tests.sh
+
+Alternately, the zfs-tests.sh script can be run from the source tree to allow
+developers to rapidly validate their work.  In this mode the ZFS utilities and
+modules from the source tree will be used (rather than those installed on the
+system).  In order to avoid certain types of failures you will need to ensure
+the ZFS udev rules are installed.  This can be done manually or by ensuring
+some version of ZFS is installed on the system.
+
+    $ ./scripts/zfs-tests.sh
+
+The following zfs-tests.sh options are supported:
+
+    -v          Verbose zfs-tests.sh output.  When specified additional
+                information describing the test environment will be logged
+                prior to invoking test-runner.  This includes the runfile
+                being used, the DISKS targeted, pools to keep, etc.
+
+    -q          Quiet test-runner output.  When specified it is passed to
+                test-runner(1) which causes output to be written to the
+                console only for tests that do not pass and the results
+                summary.
+
+    -x          Remove all testpools, dm, lo, and files (unsafe).  When
+                specified the script will attempt to remove any leftover
+                configuration from a previous test run.  This includes
+                destroying any pools named testpool, unused DM devices,
+                and loopback devices backed by file-vdevs.  This operation
+                can be DANGEROUS because it is possible that the script
+                will mistakenly remove a resource not related to the testing.
+
+    -k          Disable cleanup after test failure.  When specified the
+                zfs-tests.sh script will not perform any additional cleanup
+                when test-runner exits.  This is useful when the results of
+                a specific test need to be preserved for further analysis.
+
+    -f          Use sparse files directly instead of loopback devices for
+                the testing.  When running in this mode certain tests will
+                be skipped which depend on real block devices.
+
+    -d DIR      Create sparse files for vdevs in the DIR directory.  By
+                default these files are created under /var/tmp/.
+ + -s SIZE Use vdevs of SIZE (default: 2G) + + -r RUNFILE Run tests in RUNFILE (default: linux.run) + + +The ZFS Test Suite allows the user to specify a subset of the tests via a +runfile. The format of the runfile is explained in test-runner(1), and +the files that zfs-tests.sh uses are available for reference under +/usr/share/zfs/runfiles. To specify a custom runfile, use the -r option: + + $ /usr/share/zfs/zfs-tests.sh -r my_tests.run + +3) Test results + +While the ZFS Test Suite is running, one informational line is printed at the +end of each test, and a results summary is printed at the end of the run. The +results summary includes the location of the complete logs, which is logged in +the form /var/tmp/test_results/[ISO 8601 date]. A normal test run launched +with the `zfs-tests.sh` wrapper script will look something like this: + +$ /usr/share/zfs/zfs-tests.sh -v -d /mnt + +--- Configuration --- +Runfile: /usr/share/zfs/runfiles/linux.run +STF_TOOLS: /usr/share/zfs/test-runner +STF_SUITE: /usr/share/zfs/zfs-tests +FILEDIR: /mnt +FILES: /mnt/file-vdev0 /mnt/file-vdev1 /mnt/file-vdev2 +LOOPBACKS: /dev/loop0 /dev/loop1 /dev/loop2 +DISKS: loop0 loop1 loop2 +NUM_DISKS: 3 +FILESIZE: 2G +Keep pool(s): rpool + +/usr/share/zfs/test-runner/bin/test-runner.py -c \ + /usr/share/zfs/runfiles/linux.run -i /usr/share/zfs/zfs-tests +Test: .../tests/functional/acl/posix/setup (run as root) [00:00] [PASS] +...470 additional tests... +Test: .../tests/functional/zvol/zvol_cli/cleanup (run as root) [00:00] [PASS] + +Results Summary +PASS 472 + +Running Time: 00:45:09 +Percent passed: 100.0% +Log directory: /var/tmp/test_results/20160316T181651 diff --git a/tests/runfiles/Makefile.am b/tests/runfiles/Makefile.am new file mode 100644 index 000000000..cddfb2ed6 --- /dev/null +++ b/tests/runfiles/Makefile.am @@ -0,0 +1,2 @@ +pkgdatadir = $(datadir)/@PACKAGE@/runfiles +dist_pkgdata_SCRIPTS = *.run diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run new file mode 100644 index 000000000..56e80b8e9 --- /dev/null +++ b/tests/runfiles/linux.run @@ -0,0 +1,647 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. 
+# + +[DEFAULT] +pre = setup +quiet = False +pre_user = root +user = root +timeout = 600 +post_user = root +post = cleanup +outputdir = /var/tmp/test_results + +# DISABLED: +# posix_001_pos - needs investigation +[tests/functional/acl/posix] +tests = ['posix_002_pos'] + +[tests/functional/atime] +tests = ['atime_001_pos', 'atime_002_neg'] + +# DISABLED: +# bootfs_006_pos - needs investigation +# bootfs_008_neg - needs investigation +[tests/functional/bootfs] +tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos', + 'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_007_neg'] + +# DISABLED: +# cache_001_pos - needs investigation +# cache_010_neg - needs investigation +[tests/functional/cache] +tests = ['cache_002_pos', 'cache_003_pos', 'cache_004_neg', + 'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg', + 'cache_009_pos', 'cache_011_pos'] + +# DISABLED: needs investigation +#[tests/functional/cachefile] +#tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos', +# 'cachefile_004_pos'] +#pre = +#post = + +# DISABLED: needs investigation +# 'sensitive_none_lookup', 'sensitive_none_delete', +# 'sensitive_formd_lookup', 'sensitive_formd_delete', +# 'insensitive_none_lookup', 'insensitive_none_delete', +# 'insensitive_formd_lookup', 'insensitive_formd_delete', +# 'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete', +# 'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete'] +[tests/functional/casenorm] +tests = ['case_all_values', 'norm_all_values'] + +[tests/functional/clean_mirror] +tests = [ 'clean_mirror_001_pos', 'clean_mirror_002_pos', + 'clean_mirror_003_pos', 'clean_mirror_004_pos'] + +[tests/functional/cli_root/zdb] +tests = ['zdb_001_neg'] +pre = +post = + +[tests/functional/cli_root/zfs] +tests = ['zfs_001_neg', 'zfs_002_pos', 'zfs_003_neg'] + +# DISABLED: +# zfs_clone_005_pos - busy unmount +# zfs_clone_010_pos - needs investigation +[tests/functional/cli_root/zfs_clone] +tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos', + 'zfs_clone_004_pos', 'zfs_clone_006_pos', + 'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg'] + +# DISABLED: +# zfs_copies_002_pos - needs investigation +# zfs_copies_003_pos - zpool on zvol +# zfs_copies_005_neg - nested pools +[tests/functional/cli_root/zfs_copies] +tests = ['zfs_copies_001_pos', 'zfs_copies_004_neg', 'zfs_copies_006_pos'] + +# DISABLED: +# zfs_create_006_pos - needs investigation +# zfs_create_003_pos - needs investigation +[tests/functional/cli_root/zfs_create] +tests = ['zfs_create_001_pos', 'zfs_create_002_pos', + 'zfs_create_004_pos', 'zfs_create_005_pos', + 'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg', + 'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos', + 'zfs_create_013_pos'] + +# DISABLED: +# zfs_destroy_001_pos - busy mountpoint behavior +# zfs_destroy_004_pos - busy mountpoint behavior +# zfs_destroy_005_neg - busy mountpoint behavior +# zfs_destroy_008_pos - busy mountpoint behavior +# zfs_destroy_009_pos - busy mountpoint behavior +# zfs_destroy_010_pos - busy mountpoint behavior +# zfs_destroy_011_pos - busy mountpoint behavior +# zfs_destroy_012_pos - busy mountpoint behavior +# zfs_destroy_013_neg - busy mountpoint behavior +[tests/functional/cli_root/zfs_destroy] +tests = ['zfs_destroy_002_pos', 'zfs_destroy_003_pos', 'zfs_destroy_006_neg', + 'zfs_destroy_007_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos', + 'zfs_destroy_016_pos'] + +# DISABLED: +# zfs_get_004_pos - nested pools +# zfs_get_006_neg - 
needs investigation +[tests/functional/cli_root/zfs_get] +tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos', + 'zfs_get_005_neg', 'zfs_get_007_neg', 'zfs_get_008_pos', + 'zfs_get_009_pos', 'zfs_get_010_neg'] + +[tests/functional/cli_root/zfs_inherit] +tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos'] + +# DISABLED: +# zfs_mount_005_pos - needs investigation +# zfs_mount_006_pos - needs investigation +# zfs_mount_007_pos - needs investigation +# zfs_mount_009_neg - needs investigation +# zfs_mount_010_neg - needs investigation +# zfs_mount_all_001_pos - needs investigation +[tests/functional/cli_root/zfs_mount] +tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos', + 'zfs_mount_004_pos', 'zfs_mount_008_pos', + 'zfs_mount_011_neg'] + +[tests/functional/cli_root/zfs_promote] +tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos', + 'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg', + 'zfs_promote_007_neg', 'zfs_promote_008_pos'] + +# DISABLED: +# zfs_written_property_001_pos - sync(1) does not force txg under Linux +[tests/functional/cli_root/zfs_property] +tests = [] + +# DISABLED: +# zfs_receive_003_pos - needs investigation +# zfs_receive_010_pos - needs investigation +# zfs_receive_011_pos - needs investigation +# zfs_receive_012_pos - needs investigation +[tests/functional/cli_root/zfs_receive] +tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_005_neg', + 'zfs_receive_006_pos', 'zfs_receive_007_neg', 'zfs_receive_008_pos', + 'zfs_receive_009_neg'] + +# DISABLED: +# zfs_rename_002_pos - needs investigation +# zfs_rename_005_neg - nested pools +# zfs_rename_006_pos - needs investigation +# zfs_rename_007_pos - needs investigation +[tests/functional/cli_root/zfs_rename] +tests = ['zfs_rename_001_pos', 'zfs_rename_003_pos', + 'zfs_rename_004_neg', 'zfs_rename_008_pos', 'zfs_rename_009_neg', + 'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg', + 'zfs_rename_013_pos'] + +[tests/functional/cli_root/zfs_reservation] +tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos'] + +# DISABLED: +# zfs_rollback_001_pos - busy mountpoint behavior +# zfs_rollback_002_pos - busy mountpoint behavior +[tests/functional/cli_root/zfs_rollback] +tests = ['zfs_rollback_003_neg', 'zfs_rollback_004_neg'] + +# DISABLED: +# zfs_send_007_pos - needs investigation +[tests/functional/cli_root/zfs_send] +tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos', + 'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos'] + +# DISABLED: +# mountpoint_003_pos - needs investigation +# ro_props_001_pos - needs investigation +# onoffs_001_pos - needs investigation +# property_alias_001_pos - needs investigation +# readonly_001_pos - needs investigation +# user_property_002_pos - needs investigation +[tests/functional/cli_root/zfs_set] +tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos', + 'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos', + 'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos', + 'mountpoint_002_pos', 'reservation_001_neg', + 'share_mount_001_neg', 'snapdir_001_pos', + 'user_property_001_pos', 'user_property_003_neg', + 'user_property_004_pos', 'version_001_neg', 'zfs_set_001_neg', + 'zfs_set_002_neg', 'zfs_set_003_neg'] + +# DISABLED: Tests need to be updated for Linux share behavior +#[tests/functional/cli_root/zfs_share] +#tests = ['zfs_share_001_pos', 'zfs_share_002_pos', 'zfs_share_003_pos', +# 'zfs_share_004_pos', 
'zfs_share_005_pos', 'zfs_share_006_pos', +# 'zfs_share_007_neg', 'zfs_share_008_neg', 'zfs_share_009_neg', +# 'zfs_share_010_neg', 'zfs_share_011_pos'] + +# DISABLED: +# zfs_snapshot_008_neg - nested pools +[tests/functional/cli_root/zfs_snapshot] +tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg', + 'zfs_snapshot_003_neg', 'zfs_snapshot_004_neg', 'zfs_snapshot_005_neg', + 'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_009_pos'] + +# DISABLED: +# zfs_unmount_005_pos - needs investigation +# zfs_unmount_009_pos - needs investigation +# zfs_unmount_all_001_pos - needs investigation +[tests/functional/cli_root/zfs_unmount] +tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos', + 'zfs_unmount_004_pos', 'zfs_unmount_006_pos', + 'zfs_unmount_007_neg', 'zfs_unmount_008_neg'] + +# DISABLED: Tests need to be updated for Linux unshare behavior +#[tests/functional/cli_root/zfs_unshare] +#tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos', +# 'zfs_unshare_004_neg', 'zfs_unshare_005_neg'] + +[tests/functional/cli_root/zfs_upgrade] +tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos', + 'zfs_upgrade_004_pos', 'zfs_upgrade_005_pos', 'zfs_upgrade_006_neg', + 'zfs_upgrade_007_neg'] + +[tests/functional/cli_root/zpool] +tests = ['zpool_001_neg', 'zpool_002_pos', 'zpool_003_pos'] + +# DISABLED: +# zpool_add_005_pos - no 'dumpadm' command. +# zpool_add_006_pos - nested pools +[tests/functional/cli_root/zpool_add] +tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos', + 'zpool_add_004_pos', + 'zpool_add_007_neg', 'zpool_add_008_neg', 'zpool_add_009_neg'] + +[tests/functional/cli_root/zpool_attach] +tests = ['zpool_attach_001_neg'] + +[tests/functional/cli_root/zpool_clear] +tests = ['zpool_clear_001_pos', 'zpool_clear_002_neg', 'zpool_clear_003_neg'] + +# DISABLED: +# zpool_create_001_pos - needs investigation +# zpool_create_002_pos - needs investigation +# zpool_create_004_pos - needs investigation +# zpool_create_006_pos - nested pools +# zpool_create_008_pos - uses VTOC labels (?) and 'overlapping slices' +# zpool_create_011_neg - tries to access /etc/vfstab etc +# zpool_create_012_neg - swap devices +# zpool_create_014_neg - swap devices +# zpool_create_015_neg - swap devices +# zpool_create_016_pos - no dumpadm command.
+# zpool_create_020_pos - needs investigation +[tests/functional/cli_root/zpool_create] +tests = [ + 'zpool_create_003_pos', 'zpool_create_005_pos', 'zpool_create_007_neg', + 'zpool_create_009_neg', 'zpool_create_010_neg', 'zpool_create_017_neg', + 'zpool_create_018_pos', 'zpool_create_019_pos', + 'zpool_create_021_pos', 'zpool_create_022_pos', 'zpool_create_023_neg', + 'zpool_create_features_001_pos', 'zpool_create_features_002_pos', + 'zpool_create_features_003_pos', 'zpool_create_features_004_neg'] + +# DISABLED: +# zpool_destroy_001_pos - failure should be investigated +# zpool_destroy_002_pos - update for Linux force unmount behavior +[tests/functional/cli_root/zpool_destroy] +tests = [ + 'zpool_destroy_003_neg'] +pre = +post = + +[tests/functional/cli_root/zpool_detach] +tests = ['zpool_detach_001_neg'] + +# DISABLED: Requires full FMA support in ZED +#[tests/functional/cli_root/zpool_expand] +#tests = ['zpool_expand_001_pos', 'zpool_expand_002_pos', +# 'zpool_expand_003_neg'] + +# DISABLED: +# zpool_export_004_pos - nested pools +[tests/functional/cli_root/zpool_export] +tests = ['zpool_export_001_pos', 'zpool_export_002_pos', + 'zpool_export_003_neg'] + +[tests/functional/cli_root/zpool_get] +tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos', + 'zpool_get_004_neg'] + +[tests/functional/cli_root/zpool_history] +tests = ['zpool_history_001_neg', 'zpool_history_002_pos'] + +# DISABLED: +# zpool_import_012_pos - sharenfs issue +# zpool_import_all_001_pos - partition issue +# zpool_import_features_001_pos - zhack issue +# zpool_import_features_002_neg - zhack issue +# zpool_import_features_003_pos - zhack issue +# zpool_import_missing_001_pos - zhack issue +# zpool_import_missing_002_pos - zhack issue +# zpool_import_missing_003_pos - zhack issue +# zpool_import_rename_001_pos - zhack issue +[tests/functional/cli_root/zpool_import] +tests = ['zpool_import_001_pos', 'zpool_import_002_pos', + 'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos', + 'zpool_import_006_pos', 'zpool_import_007_pos', 'zpool_import_008_pos', + 'zpool_import_009_neg', 'zpool_import_010_pos', 'zpool_import_011_neg', + 'zpool_import_013_neg'] + +[tests/functional/cli_root/zpool_offline] +tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg'] + +[tests/functional/cli_root/zpool_online] +tests = ['zpool_online_001_pos', 'zpool_online_002_neg'] + +# DISABLED: +# zpool_remove_003_pos - needs investigation +[tests/functional/cli_root/zpool_remove] +tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos'] + +[tests/functional/cli_root/zpool_replace] +tests = ['zpool_replace_001_neg'] + +# DISABLED: +# zpool_scrub_004_pos - needs investigation +# zpool_scrub_005_pos - needs investigation +[tests/functional/cli_root/zpool_scrub] +tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos'] + +[tests/functional/cli_root/zpool_set] +tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg'] +pre = +post = + +[tests/functional/cli_root/zpool_status] +tests = ['zpool_status_001_pos', 'zpool_status_002_pos'] + +# DISABLED: ENOSPC failure +#[tests/functional/cli_root/zpool_upgrade] +#tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos', +# 'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos', 'zpool_upgrade_005_neg', +# 'zpool_upgrade_006_neg', 'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos', +# 'zpool_upgrade_009_neg'] + +# DISABLED: nested pools +#[tests/functional/cli_user/misc] +#tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg', +#
'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg', +# 'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg', +# 'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg', +# 'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg', +# 'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg', +# 'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg', +# 'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg', +# 'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg', +# 'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg', +# 'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg', +# 'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg', +# 'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg', +# 'zpool_status_001_neg', 'zpool_upgrade_001_neg'] +#user = zfs-tests + +[tests/functional/cli_user/zfs_list] +tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos', + 'zfs_list_004_neg', 'zfs_list_007_pos', 'zfs_list_008_neg'] + +[tests/functional/cli_user/zpool_iostat] +tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos', + 'zpool_iostat_003_neg'] + +[tests/functional/cli_user/zpool_list] +tests = ['zpool_list_001_pos', 'zpool_list_002_neg'] + +[tests/functional/compression] +tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos', + 'compress_004_pos'] + +[tests/functional/ctime] +tests = ['ctime_001_pos' ] + +# DISABLED: Linux does not yet support delegations. +#[tests/functional/delegate] +#tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos', +# 'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos', +# 'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg', +# 'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg', +# 'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos', +# 'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos', +# 'zfs_unallow_007_neg', 'zfs_unallow_008_neg'] + +# DISABLED: +# devices_001_pos - needs investigation +# devices_002_neg - needs investigation +[tests/functional/devices] +tests = ['devices_003_pos'] + +# DISABLED: +# exec_002_neg - needs investigation +[tests/functional/exec] +tests = ['exec_001_pos'] + +[tests/functional/features/async_destroy] +tests = ['async_destroy_001_pos'] + +# DISABLED: needs investigation +#[tests/functional/grow_pool] +#tests = ['grow_pool_001_pos'] +#pre = +#post = + +# DISABLED: needs investigation +#[tests/functional/grow_replicas] +#tests = ['grow_replicas_001_pos'] +#pre = +#post = + +# DISABLED: +# history_001_pos - export commands missing from history +# history_003_pos - nested pool +# history_006_neg - needs investigation +# history_007_pos - needs investigation +# history_008_pos - needs investigation +# history_010_pos - needs investigation +[tests/functional/history] +tests = ['history_002_pos', 'history_004_pos', 'history_005_neg', + 'history_009_pos'] + +[tests/functional/inheritance] +tests = ['inherit_001_pos'] +pre = + +# DISABLED: +# inuse_001_pos, inuse_007_pos - no dumpadm command +# inuse_005_pos - partition issue +# inuse_006_pos - partition issue +# inuse_008_pos - partition issue +# inuse_009_pos - partition issue +[tests/functional/inuse] +tests = ['inuse_004_pos'] +post = + +# DISABLED: needs investigation +#[tests/functional/large_files] +#tests = ['large_files_001_pos'] + +# DISABLED: needs investigation +#[tests/functional/largest_pool] +#tests = ['largest_pool_001_pos'] +#pre = +#post = + +# 
DISABLED: needs investigation +#[tests/functional/link_count] +#tests = ['link_count_001'] + +[tests/functional/migration] +tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos', + 'migration_004_pos', 'migration_005_pos', 'migration_006_pos', + 'migration_007_pos', 'migration_008_pos', 'migration_009_pos', + 'migration_010_pos', 'migration_011_pos', 'migration_012_pos'] + +# DISABLED: +# mmap_write_001_pos - needs investigation +[tests/functional/mmap] +tests = ['mmap_read_001_pos'] + +# DISABLED: +# umountall_001 - Doesn't make sense in Linux - no umountall command. +[tests/functional/mount] +tests = ['umount_001'] + +[tests/functional/mv_files] +tests = ['mv_files_001_pos', 'mv_files_002_pos'] + +[tests/functional/nestedfs] +tests = ['nestedfs_001_pos'] + +[tests/functional/no_space] +tests = ['enospc_001_pos'] + +# DISABLED: needs investigation (CentOS 7 only) +#[tests/functional/nopwrite] +#tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative', +# 'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync', +# 'nopwrite_volume', 'nopwrite_varying_compression'] + +# DISABLED: needs investigation +#[tests/functional/online_offline] +#tests = ['online_offline_001_pos', 'online_offline_002_neg', +# 'online_offline_003_neg'] + +[tests/functional/pool_names] +tests = ['pool_names_001_pos', 'pool_names_002_neg'] +pre = +post = + +[tests/functional/poolversion] +tests = ['poolversion_001_pos', 'poolversion_002_pos'] + +# DISABLED: Doesn't make sense on Linux - no pfexec command or 'RBAC profile' +#[tests/functional/privilege] +#tests = ['privilege_001_pos', 'privilege_002_pos'] + +# DISABLED: +# quota_002_pos - size is less than current used or reserved space +# quota_004_pos - size is less than current used or reserved space +# quota_005_pos - size is less than current used or reserved space +[tests/functional/quota] +tests = ['quota_001_pos', 'quota_003_pos', 'quota_006_neg'] + +[tests/functional/redundancy] +tests = ['redundancy_001_pos', 'redundancy_002_pos', 'redundancy_003_pos'] + +# DISABLED: +# refquota_002_pos - size is less than current used or reserved space +# refquota_004_pos - needs investigation +[tests/functional/refquota] +tests = ['refquota_001_pos', 'refquota_003_pos', + 'refquota_005_pos', 'refquota_006_neg'] + +# DISABLED: +# refreserv_004_pos - needs investigation +[tests/functional/refreserv] +tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos', + 'refreserv_005_pos'] + +# DISABLED: nested pool +#[tests/functional/rename_dirs] +#tests = ['rename_dirs_001_pos'] + +# DISABLED: nested pool +#[tests/functional/replacement] +#tests = ['replacement_001_pos', 'replacement_002_pos'] + +# DISABLED: +# reservation_012_pos - needs investigation +# reservation_015_pos - needs investigation +# reservation_016_pos - needs investigation +[tests/functional/reservation] +tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos', + 'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos', + 'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos', + 'reservation_010_pos', 'reservation_011_pos', + 'reservation_013_pos', 'reservation_014_pos', + 'reservation_017_pos', 'reservation_018_pos'] + +# DISABLED: Root pools must be handled differently under Linux +#[tests/functional/rootpool] +#tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_neg'] + +# DISABLED: Hangs on I/O for unclear reason.
+#[tests/functional/rsend] +#tests = ['rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', +# 'rsend_005_pos', 'rsend_006_pos', 'rsend_007_pos', 'rsend_008_pos', +# 'rsend_009_pos', 'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos', +# 'rsend_013_pos'] + +[tests/functional/scrub_mirror] +tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos', + 'scrub_mirror_003_pos', 'scrub_mirror_004_pos'] + +# DISABLED: Scripts need to be updated. +# slog_012_neg - needs investigation +# slog_013_pos - Linux doesn't have a 'lofiadm' command. +# slog_014_pos - needs investigation +[tests/functional/slog] +tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos', + 'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg', + 'slog_009_neg', 'slog_010_neg', 'slog_011_neg'] + +# DISABLED: +# rollback_003_pos - Hangs in unmount and spins. +# snapshot_013_pos - Hangs on I/O for unclear reason. +# snapshot_016_pos - .zfs mv/rmdir/mkdir disabled by default. +#[tests/functional/snapshot] +#tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos', +# 'snapshot_001_pos', 'snapshot_002_pos', +# 'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos', +# 'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos', +# 'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos', +# 'snapshot_012_pos', 'snapshot_014_pos', +# 'snapshot_015_pos', 'snapshot_017_pos'] +[tests/functional/snapused] +tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos', + 'snapused_004_pos', 'snapused_005_pos'] + +[tests/functional/sparse] +tests = ['sparse_001_pos'] + +# DISABLED: needs investigation +#[tests/functional/threadsappend] +#tests = ['threadsappend_001_pos'] + +[tests/functional/truncate] +tests = ['truncate_001_pos', 'truncate_002_pos'] + +# DISABLED: +# groupspace_001_pos +# groupspace_002_pos +# userquota_001_pos +# userquota_004_pos +# userquota_007_pos +# userquota_010_pos +# userspace_001_pos +# userspace_002_pos +[tests/functional/userquota] +tests = [ + 'userquota_002_pos', 'userquota_003_pos', + 'userquota_005_neg', 'userquota_006_pos', + 'userquota_008_pos', 'userquota_009_pos', + 'userquota_011_pos', 'userquota_012_neg'] + +# DISABLED: +# write_dirs_002_pos - needs investigation +[tests/functional/write_dirs] +tests = ['write_dirs_001_pos'] + +# DISABLED: No 'runat' command, replace with the Linux equivalent and add xattrtest +#[tests/functional/xattr] +#tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos', +# 'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg', 'xattr_008_pos', +# 'xattr_009_neg', 'xattr_010_neg', 'xattr_011_pos', 'xattr_012_pos', +# 'xattr_013_pos'] + +[tests/functional/zvol/zvol_ENOSPC] +tests = ['zvol_ENOSPC_001_pos'] + +[tests/functional/zvol/zvol_cli] +tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg'] + +# DISABLED: requires dumpadm +#[tests/functional/zvol/zvol_misc] +#tests = ['zvol_misc_001_neg', 'zvol_misc_002_pos', 'zvol_misc_003_neg', +# 'zvol_misc_004_pos', 'zvol_misc_005_neg', 'zvol_misc_006_pos'] + +# DISABLED: requires updates for Linux +#[tests/functional/zvol/zvol_swap] +#tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos', 'zvol_swap_003_pos', +# 'zvol_swap_004_pos', 'zvol_swap_005_pos', 'zvol_swap_006_pos'] diff --git a/tests/test-runner/Makefile.am b/tests/test-runner/Makefile.am new file mode 100644 index 000000000..6a0d9ec29 --- /dev/null +++ b/tests/test-runner/Makefile.am @@ -0,0 +1 @@ +SUBDIRS = cmd include man diff --git a/tests/test-runner/cmd/Makefile.am b/tests/test-runner/cmd/Makefile.am new file
mode 100644 index 000000000..223622dda --- /dev/null +++ b/tests/test-runner/cmd/Makefile.am @@ -0,0 +1,3 @@ +pkgdatadir = $(datadir)/@PACKAGE@/test-runner/bin +dist_pkgdata_SCRIPTS = \ + test-runner.py diff --git a/tests/test-runner/cmd/test-runner.py b/tests/test-runner/cmd/test-runner.py new file mode 100755 index 000000000..dd6a3c7b6 --- /dev/null +++ b/tests/test-runner/cmd/test-runner.py @@ -0,0 +1,862 @@ +#!/usr/bin/python + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2013 by Delphix. All rights reserved. +# + +import ConfigParser +import os +import logging +from datetime import datetime +from optparse import OptionParser +from pwd import getpwnam +from pwd import getpwuid +from select import select +from subprocess import PIPE +from subprocess import Popen +from sys import argv +from sys import exit +from threading import Timer +from time import time + +BASEDIR = '/var/tmp/test_results' +TESTDIR = '/usr/share/zfs/' +KILL = 'kill' +TRUE = 'true' +SUDO = 'sudo' + + +class Result(object): + total = 0 + runresults = {'PASS': 0, 'FAIL': 0, 'SKIP': 0, 'KILLED': 0} + + def __init__(self): + self.starttime = None + self.returncode = None + self.runtime = '' + self.stdout = [] + self.stderr = [] + self.result = '' + + def done(self, proc, killed): + """ + Finalize the results of this Cmd. + """ + Result.total += 1 + m, s = divmod(time() - self.starttime, 60) + self.runtime = '%02d:%02d' % (m, s) + self.returncode = proc.returncode + if killed: + self.result = 'KILLED' + Result.runresults['KILLED'] += 1 + elif self.returncode is 0: + self.result = 'PASS' + Result.runresults['PASS'] += 1 + elif self.returncode is 4: + self.result = 'SKIP' + Result.runresults['SKIP'] += 1 + elif self.returncode is not 0: + self.result = 'FAIL' + Result.runresults['FAIL'] += 1 + + +class Output(object): + """ + This class is a slightly modified version of the 'Stream' class found + here: http://goo.gl/aSGfv + """ + def __init__(self, stream): + self.stream = stream + self._buf = '' + self.lines = [] + + def fileno(self): + return self.stream.fileno() + + def read(self, drain=0): + """ + Read from the file descriptor. If 'drain' set, read until EOF. + """ + while self._read() is not None: + if not drain: + break + + def _read(self): + """ + Read up to 4k of data from this output stream. Collect the output + up to the last newline, and append it to any leftover data from a + previous call. The lines are stored as a (timestamp, data) tuple + for easy sorting/merging later. 
+ """ + fd = self.fileno() + buf = os.read(fd, 4096) + if not buf: + return None + if '\n' not in buf: + self._buf += buf + return [] + + buf = self._buf + buf + tmp, rest = buf.rsplit('\n', 1) + self._buf = rest + now = datetime.now() + rows = tmp.split('\n') + self.lines += [(now, r) for r in rows] + + +class Cmd(object): + verified_users = [] + + def __init__(self, pathname, outputdir=None, timeout=None, user=None): + self.pathname = pathname + self.outputdir = outputdir or 'BASEDIR' + self.timeout = timeout or 60 + self.user = user or '' + self.killed = False + self.result = Result() + + def __str__(self): + return "Pathname: %s\nOutputdir: %s\nTimeout: %s\nUser: %s\n" % ( + self.pathname, self.outputdir, self.timeout, self.user) + + def kill_cmd(self, proc): + """ + Kill a running command due to timeout, or ^C from the keyboard. If + sudo is required, this user was verified previously. + """ + self.killed = True + do_sudo = len(self.user) != 0 + signal = '-TERM' + + cmd = [SUDO, KILL, signal, str(proc.pid)] + if not do_sudo: + del cmd[0] + + try: + kp = Popen(cmd) + kp.wait() + except: + pass + + def update_cmd_privs(self, cmd, user): + """ + If a user has been specified to run this Cmd and we're not already + running as that user, prepend the appropriate sudo command to run + as that user. + """ + me = getpwuid(os.getuid()) + + if not user or user is me: + return cmd + + if not os.path.isfile(cmd): + if os.path.isfile(cmd+'.ksh') and os.access(cmd+'.ksh', os.X_OK): + cmd += '.ksh' + if os.path.isfile(cmd+'.sh') and os.access(cmd+'.sh', os.X_OK): + cmd += '.sh' + + ret = '%s -E -u %s %s' % (SUDO, user, cmd) + return ret.split(' ') + + def collect_output(self, proc): + """ + Read from stdout/stderr as data becomes available, until the + process is no longer running. Return the lines from the stdout and + stderr Output objects. + """ + out = Output(proc.stdout) + err = Output(proc.stderr) + res = [] + while proc.returncode is None: + proc.poll() + res = select([out, err], [], [], .1) + for fd in res[0]: + fd.read() + for fd in res[0]: + fd.read(drain=1) + + return out.lines, err.lines + + def run(self, options): + """ + This is the main function that runs each individual test. + Determine whether or not the command requires sudo, and modify it + if needed. Run the command, and update the result object. + """ + if options.dryrun is True: + print self + return + + privcmd = self.update_cmd_privs(self.pathname, self.user) + try: + old = os.umask(0) + if not os.path.isdir(self.outputdir): + os.makedirs(self.outputdir, mode=0777) + os.umask(old) + except OSError, e: + fail('%s' % e) + + try: + self.result.starttime = time() + proc = Popen(privcmd, stdout=PIPE, stderr=PIPE) + t = Timer(int(self.timeout), self.kill_cmd, [proc]) + t.start() + self.result.stdout, self.result.stderr = self.collect_output(proc) + except KeyboardInterrupt: + self.kill_cmd(proc) + fail('\nRun terminated at user request.') + finally: + t.cancel() + + self.result.done(proc, self.killed) + + def skip(self): + """ + Initialize enough of the test result that we can log a skipped + command. + """ + Result.total += 1 + Result.runresults['SKIP'] += 1 + self.result.stdout = self.result.stderr = [] + self.result.starttime = time() + m, s = divmod(time() - self.result.starttime, 60) + self.result.runtime = '%02d:%02d' % (m, s) + self.result.result = 'SKIP' + + def log(self, logger, options): + """ + This function is responsible for writing all output. 
This includes + the console output, the logfile of all results (with timestamped + merged stdout and stderr), and for each test, the unmodified + stdout/stderr/merged in it's own file. + """ + if logger is None: + return + + logname = getpwuid(os.getuid()).pw_name + user = ' (run as %s)' % (self.user if len(self.user) else logname) + msga = 'Test: %s%s ' % (self.pathname, user) + msgb = '[%s] [%s]' % (self.result.runtime, self.result.result) + pad = ' ' * (80 - (len(msga) + len(msgb))) + + # If -q is specified, only print a line for tests that didn't pass. + # This means passing tests need to be logged as DEBUG, or the one + # line summary will only be printed in the logfile for failures. + if not options.quiet: + logger.info('%s%s%s' % (msga, pad, msgb)) + elif self.result.result is not 'PASS': + logger.info('%s%s%s' % (msga, pad, msgb)) + else: + logger.debug('%s%s%s' % (msga, pad, msgb)) + + lines = self.result.stdout + self.result.stderr + for dt, line in sorted(lines): + logger.debug('%s %s' % (dt.strftime("%H:%M:%S.%f ")[:11], line)) + + if len(self.result.stdout): + with open(os.path.join(self.outputdir, 'stdout'), 'w') as out: + for _, line in self.result.stdout: + os.write(out.fileno(), '%s\n' % line) + if len(self.result.stderr): + with open(os.path.join(self.outputdir, 'stderr'), 'w') as err: + for _, line in self.result.stderr: + os.write(err.fileno(), '%s\n' % line) + if len(self.result.stdout) and len(self.result.stderr): + with open(os.path.join(self.outputdir, 'merged'), 'w') as merged: + for _, line in sorted(lines): + os.write(merged.fileno(), '%s\n' % line) + + +class Test(Cmd): + props = ['outputdir', 'timeout', 'user', 'pre', 'pre_user', 'post', + 'post_user'] + + def __init__(self, pathname, outputdir=None, timeout=None, user=None, + pre=None, pre_user=None, post=None, post_user=None): + super(Test, self).__init__(pathname, outputdir, timeout, user) + self.pre = pre or '' + self.pre_user = pre_user or '' + self.post = post or '' + self.post_user = post_user or '' + + def __str__(self): + post_user = pre_user = '' + if len(self.pre_user): + pre_user = ' (as %s)' % (self.pre_user) + if len(self.post_user): + post_user = ' (as %s)' % (self.post_user) + return "Pathname: %s\nOutputdir: %s\nTimeout: %s\nPre: %s%s\nPost: " \ + "%s%s\nUser: %s\n" % (self.pathname, self.outputdir, + self.timeout, self.pre, pre_user, self.post, post_user, + self.user) + + def verify(self, logger): + """ + Check the pre/post scripts, user and Test. Omit the Test from this + run if there are any problems. + """ + files = [self.pre, self.pathname, self.post] + users = [self.pre_user, self.user, self.post_user] + + for f in [f for f in files if len(f)]: + if not verify_file(f): + logger.info("Warning: Test '%s' not added to this run because" + " it failed verification." % f) + return False + + for user in [user for user in users if len(user)]: + if not verify_user(user, logger): + logger.info("Not adding Test '%s' to this run." % + self.pathname) + return False + + return True + + def run(self, logger, options): + """ + Create Cmd instances for the pre/post scripts. If the pre script + doesn't pass, skip this Test. Run the post script regardless. 
+ """ + pretest = Cmd(self.pre, outputdir=os.path.join(self.outputdir, + os.path.basename(self.pre)), timeout=self.timeout, + user=self.pre_user) + test = Cmd(self.pathname, outputdir=self.outputdir, + timeout=self.timeout, user=self.user) + posttest = Cmd(self.post, outputdir=os.path.join(self.outputdir, + os.path.basename(self.post)), timeout=self.timeout, + user=self.post_user) + + cont = True + if len(pretest.pathname): + pretest.run(options) + cont = pretest.result.result is 'PASS' + pretest.log(logger, options) + + if cont: + test.run(options) + else: + test.skip() + + test.log(logger, options) + + if len(posttest.pathname): + posttest.run(options) + posttest.log(logger, options) + + +class TestGroup(Test): + props = Test.props + ['tests'] + + def __init__(self, pathname, outputdir=None, timeout=None, user=None, + pre=None, pre_user=None, post=None, post_user=None, + tests=None): + super(TestGroup, self).__init__(pathname, outputdir, timeout, user, + pre, pre_user, post, post_user) + self.tests = tests or [] + + def __str__(self): + post_user = pre_user = '' + if len(self.pre_user): + pre_user = ' (as %s)' % (self.pre_user) + if len(self.post_user): + post_user = ' (as %s)' % (self.post_user) + return "Pathname: %s\nOutputdir: %s\nTests: %s\nTimeout: %s\n" \ + "Pre: %s%s\nPost: %s%s\nUser: %s\n" % (self.pathname, + self.outputdir, self.tests, self.timeout, self.pre, pre_user, + self.post, post_user, self.user) + + def verify(self, logger): + """ + Check the pre/post scripts, user and tests in this TestGroup. Omit + the TestGroup entirely, or simply delete the relevant tests in the + group, if that's all that's required. + """ + # If the pre or post scripts are relative pathnames, convert to + # absolute, so they stand a chance of passing verification. + if len(self.pre) and not os.path.isabs(self.pre): + self.pre = os.path.join(self.pathname, self.pre) + if len(self.post) and not os.path.isabs(self.post): + self.post = os.path.join(self.pathname, self.post) + + auxfiles = [self.pre, self.post] + users = [self.pre_user, self.user, self.post_user] + + for f in [f for f in auxfiles if len(f)]: + if self.pathname != os.path.dirname(f): + logger.info("Warning: TestGroup '%s' not added to this run. " + "Auxiliary script '%s' exists in a different " + "directory." % (self.pathname, f)) + return False + + if not verify_file(f): + logger.info("Warning: TestGroup '%s' not added to this run. " + "Auxiliary script '%s' failed verification." % + (self.pathname, f)) + return False + + for user in [user for user in users if len(user)]: + if not verify_user(user, logger): + logger.info("Not adding TestGroup '%s' to this run." % + self.pathname) + return False + + # If one of the tests is invalid, delete it, log it, and drive on. + for test in self.tests: + if not verify_file(os.path.join(self.pathname, test)): + del self.tests[self.tests.index(test)] + logger.info("Warning: Test '%s' removed from TestGroup '%s' " + "because it failed verification." % (test, + self.pathname)) + + return len(self.tests) is not 0 + + def run(self, logger, options): + """ + Create Cmd instances for the pre/post scripts. If the pre script + doesn't pass, skip all the tests in this TestGroup. Run the post + script regardless. 
+ """ + pretest = Cmd(self.pre, outputdir=os.path.join(self.outputdir, + os.path.basename(self.pre)), timeout=self.timeout, + user=self.pre_user) + posttest = Cmd(self.post, outputdir=os.path.join(self.outputdir, + os.path.basename(self.post)), timeout=self.timeout, + user=self.post_user) + + cont = True + if len(pretest.pathname): + pretest.run(options) + cont = pretest.result.result is 'PASS' + pretest.log(logger, options) + + for fname in self.tests: + test = Cmd(os.path.join(self.pathname, fname), + outputdir=os.path.join(self.outputdir, fname), + timeout=self.timeout, user=self.user) + if cont: + test.run(options) + else: + test.skip() + + test.log(logger, options) + + if len(posttest.pathname): + posttest.run(options) + posttest.log(logger, options) + + +class TestRun(object): + props = ['quiet', 'outputdir'] + + def __init__(self, options): + self.tests = {} + self.testgroups = {} + self.starttime = time() + self.timestamp = datetime.now().strftime('%Y%m%dT%H%M%S') + self.outputdir = os.path.join(options.outputdir, self.timestamp) + self.logger = self.setup_logging(options) + self.defaults = [ + ('outputdir', BASEDIR), + ('quiet', False), + ('timeout', 60), + ('user', ''), + ('pre', ''), + ('pre_user', ''), + ('post', ''), + ('post_user', '') + ] + + def __str__(self): + s = 'TestRun:\n outputdir: %s\n' % self.outputdir + s += 'TESTS:\n' + for key in sorted(self.tests.keys()): + s += '%s%s' % (self.tests[key].__str__(), '\n') + s += 'TESTGROUPS:\n' + for key in sorted(self.testgroups.keys()): + s += '%s%s' % (self.testgroups[key].__str__(), '\n') + return s + + def addtest(self, pathname, options): + """ + Create a new Test, and apply any properties that were passed in + from the command line. If it passes verification, add it to the + TestRun. + """ + test = Test(pathname) + for prop in Test.props: + setattr(test, prop, getattr(options, prop)) + + if test.verify(self.logger): + self.tests[pathname] = test + + def addtestgroup(self, dirname, filenames, options): + """ + Create a new TestGroup, and apply any properties that were passed + in from the command line. If it passes verification, add it to the + TestRun. + """ + if dirname not in self.testgroups: + testgroup = TestGroup(dirname) + for prop in Test.props: + setattr(testgroup, prop, getattr(options, prop)) + + # Prevent pre/post scripts from running as regular tests + for f in [testgroup.pre, testgroup.post]: + if f in filenames: + del filenames[filenames.index(f)] + + self.testgroups[dirname] = testgroup + self.testgroups[dirname].tests = sorted(filenames) + + testgroup.verify(self.logger) + + def read(self, logger, options): + """ + Read in the specified runfile, and apply the TestRun properties + listed in the 'DEFAULT' section to our TestRun. Then read each + section, and apply the appropriate properties to the Test or + TestGroup. Properties from individual sections override those set + in the 'DEFAULT' section. If the Test or TestGroup passes + verification, add it to the TestRun. 
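+        For example, a test group section (such as those in
+        tests/runfiles/linux.run) might look like:
+
+            [tests/functional/atime]
+            tests = ['atime_001_pos', 'atime_002_neg']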
+ """ + config = ConfigParser.RawConfigParser() + if not len(config.read(options.runfile)): + fail("Coulnd't read config file %s" % options.runfile) + + for opt in TestRun.props: + if config.has_option('DEFAULT', opt): + setattr(self, opt, config.get('DEFAULT', opt)) + self.outputdir = os.path.join(self.outputdir, self.timestamp) + + for section in config.sections(): + if 'tests' in config.options(section): + if os.path.isdir(section): + pathname = section + elif os.path.isdir(os.path.join(options.testdir, section)): + pathname = os.path.join(options.testdir, section) + else: + pathname = section + + testgroup = TestGroup(os.path.abspath(pathname)) + for prop in TestGroup.props: + try: + setattr(testgroup, prop, config.get('DEFAULT', prop)) + setattr(testgroup, prop, config.get(section, prop)) + except ConfigParser.NoOptionError: + pass + + # Repopulate tests using eval to convert the string to a list + testgroup.tests = eval(config.get(section, 'tests')) + + if testgroup.verify(logger): + self.testgroups[section] = testgroup + else: + test = Test(section) + for prop in Test.props: + try: + setattr(test, prop, config.get('DEFAULT', prop)) + setattr(test, prop, config.get(section, prop)) + except ConfigParser.NoOptionError: + pass + if test.verify(logger): + self.tests[section] = test + + def write(self, options): + """ + Create a configuration file for editing and later use. The + 'DEFAULT' section of the config file is created from the + properties that were specified on the command line. Tests are + simply added as sections that inherit everything from the + 'DEFAULT' section. TestGroups are the same, except they get an + option including all the tests to run in that directory. + """ + + defaults = dict([(prop, getattr(options, prop)) for prop, _ in + self.defaults]) + config = ConfigParser.RawConfigParser(defaults) + + for test in sorted(self.tests.keys()): + config.add_section(test) + + for testgroup in sorted(self.testgroups.keys()): + config.add_section(testgroup) + config.set(testgroup, 'tests', self.testgroups[testgroup].tests) + + try: + with open(options.template, 'w') as f: + return config.write(f) + except IOError: + fail('Could not open \'%s\' for writing.' % options.template) + + def complete_outputdirs(self, options): + """ + Collect all the pathnames for Tests, and TestGroups. Work + backwards one pathname component at a time, to create a unique + directory name in which to deposit test output. Tests will be able + to write output files directly in the newly modified outputdir. + TestGroups will be able to create one subdirectory per test in the + outputdir, and are guaranteed uniqueness because a group can only + contain files in one directory. Pre and post tests will create a + directory rooted at the outputdir of the Test or TestGroup in + question for their output. + """ + done = False + components = 0 + tmp_dict = dict(self.tests.items() + self.testgroups.items()) + total = len(tmp_dict) + base = self.outputdir + + while not done: + l = [] + components -= 1 + for testfile in tmp_dict.keys(): + uniq = '/'.join(testfile.split('/')[components:]).lstrip('/') + if not uniq in l: + l.append(uniq) + tmp_dict[testfile].outputdir = os.path.join(base, uniq) + else: + break + done = total == len(l) + + def setup_logging(self, options): + """ + Two loggers are set up here. The first is for the logfile which + will contain one line summarizing the test, including the test + name, result, and running time. 
This logger will also capture the + timestamped combined stdout and stderr of each run. The second + logger is optional console output, which will contain only the one + line summary. The loggers are initialized at two different levels + to facilitate segregating the output. + """ + if options.dryrun is True: + return + + testlogger = logging.getLogger(__name__) + testlogger.setLevel(logging.DEBUG) + + if options.cmd is not 'wrconfig': + try: + old = os.umask(0) + os.makedirs(self.outputdir, mode=0777) + os.umask(old) + except OSError, e: + fail('%s' % e) + filename = os.path.join(self.outputdir, 'log') + + logfile = logging.FileHandler(filename) + logfile.setLevel(logging.DEBUG) + logfilefmt = logging.Formatter('%(message)s') + logfile.setFormatter(logfilefmt) + testlogger.addHandler(logfile) + + cons = logging.StreamHandler() + cons.setLevel(logging.INFO) + consfmt = logging.Formatter('%(message)s') + cons.setFormatter(consfmt) + testlogger.addHandler(cons) + + return testlogger + + def run(self, options): + """ + Walk through all the Tests and TestGroups, calling run(). + """ + try: + os.chdir(self.outputdir) + except OSError: + fail('Could not change to directory %s' % self.outputdir) + for test in sorted(self.tests.keys()): + self.tests[test].run(self.logger, options) + for testgroup in sorted(self.testgroups.keys()): + self.testgroups[testgroup].run(self.logger, options) + + def summary(self): + if Result.total is 0: + return + + print '\nResults Summary' + for key in Result.runresults.keys(): + if Result.runresults[key] is not 0: + print '%s\t% 4d' % (key, Result.runresults[key]) + + m, s = divmod(time() - self.starttime, 60) + h, m = divmod(m, 60) + print '\nRunning Time:\t%02d:%02d:%02d' % (h, m, s) + print 'Percent passed:\t%.1f%%' % ((float(Result.runresults['PASS']) / + float(Result.total)) * 100) + print 'Log directory:\t%s' % self.outputdir + + +def verify_file(pathname): + """ + Verify that the supplied pathname is an executable regular file. + """ + if os.path.isdir(pathname) or os.path.islink(pathname): + return False + + if (os.path.isfile(pathname) and os.access(pathname, os.X_OK)) or \ + (os.path.isfile(pathname+'.ksh') and os.access(pathname+'.ksh', os.X_OK)) or \ + (os.path.isfile(pathname+'.sh') and os.access(pathname+'.sh', os.X_OK)): + return True + + return False + + +def verify_user(user, logger): + """ + Verify that the specified user exists on this system, and can execute + sudo without being prompted for a password. + """ + testcmd = [SUDO, '-n', '-u', user, TRUE] + can_sudo = exists = True + + if user in Cmd.verified_users: + return True + + try: + _ = getpwnam(user) + except KeyError: + exists = False + logger.info("Warning: user '%s' does not exist.", user) + return False + + p = Popen(testcmd) + p.wait() + if p.returncode is not 0: + logger.info("Warning: user '%s' cannot use passwordless sudo.", user) + return False + else: + Cmd.verified_users.append(user) + + return True + + +def find_tests(testrun, options): + """ + For the given list of pathnames, add files as Tests. For directories, + if do_groups is True, add the directory as a TestGroup. If False, + recursively search for executable files. 
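+    For example, with the -g option each directory walked under the given
+    pathnames is added as its own TestGroup; without it, every executable
+    file found is added as an individual Test.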
+ """ + + for p in sorted(options.pathnames): + if os.path.isdir(p): + for dirname, _, filenames in os.walk(p): + if options.do_groups: + testrun.addtestgroup(dirname, filenames, options) + else: + for f in sorted(filenames): + testrun.addtest(os.path.join(dirname, f), options) + else: + testrun.addtest(p, options) + + +def fail(retstr, ret=1): + print '%s: %s' % (argv[0], retstr) + exit(ret) + + +def options_cb(option, opt_str, value, parser): + path_options = ['runfile', 'outputdir', 'template', 'testdir'] + + if option.dest is 'runfile' and '-w' in parser.rargs or \ + option.dest is 'template' and '-c' in parser.rargs: + fail('-c and -w are mutually exclusive.') + + if opt_str in parser.rargs: + fail('%s may only be specified once.' % opt_str) + + if option.dest is 'runfile': + parser.values.cmd = 'rdconfig' + if option.dest is 'template': + parser.values.cmd = 'wrconfig' + + setattr(parser.values, option.dest, value) + if option.dest in path_options: + setattr(parser.values, option.dest, os.path.abspath(value)) + + +def parse_args(): + parser = OptionParser() + parser.add_option('-c', action='callback', callback=options_cb, + type='string', dest='runfile', metavar='runfile', + help='Specify tests to run via config file.') + parser.add_option('-d', action='store_true', default=False, dest='dryrun', + help='Dry run. Print tests, but take no other action.') + parser.add_option('-g', action='store_true', default=False, + dest='do_groups', help='Make directories TestGroups.') + parser.add_option('-o', action='callback', callback=options_cb, + default=BASEDIR, dest='outputdir', type='string', + metavar='outputdir', help='Specify an output directory.') + parser.add_option('-i', action='callback', callback=options_cb, + default=TESTDIR, dest='testdir', type='string', + metavar='testdir', help='Specify a test directory.') + parser.add_option('-p', action='callback', callback=options_cb, + default='', dest='pre', metavar='script', + type='string', help='Specify a pre script.') + parser.add_option('-P', action='callback', callback=options_cb, + default='', dest='post', metavar='script', + type='string', help='Specify a post script.') + parser.add_option('-q', action='store_true', default=False, dest='quiet', + help='Silence on the console during a test run.') + parser.add_option('-t', action='callback', callback=options_cb, default=60, + dest='timeout', metavar='seconds', type='int', + help='Timeout (in seconds) for an individual test.') + parser.add_option('-u', action='callback', callback=options_cb, + default='', dest='user', metavar='user', type='string', + help='Specify a different user name to run as.') + parser.add_option('-w', action='callback', callback=options_cb, + default=None, dest='template', metavar='template', + type='string', help='Create a new config file.') + parser.add_option('-x', action='callback', callback=options_cb, default='', + dest='pre_user', metavar='pre_user', type='string', + help='Specify a user to execute the pre script.') + parser.add_option('-X', action='callback', callback=options_cb, default='', + dest='post_user', metavar='post_user', type='string', + help='Specify a user to execute the post script.') + (options, pathnames) = parser.parse_args() + + if not options.runfile and not options.template: + options.cmd = 'runtests' + + if options.runfile and len(pathnames): + fail('Extraneous arguments.') + + options.pathnames = [os.path.abspath(path) for path in pathnames] + + return options + + +def main(args): + options = parse_args() + testrun = TestRun(options) 
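+    # Dispatch on the sub-command implied by the parsed options: 'runtests'
+    # walks the pathnames given on the command line, 'rdconfig' reads an
+    # existing runfile, and 'wrconfig' only writes a template runfile.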
+ + if options.cmd is 'runtests': + find_tests(testrun, options) + elif options.cmd is 'rdconfig': + testrun.read(testrun.logger, options) + elif options.cmd is 'wrconfig': + find_tests(testrun, options) + testrun.write(options) + exit(0) + else: + fail('Unknown command specified') + + testrun.complete_outputdirs(options) + testrun.run(options) + testrun.summary() + exit(0) + + +if __name__ == '__main__': + main(argv[1:]) diff --git a/tests/test-runner/include/Makefile.am b/tests/test-runner/include/Makefile.am new file mode 100644 index 000000000..d071dd495 --- /dev/null +++ b/tests/test-runner/include/Makefile.am @@ -0,0 +1,4 @@ +pkgdatadir = $(datadir)/@PACKAGE@/test-runner/include +dist_pkgdata_SCRIPTS = \ + logapi.shlib \ + stf.shlib diff --git a/tests/test-runner/include/logapi.shlib b/tests/test-runner/include/logapi.shlib new file mode 100644 index 000000000..6fd4ab369 --- /dev/null +++ b/tests/test-runner/include/logapi.shlib @@ -0,0 +1,385 @@ +#!/bin/ksh -p +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2007 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# +# Copyright (c) 2012 by Delphix. All rights reserved. +# + +. ${STF_TOOLS}/include/stf.shlib + +# Output an assertion +# +# $@ - assertion text + +function log_assert +{ + _printline ASSERTION: "$@" +} + +# Output a comment +# +# $@ - comment text + +function log_note +{ + _printline NOTE: "$@" +} + +# Execute and print command with status where success equals non-zero result +# +# $@ - command to execute +# +# return 0 if command fails, otherwise return 1 + +function log_neg +{ + log_neg_expect "" "$@" + return $? +} + +# Execute a positive test and exit $STF_FAIL is test fails +# +# $@ - command to execute + +function log_must +{ + log_pos "$@" + (( $? != 0 )) && log_fail +} + +# Execute a negative test and exit $STF_FAIL if test passes +# +# $@ - command to execute + +function log_mustnot +{ + log_neg "$@" + (( $? != 0 )) && log_fail +} + +# Execute a negative test with keyword expected, and exit +# $STF_FAIL if test passes +# +# $1 - keyword expected +# $2-$@ - command to execute + +function log_mustnot_expect +{ + log_neg_expect "$@" + (( $? != 0 )) && log_fail +} + +# Execute and print command with status where success equals non-zero result +# or output includes expected keyword +# +# $1 - keyword expected +# $2-$@ - command to execute +# +# return 0 if command fails, or the output contains the keyword expected, +# return 1 otherwise + +function log_neg_expect +{ + typeset out="" + typeset logfile="/tmp/log.$$" + typeset ret=1 + typeset expect=$1 + shift + + while [[ -e $logfile ]]; do + logfile="$logfile.$$" + done + + "$@" 2>$logfile + typeset status=$? 
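+    # 'out' holds the command used to re-read the captured output; the
+    # branches below re-run it as $($out) whenever the log contents need
+    # to be printed or searched.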
+ out="$CAT $logfile" + + # unexpected status + if (( $status == 0 )); then + print -u2 $($out) + _printerror "$@" "unexpectedly exited $status" + # missing binary + elif (( $status == 127 )); then + print -u2 $($out) + _printerror "$@" "unexpectedly exited $status (File not found)" + # bus error - core dump + elif (( $status == 138 )); then + print -u2 $($out) + _printerror "$@" "unexpectedly exited $status (Bus Error)" + # segmentation violation - core dump + elif (( $status == 139 )); then + print -u2 $($out) + _printerror "$@" "unexpectedly exited $status (SEGV)" + else + $out | $EGREP -i "internal error|assertion failed" \ + > /dev/null 2>&1 + # internal error or assertion failed + if (( $? == 0 )); then + print -u2 $($out) + _printerror "$@" "internal error or assertion failure" \ + " exited $status" + elif [[ -n $expect ]] ; then + $out | $GREP -i "$expect" > /dev/null 2>&1 + if (( $? == 0 )); then + ret=0 + else + print -u2 $($out) + _printerror "$@" "unexpectedly exited $status" + fi + else + ret=0 + fi + + if (( $ret == 0 )); then + [[ -n $LOGAPI_DEBUG ]] && print $($out) + _printsuccess "$@" "exited $status" + fi + fi + _recursive_output $logfile "false" + return $ret +} + +# Execute and print command with status where success equals zero result +# +# $@ command to execute +# +# return command exit status + +function log_pos +{ + typeset out="" + typeset logfile="/tmp/log.$$" + + while [[ -e $logfile ]]; do + logfile="$logfile.$$" + done + + "$@" 2>$logfile + typeset status=$? + out="$CAT $logfile" + + if (( $status != 0 )) ; then + print -u2 $($out) + _printerror "$@" "exited $status" + else + $out | $EGREP -i "internal error|assertion failed" \ + > /dev/null 2>&1 + # internal error or assertion failed + if [[ $? -eq 0 ]]; then + print -u2 $($out) + _printerror "$@" "internal error or assertion failure" \ + " exited $status" + status=1 + else + [[ -n $LOGAPI_DEBUG ]] && print $($out) + _printsuccess "$@" + fi + fi + _recursive_output $logfile "false" + return $status +} + +# Set an exit handler +# +# $@ - function(s) to perform on exit + +function log_onexit +{ + _CLEANUP="$@" +} + +# +# Exit functions +# + +# Perform cleanup and exit $STF_PASS +# +# $@ - message text + +function log_pass +{ + _endlog $STF_PASS "$@" +} + +# Perform cleanup and exit $STF_FAIL +# +# $@ - message text + +function log_fail +{ + _endlog $STF_FAIL "$@" +} + +# Perform cleanup and exit $STF_UNRESOLVED +# +# $@ - message text + +function log_unresolved +{ + _endlog $STF_UNRESOLVED "$@" +} + +# Perform cleanup and exit $STF_NOTINUSE +# +# $@ - message text + +function log_notinuse +{ + _endlog $STF_NOTINUSE "$@" +} + +# Perform cleanup and exit $STF_UNSUPPORTED +# +# $@ - message text + +function log_unsupported +{ + _endlog $STF_UNSUPPORTED "$@" +} + +# Perform cleanup and exit $STF_UNTESTED +# +# $@ - message text + +function log_untested +{ + _endlog $STF_UNTESTED "$@" +} + +# Perform cleanup and exit $STF_UNINITIATED +# +# $@ - message text + +function log_uninitiated +{ + _endlog $STF_UNINITIATED "$@" +} + +# Perform cleanup and exit $STF_NORESULT +# +# $@ - message text + +function log_noresult +{ + _endlog $STF_NORESULT "$@" +} + +# Perform cleanup and exit $STF_WARNING +# +# $@ - message text + +function log_warning +{ + _endlog $STF_WARNING "$@" +} + +# Perform cleanup and exit $STF_TIMED_OUT +# +# $@ - message text + +function log_timed_out +{ + _endlog $STF_TIMED_OUT "$@" +} + +# Perform cleanup and exit $STF_OTHER +# +# $@ - message text + +function log_other +{ + _endlog $STF_OTHER "$@" +} 
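+
+# Example usage (an illustrative sketch only; the cleanup function and the
+# $TESTPOOL/$TESTFS variables are assumed to be provided by the individual
+# test's setup; they are not defined by this library):
+#
+#   log_assert "A dataset can only be created once."
+#   log_onexit cleanup
+#   log_must zfs create $TESTPOOL/$TESTFS/newfs
+#   log_mustnot zfs create $TESTPOOL/$TESTFS/newfs
+#   log_must zfs destroy $TESTPOOL/$TESTFS/newfs
+#   log_pass "Dataset creation behaved as expected."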
+ +# +# Internal functions +# + +# Perform cleanup and exit +# +# $1 - stf exit code +# $2-$n - message text + +function _endlog +{ + typeset logfile="/tmp/log.$$" + _recursive_output $logfile + + if [[ -n $_CLEANUP ]] ; then + typeset cleanup=$_CLEANUP + log_onexit "" + log_note "Performing local cleanup via log_onexit ($cleanup)" + $cleanup + fi + typeset exitcode=$1 + shift + (( ${#@} > 0 )) && _printline "$@" + exit $exitcode +} + +# Output a formatted line +# +# $@ - message text + +function _printline +{ + print "$@" +} + +# Output an error message +# +# $@ - message text + +function _printerror +{ + _printline ERROR: "$@" +} + +# Output a success message +# +# $@ - message text + +function _printsuccess +{ + _printline SUCCESS: "$@" +} + +# Output logfiles recursively +# +# $1 - start file +# $2 - indicate whether output the start file itself, default as yes. + +function _recursive_output #logfile +{ + typeset logfile=$1 + + while [[ -e $logfile ]]; do + if [[ -z $2 || $logfile != $1 ]]; then + $CAT $logfile + fi + $RM -f $logfile + logfile="$logfile.$$" + done +} diff --git a/tests/test-runner/include/stf.shlib b/tests/test-runner/include/stf.shlib new file mode 100644 index 000000000..ea879a84c --- /dev/null +++ b/tests/test-runner/include/stf.shlib @@ -0,0 +1,57 @@ +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2007 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# +# Copyright (c) 2012 by Delphix. All rights reserved. 
+# + + +STF_PASS=0 +STF_FAIL=1 +STF_UNRESOLVED=2 +STF_NOTINUSE=3 +STF_UNSUPPORTED=4 +STF_UNTESTED=5 +STF_UNINITIATED=6 +STF_NORESULT=7 +STF_WARNING=8 +STF_TIMED_OUT=9 +STF_OTHER=10 + +# do this to use the names: eval echo \$STF_RESULT_NAME_${result} +STF_RESULT_NAME_0="PASS" +STF_RESULT_NAME_1="FAIL" +STF_RESULT_NAME_2="UNRESOLVED" +STF_RESULT_NAME_3="NOTINUSE" +STF_RESULT_NAME_4="UNSUPPORTED" +STF_RESULT_NAME_5="UNTESTED" +STF_RESULT_NAME_6="UNINITIATED" +STF_RESULT_NAME_7="NORESULT" +STF_RESULT_NAME_8="WARNING" +STF_RESULT_NAME_9="TIMED_OUT" +STF_RESULT_NAME_10="OTHER" + +# do this to use the array: ${STF_RESULT_NAMES[$result]} +STF_RESULT_NAMES=( "PASS" "FAIL" "UNRESOLVED" "NOTINUSE" "UNSUPPORTED" \ + "UNTESTED" "UNINITIATED" "NORESULT" "WARNING" "TIMED_OUT" "OTHER" ) diff --git a/tests/test-runner/man/Makefile.am b/tests/test-runner/man/Makefile.am new file mode 100644 index 000000000..a7017f5f0 --- /dev/null +++ b/tests/test-runner/man/Makefile.am @@ -0,0 +1,4 @@ +dist_man_MANS = test-runner.1 + +install-data-local: + $(INSTALL) -d -m 0755 "$(DESTDIR)$(mandir)/man1" diff --git a/tests/test-runner/man/test-runner.1 b/tests/test-runner/man/test-runner.1 new file mode 100644 index 000000000..31cd41245 --- /dev/null +++ b/tests/test-runner/man/test-runner.1 @@ -0,0 +1,370 @@ +.\" +.\" This file and its contents are supplied under the terms of the +.\" Common Development and Distribution License ("CDDL"), version 1.0. +.\" You may only use this file in accordance with the terms of version +.\" 1.0 of the CDDL. +.\" +.\" A full copy of the text of the CDDL should have accompanied this +.\" source. A copy of the CDDL is also available via the Internet at +.\" http://www.illumos.org/license/CDDL. +.\" +.\" +.\" Copyright (c) 2012 by Delphix. All rights reserved. +.\" +.TH run 1 "23 Sep 2012" +.SH NAME +run \- find, execute, and log the results of tests +.SH SYNOPSIS +.LP +.nf +\fBrun\fR [\fB-dgq] [\fB-o\fR \fIoutputdir\fR] [\fB-pP\fR \fIscript\fR] [\fB-t\fR \fIseconds\fR] [\fB-uxX\fR \fIusername\fR] + \fIpathname\fR ... +.fi + +.LP +.nf +\fBrun\fR \fB-w\fR \fIrunfile\fR [\fB-gq\fR] [\fB-o\fR \fIoutputdir\fR] [\fB-pP\fR \fIscript\fR] [\fB-t\fR \fIseconds\fR] + [\fB-uxX\fR \fIusername\fR] \fIpathname\fR ... +.fi + +.LP +.nf +\fBrun\fR \fB-c\fR \fIrunfile\fR [\fB-dq\fR] +.fi + +.LP +.nf +\fBrun\fR [\fB-h\fR] +.fi + +.SH DESCRIPTION +.sp +.LP +The \fBrun\fR command has three basic modes of operation. With neither the +\fB-c\fR nor the \fB-w\fR option, \fBrun\fR processes the arguments provided on +the command line, adding them to the list for this run. If a specified +\fIpathname\fR is an executable file, it is added as a test. If a specified +\fIpathname\fR is a directory, the behavior depends upon the \fB-g\fR option. +If \fB-g\fR is specified, the directory is treated as a test group. See the +section on "Test Groups" below. Without the \fB-g\fR option, \fBrun\fR simply +descends into the directory looking for executable files. The tests are then +executed, and the results are logged. + +With the \fB-w\fR option, \fBrun\fR finds tests in the manner described above. +Rather than executing the tests and logging the results, the test configuration +is stored in a \fIrunfile\fR which can be used in future invocations, or edited +to modify which tests are executed and which options are applied. Options +included on the command line with \fB-w\fR become defaults in the +\fIrunfile\fR. 
+ +With the \fB-c\fR option, \fBrun\fR parses a \fIrunfile\fR, which can specify a +series of tests and test groups to be executed. The tests are then executed, +and the results are logged. +.sp +.SS "Test Groups" +.sp +.LP +A test group is comprised of a set of executable files, all of which exist in +one directory. The options specified on the command line or in a \fIrunfile\fR +apply to individual tests in the group. The exception is options pertaining to +pre and post scripts, which act on all tests as a group. Rather than running +before and after each test, these scripts are run only once each at the start +and end of the test group. +.SS "Test Execution" +.sp +.LP +The specified tests run serially, and are typically assigned results according +to exit values. Tests that exit zero and non-zero are marked "PASS" and "FAIL" +respectively. When a pre script fails for a test group, only the post script is +executed, and the remaining tests are marked "SKIPPED." Any test that exceeds +its \fItimeout\fR is terminated, and marked "KILLED." + +By default, tests are executed with the credentials of the \fBrun\fR script. +Executing tests with other credentials is done via \fBsudo\fR(1m), which must +be configured to allow execution without prompting for a password. Environment +variables from the calling shell are available to individual tests. During test +execution, the working directory is changed to \fIoutputdir\fR. +.SS "Output Logging" +.sp +.LP +By default, \fBrun\fR will print one line on standard output at the conclusion +of each test indicating the test name, result and elapsed time. Additionally, +for each invocation of \fBrun\fR, a directory is created using the ISO 8601 +date format. Within this directory is a file named \fIlog\fR containing all the +test output with timestamps, and a directory for each test. Within the test +directories, there is one file each for standard output, standard error and +merged output. The default location for the \fIoutputdir\fR is +\fI/var/tmp/test_results\fR. +.SS "Runfiles" +.sp +.LP +The \fIrunfile\fR is an ini style configuration file that describes a test run. +The file has one section named "DEFAULT," which contains configuration option +names and their values in "name = value" format. The values in this section +apply to all the subsequent sections, unless they are also specified there, in +which case the default is overridden. The remaining section names are the +absolute pathnames of files and directories, describing tests and test groups +respectively. The legal option names are: +.sp +.ne 2 +.na +\fBoutputdir\fR = \fIpathname\fR +.ad +.sp .6 +.RS 4n +The name of the directory that holds test logs. +.RE +.sp +.ne 2 +.na +\fBpre\fR = \fIscript\fR +.ad +.sp .6 +.RS 4n +Run \fIscript\fR prior to the test or test group. +.RE +.sp +.ne 2 +.na +\fBpre_user\fR = \fIusername\fR +.ad +.sp .6 +.RS 4n +Execute the pre script as \fIusername\fR. +.RE +.sp +.ne 2 +.na +\fBpost\fR = \fIscript\fR +.ad +.sp .6 +.RS 4n +Run \fIscript\fR after the test or test group. +.RE +.sp +.ne 2 +.na +\fBpost_user\fR = \fIusername\fR +.ad +.sp .6 +.RS 4n +Execute the post script as \fIusername\fR. +.RE +.sp +.ne 2 +.na +\fBquiet\fR = [\fITrue\fR|\fIFalse\fR] +.ad +.sp .6 +.RS 4n +If set to True, only the results summary is printed to standard out. +.RE +.sp +.ne 2 +.na +\fBtests\fR = [\fI'filename'\fR [,...]] +.ad +.sp .6 +.RS 4n +Specify a list of \fIfilenames\fR for this test group. Only the basename of the +absolute path is required.
+This option is only valid for test groups, and each
+\fIfilename\fR must be single quoted.
+.RE
+.sp
+.ne 2
+.na
+\fBtimeout\fR = \fIn\fR
+.ad
+.sp .6
+.RS 4n
+A timeout value of \fIn\fR seconds.
+.RE
+.sp
+.ne 2
+.na
+\fBuser\fR = \fIusername\fR
+.ad
+.sp .6
+.RS 4n
+Execute the test or test group as \fIusername\fR.
+.RE
+
+.SH OPTIONS
+.sp
+.LP
+The following options are available for the \fBrun\fR command.
+.sp
+.ne 2
+.na
+\fB-c\fR \fIrunfile\fR
+.ad
+.RS 6n
+Specify a \fIrunfile\fR to be consumed by the run command.
+.RE
+
+.ne 2
+.na
+\fB-d\fR
+.ad
+.RS 6n
+Dry run mode. Execute no tests, but print a description of each test that would
+have been run.
+.RE
+
+.ne 2
+.na
+\fB-g\fR
+.ad
+.RS 6n
+Create test groups from any directories found while searching for tests.
+.RE
+
+.ne 2
+.na
+\fB-o\fR \fIoutputdir\fR
+.ad
+.RS 6n
+Specify the directory in which to write test results.
+.RE
+
+.ne 2
+.na
+\fB-p\fR \fIscript\fR
+.ad
+.RS 6n
+Run \fIscript\fR prior to any test or test group.
+.RE
+
+.ne 2
+.na
+\fB-P\fR \fIscript\fR
+.ad
+.RS 6n
+Run \fIscript\fR after any test or test group.
+.RE
+
+.ne 2
+.na
+\fB-q\fR
+.ad
+.RS 6n
+Print only the results summary to the standard output.
+.RE
+
+.ne 2
+.na
+\fB-t\fR \fIn\fR
+.ad
+.RS 6n
+Specify a timeout value of \fIn\fR seconds per test.
+.RE
+
+.ne 2
+.na
+\fB-u\fR \fIusername\fR
+.ad
+.RS 6n
+Execute tests or test groups as \fIusername\fR.
+.RE
+
+.ne 2
+.na
+\fB-w\fR \fIrunfile\fR
+.ad
+.RS 6n
+Specify the name of the \fIrunfile\fR to create.
+.RE
+
+.ne 2
+.na
+\fB-x\fR \fIusername\fR
+.ad
+.RS 6n
+Execute the pre script as \fIusername\fR.
+.RE
+
+.ne 2
+.na
+\fB-X\fR \fIusername\fR
+.ad
+.RS 6n
+Execute the post script as \fIusername\fR.
+.RE
+
+.SH EXAMPLES
+.LP
+\fBExample 1\fR Running ad-hoc tests.
+.sp
+.LP
+This example demonstrates the simplest invocation of \fBrun\fR.
+
+.sp
+.in +2
+.nf
+% \fBrun my-tests\fR
+Test: /home/jkennedy/my-tests/test-01 [00:02] [PASS]
+Test: /home/jkennedy/my-tests/test-02 [00:04] [PASS]
+Test: /home/jkennedy/my-tests/test-03 [00:01] [PASS]
+
+Results Summary
+PASS 3
+
+Running Time: 00:00:07
+Percent passed: 100.0%
+Log directory: /var/tmp/test_results/20120923T180654
+.fi
+.in -2
+
+.LP
+\fBExample 2\fR Creating a \fIrunfile\fR for future use.
+.sp
+.LP
+This example demonstrates creating a \fIrunfile\fR with non-default options.
+
+.sp
+.in +2
+.nf
+% \fBrun -p setup -x root -g -w new-tests.run new-tests\fR
+% \fBcat new-tests.run\fR
+[DEFAULT]
+pre = setup
+post_user =
+quiet = False
+user =
+timeout = 60
+post =
+pre_user = root
+outputdir = /var/tmp/test_results
+
+[/home/jkennedy/new-tests]
+tests = ['test-01', 'test-02', 'test-03']
+.fi
+.in -2
+
+.SH EXIT STATUS
+.sp
+.LP
+The following exit values are returned:
+.sp
+.ne 2
+.na
+\fB\fB0\fR\fR
+.ad
+.sp .6
+.RS 4n
+Successful completion.
+.RE
+.sp
+.ne 2
+.na
+\fB\fB1\fR\fR
+.ad
+.sp .6
+.RS 4n
+An error occurred.
+.RE + +.SH SEE ALSO +.sp +.LP +\fBsudo\fR(1m) diff --git a/tests/zfs-tests/Makefile.am b/tests/zfs-tests/Makefile.am new file mode 100644 index 000000000..861703455 --- /dev/null +++ b/tests/zfs-tests/Makefile.am @@ -0,0 +1 @@ +SUBDIRS = cmd include tests diff --git a/tests/zfs-tests/cmd/Makefile.am b/tests/zfs-tests/cmd/Makefile.am new file mode 100644 index 000000000..f55ff8ce2 --- /dev/null +++ b/tests/zfs-tests/cmd/Makefile.am @@ -0,0 +1,22 @@ +EXTRA_DIST = file_common.h + +SUBDIRS = \ + chg_usr_exec \ + devname2devid \ + dir_rd_update \ + file_check \ + file_trunc \ + file_write \ + largest_file \ + mkbusy \ + mkfile \ + mkfiles \ + mktree \ + mmap_exec \ + mmapwrite \ + randfree_file \ + readmmap \ + rename_dir \ + rm_lnkcnt_zero_file \ + threadsappend \ + xattrtest diff --git a/tests/zfs-tests/cmd/chg_usr_exec/.gitignore b/tests/zfs-tests/cmd/chg_usr_exec/.gitignore new file mode 100644 index 000000000..a8b44df7c --- /dev/null +++ b/tests/zfs-tests/cmd/chg_usr_exec/.gitignore @@ -0,0 +1 @@ +/chg_usr_exec diff --git a/tests/zfs-tests/cmd/chg_usr_exec/Makefile.am b/tests/zfs-tests/cmd/chg_usr_exec/Makefile.am new file mode 100644 index 000000000..6f2968f1f --- /dev/null +++ b/tests/zfs-tests/cmd/chg_usr_exec/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = chg_usr_exec +chg_usr_exec_SOURCES = chg_usr_exec.c diff --git a/tests/zfs-tests/cmd/chg_usr_exec/chg_usr_exec.c b/tests/zfs-tests/cmd/chg_usr_exec/chg_usr_exec.c new file mode 100644 index 000000000..1fa9e88a6 --- /dev/null +++ b/tests/zfs-tests/cmd/chg_usr_exec/chg_usr_exec.c @@ -0,0 +1,77 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include + +#define EXECSHELL "/bin/sh" + +int +main(int argc, char *argv[]) +{ + char *plogin = NULL; + char cmds[BUFSIZ] = { 0 }; + char sep[] = " "; + struct passwd *ppw = NULL; + int i, len; + + if (argc < 3 || strlen(argv[1]) == 0) { + (void) printf("\tUsage: %s ...\n", argv[0]); + return (1); + } + + plogin = argv[1]; + len = 0; + for (i = 2; i < argc; i++) { + (void) snprintf(cmds+len, sizeof (cmds)-len, + "%s%s", argv[i], sep); + len += strlen(argv[i]) + strlen(sep); + } + + if ((ppw = getpwnam(plogin)) == NULL) { + perror("getpwnam"); + return (errno); + } + if (setgid(ppw->pw_gid) != 0) { + perror("setgid"); + return (errno); + } + if (setuid(ppw->pw_uid) != 0) { + perror("setuid"); + return (errno); + } + + if (execl(EXECSHELL, "sh", "-c", cmds, (char *)NULL) != 0) { + perror("execl: " EXECSHELL); + return (errno); + } + + return (0); +} diff --git a/tests/zfs-tests/cmd/devname2devid/.gitignore b/tests/zfs-tests/cmd/devname2devid/.gitignore new file mode 100644 index 000000000..fa9fb6c50 --- /dev/null +++ b/tests/zfs-tests/cmd/devname2devid/.gitignore @@ -0,0 +1 @@ +/devname2devid diff --git a/tests/zfs-tests/cmd/devname2devid/Makefile.am b/tests/zfs-tests/cmd/devname2devid/Makefile.am new file mode 100644 index 000000000..2eb03e9cf --- /dev/null +++ b/tests/zfs-tests/cmd/devname2devid/Makefile.am @@ -0,0 +1,7 @@ +include $(top_srcdir)/config/Rules.am + +pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/bin + +# DISABLED: Not supported under Linux +# pkgdata_PROGRAMS = devname2devid +# devname2devid_SOURCES = devname2devid.c diff --git a/tests/zfs-tests/cmd/devname2devid/devname2devid.c b/tests/zfs-tests/cmd/devname2devid/devname2devid.c new file mode 100644 index 000000000..fd3287491 --- /dev/null +++ b/tests/zfs-tests/cmd/devname2devid/devname2devid.c @@ -0,0 +1,120 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Usage: devname2devid + * + * Examples: + * # ./devname2devid /dev/dsk/c1t4d0s0 + * devid id1,sd@SSEAGATE_ST318404LSUN18G_3BT2G0Z300002146G4CR/a + * # ./devname2devid /dev/dsk/c1t4d0 + * devid id1,sd@SSEAGATE_ST318404LSUN18G_3BT2G0Z300002146G4CR/wd + * # ./devname2devid /dev/dsk/c1t4d0s1 + * devid id1,sd@SSEAGATE_ST318404LSUN18G_3BT2G0Z300002146G4CR/b + * # + * + * This program accepts a disk or disk slice path and prints a + * device id. 
+ * + * Exit values: + * 0 - means success + * 1 - means failure + * + */ +int +main(int argc, char *argv[]) +{ + int fd; + ddi_devid_t devid; + char *minor_name, *devidstr, *device; +#ifdef DEBUG + devid_nmlist_t *list = NULL; + char *search_path; + int i; +#endif + + if (argc == 1) { + (void) printf("%s [search path]\n", + argv[0]); + exit(1); + } + device = argv[1]; + + if ((fd = open(device, O_RDONLY|O_NDELAY)) < 0) { + perror(device); + exit(1); + } + if (devid_get(fd, &devid) != 0) { + perror("devid_get"); + exit(1); + } + if (devid_get_minor_name(fd, &minor_name) != 0) { + perror("devid_get_minor_name"); + exit(1); + } + if ((devidstr = devid_str_encode(devid, minor_name)) == 0) { + perror("devid_str_encode"); + exit(1); + } + + (void) printf("devid %s\n", devidstr); + + devid_str_free(devidstr); + +#ifdef DEBUG + if (argc == 3) { + search_path = argv[2]; + } else { + search_path = "/dev/rdsk"; + } + + if (devid_deviceid_to_nmlist(search_path, devid, DEVID_MINOR_NAME_ALL, + &list)) { + perror("devid_deviceid_to_nmlist"); + exit(1); + } + + /* loop through list and process device names and numbers */ + for (i = 0; list[i].devname != NULL; i++) { + (void) printf("devname: %s %p\n", list[i].devname, list[i].dev); + } + devid_free_nmlist(list); + +#endif /* DEBUG */ + + devid_str_free(minor_name); + devid_free(devid); + + return (0); +} diff --git a/tests/zfs-tests/cmd/dir_rd_update/.gitignore b/tests/zfs-tests/cmd/dir_rd_update/.gitignore new file mode 100644 index 000000000..ec9a15f17 --- /dev/null +++ b/tests/zfs-tests/cmd/dir_rd_update/.gitignore @@ -0,0 +1 @@ +/dir_rd_update diff --git a/tests/zfs-tests/cmd/dir_rd_update/Makefile.am b/tests/zfs-tests/cmd/dir_rd_update/Makefile.am new file mode 100644 index 000000000..27cc9e97e --- /dev/null +++ b/tests/zfs-tests/cmd/dir_rd_update/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = dir_rd_update +dir_rd_update_SOURCES = dir_rd_update.c diff --git a/tests/zfs-tests/cmd/dir_rd_update/dir_rd_update.c b/tests/zfs-tests/cmd/dir_rd_update/dir_rd_update.c new file mode 100644 index 000000000..bca365c52 --- /dev/null +++ b/tests/zfs-tests/cmd/dir_rd_update/dir_rd_update.c @@ -0,0 +1,117 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Assertion: + * + * A read operation and directory update operation performed + * concurrently on the same directory can lead to deadlock + * on a UFS logging file system, but not on a ZFS file system. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#define TMP_DIR /tmp + +static char dirpath[256]; + +int +main(int argc, char **argv) +{ + char *cp1 = ""; + int i = 0; + int ret = 0; + int testdd = 0; + pid_t pid; + static const int op_num = 5; + + if (argc == 1) { + (void) printf("Usage: %s \n", argv[0]); + exit(-1); + } + for (i = 0; i < 256; i++) { + dirpath[i] = 0; + } + + cp1 = argv[1]; + (void) strcpy(&dirpath[0], (const char *)cp1); + (void) strcat(&dirpath[strlen(dirpath)], "TMP_DIR"); + + ret = mkdir(dirpath, 0777); + if (ret != 0) { + if (errno != EEXIST) { + (void) printf("%s: mkdir(<%s>, 0777) failed: errno " + "(decimal)=%d\n", argv[0], dirpath, errno); + exit(-1); + } + } + testdd = open(dirpath, O_RDONLY|O_RSYNC|O_SYNC|O_DSYNC); + if (testdd < 0) { + (void) printf("%s: open(<%s>, O_RDONLY|O_RSYNC|O_SYNC|O_DSYNC)" + " failed: errno (decimal)=%d\n", argv[0], dirpath, errno); + exit(-1); + } else { + (void) close(testdd); + } + pid = fork(); + if (pid > 0) { + int fd = open(dirpath, O_RDONLY|O_RSYNC|O_SYNC|O_DSYNC); + char buf[16]; + int rdret; + int j = 0; + + while (j < op_num) { + (void) sleep(1); + rdret = read(fd, buf, 16); + if (rdret == -1) { + (void) printf("readdir failed"); + } + j++; + } + } else if (pid == 0) { + int fd = open(dirpath, O_RDONLY); + int chownret; + int k = 0; + + while (k < op_num) { + (void) sleep(1); + chownret = fchown(fd, 0, 0); + if (chownret == -1) { + (void) printf("chown failed"); + } + + k++; + } + } + + return (0); +} diff --git a/tests/zfs-tests/cmd/file_check/.gitignore b/tests/zfs-tests/cmd/file_check/.gitignore new file mode 100644 index 000000000..24fe11322 --- /dev/null +++ b/tests/zfs-tests/cmd/file_check/.gitignore @@ -0,0 +1 @@ +/file_check diff --git a/tests/zfs-tests/cmd/file_check/Makefile.am b/tests/zfs-tests/cmd/file_check/Makefile.am new file mode 100644 index 000000000..13027ef5b --- /dev/null +++ b/tests/zfs-tests/cmd/file_check/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = file_check +file_check_SOURCES = file_check.c diff --git a/tests/zfs-tests/cmd/file_check/file_check.c b/tests/zfs-tests/cmd/file_check/file_check.c new file mode 100644 index 000000000..5df0ea735 --- /dev/null +++ b/tests/zfs-tests/cmd/file_check/file_check.c @@ -0,0 +1,86 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include "../file_common.h" + +static unsigned char bigbuffer[BIGBUFFERSIZE]; + +/* + * Given a filename, check that the file consists entirely + * of a particular pattern. 
If the pattern is not specified a + * default will be used. For default values see file_common.h + */ +int +main(int argc, char **argv) +{ + int bigfd; + long i, n; + unsigned char fillchar = DATA; + int bigbuffersize = BIGBUFFERSIZE; + int64_t read_count = 0; + + /* + * Validate arguments + */ + if (argc < 2) { + (void) printf("Usage: %s filename [pattern]\n", + argv[0]); + exit(1); + } + + if (argv[2]) { + fillchar = atoi(argv[2]); + } + + /* + * Read the file contents and check every character + * against the supplied pattern. Abort if the + * pattern check fails. + */ + if ((bigfd = open(argv[1], O_RDONLY)) == -1) { + (void) printf("open %s failed %d\n", argv[1], errno); + exit(1); + } + + do { + if ((n = read(bigfd, &bigbuffer, bigbuffersize)) == -1) { + (void) printf("read failed (%ld), %d\n", n, errno); + exit(errno); + } + + for (i = 0; i < n; i++) { + if (bigbuffer[i] != fillchar) { + (void) printf("error %s: 0x%x != 0x%x)\n", + argv[1], bigbuffer[i], fillchar); + exit(1); + } + } + + read_count += n; + } while (n == bigbuffersize); + + return (0); +} diff --git a/tests/zfs-tests/cmd/file_common.h b/tests/zfs-tests/cmd/file_common.h new file mode 100644 index 000000000..759889e70 --- /dev/null +++ b/tests/zfs-tests/cmd/file_common.h @@ -0,0 +1,62 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef FILE_COMMON_H +#define FILE_COMMON_H + +/* + * header file for file_* utilities. These utilities + * are used by the test cases to perform various file + * operations (append writes, for example). 
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#define BLOCKSZ 8192 +#define DATA 0xa5 +#define DATA_RANGE 120 +#define BIGBUFFERSIZE 0x800000 +#define BIGFILESIZE 20 + +extern char *optarg; +extern int optind, opterr, optopt; + +#ifdef __cplusplus +} +#endif + +#endif /* FILE_COMMON_H */ diff --git a/tests/zfs-tests/cmd/file_trunc/.gitignore b/tests/zfs-tests/cmd/file_trunc/.gitignore new file mode 100644 index 000000000..90b149ff5 --- /dev/null +++ b/tests/zfs-tests/cmd/file_trunc/.gitignore @@ -0,0 +1 @@ +/file_trunc diff --git a/tests/zfs-tests/cmd/file_trunc/Makefile.am b/tests/zfs-tests/cmd/file_trunc/Makefile.am new file mode 100644 index 000000000..0455eb4a4 --- /dev/null +++ b/tests/zfs-tests/cmd/file_trunc/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = file_trunc +file_trunc_SOURCES = file_trunc.c diff --git a/tests/zfs-tests/cmd/file_trunc/file_trunc.c b/tests/zfs-tests/cmd/file_trunc/file_trunc.c new file mode 100644 index 000000000..38e36c9d4 --- /dev/null +++ b/tests/zfs-tests/cmd/file_trunc/file_trunc.c @@ -0,0 +1,244 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Copyright (c) 2012 by Delphix. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FSIZE 256*1024*1024 +#define BSIZE 512 + +/* Initialize Globals */ +static long fsize = FSIZE; +static size_t bsize = BSIZE; +static int count = 0; +static int rflag = 0; +static int seed = 0; +static int vflag = 0; +static int errflag = 0; +static off_t offset = 0; +static char *filename = NULL; + +static void usage(char *execname); +static void parse_options(int argc, char *argv[]); +static void do_write(int fd); +static void do_trunc(int fd); + +static void +usage(char *execname) +{ + (void) fprintf(stderr, + "usage: %s [-b blocksize] [-c count] [-f filesize]" + " [-o offset] [-s seed] [-r] [-v] filename\n", execname); + (void) exit(1); +} + +int +main(int argc, char *argv[]) +{ + int i = 0; + int fd = -1; + + parse_options(argc, argv); + + fd = open(filename, O_RDWR|O_CREAT|O_TRUNC, 0666); + if (fd < 0) { + perror("open"); + exit(3); + } + + while (i < count) { + (void) do_write(fd); + (void) do_trunc(fd); + + i++; + } + + (void) close(fd); + return (0); +} + +static void +parse_options(int argc, char *argv[]) +{ + int c; + + extern char *optarg; + extern int optind, optopt; + + count = fsize / bsize; + seed = time(NULL); + while ((c = getopt(argc, argv, "b:c:f:o:rs:v")) != -1) { + switch (c) { + case 'b': + bsize = atoi(optarg); + break; + + case 'c': + count = atoi(optarg); + break; + + case 'f': + fsize = atoi(optarg); + break; + + case 'o': + offset = atoi(optarg); + break; + + case 'r': + rflag++; + break; + + case 's': + seed = atoi(optarg); + break; + + case 'v': + vflag++; + break; + + case ':': + (void) fprintf(stderr, + "Option -%c requires an operand\n", optopt); + errflag++; + break; + + case '?': + (void) fprintf(stderr, + "Unrecognized option: -%c\n", optopt); + errflag++; + break; + } + + if (errflag) { + (void) usage(argv[0]); + } + } + if (argc <= optind) { + (void) fprintf(stderr, + "No filename specified\n"); + usage(argv[0]); + } + filename = argv[optind]; + + if (vflag) { + (void) fprintf(stderr, "Seed = %d\n", seed); + } + srandom(seed); +} + +static void +do_write(int fd) +{ + off_t roffset = 0; + char *buf = NULL; + char *rbuf = NULL; + + buf = (char *)calloc(1, bsize); + rbuf = (char *)calloc(1, bsize); + if (buf == NULL || rbuf == NULL) { + perror("malloc"); + exit(4); + } + + roffset = random() % fsize; + if (lseek64(fd, (offset + roffset), SEEK_SET) < 0) { + perror("lseek"); + exit(5); + } + + strcpy(buf, "ZFS Test Suite Truncation Test"); + if (write(fd, buf, bsize) < bsize) { + perror("write"); + exit(6); + } + + if (rflag) { + if (lseek64(fd, (offset + roffset), SEEK_SET) < 0) { + perror("lseek"); + exit(7); + } + + if (read(fd, rbuf, bsize) < bsize) { + perror("read"); + exit(8); + } + + if (memcmp(buf, rbuf, bsize) != 0) { + perror("memcmp"); + exit(9); + } + } + if (vflag) { + (void) fprintf(stderr, + "Wrote to offset %" PRId64 "\n", (offset + roffset)); + if (rflag) { + (void) fprintf(stderr, + "Read back from offset %" PRId64 "\n", + (offset + roffset)); + } + } + + (void) free(buf); + (void) free(rbuf); +} + +static void +do_trunc(int fd) +{ + off_t roffset = 0; + + roffset = random() % fsize; + if (ftruncate64(fd, (offset + roffset)) < 0) { + perror("truncate"); + exit(7); + } + + if (vflag) { + (void) fprintf(stderr, "Truncated at offset %" PRId64 "\n", + (offset + roffset)); + } +} diff --git a/tests/zfs-tests/cmd/file_write/.gitignore 
b/tests/zfs-tests/cmd/file_write/.gitignore new file mode 100644 index 000000000..9f691d580 --- /dev/null +++ b/tests/zfs-tests/cmd/file_write/.gitignore @@ -0,0 +1 @@ +/file_write diff --git a/tests/zfs-tests/cmd/file_write/Makefile.am b/tests/zfs-tests/cmd/file_write/Makefile.am new file mode 100644 index 000000000..60895711e --- /dev/null +++ b/tests/zfs-tests/cmd/file_write/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = file_write +file_write_SOURCES = file_write.c diff --git a/tests/zfs-tests/cmd/file_write/file_write.c b/tests/zfs-tests/cmd/file_write/file_write.c new file mode 100644 index 000000000..43fd96ac4 --- /dev/null +++ b/tests/zfs-tests/cmd/file_write/file_write.c @@ -0,0 +1,229 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include "../file_common.h" +#include +#include +#include +#include +#include + +typedef unsigned char uchar_t; +typedef long long longlong_t; +typedef longlong_t offset_t; + +static unsigned char bigbuffer[BIGBUFFERSIZE]; + +/* + * Writes (or appends) a given value to a file repeatedly. + * See header file for defaults. 
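+ *
+ * A typical invocation (the file name and values below are illustrative)
+ * creates a file by writing 20 blocks of 8192 bytes, each filled with the
+ * byte value 165:
+ *
+ *     file_write -o create -f /tank/testfile -b 8192 -c 20 -d 165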
+ */ + +static void usage(char *); + +int +main(int argc, char **argv) +{ + int bigfd; + int c; + int oflag = 0; + int err = 0; + int k; + long i; + int64_t good_writes = 0; + uchar_t nxtfillchar; + char *prog = argv[0]; + /* + * Default Parameters + */ + int write_count = BIGFILESIZE; + uchar_t fillchar = DATA; + int block_size = BLOCKSZ; + char *filename = NULL; + char *operation = NULL; + offset_t noffset, offset = 0; + int verbose = 0; + int rsync = 0; + int wsync = 0; + + /* + * Process Arguments + */ + while ((c = getopt(argc, argv, "b:c:d:s:f:o:vwr")) != -1) { + switch (c) { + case 'b': + block_size = atoi(optarg); + break; + case 'c': + write_count = atoi(optarg); + break; + case 'd': + fillchar = atoi(optarg); + break; + case 's': + offset = atoll(optarg); + break; + case 'f': + filename = optarg; + break; + case 'o': + operation = optarg; + break; + case 'v': + verbose = 1; + break; + case 'w': + wsync = 1; + break; + case 'r': + rsync = 1; + break; + case '?': + (void) printf("unknown arg %c\n", optopt); + usage(prog); + break; + } + } + + /* + * Validate Parameters + */ + if (!filename) { + (void) printf("Filename not specified (-f )\n"); + err++; + } + + if (!operation) { + (void) printf("Operation not specified (-o ).\n"); + err++; + } + + if (block_size > BIGBUFFERSIZE) { + (void) printf("block_size is too large max==%d.\n", + BIGBUFFERSIZE); + err++; + } + + if (err) usage(prog); + + /* + * Prepare the buffer and determine the requested operation + */ + nxtfillchar = fillchar; + k = 0; + + for (i = 0; i < block_size; i++) { + bigbuffer[i] = nxtfillchar; + + if (fillchar == 0) { + if ((k % DATA_RANGE) == 0) { + k = 0; + } + nxtfillchar = k++; + } + } + + /* + * using the strncmp of operation will make the operation match the + * first shortest match - as the operations are unique from the first + * character this means that we match single character operations + */ + if ((strncmp(operation, "create", strlen(operation) + 1)) == 0 || + (strncmp(operation, "overwrite", strlen(operation) + 1)) == 0) { + oflag = (O_RDWR|O_CREAT); + } else if ((strncmp(operation, "append", strlen(operation) + 1)) == 0) { + oflag = (O_RDWR|O_APPEND); + } else { + (void) printf("valid operations are not '%s'\n", + operation); + usage(prog); + } + + if (rsync) { + oflag = oflag | O_RSYNC; + } + + if (wsync) { + oflag = oflag | O_SYNC; + } + + /* + * Given an operation (create/overwrite/append), open the file + * accordingly and perform a write of the appropriate type. + */ + if ((bigfd = open(filename, oflag, 0666)) == -1) { + (void) printf("open %s: failed [%s]%d. Aborting!\n", filename, + strerror(errno), errno); + exit(errno); + } + noffset = lseek64(bigfd, offset, SEEK_SET); + if (noffset != offset) { + (void) printf("llseek %s (%lld/%lld) failed [%s]%d.Aborting!\n", + filename, offset, noffset, strerror(errno), errno); + exit(errno); + } + + if (verbose) { + (void) printf("%s: block_size = %d, write_count = %d, " + "offset = %lld, data = %s%d\n", filename, block_size, + write_count, offset, + (fillchar == 0) ? "0->" : "", + (fillchar == 0) ? 
DATA_RANGE : fillchar); + } + + for (i = 0; i < write_count; i++) { + ssize_t n; + + if ((n = write(bigfd, &bigbuffer, block_size)) == -1) { + (void) printf("write failed (%ld), good_writes = %" + PRId64 ", " "error: %s[%d]\n", + (long)n, good_writes, + strerror(errno), + errno); + exit(errno); + } + good_writes++; + } + + if (verbose) { + (void) printf("Success: good_writes = %" PRId64 "(%" + PRId64 ")\n", good_writes, (good_writes * block_size)); + } + + return (0); +} + +static void +usage(char *prog) +{ + (void) printf("Usage: %s [-v] -o {create,overwrite,append} -f file_name" + " [-b block_size]\n" + "\t[-s offset] [-c write_count] [-d data]\n" + "\twhere [data] equal to zero causes chars " + "0->%d to be repeated throughout\n", prog, DATA_RANGE); + + exit(1); +} diff --git a/tests/zfs-tests/cmd/largest_file/.gitignore b/tests/zfs-tests/cmd/largest_file/.gitignore new file mode 100644 index 000000000..f8f480d06 --- /dev/null +++ b/tests/zfs-tests/cmd/largest_file/.gitignore @@ -0,0 +1 @@ +/largest_file diff --git a/tests/zfs-tests/cmd/largest_file/Makefile.am b/tests/zfs-tests/cmd/largest_file/Makefile.am new file mode 100644 index 000000000..a3e4e9337 --- /dev/null +++ b/tests/zfs-tests/cmd/largest_file/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = largest_file +largest_file_SOURCES = largest_file.c diff --git a/tests/zfs-tests/cmd/largest_file/largest_file.c b/tests/zfs-tests/cmd/largest_file/largest_file.c new file mode 100644 index 000000000..1cf7529f1 --- /dev/null +++ b/tests/zfs-tests/cmd/largest_file/largest_file.c @@ -0,0 +1,140 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Copyright (c) 2012 by Delphix. All rights reserved. + */ + +#include "../file_common.h" +#include +#include +#include +#include +#include +#include + +typedef long long offset_t; +#define MAXOFFSET_T LLONG_MAX + +/* + * -------------------------------------------------------------- + * + * Assertion: + * The last byte of the largest file size can be + * accessed without any errors. Also, the writing + * beyond the last byte of the largest file size + * will produce an errno of EFBIG. + * + * -------------------------------------------------------------- + * If the write() system call below returns a "1", + * then the last byte can be accessed. 
+ * -------------------------------------------------------------- + */ +static void sigxfsz(int); +static void usage(char *); + +int +main(int argc, char **argv) +{ + int fd = 0; + offset_t offset = (MAXOFFSET_T - 1); + offset_t llseek_ret = 0; + int write_ret = 0; + int err = 0; + char mybuf[5]; + char *testfile; + mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; + + if (argc != 2) { + usage(argv[0]); + } + + (void) sigset(SIGXFSZ, sigxfsz); + + testfile = strdup(argv[1]); + + fd = open(testfile, O_CREAT | O_RDWR, mode); + if (fd < 0) { + perror("Failed to create testfile"); + err = errno; + goto out; + } + + llseek_ret = lseek64(fd, offset, SEEK_SET); + if (llseek_ret < 0) { + perror("Failed to seek to end of testfile"); + err = errno; + goto out; + } + + write_ret = write(fd, mybuf, 1); + if (write_ret < 0) { + perror("Failed to write to end of file"); + err = errno; + goto out; + } + + offset = 0; + llseek_ret = lseek64(fd, offset, SEEK_CUR); + if (llseek_ret < 0) { + perror("Failed to seek to end of file"); + err = errno; + goto out; + } + + write_ret = write(fd, mybuf, 1); + if (write_ret < 0) { + if (errno == EFBIG) { + (void) printf("write errno=EFBIG: success\n"); + err = 0; + } else { + perror("Did not receive EFBIG"); + err = errno; + } + } else { + (void) printf("write completed successfully, test failed\n"); + err = 1; + } + +out: + (void) unlink(testfile); + free(testfile); + return (err); +} + +static void +usage(char *name) +{ + (void) printf("%s \n", name); + exit(1); +} + +/* ARGSUSED */ +static void +sigxfsz(int signo) +{ + (void) printf("\nlargest_file: sigxfsz() caught SIGXFSZ\n"); +} diff --git a/tests/zfs-tests/cmd/mkbusy/.gitignore b/tests/zfs-tests/cmd/mkbusy/.gitignore new file mode 100644 index 000000000..18d099c08 --- /dev/null +++ b/tests/zfs-tests/cmd/mkbusy/.gitignore @@ -0,0 +1 @@ +/mkbusy diff --git a/tests/zfs-tests/cmd/mkbusy/Makefile.am b/tests/zfs-tests/cmd/mkbusy/Makefile.am new file mode 100644 index 000000000..abae69dea --- /dev/null +++ b/tests/zfs-tests/cmd/mkbusy/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = mkbusy +mkbusy_SOURCES = mkbusy.c diff --git a/tests/zfs-tests/cmd/mkbusy/mkbusy.c b/tests/zfs-tests/cmd/mkbusy/mkbusy.c new file mode 100644 index 000000000..1e4567488 --- /dev/null +++ b/tests/zfs-tests/cmd/mkbusy/mkbusy.c @@ -0,0 +1,183 @@ +/* + * This file and its contents are supplied under the terms of the + * Common Development and Distribution License ("CDDL"), version 1.0. + * You may only use this file in accordance with the terms of version + * 1.0 of the CDDL. + * + * A full copy of the text of the CDDL should have accompanied this + * source. A copy of the CDDL is also available via the Internet at + * http://www.illumos.org/license/CDDL. + */ + +/* + * Copyright (c) 2012 by Delphix. All rights reserved. + */ + +/* + * Make a directory busy. If the argument is an existing file or directory, + * simply open it directly and pause. If not, verify that the parent directory + * exists, and create a new file in that directory. 
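+ *
+ * For example (the paths below are illustrative):
+ *
+ *     mkbusy /tank/mnt/busyfile       (daemonize, print the pid, and hold a new file open)
+ *     mkbusy -f -r /tank/mnt/afile    (stay in the foreground, open an existing file read-only)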
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef enum boolean { B_FALSE, B_TRUE } boolean_t; + +static void +usage(char *progname) +{ + (void) fprintf(stderr, "Usage: %s \n", progname); + exit(1); +} + +static void +fail(char *err, int rval) +{ + perror(err); + exit(rval); +} + +static void +daemonize(void) +{ + pid_t pid; + + if ((pid = fork()) < 0) { + fail("fork", 1); + } else if (pid != 0) { + (void) fprintf(stdout, "%ld\n", (long)pid); + exit(0); + } + + (void) setsid(); + (void) close(0); + (void) close(1); + (void) close(2); +} + +int +main(int argc, char *argv[]) +{ + int ret, c; + boolean_t isdir = B_FALSE; + boolean_t fflag = B_FALSE; + boolean_t rflag = B_FALSE; + struct stat sbuf; + char *fpath = NULL; + char *prog = argv[0]; + + while ((c = getopt(argc, argv, "fr")) != -1) { + switch (c) { + /* Open the file or directory read only */ + case 'r': + rflag = B_TRUE; + break; + /* Run in the foreground */ + case 'f': + fflag = B_TRUE; + break; + default: + usage(prog); + } + } + + argc -= optind; + argv += optind; + + if (argc != 1) + usage(prog); + + if ((ret = stat(argv[0], &sbuf)) != 0) { + char *arg, *dname, *fname; + int arglen, dlen, flen; + char *slash; + + /* + * The argument supplied doesn't exist. Copy the path, and + * remove the trailing slash if presnt. + */ + if ((arg = strdup(argv[0])) == NULL) + fail("strdup", 1); + arglen = strlen(arg); + if (arg[arglen - 1] == '/') + arg[arglen - 1] = '\0'; + + /* + * Get the directory and file names, using the current directory + * if the provided path doesn't specify a directory at all. + */ + if ((slash = strrchr(arg, '/')) == NULL) { + dname = strdup("."); + fname = strdup(arg); + } else { + *slash = '\0'; + dname = strdup(arg); + fname = strdup(slash + 1); + } + free(arg); + if (dname == NULL || fname == NULL) + fail("strdup", 1); + dlen = strlen(dname); + flen = strlen(fname); + + /* The directory portion of the path must exist */ + if ((ret = stat(dname, &sbuf)) != 0 || !(sbuf.st_mode & + S_IFDIR)) + usage(prog); + + if ((fpath = (char *)malloc(dlen + 1 + flen + 1)) == NULL) + fail("malloc", 1); + (void) memset(fpath, '\0', dlen + 1 + flen + 1); + + (void) strncpy(fpath, dname, dlen); + fpath[dlen] = '/'; + (void) strncat(fpath, fname, flen); + free(dname); + free(fname); + } else if ((sbuf.st_mode & S_IFMT) == S_IFREG || + (sbuf.st_mode & S_IFMT) == S_IFLNK || + (sbuf.st_mode & S_IFMT) == S_IFCHR || + (sbuf.st_mode & S_IFMT) == S_IFBLK) { + fpath = strdup(argv[0]); + } else if ((sbuf.st_mode & S_IFMT) == S_IFDIR) { + fpath = strdup(argv[0]); + isdir = B_TRUE; + } else { + usage(prog); + } + + if (fpath == NULL) + fail("strdup", 1); + + if (isdir == B_FALSE) { + int fd, flags; + mode_t mode = S_IRUSR | S_IWUSR; + + flags = rflag == B_FALSE ? 
O_CREAT | O_RDWR : O_RDONLY; + + if ((fd = open(fpath, flags, mode)) < 0) + fail("open", 1); + } else { + DIR *dp; + + if ((dp = opendir(fpath)) == NULL) + fail("opendir", 1); + } + free(fpath); + + if (fflag == B_FALSE) + daemonize(); + (void) pause(); + + /* NOTREACHED */ + return (0); +} diff --git a/tests/zfs-tests/cmd/mkfile/.gitignore b/tests/zfs-tests/cmd/mkfile/.gitignore new file mode 100644 index 000000000..93e9a8a6d --- /dev/null +++ b/tests/zfs-tests/cmd/mkfile/.gitignore @@ -0,0 +1 @@ +/mkfile diff --git a/tests/zfs-tests/cmd/mkfile/Makefile.am b/tests/zfs-tests/cmd/mkfile/Makefile.am new file mode 100644 index 000000000..016c67128 --- /dev/null +++ b/tests/zfs-tests/cmd/mkfile/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = mkfile +mkfile_SOURCES = mkfile.c diff --git a/tests/zfs-tests/cmd/mkfile/mkfile.c b/tests/zfs-tests/cmd/mkfile/mkfile.c new file mode 100644 index 000000000..4e0be9383 --- /dev/null +++ b/tests/zfs-tests/cmd/mkfile/mkfile.c @@ -0,0 +1,275 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2005 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) + +#define BLOCK_SIZE 512 /* bytes */ +#define KILOBYTE 1024 +#define MEGABYTE (KILOBYTE * KILOBYTE) +#define GIGABYTE (KILOBYTE * MEGABYTE) + +#define FILE_MODE (S_ISVTX + S_IRUSR + S_IWUSR) + +typedef long long offset_t; + +static void usage(void); + +int +main(int argc, char **argv) +{ + char *opts; + off_t size; + size_t len; + size_t mult = 1; + char *buf = NULL; + size_t bufsz = 0; + int errors = 0; + int i; + int verbose = 0; /* option variable */ + int nobytes = 0; /* option variable */ + int saverr; + + if (argc == 1) + usage(); + + while (argv[1] && argv[1][0] == '-') { + opts = &argv[1][0]; + while (*(++opts)) { + switch (*opts) { + case 'v': + verbose++; + break; + case 'n': + nobytes++; + break; + default: + usage(); + } + } + argc--; + argv++; + } + if (argc < 3) + usage(); + + len = strlen(argv[1]); + if (len && isalpha(argv[1][len-1])) { + switch (argv[1][len-1]) { + case 'k': + case 'K': + mult = KILOBYTE; + break; + case 'b': + case 'B': + mult = BLOCK_SIZE; + break; + case 'm': + case 'M': + mult = MEGABYTE; + break; + case 'g': + case 'G': + mult = GIGABYTE; + break; + default: + (void) fprintf(stderr, + gettext("unknown size %s\n"), argv[1]); + usage(); + } + + for (i = 0; i <= (len-2); i++) { + if (!isdigit(argv[1][i])) { + (void) fprintf(stderr, + gettext("unknown size %s\n"), argv[1]); + usage(); + } + } + argv[1][len-1] = '\0'; + } + size = ((off_t)atoll(argv[1]) * (off_t)mult); + + argv++; + argc--; + + while (argc > 1) { + int fd; + + if (verbose) + (void) fprintf(stdout, gettext("%s %lld bytes\n"), + argv[1], (offset_t)size); + fd = open(argv[1], O_CREAT|O_TRUNC|O_RDWR, FILE_MODE); + if (fd < 0) { + saverr = errno; + (void) fprintf(stderr, + gettext("Could not open %s: %s\n"), + argv[1], strerror(saverr)); + errors++; + argv++; + argc--; + continue; + } + if (lseek(fd, (off_t)size-1, SEEK_SET) < 0) { + saverr = errno; + (void) fprintf(stderr, gettext( + "Could not seek to offset %ld in %s: %s\n"), + (unsigned long)size-1, argv[1], strerror(saverr)); + (void) close(fd); + errors++; + argv++; + argc--; + continue; + } else if (write(fd, "", 1) != 1) { + saverr = errno; + (void) fprintf(stderr, gettext( + "Could not set length of %s: %s\n"), + argv[1], strerror(saverr)); + (void) close(fd); + errors++; + argv++; + argc--; + continue; + } + + if (!nobytes) { + off_t written = 0; + struct stat64 st; + + if (lseek(fd, (off_t)0, SEEK_SET) < 0) { + saverr = errno; + (void) fprintf(stderr, gettext( + "Could not seek to beginning of %s: %s\n"), + argv[1], strerror(saverr)); + (void) close(fd); + errors++; + argv++; + argc--; + continue; + } + if (fstat64(fd, &st) < 0) { + saverr = errno; + (void) fprintf(stderr, gettext( + "Could not fstat64 %s: %s\n"), + argv[1], strerror(saverr)); + (void) close(fd); + errors++; + argv++; + argc--; + continue; + } + if (bufsz != st.st_blksize) { + if (buf) + free(buf); + bufsz = (size_t)st.st_blksize; + buf = calloc(bufsz, 1); + if (buf == NULL) { + (void) fprintf(stderr, gettext( + "Could not allocate buffer of" + " size %d\n"), (int)bufsz); + (void) close(fd); + bufsz = 0; + errors++; + argv++; + argc--; + continue; + } + } + while (written < size) { + ssize_t result; + size_t bytes = (size_t)MIN(bufsz, size-written); + + if ((result = write(fd, buf, bytes)) != + (ssize_t)bytes) { + saverr = errno; + if (result < 0) + result = 0; + written += result; + (void) fprintf(stderr, gettext( + "%s: initialized %lu of %lu bytes: %s\n"), + argv[1], (unsigned long)written, + (unsigned long)size, + strerror(saverr)); + errors++; + 
break; + } + written += bytes; + } + + /* + * A write(2) call in the above loop failed so + * close out this file and go on (error was + * already incremented when the write(2) failed). + */ + if (written < size) { + (void) close(fd); + argv++; + argc--; + continue; + } + } + if (close(fd) < 0) { + saverr = errno; + (void) fprintf(stderr, gettext( + "Error encountered when closing %s: %s\n"), + argv[1], strerror(saverr)); + errors++; + argv++; + argc--; + continue; + } + + /* + * Only set the modes (including the sticky bit) if we + * had no problems. It is not an error for the chmod(2) + * to fail, but do issue a warning. + */ + if (chmod(argv[1], FILE_MODE) < 0) + (void) fprintf(stderr, gettext( + "warning: couldn't set mode to %#o\n"), FILE_MODE); + + argv++; + argc--; + } + return (errors); +} + +static void usage() +{ + (void) fprintf(stderr, gettext( + "Usage: mkfile [-nv] [g|k|b|m] [] ...\n")); + exit(1); + /* NOTREACHED */ +} diff --git a/tests/zfs-tests/cmd/mkfiles/.gitignore b/tests/zfs-tests/cmd/mkfiles/.gitignore new file mode 100644 index 000000000..cee4858b7 --- /dev/null +++ b/tests/zfs-tests/cmd/mkfiles/.gitignore @@ -0,0 +1 @@ +/mkfiles diff --git a/tests/zfs-tests/cmd/mkfiles/Makefile.am b/tests/zfs-tests/cmd/mkfiles/Makefile.am new file mode 100644 index 000000000..54c21597f --- /dev/null +++ b/tests/zfs-tests/cmd/mkfiles/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = mkfiles +mkfiles_SOURCES = mkfiles.c diff --git a/tests/zfs-tests/cmd/mkfiles/mkfiles.c b/tests/zfs-tests/cmd/mkfiles/mkfiles.c new file mode 100644 index 000000000..418fb9d07 --- /dev/null +++ b/tests/zfs-tests/cmd/mkfiles/mkfiles.c @@ -0,0 +1,65 @@ +/* + * This file and its contents are supplied under the terms of the + * Common Development and Distribution License ("CDDL"), version 1.0. + * You may only use this file in accordance with the terms of version + * 1.0 of the CDDL. + * + * A full copy of the text of the CDDL should have accompanied this + * source. A copy of the CDDL is also available via the Internet at + * http://www.illumos.org/license/CDDL. + */ + +/* + * Copyright (c) 2016 by Delphix. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#define MAX_INT_LENGTH 10 + +static void +usage(char *msg, int exit_value) +{ + (void) fprintf(stderr, "mkfiles basename max_file [min_file]\n"); + (void) fprintf(stderr, "%s\n", msg); + exit(exit_value); +} + +int +main(int argc, char **argv) +{ + unsigned int numfiles = 0; + unsigned int first_file = 0; + unsigned int i; + char buf[MAXPATHLEN]; + + if (argc < 3 || argc > 4) + usage("Invalid number of arguments", -1); + + if (sscanf(argv[2], "%u", &numfiles) != 1) + usage("Invalid maximum file", -2); + + if (argc == 4 && sscanf(argv[3], "%u", &first_file) != 1) + usage("Invalid first file", -3); + + if (numfiles < first_file) + usage("First file larger than last file", -3); + + for (i = first_file; i <= numfiles; i++) { + int fd; + (void) snprintf(buf, MAXPATHLEN, "%s%u", argv[1], i); + if ((fd = open(buf, O_CREAT | O_EXCL, O_RDWR)) == -1) { + (void) fprintf(stderr, "Failed to create %s %s\n", buf, + strerror(errno)); + return (-4); + } + (void) close(fd); + } + return (0); +} diff --git a/tests/zfs-tests/cmd/mktree/.gitignore b/tests/zfs-tests/cmd/mktree/.gitignore new file mode 100644 index 000000000..588bc6d1c --- /dev/null +++ b/tests/zfs-tests/cmd/mktree/.gitignore @@ -0,0 +1 @@ +/mktree diff --git a/tests/zfs-tests/cmd/mktree/Makefile.am b/tests/zfs-tests/cmd/mktree/Makefile.am new file mode 100644 index 000000000..88c74ae0a --- /dev/null +++ b/tests/zfs-tests/cmd/mktree/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = mktree +mktree_SOURCES = mktree.c diff --git a/tests/zfs-tests/cmd/mktree/mktree.c b/tests/zfs-tests/cmd/mktree/mktree.c new file mode 100644 index 000000000..8f9b38578 --- /dev/null +++ b/tests/zfs-tests/cmd/mktree/mktree.c @@ -0,0 +1,183 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TYPE_D 'D' +#define TYPE_F 'F' + +static char fdname[MAXPATHLEN] = {0}; +static char *pbasedir = NULL; +static int nlevel = 2; +static int ndir = 2; +static int nfile = 2; + +static void usage(char *this); +static void crtfile(char *pname); +static char *getfdname(char *pdir, char type, int level, int dir, int file); +static int mktree(char *pbasedir, int level); + +int +main(int argc, char *argv[]) +{ + int c, ret; + + while ((c = getopt(argc, argv, "b:l:d:f:")) != -1) { + switch (c) { + case 'b': + pbasedir = optarg; + break; + case 'l': + nlevel = atoi(optarg); + break; + case 'd': + ndir = atoi(optarg); + break; + case 'f': + nfile = atoi(optarg); + break; + case '?': + usage(argv[0]); + } + } + if (nlevel < 0 || ndir < 0 || nfile < 0 || pbasedir == NULL) { + usage(argv[0]); + } + + ret = mktree(pbasedir, 1); + + return (ret); +} + +static void +usage(char *this) +{ + (void) fprintf(stderr, + "\tUsage: %s -b -l [nlevel] -d [ndir] -f [nfile]\n", + this); + exit(1); +} + +static int +mktree(char *pdir, int level) +{ + int d, f; + char dname[MAXPATHLEN] = {0}; + char fname[MAXPATHLEN] = {0}; + + if (level > nlevel) { + return (1); + } + + for (d = 0; d < ndir; d++) { + (void) memset(dname, '\0', sizeof (dname)); + (void) strcpy(dname, getfdname(pdir, TYPE_D, level, d, 0)); + + if (mkdir(dname, 0777) != 0) { + (void) fprintf(stderr, "mkdir(%s) failed." + "\n[%d]: %s.\n", + dname, errno, strerror(errno)); + exit(errno); + } + + /* + * No sub-directory need be created, only create files in it. + */ + if (mktree(dname, level+1) != 0) { + for (f = 0; f < nfile; f++) { + (void) memset(fname, '\0', sizeof (fname)); + (void) strcpy(fname, + getfdname(dname, TYPE_F, level+1, d, f)); + crtfile(fname); + } + } + } + + for (f = 0; f < nfile; f++) { + (void) memset(fname, '\0', sizeof (fname)); + (void) strcpy(fname, getfdname(pdir, TYPE_F, level, d, f)); + crtfile(fname); + } + + return (0); +} + +static char * +getfdname(char *pdir, char type, int level, int dir, int file) +{ + (void) snprintf(fdname, sizeof (fdname), + "%s/%c-l%dd%df%d", pdir, type, level, dir, file); + return (fdname); +} + +static void +crtfile(char *pname) +{ + int fd = -1; + int i, size; + char *context = "0123456789ABCDF"; + char *pbuf; + + if (pname == NULL) { + exit(1); + } + + size = sizeof (char) * 1024; + pbuf = (char *)valloc(size); + for (i = 0; i < size / strlen(context); i++) { + int offset = i * strlen(context); + (void) snprintf(pbuf+offset, size-offset, "%s", context); + } + + if ((fd = open(pname, O_CREAT|O_RDWR, 0777)) < 0) { + (void) fprintf(stderr, "open(%s, O_CREAT|O_RDWR, 0777) failed." + "\n[%d]: %s.\n", pname, errno, strerror(errno)); + exit(errno); + } + if (write(fd, pbuf, 1024) < 1024) { + (void) fprintf(stderr, "write(fd, pbuf, 1024) failed." 
+ "\n[%d]: %s.\n", errno, strerror(errno)); + exit(errno); + } + + if (fsetxattr(fd, "xattr", pbuf, 1024, 0) < 0) { + (void) fprintf(stderr, "fsetxattr(fd, \"xattr\", pbuf, " + "1024, 0) failed.\n[%d]: %s.\n", errno, strerror(errno)); + exit(errno); + } + + (void) close(fd); + free(pbuf); +} diff --git a/tests/zfs-tests/cmd/mmap_exec/.gitignore b/tests/zfs-tests/cmd/mmap_exec/.gitignore new file mode 100644 index 000000000..63a68bbc6 --- /dev/null +++ b/tests/zfs-tests/cmd/mmap_exec/.gitignore @@ -0,0 +1 @@ +/mmap_exec diff --git a/tests/zfs-tests/cmd/mmap_exec/Makefile.am b/tests/zfs-tests/cmd/mmap_exec/Makefile.am new file mode 100644 index 000000000..ab9f81be9 --- /dev/null +++ b/tests/zfs-tests/cmd/mmap_exec/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = mmap_exec +mmap_exec_SOURCES = mmap_exec.c diff --git a/tests/zfs-tests/cmd/mmap_exec/mmap_exec.c b/tests/zfs-tests/cmd/mmap_exec/mmap_exec.c new file mode 100644 index 000000000..6a48a9c04 --- /dev/null +++ b/tests/zfs-tests/cmd/mmap_exec/mmap_exec.c @@ -0,0 +1,69 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Copyright (c) 2013 by Delphix. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +int +main(int argc, char *argv[]) +{ + int fd; + struct stat statbuf; + + if (argc != 2) { + (void) printf("Error: missing binary name.\n"); + (void) printf("Usage:\n\t%s \n", + argv[0]); + return (1); + } + + errno = 0; + + if ((fd = open(argv[1], O_RDONLY)) < 0) { + perror("open"); + return (errno); + } + if (fstat(fd, &statbuf) < 0) { + perror("fstat"); + return (errno); + } + + if (mmap(0, statbuf.st_size, + PROT_EXEC, MAP_SHARED, fd, 0) == MAP_FAILED) { + perror("mmap"); + return (errno); + } + + return (0); +} diff --git a/tests/zfs-tests/cmd/mmapwrite/.gitignore b/tests/zfs-tests/cmd/mmapwrite/.gitignore new file mode 100644 index 000000000..4e7043bbf --- /dev/null +++ b/tests/zfs-tests/cmd/mmapwrite/.gitignore @@ -0,0 +1 @@ +/mmapwrite diff --git a/tests/zfs-tests/cmd/mmapwrite/Makefile.am b/tests/zfs-tests/cmd/mmapwrite/Makefile.am new file mode 100644 index 000000000..b21b9e779 --- /dev/null +++ b/tests/zfs-tests/cmd/mmapwrite/Makefile.am @@ -0,0 +1,7 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = mmapwrite +mmapwrite_SOURCES = mmapwrite.c +mmapwrite_LDADD = -lpthread diff --git a/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c b/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c new file mode 100644 index 000000000..2eca12469 --- /dev/null +++ b/tests/zfs-tests/cmd/mmapwrite/mmapwrite.c @@ -0,0 +1,97 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include + +/* + * -------------------------------------------------------------------- + * Bug Id: 5032643 + * + * Simply writing to a file and mmaping that file at the same time can + * result in deadlock. Nothing perverse like writing from the file's + * own mapping is required. 
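+ *
+ * Usage sketch (the file name is illustrative); the program writes to the
+ * file and remaps it in parallel until it is interrupted:
+ *
+ *     mmapwrite /tank/mnt/testfile &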
+ * -------------------------------------------------------------------- + */ + +static void * +mapper(void *fdp) +{ + void *addr; + int fd = *(int *)fdp; + + if ((addr = + mmap(0, 8192, PROT_READ, MAP_SHARED, fd, 0)) == MAP_FAILED) { + perror("mmap"); + exit(1); + } + for (;;) { + if (mmap(addr, 8192, PROT_READ, + MAP_SHARED|MAP_FIXED, fd, 0) == MAP_FAILED) { + perror("mmap"); + exit(1); + } + } + /* NOTREACHED */ + return ((void *)1); +} + +int +main(int argc, char **argv) +{ + int fd; + char buf[BUFSIZ]; + pthread_t tid; + + if (argc != 2) { + (void) printf("usage: %s \n", argv[0]); + exit(1); + } + + if ((fd = open(argv[1], O_RDWR|O_CREAT|O_TRUNC, 0666)) == -1) { + perror("open"); + exit(1); + } + + (void) pthread_setconcurrency(2); + if (pthread_create(&tid, NULL, mapper, &fd) != 0) { + perror("pthread_create"); + exit(1); + } + for (;;) { + if (write(fd, buf, sizeof (buf)) == -1) { + perror("write"); + exit(1); + } + } + + /* NOTREACHED */ + return (0); +} diff --git a/tests/zfs-tests/cmd/randfree_file/.gitignore b/tests/zfs-tests/cmd/randfree_file/.gitignore new file mode 100644 index 000000000..0f5b394c5 --- /dev/null +++ b/tests/zfs-tests/cmd/randfree_file/.gitignore @@ -0,0 +1 @@ +/randfree_file diff --git a/tests/zfs-tests/cmd/randfree_file/Makefile.am b/tests/zfs-tests/cmd/randfree_file/Makefile.am new file mode 100644 index 000000000..6306e0e75 --- /dev/null +++ b/tests/zfs-tests/cmd/randfree_file/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = randfree_file +randfree_file_SOURCES = randfree_file.c diff --git a/tests/zfs-tests/cmd/randfree_file/randfree_file.c b/tests/zfs-tests/cmd/randfree_file/randfree_file.c new file mode 100644 index 000000000..8e7487c41 --- /dev/null +++ b/tests/zfs-tests/cmd/randfree_file/randfree_file.c @@ -0,0 +1,105 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Copyright (c) 2012 by Delphix. All rights reserved. 
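The reproducer above intentionally loops forever and relies on the test harness to stop it. If a bounded variant were ever wanted, one possible approach (an assumption, not something the suite does) is to arm SIGALRM so a run that does not deadlock exits cleanly after a fixed time:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Exit successfully if the timer fires, i.e. no deadlock was observed. */
static void
timeout(int sig)
{
	(void) sig;
	_exit(0);
}

int
main(void)
{
	struct sigaction sa;

	sa.sa_handler = timeout;
	sa.sa_flags = 0;
	(void) sigemptyset(&sa.sa_mask);
	if (sigaction(SIGALRM, &sa, NULL) != 0) {
		perror("sigaction");
		return (1);
	}
	(void) alarm(60);	/* give the write/mmap race 60 seconds */

	for (;;)		/* the write and mmap loops would run here */
		pause();

	/* NOTREACHED */
	return (0);
}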
+ */ + +#include "../file_common.h" +#include +#include +#include +#include + +/* + * Create a file with assigned size and then free the specified + * section of the file + */ + +static void usage(char *progname); + +static void +usage(char *progname) +{ + (void) fprintf(stderr, + "usage: %s [-l filesize] [-s start-offset]" + "[-n section-len] filename\n", progname); + exit(1); +} + +int +main(int argc, char *argv[]) +{ + char *filename = NULL; + char *buf; + size_t filesize = 0; + off_t start_off = 0; + off_t off_len = 0; + int fd, ch; + mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; + + while ((ch = getopt(argc, argv, "l:s:n:")) != EOF) { + switch (ch) { + case 'l': + filesize = atoll(optarg); + break; + case 's': + start_off = atoll(optarg); + break; + case 'n': + off_len = atoll(optarg); + break; + default: + usage(argv[0]); + break; + } + } + + if (optind == argc - 1) + filename = argv[optind]; + else + usage(argv[0]); + + buf = (char *)malloc(filesize); + + if ((fd = open(filename, O_RDWR | O_CREAT | O_TRUNC, mode)) < 0) { + perror("open"); + return (1); + } + if (write(fd, buf, filesize) < filesize) { + perror("write"); + return (1); + } + + if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, + start_off, off_len) < 0) { + perror("fallocate"); + return (1); + } + + free(buf); + return (0); +} diff --git a/tests/zfs-tests/cmd/readmmap/.gitignore b/tests/zfs-tests/cmd/readmmap/.gitignore new file mode 100644 index 000000000..3799193a9 --- /dev/null +++ b/tests/zfs-tests/cmd/readmmap/.gitignore @@ -0,0 +1 @@ +/readmmap diff --git a/tests/zfs-tests/cmd/readmmap/Makefile.am b/tests/zfs-tests/cmd/readmmap/Makefile.am new file mode 100644 index 000000000..9b735c287 --- /dev/null +++ b/tests/zfs-tests/cmd/readmmap/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = readmmap +readmmap_SOURCES = readmmap.c diff --git a/tests/zfs-tests/cmd/readmmap/readmmap.c b/tests/zfs-tests/cmd/readmmap/readmmap.c new file mode 100644 index 000000000..e21c2c867 --- /dev/null +++ b/tests/zfs-tests/cmd/readmmap/readmmap.c @@ -0,0 +1,138 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * -------------------------------------------------------------- + * BugId 5047993 : Getting bad read data. + * + * Usage: readmmap + * + * where: + * filename is an absolute path to the file name. 
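One way to confirm the punched region afterwards, sketched here only under the assumption that the filesystem reports holes, is lseek(2) with SEEK_HOLE; on a fully allocated file this returns the end-of-file offset instead:

#define	_GNU_SOURCE	/* SEEK_HOLE/SEEK_DATA on glibc */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	off_t hole;
	int fd;

	if (argc != 2) {
		(void) fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return (1);
	}
	if ((fd = open(argv[1], O_RDONLY)) < 0) {
		perror("open");
		return (1);
	}
	/* Locate the first hole; a file with no holes reports EOF here. */
	hole = lseek(fd, 0, SEEK_HOLE);
	if (hole < 0) {
		perror("lseek(SEEK_HOLE)");
		(void) close(fd);
		return (1);
	}
	(void) printf("first hole begins at offset %lld\n", (long long)hole);
	(void) close(fd);
	return (0);
}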
+ * + * Return values: + * 1 : error + * 0 : no errors + * -------------------------------------------------------------- + */ +#include +#include +#include +#include +#include +#include +#include + +int +main(int argc, char **argv) +{ + char *filename = "badfile"; + size_t size = 4395; + size_t idx = 0; + char *buf = NULL; + char *map = NULL; + int fd = -1, bytes, retval = 0; + unsigned seed; + + if (argc < 2 || optind == argc) { + (void) fprintf(stderr, + "usage: %s \n", argv[0]); + exit(1); + } + + if ((buf = calloc(1, size)) == NULL) { + perror("calloc"); + exit(1); + } + + filename = argv[optind]; + + (void) remove(filename); + + fd = open(filename, O_RDWR|O_CREAT|O_TRUNC, 0666); + if (fd == -1) { + perror("open to create"); + retval = 1; + goto end; + } + + bytes = write(fd, buf, size); + if (bytes != size) { + (void) printf("short write: %d != %zd\n", bytes, size); + retval = 1; + goto end; + } + + map = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); + if (map == MAP_FAILED) { + perror("mmap"); + retval = 1; + goto end; + } + seed = time(NULL); + srandom(seed); + + idx = random() % size; + map[idx] = 1; + + if (msync(map, size, MS_SYNC) != 0) { + perror("msync"); + retval = 1; + goto end; + } + + if (munmap(map, size) != 0) { + perror("munmap"); + retval = 1; + goto end; + } + + bytes = pread(fd, buf, size, 0); + if (bytes != size) { + (void) printf("short read: %d != %zd\n", bytes, size); + retval = 1; + goto end; + } + + if (buf[idx] != 1) { + (void) printf( + "bad data from read! got buf[%zd]=%d, expected 1\n", + idx, buf[idx]); + retval = 1; + goto end; + } + + (void) printf("good data from read: buf[%zd]=1\n", idx); +end: + if (fd != -1) { + (void) close(fd); + } + if (buf != NULL) { + free(buf); + } + + return (retval); +} diff --git a/tests/zfs-tests/cmd/rename_dir/.gitignore b/tests/zfs-tests/cmd/rename_dir/.gitignore new file mode 100644 index 000000000..39a0cb222 --- /dev/null +++ b/tests/zfs-tests/cmd/rename_dir/.gitignore @@ -0,0 +1 @@ +/rename_dir diff --git a/tests/zfs-tests/cmd/rename_dir/Makefile.am b/tests/zfs-tests/cmd/rename_dir/Makefile.am new file mode 100644 index 000000000..21971cd88 --- /dev/null +++ b/tests/zfs-tests/cmd/rename_dir/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = rename_dir +rename_dir_SOURCES = rename_dir.c diff --git a/tests/zfs-tests/cmd/rename_dir/rename_dir.c b/tests/zfs-tests/cmd/rename_dir/rename_dir.c new file mode 100644 index 000000000..5f80f7229 --- /dev/null +++ b/tests/zfs-tests/cmd/rename_dir/rename_dir.c @@ -0,0 +1,88 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 
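A possible follow-up check for the file produced above, shown only as a sketch, is to confirm that exactly one byte ended up non-zero (the byte poked through the mapping) and that everything else stayed zero:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	unsigned char byte;
	ssize_t n;
	long nonzero = 0;
	off_t off = 0;
	int fd;

	if (argc != 2) {
		(void) fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return (1);
	}
	if ((fd = open(argv[1], O_RDONLY)) < 0) {
		perror("open");
		return (1);
	}
	/* Count non-zero bytes; the test file should contain exactly one. */
	while ((n = read(fd, &byte, 1)) == 1) {
		if (byte != 0) {
			(void) printf("non-zero byte at offset %lld\n",
			    (long long)off);
			nonzero++;
		}
		off++;
	}
	(void) close(fd);
	return (nonzero == 1 ? 0 : 1);
}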
+ * Use is subject to license terms. + */ + +/* + * Copyright (c) 2012 by Delphix. All rights reserved. + */ + +/* + * Assertion: + * Create two directory trees in zfs filesystem, and rename + * directory across the directory structure. ZFS can handle + * the race situation. + */ + +/* + * Need to create the following directory structures before + * running this program: + * + * mkdir -p 1/2/3/4/5 a/b/c/d/e + */ + + +#include +#include +#include +#include + +int +main(int argc, char *argvp[]) +{ + int i = 1; + + switch (fork()) { + case -1: + perror("fork"); + exit(1); + break; + case 0: + while (i > 0) { + int c_count = 0; + if (rename("a/b/c", "1/2/3/c") == 0) + c_count++; + if (rename("1/2/3/c", "a/b/c") == 0) + c_count++; + if (c_count) { + (void) fprintf(stderr, "c_count: %d", c_count); + } + } + break; + default: + while (i > 0) { + int p_count = 0; + if (rename("1", "a/b/c/d/e/1") == 0) + p_count++; + if (rename("a/b/c/d/e/1", "1") == 0) + p_count++; + if (p_count) { + (void) fprintf(stderr, "p_count: %d", p_count); + } + } + break; + } + + return (0); +} diff --git a/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/.gitignore b/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/.gitignore new file mode 100644 index 000000000..fc6323fb3 --- /dev/null +++ b/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/.gitignore @@ -0,0 +1 @@ +/rm_lnkcnt_zero_file diff --git a/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/Makefile.am b/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/Makefile.am new file mode 100644 index 000000000..90fc8d054 --- /dev/null +++ b/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/Makefile.am @@ -0,0 +1,7 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = rm_lnkcnt_zero_file +rm_lnkcnt_zero_file_SOURCES = rm_lnkcnt_zero_file.c +rm_lnkcnt_zero_file_LDADD = -lpthread diff --git a/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/rm_lnkcnt_zero_file.c b/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/rm_lnkcnt_zero_file.c new file mode 100644 index 000000000..ee85a207d --- /dev/null +++ b/tests/zfs-tests/cmd/rm_lnkcnt_zero_file/rm_lnkcnt_zero_file.c @@ -0,0 +1,155 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Copyright (c) 2012 by Delphix. All rights reserved. + */ + +/* + * -------------------------------------------------------------------- + * The purpose of this test is to see if the bug reported (#4723351) for + * UFS exists when using a ZFS file system. 
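The required trees can of course be created with the mkdir -p line quoted in the header comment above; for completeness, a small C equivalent of that setup step is sketched below (the mkdir_p helper is hypothetical, not something the suite provides):

#include <stdio.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Create one nested relative path, component by component (mkdir -p). */
static int
mkdir_p(const char *path)
{
	char buf[256];
	char *p;

	(void) snprintf(buf, sizeof (buf), "%s", path);
	for (p = buf + 1; *p != '\0'; p++) {
		if (*p != '/')
			continue;
		*p = '\0';
		if (mkdir(buf, 0777) != 0 && errno != EEXIST)
			return (-1);
		*p = '/';
	}
	if (mkdir(buf, 0777) != 0 && errno != EEXIST)
		return (-1);
	return (0);
}

int
main(void)
{
	/* The two trees the rename test races across. */
	if (mkdir_p("1/2/3/4/5") != 0 || mkdir_p("a/b/c/d/e") != 0) {
		perror("mkdir");
		return (1);
	}
	return (0);
}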
+ * -------------------------------------------------------------------- + * + */ +#define _REENTRANT 1 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const int TRUE = 1; +static char *filebase; + +static int +pickidx(void) +{ + return (random() % 1000); +} + +/* ARGSUSED */ +static void * +mover(void *a) +{ + char buf[256]; + int idx, len, ret; + + len = strlen(filebase) + 5; + + while (TRUE) { + idx = pickidx(); + (void) snprintf(buf, len, "%s.%03d", filebase, idx); + ret = rename(filebase, buf); + if (ret < 0 && errno != ENOENT) + (void) perror("renaming file"); + } + + return (NULL); +} + +/* ARGSUSED */ +static void * +cleaner(void *a) +{ + char buf[256]; + int idx, len, ret; + + len = strlen(filebase) + 5; + + while (TRUE) { + idx = pickidx(); + (void) snprintf(buf, len, "%s.%03d", filebase, idx); + ret = remove(buf); + if (ret < 0 && errno != ENOENT) + (void) perror("removing file"); + } + + return (NULL); +} + +static void * +writer(void *a) +{ + int *fd = (int *)a; + int ret; + + while (TRUE) { + (void) close (*fd); + *fd = open(filebase, O_APPEND | O_RDWR | O_CREAT, 0644); + if (*fd < 0) + perror("refreshing file"); + ret = write(*fd, "test\n", 5); + if (ret != 5) + perror("writing file"); + } + + return (NULL); +} + +int +main(int argc, char **argv) +{ + int fd; + pthread_t tid; + + if (argc == 1) { + (void) printf("Usage: %s \n", argv[0]); + exit(-1); + } + + filebase = argv[1]; + fd = open(filebase, O_APPEND | O_RDWR | O_CREAT, 0644); + if (fd < 0) { + perror("creating test file"); + exit(-1); + } + + (void) pthread_setconcurrency(4); /* 3 threads + main */ + (void) pthread_create(&tid, NULL, mover, NULL); + (void) pthread_create(&tid, NULL, cleaner, NULL); + (void) pthread_create(&tid, NULL, writer, (void *) &fd); + + while (TRUE) { + int ret; + struct stat st; + + ret = stat(filebase, &st); + if (ret == 0 && (st.st_nlink > 2 || st.st_nlink < 1)) { + (void) printf("st.st_nlink = %d, exiting\n", \ + (int)st.st_nlink); + exit(0); + } + (void) sleep(1); + } + + return (0); +} diff --git a/tests/zfs-tests/cmd/threadsappend/.gitignore b/tests/zfs-tests/cmd/threadsappend/.gitignore new file mode 100644 index 000000000..4c8c8cdf3 --- /dev/null +++ b/tests/zfs-tests/cmd/threadsappend/.gitignore @@ -0,0 +1 @@ +/threadsappend diff --git a/tests/zfs-tests/cmd/threadsappend/Makefile.am b/tests/zfs-tests/cmd/threadsappend/Makefile.am new file mode 100644 index 000000000..f030b42d5 --- /dev/null +++ b/tests/zfs-tests/cmd/threadsappend/Makefile.am @@ -0,0 +1,7 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = threadsappend +threadsappend_SOURCES = threadsappend.c +threadsappend_LDADD = -lpthread diff --git a/tests/zfs-tests/cmd/threadsappend/threadsappend.c b/tests/zfs-tests/cmd/threadsappend/threadsappend.c new file mode 100644 index 000000000..25710a3c1 --- /dev/null +++ b/tests/zfs-tests/cmd/threadsappend/threadsappend.c @@ -0,0 +1,135 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
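For context on why the monitor loop below stats the path rather than the writer's descriptor: after an unlink(2) the name is gone, but an open descriptor legitimately reports st_nlink == 0, as the following sketch (with a hypothetical file name) illustrates:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int
main(void)
{
	struct stat st;
	int fd;

	if ((fd = open("linkcnt.tmp", O_RDWR | O_CREAT | O_TRUNC, 0644)) < 0) {
		perror("open");
		return (1);
	}
	if (unlink("linkcnt.tmp") != 0) {
		perror("unlink");
		return (1);
	}
	/* By name the file is gone ... */
	if (stat("linkcnt.tmp", &st) != 0)
		(void) printf("stat by name fails after unlink\n");
	/* ... yet the still-open descriptor reports zero links. */
	if (fstat(fd, &st) == 0)
		(void) printf("fstat: st_nlink = %d\n", (int)st.st_nlink);

	(void) close(fd);
	return (0);
}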
+ * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Copyright (c) 2013 by Delphix. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * The size of the output file, "go.out", should be 80*8192*2 = 1310720 + * + * $ cd /tmp; go; ls -l go.out + * done. + * -rwxr-xr-x 1 jdm staff 1310720 Apr 13 19:45 go.out + * $ cd /zfs; go; ls -l go.out + * done. + * -rwxr-xr-x 1 jdm staff 663552 Apr 13 19:45 go.out + * + * The file on zfs is short as it does not appear that zfs is making the + * implicit seek to EOF and the actual write atomic. From the SUSv3 + * interface spec, behavior is undefined if concurrent writes are performed + * from multi-processes to a single file. So I don't know if this is a + * standards violation, but I cannot find any such disclaimers in our + * man pages. This issue came up at a customer site in another context, and + * the suggestion was to open the file with O_APPEND, but that wouldn't + * help with zfs(see 4977529). Also see bug# 5031301. + */ + +static int outfd = 0; + +static void * +go(void *data) +{ + int ret, i = 0, n = *(int *)data; + char buf[8192] = {0}; + (void) memset(buf, n, sizeof (buf)); + + for (i = 0; i < 80; i++) { + ret = write(outfd, buf, sizeof (buf)); + if (ret != sizeof (buf)) + perror("write"); + } + return (NULL); +} + +static void +usage(void) +{ + (void) fprintf(stderr, + "usage: zfs_threadsappend \n"); + exit(1); +} + +int +main(int argc, char **argv) +{ + pthread_t tid; + int ret = 0; + long ncpus = 0; + int i; + + if (argc != 2) { + usage(); + } + + ncpus = sysconf(_SC_NPROCESSORS_ONLN); + if (ncpus < 0) { + (void) fprintf(stderr, + "Invalid return from sysconf(_SC_NPROCESSORS_ONLN)" + " : errno (decimal)=%d\n", errno); + exit(1); + } + if (ncpus < 2) { + (void) fprintf(stderr, + "Must execute this binary on a multi-processor system\n"); + exit(1); + } + + outfd = open(argv[optind++], O_RDWR|O_CREAT|O_APPEND|O_TRUNC, 0777); + if (outfd == -1) { + (void) fprintf(stderr, + "zfs_threadsappend: " + "open(%s, O_RDWR|O_CREAT|O_APPEND|O_TRUNC, 0777)" + " failed\n", argv[optind]); + perror("open"); + exit(1); + } + + for (i = 0; i < 2; i++) { + ret = pthread_create(&tid, NULL, go, (void *)&i); + if (ret != 0) { + (void) fprintf(stderr, + "zfs_threadsappend: thr_create(#%d) " + "failed error=%d\n", i+1, ret); + exit(1); + } + } + + while (pthread_join(tid, NULL) == 0) + continue; + + return (0); +} diff --git a/tests/zfs-tests/cmd/xattrtest/.gitignore b/tests/zfs-tests/cmd/xattrtest/.gitignore new file mode 100644 index 000000000..7d2128383 --- /dev/null +++ b/tests/zfs-tests/cmd/xattrtest/.gitignore @@ -0,0 +1 @@ +/xattrtest diff --git a/tests/zfs-tests/cmd/xattrtest/Makefile.am b/tests/zfs-tests/cmd/xattrtest/Makefile.am new file mode 100644 index 000000000..7398ae634 --- /dev/null +++ b/tests/zfs-tests/cmd/xattrtest/Makefile.am @@ -0,0 +1,6 @@ +include $(top_srcdir)/config/Rules.am + +pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/bin + +pkgexec_PROGRAMS = xattrtest +xattrtest_SOURCES = xattrtest.c diff --git 
a/tests/zfs-tests/cmd/xattrtest/xattrtest.c b/tests/zfs-tests/cmd/xattrtest/xattrtest.c new file mode 100644 index 000000000..22f798db2 --- /dev/null +++ b/tests/zfs-tests/cmd/xattrtest/xattrtest.c @@ -0,0 +1,641 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2016 Lawrence Livermore National Security, LLC. + */ + +/* + * An extended attribute (xattr) correctness test. This program creates + * N files and sets M attrs on them of size S. Optionally is will verify + * a pattern stored in the xattr. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern char *program_invocation_short_name; + +#define ERROR(fmt, ...) \ + fprintf(stderr, "%s: %s:%d: %s: " fmt "\n", \ + program_invocation_short_name, __FILE__, __LINE__, \ + __func__, ## __VA_ARGS__); + +static const char shortopts[] = "hvycdn:f:x:s:p:t:e:rRk"; +static const struct option longopts[] = { + { "help", no_argument, 0, 'h' }, + { "verbose", no_argument, 0, 'v' }, + { "verify", no_argument, 0, 'y' }, + { "nth", required_argument, 0, 'n' }, + { "files", required_argument, 0, 'f' }, + { "xattrs", required_argument, 0, 'x' }, + { "size", required_argument, 0, 's' }, + { "path", required_argument, 0, 'p' }, + { "synccaches", no_argument, 0, 'c' }, + { "dropcaches", no_argument, 0, 'd' }, + { "script", required_argument, 0, 't' }, + { "seed", required_argument, 0, 'e' }, + { "random", no_argument, 0, 'r' }, + { "randomvalue", no_argument, 0, 'R' }, + { "keep", no_argument, 0, 'k' }, + { 0, 0, 0, 0 } +}; + +static int verbose = 0; +static int verify = 0; +static int synccaches = 0; +static int dropcaches = 0; +static int nth = 0; +static int files = 1000; +static int xattrs = 1; +static int size = 1; +static int size_is_random = 0; +static int value_is_random = 0; +static int keep_files = 0; +static char path[PATH_MAX] = "/tmp/xattrtest"; +static char script[PATH_MAX] = "/bin/true"; + +static int +usage(int argc, char **argv) { + fprintf(stderr, + "usage: %s [-hvycdrRk] [-n ] [-f ] [-x ]\n" + " [-s ] [-p ] [-t