git.proxmox.com Git - mirror_zfs.git/commitdiff
OpenZFS 9245 - zfs-test failures: slog_013_pos and slog_014_pos
author: John Wren Kennedy <john.kennedy@delphix.com>
Wed, 17 Aug 2016 21:15:27 +0000 (15:15 -0600)
committer: Brian Behlendorf <behlendorf1@llnl.gov>
Mon, 4 Jun 2018 21:55:02 +0000 (14:55 -0700)
Test 13 would fail because of attempts to zpool destroy -f a pool that
was still busy. Changed those calls to destroy_pool which does a retry
loop, and the problem is no longer reproducible. Also removed some
non-functional code in the test, which had effectively been disabled
by being placed after the call to log_pass.

Test 14 would fail because sometimes the check for a degraded pool would
complete before the pool had changed state. Changed the logic to check
in a loop with a timeout and the problem is no longer reproducible.

Authored by: John Wren Kennedy <john.kennedy@delphix.com>
Reviewed by: Matt Ahrens <matt@delphix.com>
Reviewed by: Chris Williamson <chris.williamson@delphix.com>
Reviewed by: Yuri Pankov <yuripv@yuripv.net>
Reviewed-by: George Melikov <mail@gmelikov.ru>
Approved by: Dan McDonald <danmcd@joyent.com>
Ported-by: Brian Behlendorf <behlendorf1@llnl.gov>
Porting Notes:
* Re-enabled slog_013_pos.ksh

OpenZFS-issue: https://illumos.org/issues/9245
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/8f323b5
Closes #7585

tests/zfs-tests/include/libtest.shlib
tests/zfs-tests/tests/functional/slog/slog.kshlib
tests/zfs-tests/tests/functional/slog/slog_013_pos.ksh
tests/zfs-tests/tests/functional/slog/slog_014_pos.ksh

index e8def35f8408acb138b2b2434542f7c6be835bb7..53914d3d087b99f6285b4235be1bf32a563b82c6 100644 (file)
@@ -2152,6 +2152,25 @@ function is_pool_removed #pool
        return $?
 }
 
+function wait_for_degraded
+{
+       typeset pool=$1
+       typeset timeout=${2:-30}
+       typeset t0=$SECONDS
+
+       while :; do
+               [[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
+               log_note "$pool is not yet degraded."
+               sleep 1
+               if ((SECONDS - t0 > $timeout)); then
+                       log_note "$pool not degraded after $timeout seconds."
+                       return 1
+               fi
+       done
+
+       return 0
+}
+
 #
 # Use create_pool()/destroy_pool() to clean up the information in
 # in the given disk to avoid slice overlapping.
index b32d18f2eac7aa4dde4cc268b4f7d2239dbb0b22..6ed7e4e0502f4c33267a322aa20f638cdb94fa3b 100644 (file)
 
 function cleanup
 {
-       if datasetexists $TESTPOOL ; then
-               log_must zpool destroy -f $TESTPOOL
-       fi
-       if datasetexists $TESTPOOL2 ; then
-               log_must zpool destroy -f $TESTPOOL2
-       fi
+       poolexists $TESTPOOL && destroy_pool $TESTPOOL
+       poolexists $TESTPOOL2 && destroy_pool $TESTPOOL2
        rm -rf $TESTDIR
 }
 
index 1436feab923741e7bbac52edb241cd781495b6b6..bbe5adc241748828b8a3efc3a23ca25b1c5d22bf 100755 (executable)
 
 verify_runnable "global"
 
-if ! $(is_physical_device $DISKS) ; then
-       log_unsupported "This directory cannot be run on raw files."
-fi
-
 function cleanup_testenv
 {
        cleanup
-       if datasetexists $TESTPOOL2 ; then
-               log_must zpool destroy -f $TESTPOOL2
-       fi
        if [[ -n $lofidev ]]; then
                if is_linux; then
                        losetup -d $lofidev
@@ -71,19 +64,19 @@ log_onexit cleanup_testenv
 dsk1=${DISKS%% *}
 log_must zpool create $TESTPOOL ${DISKS#$dsk1}
 
-# Add nomal disk
+# Add provided disk
 log_must zpool add $TESTPOOL log $dsk1
 log_must verify_slog_device $TESTPOOL $dsk1 'ONLINE'
-# Add nomal file
+# Add normal file
 log_must zpool add $TESTPOOL log $LDEV
 ldev=$(random_get $LDEV)
 log_must verify_slog_device $TESTPOOL $ldev 'ONLINE'
 
-# Add lofi device
+# Add loop back device
 if is_linux; then
        lofidev=$(losetup -f)
-       lofidev=${lofidev##*/}
        log_must losetup $lofidev ${LDEV2%% *}
+       lofidev=${lofidev##*/}
 else
        lofidev=${LDEV2%% *}
        log_must lofiadm -a $lofidev
@@ -94,13 +87,3 @@ log_must verify_slog_device $TESTPOOL $lofidev 'ONLINE'
 
 log_pass "Verify slog device can be disk, file, lofi device or any device " \
        "that presents a block interface."
-
-# Add file which reside in the itself
-mntpnt=$(get_prop mountpoint $TESTPOOL)
-log_must mkfile $MINVDEVSIZE $mntpnt/vdev
-log_must zpool add $TESTPOOL $mntpnt/vdev
-
-# Add ZFS volume
-vol=$TESTPOOL/vol
-log_must zpool create -V $MINVDEVSIZE $vol
-log_must zpool add $TESTPOOL ${ZVOL_DEVDIR}/$vol
index bc0c2fec55d6649ba77b9f887383825f30d0e0cf..7e187289333c41bc521b5376f88720100e6b0dac 100755 (executable)
@@ -45,10 +45,8 @@ verify_runnable "global"
 
 log_assert "log device can survive when one of the pool device get corrupted."
 
-for type in "mirror" "raidz" "raidz2"
-do
-       for spare in "" "spare"
-       do
+for type in "mirror" "raidz" "raidz2"; do
+       for spare in "" "spare"; do
                log_must zpool create $TESTPOOL $type $VDEV $spare $SDEV \
                        log $LDEV
 
@@ -64,14 +62,8 @@ do
                fi
                log_must zpool scrub $TESTPOOL
                log_must display_status $TESTPOOL
-               log_must zpool status $TESTPOOL 2>&1 >/dev/null
                log_must zpool offline $TESTPOOL $VDIR/a
-
-               zpool status -v $TESTPOOL | \
-                       grep "state: DEGRADED" 2>&1 >/dev/null
-               if (( $? != 0 )); then
-                       log_fail "pool $TESTPOOL status should be DEGRADED"
-               fi
+               log_must wait_for_degraded $TESTPOOL
 
                zpool status -v $TESTPOOL | grep logs | \
                        grep "DEGRADED" 2>&1 >/dev/null