git.proxmox.com Git - mirror_zfs.git/commitdiff
ZTS: Test boundary conditions in alloc_class_012
author Ryan Moeller <ryan@iXsystems.com>
Thu, 12 Mar 2020 17:50:01 +0000 (13:50 -0400)
committer GitHub <noreply@github.com>
Thu, 12 Mar 2020 17:50:01 +0000 (10:50 -0700)
Issue #9142 describes an error in the checks for device removal that
can prevent removal of special allocation class vdevs in some
situations.

Enhance alloc_class/alloc_class_012_pos to check situations where this
bug occurs.

Update zts-report with knowledge of issue #9142.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ryan Moeller <ryan@iXsystems.com>
Closes #10116
Issue #9142

tests/test-runner/bin/zts-report.py
tests/zfs-tests/tests/functional/alloc_class/alloc_class_012_pos.ksh

index c0140036fb5cc84b5a29c09aeb5a94e074d3aa72..9f96d03ecdd761f78d768e93dcd9c9100db21753 100755 (executable)
@@ -178,6 +178,8 @@ if sys.platform.startswith('freebsd'):
 # reasons listed above can be used.
 #
 maybe = {
+    'alloc_class/alloc_class_012_pos': ['FAIL', '9142'],
+    'alloc_class/alloc_class_013_pos': ['FAIL', '9142'],
     'cache/cache_010_neg': ['FAIL', known_reason],
     'casenorm/mixed_formd_lookup': ['FAIL', '7633'],
     'casenorm/mixed_formd_delete': ['FAIL', '7633'],
index 7ab6a0543b2dbb111461f1b49d32b4a0216d61db..1cfe6642d8a81407c64ed72120f0e1ff93c7dea0 100755 (executable)
@@ -57,50 +57,66 @@ if (match($0,"L0 [0-9]+")) {
 }}'
 }
 
+#
+# Check that device removal works for special class vdevs
+#
+function check_removal
+{
+       #
+       # Create a non-raidz pool so we can remove top-level vdevs
+       #
+       log_must disk_setup
+       log_must zpool create $TESTPOOL $ZPOOL_DISKS \
+           special $CLASS_DISK0 special $CLASS_DISK1
+       log_must display_status "$TESTPOOL"
+
+       #
+       # Generate some metadata and small blocks in the special class vdev
+       # before removal
+       #
+       typeset -l i=1
+       typeset -l blocks=25
+
+       log_must zfs create -o special_small_blocks=32K -o recordsize=32K \
+           $TESTPOOL/$TESTFS
+       for i in 1 2 3 4; do
+               log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/testfile.$i \
+                   bs=1M count=$blocks
+               ((blocks = blocks + 25))
+       done
+       log_must sync_pool $TESTPOOL
+       log_must zpool list -v $TESTPOOL
+
+       # Verify the files were written in the special class vdevs
+       for i in 1 2 3 4; do
+               dataset="$TESTPOOL/$TESTFS"
+               inum="$(get_objnum /$TESTPOOL/$TESTFS/testfile.$i)"
+               log_must file_in_special_vdev $dataset $inum
+       done
+
+       log_must zpool remove $TESTPOOL $CLASS_DISK0
+
+       sleep 5
+       log_must sync_pool $TESTPOOL
+       sleep 1
+
+       log_must zdb -bbcc $TESTPOOL
+       log_must zpool list -v $TESTPOOL
+       log_must zpool destroy -f "$TESTPOOL"
+       log_must disk_cleanup
+}
+
 claim="Removing a special device from a pool succeeds."
 
 log_assert $claim
 log_onexit cleanup
 
-#
-# Create a non-raidz pool so we can remove top-level vdevs
-#
-log_must disk_setup
-log_must zpool create $TESTPOOL $ZPOOL_DISK0 $ZPOOL_DISK1 $ZPOOL_DISK2 \
-  special $CLASS_DISK0 special $CLASS_DISK1
-log_must display_status "$TESTPOOL"
-
-#
-# Generate some metadata and small blocks in the special class before removal
-#
-typeset -l i=1
-typeset -l blocks=25
-
-log_must zfs create -o special_small_blocks=32K -o recordsize=32K \
-       $TESTPOOL/$TESTFS
-for i in 1 2 3 4; do
-       log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/testfile.$i bs=1M \
-           count=$blocks
-       ((blocks = blocks + 25))
-done
-log_must sync_pool $TESTPOOL
-log_must zpool list -v $TESTPOOL
-
-# Verify the files were written in the special class vdevs
-for i in 1 2 3 4; do
-       dataset="$TESTPOOL/$TESTFS"
-       inum="$(stat -c '%i' /$TESTPOOL/$TESTFS/testfile.$i)"
-       log_must file_in_special_vdev $dataset $inum
+typeset CLASS_DEVSIZE=$CLASS_DEVSIZE
+for CLASS_DEVSIZE in $CLASS_DEVSIZE $ZPOOL_DEVSIZE; do
+       typeset ZPOOL_DISKS=$ZPOOL_DISKS
+       for ZPOOL_DISKS in "$ZPOOL_DISKS" $ZPOOL_DISK0; do
+               check_removal
+       done
 done
 
-log_must zpool remove $TESTPOOL $CLASS_DISK0
-
-sleep 5
-log_must sync_pool $TESTPOOL
-sleep 1
-
-log_must zdb -bbcc $TESTPOOL
-log_must zpool list -v $TESTPOOL
-log_must zpool destroy -f "$TESTPOOL"
-
 log_pass $claim