destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
[[ -e $TESTDIR1 ]] && \
- log_must rm -rf $TESTDIR1 > /dev/null 2>&1
+ log_must rm -rf $TESTDIR1
default_cleanup
}
return 1
fi
- if is_linux; then
- swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
- elif is_freebsd; then
- swapctl -l | grep -w $device
- else
- swap -l | grep -w $device > /dev/null 2>&1
- fi
-
- return $?
+ case "$(uname)" in
+ Linux)
+ swapon -s | grep -wq $(readlink -f $device)
+ ;;
+ FreeBSD)
+ swapctl -l | grep -wq $device
+ ;;
+ *)
+ swap -l | grep -wq $device
+ ;;
+ esac
}
#
{
typeset swapdev=$1
- if is_linux; then
+ case "$(uname)" in
+ Linux)
log_must eval "mkswap $swapdev > /dev/null 2>&1"
log_must swapon $swapdev
- elif is_freebsd; then
+ ;;
+ FreeBSD)
log_must swapctl -a $swapdev
- else
- log_must swap -a $swapdev
- fi
+ ;;
+ *)
+ log_must swap -a $swapdev
+ ;;
+ esac
return 0
}
default_mirror_setup_noexit $DISKS
file_write -o create -w -f $init_data -b $blksize -c $write_count
-log_must echo "zfs" >> $init_data
+echo "zfs" >> $init_data
sync_pool $TESTPOOL
output=$(zdb -r $TESTPOOL/$TESTFS file1 $tmpfile)
MNTPFS="$(get_prop mountpoint $TESTDS)"
FILENAME="$MNTPFS/file"
log_must mkfile 128k $FILENAME
-log_must exec 9<> $FILENAME # open file
+log_must eval "exec 9<> $FILENAME" # open file
# 3. Lazy umount
if is_freebsd; then
if [ ! -f $FILENAME ]; then
log_fail "Lazy remount failed"
fi
-log_must exec 9>&- # close fd
+log_must eval "exec 9>&-" # close fd
# 5. Verify multiple mounts of the same dataset are possible
MNTPFS2="$MNTPFS-second"
touch $mntpnt2/f18
# Remove objects that are intended to be missing.
-rm $mntpnt/h17
-rm $mntpnt2/h*
+rm $mntpnt/h17 $mntpnt2/h*
# Add empty objects to $fs to exercise dmu_traverse code
for i in {1..100}; do
log_must zfs snapshot $fs@s1
log_must zfs snapshot $fs2@s1
-log_must zfs send $fs@s1 > $TESTDIR/zr010p
-log_must zfs send $fs2@s1 > $TESTDIR/zr010p2
+log_must eval "zfs send $fs@s1 > $TESTDIR/zr010p"
+log_must eval "zfs send $fs2@s1 > $TESTDIR/zr010p2"
#
# Test that, when we receive a full send as a clone of itself,
# nop-write saves us all the space used by data blocks.
#
-cat $TESTDIR/zr010p | log_must zfs receive -o origin=$fs@s1 $rfs
+log_must eval "zfs receive -o origin=$fs@s1 $rfs < $TESTDIR/zr010p"
size=$(get_prop used $rfs)
size2=$(get_prop used $fs)
if [[ $size -ge $(($size2 / 10)) ]] then
log_must zfs destroy -fr $rfs
# Correctness testing: receive each full send as a clone of the other fiesystem.
-cat $TESTDIR/zr010p | log_must zfs receive -o origin=$fs2@s1 $rfs
+log_must eval "zfs receive -o origin=$fs2@s1 $rfs < $TESTDIR/zr010p"
mntpnt_old=$(get_prop mountpoint $fs)
mntpnt_new=$(get_prop mountpoint $rfs)
log_must directory_diff $mntpnt_old $mntpnt_new
log_must zfs destroy -r $rfs
-cat $TESTDIR/zr010p2 | log_must zfs receive -o origin=$fs@s1 $rfs
+log_must eval "zfs receive -o origin=$fs@s1 $rfs < $TESTDIR/zr010p2"
mntpnt_old=$(get_prop mountpoint $fs2)
mntpnt_new=$(get_prop mountpoint $rfs)
log_must directory_diff $mntpnt_old $mntpnt_new
log_must zfs create -o filesystem_limit=100 "$sendfs"
log_must zfs snapshot "$sendfs@a"
-log_must zfs send -R "$sendfs@a" >"$streamfile"
-log_must eval "zfs recv -svuF $recvfs <$streamfile"
+log_must eval "zfs send -R \"$sendfs@a\" >\"$streamfile\""
+log_must eval "zfs recv -svuF \"$recvfs\" <\"$streamfile\""
log_pass "ZFS can handle receiving streams with filesystem limits on \
pools where the feature was recently enabled"
POOL=$1
log_must zfs create -o recordsize=512 $POOL/fs
mntpnt=$(get_prop mountpoint "$POOL/fs")
- log_must dd if=/dev/urandom of=${mntpnt}/file bs=512 count=1 2>/dev/null
+ log_must eval "dd if=/dev/urandom of=${mntpnt}/file bs=512 count=1 2>/dev/null"
object=$(ls -i $mntpnt | awk '{print $1}')
log_must zfs snapshot $POOL/fs@a
while true; do
log_must zfs set sharenfs="rw=[::1]" $TESTPOOL/$TESTFS
output=$(showshares_nfs 2>&1)
-log_must grep "::1(" <<< "$output" > /dev/null
+log_must grep -q "::1(" <<< "$output"
log_must zfs set sharenfs="rw=[2::3]" $TESTPOOL/$TESTFS
output=$(showshares_nfs 2>&1)
-log_must grep "2::3(" <<< "$output" > /dev/null
+log_must grep -q "2::3(" <<< "$output"
log_must zfs set sharenfs="rw=[::1]:[2::3]" $TESTPOOL/$TESTFS
output=$(showshares_nfs 2>&1)
-log_must grep "::1(" <<< "$output" > /dev/null
-log_must grep "2::3(" <<< "$output" > /dev/null
+log_must grep -q "::1(" <<< "$output"
+log_must grep -q "2::3(" <<< "$output"
log_must zfs set sharenfs="rw=[::1]/64" $TESTPOOL/$TESTFS
output=$(showshares_nfs 2>&1)
-log_must grep "::1/64(" <<< "$output" > /dev/null
+log_must grep -q "::1/64(" <<< "$output"
log_must zfs set sharenfs="rw=[2::3]/128" $TESTPOOL/$TESTFS
output=$(showshares_nfs 2>&1)
-log_must grep "2::3/128(" <<< "$output" > /dev/null
+log_must grep -q "2::3/128(" <<< "$output"
log_must zfs set sharenfs="rw=[::1]/32:[2::3]/128" $TESTPOOL/$TESTFS
output=$(showshares_nfs 2>&1)
-log_must grep "::1/32(" <<< "$output" > /dev/null
-log_must grep "2::3/128(" <<< "$output" > /dev/null
+log_must grep -q "::1/32(" <<< "$output"
+log_must grep -q "2::3/128(" <<< "$output"
log_must zfs set sharenfs="rw=[::1]:[2::3]/64:[2a01:1234:1234:1234:aa34:234:1234:1234]:1.2.3.4/24" $TESTPOOL/$TESTFS
output=$(showshares_nfs 2>&1)
-log_must grep "::1(" <<< "$output" > /dev/null
-log_must grep "2::3/64(" <<< "$output" > /dev/null
-log_must grep "2a01:1234:1234:1234:aa34:234:1234:1234(" <<< "$output" > /dev/null
-log_must grep "1\\.2\\.3\\.4/24(" <<< "$output" > /dev/null
+log_must grep -q "::1(" <<< "$output"
+log_must grep -q "2::3/64(" <<< "$output"
+log_must grep -q "2a01:1234:1234:1234:aa34:234:1234:1234(" <<< "$output"
+log_must grep -q "1\\.2\\.3\\.4/24(" <<< "$output"
log_pass "NFS share ip address propagated correctly."
# Once set zpool autoexpand=off, zpool can *NOT* autoexpand by
# Dynamic VDEV Expansion
#
-#
# STRATEGY:
# 1) Create three vdevs (loopback, scsi_debug, and file)
# 2) Create pool by using the different devices and set autoexpand=off
log_assert "zpool can not expand if set autoexpand=off after vdev expansion"
-for type in " " mirror raidz draid; do
+for type in "" mirror raidz draid; do
log_note "Setting up loopback, scsi_debug, and file vdevs"
log_must truncate -s $org_size $FILE_LO
DEV1=$(losetup -f)
# The -f is required since we're mixing disk and file vdevs.
log_must zpool create -f $TESTPOOL1 $type $DEV1 $DEV2 $DEV3
- typeset autoexp=$(get_pool_prop autoexpand $TESTPOOL1)
- if [[ $autoexp != "off" ]]; then
- log_fail "zpool $TESTPOOL1 autoexpand should be off but is " \
- "$autoexp"
- fi
+ log_must [ "$(get_pool_prop autoexpand $TESTPOOL1)" = "off" ]
typeset prev_size=$(get_pool_prop size $TESTPOOL1)
log_must losetup -c $DEV1
sleep 3
- echo "2" > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
- echo "1" > /sys/class/block/$DEV2/device/rescan
+ log_must eval "echo 2 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb"
+ log_must eval "echo 1 > /sys/class/block/$DEV2/device/rescan"
block_device_wait
sleep 3
# check for zpool history for the pool size expansion
zpool history -il $TESTPOOL1 | grep "pool '$TESTPOOL1' size:" | \
- grep "vdev online" >/dev/null 2>&1
-
- if [[ $? -eq 0 ]]; then
- log_fail "pool $TESTPOOL1 is not autoexpand after vdev " \
- "expansion"
- fi
-
- typeset expand_size=$(get_pool_prop size $TESTPOOL1)
+ grep "vdev online" &&
+ log_fail "pool $TESTPOOL1 is not autoexpand after vdev expansion"
- if [[ "$prev_size" != "$expand_size" ]]; then
- log_fail "pool $TESTPOOL1 size changed after vdev expansion"
- fi
+ log_must [ "$(get_pool_prop size $TESTPOOL1)" = "$prev_size" ]
cleanup
done
function cleanup
{
# clear any remaining zinjections
- log_must zinject -c all > /dev/null
+ log_must eval "zinject -c all > /dev/null"
destroy_pool $TESTPOOL1
function uncompress_pool
{
log_note "Creating pool from $POOL_FILE"
- log_must bzcat \
+ log_must eval bzcat \
$STF_SUITE/tests/functional/cli_root/zpool_import/blockfiles/$POOL_FILE.bz2 \
- > /$TESTPOOL/$POOL_FILE
- return 0
+ "> /$TESTPOOL/$POOL_FILE"
}
function cleanup
{
poolexists $POOL_NAME && log_must zpool destroy $POOL_NAME
- [[ -e /$TESTPOOL/$POOL_FILE ]] && rm /$TESTPOOL/$POOL_FILE
- return 0
+ log_must rm -f /$TESTPOOL/$POOL_FILE
}
log_onexit cleanup
typeset -i ver_new=$(random_int_between $ver_old $MAX_VER)
create_old_pool $ver_old
- log_must zpool upgrade -V $ver_new $pool_name > /dev/null
+ log_must eval 'zpool upgrade -V $ver_new $pool_name > /dev/null'
check_poolversion $pool_name $ver_new
destroy_upgraded_pool $ver_old
done
log_must zfs create -o dnodesize=1k $TEST_SEND_FS
log_must touch /$TEST_SEND_FS/$TEST_FILE
log_must zfs snap $TEST_SNAP
-log_must zfs send $TEST_SNAP > $TEST_STREAM
+log_must eval "zfs send $TEST_SNAP > $TEST_STREAM"
log_must rm -f /$TEST_SEND_FS/$TEST_FILE
log_must touch /$TEST_SEND_FS/$TEST_FILEINCR
log_must zfs snap $TEST_SNAPINCR
-log_must zfs send -i $TEST_SNAP $TEST_SNAPINCR > $TEST_STREAMINCR
+log_must eval "zfs send -i $TEST_SNAP $TEST_SNAPINCR > $TEST_STREAMINCR"
log_must eval "zfs recv $TEST_RECV_FS < $TEST_STREAM"
inode=$(ls -li /$TEST_RECV_FS/$TEST_FILE | awk '{print $1}')
log_must_busy zpool export $TESTPOOL
log_must zpool import $TESTPOOL
-log_must ls -lR "/$TEST_FS/" >/dev/null 2>&1
+log_must eval "ls -lR /$TEST_FS/ >/dev/null 2>&1"
log_must zdb -d $TESTPOOL
log_pass
# Save initial TESTPOOL history
log_must eval "zpool history $TESTPOOL >$OLD_HISTORY"
-log_must zpool get all $TESTPOOL >/dev/null
-log_must zpool list $TESTPOOL >/dev/null
-log_must zpool status $TESTPOOL >/dev/null
-log_must zpool iostat $TESTPOOL >/dev/null
+log_must eval "zpool get all $TESTPOOL >/dev/null"
+log_must eval "zpool list $TESTPOOL >/dev/null"
+log_must eval "zpool status $TESTPOOL >/dev/null"
+log_must eval "zpool iostat $TESTPOOL >/dev/null"
log_must eval "zpool history $TESTPOOL >$NEW_HISTORY"
log_must diff $OLD_HISTORY $NEW_HISTORY
# Save initial TESTPOOL history
log_must eval "zpool history $TESTPOOL > $OLD_HISTORY"
-log_must zfs list $fs > /dev/null
-log_must zfs get mountpoint $fs > /dev/null
+log_must eval "zfs list $fs > /dev/null"
+log_must eval "zfs get mountpoint $fs > /dev/null"
log_must zfs unmount $fs
log_must zfs mount $fs
if ! is_linux; then
log_must zfs share $fs
log_must zfs unshare $fs
fi
-log_must zfs send -i $snap1 $snap2 > /dev/null
+log_must eval "zfs send -i $snap1 $snap2 > /dev/null"
log_must zfs holds $snap1
log_must eval "zpool history $TESTPOOL > $NEW_HISTORY"
# Remove dump device.
#
if [[ -n $PREVDUMPDEV ]]; then
- log_must dumpadm -u -d $PREVDUMPDEV > /dev/null
+ log_must eval "dumpadm -u -d $PREVDUMPDEV > /dev/null"
fi
destroy_pool $TESTPOOL
typeset dumpdev=""
-PREVDUMPDEV=`dumpadm | grep "Dump device" | awk '{print $3}'`
+PREVDUMPDEV=`dumpadm | awk '/Dump device/ {print $3}'`
log_note "Zero $FS_DISK0"
log_must cleanup_devices $FS_DISK0
log_note "Configuring $rawdisk0 as dump device"
-log_must dumpadm -d $rawdisk0 > /dev/null
+log_must eval "dumpadm -d $rawdisk0 > /dev/null"
log_note "Confirm that dump device has been setup"
-dumpdev=`dumpadm | grep "Dump device" | awk '{print $3}'`
+dumpdev=`dumpadm | awk '/Dump device/ {print $3}'`
[[ -z "$dumpdev" ]] && log_untested "No dump device has been configured"
[[ "$dumpdev" != "$rawdisk0" ]] && \
log_note "Kill off ufsdump process if still running"
kill -0 $PIDUFSDUMP > /dev/null 2>&1 && \
- log_must kill -9 $PIDUFSDUMP > /dev/null 2>&1
+ log_must eval "kill -9 $PIDUFSDUMP"
#
# Note: It would appear that ufsdump spawns a number of processes
# which are not killed when the $PIDUFSDUMP is whacked. So best bet
# is to find the rest of the them and deal with them individually.
#
- for all in `pgrep ufsdump`
- do
- kill -9 $all > /dev/null 2>&1
- done
+	pkill -9 ufsdump
log_note "Kill off ufsrestore process if still running"
kill -0 $PIDUFSRESTORE > /dev/null 2>&1 && \
- log_must kill -9 $PIDUFSRESTORE > /dev/null 2>&1
+ log_must eval "kill -9 $PIDUFSRESTORE"
ismounted $UFSMP ufs && log_must umount $UFSMP
- rm -rf $UFSMP
- rm -rf $TESTDIR
+ rm -rf $UFSMP $TESTDIR
#
# Tidy up the disks we used.
typeset mmp_write
typeset mmp_delay
- log_must zdb -e -p $devpath $pool >$tmpfile 2>/dev/null
+ log_must eval "zdb -e -p $devpath $pool >$tmpfile 2>/dev/null"
mmp_fail=$(awk '/mmp_fail/ {print $NF}' $tmpfile)
mmp_write=$(awk '/mmp_write/ {print $NF}' $tmpfile)
mmp_delay=$(awk '/mmp_delay/ {print $NF}' $tmpfile)
- if [ -f $tmpfile ]; then
- rm $tmpfile
- fi
+ rm $tmpfile
# In order of preference:
if [ -n $mmp_fail -a -n $mmp_write ]; then
default_setup_noexit $DISK
log_must zpool set multihost=off $TESTPOOL
-log_must zdb -u $TESTPOOL > $PREV_UBER
+log_must eval "zdb -u $TESTPOOL > $PREV_UBER"
log_must sleep 5
-log_must zdb -u $TESTPOOL > $CURR_UBER
+log_must eval "zdb -u $TESTPOOL > $CURR_UBER"
if ! diff "$CURR_UBER" "$PREV_UBER"; then
log_fail "mmp thread has updated an uberblock"
log_must zpool set multihost=on $TESTPOOL
log_must sleep 5
-log_must zdb -u $TESTPOOL > $CURR_UBER
+log_must eval "zdb -u $TESTPOOL > $CURR_UBER"
if diff "$CURR_UBER" "$PREV_UBER"; then
log_fail "mmp failed to update uberblocks"
default_setup_noexit $DISK
log_must zpool set multihost=on $TESTPOOL
-log_must zdb -u $TESTPOOL > $PREV_UBER
+log_must eval "zdb -u $TESTPOOL > $PREV_UBER"
log_must sleep 5
-log_must zdb -u $TESTPOOL > $CURR_UBER
+log_must eval "zdb -u $TESTPOOL > $CURR_UBER"
if diff -u "$CURR_UBER" "$PREV_UBER"; then
log_fail "mmp failed to update uberblocks"
log_mustnot_expect space zfs create $TESTPOOL/$TESTFS/subfs
log_mustnot_expect space zfs clone $TESTPOOL/$TESTFS@snap $TESTPOOL/clone
-log_must zfs send $TESTPOOL/$TESTFS@snap > $TEST_BASE_DIR/stream.$$
-log_mustnot_expect space zfs receive $TESTPOOL/$TESTFS/recvd < $TEST_BASE_DIR/stream.$$
+log_must eval "zfs send $TESTPOOL/$TESTFS@snap > $TEST_BASE_DIR/stream.$$"
+log_mustnot_expect space eval "zfs receive $TESTPOOL/$TESTFS/recvd < $TEST_BASE_DIR/stream.$$"
log_must rm $TEST_BASE_DIR/stream.$$
log_must zfs rename $TESTPOOL/$TESTFS@snap $TESTPOOL/$TESTFS@snap_newname
# finish reading.
#
{
- log_must dd bs=512 count=4 >/dev/null
+ log_must eval "dd bs=512 count=4 >/dev/null"
log_must eval "$cmd"
- cat 2>&1 >/dev/null | log_must grep "Input/output error"
+ log_must eval 'cat 2>&1 >/dev/null | grep "Input/output error"'
} <$TXG_HIST
}
log_must zpool import -o cachefile=none -d $dir $pool
typeset mntpnt=$(get_prop mountpoint $pool/fs)
- log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+ log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
log_must check_pool_status $pool "errors" "No known data errors"
#
log_must zpool import -o cachefile=none -d $dir $pool
typeset mntpnt=$(get_prop mountpoint $pool/fs)
- log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+ log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
log_must check_pool_status $pool "errors" "No known data errors"
log_must zpool scrub -w $pool
log_must zpool import -o cachefile=none -d $dir $pool
typeset mntpnt=$(get_prop mountpoint $pool/fs)
- log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+ log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
log_must check_pool_status $pool "errors" "No known data errors"
#
log_must zpool import -o cachefile=none -d $dir $pool
typeset mntpnt=$(get_prop mountpoint $pool/fs)
- log_must find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1
+ log_must eval "find $mntpnt -type f -exec cksum {} + >> /dev/null 2>&1"
log_must check_pool_status $pool "errors" "No known data errors"
log_must zpool scrub -w $pool
# inject read io errors on vdev and verify resilver does not restart
log_must zinject -a -d ${VDEV_FILES[2]} -e io -T read -f 0.25 $TESTPOOL1
- log_must cat ${DATAPATHS[1]} > /dev/null
+ log_must eval "cat ${DATAPATHS[1]} > /dev/null"
log_must zinject -c all
# there should still be 2 resilver starts w/o defer, 1 with defer
# create the pool and a 32M file (32k blocks)
log_must truncate -s $VDEV_FILE_SIZE ${VDEV_FILES[0]} $SPARE_VDEV_FILE
log_must zpool create -f -O recordsize=1k $TESTPOOL1 ${VDEV_FILES[0]}
-log_must dd if=/dev/urandom of=/$TESTPOOL1/file bs=1M count=32 > /dev/null 2>&1
+log_must eval "dd if=/dev/urandom of=/$TESTPOOL1/file bs=1M count=32 2>/dev/null"
# determine objset/object
objset=$(zdb -d $TESTPOOL1/ | sed -ne 's/.*ID \([0-9]*\).*/\1/p')
#
# https://github.com/openzfs/zfs/issues/6143
#
- log_must df >/dev/null
+ log_must eval "df >/dev/null"
log_must_busy zfs destroy -Rf $pool
else
typeset list=$(zfs list -H -r -t all -o name $pool)
if [[ -d $mntpnt ]]; then
rm -rf $mntpnt/*
fi
-
- return 0
}
function cleanup_pools
for ((i=0; i<2; i=i+1)); do
mess_send_file /$streamfs/$stream_num
- log_mustnot zfs recv -suv $recvfs </$streamfs/$stream_num
+ log_mustnot eval "zfs recv -suv $recvfs </$streamfs/$stream_num"
stream_num=$((stream_num+1))
token=$(zfs get -Hp -o value receive_resume_token $recvfs)
log_must eval "zfs send -nvt $token > /dev/null"
log_must eval "zfs send -t $token >/$streamfs/$stream_num"
- [[ -f /$streamfs/$stream_num ]] || \
- log_fail "NO FILE /$streamfs/$stream_num"
done
- log_must zfs recv -suv $recvfs </$streamfs/$stream_num
+ log_must eval "zfs recv -suv $recvfs </$streamfs/$stream_num"
}
function get_resume_token
log_must eval "$sendcmd > /$streamfs/1"
mess_send_file /$streamfs/1
- log_mustnot zfs recv -suv $recvfs < /$streamfs/1 2>&1
- token=$(zfs get -Hp -o value receive_resume_token $recvfs)
- echo "$token" > /$streamfs/resume_token
-
- return 0
+ log_mustnot eval "zfs recv -suv $recvfs < /$streamfs/1 2>&1"
+ get_prop receive_resume_token $recvfs > /$streamfs/resume_token
}
#
setup_all
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
typeset -i COUNT=10
typeset -i i=0
[[ $? -eq 0 ]] && \
log_must zfs destroy $SNAPFS
- [[ -e $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
}
log_assert "Verify that a rollback to a previous snapshot succeeds."
log_onexit cleanup
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
typeset -i COUNT=10
[[ $? -eq 0 ]] && \
log_must zfs destroy $SNAPFS
- [[ -e $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
}
log_assert "Verify rollback is with respect to latest snapshot."
log_onexit cleanup
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
typeset -i COUNT=10
(( i = i + 1 ))
done
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/original_file* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && log_must rm -f $TESTDIR/original_file*
#
# Now rollback to latest snapshot
#
# https://github.com/openzfs/zfs/issues/6143
#
-log_must df >/dev/null
+log_must eval "df >/dev/null"
export __ZFS_POOL_RESTRICT="$TESTPOOL"
log_must zfs unmount -a
log_must touch /$TESTPOOL/$TESTFILE/$TESTFILE.1
log_must zfs rollback $SNAPPOOL.1
-log_must df >/dev/null
+log_must eval "df >/dev/null"
log_pass "Rollbacks succeed when nested file systems are present."
log_must zfs destroy $SNAPFS
fi
- log_must rm -rf $SNAPDIR $TESTDIR/* > /dev/null 2>&1
+ log_must rm -rf $SNAPDIR $TESTDIR/*
}
log_assert "Verify a file system snapshot is identical to original."
function cleanup
{
- if [[ -d $CWD ]]; then
- cd $CWD || log_fail "Could not cd $CWD"
- fi
-
- snapexists $SNAPFS
- if [[ $? -eq 0 ]]; then
- log_must zfs destroy $SNAPFS
- fi
-
- if [[ -e $SNAPDIR ]]; then
- log_must rm -rf $SNAPDIR > /dev/null 2>&1
- fi
-
- if [[ -e $TESTDIR ]]; then
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
- fi
-
- if [[ -d "$SNAPSHOT_TARDIR" ]]; then
- log_must rm -rf $SNAPSHOT_TARDIR > /dev/null 2>&1
- fi
+	[ -d "$CWD" ] && log_must cd $CWD
+
+ snapexists $SNAPFS && log_must zfs destroy $SNAPFS
+
+	[ -e "$SNAPDIR" ] && log_must rm -rf $SNAPDIR
+	[ -e "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
+ [ -d "$SNAPSHOT_TARDIR" ] && log_must rm -rf $SNAPSHOT_TARDIR
}
log_assert "Verify an archive of a file system is identical to " \
typeset -i COUNT=21
typeset OP=create
-[[ -n $TESTDIR ]] && \
- rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && rm -rf $TESTDIR/*
log_note "Create files in the zfs filesystem..."
log_note "Create a tarball from $TESTDIR contents..."
CWD=$PWD
-cd $TESTDIR || log_fail "Could not cd $TESTDIR"
+log_must cd $TESTDIR
log_must tar cf $SNAPSHOT_TARDIR/original.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
log_note "Create a snapshot and mount it..."
log_must zfs snapshot $SNAPFS
log_note "Remove all of the original files..."
-log_must rm -f $TESTDIR/file* > /dev/null 2>&1
+log_must rm -f $TESTDIR/file*
log_note "Create tarball of snapshot..."
CWD=$PWD
-cd $SNAPDIR || log_fail "Could not cd $SNAPDIR"
+log_must cd $SNAPDIR
log_must tar cf $SNAPSHOT_TARDIR/snapshot.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
-log_must mkdir $TESTDIR/original
-log_must mkdir $TESTDIR/snapshot
+log_must mkdir $TESTDIR/original $TESTDIR/snapshot
CWD=$PWD
-cd $TESTDIR/original || log_fail "Could not cd $TESTDIR/original"
+log_must cd $TESTDIR/original
log_must tar xf $SNAPSHOT_TARDIR/original.tar
-cd $TESTDIR/snapshot || log_fail "Could not cd $TESTDIR/snapshot"
+log_must cd $TESTDIR/snapshot
log_must tar xf $SNAPSHOT_TARDIR/snapshot.tar
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
log_must directory_diff $TESTDIR/original $TESTDIR/snapshot
log_pass "Directory structures match."
{
typeset -i i=1
while [ $i -lt $COUNT ]; do
- snapexists $SNAPFS.$i
- if [[ $? -eq 0 ]]; then
- log_must zfs destroy $SNAPFS.$i
- fi
+ snapexists $SNAPFS.$i && log_must zfs destroy $SNAPFS.$i
- if [[ -e $SNAPDIR.$i ]]; then
- log_must rm -rf $SNAPDIR.$i > /dev/null 2>&1
+ if [ -e $SNAPDIR.$i ]; then
+ log_must rm -rf $SNAPDIR.$i
fi
(( i = i + 1 ))
done
- if [[ -e $TESTDIR ]]; then
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+ if [ -e $TESTDIR ]; then
+ log_must rm -rf $TESTDIR/*
fi
}
log_onexit cleanup
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
typeset -i COUNT=10
done
log_note "Remove all of the original files"
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/file* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && log_must rm -rf $TESTDIR/file*
i=1
while [[ $i -lt $COUNT ]]; do
function cleanup
{
- snapexists $SNAPFS
- [[ $? -eq 0 ]] && \
- log_must zfs destroy $SNAPFS
+ snapexists $SNAPFS && log_must zfs destroy $SNAPFS
- [[ -e $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
}
log_assert "Verify that a snapshot of an empty file system remains empty."
log_onexit cleanup
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
log_must zfs snapshot $SNAPFS
FILE_COUNT=`ls -Al $SNAPDIR | grep -v "total 0" | wc -l`
function cleanup
{
- snapexists $SNAPCTR
- if [[ $? -eq 0 ]]; then
- log_must zfs destroy $SNAPCTR
- fi
+ snapexists $SNAPCTR && log_must zfs destroy $SNAPCTR
- if [[ -e $SNAPDIR1 ]]; then
- log_must rm -rf $SNAPDIR1 > /dev/null 2>&1
+ if [ -e $SNAPDIR1 ]; then
+ log_must rm -rf $SNAPDIR1
fi
- if [[ -e $TESTDIR ]]; then
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+ if [ -e $TESTDIR ]; then
+ log_must rm -rf $TESTDIR/*
fi
}
function cleanup
{
if [[ -d $CWD ]]; then
- cd $CWD || log_fail "Could not cd $CWD"
+ log_must cd $CWD
fi
- snapexists $SNAPCTR
- if [[ $? -eq 0 ]]; then
- log_must zfs destroy $SNAPCTR
- fi
+ snapexists $SNAPCTR && log_must zfs destroy $SNAPCTR
- if [[ -e $SNAPDIR1 ]]; then
- log_must rm -rf $SNAPDIR1 > /dev/null 2>&1
+ if [ -e $SNAPDIR1 ]; then
+ log_must rm -rf $SNAPDIR1
fi
- if [[ -e $TESTDIR1 ]]; then
- log_must rm -rf $TESTDIR1/* > /dev/null 2>&1
+ if [ -e $TESTDIR1 ]; then
+ log_must rm -rf $TESTDIR1/*
fi
- if [[ -d "$SNAPSHOT_TARDIR" ]]; then
- log_must rm -rf $SNAPSHOT_TARDIR > /dev/null 2>&1
+ if [ -d "$SNAPSHOT_TARDIR" ]; then
+ log_must rm -rf $SNAPSHOT_TARDIR
fi
}
typeset -i COUNT=21
typeset OP=create
-[[ -n $TESTDIR1 ]] && rm -rf $TESTDIR1/* > /dev/null 2>&1
+[ -n "$TESTDIR1" ] && rm -rf $TESTDIR1/*
log_note "Create files in the zfs dataset ..."
log_note "Create a tarball from $TESTDIR1 contents..."
CWD=$PWD
-cd $TESTDIR1 || log_fail "Could not cd $TESTDIR1"
+log_must cd $TESTDIR1
log_must tar cf $SNAPSHOT_TARDIR/original.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
log_note "Create a snapshot and mount it..."
log_must zfs snapshot $SNAPCTR
log_note "Remove all of the original files..."
-log_must rm -f $TESTDIR1/file* > /dev/null 2>&1
+log_must rm -f $TESTDIR1/file*
log_note "Create tarball of snapshot..."
CWD=$PWD
-cd $SNAPDIR1 || log_fail "Could not cd $SNAPDIR1"
+log_must cd $SNAPDIR1
log_must tar cf $SNAPSHOT_TARDIR/snapshot.tar .
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
-log_must mkdir $TESTDIR1/original
-log_must mkdir $TESTDIR1/snapshot
+log_must mkdir $TESTDIR1/original $TESTDIR1/snapshot
CWD=$PWD
-cd $TESTDIR1/original || log_fail "Could not cd $TESTDIR1/original"
+log_must cd $TESTDIR1/original
log_must tar xf $SNAPSHOT_TARDIR/original.tar
-cd $TESTDIR1/snapshot || log_fail "Could not cd $TESTDIR1/snapshot"
+log_must cd $TESTDIR1/snapshot
log_must tar xf $SNAPSHOT_TARDIR/snapshot.tar
-cd $CWD || log_fail "Could not cd $CWD"
+log_must cd $CWD
log_must directory_diff $TESTDIR1/original $TESTDIR1/snapshot
log_pass "Directory structures match."
{
typeset -i i=1
while [ $i -lt $COUNT ]; do
- snapexists $SNAPCTR.$i
- if [[ $? -eq 0 ]]; then
- log_must zfs destroy $SNAPCTR.$i
- fi
+ snapexists $SNAPCTR.$i && log_must zfs destroy $SNAPCTR.$i
- if [[ -e $SNAPDIR.$i ]]; then
- log_must rm -rf $SNAPDIR1.$i > /dev/null 2>&1
+ if [ -e $SNAPDIR.$i ]; then
+ log_must rm -rf $SNAPDIR1.$i
fi
(( i = i + 1 ))
done
- if [[ -e $SNAPDIR1 ]]; then
- log_must rm -rf $SNAPDIR1 > /dev/null 2>&1
+ if [ -e $SNAPDIR1 ]; then
+ log_must rm -rf $SNAPDIR1
fi
- if [[ -e $TESTDIR ]]; then
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+ if [ -e $TESTDIR ]; then
+ log_must rm -rf $TESTDIR/*
fi
}
log_onexit cleanup
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
typeset -i COUNT=10
done
log_note "Remove all of the original files"
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR1/file* > /dev/null 2>&1
+[ -n "$TESTDIR1" ] && log_must rm -f $TESTDIR1/file*
i=1
while [[ $i -lt $COUNT ]]; do
- FILECOUNT=`ls $SNAPDIR1.$i/file* | wc -l`
+ FILECOUNT=`echo $SNAPDIR1.$i/file* | wc -w`
typeset j=1
while [ $j -lt $FILECOUNT ]; do
log_must file_check $SNAPDIR1.$i/file$j $j
(( i = i + 1 ))
done
- [[ -e $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
}
log_assert "Verify that destroying snapshots returns space to the pool."
log_onexit cleanup
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
typeset -i COUNT=10
typeset -i i=1
while [[ $i -lt $COUNT ]]; do
- log_must rm -rf $TESTDIR/file$i > /dev/null 2>&1
+ log_must rm -f $TESTDIR/file$i
log_must zfs destroy $SNAPFS.$i
(( i = i + 1 ))
{
snapexists $SNAPPOOL && destroy_dataset $SNAPPOOL -r
- [[ -e $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
}
log_assert "Verify that rollback to a snapshot created by snapshot -r succeeds."
log_onexit cleanup
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
typeset -i COUNT=10
datasetexists $ctrfs && destroy_dataset $ctrfs -r
snapexists $snappool && destroy_dataset $snappool -r
- [[ -e $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+	[ -e "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
}
log_assert "Verify snapshots from 'snapshot -r' can be used for zfs send/recv"
fsdir=/$ctrfs
snapdir=$fsdir/.zfs/snapshot/$TESTSNAP
-[[ -n $TESTDIR ]] && \
- log_must rm -rf $TESTDIR/* > /dev/null 2>&1
+[ -n "$TESTDIR" ] && log_must rm -rf $TESTDIR/*
typeset -i COUNT=10
function cleanup
{
- [[ -e $TESTDIR1 ]] && \
- log_must rm -rf $TESTDIR1/* > /dev/null 2>&1
+	[ -e "$TESTDIR1" ] && log_must rm -rf $TESTDIR1/*
snapexists $SNAPCTR && destroy_dataset $SNAPCTR
log_must zpool create -f $TESTPOOL $DISK
conf="$TESTDIR/vz001"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
assert_top_zap $TESTPOOL $DISK "$conf"
assert_leaf_zap $TESTPOOL $DISK "$conf"
log_must zpool create -f $TESTPOOL $DISKS
conf="$TESTDIR/vz002"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
assert_has_sentinel "$conf"
for DISK in $DISKS; do
log_must zpool create -f $TESTPOOL mirror $DISKS
conf="$TESTDIR/vz003"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
assert_has_sentinel "$conf"
assert_top_zap $TESTPOOL "type: 'mirror'" "$conf"
# Make the pool.
conf="$TESTDIR/vz004"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
assert_has_sentinel "$conf"
orig_top=$(get_top_vd_zap $DISK $conf)
orig_leaf=$(get_leaf_vd_zap $DISK $conf)
disk2=$(echo $DISKS | awk '{print $2}')
log_must zpool attach $TESTPOOL $DISK $disk2
log_must zpool wait -t resilver $TESTPOOL
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
# Ensure top-level ZAP was transferred successfully.
new_top=$(get_top_vd_zap "type: 'mirror'" $conf)
#
log_must zpool detach $TESTPOOL $DISK
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
final_top=$(get_top_vd_zap $disk2 $conf)
final_leaf=$(get_leaf_vd_zap $disk2 $conf)
# Make the pool.
conf="$TESTDIR/vz005"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
assert_has_sentinel "$conf"
orig_top=$(get_top_vd_zap $DISK $conf)
orig_leaf=$(get_leaf_vd_zap $DISK $conf)
log_must zpool import $TESTPOOL
# Verify that ZAPs persisted.
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
new_top=$(get_top_vd_zap $DISK $conf)
new_leaf=$(get_leaf_vd_zap $DISK $conf)
log_must zpool add -f $TESTPOOL ${DISK_ARR[1]}
conf="$TESTDIR/vz006"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
assert_has_sentinel "$conf"
orig_top=$(get_top_vd_zap ${DISK_ARR[1]} $conf)
log_assert "Per-vdev ZAPs persist correctly on the original pool after split."
conf="$TESTDIR/vz007"
-log_must zdb -PC $TESTPOOL > $conf
+log_must eval "zdb -PC $TESTPOOL > $conf"
assert_has_sentinel "$conf"
orig_top=$(get_top_vd_zap "type: 'mirror'" $conf)
function check_for
{
- grep "^${1}," $tmpfile >/dev/null 2>/dev/null
- if [ $? -ne 0 ]; then
- log_fail "cannot find stats for $1"
- fi
+ log_must grep -q "^${1}," $tmpfile
}
# by default, all stats and histograms for all pools
-log_must zpool_influxdb > $tmpfile
+log_must eval "zpool_influxdb > $tmpfile"
STATS="
zpool_io_size
done
# scan stats aren't expected to be there until after a scan has started
-zpool scrub $TESTPOOL
-zpool_influxdb > $tmpfile
+log_must zpool scrub $TESTPOOL
+log_must eval "zpool_influxdb > $tmpfile"
check_for zpool_scan_stats
log_pass "zpool_influxdb gathers statistics"
for swapdev in $SAVESWAPDEVS
do
if ! is_swap_inuse $swapdev ; then
- log_must swap_setup $swapdev >/dev/null 2>&1
+ swap_setup $swapdev
fi
done
voldev=${ZVOL_DEVDIR}/$TESTPOOL/$TESTVOL
if is_swap_inuse $voldev ; then
- log_must swap_cleanup $voldev
+ swap_cleanup $voldev
fi
default_zvol_cleanup