else
AWK='awk'
fi
- log_must zpool sync -f
+ sync_all_pools true
zdb -dddddd $ds $objnum | $AWK -v pad=$((4<<20)) -v bs=512 '
/^$/ { looking = 0 }
looking {
{
typeset cksum
sync
+ sync_all_pools
cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
| awk -F= '{print $7}')
echo $cksum
return 0
}
+#
+# Sync all pools
+#
+# $1 boolean to force uberblock (and config, including zpool cache file) update
+#
+function sync_all_pools #<force>
+{
+ typeset force=${1:-false}
+
+ if [[ $force == true ]]; then
+ log_must zpool sync -f
+ else
+ log_must zpool sync
+ fi
+
+ return 0
+}
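+
+# Example usage (illustrative): most callers need only the plain
+# variant; pass 'true' when the uberblock and cached pool config must
+# also reach disk, e.g. before inspecting labels with zdb.
+#
+#	sync_all_pools		# flush dirty data in all imported pools
+#	sync_all_pools true	# additionally force an uberblock update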
+
#
# Wait for the zpool 'freeing' property to drop to zero.
#
bs=1M count=$blocks
((blocks = blocks + 25))
done
- log_must sync_pool $TESTPOOL
+ sync_pool $TESTPOOL
log_must zpool list -v $TESTPOOL
# Verify the files were written in the special class vdevs
log_must zpool remove $TESTPOOL $CLASS_DISK0
sleep 5
- log_must sync_pool $TESTPOOL
+ sync_pool $TESTPOOL
sleep 1
log_must zdb -bbcc $TESTPOOL
log_must zpool remove $TESTPOOL $CLASS_DISK0
sleep 5
-log_must sync_pool $TESTPOOL
+sync_pool $TESTPOOL
sleep 1
log_must zdb -bbcc $TESTPOOL
log_onexit cleanup
log_must file_write -o create -f "$TESTDIR/file" -b 1048576 -c 20 -d R
-log_must zpool sync
+sync_all_pools
log_must eval "kstat dbufs > $DBUFS_FILE"
log_must eval "kstat dbufstats '' > $DBUFSTATS_FILE"
log_onexit cleanup
log_must file_write -o create -f "$TESTDIR/file" -b 1048576 -c 1 -d R
-log_must zpool sync
+sync_all_pools
objid=$(get_objnum "$TESTDIR/file")
log_note "Object ID for $TESTDIR/file is $objid"
EOF
log_must mkdir $dir
-sync
+sync_all_pools
log_must_program $TESTPOOL - <<-EOF
ans, setpoint = zfs.get_prop("$fs", "written@$TESTSNAP")
# to the device. This 'sync' command prevents that from
# happening.
####################
- log_must zpool sync ${pool}
+ sync_pool ${pool}
}
function histo_check_test_pool
{
verify_disk_count "$DISKS" 2
default_mirror_setup_noexit $DISKS
-log_must zpool sync
+sync_all_pools
set -A bad_flags a b c e g h i j k l n o p q r s t u v w x y \
B C D E F G H I J K L M N O P Q R S T U V W X Y Z \
mkdir $TESTDIR/dir$x
done
-log_must zpool sync
+sync_all_pools
# Get list of all objects, but filter out user/group objects which don't
# appear when using object or object range arguments
default_mirror_setup_noexit $DISKS
file_write -o create -w -f $init_data -b $blksize -c $write_count
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
output=$(zdb -r $TESTPOOL/$TESTFS file1 $tmpfile)
log_must cmp $init_data $tmpfile
default_mirror_setup_noexit $DISKS
file_write -o create -w -f $init_data -b $blksize -c $write_count
log_must echo "zfs" >> $init_data
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
output=$(zdb -r $TESTPOOL/$TESTFS file1 $tmpfile)
log_must cmp $init_data $tmpfile
#
# Sync up the filesystem
#
-sync
+sync_all_pools
#
# Verify 'zfs list' can correctly list the space charged
# sync between each write to make sure a new entry is created
for i in {0..4}; do
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/testfile$i
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
done
check_ll_len "5 entries" "Unexpected livelist size"
# sync between each write to allow for a condense of the previous entry
for i in {0..4}; do
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/testfile$i
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
done
check_ll_len "6 entries" "Condense did not occur"
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE0
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE1
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
# snapshot and clone share 'atestfile', 33 percent
check_livelist_gone
log_must zfs destroy -R $TESTPOOL/$TESTCLONE
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE0
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE1
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE2
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
# snapshot and clone share 'atestfile', 25 percent
check_livelist_exists $TESTCLONE
log_must rm /$TESTPOOL/$TESTCLONE/atestfile
set_tunable32 "$1" 0
log_must zfs clone $TESTPOOL/$TESTFS1@snap $TESTPOOL/$TESTCLONE
for i in {1..5}; do
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/out
done
log_must zfs destroy $TESTPOOL/$TESTCLONE
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
[[ "1" == "$(get_tunable "$1")" ]] || \
log_fail "delete/condense race test failed"
}
set_tunable32 "$1" 0
log_must zfs clone $TESTPOOL/$TESTFS1@snap $TESTPOOL/$TESTCLONE
for i in {1..5}; do
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/out
done
log_must zpool export $TESTPOOL
set_tunable32 "$1" 0
log_must zfs clone $TESTPOOL/$TESTFS1@snap $TESTPOOL/$TESTCLONE
for i in {1..5}; do
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/out
done
# overwrite the file shared with the origin to trigger disable
log_must mkfile 100m /$TESTPOOL/$TESTCLONE/atestfile
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
[[ "1" == "$(get_tunable "$1")" ]] || \
log_fail "disable/condense race test failed"
log_must zfs destroy $TESTPOOL/$TESTCLONE
log_must zfs create $TESTPOOL/$TESTFS1
log_must mkfile 100m /$TESTPOOL/$TESTFS1/atestfile
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
log_must zfs snapshot $TESTPOOL/$TESTFS1@snap
# Reduce livelist size to trigger condense more easily
# Note: We sync before and after so all dedup blocks belong to the
# same TXG, otherwise they won't look identical to the livelist
# iterator due to their logical birth TXG being different.
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-0
log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-1
log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-2
log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-3
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
check_livelist_exists $TESTCLONE
# Introduce "double frees"
# was what triggered past panics.
# Note: Similar to the previous step, we sync before and after our
# deletions so all the entries end up in the same TXG.
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
log_must rm /$TESTPOOL/$TESTCLONE/data-dup-2
log_must rm /$TESTPOOL/$TESTCLONE/data-dup-3
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
check_livelist_exists $TESTCLONE
log_must zfs destroy $TESTPOOL/$TESTCLONE
function clone_write_file
{
log_must mkfile 1m /$TESTPOOL/$1/$2
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
}
function test_one_empty
function check_livelist_gone
{
log_must zpool wait -t free $TESTPOOL
- zpool sync
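+	# flush the pools so zdb, which reads from disk, sees the destroy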
+ sync_all_pools
zdb -vvvvv $TESTPOOL | grep "Livelist" && \
log_fail "zdb found Livelist after the clone is deleted."
}
# Create initial files and pause condense zthr on next execution
log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A
log_must mkfile 1m /$TESTPOOL2/$TESTCLONE/B
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
set_tunable32 LIVELIST_CONDENSE_SYNC_PAUSE 1
# Add a new dev and remove the old one
set_tunable32 LIVELIST_CONDENSE_NEW_ALLOC 0
# Trigger a condense
log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
# Write remapped blkptrs which will modify the livelist mid-condense
log_must mkfile 1m /$TESTPOOL2/$TESTCLONE/B
# Resume condense thr
set_tunable32 LIVELIST_CONDENSE_SYNC_PAUSE 0
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
# Check that we've added new ALLOC blkptrs during the condense
[[ "0" < "$(get_tunable LIVELIST_CONDENSE_NEW_ALLOC)" ]] || \
log_fail "removal/condense test failed"
log_must touch $CRYPT_MNTPFS/file.dat
log_must mount $RO $TESTFS/crypt $CRYPT_MNTPFS
log_must umount -f $CRYPT_MNTPFS
-zpool sync $TESTPOOL
+sync_pool $TESTPOOL
# 6. Re-import the pool readonly
log_must zpool export $TESTPOOL
log_must eval "zfs send -i $init_snap $inc_snap > $inc_bkup"
log_must touch /$TESTDIR/bar
-sync
+sync_all_pools
set -A badargs \
"" "nonexistent-snap" "blah@blah" "-d" "-d nonexistent-dataset" \
# 2. Create two equal-sized large files.
log_must mkfile 5M $origdir/file1
log_must mkfile 5M $origdir/file2
-log_must sync
+sync_all_pools
# 3. Snapshot the filesystem.
log_must zfs snapshot $orig@1
# 4. Remove one of the two large files.
log_must rm $origdir/file2
-log_must sync
+sync_all_pools
# 5. Create a refquota larger than one file, but smaller than both.
log_must zfs set refquota=8M $orig
if datasetnonexists $snap; then
log_must cp /etc/passwd $fname
if is_linux || is_freebsd; then
+ sync_all_pools
log_must sync
else
#
log_must zfs snapshot $POOL/fs@a
while true; do
log_must find $mntpnt/ -type f -delete
- sync
+ sync_all_pools
log_must mkfiles "$mntpnt/" 4000
- sync
+ sync_all_pools
# check if we started reusing objects
object=$(ls -i $mntpnt | sort -n | awk -v object=$object \
'{if ($1 <= object) {exit 1}} END {print $1}')
log_must dd if=/dev/urandom of=/$TESTDIR/testfile bs=10M count=1
-log_must zpool sync
+sync_all_pools
log_must zpool offline -f $TESTPOOL $DISK3
log_must wait_for_degraded $TESTPOOL
esac
dd if=/dev/zero of=$fbase.$i seek=512 bs=1024 count=$wcount conv=notrunc \
> /dev/null 2>&1
- log_must sync
+ sync_all_pools
+	log_must sync # ensure the vdev files are written out
log_must zpool scrub -w $TESTPOOL1
check_err $TESTPOOL1 && \
log_must zfs set compression=off recordsize=16k $POOL
# create a file full of zeros
log_must mkfile -v $FILESIZE $FILEPATH
-log_must zpool sync $POOL
+sync_pool $POOL
# run once and observe the checksum errors
damage_and_repair 1
if [ "$RW" == "write" ] ; then
log_must mkfile $FILESIZE $FILEPATH
- log_must zpool sync $POOL
+ sync_pool $POOL
fi
log_must zinject -c all
if [ "$RW" == "write" ] ; then
log_must mkfile $FILESIZE $MOUNTDIR/file
- log_must zpool sync $POOL
+ sync_pool $POOL
else
log_must zpool scrub $POOL
wait_scrubbed $POOL
log_must zpool create -f $TESTPOOL $DISK1
log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=1M count=30
-log_must sync
+sync_all_pools
log_must zpool initialize $TESTPOOL
log_fail "Initializing did not start"
log_must dd if=/dev/urandom of=/$TESTPOOL/file2 bs=1M count=30
-log_must sync
+sync_all_pools
log_must zdb -cc $TESTPOOL
log_must zpool create -f $TESTPOOL $DEVICE1 $DEVICE2 \
log $DEVICE3 cache $DEVICE4 spare $DEVICE5
-log_must zpool sync
+sync_all_pools
# Remove each type of vdev and verify the label can be cleared.
for dev in $DEVICE5 $DEVICE4 $DEVICE3 $DEVICE2; do
log_must zpool remove $TESTPOOL $dev
- log_must zpool sync $TESTPOOL
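+	# force an uberblock/config update so the removal is committed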
+ sync_pool $TESTPOOL true
log_must zpool labelclear $dev
log_mustnot zdb -lq $dev
done
i=0
while [[ $i -lt ${#args[*]} ]]; do
- log_must sync_pool $TESTPOOL
+ sync_pool $TESTPOOL
log_must zpool offline $TESTPOOL $disk
check_state $TESTPOOL $disk "offline"
if [[ $? != 0 ]]; then
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
# Write some data to the pool
log_must generate_random_file /$TESTPOOL/data $SMALL_FILE_SIZE
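+# commit the new data to disk while the device is still removed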
+sync_pool $TESTPOOL
# 4. "Plug back" disk.
insert_disk $REMOVED_DISK $scsi_host
# 5. Reopen a pool and verify if removed disk is marked online again.
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
# Write some data to the pool
log_must generate_random_file /$TESTPOOL/data $SMALL_FILE_SIZE
+sync_pool $TESTPOOL
# 4. "Plug back" disk.
insert_disk $REMOVED_DISK $scsi_host
# 5. Reopen a pool and verify if removed disk is marked online again.
# 3. Write a test file to the pool and calculate its checksum.
TESTFILE=/$TESTPOOL/data
log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
+sync_pool $TESTPOOL
TESTFILE_MD5=$(md5digest $TESTFILE)
# 4. Execute scrub.
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
# 3. Write test file to pool.
log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
+sync_pool $TESTPOOL
# 4. Execute scrub.
# add delay to I/O requests for remaining disk in pool
log_must zinject -d $DISK2 -D125:1 $TESTPOOL
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
# 3. Write test file to pool.
log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
+sync_pool $TESTPOOL
# 4. "Plug back" disk.
insert_disk $REMOVED_DISK $scsi_host
# 1. Write some data and detach the first drive so it has resilver work to do
log_must file_write -b 524288 -c 1024 -o create -d 0 -f $mntpnt/biggerfile1
-log_must sync
+sync_all_pools
log_must zpool detach $TESTPOOL $DISK2
# 2. Repeat the process with a second disk
log_must file_write -b 524288 -c 1024 -o create -d 0 -f $mntpnt/biggerfile2
-log_must sync
+sync_all_pools
log_must zpool detach $TESTPOOL $DISK3
# 3. Reattach the drives, causing the second drive's resilver to be deferred
# Create 1G of additional data
mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
log_must file_write -b 1048576 -c 1024 -o create -d 0 -f $mntpnt/biggerfile
-log_must sync
+sync_all_pools
log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
log_must zpool scrub $TESTPOOL
log_must zfs create -o copies=3 $TESTPOOL/$TESTFS2
typeset mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS2)
log_must mkfile 10m $mntpnt/file
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
log_must zinject -a -t data -C 0,1 -e io $mntpnt/file
# Create 2G of additional data
mntpnt=$(get_prop mountpoint $TESTPOOL)
log_must file_write -b 2097152 -c 1024 -o create -d 0 -f $mntpnt/biggerfile
- log_must sync
+ sync_all_pools
# temporarily prevent resilvering progress, so it will not finish too early
log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
log_must truncate -s $LARGESIZE "$LARGEFILE"
log_must zpool create $TESTPOOL "$LARGEFILE"
log_must mkfile $(( floor(LARGESIZE * 0.80) )) /$TESTPOOL/file
-log_must zpool sync
+sync_all_pools
new_size=$(du -B1 "$LARGEFILE" | cut -f1)
log_must test $new_size -le $LARGESIZE
log_must zpool trim $TESTPOOL
log_must set_tunable64 TRIM_METASLAB_SKIP 0
-log_must zpool sync
+sync_all_pools
while [[ "$(trim_progress $TESTPOOL $LARGEFILE)" -lt "100" ]]; do
sleep 0.5
done
# space usage of the new metaslabs.
log_must zpool trim $TESTPOOL
-log_must zpool sync
+sync_all_pools
while [[ "$(trim_progress $TESTPOOL $LARGEFILE)" -lt "100" ]]; do
sleep 0.5
done
log_must zpool create $TESTPOOL "$LARGEFILE"
log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=1048576 count=64
-log_must zpool sync
+sync_all_pools
log_must zpool trim $TESTPOOL
[[ -z "$(trim_progress $TESTPOOL $DISK1)" ]] && \
log_fail "Trimming did not start"
log_must dd if=/dev/urandom of=/$TESTPOOL/file2 bs=1048576 count=64
-log_must zpool sync
+sync_all_pools
log_must zpool export $TESTPOOL
log_must zdb -e -p "$TESTDIR" -cc $TESTPOOL
log_must zpool detach $TESTPOOL $DISK2
get_disklist $TESTPOOL | grep $DISK3 >/dev/null && \
log_must zpool detach $TESTPOOL $DISK3
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
}
typeset pid
for i in {1..50}; do
log_must dd if=/dev/urandom of="/$CLONE/testfile$i" bs=1k count=512
# Force each new file to be tracked by a new livelist
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
done
log_must zfs destroy "$CLONE"
test_wait
sleep 10
log_must zinject -c all
-log_must zpool sync
+sync_all_pools
# Log txg sync times for reference and the zpool event summary.
if is_freebsd; then
log_must eval "dd if=/$mntpnt/file1 of=/dev/null bs=1048576 &"
sleep 10
log_must zinject -c all
-log_must zpool sync
+sync_all_pools
wait
# 5. Verify a "deadman" event is posted. The first appears after 5
typeset stamp=${perm}.${user}.$RANDOM
user_run $user zfs set "$user:ts=$stamp" $dtst
- zpool sync ${dtst%%/*}
+ sync_pool ${dtst%%/*}
if [[ $stamp != $(get_prop "$user:ts" $dtst) ]]; then
return 1
fi
log_must eval "$fullcmd"
# Collect the new events and verify there are some.
- log_must zpool sync -f
+ sync_all_pools true
log_must eval "zpool events >$TMP_EVENTS 2>/dev/null"
log_must eval "zpool events -v > $TMP_EVENTS_FULL 2>/dev/null"
# 2. Simulate physical removal of one device
remove_disk $removedev
log_must mkfile 1m $mntpnt/file
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
# 3. Verify the device is unavailable.
log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"
# 2. Simulate physical removal of one device
remove_disk $removedev
log_must mkfile 1m $mntpnt/file
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
# 3. Verify the device is handled by the spare.
log_must wait_hotspare_state $TESTPOOL $sparedev "INUSE"
# 3. Simulate physical removal of one device
remove_disk $removedev
log_must mkfile 1m $mntpnt/file
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
# 4. Verify the device is unavailable
log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"
log_must zfs create -o compression=on $TESTPOOL/fs
mntpt=$(get_prop mountpoint $TESTPOOL/fs)
write_compressible $mntpt 32m 1 1024k "testfile"
-log_must sync
+sync_all_pools
log_must zfs umount $TESTPOOL/fs
log_must zfs mount $TESTPOOL/fs
log_must zinject -a -t data -e decompress -f 20 $mntpt/testfile.0
# Create 20ms IOs
log_must zinject -d $DISK -D20:100 $TESTPOOL
log_must mkfile 1048576 /$TESTPOOL/testfile
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
log_must zinject -c all
SLOW_IOS=$(zpool status -sp | grep "$DISK" | awk '{print $6}')
#
log_must set_tunable64 ASYNC_BLOCK_MAX_BLOCKS 100
-log_must sync
+sync_all_pools
log_must zfs destroy $TEST_FS
#
log_must zfs create $LOGSM_POOL/fs
log_must dd if=/dev/urandom of=/$LOGSM_POOL/fs/00 bs=128k count=10
-log_must sync
+sync_all_pools
log_must dd if=/dev/urandom of=/$LOGSM_POOL/fs/00 bs=128k count=10
-log_must sync
+sync_all_pools
log_must set_tunable64 KEEP_LOG_SPACEMAPS_AT_EXPORT 1
log_must zpool export $LOGSM_POOL
for interval in $(seq $MMP_INTERVAL_MIN 200 $MMP_INTERVAL_DEFAULT); do
log_must set_tunable64 MULTIHOST_FAIL_INTERVALS $fails
log_must set_tunable64 MULTIHOST_INTERVAL $interval
- log_must sync_pool $TESTPOOL
+ sync_pool $TESTPOOL
typeset mmp_fail=$(zdb $TESTPOOL 2>/dev/null |
awk '/mmp_fail/ {print $NF}')
if [ $fails -ne $mmp_fail ]; then
(( $ret != $ENOSPC )) && \
log_fail "file.$i returned: $ret rather than ENOSPC."
- log_must zpool sync -f
+ sync_all_pools true
done
log_mustnot_expect space zfs create $TESTPOOL/$TESTFS/subfs
done
log_must kill $killpid
-sync
+sync_all_pools
+log_must sync
typeset dir=$(get_device_dir $DISKS)
verify_filesys "$TESTPOOL" "$TESTPOOL/$TESTFS" "$dir"
done
log_must kill $killpid
-sync
+sync_all_pools
+log_must sync
typeset dir=$(get_device_dir $DISKS)
verify_filesys "$TESTPOOL" "$TESTPOOL/$TESTFS" "$dir"
done
log_must kill $killpid
-sync
+sync_all_pools
log_pass
for i in {1..20}; do
log_must zfs snapshot "$FS@testsnapshot$i"
done
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
#
# Read the debug message file in small chunks to make sure that the read is
for i in {1..20}; do
log_must zfs snapshot "$FS@testsnapshot$i"
done
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
msgs1=$(mktemp) || log_fail
msgs2=$(mktemp) || log_fail
function sync_n
{
for i in {1..$1}; do
- log_must zpool sync $TESTPOOL
+ sync_pool $TESTPOOL
done
return 0
}
log_must user_run $PUSER mkdir $PRJDIR
log_must chattr +P -p $PRJID1 $PRJDIR
log_must user_run $PUSER mkfile 100m $PRJDIR/qf
-sync
+sync_all_pools
log_note "set projectquota at a smaller size than its current usage"
log_must zfs set projectquota@$PRJID1=90m $QFS
[[ -d $PRJDIR1 ]] && log_must rm -rf $PRJDIR1
[[ -d $PRJDIR2 ]] && log_must rm -rf $PRJDIR2
[[ -d $PRJDIR3 ]] && log_must rm -rf $PRJDIR3
- sync
+ sync_all_pools
return 0
}
log_must user_run $PUSER mkdir $PRJDIR
log_must chattr +P -p $PRJID1 $PRJDIR
log_must user_run $PUSER mkfile 50m $PRJDIR/qf
-sync
+sync_all_pools
log_must zfs snapshot $snap_fs
log_must user_run $PUSER mkdir $PRJDIR
log_must chattr +P -p $PRJID1 $PRJDIR
log_must user_run $PUSER mkfile 50m $PRJDIR/qf
-sync
+sync_all_pools
typeset snapfs=$QFS@snap
log_must zfs snapshot $FS@snap20M
log_must rm $mntpnt/$TESTFILE
-log_must sync
+sync_all_pools
log_must zfs set refquota=10M $FS
log_mustnot zfs rollback $FS@snap20M
# We want to make sure that the removal started
# before issuing the callback.
#
- sync
+ sync_pool $pool
log_must is_pool_removing $pool
log_must $callback "$@"
# Run sync once to ensure that the config actually changed.
#
log_must zpool add $TESTPOOL $DISK2
-log_must sync
+sync_all_pools
#
# Ensure that zdb does not find any problems with this.
mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS)
log_must dd if=/dev/urandom of=$mntpnt/file bs=1M count=32
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS)
log_must dd if=/dev/urandom of=$mntpnt/file bs=1M count=32
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
# offline then online a vdev to introduce a new DTL range after current
# scan, which should restart (or defer) the resilver
log_must zpool offline $TESTPOOL1 ${VDEV_FILES[2]}
- log_must zpool sync $TESTPOOL1
+ sync_pool $TESTPOOL1
log_must zpool online $TESTPOOL1 ${VDEV_FILES[2]}
- log_must zpool sync $TESTPOOL1
+ sync_pool $TESTPOOL1
# there should now be 2 resilver starts w/o defer, 1 with defer
verify_restarts ' after offline/online' "${RESTARTS[1]}" "${VDEVS[1]}"
log_must is_pool_resilvered $TESTPOOL1
# wait for a few txg's to see if a resilver happens
- log_must zpool sync $TESTPOOL1
- log_must zpool sync $TESTPOOL1
+ sync_pool $TESTPOOL1
+ sync_pool $TESTPOOL1
# there should now be 2 resilver starts
verify_restarts ' after resilver' "${RESTARTS[3]}" "${VDEVS[3]}"
log_note "waiting for read errors to start showing up"
for iter in {0..59}
do
- zpool sync $TESTPOOL1
+ sync_pool $TESTPOOL1
err=$(zpool status $TESTPOOL1 | grep ${VDEV_FILES[0]} | awk '{print $3}')
(( $err > 0 )) && break
sleep 1
(( $finish == 0 )) && log_fail "resilver took too long to finish"
# wait a few syncs to ensure that zfs does not restart the resilver
-log_must zpool sync $TESTPOOL1
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
+sync_pool $TESTPOOL1
# check if resilver was restarted
start=$(zpool events | grep "sysevent.fs.zfs.resilver_start" | wc -l)
mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS)
log_must dd if=/dev/urandom of=$mntpnt/file bs=1M count=64
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
# Request a healing or sequential resilver
for replace_mode in "healing" "sequential"; do
log_must cp $mntpnt/file $mntpnt/file$j
done
- log_must sync
+ sync_all_pools
log_must mount $remount_ro $zdev $mntpnt
log_must zfs snap $TESTPOOL/$TESTVOL@snap$i
log_must mount $remount_rw $zdev $mntpnt
for i in {1..1000}; do
log_must rm /$TESTPOOL/$TESTFS2/dir/file-$i
done
-sync
+sync_all_pools
log_must zfs snapshot $TESTPOOL/$TESTFS2@snap2
expected_cksum=$(recursive_cksum /$TESTPOOL/$TESTFS2)
log_must truncate -s 393216 /$TESTPOOL/$TESTFS2/truncated2
log_must rm -f /$TESTPOOL/$TESTFS2/truncated3
log_must rm -f /$TESTPOOL/$TESTFS2/truncated4
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
log_must zfs umount $TESTPOOL/$TESTFS2
log_must zfs mount $TESTPOOL/$TESTFS2
log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS2/truncated3 \
# Ensure the file has been synced out before attempting to
# corrupt its contents.
#
- sync
+ sync_all_pools
#
# Corrupt a pool device to make the pool DEGRADED
log_must mount $VOLUME $MNTPNT
FSTYPE=$NEWFS_DEFAULT_FS
fi
-log_must zpool sync
+sync_all_pools
#
# 2. Freeze TESTVOL
(( i = i + 1 ))
done
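+# make sure all changes are on disk before rolling back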
+sync_pool $TESTPOOL
#
# Now rollback to latest snapshot
log_mustnot zfs snapshot $SNAPPOOL
log_must touch /$TESTPOOL/$TESTFILE
+sync_pool $TESTPOOL
log_must zfs rollback $SNAPPOOL
log_must zfs create $TESTPOOL/$TESTFILE
filesize=$((4096 + ((RANDOM * 691) % 131072) ))
log_must rm -rf $dir
log_must fill_fs $dir 10 10 $filesize 1 R
- zpool sync
+ sync_all_pools
done
log_must du -hs /$TESTPOOL
filesize=$((4096 + ((RANDOM * 691) % 131072) ))
log_must rm -rf $dir
log_must fill_fs $dir 10 10 $filesize 1 R
- zpool sync
+ sync_all_pools
if [[ $((n % 4)) -eq 0 ]]; then
log_must timeout 120 zpool trim -w $TESTPOOL
return
fi
- zpool sync -f
+ sync_all_pools true
((i = i + 1))
done
filesize=$((4096 + ((RANDOM * 691) % 131072) ))
log_must rm -rf $dir
log_must fill_fs $dir 10 10 $filesize 1 R
- zpool sync
+ sync_all_pools
done
log_must du -hs /$TESTPOOL
log_onexit cleanup
log_must cp $srcfile $TESTDIR/$TESTFILE
log_must cp /dev/null $TESTDIR/$TESTFILE
-log_must sync
+sync_all_pools
if [[ -s $TESTDIR/$TESTFILE ]]; then
log_note "$(ls -l $TESTDIR/$TESTFILE)"
log_fail "testfile not truncated"
mkmount_writable $QFS
log_must user_run $QUSER1 mkfile 50m $QFILE
-sync
+sync_all_pools
log_must zfs snapshot $snap_fs
mkmount_writable $QFS
log_must user_run $QUSER1 mkfile 100m $QFILE
-sync
+sync_all_pools
typeset snapfs=$QFS@snap
log_must user_run $QUSER1 mkfiles ${QFILE}_1 $user1_cnt
log_must user_run $QUSER2 mkfiles ${QFILE}_2 $user2_cnt
((grp_cnt = user1_cnt + user2_cnt))
-sync_pool
+sync_all_pools
typeset snapfs=$QFS@snap
log_note "write some data to the $QFS"
mkmount_writable $QFS
log_must user_run $QUSER1 mkfile 100m $QFILE
-sync
+sync_all_pools
log_note "set user|group quota at a smaller size than its current usage"
log_must zfs set userquota@$QUSER1=90m $QFS
[[ -f $QFILE ]] && log_must rm -f $QFILE
[[ -f $OFILE ]] && log_must rm -f $OFILE
- sync
+ sync_all_pools
return 0
}
log_must zfs set userquota@$QUSER1=100m $QFS
mkmount_writable $QFS
log_must user_run $QUSER1 mkfile 50m $QFILE
-sync
+sync_all_pools
log_must zfs snapshot $snap_fs
mkmount_writable $QFS
log_must user_run $QUSER1 mkfile 50m $QFILE
-sync
+sync_all_pools
typeset snapfs=$QFS@snap
log_must user_run $QUSER1 mkfiles ${QFILE}_1 $user1_cnt
log_must user_run $QUSER2 mkfiles ${QFILE}_2 $user2_cnt
-sync_pool
+sync_all_pools
typeset snapfs=$QFS@snap
orig_leaf=$(get_leaf_vd_zap $DISK $conf)
assert_zap_common $TESTPOOL $DISK "top" $orig_top
assert_zap_common $TESTPOOL $DISK "leaf" $orig_leaf
-log_must zpool sync
+sync_all_pools
# Export the pool.
log_must zpool export $TESTPOOL
done
if is_linux || is_freebsd ; then
- log_must sync
+ sync_all_pools
else
log_must lockfs -f $TESTDIR
fi