ZTS: normalize on use of `sync_pool` and `sync_all_pools`
author    Allan Jude <allan@klarasystems.com>
          Thu, 6 Jan 2022 18:57:09 +0000 (13:57 -0500)
committer GitHub <noreply@github.com>
          Thu, 6 Jan 2022 18:57:09 +0000 (10:57 -0800)
- Replace manual `zpool sync` invocations with the `sync_pool`/`sync_all_pools` helpers
- Don't use `log_must sync_pool` as `sync_pool` uses it internally
- Replace many (but not all) uses of `sync` with `sync_pool`

This makes the tests more consistent, and makes searching easier.

Reviewed-by: George Melikov <mail@gmelikov.ru>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Allan Jude <allan@klarasystems.com>
Closes #12894
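
In practice the substitutions follow a few recurring patterns; a representative sketch (illustrative only, not verbatim from any single test):

        # Before (ad-hoc forms):
        log_must zpool sync $TESTPOOL
        log_must sync_pool $TESTPOOL   # redundant: sync_pool already wraps log_must
        sync

        # After (normalized helpers):
        sync_pool $TESTPOOL            # sync a single pool
        sync_all_pools                 # sync every imported pool
        sync_all_pools true            # force uberblock/config update (zpool sync -f)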

95 files changed:
tests/zfs-tests/include/blkdev.shlib
tests/zfs-tests/include/libtest.shlib
tests/zfs-tests/tests/functional/alloc_class/alloc_class_012_pos.ksh
tests/zfs-tests/tests/functional/alloc_class/alloc_class_013_pos.ksh
tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh
tests/zfs-tests/tests/functional/arc/dbufstats_002_pos.ksh
tests/zfs-tests/tests/functional/channel_program/synctask_core/tst.get_written.ksh
tests/zfs-tests/tests/functional/cli_root/zdb/zdb_block_size_histogram.ksh
tests/zfs-tests/tests/functional/cli_root/zdb/zdb_object_range_neg.ksh
tests/zfs-tests/tests/functional/cli_root/zdb/zdb_object_range_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zdb/zdb_recover.ksh
tests/zfs-tests/tests/functional/cli_root/zdb/zdb_recover_2.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_dedup.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_common.kshlib
tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_remount.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_011_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib
tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_007_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool/zpool_colors.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_clear/zpool_clear_001_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_clear_retained.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_duplicates.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_errors.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_initialize/zpool_initialize_verify_checksums.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/zpool_labelclear_removed.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_online/zpool_online_001_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_001_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_002_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_003_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_004_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_005_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_multiple_copies.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_split/zpool_split_resilver.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_partial.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_verify_checksums.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_freeing.ksh
tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh
tests/zfs-tests/tests/functional/deadman/deadman_zio.ksh
tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib
tests/zfs-tests/tests/functional/events/events_common.kshlib
tests/zfs-tests/tests/functional/fault/auto_offline_001_pos.ksh
tests/zfs-tests/tests/functional/fault/decompress_fault.ksh
tests/zfs-tests/tests/functional/fault/zpool_status_-s.ksh
tests/zfs-tests/tests/functional/features/async_destroy/async_destroy_001_pos.ksh
tests/zfs-tests/tests/functional/log_spacemap/log_spacemap_import_logs.ksh
tests/zfs-tests/tests/functional/mmp/mmp_reset_interval.ksh
tests/zfs-tests/tests/functional/no_space/enospc_002_pos.ksh
tests/zfs-tests/tests/functional/online_offline/online_offline_001_pos.ksh
tests/zfs-tests/tests/functional/online_offline/online_offline_002_neg.ksh
tests/zfs-tests/tests/functional/online_offline/online_offline_003_neg.ksh
tests/zfs-tests/tests/functional/procfs/procfs_list_basic.ksh
tests/zfs-tests/tests/functional/procfs/procfs_list_concurrent_readers.ksh
tests/zfs-tests/tests/functional/procfs/procfs_list_stale_read.ksh
tests/zfs-tests/tests/functional/projectquota/projectquota_006_pos.ksh
tests/zfs-tests/tests/functional/projectquota/projectquota_common.kshlib
tests/zfs-tests/tests/functional/projectquota/projectspace_001_pos.ksh
tests/zfs-tests/tests/functional/projectquota/projectspace_002_pos.ksh
tests/zfs-tests/tests/functional/refquota/refquota_007_neg.ksh
tests/zfs-tests/tests/functional/removal/removal.kshlib
tests/zfs-tests/tests/functional/removal/remove_expanded.ksh
tests/zfs-tests/tests/functional/replacement/rebuild_multiple.ksh
tests/zfs-tests/tests/functional/replacement/resilver_restart_001.ksh
tests/zfs-tests/tests/functional/replacement/resilver_restart_002.ksh
tests/zfs-tests/tests/functional/replacement/scrub_cancel.ksh
tests/zfs-tests/tests/functional/rsend/send-wR_encrypted_zvol.ksh
tests/zfs-tests/tests/functional/rsend/send_encrypted_files.ksh
tests/zfs-tests/tests/functional/rsend/send_encrypted_truncated_files.ksh
tests/zfs-tests/tests/functional/slog/slog_014_pos.ksh
tests/zfs-tests/tests/functional/slog/slog_replay_volume.ksh
tests/zfs-tests/tests/functional/snapshot/rollback_001_pos.ksh
tests/zfs-tests/tests/functional/snapshot/rollback_003_pos.ksh
tests/zfs-tests/tests/functional/trim/autotrim_integrity.ksh
tests/zfs-tests/tests/functional/trim/autotrim_trim_integrity.ksh
tests/zfs-tests/tests/functional/trim/trim.kshlib
tests/zfs-tests/tests/functional/trim/trim_integrity.ksh
tests/zfs-tests/tests/functional/truncate/truncate_002_pos.ksh
tests/zfs-tests/tests/functional/userquota/groupspace_001_pos.ksh
tests/zfs-tests/tests/functional/userquota/groupspace_002_pos.ksh
tests/zfs-tests/tests/functional/userquota/groupspace_003_pos.ksh
tests/zfs-tests/tests/functional/userquota/userquota_007_pos.ksh
tests/zfs-tests/tests/functional/userquota/userquota_common.kshlib
tests/zfs-tests/tests/functional/userquota/userspace_001_pos.ksh
tests/zfs-tests/tests/functional/userquota/userspace_002_pos.ksh
tests/zfs-tests/tests/functional/userquota/userspace_003_pos.ksh
tests/zfs-tests/tests/functional/vdev_zaps/vdev_zaps_005_pos.ksh
tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_002_pos.ksh

index 18fc6b3524a08e5e7ecf510b3090aebd5e09a08b..7159b92c080f4dabad8d4d3c5f2be540cfbcd76a 100644 (file)
@@ -595,7 +595,7 @@ function list_file_blocks # input_file
        else
                AWK='awk'
        fi
-       log_must zpool sync -f
+       sync_all_pools true
        zdb -dddddd $ds $objnum | $AWK -v pad=$((4<<20)) -v bs=512 '
            /^$/ { looking = 0 }
            looking {
index 66aec104c27dd5ef48e7f739ad7e8041b932a5b1..24010d9f3732f0ca92e2dd2456f435caba797fac 100644 (file)
@@ -3104,6 +3104,7 @@ function datasetcksum
 {
        typeset cksum
        sync
+       sync_all_pools
        cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
                | awk -F= '{print $7}')
        echo $cksum
@@ -3519,6 +3520,24 @@ function sync_pool #pool <force>
        return 0
 }
 
+#
+# Sync all pools
+#
+# $1 boolean to force uberblock (and config including zpool cache file) update
+#
+function sync_all_pools #<force>
+{
+       typeset force=${1:-false}
+
+       if [[ $force == true ]]; then
+               log_must zpool sync -f
+       else
+               log_must zpool sync
+       fi
+
+       return 0
+}
+
 #
 # Wait for the zpool 'freeing' property to drop to zero.
 #
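
A minimal usage sketch for the new helper (hypothetical snippet; log_must, $TESTPOOL, and $TESTFS are standard ZTS library names):

        # Dirty the pool, then push all dirty data out before inspecting
        # on-disk state with zdb.
        log_must mkfile 5m /$TESTPOOL/$TESTFS/file
        sync_all_pools               # no log_must wrapper: the helper calls it itself
        log_must zdb -dddd $TESTPOOL/$TESTFS

        # Pass true when the uberblock and pool config must also be updated.
        sync_all_pools true          # runs `log_must zpool sync -f` internally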
index b49a8919ed8c837e43fb894330b3b86c76492565..2bce471a7f306dae2bc9943bc394aed240a232a7 100755 (executable)
@@ -85,7 +85,7 @@ function check_removal
                    bs=1M count=$blocks
                ((blocks = blocks + 25))
        done
-       log_must sync_pool $TESTPOOL
+       sync_pool $TESTPOOL
        log_must zpool list -v $TESTPOOL
 
        # Verify the files were written in the special class vdevs
@@ -98,7 +98,7 @@ function check_removal
        log_must zpool remove $TESTPOOL $CLASS_DISK0
 
        sleep 5
-       log_must sync_pool $TESTPOOL
+       sync_pool $TESTPOOL
        sleep 1
 
        log_must zdb -bbcc $TESTPOOL
index 2ce22a6242914a69980808e0643c0403f95aac3f..7a0eb53436a1755dfa44e569c07e73ae6905951c 100755 (executable)
@@ -53,7 +53,7 @@ log_must zpool list -v $TESTPOOL
 log_must zpool remove $TESTPOOL $CLASS_DISK0
 
 sleep 5
-log_must sync_pool $TESTPOOL
+sync_pool $TESTPOOL
 sleep 1
 
 log_must zdb -bbcc $TESTPOOL
index 0577a6b80c0488fe398f26878cffc4f72d1d988a..712309eda72f00e24324f55b3c50d212a67e86b1 100755 (executable)
@@ -75,7 +75,7 @@ log_assert "dbufstats produces correct statistics"
 log_onexit cleanup
 
 log_must file_write -o create -f "$TESTDIR/file" -b 1048576 -c 20 -d R
-log_must zpool sync
+sync_all_pools
 
 log_must eval "kstat dbufs > $DBUFS_FILE"
 log_must eval "kstat dbufstats '' > $DBUFSTATS_FILE"
index 58d401539ed178fdcdcab470405fc3ceee818e87..e253553f07c0504c6aea48b50969956e1ceb40a8 100755 (executable)
@@ -56,7 +56,7 @@ log_assert "dbufs move from mru to mfu list"
 log_onexit cleanup
 
 log_must file_write -o create -f "$TESTDIR/file" -b 1048576 -c 1 -d R
-log_must zpool sync
+sync_all_pools
 
 objid=$(get_objnum "$TESTDIR/file")
 log_note "Object ID for $TESTDIR/file is $objid"
index 9755e6f82e584234977d37f808b12808185c0e39..74fdffdce4fd024e7c716b80d4b15dca149263e4 100755 (executable)
@@ -46,7 +46,7 @@ log_must_program $TESTPOOL - <<-EOF
 EOF
 
 log_must mkdir $dir
-sync
+sync_all_pools
 
 log_must_program $TESTPOOL - <<-EOF
        ans, setpoint = zfs.get_prop("$fs", "written@$TESTSNAP")
index 8d677affb9fedff828ce296eb029f4b947aa9efd..6ad93d87ca9ab23017d98af1a155522e4a0dd98a 100755 (executable)
@@ -128,7 +128,7 @@ function histo_populate_test_pool
        # to the device.  This 'sync' command prevents that from 
        # happening.
        ####################
-       log_must zpool sync ${pool}
+       sync_pool ${pool}
 }
 function histo_check_test_pool
 {
index 43018078804411b7679779c70c7fe9dcc1e8f075..e2014405853df9d0af7b6a8be0215ab31ee83684 100755 (executable)
@@ -40,7 +40,7 @@ verify_runnable "both"
 verify_disk_count "$DISKS" 2
 default_mirror_setup_noexit $DISKS
 
-log_must zpool sync
+sync_all_pools
 
 set -A bad_flags a b c   e   g h i j k l   n o p q r s t u v w x y   \
                    B C D E F G H I J K L M N O P Q R S T U V W X Y Z \
index b7f47d11ad2f79a3526ec39996c018fb41ef5d48..61c031a0ce5b41f437e3b148bb6275a6b96e34b3 100755 (executable)
@@ -76,7 +76,7 @@ for x in $(seq 0 7); do
        mkdir $TESTDIR/dir$x
 done
 
-log_must zpool sync
+sync_all_pools
 
 # Get list of all objects, but filter out user/group objects which don't
 # appear when using object or object range arguments
index d51edf3763d45c7b7e2cd3c7376374fb1ad337bf..b3985614130551fdbf4ee4fdd1d3e5726791502b 100755 (executable)
@@ -47,7 +47,7 @@ verify_disk_count "$DISKS" 2
 
 default_mirror_setup_noexit $DISKS
 file_write -o create -w -f $init_data -b $blksize -c $write_count
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 output=$(zdb -r $TESTPOOL/$TESTFS file1 $tmpfile)
 log_must cmp $init_data $tmpfile
index 91f04c7956384047330bd71527815a268f9a467e..6470327a17650aa3427ba0261263228c81bff36f 100755 (executable)
@@ -49,7 +49,7 @@ verify_disk_count "$DISKS" 2
 default_mirror_setup_noexit $DISKS
 file_write -o create -w -f $init_data -b $blksize -c $write_count
 log_must echo "zfs" >> $init_data
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 output=$(zdb -r $TESTPOOL/$TESTFS file1 $tmpfile)
 log_must cmp $init_data $tmpfile
index b644fcae3cf82720ba536ea2f34e2cedfab5517a..5774fb873f33096cf0ee0b3310486dafdd30b776 100755 (executable)
@@ -67,7 +67,7 @@ done
 #
 # Sync up the filesystem
 #
-sync
+sync_all_pools
 
 #
 # Verify 'zfs list' can correctly list the space charged
index ab506debe9ebfe673e366ee987291477fc2763d7..2f328ceac4ae6a8d8f34e8820d49528c0ee8b548 100755 (executable)
@@ -66,7 +66,7 @@ function test_condense
        # sync between each write to make sure a new entry is created
        for i in {0..4}; do
            log_must mkfile 5m /$TESTPOOL/$TESTCLONE/testfile$i
-           log_must zpool sync $TESTPOOL
+           sync_pool $TESTPOOL
        done
 
        check_ll_len "5 entries" "Unexpected livelist size"
@@ -74,7 +74,7 @@ function test_condense
        # sync between each write to allow for a condense of the previous entry
        for i in {0..4}; do
            log_must mkfile 5m /$TESTPOOL/$TESTCLONE/testfile$i
-           log_must zpool sync $TESTPOOL
+           sync_pool $TESTPOOL
        done
 
        check_ll_len "6 entries" "Condense did not occur"
@@ -91,7 +91,7 @@ function test_deactivated
 
        log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE0
        log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE1
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
        # snapshot and clone share 'atestfile', 33 percent
        check_livelist_gone
        log_must zfs destroy -R $TESTPOOL/$TESTCLONE
@@ -103,7 +103,7 @@ function test_deactivated
        log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE0
        log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE1
        log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE2
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
        # snapshot and clone share 'atestfile', 25 percent
        check_livelist_exists $TESTCLONE
        log_must rm /$TESTPOOL/$TESTCLONE/atestfile
index 453b502416ed8dda7c29a28f8722fb7ed510a52b..d83280e32deae71dc0917dc8019a33551861334c 100755 (executable)
@@ -49,11 +49,11 @@ function delete_race
        set_tunable32 "$1" 0
        log_must zfs clone $TESTPOOL/$TESTFS1@snap $TESTPOOL/$TESTCLONE
        for i in {1..5}; do
-               log_must zpool sync $TESTPOOL
+               sync_pool $TESTPOOL
                log_must mkfile 5m /$TESTPOOL/$TESTCLONE/out
        done
        log_must zfs destroy $TESTPOOL/$TESTCLONE
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
        [[ "1" == "$(get_tunable "$1")" ]] || \
            log_fail "delete/condense race test failed"
 }
@@ -63,7 +63,7 @@ function export_race
        set_tunable32 "$1" 0
        log_must zfs clone $TESTPOOL/$TESTFS1@snap $TESTPOOL/$TESTCLONE
        for i in {1..5}; do
-               log_must zpool sync $TESTPOOL
+               sync_pool $TESTPOOL
                log_must mkfile 5m /$TESTPOOL/$TESTCLONE/out
        done
        log_must zpool export $TESTPOOL
@@ -78,12 +78,12 @@ function disable_race
        set_tunable32 "$1" 0
        log_must zfs clone $TESTPOOL/$TESTFS1@snap $TESTPOOL/$TESTCLONE
        for i in {1..5}; do
-               log_must zpool sync $TESTPOOL
+               sync_pool $TESTPOOL
                log_must mkfile 5m /$TESTPOOL/$TESTCLONE/out
        done
        # overwrite the file shared with the origin to trigger disable
        log_must mkfile 100m /$TESTPOOL/$TESTCLONE/atestfile
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
        [[ "1" == "$(get_tunable "$1")" ]] || \
            log_fail "disable/condense race test failed"
        log_must zfs destroy $TESTPOOL/$TESTCLONE
@@ -95,7 +95,7 @@ log_onexit cleanup
 
 log_must zfs create $TESTPOOL/$TESTFS1
 log_must mkfile 100m /$TESTPOOL/$TESTFS1/atestfile
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 log_must zfs snapshot $TESTPOOL/$TESTFS1@snap
 
 # Reduce livelist size to trigger condense more easily
index 5f356967a457ba62b0500e891a971f99d715755f..00583402db890495f010bb036834e0bba74f48eb 100755 (executable)
@@ -54,12 +54,12 @@ function test_dedup
        # Note: We sync before and after so all dedup blocks belong to the
        #       same TXG, otherwise they won't look identical to the livelist
        #       iterator due to their logical birth TXG being different.
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
        log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-0
        log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-1
        log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-2
        log_must cp /$TESTPOOL/$TESTCLONE/data /$TESTPOOL/$TESTCLONE/data-dup-3
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
        check_livelist_exists $TESTCLONE
 
        # Introduce "double frees"
@@ -67,10 +67,10 @@ function test_dedup
        #   was what triggered past panics.
        # Note: Similarly to the previous step we sync before and after
        #       our deletions so all the entries end up in the same TXG.
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
        log_must rm /$TESTPOOL/$TESTCLONE/data-dup-2
        log_must rm /$TESTPOOL/$TESTCLONE/data-dup-3
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
        check_livelist_exists $TESTCLONE
 
        log_must zfs destroy $TESTPOOL/$TESTCLONE
index e7663ef7973ca9e166c4dcdf31edef88185ebe7e..9165b03a16470002e21ccb7fdde193ac6c531c10 100755 (executable)
@@ -47,7 +47,7 @@ function cleanup
 function clone_write_file
 {
        log_must mkfile 1m /$TESTPOOL/$1/$2
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
 }
 
 function test_one_empty
index 1a20b7a3313176e02b1e8f1d5001803e8718e635..51b3d2e513cc2e19f585fbd00eb961d7b36f28cb 100644 (file)
@@ -153,7 +153,7 @@ function check_livelist_exists
 function check_livelist_gone
 {
        log_must zpool wait -t free $TESTPOOL
-       zpool sync
+       sync_all_pools
        zdb -vvvvv $TESTPOOL | grep "Livelist" && \
                log_fail "zdb found Livelist after the clone is deleted."
 }
index ab646daecedcf17b44bb5c06177c9a66d5555528..b4f2740c7aa2cebc8206c27b4c591a2504794905 100755 (executable)
@@ -65,7 +65,7 @@ log_must zfs clone $TESTPOOL2/$TESTFS@snap $TESTPOOL2/$TESTCLONE
 # Create initial files and pause condense zthr on next execution
 log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A
 log_must mkfile 1m /$TESTPOOL2/$TESTCLONE/B
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
 set_tunable32 LIVELIST_CONDENSE_SYNC_PAUSE 1
 
 # Add a new dev and remove the old one
@@ -76,15 +76,15 @@ wait_for_removal $TESTPOOL2
 set_tunable32 LIVELIST_CONDENSE_NEW_ALLOC 0
 # Trigger a condense
 log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
 log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
 # Write remapped blkptrs which will modify the livelist mid-condense
 log_must mkfile 1m /$TESTPOOL2/$TESTCLONE/B
 
 # Resume condense thr
 set_tunable32 LIVELIST_CONDENSE_SYNC_PAUSE 0
-log_must zpool sync $TESTPOOL2
+sync_pool $TESTPOOL2
 # Check that we've added new ALLOC blkptrs during the condense
 [[ "0" < "$(get_tunable LIVELIST_CONDENSE_NEW_ALLOC)" ]] || \
     log_fail "removal/condense test failed"
index ac6103ebc7bfb383e874013cdf2d5b9fa149a37b..259f0e99b65c331ead40001ffe2b2d93dd1c0f81 100755 (executable)
@@ -155,7 +155,7 @@ CRYPT_MNTPFS="$(get_prop mountpoint $TESTFS/crypt)"
 log_must touch $CRYPT_MNTPFS/file.dat
 log_must mount $RO $TESTFS/crypt $CRYPT_MNTPFS
 log_must umount -f $CRYPT_MNTPFS
-zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 # 6. Re-import the pool readonly
 log_must zpool export $TESTPOOL
index 7c115ee33b96537f41210b8774e6d881f03e2533..2860cab2b6796766ba2d7b823dab95eddf8108e5 100755 (executable)
@@ -87,7 +87,7 @@ log_must zfs snapshot $inc_snap
 log_must eval "zfs send -i $init_snap $inc_snap > $inc_bkup"
 log_must touch /$TESTDIR/bar
 
-sync
+sync_all_pools
 
 set -A badargs \
     "" "nonexistent-snap" "blah@blah" "-d" "-d nonexistent-dataset" \
index afae804a15c2b0be38d7d8af8d60f895459e294f..7de2c183480765102d34d6a5640ebcd7e222fff0 100755 (executable)
@@ -65,14 +65,14 @@ origdir=$(get_prop mountpoint $orig)
 #      2. Create two equal-sized large files.
 log_must mkfile 5M $origdir/file1
 log_must mkfile 5M $origdir/file2
-log_must sync
+sync_all_pools
 
 #      3. Snapshot the filesystem.
 log_must zfs snapshot $orig@1
 
 #      4. Remove one of the two large files.
 log_must rm $origdir/file2
-log_must sync
+sync_all_pools
 
 #      5. Create a refquota larger than one file, but smaller than both.
 log_must zfs set refquota=8M $orig
index 433f240675f3b567a56c4b0555325f83940db79e..2eadb68c372df531b34bb2283af9856700af7208 100644 (file)
@@ -116,6 +116,7 @@ function setup_snap_env
                        if datasetnonexists $snap; then
                                log_must cp /etc/passwd $fname
                                if is_linux || is_freebsd; then
+                                       sync_all_pools
                                        log_must sync
                                else
                                        #
index da0aebe6b58140fb46ecb1047b8245914fddb868..675afa72f5af46a4b07bca271a2814106020cdda 100755 (executable)
@@ -62,9 +62,9 @@ test_pool ()
        log_must zfs snapshot $POOL/fs@a
        while true; do
                log_must find $mntpnt/ -type f -delete
-               sync
+               sync_all_pools
                log_must mkfiles "$mntpnt/" 4000
-               sync
+               sync_all_pools
                # check if we started reusing objects
                object=$(ls -i $mntpnt | sort -n | awk -v object=$object \
                    '{if ($1 <= object) {exit 1}} END {print $1}')
index 18f238386374e6aef82d608d0e3035327ab2806c..8c7f40ba9c0b6bf24f8f2f68faa83e4d609f1813 100755 (executable)
@@ -44,7 +44,7 @@ DISK3="$(echo $DISKS | cut -d' ' -f3)"
 
 log_must dd if=/dev/urandom of=/$TESTDIR/testfile bs=10M count=1
 
-log_must zpool sync
+sync_all_pools
 
 log_must zpool offline -f $TESTPOOL $DISK3
 log_must wait_for_degraded $TESTPOOL
index 98b4140727c07a20404f434d03687743ecd02531..1188ca10d14d1ba4498ee80d6f867fa74cdccec9 100755 (executable)
@@ -175,7 +175,8 @@ function do_testing #<clear type> <vdevs>
        esac
        dd if=/dev/zero of=$fbase.$i seek=512 bs=1024 count=$wcount conv=notrunc \
                        > /dev/null 2>&1
-       log_must sync
+       sync_all_pools
+       log_must sync #ensure the vdev files are written out
        log_must zpool scrub -w $TESTPOOL1
 
        check_err $TESTPOOL1 && \
index fdf56b2cf9a6d81c375a3fa3b14c9f7cc54a085f..22212a8f50a57fc09c807972b114ab66cf2985ca 100755 (executable)
@@ -111,7 +111,7 @@ log_must zpool create -f -m $MOUNTDIR -o failmode=continue $POOL raidz $VDEV1 $V
 log_must zfs set compression=off recordsize=16k $POOL
 # create a file full of zeros
 log_must mkfile -v $FILESIZE $FILEPATH
-log_must zpool sync $POOL
+sync_pool $POOL
 
 # run once and observe the checksum errors
 damage_and_repair 1
index 595eacf5b4b00d25ec7bc1a73b5cb70f6ce21609..7023c49e51f2fcb8083243b19095e840ba029e12 100755 (executable)
@@ -112,7 +112,7 @@ function do_dup_test
 
        if [ "$RW" == "write" ] ; then
                log_must mkfile $FILESIZE $FILEPATH
-               log_must zpool sync $POOL
+               sync_pool $POOL
        fi
 
        log_must zinject -c all
index a6833f167c66a8c97373b6af6dc9364aac75b856..f26c65f9db2caecbc582dc1c28f8e9f85da6b373 100755 (executable)
@@ -97,7 +97,7 @@ function do_test
 
        if [ "$RW" == "write" ] ; then
                log_must mkfile $FILESIZE $MOUNTDIR/file
-               log_must zpool sync $POOL
+               sync_pool $POOL
        else
                log_must zpool scrub $POOL
                wait_scrubbed $POOL
index 9be752ff83f212416b2924c3a6002027628e133f..2ccc57b475b7252ff0c0b665aa34e17c215936ed 100755 (executable)
@@ -42,7 +42,7 @@ DISK1=${DISKS%% *}
 
 log_must zpool create -f $TESTPOOL $DISK1
 log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=1M count=30
-log_must sync
+sync_all_pools
 
 log_must zpool initialize $TESTPOOL
 
@@ -52,7 +52,7 @@ log_must zdb -cc $TESTPOOL
     log_fail "Initializing did not start"
 
 log_must dd if=/dev/urandom of=/$TESTPOOL/file2 bs=1M count=30
-log_must sync
+sync_all_pools
 
 log_must zdb -cc $TESTPOOL
 
index f93de6e224457e77e4fa855ab4175dc37885d85f..bd8917b3cd66f55f7fdb2ce3ff54d8c808e2024d 100755 (executable)
@@ -49,12 +49,12 @@ log_must truncate -s $SPA_MINDEVSIZE $DEVICE2 $DEVICE3 $DEVICE4 $DEVICE5
 
 log_must zpool create -f $TESTPOOL $DEVICE1 $DEVICE2 \
     log $DEVICE3 cache $DEVICE4 spare $DEVICE5
-log_must zpool sync
+sync_all_pools
 
 # Remove each type of vdev and verify the label can be cleared.
 for dev in $DEVICE5 $DEVICE4 $DEVICE3 $DEVICE2; do
        log_must zpool remove $TESTPOOL $dev
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL true
        log_must zpool labelclear $dev
        log_mustnot zdb -lq $dev
 done
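
Note the added force flag: per the `sync_pool #pool <force>` signature above, `sync_pool $TESTPOOL true` presumably issues `zpool sync -f`, forcing an uberblock and config update so the device removal is fully settled before `zpool labelclear` runs. Schematically (assuming sync_pool's force handling mirrors sync_all_pools):

        sync_pool $TESTPOOL        # zpool sync $TESTPOOL
        sync_pool $TESTPOOL true   # zpool sync -f $TESTPOOL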
index 7ba301e2b56b71cbaf66c6947b5504c00a9e4c53..8489fddb410928319ba259b5bdcdd397a6a9feec 100755 (executable)
@@ -77,7 +77,7 @@ for disk in $DISKLIST; do
        i=0
        while [[ $i -lt ${#args[*]} ]]; do
 
-               log_must sync_pool $TESTPOOL
+               sync_pool $TESTPOOL
                log_must zpool offline $TESTPOOL $disk
                check_state $TESTPOOL $disk "offline"
                if [[ $? != 0 ]]; then
index 68ebf669c9f595befa9b606c9265284f17b1713b..5a1b94db3dcbdf0ec237faa5c6824a2616f6f524 100755 (executable)
@@ -55,6 +55,7 @@ log_must zpool reopen
 log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
 # Write some data to the pool
 log_must generate_random_file /$TESTPOOL/data $SMALL_FILE_SIZE
+sync_pool $TESTPOOL
 # 4. "Plug back" disk.
 insert_disk $REMOVED_DISK $scsi_host
 # 5. Reopen a pool and verify if removed disk is marked online again.
index 444c8a68523fb272dc769ead886c351826fc8e31..95f6f7dcbba441de2609bbe4a7e4218233e0e1e9 100755 (executable)
@@ -55,6 +55,7 @@ log_must zpool reopen $TESTPOOL
 log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
 # Write some data to the pool
 log_must generate_random_file /$TESTPOOL/data $SMALL_FILE_SIZE
+sync_pool $TESTPOOL
 # 4. "Plug back" disk.
 insert_disk $REMOVED_DISK $scsi_host
 # 5. Reopen a pool and verify if removed disk is marked online again.
index 097dd3c71d1c9ce07aef9ca84be0d35508309b48..6ce054cdb4a1bfa1f697343a6dd25b4d858ee994 100755 (executable)
@@ -64,6 +64,7 @@ log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
 # 3. Write a test file to the pool and calculate its checksum.
 TESTFILE=/$TESTPOOL/data
 log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
+sync_pool $TESTPOOL
 TESTFILE_MD5=$(md5digest $TESTFILE)
 
 # 4. Execute scrub.
index 956ceebafbc61efbe1f203d1fd354601d750cdff..3180eddc094dbb302e22706279598306c886efdb 100755 (executable)
@@ -62,6 +62,7 @@ log_must zpool reopen -n $TESTPOOL
 log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
 # 3. Write test file to pool.
 log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
+sync_pool $TESTPOOL
 # 4. Execute scrub.
 # add delay to I/O requests for remaining disk in pool
 log_must zinject -d $DISK2 -D125:1 $TESTPOOL
index fc298d01061e4c673d8e90d2da471feb2397532c..095f3bc05e6659508f1d15d300649f4ee2df9404 100755 (executable)
@@ -60,6 +60,7 @@ log_must zpool reopen $TESTPOOL
 log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
 # 3. Write test file to pool.
 log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
+sync_pool $TESTPOOL
 # 4. "Plug back" disk.
 insert_disk $REMOVED_DISK $scsi_host
 
index 80fc169126d21885c58f4a1f3c1d9da7758db394..e487afd8ae4db36b0d29ac5d244e08166b3aaf18 100755 (executable)
@@ -58,12 +58,12 @@ mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
 
 # 1. Write some data and detach the first drive so it has resilver work to do
 log_must file_write -b 524288 -c 1024 -o create -d 0 -f $mntpnt/biggerfile1
-log_must sync
+sync_all_pools
 log_must zpool detach $TESTPOOL $DISK2
 
 # 2. Repeat the process with a second disk
 log_must file_write -b 524288 -c 1024 -o create -d 0 -f $mntpnt/biggerfile2
-log_must sync
+sync_all_pools
 log_must zpool detach $TESTPOOL $DISK3
 
 # 3. Reattach the drives, causing the second drive's resilver to be deferred
index 449bb9a822776e0b6de123fa3ad70386b1f0b322..116d622960f9b12665e45a4f061a53cf1a26b12f 100755 (executable)
@@ -61,7 +61,7 @@ log_assert "Verify scrub, scrub -p, and scrub -s show the right status."
 # Create 1G of additional data
 mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
 log_must file_write -b 1048576 -c 1024 -o create -d 0 -f $mntpnt/biggerfile
-log_must sync
+sync_all_pools
 
 log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
 log_must zpool scrub $TESTPOOL
index 2dd33c99c55489b26aaf82133085c1d119155136..1abef573883d101b084d5be866c5bc2d0acf4652 100755 (executable)
@@ -50,7 +50,7 @@ log_assert "Scrubs and self healing must work with additional copies"
 log_must zfs create -o copies=3 $TESTPOOL/$TESTFS2
 typeset mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS2)
 log_must mkfile 10m $mntpnt/file
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 log_must zinject -a -t data -C 0,1 -e io $mntpnt/file
 
index 99a40ecf2b745b28f47dca28f197e48cb21c6c63..b50963bbe2adc99817be155f355b247cd1eee8c1 100755 (executable)
@@ -66,7 +66,7 @@ function zpool_split #disk_to_be_offline/online
        # Create 2G of additional data
        mntpnt=$(get_prop mountpoint $TESTPOOL)
        log_must file_write -b 2097152 -c 1024 -o create -d 0 -f $mntpnt/biggerfile
-       log_must sync
+       sync_all_pools
 
        # temporarily prevent resilvering progress, so it will not finish too early
        log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
index 5d14b74ecc15c26a99e77374c70367a9e5ea1283..f3b3b0f272a7baf05fbf12973bc660ada5c9929d 100755 (executable)
@@ -66,7 +66,7 @@ log_must mkdir "$TESTDIR"
 log_must truncate -s $LARGESIZE "$LARGEFILE"
 log_must zpool create $TESTPOOL "$LARGEFILE"
 log_must mkfile $(( floor(LARGESIZE * 0.80) )) /$TESTPOOL/file
-log_must zpool sync
+sync_all_pools
 
 new_size=$(du -B1 "$LARGEFILE" | cut -f1)
 log_must test $new_size -le $LARGESIZE
@@ -89,7 +89,7 @@ log_must set_tunable64 TRIM_METASLAB_SKIP 1
 log_must zpool trim $TESTPOOL
 log_must set_tunable64 TRIM_METASLAB_SKIP 0
 
-log_must zpool sync
+sync_all_pools
 while [[ "$(trim_progress $TESTPOOL $LARGEFILE)" -lt "100" ]]; do
        sleep 0.5
 done
@@ -102,7 +102,7 @@ log_must test $new_size -gt $LARGESIZE
 # space usage of the new metaslabs.
 log_must zpool trim $TESTPOOL
 
-log_must zpool sync
+sync_all_pools
 while [[ "$(trim_progress $TESTPOOL $LARGEFILE)" -lt "100" ]]; do
        sleep 0.5
 done
index 093dc3fb9e76e4c79cf52a2e4b23d6fc29765714..a12acce44a4437f26ec5cef64e478acfdf4c52ce 100755 (executable)
@@ -54,14 +54,14 @@ log_must truncate -s $LARGESIZE "$LARGEFILE"
 log_must zpool create $TESTPOOL "$LARGEFILE"
 
 log_must dd if=/dev/urandom of=/$TESTPOOL/file1 bs=1048576 count=64
-log_must zpool sync
+sync_all_pools
 log_must zpool trim $TESTPOOL
 
 [[ -z "$(trim_progress $TESTPOOL $DISK1)" ]] && \
     log_fail "Trimming did not start"
 
 log_must dd if=/dev/urandom of=/$TESTPOOL/file2 bs=1048576 count=64
-log_must zpool sync
+sync_all_pools
 
 log_must zpool export $TESTPOOL
 log_must zdb -e -p "$TESTDIR" -cc $TESTPOOL
index a899e9f99f14b4b49d3b2f75d9c152b65a6a58e5..f50b2d8e9a74b3273a09f27c5d364ae234236c42 100755 (executable)
@@ -41,7 +41,7 @@ function cleanup
            log_must zpool detach $TESTPOOL $DISK2
        get_disklist $TESTPOOL | grep $DISK3 >/dev/null && \
            log_must zpool detach $TESTPOOL $DISK3
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
 }
 
 typeset pid
index 7f5a9e6a8d6c8f3becfdf3ed116aa4e0e2510716..73ec6a27bcc7ce39f2e5b1bd101435416343880d 100755 (executable)
@@ -104,7 +104,7 @@ log_must zfs clone "$SNAP" "$CLONE"
 for i in {1..50}; do
        log_must dd if=/dev/urandom of="/$CLONE/testfile$i" bs=1k count=512
        # Force each new file to be tracked by a new livelist
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
 done
 log_must zfs destroy "$CLONE"
 test_wait
index fd6e8c858edda6809a15ee06c7035643610ad06f..7b18ebdcb85929c881f3385b6220ab5a514205e6 100755 (executable)
@@ -70,7 +70,7 @@ log_must file_write -b 1048576 -c 8 -o create -d 0 -f $mntpnt/file
 sleep 10
 
 log_must zinject -c all
-log_must zpool sync
+sync_all_pools
 
 # Log txg sync times for reference and the zpool event summary.
 if is_freebsd; then
index c1cfc115123960ef77548b688974fc0ea0ca3ab9..f0774c4b29d90754d5a86bb3b9e1034450568059 100755 (executable)
@@ -83,7 +83,7 @@ log_must zinject -d $DISK1 -D10000:1 $TESTPOOL
 log_must eval "dd if=/$mntpnt/file1 of=/dev/null bs=1048576 &"
 sleep 10
 log_must zinject -c all
-log_must zpool sync
+sync_all_pools
 wait
 
 # 5. Verify a "deadman" event is posted.  The first appears after 5
index e39b015b21b8e0c717caab67367a76f4cb20d532..a78b390aa18ef397343b20efd4197a0c3c59bedb 100644 (file)
@@ -483,7 +483,7 @@ function verify_userprop
        typeset stamp=${perm}.${user}.$RANDOM
 
        user_run $user zfs set "$user:ts=$stamp" $dtst
-       zpool sync ${dtst%%/*}
+       sync_pool ${dtst%%/*}
        if [[ $stamp != $(get_prop "$user:ts" $dtst) ]]; then
                return 1
        fi
index 9c5879183b154674afccfff31c32282c6561c909..cc600c4ed510a7d6f5b70a5afee3255f91898317 100644 (file)
@@ -110,7 +110,7 @@ function run_and_verify
        log_must eval "$fullcmd"
 
        # Collect the new events and verify there are some.
-       log_must zpool sync -f
+       sync_all_pools true
        log_must eval "zpool events >$TMP_EVENTS 2>/dev/null"
        log_must eval "zpool events -v > $TMP_EVENTS_FULL 2>/dev/null"
 
index 86916bf906fe8d7c41a3c3305c0dd8577ca5a623..ef2ce24e097b52012b448c98da54519892f415b7 100755 (executable)
@@ -102,7 +102,7 @@ do
        # 2. Simulate physical removal of one device
        remove_disk $removedev
        log_must mkfile 1m $mntpnt/file
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
 
        # 3. Verify the device is unavailable.
        log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"
@@ -134,7 +134,7 @@ do
        # 2. Simulate physical removal of one device
        remove_disk $removedev
        log_must mkfile 1m $mntpnt/file
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
 
        # 3. Verify the device is handled by the spare.
        log_must wait_hotspare_state $TESTPOOL $sparedev "INUSE"
@@ -171,7 +171,7 @@ do
        # 3. Simulate physical removal of one device
        remove_disk $removedev
        log_must mkfile 1m $mntpnt/file
-       log_must zpool sync $TESTPOOL
+       sync_pool $TESTPOOL
 
        # 4. Verify the device is unavailable
        log_must wait_vdev_state $TESTPOOL $removedev "UNAVAIL"
index 81eab56666273ec713e08bb1aa8d1db73c93412b..bcb7d8d1281c13cf2f7a5c10bc84f825bb1f8f91 100755 (executable)
@@ -45,7 +45,7 @@ log_must set_tunable64 COMPRESSED_ARC_ENABLED 0
 log_must zfs create -o compression=on $TESTPOOL/fs
 mntpt=$(get_prop mountpoint $TESTPOOL/fs)
 write_compressible $mntpt 32m 1 1024k "testfile"
-log_must sync
+sync_all_pools
 log_must zfs umount $TESTPOOL/fs
 log_must zfs mount $TESTPOOL/fs
 log_must zinject -a -t data -e decompress -f 20 $mntpt/testfile.0
index 85f0083a0eb69939f439284d9e0b3602f5683b90..a290053fd2698850886c21c49cf541d590cef3b4 100755 (executable)
@@ -64,7 +64,7 @@ log_must set_tunable64 SLOW_IO_EVENTS_PER_SECOND 1000
 # Create 20ms IOs
 log_must zinject -d $DISK -D20:100 $TESTPOOL
 log_must mkfile 1048576 /$TESTPOOL/testfile
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 log_must zinject -c all
 SLOW_IOS=$(zpool status -sp | grep "$DISK" | awk '{print $6}')
index ad0e49f8fb4021c41d0d1e256e7b28274d4ceec6..e0617961ba15d01ae2a2725715557975b8751d00 100755 (executable)
@@ -66,7 +66,7 @@ log_must dd bs=1024k count=128 if=/dev/zero of=/$TEST_FS/file
 #
 log_must set_tunable64 ASYNC_BLOCK_MAX_BLOCKS 100
 
-log_must sync
+sync_all_pools
 log_must zfs destroy $TEST_FS
 
 #
index fca0e8e4a1b401c26cdf49c08bf4e7d0acbf190f..d1d283376bbb7daf590407b14141e056b857051f 100755 (executable)
@@ -63,9 +63,9 @@ log_must zpool create -o cachefile=none -f $LOGSM_POOL $TESTDISK
 log_must zfs create $LOGSM_POOL/fs
 
 log_must dd if=/dev/urandom of=/$LOGSM_POOL/fs/00 bs=128k count=10
-log_must sync
+sync_all_pools
 log_must dd if=/dev/urandom of=/$LOGSM_POOL/fs/00 bs=128k count=10
-log_must sync
+sync_all_pools
 
 log_must set_tunable64 KEEP_LOG_SPACEMAPS_AT_EXPORT 1
 log_must zpool export $LOGSM_POOL
index 6e3d1fe34d4b6214f5786133113d0100e0c8830c..6063c6a3796b0316de624c63950e346964a5e9bd 100755 (executable)
@@ -71,7 +71,7 @@ for fails in $(seq $MMP_FAIL_INTERVALS_MIN $((MMP_FAIL_INTERVALS_MIN*2))); do
        for interval in $(seq $MMP_INTERVAL_MIN 200 $MMP_INTERVAL_DEFAULT); do
                log_must set_tunable64 MULTIHOST_FAIL_INTERVALS $fails
                log_must set_tunable64 MULTIHOST_INTERVAL $interval
-               log_must sync_pool $TESTPOOL
+               sync_pool $TESTPOOL
                typeset mmp_fail=$(zdb $TESTPOOL 2>/dev/null |
                    awk '/mmp_fail/ {print $NF}')
                if [ $fails -ne $mmp_fail ]; then
index 6239d491e6eb11c72b748a3b668be98cbd94f889..081157cdc719a00e48046895bb9337a4a36b1ad1 100755 (executable)
@@ -56,7 +56,7 @@ for i in $(seq 100); do
        (( $ret != $ENOSPC )) && \
            log_fail "file.$i returned: $ret rather than ENOSPC."
 
-       log_must zpool sync -f
+       sync_all_pools true
 done
 
 log_mustnot_expect space zfs create $TESTPOOL/$TESTFS/subfs
index 2fd913f40338b9fcf7ebb793c813266c506beccf..5050447c000a5b43ed43009cbe2bdb686f248376 100755 (executable)
@@ -90,7 +90,8 @@ for disk in $DISKLIST; do
 done
 
 log_must kill $killpid
-sync
+sync_all_pools
+log_must sync
 
 typeset dir=$(get_device_dir $DISKS)
 verify_filesys "$TESTPOOL" "$TESTPOOL/$TESTFS" "$dir"
index 19576a82100bae1beede80b4df8faa94075ddb77..e66e7e10fedc656f7654b077df41ba61968e75de 100755 (executable)
@@ -129,7 +129,8 @@ while [[ $i -lt ${#disks[*]} ]]; do
 done
 
 log_must kill $killpid
-sync
+sync_all_pools
+log_must sync
 
 typeset dir=$(get_device_dir $DISKS)
 verify_filesys "$TESTPOOL" "$TESTPOOL/$TESTFS" "$dir"
index 0d763ee1b882114932c303d3ad8b3621ee178870..e33db28f8ddb7321233b17ba626215ac15f9708c 100755 (executable)
@@ -76,6 +76,6 @@ for i in 0 1 2; do
 done
 
 log_must kill $killpid
-sync
+sync_all_pools
 
 log_pass
index dfc1f1ee0497345ee8ca83a549b103c98bb1625c..9104e4ba2ac3155b656ec424838d0dd17e8b21f2 100755 (executable)
@@ -65,7 +65,7 @@ log_must zfs create $FS
 for i in {1..20}; do
        log_must zfs snapshot "$FS@testsnapshot$i"
 done
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 #
 # Read the debug message file in small chunks to make sure that the read is
index 1af1c2c07089ad03dc616f12436386e7952a23ef..a24452ed5892ff648ec47f8a9f1cc1073b28e609 100755 (executable)
@@ -60,7 +60,7 @@ log_must zfs create $FS
 for i in {1..20}; do
        log_must zfs snapshot "$FS@testsnapshot$i"
 done
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 
 msgs1=$(mktemp) || log_fail
 msgs2=$(mktemp) || log_fail
index 95a5e5c1ebc641948b89aa98a37cdb5f14266125..b3958b345d2a08a324cc231b9b25e2a15b212342 100755 (executable)
@@ -50,7 +50,7 @@ function cleanup
 function sync_n
 {
        for i in {1..$1}; do
-               log_must zpool sync $TESTPOOL
+               sync_pool $TESTPOOL
        done
        return 0
 }
index 6b375d407e23db2951520218c39b49a1f71e15f1..73554df6a42aea71c2791c1e7cd8978d6d747673 100755 (executable)
@@ -65,7 +65,7 @@ mkmount_writable $QFS
 log_must user_run $PUSER mkdir $PRJDIR
 log_must chattr +P -p $PRJID1 $PRJDIR
 log_must user_run $PUSER mkfile 100m $PRJDIR/qf
-sync
+sync_all_pools
 
 log_note "set projectquota at a smaller size than it current usage"
 log_must zfs set projectquota@$PRJID1=90m $QFS
index 23f7c2a50663d1c638b51254d742735deb22aacc..0582164f9d6ca8eac6682dfd47e6585ccaf889bb 100644 (file)
@@ -51,7 +51,7 @@ function cleanup_projectquota
        [[ -d $PRJDIR1 ]] && log_must rm -rf $PRJDIR1
        [[ -d $PRJDIR2 ]] && log_must rm -rf $PRJDIR2
        [[ -d $PRJDIR3 ]] && log_must rm -rf $PRJDIR3
-       sync
+       sync_all_pools
 
        return 0
 }
index b7707ea522c426f2ecba9a03cf8c1fc111b65cf4..4005c2a408c690496632716671710607a04e2d6e 100755 (executable)
@@ -70,7 +70,7 @@ mkmount_writable $QFS
 log_must user_run $PUSER mkdir $PRJDIR
 log_must chattr +P -p $PRJID1 $PRJDIR
 log_must user_run $PUSER mkfile 50m $PRJDIR/qf
-sync
+sync_all_pools
 
 log_must zfs snapshot $snap_fs
 
index 10edae771e99213e07ea2c822c00da672e40e92d..1d48eccf22d07ef3636ea2cb0d493a7d4efe4d0e 100755 (executable)
@@ -63,7 +63,7 @@ mkmount_writable $QFS
 log_must user_run $PUSER mkdir $PRJDIR
 log_must chattr +P -p $PRJID1 $PRJDIR
 log_must user_run $PUSER mkfile 50m $PRJDIR/qf
-sync
+sync_all_pools
 
 typeset snapfs=$QFS@snap
 
index 4f0393883b6abfd01a6ae48ef938b7e367d52b40..484203549a960cb5342a0db4c1ba9bd047bd11d8 100755 (executable)
@@ -53,7 +53,7 @@ log_must mkfile 20M $mntpnt/$TESTFILE
 log_must zfs snapshot $FS@snap20M
 log_must rm $mntpnt/$TESTFILE
 
-log_must sync
+sync_all_pools
 
 log_must zfs set refquota=10M $FS
 log_mustnot zfs rollback $FS@snap20M
index 140ac38ad89807bdeda7fe612985e19fb436c007..5752575a8bb13920eb852ebf223494b7520b9cc9 100644 (file)
@@ -69,7 +69,7 @@ function attempt_during_removal # pool disk callback [args]
        # We want to make sure that the removal started
        # before issuing the callback.
        #
-       sync
+       sync_pool $pool
        log_must is_pool_removing $pool
 
        log_must $callback "$@"
index e7e63b705575d69915d9a501252defed1d23e849..5ee55e9a9d3e448dc12dd88a25e8117cc4bb0373 100755 (executable)
@@ -79,7 +79,7 @@ log_must wait_for_removal $TESTPOOL
 # Run sync once to ensure that the config actually changed.
 #
 log_must zpool add $TESTPOOL $DISK2
-log_must sync
+sync_all_pools
 
 #
 # Ensure that zdb does not find any problems with this.
index 7775cbff4db8be5306482fe6cd3eae573239c7a5..2cb971a1caf1e538c5545963955fe0f2c488ecd8 100755 (executable)
@@ -73,7 +73,7 @@ log_must zfs create $TESTPOOL1/$TESTFS
 
 mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS)
 log_must dd if=/dev/urandom of=$mntpnt/file bs=1M count=32
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
 
 log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
 
@@ -99,7 +99,7 @@ log_must zfs create $TESTPOOL1/$TESTFS
 
 mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS)
 log_must dd if=/dev/urandom of=$mntpnt/file bs=1M count=32
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
 
 log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
 
index 7e96ab5187734d7c86e56917fd0f34bd18cc52cd..269d31bf8905d061263ddd68e6b5e453f2e37a6a 100755 (executable)
@@ -153,9 +153,9 @@ do
        # offline then online a vdev to introduce a new DTL range after current
        # scan, which should restart (or defer) the resilver
        log_must zpool offline $TESTPOOL1 ${VDEV_FILES[2]}
-       log_must zpool sync $TESTPOOL1
+       sync_pool $TESTPOOL1
        log_must zpool online $TESTPOOL1 ${VDEV_FILES[2]}
-       log_must zpool sync $TESTPOOL1
+       sync_pool $TESTPOOL1
 
        # there should now be 2 resilver starts w/o defer, 1 with defer
        verify_restarts ' after offline/online' "${RESTARTS[1]}" "${VDEVS[1]}"
@@ -177,8 +177,8 @@ do
        log_must is_pool_resilvered $TESTPOOL1
 
        # wait for a few txg's to see if a resilver happens
-       log_must zpool sync $TESTPOOL1
-       log_must zpool sync $TESTPOOL1
+       sync_pool $TESTPOOL1
+       sync_pool $TESTPOOL1
 
        # there should now be 2 resilver starts
        verify_restarts ' after resilver' "${RESTARTS[3]}" "${VDEVS[3]}"
index 48763f9b2dfacaa4a160b6e81cd3bafcf45cc584..4f2707693d13ba75652df804094026b7f2dd10a0 100755 (executable)
@@ -73,7 +73,7 @@ log_must zpool attach $TESTPOOL1 ${VDEV_FILES[0]} $SPARE_VDEV_FILE
 log_note "waiting for read errors to start showing up"
 for iter in {0..59}
 do
-       zpool sync $TESTPOOL1
+       sync_pool $TESTPOOL1
        err=$(zpool status $TESTPOOL1 | grep ${VDEV_FILES[0]} | awk '{print $3}')
        (( $err > 0 )) && break
        sleep 1
@@ -92,8 +92,8 @@ done
 (( $finish == 0 )) && log_fail "resilver took too long to finish"
 
 # wait a few syncs to ensure that zfs does not restart the resilver
-log_must zpool sync $TESTPOOL1
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
+sync_pool $TESTPOOL1
 
 # check if resilver was restarted
 start=$(zpool events | grep "sysevent.fs.zfs.resilver_start" | wc -l)
index da8a0a26e333e51ced99b703e9449b0fe7f823a1..a1b48680c2029f83b90ad326e4c2a352298aad72 100755 (executable)
@@ -60,7 +60,7 @@ log_must zfs create $TESTPOOL1/$TESTFS
 
 mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS)
 log_must dd if=/dev/urandom of=$mntpnt/file bs=1M count=64
-log_must zpool sync $TESTPOOL1
+sync_pool $TESTPOOL1
 
 # Request a healing or sequential resilver
 for replace_mode in "healing" "sequential"; do
index b95fc3da30c3fd7941428ed18413a93df2638c69..64ee6293c7a4c848a996cf1e563d35e7b808d420 100755 (executable)
@@ -88,7 +88,7 @@ for ((i = 1; i <= $snap_count; i++)); do
                log_must cp $mntpnt/file $mntpnt/file$j
        done
 
-       log_must sync
+       sync_all_pools
        log_must mount $remount_ro $zdev $mntpnt
        log_must zfs snap $TESTPOOL/$TESTVOL@snap$i
        log_must mount $remount_rw $zdev $mntpnt
index 370f5382ebaeb3a1e90c49254935c715f2d21556..43a80dc582f8e5c4ed94d3f5c41bb4e8763696f3 100755 (executable)
@@ -97,7 +97,7 @@ log_must zfs snapshot $TESTPOOL/$TESTFS2@snap1
 for i in {1..1000}; do
        log_must rm /$TESTPOOL/$TESTFS2/dir/file-$i
 done
-sync
+sync_all_pools
 
 log_must zfs snapshot $TESTPOOL/$TESTFS2@snap2
 expected_cksum=$(recursive_cksum /$TESTPOOL/$TESTFS2)
index 5760bf9b902af9adc88f93a7048cc2488f296666..aa19847e069532452340ee734bf62129991515a0 100755 (executable)
@@ -100,7 +100,7 @@ log_must truncate -s 131072 /$TESTPOOL/$TESTFS2/truncated
 log_must truncate -s 393216 /$TESTPOOL/$TESTFS2/truncated2
 log_must rm -f /$TESTPOOL/$TESTFS2/truncated3
 log_must rm -f /$TESTPOOL/$TESTFS2/truncated4
-log_must zpool sync $TESTPOOL
+sync_pool $TESTPOOL
 log_must zfs umount $TESTPOOL/$TESTFS2
 log_must zfs mount $TESTPOOL/$TESTFS2
 log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS2/truncated3 \
index f8530a623d472874d87b168b7ac70719fe8c0431..dbdf1f1ce527281cd6f62673eeb0a4b1f52f1907 100755 (executable)
@@ -58,7 +58,7 @@ for type in "mirror" "raidz" "raidz2"; do
                 # Ensure the file has been synced out before attempting to
                 # corrupt its contents.
                 #
-                sync
+                sync_all_pools
 
                #
                # Corrupt a pool device to make the pool DEGRADED
index d39c6ded557a380a6aba48d77d9614c9d21fe3f3..35cb4b6969658acb1ad4ccc4c3a28dc31a61464c 100755 (executable)
@@ -102,7 +102,7 @@ else
        log_must mount $VOLUME $MNTPNT
        FSTYPE=$NEWFS_DEFAULT_FS
 fi
-log_must zpool sync
+sync_all_pools
 
 #
 # 2. Freeze TESTVOL
index 8894c3a6528781c5928b6c05bab8e2ebadea909b..97194f4fe4a088af3179c3ed2b98e142d225a566 100755 (executable)
@@ -93,6 +93,7 @@ while [[ $i -le $COUNT ]]; do
 
         (( i = i + 1 ))
 done
+sync_pool $TESTPOOL
 
 #
 # Now rollback to latest snapshot
index 036e71410c6d8cec90a0e6d1baa93c41e7900e0f..766de990ecdbc32b0ab51973904731da9afa1fcb 100755 (executable)
@@ -82,6 +82,7 @@ log_must zfs rollback $SNAPPOOL
 log_mustnot zfs snapshot $SNAPPOOL
 
 log_must touch /$TESTPOOL/$TESTFILE
+sync_pool $TESTPOOL
 
 log_must zfs rollback $SNAPPOOL
 log_must zfs create $TESTPOOL/$TESTFILE
index 78fe18fa6946228eeea0d3c18317d9edb4eed3e8..e25390339b6cb231326578851f5671bf848facb4 100755 (executable)
@@ -73,7 +73,7 @@ for type in "" "mirror" "raidz" "draid"; do
                filesize=$((4096 + ((RANDOM * 691) % 131072) ))
                log_must rm -rf $dir
                log_must fill_fs $dir 10 10 $filesize 1 R
-               zpool sync
+               sync_all_pools
        done
        log_must du -hs /$TESTPOOL
 
index 13c9b95e06612bf9e4a47b0864bfdec3191be0ff..ae7ad8d73dd8fde6159dfa0742c21b02e4445432 100755 (executable)
@@ -74,7 +74,7 @@ for type in "" "mirror" "raidz" "raidz2" "draid" "draid2"; do
                filesize=$((4096 + ((RANDOM * 691) % 131072) ))
                log_must rm -rf $dir
                log_must fill_fs $dir 10 10 $filesize 1 R
-               zpool sync
+               sync_all_pools
 
                if [[ $((n % 4)) -eq 0 ]]; then
                        log_must timeout 120 zpool trim -w $TESTPOOL
index bede946a09c56f4702bd3a0834836bebfb7c75ac..dc1a60a5ee9d7633c1ff25adc070c0f389ff9daa 100644 (file)
@@ -91,7 +91,7 @@ function wait_trim_io # pool type txgs
                        return
                fi
 
-               zpool sync -f
+               sync_all_pools true
                ((i = i + 1))
        done
 
index 38f226d7f8e72b9423214f0783e057c935d39b7f..2dff0924f7b13db954fae159e1326b8cdf3a6004 100755 (executable)
@@ -72,7 +72,7 @@ for type in "" "mirror" "raidz" "draid"; do
                filesize=$((4096 + ((RANDOM * 691) % 131072) ))
                log_must rm -rf $dir
                log_must fill_fs $dir 10 10 $filesize 1 R
-               zpool sync
+               sync_all_pools
        done
        log_must du -hs /$TESTPOOL
 
index 2a4996a1d926f2f3e607263272da3f69858dbb3e..cff740e5fbba142489cb92636e1570280e466d8f 100755 (executable)
@@ -58,7 +58,7 @@ log_must dd if=/dev/urandom of=$srcfile bs=1024k count=1
 log_onexit cleanup
 log_must cp $srcfile $TESTDIR/$TESTFILE
 log_must cp /dev/null $TESTDIR/$TESTFILE
-log_must sync
+sync_all_pools
 if [[ -s $TESTDIR/$TESTFILE ]]; then
        log_note "$(ls -l $TESTDIR/$TESTFILE)"
        log_fail "testfile not truncated"
index 762f561b834f36761a9fb056039f2eac6b7568fb..b2c48a6424c4d612944c9d2c005f46af05b88e4e 100755 (executable)
@@ -65,7 +65,7 @@ log_must zfs set groupquota@$QGROUP=500m $QFS
 mkmount_writable $QFS
 log_must user_run $QUSER1 mkfile 50m $QFILE
 
-sync
+sync_all_pools
 
 log_must zfs snapshot $snap_fs
 
index 27feafa2b6a25569274b1aef742508b4646fedc3..3e9262831229af1fa085180de5cc0ce7f483a3ee 100755 (executable)
@@ -57,7 +57,7 @@ log_must zfs set groupquota@$QGROUP=500m $QFS
 mkmount_writable $QFS
 log_must user_run $QUSER1 mkfile 100m $QFILE
 
-sync
+sync_all_pools
 
 typeset snapfs=$QFS@snap
 
index 37fd389377ecb589cd5b5c6b74e1c78d949a1ebb..6d7f5bad3f35e31796873113108734eaccf96b27 100755 (executable)
@@ -78,7 +78,7 @@ log_must zfs set xattr=sa $QFS
 log_must user_run $QUSER1 mkfiles ${QFILE}_1 $user1_cnt
 log_must user_run $QUSER2 mkfiles ${QFILE}_2 $user2_cnt
 ((grp_cnt = user1_cnt + user2_cnt))
-sync_pool
+sync_all_pools
 
 typeset snapfs=$QFS@snap
 
index 1dea4006618c15b8f584f11359a0c6ec938e35dc..61cfb466156787b91168ac0a88f8bc917c00553a 100755 (executable)
@@ -63,7 +63,7 @@ log_must zfs get groupquota@$QGROUP $QFS
 log_note "write some data to the $QFS"
 mkmount_writable $QFS
 log_must user_run $QUSER1 mkfile 100m $QFILE
-sync
+sync_all_pools
 
 log_note "set user|group quota at a smaller size than it current usage"
 log_must zfs set userquota@$QUSER1=90m $QFS
index 70b935d7fe1962ea7d3e8cb45302c240bdb28883..cee3c6fb3269e4666c1bc1fe266253a6dae6fd76 100644 (file)
@@ -48,7 +48,7 @@ function cleanup_quota
 
        [[ -f $QFILE ]] && log_must rm -f $QFILE
        [[ -f $OFILE ]] && log_must rm -f $OFILE
-       sync
+       sync_all_pools
 
        return 0
 }
index 9b8919344582b8d786679cc4af925405c9f2a282..137dd68662cce8dab9e2bcc58bd88c32a3e524a3 100755 (executable)
@@ -64,7 +64,7 @@ typeset snap_fs=$QFS@snap
 log_must zfs set userquota@$QUSER1=100m $QFS
 mkmount_writable $QFS
 log_must user_run $QUSER1 mkfile 50m $QFILE
-sync
+sync_all_pools
 
 log_must zfs snapshot $snap_fs
 
index 94593ed21ae175db40ade5a1c40589b0be4c8bd7..0821bd075f602f8875b6515f4fe0fe807b42af99 100755 (executable)
@@ -59,7 +59,7 @@ log_must zfs set userquota@$QUSER1=100m $QFS
 mkmount_writable $QFS
 
 log_must user_run $QUSER1 mkfile 50m $QFILE
-sync
+sync_all_pools
 
 typeset snapfs=$QFS@snap
 
index 70ef78e7ddbbb725a2907419e88dda41870bae59..de7c117af3ab12f52f4c5ee5ed25d1c3082a58cd 100755 (executable)
@@ -79,7 +79,7 @@ log_must zfs set xattr=sa $QFS
 
 log_must user_run $QUSER1 mkfiles ${QFILE}_1 $user1_cnt
 log_must user_run $QUSER2 mkfiles ${QFILE}_2 $user2_cnt
-sync_pool
+sync_all_pools
 
 typeset snapfs=$QFS@snap
 
index 066be917e43660d1312c84ff21414923f1d30efd..7a40d8f0c565c0ebea9c95f52a34138b4ad522f3 100755 (executable)
@@ -41,7 +41,7 @@ orig_top=$(get_top_vd_zap $DISK $conf)
 orig_leaf=$(get_leaf_vd_zap $DISK $conf)
 assert_zap_common $TESTPOOL $DISK "top" $orig_top
 assert_zap_common $TESTPOOL $DISK "leaf" $orig_leaf
-log_must zpool sync
+sync_all_pools
 
 # Export the pool.
 log_must zpool export $TESTPOOL
index 2ecb00da92e1de4c5cfe05ea40470998968e0aec..297ad242fe2581fd93651b1799d3cadcb434782d 100755 (executable)
@@ -84,7 +84,7 @@ while (( 1 )); do
 done
 
 if is_linux || is_freebsd ; then
-       log_must sync
+       sync_all_pools
 else
        log_must lockfs -f $TESTDIR
 fi