OpenZFS 9082 - Add ZFS performance test targeting ZIL latency
author    John Wren Kennedy <john.kennedy@delphix.com>
          Tue, 10 Apr 2018 20:29:55 +0000 (14:29 -0600)
committer Brian Behlendorf <behlendorf1@llnl.gov>
          Wed, 30 May 2018 18:59:04 +0000 (11:59 -0700)
This adds a new test to measure ZIL performance.

- Adds the ability to induce IO delays with zinject
- Adds a new variable (PERF_NTHREADS_PER_FS) that allows fio threads to
  be distributed across individual file systems, rather than directing
  all IO to a single file system as the other tests do (sketched below)
- Refactors do_fio_run
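
A hedged sketch of how the new knobs compose (variable names are taken
from the diff below; the specific values here are hypothetical):

    # Spread 16 fio threads over 16 filesystems, one thread per
    # filesystem, and inject an artificial delay on every pool device.
    export PERF_NTHREADS='16'
    export PERF_NTHREADS_PER_FS='1'
    export ZINJECT_DELAYS='25:1'    # consumed as 'zinject -D latency:lanes'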

Authored by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: George Melikov <mail@gmelikov.ru>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Ported-by: John Wren Kennedy <jwk404@gmail.com>
OpenZFS-issue: https://www.illumos.org/issues/9082
OpenZFS-commit: https://github.com/openzfs/openzfs/pull/634
External-issue: DLPX-48625
Closes #7491

19 files changed:
tests/runfiles/perf-regression.run
tests/zfs-tests/include/libtest.shlib
tests/zfs-tests/tests/perf/fio/mkfiles.fio
tests/zfs-tests/tests/perf/fio/random_reads.fio
tests/zfs-tests/tests/perf/fio/random_readwrite.fio
tests/zfs-tests/tests/perf/fio/random_writes.fio
tests/zfs-tests/tests/perf/fio/sequential_reads.fio
tests/zfs-tests/tests/perf/fio/sequential_writes.fio
tests/zfs-tests/tests/perf/perf.shlib
tests/zfs-tests/tests/perf/regression/Makefile.am
tests/zfs-tests/tests/perf/regression/random_reads.ksh
tests/zfs-tests/tests/perf/regression/random_readwrite.ksh
tests/zfs-tests/tests/perf/regression/random_writes.ksh
tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh [new file with mode: 0755]
tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
tests/zfs-tests/tests/perf/regression/sequential_writes.ksh

diff --git a/tests/runfiles/perf-regression.run b/tests/runfiles/perf-regression.run
index cb068e887fe8fc01d0e0d877f640b451855e8b9a..d10ff8836d00aaf5d470792a54b5216a3e571556 100644 (file)
@@ -10,7 +10,7 @@
 #
 
 #
-# Copyright (c) 2015 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 #
 
 [DEFAULT]
@@ -27,6 +27,6 @@ tags = ['perf']
 [tests/perf/regression]
 tests = ['sequential_writes', 'sequential_reads', 'sequential_reads_arc_cached',
     'sequential_reads_arc_cached_clone', 'sequential_reads_dbuf_cached',
-    'random_reads', 'random_writes', 'random_readwrite']
+    'random_reads', 'random_writes', 'random_readwrite', 'random_writes_zil']
 post =
 tags = ['perf', 'regression']
diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib
index 00326dcdc40776d1f3e83fa76951c93f5176ca8b..e8def35f8408acb138b2b2434542f7c6be835bb7 100644 (file)
@@ -1599,6 +1599,31 @@ function destroy_pool #pool
        return 0
 }
 
+# Return 0 if created successfully; $? otherwise
+#
+# $1 - dataset name
+# $2-n - dataset options
+
+function create_dataset #dataset dataset_options
+{
+       typeset dataset=$1
+
+       shift
+
+       if [[ -z $dataset ]]; then
+               log_note "Missing dataset name."
+               return 1
+       fi
+
+       if datasetexists $dataset ; then
+               destroy_dataset $dataset
+       fi
+
+       log_must zfs create $@ $dataset
+
+       return 0
+}
+
 # Return 0 if destroy successfully or the dataset exists; $? otherwise
 # Note: In local zones, this function should return 0 silently.
 #
diff --git a/tests/zfs-tests/tests/perf/fio/mkfiles.fio b/tests/zfs-tests/tests/perf/fio/mkfiles.fio
index 8289d546de043086244706ba2f1fe50eaedd7a06..c7efda86d3fa07b3216f1293db680d52c64e9a26 100644 (file)
@@ -21,7 +21,7 @@ ioengine=psync
 bs=1024k
 rw=write
 thread=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 numjobs=${NUMJOBS}
 filesize=${FILE_SIZE}
 buffer_compress_percentage=66
diff --git a/tests/zfs-tests/tests/perf/fio/random_reads.fio b/tests/zfs-tests/tests/perf/fio/random_reads.fio
index 25dd2ff838dfe8127d3ed4aad44087b163a76def..79610f9b28aa8692cd89e9301a7f773dd5dfd083 100644 (file)
@@ -10,7 +10,7 @@
 #
 
 #
-# Copyright (c) 2015 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 #
 
 [global]
@@ -21,7 +21,7 @@ overwrite=0
 thread=1
 rw=randread
 time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 runtime=${RUNTIME}
 bs=${BLOCKSIZE}
 ioengine=psync
diff --git a/tests/zfs-tests/tests/perf/fio/random_readwrite.fio b/tests/zfs-tests/tests/perf/fio/random_readwrite.fio
index 07090d4dcd3249904e4ac848b8022a81b6991cc5..7d01c38ada964bf7dd6d26d33bafa154821b60d1 100644 (file)
@@ -23,7 +23,7 @@ thread=1
 rw=randrw
 rwmixread=80
 time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 runtime=${RUNTIME}
 bssplit=4k/50:8k/30:128k/10:1m/10
 ioengine=psync
diff --git a/tests/zfs-tests/tests/perf/fio/random_writes.fio b/tests/zfs-tests/tests/perf/fio/random_writes.fio
index 9233a84260cfd6382158865be43ffe4b6d7fa5a7..5e2cb30026c710610b1ae7e6e5c364489c7d9bc5 100644 (file)
@@ -20,7 +20,7 @@ fallocate=0
 thread=1
 rw=randwrite
 time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 runtime=${RUNTIME}
 bs=${BLOCKSIZE}
 ioengine=psync
diff --git a/tests/zfs-tests/tests/perf/fio/sequential_reads.fio b/tests/zfs-tests/tests/perf/fio/sequential_reads.fio
index b7d9fea5f374742119533eb3914eb3e74ecfd24e..33a9a1d89396e8627fbe7becada44f26cff7b4a2 100644 (file)
@@ -10,7 +10,7 @@
 #
 
 #
-# Copyright (c) 2015 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 #
 
 [global]
@@ -21,7 +21,7 @@ overwrite=0
 thread=1
 rw=read
 time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 runtime=${RUNTIME}
 bs=${BLOCKSIZE}
 ioengine=psync
diff --git a/tests/zfs-tests/tests/perf/fio/sequential_writes.fio b/tests/zfs-tests/tests/perf/fio/sequential_writes.fio
index 0ee6d091db6308bfc9df1b8feb1d85b74bcf61d3..65a65910fd4faec6beaf0566008514f282e9b6ac 100644 (file)
@@ -20,7 +20,7 @@ fallocate=0
 thread=1
 rw=write
 time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
 runtime=${RUNTIME}
 bs=${BLOCKSIZE}
 ioengine=psync
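
A note on the directory substitution above: fio's "directory" option
accepts a colon-separated list of paths, which is what lets a single
${DIRECTORY} value fan IO out over several filesystems. A minimal
sketch, assuming two hypothetical mountpoints:

    # What ${DIRECTORY} might expand to with two filesystems in play;
    # fio distributes its jobs across both paths.
    directory=/perfpool/fs1:/perfpool/fs2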
diff --git a/tests/zfs-tests/tests/perf/perf.shlib b/tests/zfs-tests/tests/perf/perf.shlib
index e1e845ba6568213e889569745791a4c20daf7a41..c851ee32df7d03d05ed6022d837aa52f0426bce9 100644 (file)
@@ -37,6 +37,96 @@ function get_sync_str
        echo $sync_str
 }
 
+function get_suffix
+{
+       typeset threads=$1
+       typeset sync=$2
+       typeset iosize=$3
+
+       typeset sync_str=$(get_sync_str $sync)
+       typeset filesystems=$(get_nfilesystems)
+
+       typeset suffix="$sync_str.$iosize-ios"
+       suffix="$suffix.$threads-threads.$filesystems-filesystems"
+       echo $suffix
+}
+
+function do_fio_run_impl
+{
+       typeset script=$1
+       typeset do_recreate=$2
+       typeset clear_cache=$3
+
+       typeset threads=$4
+       typeset threads_per_fs=$5
+       typeset sync=$6
+       typeset iosize=$7
+
+       typeset sync_str=$(get_sync_str $sync)
+       log_note "Running with $threads $sync_str threads, $iosize ios"
+
+       if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then
+               log_must test $do_recreate
+               verify_threads_per_fs $threads $threads_per_fs
+       fi
+
+       if $do_recreate; then
+               recreate_perf_pool
+
+               #
+               # A value of zero for "threads_per_fs" is "special", and
+               # means a single filesystem should be used, regardless
+               # of the number of threads.
+               #
+               if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then
+                       populate_perf_filesystems $((threads / threads_per_fs))
+               else
+                       populate_perf_filesystems 1
+               fi
+       fi
+
+       if $clear_cache; then
+               # Clear the ARC
+               zpool export $PERFPOOL
+               zpool import $PERFPOOL
+       fi
+
+       if [[ -n $ZINJECT_DELAYS ]]; then
+               apply_zinject_delays
+       else
+               log_note "No per-device commands to execute."
+       fi
+
+       #
+       # Allow this to be overridden by the individual test case. This
+       # can be used to run the FIO job against something other than
+       # the default filesystem (e.g. against a clone).
+       #
+       export DIRECTORY=$(get_directory)
+       log_note "DIRECTORY: " $DIRECTORY
+
+       export RUNTIME=$PERF_RUNTIME
+       export FILESIZE=$((TOTAL_SIZE / threads))
+       export NUMJOBS=$threads
+       export SYNC_TYPE=$sync
+       export BLOCKSIZE=$iosize
+       sync
+
+       # This will be part of the output filename.
+       typeset suffix=$(get_suffix $threads $sync $iosize)
+
+       # Start the data collection
+       do_collect_scripts $suffix
+
+       # Define output file
+       typeset logbase="$(get_perf_output_dir)/$(basename \
+           $SUDO_COMMAND)"
+       typeset outfile="$logbase.fio.$suffix"
+
+       # Start the load
+       log_must fio --output $outfile $FIO_SCRIPTS/$script
+}
+
 #
 # This function will run fio in a loop, according to the .fio file passed
 # in and a number of environment variables. The following variables can be
@@ -56,47 +146,21 @@ function do_fio_run
        typeset script=$1
        typeset do_recreate=$2
        typeset clear_cache=$3
-       typeset threads sync iosize
+       typeset threads threads_per_fs sync iosize
 
        for threads in $PERF_NTHREADS; do
-               for sync in $PERF_SYNC_TYPES; do
-                       for iosize in $PERF_IOSIZES; do
-                               typeset sync_str=$(get_sync_str $sync)
-                               log_note "Running with $threads" \
-                                   "$sync_str threads, $iosize ios"
-
-                               if $do_recreate; then
-                                       recreate_perfpool
-                                       log_must zfs create $PERF_FS_OPTS \
-                                           $TESTFS
-                               fi
-
-                               if $clear_cache; then
-                                       # Clear the ARC
-                                       zpool export $PERFPOOL
-                                       zpool import $PERFPOOL
-                               fi
-
-                               export RUNTIME=$PERF_RUNTIME
-                               export FILESIZE=$((TOTAL_SIZE / threads))
-                               export NUMJOBS=$threads
-                               export SYNC_TYPE=$sync
-                               export BLOCKSIZE=$iosize
-                               sync
-
-                               # Start the data collection
-                               do_collect_scripts $threads $sync $iosize
-
-                               # This will be part of the output filename.
-                               typeset suffix="$sync_str.$iosize-ios.$threads-threads"
-
-                               # Define output file
-                               typeset logbase="$(get_perf_output_dir)/$(basename \
-                                   $SUDO_COMMAND)"
-                               typeset outfile="$logbase.fio.$suffix"
-
-                               # Start the load
-                               log_must fio --output $outfile $FIO_SCRIPTS/$script
+               for threads_per_fs in $PERF_NTHREADS_PER_FS; do
+                       for sync in $PERF_SYNC_TYPES; do
+                               for iosize in $PERF_IOSIZES; do
+                                       do_fio_run_impl \
+                                           $script \
+                                           $do_recreate \
+                                           $clear_cache \
+                                           $threads \
+                                           $threads_per_fs \
+                                           $sync \
+                                           $iosize
+                               done
                        done
                done
        done
@@ -109,17 +173,11 @@ function do_fio_run
 #
 function do_collect_scripts
 {
-       typeset threads=$1
-       typeset sync=$2
-       typeset iosize=$3
+       typeset suffix=$1
 
        [[ -n $collect_scripts ]] || log_fail "No data collection scripts."
        [[ -n $PERF_RUNTIME ]] || log_fail "No runtime specified."
 
-       # This will be part of the output filename.
-       typeset sync_str=$(get_sync_str $sync)
-       typeset suffix="$sync_str.$iosize-ios.$threads-threads"
-
        # Add in user supplied scripts and logfiles, if any.
        typeset oIFS=$IFS
        IFS=','
@@ -152,23 +210,122 @@ function get_perf_output_dir
        echo $dir
 }
 
+function apply_zinject_delays
+{
+       typeset idx=0
+       while [[ $idx -lt "${#ZINJECT_DELAYS[@]}" ]]; do
+               [[ -n ${ZINJECT_DELAYS[$idx]} ]] || \
+                   log_must "No zinject delay found at index: $idx"
+
+               for disk in $DISKS; do
+                       log_must zinject \
+                           -d $disk -D ${ZINJECT_DELAYS[$idx]} $PERFPOOL
+               done
+
+               ((idx += 1))
+       done
+}
+
+function clear_zinject_delays
+{
+       log_must zinject -c all
+}
+
 #
-# Destroy and create the pool used for performance tests. The
-# PERFPOOL_CREATE_CMD variable allows users to test with a custom pool
-# configuration by specifying the pool creation command in their environment.
-# If PERFPOOL_CREATE_CMD is empty, a pool using all available disks is created.
+# Destroy and create the pool used for performance tests.
 #
-function recreate_perfpool
+function recreate_perf_pool
 {
        [[ -n $PERFPOOL ]] || log_fail "The \$PERFPOOL variable isn't set."
 
-       poolexists $PERFPOOL && destroy_pool $PERFPOOL
+       #
+       # In case there's been some "leaked" zinject delays, or if the
+       # performance test injected some delays itself, we clear all
+       # delays before attempting to destroy the pool. Each delay
+       # places a hold on the pool, so the destroy will fail if there
+       # are any outstanding delays.
+       #
+       clear_zinject_delays
+
+       #
+       # This function handles the case where the pool already exists,
+       # and will destroy the previous pool and recreate a new pool.
+       #
+       create_pool $PERFPOOL $DISKS
+}
 
-       if [[ -n $PERFPOOL_CREATE_CMD ]]; then
-               log_must $PERFPOOL_CREATE_CMD
-       else
-               log_must eval "zpool create -f $PERFPOOL $DISKS"
-       fi
+function verify_threads_per_fs
+{
+       typeset threads=$1
+       typeset threads_per_fs=$2
+
+       log_must test -n $threads
+       log_must test -n $threads_per_fs
+
+       #
+       # A value of "0" is treated as a "special value", and it is
+       # interpreted to mean all threads will run using a single
+       # filesystem.
+       #
+       [[ $threads_per_fs -eq 0 ]] && return
+
+       #
+       # The number of threads per filesystem must be a value greater
+       # than or equal to zero; since we just verified the value isn't
+       # 0 above, then it must be greater than zero here.
+       #
+       log_must test $threads_per_fs -ge 0
+
+       #
+       # This restriction can be lifted later if needed, but for now,
+       # we restrict the number of threads per filesystem to a value
+       # that evenly divides the thread count. This way, the threads
+       # will be evenly distributed over all the filesystems.
+       #
+       log_must test $((threads % threads_per_fs)) -eq 0
+}
+
+function populate_perf_filesystems
+{
+       typeset nfilesystems=${1:-1}
+
+       export TESTFS=""
+       for i in $(seq 1 $nfilesystems); do
+               typeset dataset="$PERFPOOL/fs$i"
+               create_dataset $dataset $PERF_FS_OPTS
+               if [[ -z "$TESTFS" ]]; then
+                       TESTFS="$dataset"
+               else
+                       TESTFS="$TESTFS $dataset"
+               fi
+       done
+}
+
+function get_nfilesystems
+{
+       typeset filesystems=( $TESTFS )
+       echo ${#filesystems[@]}
+}
+
+function get_directory
+{
+       typeset filesystems=( $TESTFS )
+       typeset directory=
+
+       typeset idx=0
+       while [[ $idx -lt "${#filesystems[@]}" ]]; do
+               mountpoint=$(get_prop mountpoint "${filesystems[$idx]}")
+
+               if [[ -n $directory ]]; then
+                       directory=$directory:$mountpoint
+               else
+                       directory=$mountpoint
+               fi
+
+               ((idx += 1))
+       done
+
+       echo $directory
 }
 
 function get_max_arc_size
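
To make the new helpers concrete, a worked example (pool and mountpoint
names are hypothetical): with PERF_NTHREADS=16 and PERF_NTHREADS_PER_FS=4,
do_fio_run_impl calls populate_perf_filesystems with 16 / 4 = 4, so that:

    TESTFS="perfpool/fs1 perfpool/fs2 perfpool/fs3 perfpool/fs4"
    get_nfilesystems    # prints 4
    get_directory       # prints /perfpool/fs1:/perfpool/fs2:/perfpool/fs3:/perfpool/fs4

A threads_per_fs value of 0 preserves the old behavior: a single
filesystem, regardless of the thread count.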
diff --git a/tests/zfs-tests/tests/perf/regression/Makefile.am b/tests/zfs-tests/tests/perf/regression/Makefile.am
index c0419949d4cb4d2cd27352c521cdeb17aec1c6d0..4f045880f0f6570392d0e2708994ecd165ad320e 100644 (file)
@@ -3,6 +3,7 @@ dist_pkgdata_SCRIPTS = \
        random_reads.ksh \
        random_readwrite.ksh \
        random_writes.ksh \
+       random_writes_zil.ksh \
        sequential_reads_arc_cached_clone.ksh \
        sequential_reads_arc_cached.ksh \
        sequential_reads_dbuf_cached.ksh \
diff --git a/tests/zfs-tests/tests/perf/regression/random_reads.ksh b/tests/zfs-tests/tests/perf/regression/random_reads.ksh
index 5bf269a859db25a75ca096611a84a29e34501dd6..f4e3336964cf06f1befa0368a0a4af7689d5bd7d 100755 (executable)
 function cleanup
 {
        # kill fio and iostat
-       pkill ${fio##*/}
-       pkill ${iostat##*/}
-       log_must_busy zfs destroy $TESTFS
-       log_must_busy zpool destroy $PERFPOOL
+       pkill fio
+       pkill iostat
+       recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during random read load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
 
 # Variables for use by fio.
 if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
 fi
@@ -79,6 +77,7 @@ fi
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
 # Set up the scripts and output files that will log performance data.
@@ -88,12 +87,20 @@ if is_linux; then
        typeset perf_record_cmd="perf record -F 99 -a -g -q \
            -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-       export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-           "vmstat 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1"
-           "iostat" "$perf_record_cmd" "perf")
+       export collect_scripts=(
+           "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+           "vmstat 1" "vmstat"
+           "mpstat -P ALL 1" "mpstat"
+           "iostat -dxyz 1" "iostat"
+           "$perf_record_cmd" "perf"
+       )
 else
-       export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-           "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+       export collect_scripts=(
+           "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+           "vmstat 1" "vmstat"
+           "mpstat 1" "mpstat"
+           "iostat -xcnz 1" "iostat"
+       )
 fi
 
 log_note "Random reads with $PERF_RUNTYPE settings"
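
The ${PERF_NTHREADS:-'8 16 32 64'} idiom above is ordinary ksh parameter
expansion: the right-hand default applies only when the variable is unset
or empty, so each knob can be overridden from the environment. A
hypothetical invocation (shown directly for illustration; in practice
these scripts run under the test runner):

    # Run the nightly profile, but force one thread count and a short
    # runtime; all other knobs keep their scripted defaults.
    PERF_REGRESSION_NIGHTLY=1 PERF_NTHREADS='32' PERF_RUNTIME=60 \
        ./random_reads.ksh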
diff --git a/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh b/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh
index e6d6e3a11d30ac3ed3ad091abe6e6b80388cdddd..00dd070254e2ebf8a29c77d15ea26b3d7098bed1 100755 (executable)
 function cleanup
 {
        # kill fio and iostat
-       pkill ${fio##*/}
-       pkill ${iostat##*/}
-       log_must_busy zfs destroy $TESTFS
-       log_must_busy zpool destroy $PERFPOOL
+       pkill fio
+       pkill iostat
+       recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during random read-write load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
 
 # Variables for use by fio.
 if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'4 8 16 64'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
        export PERF_IOSIZES=''          # bssplit used instead
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'32 64'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=''          # bssplit used instead
 fi
@@ -79,6 +77,7 @@ fi
 # a subset of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
 # Set up the scripts and output files that will log performance data.
@@ -88,12 +87,20 @@ if is_linux; then
        typeset perf_record_cmd="perf record -F 99 -a -g -q \
            -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-       export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-           "vmstat 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1"
-           "iostat" "$perf_record_cmd" "perf")
+       export collect_scripts=(
+           "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+           "vmstat 1" "vmstat"
+           "mpstat -P ALL 1" "mpstat"
+           "iostat -dxyz 1" "iostat"
+           "$perf_record_cmd" "perf"
+       )
 else
-       export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-           "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+       export collect_scripts=(
+           "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+           "vmstat 1" "vmstat"
+           "mpstat 1" "mpstat"
+           "iostat -xcnz 1" "iostat"
+       )
 fi
 
 log_note "Random reads and writes with $PERF_RUNTYPE settings"
diff --git a/tests/zfs-tests/tests/perf/regression/random_writes.ksh b/tests/zfs-tests/tests/perf/regression/random_writes.ksh
index d85a3d98a1a8e5293f900c4f819f5b193e0c44dd..c84f96506e3a2ce39bbc11105f86756da2a3d0e5 100755 (executable)
 function cleanup
 {
        # kill fio and iostat
-       pkill ${fio##*/}
-       pkill ${iostat##*/}
-       log_must_busy zfs destroy $TESTFS
-       log_must_busy zpool destroy $PERFPOOL
+       pkill fio
+       pkill iostat
+       recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during random write load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
 
 # Variables for use by fio.
 if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'32 128'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
 fi
@@ -80,12 +78,20 @@ if is_linux; then
        typeset perf_record_cmd="perf record -F 99 -a -g -q \
            -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-       export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-           "vmstat 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1"
-           "iostat" "$perf_record_cmd" "perf")
+       export collect_scripts=(
+           "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+           "vmstat 1" "vmstat"
+           "mpstat -P ALL 1" "mpstat"
+           "iostat -dxyz 1" "iostat"
+           "$perf_record_cmd" "perf"
+       )
 else
-       export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-           "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+       export collect_scripts=(
+           "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+           "vmstat 1" "vmstat"
+           "mpstat 1" "mpstat"
+           "iostat -xcnz 1" "iostat"
+       )
 fi
 
 log_note "Random writes with $PERF_RUNTYPE settings"
diff --git a/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh b/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh
new file mode 100755 (executable)
index 0000000..4f2a496
--- /dev/null
@@ -0,0 +1,90 @@
+#!/usr/bin/ksh
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source.  A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/perf/perf.shlib
+
+function cleanup
+{
+       # kill fio and iostat
+       pkill fio
+       pkill iostat
+
+       #
+       # We're using many filesystems depending on the number of
+       # threads for each test, and there's no good way to get a list
+       # of all the filesystems that should be destroyed on cleanup
+       # (i.e. the list of filesystems used for the last test ran).
+       # Thus, we simply recreate the pool as a way to destroy all
+       # filesystems and leave a fresh pool behind.
+       #
+       recreate_perf_pool
+}
+
+trap "log_fail \"Measure IO stats during random write load\"" SIGTERM
+log_onexit cleanup
+
+recreate_perf_pool
+
+# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
+
+if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
+       export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
+       export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
+       export PERF_NTHREADS=${PERF_NTHREADS:-'1 2 4 8 16 32 64 128'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
+       export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
+       export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
+
+elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
+       export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
+       export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
+       export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 16 64'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
+       export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
+       export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
+fi
+
+lun_list=$(pool_to_lun_list $PERFPOOL)
+log_note "Collecting backend IO stats with lun list $lun_list"
+if is_linux; then
+       typeset perf_record_cmd="perf record -F 99 -a -g -q \
+           -o /dev/stdout -- sleep ${PERF_RUNTIME}"
+
+       export collect_scripts=(
+           "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+           "vmstat 1" "vmstat"
+           "mpstat -P ALL 1" "mpstat"
+           "iostat -dxyz 1" "iostat"
+           "$perf_record_cmd" "perf"
+       )
+else
+       export collect_scripts=(
+           "kstat zfs:0 1" "kstat"
+           "vmstat -T d 1" "vmstat"
+           "mpstat -T d 1" "mpstat"
+           "iostat -T d -xcnz 1" "iostat"
+           "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+           "dtrace  -s $PERF_SCRIPTS/zil.d $PERFPOOL 1" "zil"
+           "dtrace  -s $PERF_SCRIPTS/profile.d" "profile"
+           "dtrace  -s $PERF_SCRIPTS/offcpu-profile.d" "offcpu-profile"
+       )
+fi
+log_note "ZIL specific random write workload with $PERF_RUNTYPE settings"
+do_fio_run random_writes.fio true false
+log_pass "Measure IO stats during ZIL specific random write workload"
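
Note that random_writes_zil.ksh itself exports no ZINJECT_DELAYS, so by
default no delays are injected; the machinery is opt-in. A hedged sketch
of what apply_zinject_delays ends up running when a delay is supplied
(device and pool names hypothetical):

    # Each injected delay places a hold on the pool; clear_zinject_delays
    # releases them all before the pool is destroyed.
    zinject -d sdb -D 25:1 perfpool
    zinject -c all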
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
index a9c62fe5a977e3791ef007d98e94c095ea637e6b..93c109e73bbad273d1bf7e6a5b2df4f0537ae150 100755 (executable)
 function cleanup
 {
        # kill fio and iostat
-       pkill ${fio##*/}
-       pkill ${iostat##*/}
-       log_must_busy zfs destroy $TESTFS
-       log_must_busy zpool destroy $PERFPOOL
+       pkill fio
+       pkill iostat
+       recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during sequential read load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
 
 # Variables for use by fio.
 if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
 fi
@@ -79,6 +77,7 @@ fi
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
 # Set up the scripts and output files that will log performance data.
@@ -88,14 +87,22 @@ if is_linux; then
     typeset perf_record_cmd="perf record -F 99 -a -g -q \
         -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-       export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-           "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "vmstat 1"
-           "vmstat" "mpstat  -P ALL 1" "mpstat" "iostat -dxyz 1" "iostat"
-           "$perf_record_cmd" "perf")
+       export collect_scripts=(
+           "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+           "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
+           "vmstat 1" "vmstat"
+           "mpstat  -P ALL 1" "mpstat"
+           "iostat -dxyz 1" "iostat"
+           "$perf_record_cmd" "perf"
+       )
 else
-       export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-           "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "vmstat 1" "vmstat"
-           "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+       export collect_scripts=(
+           "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+           "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+           "vmstat 1" "vmstat"
+           "mpstat 1" "mpstat"
+           "iostat -xcnz 1" "iostat"
+       )
 fi
 
 log_note "Sequential reads with $PERF_RUNTYPE settings"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
index 6622ac9737c496e4e51eae7e9a94af4d133ff0a3..5ef95c0e0d61d607dac6501ed0246e6c951943e1 100755 (executable)
 function cleanup
 {
        # kill fio and iostat
-       pkill ${fio##*/}
-       pkill ${iostat##*/}
-       log_must_busy zfs destroy $TESTFS
-       log_must_busy zpool destroy $PERFPOOL
+       pkill fio
+       pkill iostat
+       recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during sequential read load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
 export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
@@ -54,12 +50,14 @@ if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
 fi
@@ -69,6 +67,7 @@ fi
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
 # Set up the scripts and output files that will log performance data.
@@ -78,14 +77,22 @@ if is_linux; then
        typeset perf_record_cmd="perf record -F 99 -a -g -q \
            -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-       export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-           "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "vmstat 1"
-           "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1" "iostat"
-           "$perf_record_cmd" "perf")
+       export collect_scripts=(
+           "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+           "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
+           "vmstat 1" "vmstat"
+           "mpstat -P ALL 1" "mpstat"
+           "iostat -dxyz 1" "iostat"
+           "$perf_record_cmd" "perf"
+       )
 else
-       export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-           "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "vmstat 1" "vmstat"
-           "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+       export collect_scripts=(
+           "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+           "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+           "vmstat 1" "vmstat"
+           "mpstat 1" "mpstat"
+           "iostat -xcnz 1" "iostat"
+       )
 fi
 
 log_note "Sequential cached reads with $PERF_RUNTYPE settings"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
index 9ed0e4792dd371e3d9d01d46916d0c1b6fa792e8..60f5d750d8add25ce2124cac33130252f7e415e9 100755 (executable)
 function cleanup
 {
        # kill fio and iostat
-       pkill ${fio##*/}
-       pkill ${iostat##*/}
-       log_must_busy zfs destroy $TESTFS
-       log_must_busy zpool destroy $PERFPOOL
+       pkill fio
+       pkill iostat
+       recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
-
-log_assert "Measure IO stats during sequential read load"
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
 export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
@@ -60,12 +56,14 @@ if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
 fi
@@ -75,15 +73,26 @@ fi
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
+#
+# Only a single filesystem is used by this test. To be defensive, we
+# double check that TESTFS only contains a single filesystem. We
+# wouldn't want to assume this was the case, and have it actually
+# contain multiple filesystem (causing cascading failures later).
+#
+log_must test $(get_nfilesystems) -eq 1
+
 log_note "Creating snapshot, $TESTSNAP, of $TESTFS"
 create_snapshot $TESTFS $TESTSNAP
 log_note "Creating clone, $PERFPOOL/$TESTCLONE, from $TESTFS@$TESTSNAP"
 create_clone $TESTFS@$TESTSNAP $PERFPOOL/$TESTCLONE
 
 #
-# Reset the TESTFS to point to the clone
+# We want to run FIO against the clone we created above, and not the
+# clone's originating filesystem. Thus, we override the default behavior
+# and explicitly set TESTFS to the clone.
 #
 export TESTFS=$PERFPOOL/$TESTCLONE
 
@@ -94,16 +103,24 @@ if is_linux; then
        typeset perf_record_cmd="perf record -F 99 -a -g -q \
            -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-       export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-           "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "vmstat 1"
-           "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1" "iostat"
-           "$perf_record_cmd" "perf")
+       export collect_scripts=(
+           "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+           "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
+           "vmstat 1" "vmstat"
+           "mpstat -P ALL 1" "mpstat"
+           "iostat -dxyz 1" "iostat"
+           "$perf_record_cmd" "perf"
+       )
 else
-       export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-           "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch" "vmstat 1" "vmstat"
-           "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+       export collect_scripts=(
+           "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+           "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+           "vmstat 1" "vmstat"
+           "mpstat 1" "mpstat"
+           "iostat -xcnz 1" "iostat"
+       )
 fi
 
-log_note "Sequential cached reads from $TESTFS with $PERF_RUNTYPE settings"
+log_note "Sequential cached reads from $DIRECTORY with $PERF_RUNTYPE settings"
 do_fio_run sequential_reads.fio false false
 log_pass "Measure IO stats during sequential cached read load"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
index edb7a96c99f26fa16b5e57a70f4d60e7cf022a4d..d49da6057c8348df74eb3fafa45c7aff46fc072e 100755 (executable)
 function cleanup
 {
        # kill fio and iostat
-       pkill ${fio##*/}
-       pkill ${iostat##*/}
-       log_must_busy zfs destroy $TESTFS
-       log_must_busy zpool destroy $PERFPOOL
+       pkill fio
+       pkill iostat
+       recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during sequential read load\"" SIGTERM
 log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Ensure the working set can be cached in the dbuf cache.
 export TOTAL_SIZE=$(($(get_max_dbuf_cache_size) * 3 / 4))
@@ -56,12 +54,14 @@ if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'64'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'64k'}
 fi
@@ -71,6 +71,7 @@ fi
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 
 # Set up the scripts and output files that will log performance data.
@@ -80,16 +81,24 @@ if is_linux; then
        typeset perf_record_cmd="perf record -F 99 -a -g -q \
            -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-       export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-           "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch" "vmstat 1"
-           "vmstat" "mpstat  -P ALL 1" "mpstat" "iostat -dxyz 1" "iostat"
-           "$perf_record_cmd" "perf")
+       export collect_scripts=(
+           "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+           "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
+           "vmstat 1" "vmstat"
+           "mpstat  -P ALL 1" "mpstat"
+           "iostat -dxyz 1" "iostat"
+           "$perf_record_cmd" "perf"
+       )
 else
-       export collect_scripts=("kstat zfs:0 1" "kstat" "vmstat -T d 1" "vmstat"
-           "mpstat -T d 1" "mpstat" "iostat -T d -xcnz 1" "iostat"
+       export collect_scripts=(
+           "kstat zfs:0 1" "kstat"
+           "vmstat -T d 1" "vmstat"
+           "mpstat -T d 1" "mpstat"
+           "iostat -T d -xcnz 1" "iostat"
            "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
            "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
-           "dtrace -s $PERF_SCRIPTS/profile.d" "profile")
+           "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+       )
 fi
 
 log_note "Sequential cached reads with $PERF_RUNTYPE settings"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh b/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
index 01ab80d4a81038c772f9eecd16afdd4348d885e3..e6d9ec2d5cc0c94fde5c2dd81ecf66476c8c50e5 100755 (executable)
 . $STF_SUITE/include/libtest.shlib
 . $STF_SUITE/tests/perf/perf.shlib
 
-log_assert "Measure IO stats during sequential write load"
-log_onexit cleanup
-
 function cleanup
 {
        # kill fio and iostat
-       pkill ${fio##*/}
-       pkill ${iostat##*/}
-       log_must_busy zfs destroy $TESTFS
-       log_must_busy zpool destroy $PERFPOOL
+       pkill fio
+       pkill iostat
+       recreate_perf_pool
 }
 
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
+log_onexit cleanup
 
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
 
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
 
 # Variables for use by fio.
 if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
 elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
        export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
        export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
        export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
+       export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
        export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
        export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
 fi
@@ -80,12 +78,20 @@ if is_linux; then
        typeset perf_record_cmd="perf record -F 99 -a -g -q \
            -o /dev/stdout -- sleep ${PERF_RUNTIME}"
 
-       export collect_scripts=("zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
-           "vmstat 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -dxyz 1"
-           "iostat" "$perf_record_cmd" "perf")
+       export collect_scripts=(
+           "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
+           "vmstat 1" "vmstat"
+           "mpstat -P ALL 1" "mpstat"
+           "iostat -dxyz 1" "iostat"
+           "$perf_record_cmd" "perf"
+       )
 else
-       export collect_scripts=("$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
-           "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat")
+       export collect_scripts=(
+           "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+           "vmstat 1" "vmstat"
+           "mpstat 1" "mpstat"
+           "iostat -xcnz 1" "iostat"
+       )
 fi
 
 log_note "Sequential writes with $PERF_RUNTYPE settings"