4 # The contents of this file are subject to the terms of the
5 # Common Development and Distribution License (the "License").
6 # You may not use this file except in compliance with the License.
8 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 # or http://www.opensolaris.org/os/licensing.
10 # See the License for the specific language governing permissions
11 # and limitations under the License.
13 # When distributing Covered Code, include this CDDL HEADER in each
14 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 # If applicable, add the following below this CDDL HEADER, with the
16 # fields enclosed by brackets "[]" replaced with your own identifying
17 # information: Portions Copyright [yyyy] [name of copyright owner]
23 # Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
24 # Copyright (c) 2012, 2020, Delphix. All rights reserved.
25 # Copyright (c) 2017, Tim Chase. All rights reserved.
26 # Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
27 # Copyright (c) 2017, Lawrence Livermore National Security LLC.
28 # Copyright (c) 2017, Datto Inc. All rights reserved.
29 # Copyright (c) 2017, Open-E Inc. All rights reserved.
30 # Copyright (c) 2021, The FreeBSD Foundation.
31 # Use is subject to license terms.
34 . ${STF_TOOLS}/include/logapi.shlib
35 . ${STF_SUITE}/include/math.shlib
36 . ${STF_SUITE}/include/blkdev.shlib
38 . ${STF_SUITE}/include/tunables.cfg
41 # Apply constrained path when available. This is required since the
42 # PATH may have been modified by sudo's secure_path behavior.
44 if [ -n "$STF_PATH" ]; then
45 export PATH="$STF_PATH"
49 # Generic dot version comparison function
51 # Returns success when version $1 is greater than or equal to $2.
53 function compare_version_gte
55 if [[ "$(printf "$1\n$2" | sort -V | tail -n1)" == "$1" ]]; then
62 # Linux kernel version comparison function
64 # $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
66 # Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
68 function linux_version
# Encode a dotted Linux kernel version as one integer so callers can
# compare versions numerically: version*10000 + major*100 + minor.
# If $ver is empty, derive it from the running kernel (the "x.y.z"
# prefix of `uname -r`).
72 [[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
# Split the dotted string into its three numeric fields.
74 typeset version=$(echo $ver | cut -d '.' -f 1)
75 typeset major=$(echo $ver | cut -d '.' -f 2)
76 typeset minor=$(echo $ver | cut -d '.' -f 3)
# Missing fields count as zero (e.g. "4.10" has minor 0).
78 [[ -z "$version" ]] && version=0
79 [[ -z "$major" ]] && major=0
80 [[ -z "$minor" ]] && minor=0
# Print the comparable integer on stdout.
82 echo $((version * 10000 + major * 100 + minor))
85 # Determine if this is a Linux test system
87 # Return 0 if platform Linux, 1 if otherwise
91 [ $(uname) = "Linux" ]
94 # Determine if this is an illumos test system
96 # Return 0 if platform illumos, 1 if otherwise
99 [ $(uname) = "illumos" ]
102 # Determine if this is a FreeBSD test system
104 # Return 0 if platform FreeBSD, 1 if otherwise
108 [ $(uname) = "FreeBSD" ]
111 # Determine if this is a DilOS test system
113 # Return 0 if platform DilOS, 1 if otherwise
118 [[ -f /etc/os-release ]] && . /etc/os-release
122 # Determine if this is a 32-bit system
124 # Return 0 if platform is 32-bit, 1 if otherwise
128 [ $(getconf LONG_BIT) = "32" ]
131 # Determine if kmemleak is enabled
133 # Return 0 if kmemleak is enabled, 1 if otherwise
137 is_linux && [ -e /sys/kernel/debug/kmemleak ]
140 # Determine whether a dataset is mounted
143 # $2 filesystem type; optional - defaulted to zfs
145 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
150 [[ -z $fstype ]] && fstype=zfs
151 typeset out dir name ret
155 if [[ "$1" == "/"* ]] ; then
156 for out in $(zfs mount | awk '{print $2}'); do
157 [[ $1 == $out ]] && return 0
160 for out in $(zfs mount | awk '{print $1}'); do
161 [[ $1 == $out ]] && return 0
167 mount -pt $fstype | while read dev dir _t _flags; do
168 [[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
171 out=$(df -F $fstype $1 2>/dev/null)
173 (($ret != 0)) && return $ret
181 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
185 out=$(df -t $fstype $1 2>/dev/null)
189 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
190 link=$(readlink -f $ZVOL_DEVDIR/$1)
191 [[ -n "$link" ]] && \
192 mount | grep -q "^$link" && \
201 # Return 0 if a dataset is mounted; 1 otherwise
204 # $2 filesystem type; optional - defaulted to zfs
209 (($? == 0)) && return 0
213 # Return 0 if a dataset is unmounted; 1 otherwise
216 # $2 filesystem type; optional - defaulted to zfs
221 (($? == 1)) && return 0
234 function default_setup
236 default_setup_noexit "$@"
241 function default_setup_no_mountpoint
243 default_setup_noexit "$1" "$2" "$3" "yes"
249 # Given a list of disks, setup storage pools and datasets.
251 function default_setup_noexit
256 typeset no_mountpoint=$4
257 log_note begin default_setup_noexit
259 if is_global_zone; then
260 if poolexists $TESTPOOL ; then
261 destroy_pool $TESTPOOL
263 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
264 log_must zpool create -f $TESTPOOL $disklist
269 rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
270 mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
272 log_must zfs create $TESTPOOL/$TESTFS
273 if [[ -z $no_mountpoint ]]; then
274 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
277 if [[ -n $container ]]; then
278 rm -rf $TESTDIR1 || \
279 log_unresolved Could not remove $TESTDIR1
280 mkdir -p $TESTDIR1 || \
281 log_unresolved Could not create $TESTDIR1
283 log_must zfs create $TESTPOOL/$TESTCTR
284 log_must zfs set canmount=off $TESTPOOL/$TESTCTR
285 log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
286 if [[ -z $no_mountpoint ]]; then
287 log_must zfs set mountpoint=$TESTDIR1 \
288 $TESTPOOL/$TESTCTR/$TESTFS1
292 if [[ -n $volume ]]; then
293 if is_global_zone ; then
294 log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
297 log_must zfs create $TESTPOOL/$TESTVOL
303 # Given a list of disks, setup a storage pool, file system and
306 function default_container_setup
310 default_setup "$disklist" "true"
314 # Given a list of disks, setup a storage pool,file system
317 function default_volume_setup
321 default_setup "$disklist" "" "true"
325 # Given a list of disks, setup a storage pool,file system,
326 # a container and a volume.
328 function default_container_volume_setup
332 default_setup "$disklist" "true" "true"
336 # Create a snapshot on a filesystem or volume. By default, create a snapshot on
339 # $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
340 # $2 snapshot name. Default, $TESTSNAP
342 function create_snapshot
# Create snapshot $2 (default $TESTSNAP) on dataset $1 (default
# $TESTPOOL/$TESTFS).  Fails the test if the snapshot already exists
# or the target dataset does not exist.
344 typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
345 typeset snap=${2:-$TESTSNAP}
# Sanity-check the (possibly defaulted) names before using them.
347 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
348 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
# Refuse to clobber an existing snapshot of the same name.
350 if snapexists $fs_vol@$snap; then
351 log_fail "$fs_vol@$snap already exists."
# The dataset being snapshotted must exist.
353 datasetexists $fs_vol || \
354 log_fail "$fs_vol must exist."
356 log_must zfs snapshot $fs_vol@$snap
360 # Create a clone from a snapshot, default clone name is $TESTCLONE.
362 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
363 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
365 function create_clone # snapshot clone
367 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
368 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
371 log_fail "Snapshot name is undefined."
373 log_fail "Clone name is undefined."
375 log_must zfs clone $snap $clone
379 # Create a bookmark of the given snapshot. By default, create a bookmark on
382 # $1 Existing filesystem or volume name. Default, $TESTFS
383 # $2 Existing snapshot name. Default, $TESTSNAP
384 # $3 bookmark name. Default, $TESTBKMARK
386 function create_bookmark
# Create bookmark $3 (default $TESTBKMARK) from snapshot $2 (default
# $TESTSNAP) of dataset $1 (default $TESTFS).  Fails the test if the
# bookmark already exists, or if the dataset or snapshot is missing.
388 typeset fs_vol=${1:-$TESTFS}
389 typeset snap=${2:-$TESTSNAP}
390 typeset bkmark=${3:-$TESTBKMARK}
# Sanity-check the (possibly defaulted) names before using them.
392 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
393 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
394 [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
# Refuse to clobber an existing bookmark of the same name.
396 if bkmarkexists $fs_vol#$bkmark; then
397 log_fail "$fs_vol#$bkmark already exists."
# Both the dataset and the source snapshot must exist.
399 datasetexists $fs_vol || \
400 log_fail "$fs_vol must exist."
401 snapexists $fs_vol@$snap || \
402 log_fail "$fs_vol@$snap must exist."
404 log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
408 # Create a temporary clone result of an interrupted resumable 'zfs receive'
409 # $1 Destination filesystem name. Must not exist, will be created as the result
410 # of this function along with its %recv temporary clone
411 # $2 Source filesystem name. Must not exist, will be created and destroyed
413 function create_recv_clone
# Manufacture an interrupted resumable receive: send a full stream into
# $recvfs, then truncate an incremental stream with dd so that a
# resumable 'zfs recv -s' fails partway, leaving the $recvfs/%recv
# temporary clone behind.
416 typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
417 typeset snap="$sendfs@snap1"
418 typeset incr="$sendfs@snap2"
419 typeset mountpoint="$TESTDIR/create_recv_clone"
420 typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
422 [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
# Both datasets are created (and the source later destroyed) by this
# function, so neither may exist beforehand.
424 datasetexists $recvfs && log_fail "Recv filesystem must not exist."
425 datasetexists $sendfs && log_fail "Send filesystem must not exist."
427 log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
428 log_must zfs snapshot $snap
429 log_must eval "zfs send $snap | zfs recv -u $recvfs"
# Add data so the incremental stream is larger than the 10K we keep.
430 log_must mkfile 1m "$mountpoint/data"
431 log_must zfs snapshot $incr
# Keep only the first 10K of the incremental stream; receiving this
# truncated stream is expected to fail.
432 log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
433 iflag=fullblock > $sendfile"
434 log_mustnot eval "zfs recv -su $recvfs < $sendfile"
# The send side is no longer needed once the partial receive exists.
435 destroy_dataset "$sendfs" "-r"
436 log_must rm -f "$sendfile"
# Verify the interrupted receive really left a resumable, inconsistent
# %recv clone behind.
438 if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
439 log_fail "Error creating temporary $recvfs/%recv clone"
443 function default_mirror_setup
445 default_mirror_setup_noexit $1 $2 $3
451 # Given a pair of disks, set up a storage pool and dataset for the mirror
452 # @parameters: $1 the primary side of the mirror
453 # $2 the secondary side of the mirror
454 # @uses: ZPOOL ZFS TESTPOOL TESTFS
455 function default_mirror_setup_noexit
457 readonly func="default_mirror_setup_noexit"
461 [[ -z $primary ]] && \
462 log_fail "$func: No parameters passed"
463 [[ -z $secondary ]] && \
464 log_fail "$func: No secondary partition passed"
465 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
466 log_must zpool create -f $TESTPOOL mirror $@
467 log_must zfs create $TESTPOOL/$TESTFS
468 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
472 # create a number of mirrors.
473 # We create a number($1) of 2 way mirrors using the pairs of disks named
474 # on the command line. These mirrors are *not* mounted
475 # @parameters: $1 the number of mirrors to create
476 # $... the devices to use to create the mirrors on
477 # @uses: ZPOOL ZFS TESTPOOL
478 function setup_mirrors
# Create $1 two-way mirror pools named ${TESTPOOL}<n>, consuming pairs
# of disk arguments from the command line.  The pools are not mounted.
480 typeset -i nmirrors=$1
483 while ((nmirrors > 0)); do
# Each iteration requires two disk arguments.
484 log_must test -n "$1" -a -n "$2"
# Remove any stale pool directory before reusing the name.
485 [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
486 log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
# NOTE(review): a line elided from this view presumably shifts to the
# next disk pair here -- confirm against the full source.
488 ((nmirrors = nmirrors - 1))
493 # create a number of raidz pools.
494 # We create a number($1) of 2 raidz pools using the pairs of disks named
495 # on the command line. These pools are *not* mounted
496 # @parameters: $1 the number of pools to create
497 # $... the devices to use to create the pools on
498 # @uses: ZPOOL ZFS TESTPOOL
499 function setup_raidzs
501 typeset -i nraidzs=$1
504 while ((nraidzs > 0)); do
505 log_must test -n "$1" -a -n "$2"
506 [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
507 log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
509 ((nraidzs = nraidzs - 1))
514 # Destroy the configured testpool mirrors.
515 # the mirrors are of the form ${TESTPOOL}{number}
516 # @uses: ZPOOL ZFS TESTPOOL
517 function destroy_mirrors
519 default_cleanup_noexit
524 function default_raidz_setup
526 default_raidz_setup_noexit "$*"
532 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
533 # $1 the list of disks
535 function default_raidz_setup_noexit
537 typeset disklist="$*"
538 disks=(${disklist[*]})
540 if [[ ${#disks[*]} -lt 2 ]]; then
541 log_fail "A raid-z requires a minimum of two disks."
544 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
545 log_must zpool create -f $TESTPOOL raidz $disklist
546 log_must zfs create $TESTPOOL/$TESTFS
547 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
551 # Common function used to cleanup storage pools and datasets.
553 # Invoked at the start of the test suite to ensure the system
554 # is in a known state, and also at the end of each set of
555 # sub-tests to ensure errors from one set of tests doesn't
556 # impact the execution of the next set.
558 function default_cleanup
560 default_cleanup_noexit
566 # Utility function used to list all available pool names.
568 # NOTE: $KEEP is a variable containing pool names, separated by a newline
569 # character, that must be excluded from the returned list.
571 function get_all_pools
573 zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
576 function default_cleanup_noexit
580 # Destroying the pool will also destroy any
581 # filesystems it contains.
583 if is_global_zone; then
584 zfs unmount -a > /dev/null 2>&1
585 ALL_POOLS=$(get_all_pools)
586 # Here, we loop through the pools we're allowed to
587 # destroy, only destroying them if it's safe to do
589 while [ ! -z ${ALL_POOLS} ]
591 for pool in ${ALL_POOLS}
593 if safe_to_destroy_pool $pool ;
598 ALL_POOLS=$(get_all_pools)
604 for fs in $(zfs list -H -o name \
605 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
606 destroy_dataset "$fs" "-Rf"
609 # Need cleanup here to avoid garbage dir left.
610 for fs in $(zfs list -H -o name); do
611 [[ $fs == /$ZONE_POOL ]] && continue
612 [[ -d $fs ]] && log_must rm -rf $fs/*
616 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
619 for fs in $(zfs list -H -o name); do
620 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
621 log_must zfs set reservation=none $fs
622 log_must zfs set recordsize=128K $fs
623 log_must zfs set mountpoint=/$fs $fs
625 enc=$(get_prop encryption $fs)
626 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
627 [[ "$enc" == "off" ]]; then
628 log_must zfs set checksum=on $fs
630 log_must zfs set compression=off $fs
631 log_must zfs set atime=on $fs
632 log_must zfs set devices=off $fs
633 log_must zfs set exec=on $fs
634 log_must zfs set setuid=on $fs
635 log_must zfs set readonly=off $fs
636 log_must zfs set snapdir=hidden $fs
637 log_must zfs set aclmode=groupmask $fs
638 log_must zfs set aclinherit=secure $fs
643 [[ -d $TESTDIR ]] && \
644 log_must rm -rf $TESTDIR
647 if is_mpath_device $disk1; then
651 rm -f $TEST_BASE_DIR/{err,out}
656 # Common function used to cleanup storage pools, file systems
659 function default_container_cleanup
661 if ! is_global_zone; then
665 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
667 log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
669 destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
670 destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
672 [[ -e $TESTDIR1 ]] && \
673 log_must rm -rf $TESTDIR1 > /dev/null 2>&1
679 # Common function used to cleanup snapshot of file system or volume. Default to
680 # delete the file system's snapshot
684 function destroy_snapshot
686 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
688 if ! snapexists $snap; then
689 log_fail "'$snap' does not exist."
693 # The value returned by 'get_prop' may not match the real mountpoint
694 # when the snapshot is unmounted.  So, first check and make sure this
695 # snapshot is mounted on the current system.
698 if ismounted $snap; then
699 mtpt=$(get_prop mountpoint $snap)
701 log_fail "get_prop mountpoint $snap failed."
704 destroy_dataset "$snap"
705 [[ $mtpt != "" && -d $mtpt ]] && \
706 log_must rm -rf $mtpt
710 # Common function used to cleanup clone.
714 function destroy_clone
# Destroy clone $1 (default $TESTPOOL/$TESTCLONE) and remove its former
# mountpoint directory.  Fails the test if the clone does not exist.
716 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
718 if ! datasetexists $clone; then
719 log_fail "'$clone' does not exist."
722 # Same reasoning as in destroy_snapshot: the mountpoint reported by
# get_prop is only meaningful while the clone is mounted, so capture
# it before destroying.
724 if ismounted $clone; then
725 mtpt=$(get_prop mountpoint $clone)
727 log_fail "get_prop mountpoint $clone failed."
730 destroy_dataset "$clone"
731 [[ $mtpt != "" && -d $mtpt ]] && \
732 log_must rm -rf $mtpt
736 # Common function used to cleanup bookmark of file system or volume. Default
737 # to delete the file system's bookmark.
741 function destroy_bookmark
# Destroy bookmark $1 (default $TESTPOOL/$TESTFS#$TESTBKMARK).
# Fails the test if the bookmark does not exist.
743 typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
745 if ! bkmarkexists $bkmark; then
# Fix: the message previously referenced the undefined variable
# $bkmarkp (typo for $bkmark), so the bookmark name printed as empty.
746 log_fail "'$bkmark' does not exist."
749 destroy_dataset "$bkmark"
752 # Return 0 if a snapshot exists; $? otherwise
758 zfs list -H -t snapshot "$1" > /dev/null 2>&1
763 # Return 0 if a bookmark exists; $? otherwise
767 function bkmarkexists
769 zfs list -H -t bookmark "$1" > /dev/null 2>&1
774 # Return 0 if a hold exists; $? otherwise
781 zfs holds "$2" | awk '{ print $2 }' | grep "$1" > /dev/null 2>&1
786 # Set a property to a certain value on a dataset.
787 # Sets a property of the dataset to the value as passed in.
789 # $1 dataset who's property is being set
791 # $3 value to set property to
793 # 0 if the property could be set.
794 # non-zero otherwise.
797 function dataset_setprop
799 typeset fn=dataset_setprop
802 log_note "$fn: Insufficient parameters (need 3, had $#)"
806 output=$(zfs set $2=$3 $1 2>&1)
809 log_note "Setting property on $1 failed."
810 log_note "property $2=$3"
811 log_note "Return Code: $rv"
812 log_note "Output: $output"
819 # Assign suite defined dataset properties.
820 # This function is used to apply the suite's defined default set of
821 # properties to a dataset.
822 # @parameters: $1 dataset to use
823 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
825 # 0 if the dataset has been altered.
826 # 1 if no pool name was passed in.
827 # 2 if the dataset could not be found.
828 # 3 if the dataset could not have its properties set.
830 function dataset_set_defaultproperties
834 [[ -z $dataset ]] && return 1
838 for confset in $(zfs list); do
839 if [[ $dataset = $confset ]]; then
844 [[ $found -eq 0 ]] && return 2
845 if [[ -n $COMPRESSION_PROP ]]; then
846 dataset_setprop $dataset compression $COMPRESSION_PROP || \
848 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
850 if [[ -n $CHECKSUM_PROP ]]; then
851 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
853 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
859 # Check a numeric assertion
860 # @parameter: $@ the assertion to check
861 # @output: big loud notice if assertion failed
866 (($@)) || log_fail "$@"
870 # Function to format partition size of a disk
871 # Given a disk cxtxdx reduces all partitions
874 function zero_partitions #<whole_disk_name>
880 gpart destroy -F $diskname
882 DSK=$DEV_DSKDIR/$diskname
883 DSK=$(echo $DSK | sed -e "s|//|/|g")
884 log_must parted $DSK -s -- mklabel gpt
885 blockdev --rereadpt $DSK 2>/dev/null
888 for i in 0 1 3 4 5 6 7
890 log_must set_partition $i "" 0mb $diskname
898 # Given a slice, size and disk, this function
899 # formats the slice to the specified size.
900 # Size should be specified with units as per
901 # the `format` command requirements eg. 100mb 3gb
903 # NOTE: This entire interface is problematic for the Linux parted utility
904 # which requires the end of the partition to be specified. It would be
905 # best to retire this interface and replace it with something more flexible.
906 # At the moment a best effort is made.
908 # arguments: <slice_num> <slice_start> <size_plus_units> <whole_disk_name>
909 function set_partition
911 typeset -i slicenum=$1
914 typeset disk=${4#$DEV_DSKDIR/}
915 disk=${disk#$DEV_RDSKDIR/}
919 if [[ -z $size || -z $disk ]]; then
920 log_fail "The size or disk name is unspecified."
922 disk=$DEV_DSKDIR/$disk
923 typeset size_mb=${size%%[mMgG]}
925 size_mb=${size_mb%%[mMgG][bB]}
926 if [[ ${size:1:1} == 'g' ]]; then
927 ((size_mb = size_mb * 1024))
930 # Create GPT partition table when setting slice 0 or
931 # when the device doesn't already contain a GPT label.
932 parted $disk -s -- print 1 >/dev/null
934 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
935 parted $disk -s -- mklabel gpt
936 if [[ $? -ne 0 ]]; then
937 log_note "Failed to create GPT partition table on $disk"
942 # When no start is given align on the first cylinder.
943 if [[ -z "$start" ]]; then
947 # Determine the cylinder size for the device and using
948 # that calculate the end offset in cylinders.
949 typeset -i cly_size_kb=0
950 cly_size_kb=$(parted -m $disk -s -- \
951 unit cyl print | head -3 | tail -1 | \
952 awk -F '[:k.]' '{print $4}')
953 ((end = (size_mb * 1024 / cly_size_kb) + start))
956 mkpart part$slicenum ${start}cyl ${end}cyl
958 if [[ $ret_val -ne 0 ]]; then
959 log_note "Failed to create partition $slicenum on $disk"
963 blockdev --rereadpt $disk 2>/dev/null
964 block_device_wait $disk
967 if [[ -z $size || -z $disk ]]; then
968 log_fail "The size or disk name is unspecified."
970 disk=$DEV_DSKDIR/$disk
972 if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
973 gpart destroy -F $disk >/dev/null 2>&1
974 gpart create -s GPT $disk
975 if [[ $? -ne 0 ]]; then
976 log_note "Failed to create GPT partition table on $disk"
981 typeset index=$((slicenum + 1))
983 if [[ -n $start ]]; then
986 gpart add -t freebsd-zfs $start -s $size -i $index $disk
987 if [[ $ret_val -ne 0 ]]; then
988 log_note "Failed to create partition $slicenum on $disk"
992 block_device_wait $disk
995 if [[ -z $slicenum || -z $size || -z $disk ]]; then
996 log_fail "The slice, size or disk name is unspecified."
999 typeset format_file=/var/tmp/format_in.$$
1001 echo "partition" >$format_file
1002 echo "$slicenum" >> $format_file
1003 echo "" >> $format_file
1004 echo "" >> $format_file
1005 echo "$start" >> $format_file
1006 echo "$size" >> $format_file
1007 echo "label" >> $format_file
1008 echo "" >> $format_file
1009 echo "q" >> $format_file
1010 echo "q" >> $format_file
1012 format -e -s -d $disk -f $format_file
1018 if [[ $ret_val -ne 0 ]]; then
1019 log_note "Unable to format $disk slice $slicenum to $size"
1026 # Delete all partitions on all disks - this is specifically for the use of multipath
1027 # devices which currently can only be used in the test suite as raw/un-partitioned
1028 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
1030 function delete_partitions
1034 if [[ -z $DISKSARRAY ]]; then
1040 for disk in $DISKSARRAY; do
1041 for (( part = 1; part < MAX_PARTITIONS; part++ )); do
1042 typeset partition=${disk}${SLICE_PREFIX}${part}
1043 parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
1044 if lsblk | grep -qF ${partition}; then
1045 log_fail "Partition ${partition} not deleted"
1047 log_note "Partition ${partition} deleted"
1051 elif is_freebsd; then
1052 for disk in $DISKSARRAY; do
1053 if gpart destroy -F $disk; then
1054 log_note "Partitions for ${disk} deleted"
1056 log_fail "Partitions for ${disk} not deleted"
1063 # Get the end cyl of the given slice
1065 function get_endslice #<disk> <slice>
1069 if [[ -z $disk || -z $slice ]] ; then
1070 log_fail "The disk name or slice number is unspecified."
1075 endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
1076 awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
1077 ((endcyl = (endcyl + 1)))
1080 disk=${disk#/dev/zvol/}
1082 slice=$((slice + 1))
1083 endcyl=$(gpart show $disk | \
1084 awk -v slice=$slice '$3 == slice { print $1 + $2 }')
1087 disk=${disk#/dev/dsk/}
1088 disk=${disk#/dev/rdsk/}
1092 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
1093 grep "sectors\/cylinder" | \
1096 if ((ratio == 0)); then
1100 typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
1101 nawk -v token="$slice" '{if ($1==token) print $6}')
1103 ((endcyl = (endcyl + 1) / ratio))
1112 # Given a size,disk and total slice number, this function formats the
1113 # disk slices from 0 to the total slice number with the same specified
1116 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
1119 typeset slice_size=$1
1120 typeset disk_name=$2
1121 typeset total_slices=$3
1124 zero_partitions $disk_name
1125 while ((i < $total_slices)); do
1132 log_must set_partition $i "$cyl" $slice_size $disk_name
1133 cyl=$(get_endslice $disk_name $i)
1139 # This function continues to write to a filenum number of files into dirnum
1140 # number of directories until either file_write returns an error or the
1141 # maximum number of files per directory have been written.
1144 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1146 # Return value: 0 on success
1150 # destdir: is the directory where everything is to be created under
1151 # dirnum: the maximum number of subdirectories to use, -1 no limit
1152 # filenum: the maximum number of files per subdirectory
1153 # bytes: number of bytes to write
1154 # num_writes: number of types to write out bytes
1155 # data: the data that will be written
1158 # fill_fs /testdir 20 25 1024 256 0
1160 # Note: bytes * num_writes equals the size of the testfile
1162 function fill_fs # destdir dirnum filenum bytes num_writes data
1164 typeset destdir=${1:-$TESTDIR}
1165 typeset -i dirnum=${2:-50}
1166 typeset -i filenum=${3:-50}
1167 typeset -i bytes=${4:-8192}
1168 typeset -i num_writes=${5:-10240}
1169 typeset data=${6:-0}
1171 mkdir -p $destdir/{1..$dirnum}
1172 for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
1173 file_write -o create -f $f -b $bytes -c $num_writes -d $data \
1180 # Simple function to get the specified property. If unable to
1181 # get the property then exits.
1183 # Note property is in 'parsable' format (-p)
1185 function get_prop # property dataset
1191 prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
1192 if [[ $? -ne 0 ]]; then
1193 log_note "Unable to get $prop property for dataset " \
1203 # Simple function to get the specified property of pool. If unable to
1204 # get the property then exits.
1206 # Note property is in 'parsable' format (-p)
1208 function get_pool_prop # property pool
1214 if poolexists $pool ; then
1215 prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
1217 if [[ $? -ne 0 ]]; then
1218 log_note "Unable to get $prop property for pool " \
1223 log_note "Pool $pool not exists."
1231 # Return 0 if a pool exists; $? otherwise
1239 if [[ -z $pool ]]; then
1240 log_note "No pool name given."
1244 zpool get name "$pool" > /dev/null 2>&1
1248 # Return 0 if all the specified datasets exist; $? otherwise
1251 function datasetexists
1253 if (($# == 0)); then
1254 log_note "No dataset name given."
1258 while (($# > 0)); do
1259 zfs get name $1 > /dev/null 2>&1 || \
1267 # return 0 if none of the specified datasets exists, otherwise return 1.
1270 function datasetnonexists
1272 if (($# == 0)); then
1273 log_note "No dataset name given."
1277 while (($# > 0)); do
1278 zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1286 function is_shared_freebsd
1290 pgrep -q mountd && showmount -E | grep -qx $fs
1293 function is_shared_illumos
1298 for mtpt in `share | awk '{print $2}'` ; do
1299 if [[ $mtpt == $fs ]] ; then
1304 typeset stat=$(svcs -H -o STA nfs/server:default)
1305 if [[ $stat != "ON" ]]; then
1306 log_note "Current nfs/server status: $stat"
1312 function is_shared_linux
1317 for mtpt in `share | awk '{print $1}'` ; do
1318 if [[ $mtpt == $fs ]] ; then
1326 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1328 # Returns 0 if shared, 1 otherwise.
1335 if [[ $fs != "/"* ]] ; then
1336 if datasetnonexists "$fs" ; then
1339 mtpt=$(get_prop mountpoint "$fs")
1341 none|legacy|-) return 1
1350 FreeBSD) is_shared_freebsd "$fs" ;;
1351 Linux) is_shared_linux "$fs" ;;
1352 *) is_shared_illumos "$fs" ;;
1356 function is_exported_illumos
1361 for mtpt in `awk '{print $1}' /etc/dfs/sharetab` ; do
1362 if [[ $mtpt == $fs ]] ; then
1370 function is_exported_freebsd
1375 for mtpt in `awk '{print $1}' /etc/zfs/exports` ; do
1376 if [[ $mtpt == $fs ]] ; then
1384 function is_exported_linux
1389 for mtpt in `awk '{print $1}' /etc/exports.d/zfs.exports` ; do
1390 if [[ $mtpt == $fs ]] ; then
1399 # Given a mountpoint, or a dataset name, determine if it is exported via
1400 # the os-specific NFS exports file.
1402 # Returns 0 if exported, 1 otherwise.
1404 function is_exported
1409 if [[ $fs != "/"* ]] ; then
1410 if datasetnonexists "$fs" ; then
1413 mtpt=$(get_prop mountpoint "$fs")
1415 none|legacy|-) return 1
1424 FreeBSD) is_exported_freebsd "$fs" ;;
1425 Linux) is_exported_linux "$fs" ;;
1426 *) is_exported_illumos "$fs" ;;
1431 # Given a dataset name determine if it is shared via SMB.
1433 # Returns 0 if shared, 1 otherwise.
1435 function is_shared_smb
1440 if datasetnonexists "$fs" ; then
1443 fs=$(echo $fs | tr / _)
1447 for mtpt in `net usershare list | awk '{print $1}'` ; do
1448 if [[ $mtpt == $fs ]] ; then
1454 log_note "Currently unsupported by the test framework"
1460 # Given a mountpoint, determine if it is not shared via NFS.
1462 # Returns 0 if not shared, 1 otherwise.
1470 # Given a dataset determine if it is not shared via SMB.
1472 # Returns 0 if not shared, 1 otherwise.
1474 function not_shared_smb
1480 # Helper function to unshare a mountpoint.
1482 function unshare_fs #fs
1486 if is_shared $fs || is_shared_smb $fs; then
1487 zfs unshare $fs || log_fail "zfs unshare $fs failed"
1492 # Helper function to share a NFS mountpoint.
1494 function share_nfs #fs
1498 if ! is_shared $fs; then
1500 log_must share "*:$fs"
1502 log_must share -F nfs $fs
1510 # Helper function to unshare a NFS mountpoint.
1512 function unshare_nfs #fs
1516 if is_shared $fs; then
1518 log_must unshare -u "*:$fs"
1520 log_must unshare -F nfs $fs
1528 # Helper function to show NFS shares.
1530 function showshares_nfs
1542 # Helper function to show SMB shares.
1544 function showshares_smb
1559 elif is_freebsd; then
1562 log_unsupported "Unknown platform"
1565 if [[ $? -ne 0 ]]; then
1566 log_unsupported "The NFS utilities are not installed"
1571 # Check NFS server status and trigger it online.
1573 function setup_nfs_server
1575 # Cannot share directory in non-global zone.
1577 if ! is_global_zone; then
1578 log_note "Cannot trigger NFS server by sharing in LZ."
1584 # Re-synchronize /var/lib/nfs/etab with /etc/exports and
1585 # /etc/exports.d./* to provide a clean test environment.
1589 log_note "NFS server must be started prior to running ZTS."
1591 elif is_freebsd; then
1592 kill -s HUP $(cat /var/run/mountd.pid)
1594 log_note "NFS server must be started prior to running ZTS."
1598 typeset nfs_fmri="svc:/network/nfs/server:default"
1599 if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1601 # Only really sharing operation can enable NFS server
1602 # to online permanently.
1604 typeset dummy=/tmp/dummy
1606 if [[ -d $dummy ]]; then
1607 log_must rm -rf $dummy
1610 log_must mkdir $dummy
1611 log_must share $dummy
1614 # Waiting for fmri's status to be the final status.
1615 # Otherwise, in transition, an asterisk (*) is appended for
1616 # instances, unshare will reverse status to 'DIS' again.
1618 # Waiting for 1's at least.
1622 while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1629 log_must unshare $dummy
1630 log_must rm -rf $dummy
1633 log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1637 # To verify whether calling process is in global zone
1639 # Return 0 if in global zone, 1 in non-global zone
1641 function is_global_zone
1643 if is_linux || is_freebsd; then
1646 typeset cur_zone=$(zonename 2>/dev/null)
1647 [ $cur_zone = "global" ]
1652 # Verify whether test is permitted to run from
1653 # global zone, local zone, or both
1655 # $1 zone limit, could be "global", "local", or "both"(no limit)
1657 # Return 0 if permitted, otherwise exit with log_unsupported
1659 function verify_runnable # zone limit
1663 [[ -z $limit ]] && return 0
1665 if is_global_zone ; then
1669 local) log_unsupported "Test is unable to run from "\
1672 *) log_note "Warning: unknown limit $limit - " \
1680 global) log_unsupported "Test is unable to run from "\
1683 *) log_note "Warning: unknown limit $limit - " \
1694 # Return 0 if created successfully or the pool already exists; $? otherwise
1695 # Note: In local zones, this function should return 0 silently.
1698 # $2-n - [keyword] devs_list
1700 function create_pool #pool devs_list
1702 typeset pool=${1%%/*}
1706 if [[ -z $pool ]]; then
1707 log_note "Missing pool name."
1711 if poolexists $pool ; then
1715 if is_global_zone ; then
1716 [[ -d /$pool ]] && rm -rf /$pool
1717 log_must zpool create -f $pool $@
#
# Return 0 if destroy successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.
#
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd/udev activity can cause attempts
			# to destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			log_must_busy zpool destroy -f $pool

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
#
# Return 0 if created successfully; $? otherwise
#
# $1 - dataset name
# $2-n - dataset options
#
function create_dataset #dataset dataset_options
{
	typeset dataset=$1

	shift

	if [[ -z $dataset ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Recreate from scratch if the dataset already exists.
	if datasetexists $dataset ; then
		destroy_dataset $dataset
	fi

	log_must zfs create $@ $dataset

	return 0
}
#
# Return 0 if destroy successfully or the dataset exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
# Destroy dataset with the given parameters.
#
function destroy_dataset #dataset #args
{
	typeset dataset=$1
	typeset mtpt
	typeset args=${2:-""}

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	if is_global_zone ; then
		if datasetexists "$dataset" ; then
			mtpt=$(get_prop mountpoint "$dataset")
			# Retry on EBUSY caused by transient udev/syseventd
			# activity against the dataset's device nodes.
			log_must_busy zfs destroy $args $dataset

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Dataset does not exist. ($dataset)"
			return 1
		fi
	fi

	return 0
}
#
# Firstly, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
# NOTE(review): this view of the function is elided — loop counter
# initialization/increments and several closing braces are missing.
#
# $2 zone root directory prefix
#
function zfs_zones_setup #zone_name zone_root zone_ip
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL

	# Create pool and 5 container within it
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i

	# Volume for the zone; exported to it as a device below.
	log_must zfs create -V 1g $pool_name/zone_zvol

	# Add slog device for pool
	typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
	log_must mkfile $MINVDEVSIZE $sdevs
	log_must zpool add $pool_name log mirror $sdevs

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
		echo "end" >> $zone_conf

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone; failure here is fatal for the test run.
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
		log_fail "FAIL: zoneadm -z $zone_name install"

	# Install sysidcfg file
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo "terminal=dtterm" >> $sysidcfg
	echo "network_interface=primary {" >> $sysidcfg
	echo "hostname=$zone_name" >> $sysidcfg
	echo "}" >> $sysidcfg
	echo "name_service=NONE" >> $sysidcfg
	echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo "security_policy=NONE" >> $sysidcfg
	echo "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while ((i < cntctr)); do
		if ((i == 0)); then
			# The first container keeps the plain TESTPOOL name.
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# Subsequent containers become TESTPOOL1..TESTPOOL4;
			# eval is needed to assign/expand the dynamic names.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
#
# Verify a given disk or pool state
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline,degraded}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	[[ -z $pool ]] || [[ -z $state ]] \
	    && log_fail "Arguments invalid or missing"

	if [[ -z $disk ]]; then
		#check pool state only
		zpool get -H -o value health $pool \
		    | grep -i "$state" > /dev/null 2>&1
	else
		# With a disk, match the state on that device's status line.
		zpool status -v $pool | grep "$disk" \
		    | grep -i "$state" > /dev/null 2>&1
	fi

	return $?
}
#
# Get the mountpoint of snapshot
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# A snapshot name must contain '@'.
	if [[ $dataset != *@* ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	if [[ -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
#
# Given a device and 'ashift' value verify it's correctly set on every label
#
function verify_ashift # device ashift
{
	typeset device="$1"
	typeset ashift="$2"

	# Every one of the 4 vdev labels must report the expected ashift;
	# exit non-zero from awk on the first mismatch or a short count.
	zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
	    if (ashift != $2)
	        exit 1;
	    else
	        count++;
	    } END {
	    if (count != 4)
	        exit 1;
	    else
	        exit 0;
	    }'
}
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift

	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	# Build up "-d dir" search arguments for the import.
	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
#
# Given a pool issue a scrub and verify that no checksum errors are reported.
#
function verify_pool
{
	typeset pool=${1:-$TESTPOOL}

	log_must zpool scrub $pool
	log_must wait_scrubbed $pool

	# Sum the CKSUM column over all vdev lines of the status output.
	typeset -i cksum=$(zpool status $pool | awk '
	    !NF { isvdev = 0 }
	    isvdev { errors += $NF }
	    /CKSUM$/ { isvdev = 1 }
	    END { print errors }
	')
	if [[ $cksum != 0 ]]; then
		log_must zpool status -v
		log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
	fi
}
#
# Given a pool, and this function list all disks in the pool
#
function get_disklist # pool
{
	typeset disklist=""

	# Skip the iostat header, separator lines and any grouping vdevs
	# (mirror/raidz/draid/spare/log/cache/special/dedup) so only leaf
	# devices remain.
	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
	    grep -v "\-\-\-\-\-" | \
	    egrep -v -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")

	echo $disklist
}
2103 # Given a pool, and this function list all disks in the pool with their full
2104 # path (like "/dev/sda" instead of "sda").
2106 function get_disklist_fullpath # pool
#
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want : we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
#
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		# Signal only processes that are still alive.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "spares")

	# Succeed only when the observed state equals the expected one.
	[[ $state == ${cur_state} ]]
}
#
# Wait until a hotspare transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	typeset disk=${2#*$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the state matches or we give up.
	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i + 1))
		sleep 1
	done

	return 1
}
#
# Verify a given slog disk is inuse or avail
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "logs")

	# Succeed only when the observed state equals the expected one.
	[[ $state == ${cur_state} ]]
}
#
# Verify a given vdev disk is inuse or avail
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#*$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	# Succeed only when the observed state equals the expected one.
	[[ $state == ${cur_state} ]]
}
#
# Wait until a vdev transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	typeset disk=${2#*$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the state matches or we give up.
	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i + 1))
		sleep 1
	done

	return 1
}
#
# Check the output of 'zpool status -v <pool>',
# and to see if the content of <token> contain the <keyword> specified.
#
# Return 0 is contain, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Pull out just the "<token>: ..." line from the status output.
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	echo $scan | egrep -i "$keyword" > /dev/null 2>&1
}
#
# The following functions are instance of check_pool_status()
#	is_pool_resilvering - to check if the pool resilver is in progress
#	is_pool_resilvered - to check if the pool resilver is completed
#	is_pool_scrubbing - to check if the pool scrub is in progress
#	is_pool_scrubbed - to check if the pool scrub is completed
#	is_pool_scrub_stopped - to check if the pool scrub is stopped
#	is_pool_scrub_paused - to check if the pool scrub has paused
#	is_pool_removing - to check if the pool removing is a vdev
#	is_pool_removed - to check if the pool remove is completed
#	is_pool_discarding - to check if the pool checkpoint is being discarded
#
function is_pool_resilvering #pool <verbose>
{
	check_pool_status "$1" "scan" \
	    "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
}
# True when the pool's scan line reports a completed resilver.
function is_pool_resilvered #pool <verbose>
{
	check_pool_status "$1" "scan" "resilvered " $2
}
# True when a scrub is currently in progress on the pool.
function is_pool_scrubbing #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
# True when the pool's scan line reports a completed scrub.
function is_pool_scrubbed #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub repaired" $2
}
# True when a scrub on the pool was canceled.
function is_pool_scrub_stopped #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub canceled" $2
}
# True when a scrub on the pool is currently paused.
function is_pool_scrub_paused #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub paused since " $2
}
# True when a device removal is in progress on the pool.
function is_pool_removing #pool
{
	check_pool_status "$1" "remove" "in progress since "
}
# True when a device removal on the pool has completed.
function is_pool_removed #pool
{
	check_pool_status "$1" "remove" "completed on"
}
# True when the pool checkpoint is currently being discarded.
function is_pool_discarding #pool
{
	check_pool_status "$1" "checkpoint" "discarding"
}
# Wait (up to $2 seconds, default 30) for a pool's health to become
# DEGRADED. Returns 0 once degraded, 1 on timeout.
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset t0=$SECONDS

	while true; do
		[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - t0 > $timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
#
# Use create_pool()/destroy_pool() to clean up the information in
# in the given disk to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"
	typeset vdev

	# Wipe any partition tables first.
	for vdev in $@; do
		zero_partitions $vdev
	done

	# Creating and destroying a throwaway pool rewrites the labels.
	poolexists $pool && destroy_pool $pool
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
#
# A function to find and locate free disks on a system or from given
# disks as the parameter. It works by locating disks that are in use
# as swap devices and dump devices, and also disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#
# NOTE(review): the enclosing function's header and several interior
# lines are elided from this chunk; the visible fragment is annotated
# in place below.

	# Trust provided list, no attempt is made to locate unused devices.
	if is_linux || is_freebsd; then

	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	dumpadm > $dmpi 2>/dev/null

	# write an awk script that can process the output of format
	# to produce a list of disks we know about. Note that we have
	# to escape "$2" so that the shell doesn't interpret it while
	# we're creating the awk script.
	# -------------------
	cat > /tmp/find_disks.awk <<EOF
	if (searchdisks && \$2 !~ "^$"){
/^AVAILABLE DISK SELECTIONS:/{
#---------------------

	chmod 755 /tmp/find_disks.awk
	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
	rm /tmp/find_disks.awk

	for disk in $disks; do
		# Skip disks that appear in the mount table.
		grep "${disk}[sp]" /etc/mnttab >/dev/null
		(($? == 0)) && continue
		# Skip disks in use as swap devices.
		grep "${disk}[sp]" $sfi >/dev/null
		(($? == 0)) && continue
		# check for dump device
		grep "${disk}[sp]" $dmpi >/dev/null
		(($? == 0)) && continue
		# check to see if this disk hasn't been explicitly excluded
		# by a user-set environment variable
		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
		(($? == 0)) && continue
		unused_candidates="$unused_candidates $disk"

	# now just check to see if those disks do actually exist
	# by looking for a device pointing to the first slice in
	# each case. limit the number to max_finddisksnum
	for disk in $unused_candidates; do
		if is_disk_device $DEV_DSKDIR/${disk}s0 && \
		    [ $count -lt $max_finddisksnum ]; then
			unused="$unused $disk"
			# do not impose limit if $@ is provided
			[[ -z $@ ]] && ((count = count + 1))

	# finally, return our disk list
# Create a FreeBSD user in the given group with a home under $basedir.
# NOTE(review): this view is elided — the uid-probe retry loop bounds and
# several closing braces are missing from the chunk.
function add_user_freebsd #<group_name> <user_name> <basedir>

	# Check to see if the user exists.
	if id $user > /dev/null 2>&1; then

	# Assign 1000 as the base uid
	pw useradd -u $uid -g $group -d $basedir/$user -m -n $user

		# The uid is not unique
		# Retry with the next candidate uid; give up at 65000.
		if [[ $uid == 65000 ]]; then
			log_fail "No user id available under 65000 for $user"

	# silence erroneous login prompts on first ssh/login
	touch $basedir/$user/.hushlogin
#
# Delete the specified user.
#
# $1 login name
#
function del_user_freebsd #<logname>
{
	typeset user=$1

	# Only attempt removal when the account actually exists.
	if id $user > /dev/null 2>&1; then
		log_must pw userdel $user
	fi

	return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
# NOTE(review): this view is elided — the gid-probe retry loop bounds and
# closing braces are missing from the chunk.
function add_group_freebsd #<group_name>

	# See if the group already exists.
	if pw groupshow $group >/dev/null 2>&1; then

	# Assign 1000 as the base gid
	pw groupadd -g $gid -n $group > /dev/null 2>&1

		# The gid is not unique
		# Retry with the next candidate gid; give up at 65000.
		if [[ $gid == 65000 ]]; then
			log_fail "No user id available under 65000 for $group"
#
# Delete the specified group.
#
# $1 group name
#
# NOTE(review): the case statement around the exit-status handling is
# partially elided from this chunk.
function del_group_freebsd #<group_name>

	pw groupdel -n $group > /dev/null 2>&1

		# Group does not exist, or was deleted successfully.
		# Name already exists as a group name
		9) log_must pw groupdel $group ;;
# Create an illumos user in the given group with a home under $basedir.
function add_user_illumos #<group_name> <user_name> <basedir>
{
	typeset group=$1
	typeset user=$2
	typeset basedir=$3

	log_must useradd -g $group -d $basedir/$user -m $user

	return 0
}
# Delete an illumos user, retrying while the account is still in use.
function del_user_illumos #<user_name>
{
	typeset user=$1

	if id $user > /dev/null 2>&1; then
		# userdel can transiently fail with "currently used";
		# retry up to 6 times.
		log_must_retry "currently used" 6 userdel $user
	fi

	return 0
}
2597 function add_group_illumos #<group_name>
2603 groupadd -g $gid $group > /dev/null 2>&1
2607 # The gid is not unique
# Delete an illumos group.
# NOTE(review): this view is elided — the case statement around the
# groupmod exit status is partially missing from the chunk.
function del_group_illumos #<group_name>

	# Probe for the group's existence via a no-op rename.
	groupmod -n $grp $grp > /dev/null 2>&1

		# Group does not exist.
		# Name already exists as a group name
		9) log_must groupdel $grp ;;
# Create a Linux user in the given group with a home under $basedir.
function add_user_linux #<group_name> <user_name> <basedir>
{
	typeset group=$1
	typeset user=$2
	typeset basedir=$3

	log_must useradd -g $group -d $basedir/$user -m $user

	# Add new users to the same group and the command line utils.
	# This allows them to be run out of the original users home
	# directory as long as it permissioned to be group readable.
	cmd_group=$(stat --format="%G" $(which zfs))
	log_must usermod -a -G $cmd_group $user

	return 0
}
# Delete a Linux user, retrying while the account is still in use.
function del_user_linux #<user_name>
{
	typeset user=$1

	if id $user > /dev/null 2>&1; then
		# userdel can transiently fail with "currently used";
		# retry up to 6 times.
		log_must_retry "currently used" 6 userdel $user
	fi

	return 0
}
2657 function add_group_linux #<group_name>
2661 # Assign 100 as the base gid, a larger value is selected for
2662 # Linux because for many distributions 1000 and under are reserved.
2664 groupadd $group > /dev/null 2>&1
# Delete a Linux group if it exists.
# NOTE(review): this view is elided — the case statement around the
# getent exit status is partially missing from the chunk.
function del_group_linux #<group_name>

	getent group $group > /dev/null 2>&1

		# Group does not exist.
		# Name already exists as a group name
		0) log_must groupdel $group ;;
#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #<group_name> <user_name> <basedir>
{
	typeset group=$1
	typeset user=$2
	typeset basedir=${3:-"/var/tmp"}

	if ((${#group} == 0 || ${#user} == 0)); then
		log_fail "group name or user name are not defined."
	fi

	# Dispatch to the platform-specific implementation.
	case $(uname) in
	FreeBSD)
		add_user_freebsd "$group" "$user" "$basedir"
		;;
	Linux)
		add_user_linux "$group" "$user" "$basedir"
		;;
	*)
		add_user_illumos "$group" "$user" "$basedir"
		;;
	esac

	return 0
}
#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	if ((${#user} == 0)); then
		log_fail "login name is necessary."
	fi

	# Dispatch to the platform-specific implementation.
	case $(uname) in
	FreeBSD)
		del_user_freebsd "$user"
		;;
	Linux)
		del_user_linux "$user"
		;;
	*)
		del_user_illumos "$user"
		;;
	esac

	# Remove the home directory as well.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	# Dispatch to the platform-specific implementation.
	case $(uname) in
	FreeBSD)
		add_group_freebsd "$group"
		;;
	Linux)
		add_group_linux "$group"
		;;
	*)
		add_group_illumos "$group"
		;;
	esac

	return 0
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	# Dispatch to the platform-specific implementation.
	case $(uname) in
	FreeBSD)
		del_group_freebsd "$group"
		;;
	Linux)
		del_group_linux "$group"
		;;
	*)
		del_group_illumos "$group"
		;;
	esac

	return 0
}
#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
# NOTE(review): this view is elided — the DONT_DESTROY assignments, loop
# delimiters and return statements are missing from the chunk.
#
function safe_to_destroy_pool { # $1 the pool name

	typeset DONT_DESTROY=""

	# We check that by deleting the $1 pool, we're not
	# going to pull the rug out from other pools. Do this
	# by looking at all other pools, ensuring that they
	# aren't built from files or zvols contained in this pool.

	for pool in $(zpool list -H -o name)

		# this is a list of the top-level directories in each of the
		# files that make up the path to the files the pool is based on
		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \

		# this is a list of the zvols that make up the pool
		ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \

		# also want to determine if it's a file-based pool using an
		# alternate mountpoint...
		POOL_FILE_DIRS=$(zpool status -v $pool | \
		    grep / | awk '{print $1}' | \
		    awk -F/ '{print $2}' | grep -v "dev")

		for pooldir in $POOL_FILE_DIRS

			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
			    grep "${pooldir}$" | awk '{print $1}')

			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"

		# Any of the following being non-empty means $pool depends
		# on $1, so it is not safe to destroy it.
		if [ ! -z "$ZVOLPOOL" ]
			log_note "Pool $pool is built from $ZVOLPOOL on $1"

		if [ ! -z "$FILEPOOL" ]
			log_note "Pool $pool is built from $FILEPOOL on $1"

		if [ ! -z "$ALTMOUNTPOOL" ]
			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"

	if [ -z "${DONT_DESTROY}" ]
		log_note "Warning: it is not safe to destroy $1!"
#
# Verify zfs operation with -p option work as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
# NOTE(review): this view is elided — the case statement delimiters and
# several closing braces are missing from the chunk.
#
function verify_opt_p_ops
{
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."

	# check parameters accordingly
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"

			# clone requires an existing snapshot target name
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
			log_must datasetexists $dataset
			log_must snapexists $dataset

			# rename requires a non-empty destination name
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
			log_must datasetexists $dataset

			log_fail "$ops is not supported."

	# make sure the upper level filesystem does not exist
	destroy_dataset "${newdataset%/*}" "-rRf"

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
#
# Get configuration of pool
# NOTE(review): the enclosing function's header and several interior lines
# are elided from this chunk; the fragment queries a config value via zdb,
# using -e when the pool has an alternate root.
#
	if ! poolexists "$pool" ; then

	# Last column of 'zpool list -H' is the altroot ('-' when unset).
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		value=$(zdb -e $pool | grep "$config:" | awk -F: \

	if [[ -n $value ]] ; then
#
# Privated function. Random select one of items from arguments.
#
# $1 count
# $2-n string
#
function _random_get
{
	typeset -i pick_cnt=$1
	shift

	typeset all="$@"
	typeset -i idx
	# Random 1-based index into the first $pick_cnt fields.
	((idx = RANDOM % pick_cnt + 1))

	echo "$all" | cut -f $idx -d ' '
}
3002 # Random select one of item from arguments which include NONE string
3004 function random_get_with_non
3009 _random_get "$cnt" "$@"
3013 # Random select one of item from arguments which doesn't include NONE string
3017 _random_get "$#" "$@"
#
# The function will generate a dataset name with specific length
# $1, the length of the name
# $2, the base string to construct the name
#
function gen_dataset_name
{
	typeset -i want_len=$1
	typeset chunk="$2"
	typeset -i chunk_len=${#chunk}
	typeset -i reps
	typeset result=""

	# Round up: repeat the base string enough times to reach the
	# requested length (the result may overshoot by up to
	# chunk_len - 1 characters).
	((reps = (want_len + chunk_len - 1) / chunk_len))

	while ((reps > 0)); do
		result="${result}${chunk}"
		((reps -= 1))
	done

	echo $result
}
3048 # Get cksum tuple of dataset
3051 # sample zdb output:
3052 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
3053 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
3054 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
3055 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
3056 function datasetcksum
3061 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
3062 | awk -F= '{print $7}')
3073 cksum=$(cksum $1 | awk '{print $1}')
3078 # Get the given disk/slice state from the specific field of the pool
3080 function get_device_state #pool disk field("", "spares","logs")
3083 typeset disk=${2#$DEV_DSKDIR/}
3084 typeset field=${3:-$pool}
3086 state=$(zpool status -v "$pool" 2>/dev/null | \
3087 nawk -v device=$disk -v pool=$pool -v field=$field \
3088 'BEGIN {startconfig=0; startfield=0; }
3089 /config:/ {startconfig=1}
3090 (startconfig==1) && ($1==field) {startfield=1; next;}
3091 (startfield==1) && ($1==device) {print $2; exit;}
3093 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
#
# print the given directory filesystem type
#
# NOTE(review): the enclosing function's header is elided from this chunk.
#
	if [[ -z $dir ]]; then
		log_fail "Usage: get_fstype <directory>"

	# illumos: the 3rd column of 'df -n' is the filesystem type.
	df -n $dir | awk '{print $3}'
#
# Given a disk, label it to VTOC regardless what label was on the disk
# NOTE(review): the enclosing function's header and some interior lines
# (sleeps, cleanup of the label file, return) are elided from this chunk.
#
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	if is_linux || is_freebsd; then
		log_note "Currently unsupported by the test framework"

	# Build a 'format' command script appropriate for the architecture.
	if [[ $arch == "i386" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		# x86 needs an fdisk partition before labeling.
		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"

	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?

	# wait the format to finish

	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
3172 # check if the system was installed as zfsroot or not
3173 # return: 0 if zfsroot, non-zero if not
3177 df -n / | grep zfs > /dev/null 2>&1
#
# get the root filesystem name if it's zfsroot system.
#
# return: root filesystem name
# NOTE(review): the enclosing function's header and the branch/loop
# delimiters are elided from this chunk.
#
		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
	elif ! is_linux; then
		# Fall back to parsing the static mount table.
		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \

	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootfs"
	# Confirm the name resolves to an actual ZFS dataset.
	zfs list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		log_fail "This is not a zfsroot system."
#
# get the rootfs's pool name
# return:
#       rootpool name
#
function get_rootpool
	# NOTE(review): elided view — branch/loop delimiters and the final
	# echo of the pool component are missing from this chunk.
		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
	elif ! is_linux; then
		# Fall back to parsing the static mount table.
		rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \

	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootpool"
	# Confirm the name resolves to an actual ZFS dataset.
	zfs list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		log_fail "This is not a zfsroot system."
#
# Get the word numbers from a string separated by white space
#
function get_word_count
{
	# Unquoted expansion intentionally re-splits on whitespace.
	echo $* | wc -w
}
#
# To verify if the require numbers of disks is given
#
function verify_disk_count
{
	typeset -i min=${2:-1}

	typeset -i count=$(get_word_count "$1")

	# Abort the test as untested when too few disks were supplied.
	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
		    " You specified $count disk(s)"
	fi
}
# True when the given dataset is a volume.
function ds_is_volume
{
	typeset type=$(get_prop type $1)
	[[ $type = "volume" ]] && return 0

	return 1
}
# True when the given dataset is a filesystem.
function ds_is_filesystem
{
	typeset type=$(get_prop type $1)
	[[ $type = "filesystem" ]] && return 0

	return 1
}
# True when the given dataset is a snapshot.
function ds_is_snapshot
{
	typeset type=$(get_prop type $1)
	[[ $type = "snapshot" ]] && return 0

	return 1
}
#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
	# NOTE(review): elided view — the grep's output redirection and the
	# success branch are missing from this chunk.
	svcs -H -o state labeld 2>/dev/null | grep "enabled"
	if (($? != 0)); then
# Utility function to determine if a system has multiple cpus.
# NOTE(review): the enclosing function's header and the Linux branch are
# elided from this chunk.
	elif is_freebsd; then
		sysctl -n kern.smp.cpus
	else
		# illumos: more than one line from psrinfo means multiple CPUs.
		(($(psrinfo | wc -l) > 1))
# Print the CPU frequency using the platform-appropriate tool.
function get_cpu_freq
{
	if is_linux; then
		lscpu | awk '/CPU MHz/ { print $3 }'
	elif is_freebsd; then
		sysctl -n hw.clockrate
	else
		# illumos: parse the frequency out of psrinfo's verbose output.
		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
	fi
}
3315 # Run the given command as the user provided.
3321 log_note "user: $user"
3324 typeset out=$TEST_BASE_DIR/out
3325 typeset err=$TEST_BASE_DIR/err
3327 sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
3329 log_note "out: $(<$out)"
3330 log_note "err: $(<$err)"
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	# We could use 'zpool list' to only get the vdevs of the pool but we
	# can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
	# therefore we use the 'zpool status' output.
	typeset tmpfile=$(mktemp)
	zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
	for vdev in $@; do
		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		[[ $? -ne 0 ]] && return 1
	done

	rm -f $tmpfile

	return 0
}
3376 max=$((max > i ? max : i))
3388 min=$((min < i ? min : i))
# Write data that can be compressed into a directory
function write_compressible
{
	# NOTE(review): elided view — the $dir/$megs parameters and the
	# is_linux branch delimiters are missing from this chunk.
	typeset nfiles=${3:-1}
	typeset bs=${4:-1024k}
	typeset fname=${5:-file}

	[[ -d $dir ]] || log_fail "No directory: $dir"

	# Under Linux fio is not currently used since its behavior can
	# differ significantly across versions. This includes missing
	# command line options and cases where the --buffer_compress_*
	# options fail to behave as expected.
		typeset file_bytes=$(to_bytes $megs)
		typeset bs_bytes=4096
		typeset blocks=$(($file_bytes / $bs_bytes))

		for (( i = 0; i < $nfiles; i++ )); do
			truncate -s $file_bytes $dir/$fname.$i

			# Write every third block to get 66% compression.
			for (( j = 0; j < $blocks; j += 3 )); do
				dd if=/dev/urandom of=$dir/$fname.$i \
				    seek=$j bs=$bs_bytes count=1 \
				    conv=notrunc >/dev/null 2>&1

		# Non-Linux path: let fio generate compressible data directly.
		log_must eval "fio \
		    --buffer_compress_percentage=66 \
		    --buffer_compress_chunk=4096 \
		    --filename_format='$fname.\$jobnum' >/dev/null"
3447 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3449 objnum=$(stat -f "%i" $pathname)
3451 objnum=$(stat -c %i $pathname)
#
# Sync data to the pool
#
# $1 pool name
# $2 boolean to force uberblock (and config including zpool cache file) update
#
function sync_pool #pool <force>
{
	typeset pool=${1:-$TESTPOOL}
	typeset force=${2:-false}

	if [[ $force == true ]]; then
		log_must zpool sync -f $pool
	else
		log_must zpool sync $pool
	fi

	return 0
}
#
# Sync all pools
#
# $1 boolean to force uberblock (and config including zpool cache file) update
#
function sync_all_pools #<force>
{
	typeset force=${1:-false}

	if [[ $force == true ]]; then
		log_must zpool sync -f
	else
		log_must zpool sync
	fi

	return 0
}
#
# Wait for zpool 'freeing' property drops to zero.
#
# $1 pool name
#
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}
	while true; do
		[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
		log_must sleep 1
	done
}
#
# Wait for every device replace operation to complete
#
# $1 pool name
#
function wait_replacing #pool
{
	typeset pool=${1:-$TESTPOOL}
	while true; do
		# Done once no "replacing-N" interim vdevs remain.
		[[ "" == "$(zpool status $pool |
		    awk '/replacing-[0-9]+/ {print $1}')" ]] && break
		log_must sleep 1
	done
}
#
# Wait for a pool to be scrubbed
#
# $1 pool name
# $2 timeout
#
function wait_scrubbed #pool timeout
{
	typeset timeout=${2:-300}
	typeset pool=${1:-$TESTPOOL}
	for (( timer = 0; timer < $timeout; timer++ )); do
		is_pool_scrubbed $pool && break;
		sleep 1;
	done
}
3538 # Backup the zed.rc in our test directory so that we can edit it for our test.
3540 # Returns: Backup file name. You will need to pass this to zed_rc_restore().
3541 function zed_rc_backup
# Copy the live zed.rc to a fresh temp file; the echo of the name is elided.
3543 zedrc_backup="$(mktemp)"
3544 cp $ZEDLET_DIR/zed.rc $zedrc_backup
# Restore zed.rc from a backup made by zed_rc_backup().
# $1 backup file name returned by zed_rc_backup()
3548 function zed_rc_restore
3550 mv $1 $ZEDLET_DIR/zed.rc
3554 # Setup custom environment for the ZED.
3556 # $@ Optional list of zedlets to run under zed.
# (function header elided in this extract; this is the body of zed_setup)
3560 log_unsupported "No zed on $(uname)"
3563 if [[ ! -d $ZEDLET_DIR ]]; then
3564 log_must mkdir $ZEDLET_DIR
3567 if [[ ! -e $VDEVID_CONF ]]; then
3568 log_must touch $VDEVID_CONF
# A pre-existing system vdev_id.conf would shadow our symlinked test copy.
3571 if [[ -e $VDEVID_CONF_ETC ]]; then
3572 log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3576 # Create a symlink for /etc/zfs/vdev_id.conf file.
3577 log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3579 # Setup minimal ZED configuration. Individual test cases should
3580 # add additional ZEDLETs as needed for their specific test.
3581 log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3582 log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
3584 # Scripts must only be user writable.
3585 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
# Save/restore umask around the copies (the umask-set line is elided).
3586 saved_umask=$(umask)
3588 for i in $EXTRA_ZEDLETS ; do
3589 log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3591 log_must umask $saved_umask
3594 # Customize the zed.rc file to enable the full debug log.
3595 log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
3596 echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3601 # Cleanup custom ZED environment.
3603 # $@ Optional list of zedlets to remove from our test zed.d directory.
3604 function zed_cleanup
# Remove the files zed_setup() installed, plus per-test extras and logs.
3611 log_must rm -f ${ZEDLET_DIR}/zed.rc
3612 log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
3613 log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
3614 log_must rm -f ${ZEDLET_DIR}/all-debug.sh
3615 log_must rm -f ${ZEDLET_DIR}/state
3617 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3618 for i in $EXTRA_ZEDLETS ; do
3619 log_must rm -f ${ZEDLET_DIR}/$i
3622 log_must rm -f $ZED_LOG
3623 log_must rm -f $ZED_DEBUG_LOG
# Remove the /etc symlink and the test vdev_id.conf it pointed at.
3624 log_must rm -f $VDEVID_CONF_ETC
3625 log_must rm -f $VDEVID_CONF
3630 # Check if ZED is currently running; if so, returns PIDs
# (function header elided) Looks for both the installed binary name 'zed'
# and the libtool wrapper name 'lt-zed' used by in-tree builds.
3637 zedpids="$(pgrep -x zed)"
3639 zedpids2="$(pgrep -x lt-zed)"
# Emits the (possibly empty) PID list on stdout.
3641 echo ${zedpids} ${zedpids2}
3645 # Check if ZED is currently running, if not start ZED.
# (function header elided in this extract; this is the body of zed_start)
3653 # ZEDLET_DIR=/var/tmp/zed
3654 if [[ ! -d $ZEDLET_DIR ]]; then
3655 log_must mkdir $ZEDLET_DIR
3658 # Verify the ZED is not already running.
3659 zedpids=$(zed_check)
3660 if [ -n "$zedpids" ]; then
3661 # We never, ever, really want it to just keep going if zed
3662 # is already running - usually this implies our test cases
3663 # will break very strangely because whatever we wanted to
3664 # configure zed for won't be listening to our changes in the
3666 log_fail "ZED already running - ${zedpids}"
3668 log_note "Starting ZED"
3669 # run ZED in the background and redirect foreground logging
3670 # output to $ZED_LOG.
3671 log_must truncate -s 0 $ZED_DEBUG_LOG
3672 log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
3673 "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
# (body of zed_stop; header elided) Stop a running ZED, polling until the
# process list is empty.
3688 log_note "Stopping ZED"
3690 zedpids=$(zed_check)
# Loop body (header elided): nothing left to kill means we are done.
3691 [ ! -n "$zedpids" ] && break
3693 log_must kill $zedpids
# Drain all outstanding zpool events so a test starts from a clean slate.
3702 function zed_events_drain
3704 while [ $(zpool events -H | wc -l) -ne 0 ]; do
# -c clears the event queue; loop again in case new events raced in
3706 zpool events -c >/dev/null
3710 # Set a variable in zed.rc to something, un-commenting it in the process.
# (body fragment; header and the sed-command construction are elided)
# Delete any existing (possibly commented) assignment, then append the new one.
3720 eval sed -i $cmd $ZEDLET_DIR/zed.rc
3723 echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3728 # Check is provided device is being active used as a swap device.
# $1 device path; return status of the platform swap-listing grep is the result
3730 function is_swap_inuse
3734 if [[ -z $device ]] ; then
3735 log_note "No device specified."
# Linux branch (is_linux test elided): resolve symlinks before matching.
3740 swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
3741 elif is_freebsd; then
3742 swapctl -l | grep -w $device
# illumos fallback
3744 swap -l | grep -w $device > /dev/null 2>&1
3751 # Setup a swap device using the provided device.
# (header elided) $1 device to use as swap; per-platform setup follows.
# Linux branch: device must be formatted with mkswap before swapon.
3758 log_must eval "mkswap $swapdev > /dev/null 2>&1"
3759 log_must swapon $swapdev
3760 elif is_freebsd; then
3761 log_must swapctl -a $swapdev
# illumos fallback
3763 log_must swap -a $swapdev
3770 # Cleanup a swap device on the provided device.
# $1 swap device to tear down; a no-op if the device is not active swap
3772 function swap_cleanup
3776 if is_swap_inuse $swapdev; then
# Linux branch (is_linux test elided)
3778 log_must swapoff $swapdev
3779 elif is_freebsd; then
3780 log_must swapoff $swapdev
# illumos fallback
3782 log_must swap -d $swapdev
3790 # Set a global system tunable (64-bit value)
3792 # $1 tunable name (use a NAME defined in tunables.cfg)
# $2 value to assign; 'Z' is the mdb format character for a 64-bit write
3795 function set_tunable64
3797 set_tunable_impl "$1" "$2" Z
3801 # Set a global system tunable (32-bit value)
3803 # $1 tunable name (use a NAME defined in tunables.cfg)
# $2 value to assign; 'W' is the mdb format character for a 32-bit write
3806 function set_tunable32
3808 set_tunable_impl "$1" "$2" W
# Platform-independent tunable setter.
# $1 tunable NAME (from tunables.cfg), $2 value, $3 mdb format char (Z/W),
# $4 optional module (defaults to "zfs").
3811 function set_tunable_impl
3815 typeset mdb_cmd="$3"
3816 typeset module="${4:-zfs}"
# Map the generic NAME to the platform-specific tunable string.
3818 eval "typeset tunable=\$$name"
3821 log_unsupported "Tunable '$name' is unsupported on $(uname)"
3824 log_fail "Tunable '$name' must be added to tunables.cfg"
3830 [[ -z "$value" ]] && return 1
3831 [[ -z "$mdb_cmd" ]] && return 1
# Linux: write via /sys/module/<module>/parameters.
3835 typeset zfs_tunables="/sys/module/$module/parameters"
3836 [[ -w "$zfs_tunables/$tunable" ]] || return 1
3837 cat >"$zfs_tunables/$tunable" <<<"$value"
# FreeBSD: tunables are sysctls under vfs.zfs.
3841 sysctl vfs.zfs.$tunable=$value
# illumos: poke the kernel with mdb. Use string comparison here: the old
# '-eq' operator is arithmetic, so non-numeric strings both evaluated to 0
# and the module guard was always true.
3845 [[ "$module" == "zfs" ]] || return 1
3846 echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
3853 # Get a global system tunable
3855 # $1 tunable name (use a NAME defined in tunables.cfg)
# Prints the tunable's current value on stdout.
3857 function get_tunable
3859 get_tunable_impl "$1"
# Platform-independent tunable getter.
# $1 tunable NAME (from tunables.cfg), $2 optional module (default "zfs").
# Prints the current value on stdout.
3862 function get_tunable_impl
3865 typeset module="${2:-zfs}"
# Map the generic NAME to the platform-specific tunable string.
3867 eval "typeset tunable=\$$name"
3870 log_unsupported "Tunable '$name' is unsupported on $(uname)"
3873 log_fail "Tunable '$name' must be added to tunables.cfg"
# Linux: read from /sys/module/<module>/parameters.
3881 typeset zfs_tunables="/sys/module/$module/parameters"
3882 [[ -f "$zfs_tunables/$tunable" ]] || return 1
3883 cat $zfs_tunables/$tunable
# FreeBSD: tunables are sysctls under vfs.zfs.
3887 sysctl -n vfs.zfs.$tunable
# illumos guard. Use string comparison: '-eq' is arithmetic, so non-numeric
# strings both evaluated to 0 and this check was always true.
3890 [[ "$module" == "zfs" ]] || return 1
3898 # Prints the current time in seconds since UNIX Epoch.
3900 function current_epoch
# (body elided in this extract)
3906 # Get decimal value of global uint32_t variable using mdb.
# $1 kernel variable name; prints the decimal value on stdout (illumos only)
3908 function mdb_get_uint32
# '/X' reads a 32-bit hex word; '::eval .=U' re-prints it as unsigned decimal
3913 value=$(mdb -k -e "$variable/X | ::eval .=U")
3914 if [[ $? -ne 0 ]]; then
3915 log_fail "Failed to get value of '$variable' from mdb."
3924 # Set global uint32_t variable to a decimal value using mdb.
# $1 kernel variable name, $2 decimal value (illumos only)
3926 function mdb_set_uint32
# '/W' writes a 32-bit word; '0t' prefix marks the value as decimal
3931 mdb -kw -e "$variable/W 0t$value" > /dev/null
3932 if [[ $? -ne 0 ]]; then
3933 echo "Failed to set '$variable' to '$value' in mdb."
3941 # Set global scalar integer variable to a hex value using mdb.
3942 # Note: Target should have CTF data loaded.
# $1 kernel variable name, $2 hex value (illumos only)
3944 function mdb_ctf_set_int
# '/z' writes using the variable's CTF-derived size
3949 mdb -kw -e "$variable/z $value" > /dev/null
3950 if [[ $? -ne 0 ]]; then
3951 echo "Failed to set '$variable' to '$value' in mdb."
3959 # Compute MD5 digest for given file or stdin if no file given.
3960 # Note: file path must not contain spaces
# (Linux branch; the FreeBSD/illumos branches are elided in this extract)
3971 md5sum -b $file | awk '{ print $1 }'
3977 # Compute SHA256 digest for given file or stdin if no file given.
3978 # Note: file path must not contain spaces
3980 function sha256digest
# awk strips the trailing filename, leaving only the hex digest
3989 sha256sum -b $file | awk '{ print $1 }'
# Create a new (non-ZFS) filesystem on the given device, answering the
# newfs confirmation prompt automatically. $@ passed through to newfs.
3994 function new_fs #<args>
4001 echo y | newfs -v "$@"
# Cross-platform stat(1) wrappers (bodies largely elided in this extract).
4006 function stat_size #<path>
4020 function stat_ctime #<path>
4034 function stat_crtime #<path>
4048 function stat_generation #<path>
# FreeBSD reports the generation number via getversion
4054 getversion "${path}"
# NOTE(review): presumably the illumos/Linux branch; '-f %v' is BSD stat
# syntax for the inode generation — confirm against the full source.
4057 stat -f %v "${path}"
4062 # Run a command as if it was being run in a TTY.
# (header elided) Uses script(1), whose flags differ between BSD and Linux.
# BSD form: command given as trailing args.
4071 script -q /dev/null env "$@"
# util-linux form: command given via -c; --return propagates its exit status.
4073 script --return --quiet -c "$*" /dev/null
4078 # Produce a random permutation of the integers in a given range (inclusive).
# $1 begin, $2 end; one integer per output line, in random order (sort -R)
4080 function range_shuffle # begin end
4085 seq ${begin} ${end} | sort -R
4089 # Cross-platform xattr helpers
# Each helper has a FreeBSD (extattr) branch and a Linux (attr) branch;
# the is_freebsd/else scaffolding is elided in this extract.
4092 function get_xattr # name path
4099 getextattr -qq user "${name}" "${path}"
4102 attr -qg "${name}" "${path}"
4107 function set_xattr # name value path
4115 setextattr user "${name}" "${value}" "${path}"
4118 attr -qs "${name}" -V "${value}" "${path}"
# Value is read from stdin rather than passed as an argument.
4123 function set_xattr_stdin # name value
4130 setextattr -i user "${name}" "${path}"
4133 attr -qs "${name}" "${path}"
4138 function rm_xattr # name path
4145 rmextattr -q user "${name}" "${path}"
4148 attr -qr "${name}" "${path}"
4153 function ls_xattr # path
4159 lsextattr -qq user "${path}"
# Print a ZFS kernel statistic.
# $1 stat name, $2 optional flags (default "-n", used by the FreeBSD sysctl)
4167 function kstat # stat flags?
4170 typeset flags=${2-"-n"}
# FreeBSD: kstats are exported under the kstat.zfs.misc sysctl tree.
4174 sysctl $flags kstat.zfs.misc.$stat
# Linux: kstats live under /proc/spl/kstat/zfs (cat of the file elided).
4177 typeset zfs_kstat="/proc/spl/kstat/zfs/$stat"
4178 [[ -f "$zfs_kstat" ]] || return 1
# Print the value of a single ARC statistic.
# $1 arcstat name
4187 function get_arcstat # stat
# FreeBSD: single-value sysctl lookup.
4193 kstat arcstats.$stat
# Linux: the arcstats file is tabular, so extract the value column with awk.
4196 kstat arcstats | awk "/$stat/ { print \$3 }"
# Punch a hole (deallocate a range) in a file.
# $1 offset, $2 length, $3 file
4204 function punch_hole # offset length file
# FreeBSD: truncate -d deallocates; Linux: fallocate --punch-hole.
4212 truncate -d -o $offset -l $length "$file"
4215 fallocate --punch-hole --offset $offset --length $length "$file"
4224 # Wait for the specified arcstat to reach non-zero quiescence.
4225 # If echo is 1 echo the value after reaching quiescence, otherwise
4226 # if echo is 0 print the arcstat we are waiting on.
4228 function arcstat_quiescence # stat echo
4232 typeset do_once=true
4234 if [[ $echo -eq 0 ]]; then
4235 echo "Waiting for arcstat $1 quiescence."
# Loop until two consecutive samples agree and are non-zero (sleep between
# the two samples is elided). do_once forces at least one iteration.
4238 while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
4239 typeset stat1=$(get_arcstat $stat)
4241 typeset stat2=$(get_arcstat $stat)
4245 if [[ $echo -eq 1 ]]; then
# Wait for arcstat quiescence without printing the final value.
4250 function arcstat_quiescence_noecho # stat
4253 arcstat_quiescence $stat 0
# Wait for arcstat quiescence and print the final value.
4256 function arcstat_quiescence_echo # stat
4259 arcstat_quiescence $stat 1
4263 # Given an array of pids, wait until all processes
4264 # have completed and check their return status.
# Returns non-zero if any child failed (return statement elided).
4266 function wait_for_children #children
4270 for child in "${children[@]}"
# 'wait <pid>' propagates the child's exit status
4273 wait ${child} || child_exit=$?
4274 if [ $child_exit -ne 0 ]; then
4275 echo "child ${child} failed with ${child_exit}"
4283 # Compare two directory trees recursively in a manner similar to diff(1), but
4284 # using rsync. If there are any discrepancies, a summary of the differences are
4285 # output and a non-zero error is returned.
4287 # If you're comparing a directory after a ZIL replay, you should set
4288 # LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause
4289 # directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
# $1 dir_a, $2 dir_b; returns 0 if identical, 2 if a directory is missing
4292 function directory_diff # dir_a dir_b
4296 zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"
4298 # If one of the directories doesn't exist, return 2. This is to match the
4299 # semantics of diff.
4300 if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
4304 # Run rsync with --dry-run --itemize-changes to get something akin to diff
4305 # output, but rsync is far more thorough in detecting differences (diff
4306 # doesn't compare file metadata, and cannot handle special files).
4308 # Also make sure to filter out non-user.* xattrs when comparing. On
4309 # SELinux-enabled systems the copied tree will probably have different
4310 # SELinux labels.
4311 args=("-nicaAHX" '--filter=-x! user.*' "--delete")
4313 # NOTE: Quite a few rsync builds do not support --crtimes which would be
4314 # necessary to verify that creation times are being maintained properly.
4315 # Unfortunately because of this we cannot use it unconditionally but we can
4316 # check if this rsync build supports it and use it then. This check is
4317 # based on the same check in the rsync test suite (testsuite/crtimes.test).
4319 # We check ctimes even with zil_replay=1 because the ZIL does store
4320 # creation times and we should make sure they match (if the creation times
4321 # do not match there is a "c" entry in one of the columns).
4322 if ( rsync --version | grep -q "[, ] crtimes" >/dev/null ); then
4325 echo "NOTE: This rsync package does not support --crtimes (-N)."
4328 # If we are testing a ZIL replay, we need to ignore timestamp changes.
4329 # Unfortunately --no-times doesn't do what we want -- it will still tell
4330 # you if the timestamps don't match but rsync will set the timestamps to
4331 # the current time (leading to an itemised change entry). It's simpler to
4332 # just filter out those lines.
4333 if [ "$zil_replay" -eq 0 ]; then
4336 # Different rsync versions have different numbers of columns. So just
4337 # require that aside from the first two, all other columns must be
4338 # blank (literal ".") or a timestamp field ("[tT]").
4339 filter=("grep" "-v" '^\..[.Tt]\+ ')
# Non-empty filtered output means the trees differ.
4342 diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
4344 if [ -n "$diff" ]; then
4352 # Compare two directory trees recursively, without checking whether the mtimes
4353 # match (creation times will be checked if the available rsync binary supports
4354 # it). This is necessary for ZIL replay checks (because the ZIL does not
4355 # contain mtimes and thus after a ZIL replay, mtimes won't match).
4357 # This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
4359 function replay_directory_diff # dir_a dir_b
4361 LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"