4 # The contents of this file are subject to the terms of the
5 # Common Development and Distribution License (the "License").
6 # You may not use this file except in compliance with the License.
8 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 # or http://www.opensolaris.org/os/licensing.
10 # See the License for the specific language governing permissions
11 # and limitations under the License.
13 # When distributing Covered Code, include this CDDL HEADER in each
14 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 # If applicable, add the following below this CDDL HEADER, with the
16 # fields enclosed by brackets "[]" replaced with your own identifying
17 # information: Portions Copyright [yyyy] [name of copyright owner]
23 # Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
24 # Copyright (c) 2012, 2020, Delphix. All rights reserved.
25 # Copyright (c) 2017, Tim Chase. All rights reserved.
26 # Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
27 # Copyright (c) 2017, Lawrence Livermore National Security LLC.
28 # Copyright (c) 2017, Datto Inc. All rights reserved.
29 # Copyright (c) 2017, Open-E Inc. All rights reserved.
30 # Copyright (c) 2021, The FreeBSD Foundation.
31 # Use is subject to license terms.
34 . ${STF_TOOLS}/include/logapi.shlib
35 . ${STF_SUITE}/include/math.shlib
36 . ${STF_SUITE}/include/blkdev.shlib
38 . ${STF_SUITE}/include/tunables.cfg
41 # Apply constrained path when available. This is required since the
42 # PATH may have been modified by sudo's secure_path behavior.
44 if [ -n "$STF_PATH" ]; then
45 export PATH="$STF_PATH"
49 # Generic dot version comparison function
51 # Returns success when version $1 is greater than or equal to $2.
53 function compare_version_gte
55 if [[ "$(printf "$1\n$2" | sort -V | tail -n1)" == "$1" ]]; then
62 # Linux kernel version comparison function
64 # $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
66 # Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
#
# Encodes a dotted version "X.Y.Z" as the integer X*10000 + Y*100 + Z so two
# versions can be compared numerically. NOTE(review): a component >= 100 would
# collide with the next field — assumed not to occur for kernel versions.
68 function linux_version
# With no argument, derive the running kernel's version from 'uname -r',
# keeping only the leading numeric X.Y.Z portion.
72 [[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
74 typeset version=$(echo $ver | cut -d '.' -f 1)
75 typeset major=$(echo $ver | cut -d '.' -f 2)
76 typeset minor=$(echo $ver | cut -d '.' -f 3)
# Missing components (e.g. "4.10" has no third field) default to zero.
78 [[ -z "$version" ]] && version=0
79 [[ -z "$major" ]] && major=0
80 [[ -z "$minor" ]] && minor=0
# Emit the encoded integer on stdout for use in numeric comparisons.
82 echo $((version * 10000 + major * 100 + minor))
85 # Determine if this is a Linux test system
87 # Return 0 if platform Linux, 1 if otherwise
91 if [[ $(uname -o) == "GNU/Linux" ]]; then
98 # Determine if this is an illumos test system
100 # Return 0 if platform illumos, 1 if otherwise
103 if [[ $(uname -o) == "illumos" ]]; then
110 # Determine if this is a FreeBSD test system
112 # Return 0 if platform FreeBSD, 1 if otherwise
116 if [[ $(uname -o) == "FreeBSD" ]]; then
123 # Determine if this is a DilOS test system
125 # Return 0 if platform DilOS, 1 if otherwise
130 [[ -f /etc/os-release ]] && . /etc/os-release
131 if [[ $ID == "dilos" ]]; then
138 # Determine if this is a 32-bit system
140 # Return 0 if platform is 32-bit, 1 if otherwise
144 if [[ $(getconf LONG_BIT) == "32" ]]; then
151 # Determine if kmemleak is enabled
153 # Return 0 if kmemleak is enabled, 1 if otherwise
157 if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
164 # Determine whether a dataset is mounted
167 # $2 filesystem type; optional - defaulted to zfs
169 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
#
# NOTE(review): the body branches per platform (zfs mount listing, mount -pt,
# df -F / df -t, and a zvol symlink check); several interior lines are elided
# in this excerpt, so the exact branch boundaries are not visible here.
174 [[ -z $fstype ]] && fstype=zfs
175 typeset out dir name ret
# A leading "/" means $1 is a mountpoint path: match it against the
# mountpoint column of 'zfs mount'. Otherwise match the dataset-name column.
179 if [[ "$1" == "/"* ]] ; then
180 for out in $(zfs mount | awk '{print $2}'); do
181 [[ $1 == $out ]] && return 0
184 for out in $(zfs mount | awk '{print $1}'); do
185 [[ $1 == $out ]] && return 0
# Non-zfs fstype: scan the system mount table for a device or dir match.
191 mount -pt $fstype | while read dev dir _t _flags; do
192 [[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
195 out=$(df -F $fstype $1 2>/dev/null)
197 (($ret != 0)) && return $ret
205 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
209 out=$(df -t $fstype $1 2>/dev/null)
# Zvol case: resolve the $ZVOL_DEVDIR/$1 symlink and look for the resolved
# device at the start of a mount-table entry.
213 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
214 link=$(readlink -f $ZVOL_DEVDIR/$1)
215 [[ -n "$link" ]] && \
216 mount | grep -q "^$link" && \
225 # Return 0 if a dataset is mounted; 1 otherwise
228 # $2 filesystem type; optional - defaulted to zfs
233 (($? == 0)) && return 0
237 # Return 0 if a dataset is unmounted; 1 otherwise
240 # $2 filesystem type; optional - defaulted to zfs
245 (($? == 1)) && return 0
258 function default_setup
260 default_setup_noexit "$@"
265 function default_setup_no_mountpoint
267 default_setup_noexit "$1" "$2" "$3" "yes"
273 # Given a list of disks, setup storage pools and datasets.
275 function default_setup_noexit
280 typeset no_mountpoint=$4
281 log_note begin default_setup_noexit
283 if is_global_zone; then
284 if poolexists $TESTPOOL ; then
285 destroy_pool $TESTPOOL
287 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
288 log_must zpool create -f $TESTPOOL $disklist
293 rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
294 mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
296 log_must zfs create $TESTPOOL/$TESTFS
297 if [[ -z $no_mountpoint ]]; then
298 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
301 if [[ -n $container ]]; then
302 rm -rf $TESTDIR1 || \
303 log_unresolved Could not remove $TESTDIR1
304 mkdir -p $TESTDIR1 || \
305 log_unresolved Could not create $TESTDIR1
307 log_must zfs create $TESTPOOL/$TESTCTR
308 log_must zfs set canmount=off $TESTPOOL/$TESTCTR
309 log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
310 if [[ -z $no_mountpoint ]]; then
311 log_must zfs set mountpoint=$TESTDIR1 \
312 $TESTPOOL/$TESTCTR/$TESTFS1
316 if [[ -n $volume ]]; then
317 if is_global_zone ; then
318 log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
321 log_must zfs create $TESTPOOL/$TESTVOL
327 # Given a list of disks, setup a storage pool, file system and
330 function default_container_setup
334 default_setup "$disklist" "true"
338 # Given a list of disks, setup a storage pool,file system
341 function default_volume_setup
345 default_setup "$disklist" "" "true"
349 # Given a list of disks, setup a storage pool,file system,
350 # a container and a volume.
352 function default_container_volume_setup
356 default_setup "$disklist" "true" "true"
360 # Create a snapshot on a filesystem or volume. By default, create a snapshot on
363 # $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
364 # $2 snapshot name. Default, $TESTSNAP
366 function create_snapshot
368 typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
369 typeset snap=${2:-$TESTSNAP}
371 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
372 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
374 if snapexists $fs_vol@$snap; then
375 log_fail "$fs_vol@$snap already exists."
377 datasetexists $fs_vol || \
378 log_fail "$fs_vol must exist."
380 log_must zfs snapshot $fs_vol@$snap
384 # Create a clone from a snapshot, default clone name is $TESTCLONE.
386 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
387 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
389 function create_clone # snapshot clone
391 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
392 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
395 log_fail "Snapshot name is undefined."
397 log_fail "Clone name is undefined."
399 log_must zfs clone $snap $clone
403 # Create a bookmark of the given snapshot. By default, create a bookmark on
406 # $1 Existing filesystem or volume name. Default, $TESTFS
407 # $2 Existing snapshot name. Default, $TESTSNAP
408 # $3 bookmark name. Default, $TESTBKMARK
410 function create_bookmark
412 typeset fs_vol=${1:-$TESTFS}
413 typeset snap=${2:-$TESTSNAP}
414 typeset bkmark=${3:-$TESTBKMARK}
416 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
417 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
418 [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
420 if bkmarkexists $fs_vol#$bkmark; then
421 log_fail "$fs_vol#$bkmark already exists."
423 datasetexists $fs_vol || \
424 log_fail "$fs_vol must exist."
425 snapexists $fs_vol@$snap || \
426 log_fail "$fs_vol@$snap must exist."
428 log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
432 # Create a temporary clone result of an interrupted resumable 'zfs receive'
433 # $1 Destination filesystem name. Must not exist, will be created as the result
434 # of this function along with its %recv temporary clone
435 # $2 Source filesystem name. Must not exist, will be created and destroyed
437 function create_recv_clone
440 typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
441 typeset snap="$sendfs@snap1"
442 typeset incr="$sendfs@snap2"
443 typeset mountpoint="$TESTDIR/create_recv_clone"
444 typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
446 [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
# Both endpoints must be absent so this helper fully owns their lifecycle.
448 datasetexists $recvfs && log_fail "Recv filesystem must not exist."
449 datasetexists $sendfs && log_fail "Send filesystem must not exist."
# Seed the destination with a full stream of the first snapshot.
451 log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
452 log_must zfs snapshot $snap
453 log_must eval "zfs send $snap | zfs recv -u $recvfs"
454 log_must mkfile 1m "$mountpoint/data"
455 log_must zfs snapshot $incr
# Truncate the incremental stream to its first 10K via dd to simulate an
# interrupted transfer, then feed it to a resumable receive (-s). The
# receive must fail, leaving the partially-received %recv state behind.
456 log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
457     iflag=fullblock > $sendfile"
458 log_mustnot eval "zfs recv -su $recvfs < $sendfile"
459 destroy_dataset "$sendfs" "-r"
460 log_must rm -f "$sendfile"
# Confirm the interrupted receive actually produced an inconsistent
# $recvfs/%recv temporary clone.
462 if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
463 log_fail "Error creating temporary $recvfs/%recv clone"
467 function default_mirror_setup
469 default_mirror_setup_noexit $1 $2 $3
475 # Given a pair of disks, set up a storage pool and dataset for the mirror
476 # @parameters: $1 the primary side of the mirror
477 # $2 the secondary side of the mirror
478 # @uses: ZPOOL ZFS TESTPOOL TESTFS
479 function default_mirror_setup_noexit
481 readonly func="default_mirror_setup_noexit"
485 [[ -z $primary ]] && \
486 log_fail "$func: No parameters passed"
487 [[ -z $secondary ]] && \
488 log_fail "$func: No secondary partition passed"
489 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
490 log_must zpool create -f $TESTPOOL mirror $@
491 log_must zfs create $TESTPOOL/$TESTFS
492 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
496 # create a number of mirrors.
497 # We create a number($1) of 2 way mirrors using the pairs of disks named
498 # on the command line. These mirrors are *not* mounted
499 # @parameters: $1 the number of mirrors to create
500 # $... the devices to use to create the mirrors on
501 # @uses: ZPOOL ZFS TESTPOOL
502 function setup_mirrors
504 typeset -i nmirrors=$1
507 while ((nmirrors > 0)); do
508 log_must test -n "$1" -a -n "$2"
509 [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
510 log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
512 ((nmirrors = nmirrors - 1))
517 # create a number of raidz pools.
518 # We create a number($1) of 2 raidz pools using the pairs of disks named
519 # on the command line. These pools are *not* mounted
520 # @parameters: $1 the number of pools to create
521 # $... the devices to use to create the pools on
522 # @uses: ZPOOL ZFS TESTPOOL
523 function setup_raidzs
525 typeset -i nraidzs=$1
528 while ((nraidzs > 0)); do
529 log_must test -n "$1" -a -n "$2"
530 [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
531 log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
533 ((nraidzs = nraidzs - 1))
538 # Destroy the configured testpool mirrors.
539 # the mirrors are of the form ${TESTPOOL}{number}
540 # @uses: ZPOOL ZFS TESTPOOL
541 function destroy_mirrors
543 default_cleanup_noexit
548 function default_raidz_setup
550 default_raidz_setup_noexit "$*"
556 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
557 # $1 the list of disks
559 function default_raidz_setup_noexit
561 typeset disklist="$*"
562 disks=(${disklist[*]})
564 if [[ ${#disks[*]} -lt 2 ]]; then
565 log_fail "A raid-z requires a minimum of two disks."
568 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
569 log_must zpool create -f $TESTPOOL raidz $disklist
570 log_must zfs create $TESTPOOL/$TESTFS
571 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
575 # Common function used to cleanup storage pools and datasets.
577 # Invoked at the start of the test suite to ensure the system
578 # is in a known state, and also at the end of each set of
579 # sub-tests to ensure errors from one set of tests doesn't
580 # impact the execution of the next set.
582 function default_cleanup
584 default_cleanup_noexit
590 # Utility function used to list all available pool names.
592 # NOTE: $KEEP is a variable containing pool names, separated by a newline
593 # character, that must be excluded from the returned list.
595 function get_all_pools
597 zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
600 function default_cleanup_noexit
604 # Destroying the pool will also destroy any
605 # filesystems it contains.
607 if is_global_zone; then
608 zfs unmount -a > /dev/null 2>&1
609 ALL_POOLS=$(get_all_pools)
610 # Here, we loop through the pools we're allowed to
611 # destroy, only destroying them if it's safe to do
613 while [ ! -z ${ALL_POOLS} ]
615 for pool in ${ALL_POOLS}
617 if safe_to_destroy_pool $pool ;
622 ALL_POOLS=$(get_all_pools)
628 for fs in $(zfs list -H -o name \
629 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
630 destroy_dataset "$fs" "-Rf"
633 # Need cleanup here to avoid garbage dir left.
634 for fs in $(zfs list -H -o name); do
635 [[ $fs == /$ZONE_POOL ]] && continue
636 [[ -d $fs ]] && log_must rm -rf $fs/*
640 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
643 for fs in $(zfs list -H -o name); do
644 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
645 log_must zfs set reservation=none $fs
646 log_must zfs set recordsize=128K $fs
647 log_must zfs set mountpoint=/$fs $fs
649 enc=$(get_prop encryption $fs)
650 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
651 [[ "$enc" == "off" ]]; then
652 log_must zfs set checksum=on $fs
654 log_must zfs set compression=off $fs
655 log_must zfs set atime=on $fs
656 log_must zfs set devices=off $fs
657 log_must zfs set exec=on $fs
658 log_must zfs set setuid=on $fs
659 log_must zfs set readonly=off $fs
660 log_must zfs set snapdir=hidden $fs
661 log_must zfs set aclmode=groupmask $fs
662 log_must zfs set aclinherit=secure $fs
667 [[ -d $TESTDIR ]] && \
668 log_must rm -rf $TESTDIR
671 if is_mpath_device $disk1; then
675 rm -f $TEST_BASE_DIR/{err,out}
680 # Common function used to cleanup storage pools, file systems
683 function default_container_cleanup
685 if ! is_global_zone; then
689 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
691 log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
693 destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
694 destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
696 [[ -e $TESTDIR1 ]] && \
697 log_must rm -rf $TESTDIR1 > /dev/null 2>&1
703 # Common function used to cleanup snapshot of file system or volume. Default to
704 # delete the file system's snapshot
708 function destroy_snapshot
710 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
712 if ! snapexists $snap; then
713 log_fail "'$snap' does not exist."
# When a snapshot is unmounted, the mountpoint reported by 'get_prop' may
# not match its real mountpoint. So first verify the snapshot is actually
# mounted on the current system before trusting the reported path.
717 # For the sake of the value which come from 'get_prop' is not equal
718 # to the really mountpoint when the snapshot is unmounted. So, firstly
719 # check and make sure this snapshot's been mounted in current system.
722 if ismounted $snap; then
723 mtpt=$(get_prop mountpoint $snap)
725 log_fail "get_prop mountpoint $snap failed."
# Destroy the snapshot, then remove its now-stale mount directory if any.
728 destroy_dataset "$snap"
729 [[ $mtpt != "" && -d $mtpt ]] && \
730 log_must rm -rf $mtpt
734 # Common function used to cleanup clone. Defaults to $TESTPOOL/$TESTCLONE.
738 function destroy_clone
740 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
742 if ! datasetexists $clone; then
743 log_fail "'$clone' does not exist."
# As in destroy_snapshot: capture the mountpoint before destroying, so the
746 # With the same reason in destroy_snapshot
748 if ismounted $clone; then
749 mtpt=$(get_prop mountpoint $clone)
751 log_fail "get_prop mountpoint $clone failed."
# Destroy the clone, then remove its now-stale mount directory if any.
754 destroy_dataset "$clone"
755 [[ $mtpt != "" && -d $mtpt ]] && \
756 log_must rm -rf $mtpt
760 # Common function used to cleanup bookmark of file system or volume. Default
761 # to delete the file system's bookmark.
765 function destroy_bookmark
767 typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
# Fail fast when the bookmark is absent. (Fixed: the message previously
# interpolated the undefined variable $bkmarkp, printing an empty name.)
769 if ! bkmarkexists $bkmark; then
770 log_fail "'$bkmark' does not exist."
773 destroy_dataset "$bkmark"
776 # Return 0 if a snapshot exists; $? otherwise
782 zfs list -H -t snapshot "$1" > /dev/null 2>&1
787 # Return 0 if a bookmark exists; $? otherwise
791 function bkmarkexists
793 zfs list -H -t bookmark "$1" > /dev/null 2>&1
798 # Return 0 if a hold exists; $? otherwise
805 zfs holds "$2" | awk '{ print $2 }' | grep "$1" > /dev/null 2>&1
810 # Set a property to a certain value on a dataset.
811 # Sets a property of the dataset to the value as passed in.
813 # $1 dataset who's property is being set
815 # $3 value to set property to
817 # 0 if the property could be set.
818 # non-zero otherwise.
821 function dataset_setprop
823 typeset fn=dataset_setprop
826 log_note "$fn: Insufficient parameters (need 3, had $#)"
830 output=$(zfs set $2=$3 $1 2>&1)
833 log_note "Setting property on $1 failed."
834 log_note "property $2=$3"
835 log_note "Return Code: $rv"
836 log_note "Output: $output"
843 # Assign suite defined dataset properties.
844 # This function is used to apply the suite's defined default set of
845 # properties to a dataset.
846 # @parameters: $1 dataset to use
847 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
849 # 0 if the dataset has been altered.
850 # 1 if no pool name was passed in.
851 # 2 if the dataset could not be found.
852 # 3 if the dataset could not have it's properties set.
854 function dataset_set_defaultproperties
858 [[ -z $dataset ]] && return 1
862 for confset in $(zfs list); do
863 if [[ $dataset = $confset ]]; then
868 [[ $found -eq 0 ]] && return 2
869 if [[ -n $COMPRESSION_PROP ]]; then
870 dataset_setprop $dataset compression $COMPRESSION_PROP || \
872 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
874 if [[ -n $CHECKSUM_PROP ]]; then
875 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
877 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
883 # Check a numeric assertion
884 # @parameter: $@ the assertion to check
885 # @output: big loud notice if assertion failed
890 (($@)) || log_fail "$@"
894 # Function to format partition size of a disk
895 # Given a disk cxtxdx reduces all partitions
898 function zero_partitions #<whole_disk_name>
904 gpart destroy -F $diskname
906 DSK=$DEV_DSKDIR/$diskname
907 DSK=$(echo $DSK | sed -e "s|//|/|g")
908 log_must parted $DSK -s -- mklabel gpt
909 blockdev --rereadpt $DSK 2>/dev/null
912 for i in 0 1 3 4 5 6 7
914 log_must set_partition $i "" 0mb $diskname
922 # Given a slice, size and disk, this function
923 # formats the slice to the specified size.
924 # Size should be specified with units as per
925 # the `format` command requirements eg. 100mb 3gb
927 # NOTE: This entire interface is problematic for the Linux parted utility
928 # which requires the end of the partition to be specified. It would be
929 # best to retire this interface and replace it with something more flexible.
930 # At the moment a best effort is made.
932 # arguments: <slice_num> <slice_start> <size_plus_units> <whole_disk_name>
933 function set_partition
935 typeset -i slicenum=$1
938 typeset disk=${4#$DEV_DSKDIR/}
939 disk=${disk#$DEV_RDSKDIR/}
943 if [[ -z $size || -z $disk ]]; then
944 log_fail "The size or disk name is unspecified."
946 disk=$DEV_DSKDIR/$disk
947 typeset size_mb=${size%%[mMgG]}
949 size_mb=${size_mb%%[mMgG][bB]}
950 if [[ ${size:1:1} == 'g' ]]; then
951 ((size_mb = size_mb * 1024))
954 # Create GPT partition table when setting slice 0 or
955 # when the device doesn't already contain a GPT label.
956 parted $disk -s -- print 1 >/dev/null
958 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
959 parted $disk -s -- mklabel gpt
960 if [[ $? -ne 0 ]]; then
961 log_note "Failed to create GPT partition table on $disk"
966 # When no start is given align on the first cylinder.
967 if [[ -z "$start" ]]; then
971 # Determine the cylinder size for the device and using
972 # that calculate the end offset in cylinders.
973 typeset -i cly_size_kb=0
974 cly_size_kb=$(parted -m $disk -s -- \
975 unit cyl print | head -3 | tail -1 | \
976 awk -F '[:k.]' '{print $4}')
977 ((end = (size_mb * 1024 / cly_size_kb) + start))
980 mkpart part$slicenum ${start}cyl ${end}cyl
982 if [[ $ret_val -ne 0 ]]; then
983 log_note "Failed to create partition $slicenum on $disk"
987 blockdev --rereadpt $disk 2>/dev/null
988 block_device_wait $disk
991 if [[ -z $size || -z $disk ]]; then
992 log_fail "The size or disk name is unspecified."
994 disk=$DEV_DSKDIR/$disk
996 if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
997 gpart destroy -F $disk >/dev/null 2>&1
998 gpart create -s GPT $disk
999 if [[ $? -ne 0 ]]; then
1000 log_note "Failed to create GPT partition table on $disk"
1005 typeset index=$((slicenum + 1))
1007 if [[ -n $start ]]; then
1010 gpart add -t freebsd-zfs $start -s $size -i $index $disk
1011 if [[ $ret_val -ne 0 ]]; then
1012 log_note "Failed to create partition $slicenum on $disk"
1016 block_device_wait $disk
1019 if [[ -z $slicenum || -z $size || -z $disk ]]; then
1020 log_fail "The slice, size or disk name is unspecified."
1023 typeset format_file=/var/tmp/format_in.$$
1025 echo "partition" >$format_file
1026 echo "$slicenum" >> $format_file
1027 echo "" >> $format_file
1028 echo "" >> $format_file
1029 echo "$start" >> $format_file
1030 echo "$size" >> $format_file
1031 echo "label" >> $format_file
1032 echo "" >> $format_file
1033 echo "q" >> $format_file
1034 echo "q" >> $format_file
1036 format -e -s -d $disk -f $format_file
1042 if [[ $ret_val -ne 0 ]]; then
1043 log_note "Unable to format $disk slice $slicenum to $size"
1050 # Delete all partitions on all disks - this is specifically for the use of multipath
1051 # devices which currently can only be used in the test suite as raw/un-partitioned
1052 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
1054 function delete_partitions
1058 if [[ -z $DISKSARRAY ]]; then
1064 for disk in $DISKSARRAY; do
1065 for (( part = 1; part < MAX_PARTITIONS; part++ )); do
1066 typeset partition=${disk}${SLICE_PREFIX}${part}
1067 parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
1068 if lsblk | grep -qF ${partition}; then
1069 log_fail "Partition ${partition} not deleted"
1071 log_note "Partition ${partition} deleted"
1075 elif is_freebsd; then
1076 for disk in $DISKSARRAY; do
1077 if gpart destroy -F $disk; then
1078 log_note "Partitions for ${disk} deleted"
1080 log_fail "Partitions for ${disk} not deleted"
1087 # Get the end cyl of the given slice
1089 function get_endslice #<disk> <slice>
1093 if [[ -z $disk || -z $slice ]] ; then
1094 log_fail "The disk name or slice number is unspecified."
1099 endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
1100 awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
1101 ((endcyl = (endcyl + 1)))
1104 disk=${disk#/dev/zvol/}
1106 slice=$((slice + 1))
1107 endcyl=$(gpart show $disk | \
1108 awk -v slice=$slice '$3 == slice { print $1 + $2 }')
1111 disk=${disk#/dev/dsk/}
1112 disk=${disk#/dev/rdsk/}
1116 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
1117 grep "sectors\/cylinder" | \
1120 if ((ratio == 0)); then
1124 typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
1125 nawk -v token="$slice" '{if ($1==token) print $6}')
1127 ((endcyl = (endcyl + 1) / ratio))
1136 # Given a size,disk and total slice number, this function formats the
1137 # disk slices from 0 to the total slice number with the same specified
1140 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
1143 typeset slice_size=$1
1144 typeset disk_name=$2
1145 typeset total_slices=$3
1148 zero_partitions $disk_name
1149 while ((i < $total_slices)); do
1156 log_must set_partition $i "$cyl" $slice_size $disk_name
1157 cyl=$(get_endslice $disk_name $i)
1163 # This function continues to write to a filenum number of files into dirnum
1164 # number of directories until either file_write returns an error or the
1165 # maximum number of files per directory have been written.
1168 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1170 # Return value: 0 on success
1174 # destdir: is the directory where everything is to be created under
1175 # dirnum: the maximum number of subdirectories to use, -1 no limit
1176 # filenum: the maximum number of files per subdirectory
1177 # bytes: number of bytes to write
1178 # num_writes: number of types to write out bytes
1179 # data: the data that will be written
1182 # fill_fs /testdir 20 25 1024 256 0
1184 # Note: bytes * num_writes equals the size of the testfile
1186 function fill_fs # destdir dirnum filenum bytes num_writes data
1188 typeset destdir=${1:-$TESTDIR}
1189 typeset -i dirnum=${2:-50}
1190 typeset -i filenum=${3:-50}
1191 typeset -i bytes=${4:-8192}
1192 typeset -i num_writes=${5:-10240}
1193 typeset data=${6:-0}
1195 mkdir -p $destdir/{1..$dirnum}
1196 for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
1197 file_write -o create -f $f -b $bytes -c $num_writes -d $data \
1204 # Simple function to get the specified property. If unable to
1205 # get the property then exits.
1207 # Note property is in 'parsable' format (-p)
1209 function get_prop # property dataset
1215 prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
1216 if [[ $? -ne 0 ]]; then
1217 log_note "Unable to get $prop property for dataset " \
1227 # Simple function to get the specified property of pool. If unable to
1228 # get the property then exits.
1230 # Note property is in 'parsable' format (-p)
1232 function get_pool_prop # property pool
1238 if poolexists $pool ; then
1239 prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
1241 if [[ $? -ne 0 ]]; then
1242 log_note "Unable to get $prop property for pool " \
1247 log_note "Pool $pool does not exist."
1255 # Return 0 if a pool exists; $? otherwise
1263 if [[ -z $pool ]]; then
1264 log_note "No pool name given."
1268 zpool get name "$pool" > /dev/null 2>&1
1272 # Return 0 if all the specified datasets exist; $? otherwise
1275 function datasetexists
1277 if (($# == 0)); then
1278 log_note "No dataset name given."
1282 while (($# > 0)); do
1283 zfs get name $1 > /dev/null 2>&1 || \
1291 # return 0 if none of the specified datasets exists, otherwise return 1.
1294 function datasetnonexists
1296 if (($# == 0)); then
1297 log_note "No dataset name given."
1301 while (($# > 0)); do
1302 zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1310 function is_shared_freebsd
1314 pgrep -q mountd && showmount -E | grep -qx $fs
1317 function is_shared_illumos
1322 for mtpt in `share | awk '{print $2}'` ; do
1323 if [[ $mtpt == $fs ]] ; then
1328 typeset stat=$(svcs -H -o STA nfs/server:default)
1329 if [[ $stat != "ON" ]]; then
1330 log_note "Current nfs/server status: $stat"
1336 function is_shared_linux
1341 for mtpt in `share | awk '{print $1}'` ; do
1342 if [[ $mtpt == $fs ]] ; then
1350 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1352 # Returns 0 if shared, 1 otherwise.
1359 if [[ $fs != "/"* ]] ; then
1360 if datasetnonexists "$fs" ; then
1363 mtpt=$(get_prop mountpoint "$fs")
1365 none|legacy|-) return 1
1374 FreeBSD) is_shared_freebsd "$fs" ;;
1375 Linux) is_shared_linux "$fs" ;;
1376 *) is_shared_illumos "$fs" ;;
1380 function is_exported_illumos
1385 for mtpt in `awk '{print $1}' /etc/dfs/sharetab` ; do
1386 if [[ $mtpt == $fs ]] ; then
1394 function is_exported_freebsd
1399 for mtpt in `awk '{print $1}' /etc/zfs/exports` ; do
1400 if [[ $mtpt == $fs ]] ; then
1408 function is_exported_linux
1413 for mtpt in `awk '{print $1}' /etc/exports.d/zfs.exports` ; do
1414 if [[ $mtpt == $fs ]] ; then
1423 # Given a mountpoint, or a dataset name, determine if it is exported via
1424 # the os-specific NFS exports file.
1426 # Returns 0 if exported, 1 otherwise.
1428 function is_exported
1433 if [[ $fs != "/"* ]] ; then
1434 if datasetnonexists "$fs" ; then
1437 mtpt=$(get_prop mountpoint "$fs")
1439 none|legacy|-) return 1
1448 FreeBSD) is_exported_freebsd "$fs" ;;
1449 Linux) is_exported_linux "$fs" ;;
1450 *) is_exported_illumos "$fs" ;;
1455 # Given a dataset name determine if it is shared via SMB.
1457 # Returns 0 if shared, 1 otherwise.
1459 function is_shared_smb
1464 if datasetnonexists "$fs" ; then
1467 fs=$(echo $fs | tr / _)
1471 for mtpt in `net usershare list | awk '{print $1}'` ; do
1472 if [[ $mtpt == $fs ]] ; then
1478 log_note "Currently unsupported by the test framework"
1484 # Given a mountpoint, determine if it is not shared via NFS.
1486 # Returns 0 if not shared, 1 otherwise.
1493 if (($? == 0)); then
1501 # Given a dataset determine if it is not shared via SMB.
1503 # Returns 0 if not shared, 1 otherwise.
1505 function not_shared_smb
1510 if (($? == 0)); then
1518 # Helper function to unshare a mountpoint.
1520 function unshare_fs #fs
1524 is_shared $fs || is_shared_smb $fs
1525 if (($? == 0)); then
1526 zfs unshare $fs || log_fail "zfs unshare $fs failed"
1533 # Helper function to share a NFS mountpoint.
1535 function share_nfs #fs
1541 if (($? != 0)); then
1542 log_must share "*:$fs"
1546 if (($? != 0)); then
1547 log_must share -F nfs $fs
1555 # Helper function to unshare a NFS mountpoint.
1557 function unshare_nfs #fs
1563 if (($? == 0)); then
1564 log_must unshare -u "*:$fs"
1568 if (($? == 0)); then
1569 log_must unshare -F nfs $fs
1577 # Helper function to show NFS shares.
1579 function showshares_nfs
1591 # Helper function to show SMB shares.
1593 function showshares_smb
1608 elif is_freebsd; then
1611 log_unsupported "Unknown platform"
1614 if [[ $? -ne 0 ]]; then
1615 log_unsupported "The NFS utilities are not installed"
1620 # Check NFS server status and trigger it online.
1622 function setup_nfs_server
1624 # Cannot share directory in non-global zone.
1626 if ! is_global_zone; then
1627 log_note "Cannot trigger NFS server by sharing in LZ."
1633 # Re-synchronize /var/lib/nfs/etab with /etc/exports and
1634 # /etc/exports.d./* to provide a clean test environment.
1638 log_note "NFS server must be started prior to running ZTS."
1640 elif is_freebsd; then
1641 kill -s HUP $(cat /var/run/mountd.pid)
1643 log_note "NFS server must be started prior to running ZTS."
1647 typeset nfs_fmri="svc:/network/nfs/server:default"
1648 if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1650 # Only really sharing operation can enable NFS server
1651 # to online permanently.
1653 typeset dummy=/tmp/dummy
1655 if [[ -d $dummy ]]; then
1656 log_must rm -rf $dummy
1659 log_must mkdir $dummy
1660 log_must share $dummy
1663 # Waiting for fmri's status to be the final status.
1664 # Otherwise, in transition, an asterisk (*) is appended for
1665 # instances, unshare will reverse status to 'DIS' again.
1667 # Waiting for 1's at least.
1671 while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1678 log_must unshare $dummy
1679 log_must rm -rf $dummy
1682 log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1686 # To verify whether calling process is in global zone
1688 # Return 0 if in global zone, 1 in non-global zone
1690 function is_global_zone
# Zones exist only on illumos; Linux and FreeBSD always count as global.
1692 if is_linux || is_freebsd; then
1695 typeset cur_zone=$(zonename 2>/dev/null)
1696 if [[ $cur_zone != "global" ]]; then
1704 # Verify whether test is permitted to run from
1705 # global zone, local zone, or both
1707 # $1 zone limit, could be "global", "local", or "both"(no limit)
1709 # Return 0 if permitted, otherwise exit with log_unsupported
1711 function verify_runnable # zone limit
1715 [[ -z $limit ]] && return 0
# In the global zone only a "local"-limited test is blocked;
# in a local zone only a "global"-limited test is blocked.
1717 if is_global_zone ; then
1721 local) log_unsupported "Test is unable to run from "\
1724 *) log_note "Warning: unknown limit $limit - " \
1732 global) log_unsupported "Test is unable to run from "\
1735 *) log_note "Warning: unknown limit $limit - " \
1746 # Return 0 if created successfully or the pool already exists; $? otherwise
1747 # Note: In local zones, this function should return 0 silently.
1750 # $2-n - [keyword] devs_list
1752 function create_pool #pool devs_list
# Strip any trailing dataset component so callers may pass pool/fs.
1754 typeset pool=${1%%/*}
1758 if [[ -z $pool ]]; then
1759 log_note "Missing pool name."
1763 if poolexists $pool ; then
1767 if is_global_zone ; then
# Remove a stale default mountpoint left over from a previous run.
1768 [[ -d /$pool ]] && rm -rf /$pool
1769 log_must zpool create -f $pool $@
1775 # Return 0 if destroyed successfully or the pool does not exist; $? otherwise
1776 # Note: In local zones, this function should return 0 silently.
1779 # Destroy pool with the given parameters.
1781 function destroy_pool #pool
1783 typeset pool=${1%%/*}
1786 if [[ -z $pool ]]; then
1787 log_note "No pool name given."
1791 if is_global_zone ; then
1792 if poolexists "$pool" ; then
1793 mtpt=$(get_prop mountpoint "$pool")
1795 # At times, syseventd/udev activity can cause attempts
1796 # to destroy a pool to fail with EBUSY. We retry a few
1797 # times allowing failures before requiring the destroy
1799 log_must_busy zpool destroy -f $pool
# Also clean up the former mountpoint directory.
1802 log_must rm -rf $mtpt
1804 log_note "Pool does not exist. ($pool)"
1812 # Return 0 if created successfully; $? otherwise
1815 # $2-n - dataset options
1817 function create_dataset #dataset dataset_options
1823 if [[ -z $dataset ]]; then
1824 log_note "Missing dataset name."
# Re-create from scratch if the dataset is already present.
1828 if datasetexists $dataset ; then
1829 destroy_dataset $dataset
1832 log_must zfs create $@ $dataset
1837 # Return 0 if destroyed successfully or the dataset does not exist; $? otherwise
1838 # Note: In local zones, this function should return 0 silently.
1841 # $2 - custom arguments for zfs destroy
1842 # Destroy dataset with the given parameters.
1844 function destroy_dataset #dataset #args
1848 typeset args=${2:-""}
1850 if [[ -z $dataset ]]; then
1851 log_note "No dataset name given."
1855 if is_global_zone ; then
1856 if datasetexists "$dataset" ; then
1857 mtpt=$(get_prop mountpoint "$dataset")
1858 log_must_busy zfs destroy $args $dataset
1861 log_must rm -rf $mtpt
1863 log_note "Dataset does not exist. ($dataset)"
1872 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1873 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1874 # and a zvol device to the zone.
1877 # $2 zone root directory prefix
1880 function zfs_zones_setup #zone_name zone_root zone_ip
1882 typeset zone_name=${1:-$(hostname)-z}
1883 typeset zone_root=${2:-"/zone_root"}
1884 typeset zone_ip=${3:-"10.1.1.10"}
1885 typeset prefix_ctr=$ZONE_CTR
1886 typeset pool_name=$ZONE_POOL
1890 # Create pool and 5 containers within it
1892 [[ -d /$pool_name ]] && rm -rf /$pool_name
1893 log_must zpool create -f $pool_name $DISKS
1894 while ((i < cntctr)); do
1895 log_must zfs create $pool_name/$prefix_ctr$i
# Create a zvol that will be delegated to the zone as a device.
1900 log_must zfs create -V 1g $pool_name/zone_zvol
1904 # Add slog device for pool
1906 typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
1907 log_must mkfile $MINVDEVSIZE $sdevs
1908 log_must zpool add $pool_name log mirror $sdevs
1910 # this isn't supported just yet.
1911 # Create a filesystem. In order to add this to
1912 # the zone, it must have its mountpoint set to 'legacy'
1913 # log_must zfs create $pool_name/zfs_filesystem
1914 # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
1916 [[ -d $zone_root ]] && \
1917 log_must rm -rf $zone_root/$zone_name
1918 [[ ! -d $zone_root ]] && \
1919 log_must mkdir -p -m 0700 $zone_root/$zone_name
1921 # Create zone configure file and configure the zone
1923 typeset zone_conf=/tmp/zone_conf.$$
1924 echo "create" > $zone_conf
1925 echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1926 echo "set autoboot=true" >> $zone_conf
1928 while ((i < cntctr)); do
1929 echo "add dataset" >> $zone_conf
1930 echo "set name=$pool_name/$prefix_ctr$i" >> \
1932 echo "end" >> $zone_conf
1936 # add our zvol to the zone
1937 echo "add device" >> $zone_conf
1938 echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1939 echo "end" >> $zone_conf
1941 # add a corresponding zvol rdsk to the zone
1942 echo "add device" >> $zone_conf
1943 echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1944 echo "end" >> $zone_conf
1946 # once it's supported, we'll add our filesystem to the zone
1947 # echo "add fs" >> $zone_conf
1948 # echo "set type=zfs" >> $zone_conf
1949 # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1950 # echo "set dir=/export/zfs_filesystem" >> $zone_conf
1951 # echo "end" >> $zone_conf
1953 echo "verify" >> $zone_conf
1954 echo "commit" >> $zone_conf
1955 log_must zonecfg -z $zone_name -f $zone_conf
1956 log_must rm -f $zone_conf
# Install the zone; a failure here is fatal to the test run.
1959 zoneadm -z $zone_name install
1960 if (($? == 0)); then
1961 log_note "SUCCESS: zoneadm -z $zone_name install"
1963 log_fail "FAIL: zoneadm -z $zone_name install"
1966 # Install sysidcfg file
1968 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1969 echo "system_locale=C" > $sysidcfg
1970 echo "terminal=dtterm" >> $sysidcfg
1971 echo "network_interface=primary {" >> $sysidcfg
1972 echo "hostname=$zone_name" >> $sysidcfg
1973 echo "}" >> $sysidcfg
1974 echo "name_service=NONE" >> $sysidcfg
1975 echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
1976 echo "security_policy=NONE" >> $sysidcfg
1977 echo "timezone=US/Eastern" >> $sysidcfg
# Boot the newly installed zone.
1980 log_must zoneadm -z $zone_name boot
1984 # Reexport TESTPOOL & TESTPOOL(1-4)
1986 function reexport_pool
1991 while ((i < cntctr)); do
1993 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1994 if ! ismounted $TESTPOOL; then
1995 log_must zfs mount $TESTPOOL
# TESTPOOL1..TESTPOOL4 variable names are assembled dynamically,
# hence the eval.
1998 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1999 if eval ! ismounted \$TESTPOOL$i; then
2000 log_must eval zfs mount \$TESTPOOL$i
2008 # Verify a given disk or pool state
2010 # Return 0 if pool/disk matches expected state, 1 otherwise
2012 function check_state # pool disk state{online,offline,degraded}
# Accept the disk either bare or prefixed with $DEV_DSKDIR/.
2015 typeset disk=${2#$DEV_DSKDIR/}
2018 [[ -z $pool ]] || [[ -z $state ]] \
2019 && log_fail "Arguments invalid or missing"
2021 if [[ -z $disk ]]; then
2022 #check pool state only
2023 zpool get -H -o value health $pool \
2024 | grep -i "$state" > /dev/null 2>&1
2026 zpool status -v $pool | grep "$disk" \
2027 | grep -i "$state" > /dev/null 2>&1
2034 # Get the mountpoint of snapshot
2035 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
2038 function snapshot_mountpoint
2040 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
# A snapshot name must contain the '@' separator.
2042 if [[ $dataset != *@* ]]; then
2043 log_fail "Error name of snapshot '$dataset'."
2046 typeset fs=${dataset%@*}
2047 typeset snap=${dataset#*@}
2049 if [[ -z $fs || -z $snap ]]; then
2050 log_fail "Error name of snapshot '$dataset'."
2053 echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
2057 # Given a device and 'ashift' value verify it's correctly set on every label
2059 function verify_ashift # device ashift
# zdb -lll prints every label; check the ashift recorded in each.
2064 zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
2080 # Given a pool and file system, this function will verify the file system
2081 # using the zdb internal tool. Note that the pool is exported and imported
2082 # to ensure it has consistent state.
2084 function verify_filesys # pool filesystem dir
2087 typeset filesys="$2"
2088 typeset zdbout="/tmp/zdbout.$$"
2093 typeset search_path=""
2095 log_note "Calling zdb to verify filesystem '$filesys'"
2096 zfs unmount -a > /dev/null 2>&1
2097 log_must zpool export $pool
# Optional extra directories become -d search paths for the import.
2099 if [[ -n $dirs ]] ; then
2100 for dir in $dirs ; do
2101 search_path="$search_path -d $dir"
2105 log_must zpool import $search_path $pool
2107 zdb -cudi $filesys > $zdbout 2>&1
2108 if [[ $? != 0 ]]; then
2109 log_note "Output: zdb -cudi $filesys"
2111 log_fail "zdb detected errors with: '$filesys'"
2114 log_must zfs mount -a
2115 log_must rm -rf $zdbout
2119 # Given a pool issue a scrub and verify that no checksum errors are reported.
2121 function verify_pool
2123 typeset pool=${1:-$TESTPOOL}
2125 log_must zpool scrub $pool
2126 log_must wait_scrubbed $pool
# Sum the CKSUM column over all vdev rows of 'zpool status'.
2128 typeset -i cksum=$(zpool status $pool | awk '
2130 isvdev { errors += $NF }
2131 /CKSUM$/ { isvdev = 1 }
2132 END { print errors }
2134 if [[ $cksum != 0 ]]; then
2135 log_must zpool status -v
2136 log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
2141 # Given a pool, list all leaf disks in the pool
2143 function get_disklist # pool
# Skip the iostat header lines, separator rows, and vdev grouping rows.
2147 disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
2148 grep -v "\-\-\-\-\-" | \
2149 egrep -v -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
2155 # Given a pool, list all disks in the pool with their full
2156 # path (like "/dev/sda" instead of "sda").
2158 function get_disklist_fullpath # pool
2167 # This function kills a given list of processes after a time period. We use
2168 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
2169 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
2170 # would be listed as FAIL, which we don't want : we're happy with stress tests
2171 # running for a certain amount of time, then finishing.
2173 # @param $1 the time in seconds after which we should terminate these processes
2174 # @param $2..$n the processes we wish to terminate.
2176 function stress_timeout
2178 typeset -i TIMEOUT=$1
2182 log_note "Waiting for child processes($cpids). " \
2183 "It could last dozens of minutes, please be patient ..."
2184 log_must sleep $TIMEOUT
2186 log_note "Killing child processes after ${TIMEOUT} stress timeout."
2188 for pid in $cpids; do
# Only signal processes that are still alive.
2189 ps -p $pid > /dev/null 2>&1
2190 if (($? == 0)); then
2191 log_must kill -USR1 $pid
2197 # Verify a given hotspare disk is inuse or avail
2199 # Return 0 if pool/disk matches expected state, 1 otherwise
2201 function check_hotspare_state # pool disk state{inuse,avail}
2204 typeset disk=${2#$DEV_DSKDIR/}
2207 cur_state=$(get_device_state $pool $disk "spares")
2209 if [[ $state != ${cur_state} ]]; then
2216 # Wait until a hotspare transitions to a given state or times out.
2218 # Return 0 when pool/disk matches expected state, 1 on timeout.
2220 function wait_hotspare_state # pool disk state timeout
2223 typeset disk=${2#*$DEV_DSKDIR/}
# Default to a 60 second timeout, polling once per iteration.
2225 typeset timeout=${4:-60}
2228 while [[ $i -lt $timeout ]]; do
2229 if check_hotspare_state $pool $disk $state; then
2241 # Verify a given slog disk is inuse or avail
2243 # Return 0 if pool/disk matches expected state, 1 otherwise
2245 function check_slog_state # pool disk state{online,offline,unavail}
2248 typeset disk=${2#$DEV_DSKDIR/}
2251 cur_state=$(get_device_state $pool $disk "logs")
2253 if [[ $state != ${cur_state} ]]; then
2260 # Verify a given vdev disk is inuse or avail
2262 # Return 0 if pool/disk matches expected state, 1 otherwise
2264 function check_vdev_state # pool disk state{online,offline,unavail}
2267 typeset disk=${2#*$DEV_DSKDIR/}
2270 cur_state=$(get_device_state $pool $disk)
2272 if [[ $state != ${cur_state} ]]; then
2279 # Wait until a vdev transitions to a given state or times out.
2281 # Return 0 when pool/disk matches expected state, 1 on timeout.
2283 function wait_vdev_state # pool disk state timeout
2286 typeset disk=${2#*$DEV_DSKDIR/}
2288 typeset timeout=${4:-60}
2291 while [[ $i -lt $timeout ]]; do
2292 if check_vdev_state $pool $disk $state; then
2304 # Check the output of 'zpool status -v <pool>',
2305 # and to see if the content of <token> contain the <keyword> specified.
2307 # Return 0 if it contains the keyword, 1 otherwise
2309 function check_pool_status # pool token keyword <verbose>
2314 typeset verbose=${4:-false}
# Extract the "<token>: ..." line from the status output.
2316 scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2317 ($1==token) {print $0}')
2318 if [[ $verbose == true ]]; then
2321 echo $scan | egrep -i "$keyword" > /dev/null 2>&1
2327 # The following functions are instances of check_pool_status()
2328 # is_pool_resilvering - to check if the pool resilver is in progress
2329 # is_pool_resilvered - to check if the pool resilver is completed
2330 # is_pool_scrubbing - to check if the pool scrub is in progress
2331 # is_pool_scrubbed - to check if the pool scrub is completed
2332 # is_pool_scrub_stopped - to check if the pool scrub is stopped
2333 # is_pool_scrub_paused - to check if the pool scrub has paused
2334 # is_pool_removing - to check if the pool is removing a vdev
2335 # is_pool_removed - to check if the pool remove is completed
2336 # is_pool_discarding - to check if the pool checkpoint is being discarded
2338 function is_pool_resilvering #pool <verbose>
2340 check_pool_status "$1" "scan" \
2341 "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
2345 function is_pool_resilvered #pool <verbose>
2347 check_pool_status "$1" "scan" "resilvered " $2
2351 function is_pool_scrubbing #pool <verbose>
2353 check_pool_status "$1" "scan" "scrub in progress since " $2
2357 function is_pool_scrubbed #pool <verbose>
2359 check_pool_status "$1" "scan" "scrub repaired" $2
2363 function is_pool_scrub_stopped #pool <verbose>
2365 check_pool_status "$1" "scan" "scrub canceled" $2
2369 function is_pool_scrub_paused #pool <verbose>
2371 check_pool_status "$1" "scan" "scrub paused since " $2
2375 function is_pool_removing #pool
2377 check_pool_status "$1" "remove" "in progress since "
2381 function is_pool_removed #pool
2383 check_pool_status "$1" "remove" "completed on"
2387 function is_pool_discarding #pool
2389 check_pool_status "$1" "checkpoint" "discarding"
# Poll until pool health becomes DEGRADED, or give up after $timeout seconds.
2393 function wait_for_degraded
2396 typeset timeout=${2:-30}
2400 [[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
2401 log_note "$pool is not yet degraded."
2403 if ((SECONDS - t0 > $timeout)); then
2404 log_note "$pool not degraded after $timeout seconds."
2413 # Use create_pool()/destroy_pool() to clean up the information
2414 # in the given disk to avoid slice overlapping.
2416 function cleanup_devices #vdevs
2418 typeset pool="foopool$$"
2421 zero_partitions $vdev
# Creating and destroying a scratch pool rewrites the labels cleanly.
2424 poolexists $pool && destroy_pool $pool
2425 create_pool $pool $@
2432 # A function to find and locate free disks on a system or from given
2433 # disks as the parameter. It works by locating disks that are in use
2434 # as swap devices and dump devices, and also disks listed in /etc/vfstab
2436 # $@ given disks to find which are free, default is all disks in
2439 # @return a string containing the list of available disks
2443 # Trust provided list, no attempt is made to locate unused devices.
2444 if is_linux || is_freebsd; then
2450 sfi=/tmp/swaplist.$$
2451 dmpi=/tmp/dumpdev.$$
2452 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2455 dumpadm > $dmpi 2>/dev/null
2457 # write an awk script that can process the output of format
2458 # to produce a list of disks we know about. Note that we have
2459 # to escape "$2" so that the shell doesn't interpret it while
2460 # we're creating the awk script.
2461 # -------------------
2462 cat > /tmp/find_disks.awk <<EOF
2471 if (searchdisks && \$2 !~ "^$"){
2477 /^AVAILABLE DISK SELECTIONS:/{
2481 #---------------------
2483 chmod 755 /tmp/find_disks.awk
2484 disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
2485 rm /tmp/find_disks.awk
2488 for disk in $disks; do
# skip disks that appear in the mount table
2490 grep "${disk}[sp]" /etc/mnttab >/dev/null
2491 (($? == 0)) && continue
# skip disks in use as swap devices
2493 grep "${disk}[sp]" $sfi >/dev/null
2494 (($? == 0)) && continue
2495 # check for dump device
2496 grep "${disk}[sp]" $dmpi >/dev/null
2497 (($? == 0)) && continue
2498 # check to see if this disk hasn't been explicitly excluded
2499 # by a user-set environment variable
2500 echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
2501 (($? == 0)) && continue
2502 unused_candidates="$unused_candidates $disk"
2507 # now just check to see if those disks do actually exist
2508 # by looking for a device pointing to the first slice in
2509 # each case. limit the number to max_finddisksnum
2511 for disk in $unused_candidates; do
2512 if is_disk_device $DEV_DSKDIR/${disk}s0 && \
2513 [ $count -lt $max_finddisksnum ]; then
2514 unused="$unused $disk"
2515 # do not impose limit if $@ is provided
2516 [[ -z $@ ]] && ((count = count + 1))
2520 # finally, return our disk list
# Create a FreeBSD user with pw(8), probing for a free uid.
2524 function add_user_freebsd #<group_name> <user_name> <basedir>
2530 # Check to see if the user exists.
2531 if id $user > /dev/null 2>&1; then
2535 # Assign 1000 as the base uid
2539 pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
2543 # The uid is not unique
# Probe successive uids until pw useradd succeeds or we hit the cap.
2547 if [[ $uid == 65000 ]]; then
2548 log_fail "No user id available under 65000 for $user"
# Suppress the login banner for the new account.
2553 touch $basedir/$user/.hushlogin
2559 # Delete the specified user.
2563 function del_user_freebsd #<logname>
2567 if id $user > /dev/null 2>&1; then
2568 log_must pw userdel $user
2575 # Select valid gid and create specified group.
2579 function add_group_freebsd #<group_name>
2583 # See if the group already exists.
2584 if pw groupshow $group >/dev/null 2>&1; then
2588 # Assign 1000 as the base gid
2591 pw groupadd -g $gid -n $group > /dev/null 2>&1
2595 # The gid is not unique
2599 if [[ $gid == 65000 ]]; then
# NOTE(review): message says "user id" but this is a group id probe.
2600 log_fail "No user id available under 65000 for $group"
2606 # Delete the specified group.
2610 function del_group_freebsd #<group_name>
2614 pw groupdel -n $group > /dev/null 2>&1
2617 # Group does not exist, or was deleted successfully.
2619 # Name already exists as a group name
2620 9) log_must pw groupdel $group ;;
# Create an illumos user in the given group.
2627 function add_user_illumos #<group_name> <user_name> <basedir>
2633 log_must useradd -g $group -d $basedir/$user -m $user
2638 function del_user_illumos #<user_name>
2642 if id $user > /dev/null 2>&1; then
# userdel can transiently fail while the uid is still in use; retry.
2643 log_must_retry "currently used" 6 userdel $user
2649 function add_group_illumos #<group_name>
2655 groupadd -g $gid $group > /dev/null 2>&1
2659 # The gid is not unique
2666 function del_group_illumos #<group_name>
# A no-op rename probes for existence: it fails differently if absent.
2670 groupmod -n $grp $grp > /dev/null 2>&1
2673 # Group does not exist.
2675 # Name already exists as a group name
2676 9) log_must groupdel $grp ;;
# Create a Linux user in the given group.
2681 function add_user_linux #<group_name> <user_name> <basedir>
2687 log_must useradd -g $group -d $basedir/$user -m $user
2689 # Add new users to the same group and the command line utils.
2690 # This allows them to be run out of the original users home
2691 # directory as long as it is permissioned to be group readable.
2692 cmd_group=$(stat --format="%G" $(which zfs))
2693 log_must usermod -a -G $cmd_group $user
2698 function del_user_linux #<user_name>
2702 if id $user > /dev/null 2>&1; then
2703 log_must_retry "currently used" 6 userdel $user
2709 function add_group_linux #<group_name>
2713 # Assign 100 as the base gid, a larger value is selected for
2714 # Linux because for many distributions 1000 and under are reserved.
2716 groupadd $group > /dev/null 2>&1
2725 function del_group_linux #<group_name>
2729 getent group $group > /dev/null 2>&1
2732 # Group does not exist.
2734 # Name already exists as a group name
2735 0) log_must groupdel $group ;;
2743 # Add specified user to specified group
2747 # $3 base of the homedir (optional)
2749 function add_user #<group_name> <user_name> <basedir>
2753 typeset basedir=${3:-"/var/tmp"}
2755 if ((${#group} == 0 || ${#user} == 0)); then
2756 log_fail "group name or user name are not defined."
# Dispatch to the platform-specific implementation.
2761 add_user_freebsd "$group" "$user" "$basedir"
2764 add_user_linux "$group" "$user" "$basedir"
2767 add_user_illumos "$group" "$user" "$basedir"
2775 # Delete the specified user.
2778 # $2 base of the homedir (optional)
2780 function del_user #<logname> <basedir>
2783 typeset basedir=${2:-"/var/tmp"}
2785 if ((${#user} == 0)); then
2786 log_fail "login name is necessary."
2791 del_user_freebsd "$user"
2794 del_user_linux "$user"
2797 del_user_illumos "$user"
# Always remove the home directory, regardless of platform.
2801 [[ -d $basedir/$user ]] && rm -fr $basedir/$user
2807 # Select valid gid and create specified group.
2811 function add_group #<group_name>
2815 if ((${#group} == 0)); then
2816 log_fail "group name is necessary."
2821 add_group_freebsd "$group"
2824 add_group_linux "$group"
2827 add_group_illumos "$group"
2835 # Delete the specified group.
2839 function del_group #<group_name>
2843 if ((${#group} == 0)); then
2844 log_fail "group name is necessary."
2849 del_group_freebsd "$group"
2852 del_group_linux "$group"
2855 del_group_illumos "$group"
2863 # This function will return true if it's safe to destroy the pool passed
2864 # as argument 1. It checks for pools based on zvols and files, and also
2865 # files contained in a pool that may have a different mountpoint.
2867 function safe_to_destroy_pool { # $1 the pool name
2870 typeset DONT_DESTROY=""
2872 # We check that by deleting the $1 pool, we're not
2873 # going to pull the rug out from other pools. Do this
2874 # by looking at all other pools, ensuring that they
2875 # aren't built from files or zvols contained in this pool.
2877 for pool in $(zpool list -H -o name)
2881 # this is a list of the top-level directories in each of the
2882 # files that make up the path to the files the pool is based on
2883 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2886 # this is a list of the zvols that make up the pool
2887 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2890 # also want to determine if it's a file-based pool using an
2891 # alternate mountpoint...
2892 POOL_FILE_DIRS=$(zpool status -v $pool | \
2893 grep / | awk '{print $1}' | \
2894 awk -F/ '{print $2}' | grep -v "dev")
2896 for pooldir in $POOL_FILE_DIRS
2898 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2899 grep "${pooldir}$" | awk '{print $1}')
2901 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
# Any non-empty match means some other pool depends on storage in $1.
2905 if [ ! -z "$ZVOLPOOL" ]
2908 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2911 if [ ! -z "$FILEPOOL" ]
2914 log_note "Pool $pool is built from $FILEPOOL on $1"
2917 if [ ! -z "$ALTMOUNTPOOL" ]
2920 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2924 if [ -z "${DONT_DESTROY}" ]
2928 log_note "Warning: it is not safe to destroy $1!"
2934 # Verify zfs operation with -p option work as expected
2935 # $1 operation, value could be create, clone or rename
2936 # $2 dataset type, value could be fs or vol
2938 # $4 new dataset name
2940 function verify_opt_p_ops
2945 typeset newdataset=$4
2947 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2948 log_fail "$datatype is not supported."
2951 # check parameters accordingly
# Volumes need an explicit size argument on create.
2956 if [[ $datatype == "vol" ]]; then
2957 ops="create -V $VOLSIZE"
2961 if [[ -z $newdataset ]]; then
2962 log_fail "newdataset should not be empty" \
2965 log_must datasetexists $dataset
2966 log_must snapexists $dataset
2969 if [[ -z $newdataset ]]; then
2970 log_fail "newdataset should not be empty" \
2973 log_must datasetexists $dataset
2976 log_fail "$ops is not supported."
2980 # make sure the upper level filesystem does not exist
2981 destroy_dataset "${newdataset%/*}" "-rRf"
2983 # without -p option, operation will fail
2984 log_mustnot zfs $ops $dataset $newdataset
2985 log_mustnot datasetexists $newdataset ${newdataset%/*}
2987 # with -p option, operation should succeed
2988 log_must zfs $ops -p $dataset $newdataset
2991 if ! datasetexists $newdataset ; then
2992 log_fail "-p option does not work for $ops"
2995 # when $ops is create or clone, redoing the operation still returns zero
2996 if [[ $ops != "rename" ]]; then
2997 log_must zfs $ops -p $dataset $newdataset
3004 # Get configuration of pool
3014 if ! poolexists "$pool" ; then
# With no altroot, read the cached config; otherwise read the
# on-disk labels with 'zdb -e'.
3017 alt_root=$(zpool list -H $pool | awk '{print $NF}')
3018 if [[ $alt_root == "-" ]]; then
3019 value=$(zdb -C $pool | grep "$config:" | awk -F: \
3022 value=$(zdb -e $pool | grep "$config:" | awk -F: \
3025 if [[ -n $value ]] ; then
3035 # Private function. Randomly select one of the items from the arguments.
3040 function _random_get
3047 ((ind = RANDOM % cnt + 1))
3049 typeset ret=$(echo "$str" | cut -f $ind -d ' ')
3054 # Randomly select one item from the arguments, including the NONE string
3056 function random_get_with_non
3061 _random_get "$cnt" "$@"
3065 # Randomly select one item from the arguments, excluding the NONE string
3069 _random_get "$#" "$@"
3073 # The function will generate a dataset name with specific length
3074 # $1, the length of the name
3075 # $2, the base string to construct the name
3077 function gen_dataset_name
3080 typeset basestr="$2"
3081 typeset -i baselen=${#basestr}
# Repeat the base string enough times to reach the requested length.
3085 if ((len % baselen == 0)); then
3086 ((iter = len / baselen))
3088 ((iter = len / baselen + 1))
3090 while ((iter > 0)); do
3091 l_name="${l_name}$basestr"
3100 # Get cksum tuple of dataset
3103 # sample zdb output:
3104 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
3105 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
3106 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
3107 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
3108 function datasetcksum
# The cksum tuple is the 7th '='-separated field of the rootbp line.
3113 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
3114 | awk -F= '{print $7}')
# Get the checksum of a file (first field of cksum(1) output).
3125 cksum=$(cksum $1 | awk '{print $1}')
3130 # Get the given disk/slice state from the specific field of the pool
3132 function get_device_state #pool disk field("", "spares","logs")
3135 typeset disk=${2#$DEV_DSKDIR/}
3136 typeset field=${3:-$pool}
# Scan the 'config:' section for the requested field, then print
# the state column of the matching device row.
3138 state=$(zpool status -v "$pool" 2>/dev/null | \
3139 nawk -v device=$disk -v pool=$pool -v field=$field \
3140 'BEGIN {startconfig=0; startfield=0; }
3141 /config:/ {startconfig=1}
3142 (startconfig==1) && ($1==field) {startfield=1; next;}
3143 (startfield==1) && ($1==device) {print $2; exit;}
3145 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
3151 # print the given directory filesystem type
3159 if [[ -z $dir ]]; then
3160 log_fail "Usage: get_fstype <directory>"
# On illumos the filesystem type is column 3 of 'df -n' output.
3167 df -n $dir | awk '{print $3}'
3171 # Given a disk, label it to VTOC regardless what label was on the disk
3177 if [[ -z $disk ]]; then
3178 log_fail "The disk name is unspecified."
3180 typeset label_file=/var/tmp/labelvtoc.$$
3181 typeset arch=$(uname -p)
3183 if is_linux || is_freebsd; then
3184 log_note "Currently unsupported by the test framework"
# Build a format(1M) command file appropriate for the architecture.
3188 if [[ $arch == "i386" ]]; then
3189 echo "label" > $label_file
3190 echo "0" >> $label_file
3191 echo "" >> $label_file
3192 echo "q" >> $label_file
3193 echo "q" >> $label_file
# x86 also needs a default fdisk partition before labeling.
3195 fdisk -B $disk >/dev/null 2>&1
3196 # wait a while until fdisk finishes
3198 elif [[ $arch == "sparc" ]]; then
3199 echo "label" > $label_file
3200 echo "0" >> $label_file
3201 echo "" >> $label_file
3202 echo "" >> $label_file
3203 echo "" >> $label_file
3204 echo "q" >> $label_file
3206 log_fail "unknown arch type"
3209 format -e -s -d $disk -f $label_file
3210 typeset -i ret_val=$?
3213 # wait for format to finish
3216 if ((ret_val != 0)); then
3217 log_fail "unable to label $disk as VTOC."
3224 # check if the system was installed as zfsroot or not
3225 # return: 0 if zfsroot, non-zero if not
3229 df -n / | grep zfs > /dev/null 2>&1
3234 # get the root filesystem name if it's zfsroot system.
3236 # return: root filesystem name
# FreeBSD: parse 'mount -p'; illumos: parse the mount table file.
3242 rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
3243 elif ! is_linux; then
3244 rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
3247 if [[ -z "$rootfs" ]]; then
3248 log_fail "Can not get rootfs"
3250 zfs list $rootfs > /dev/null 2>&1
3251 if (($? == 0)); then
3254 log_fail "This is not a zfsroot system."
3259 # get the rootfs's pool name
3263 function get_rootpool
3269 rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
3270 elif ! is_linux; then
3271 rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
3274 if [[ -z "$rootfs" ]]; then
3275 log_fail "Can not get rootpool"
3277 zfs list $rootfs > /dev/null 2>&1
3278 if (($? == 0)); then
3281 log_fail "This is not a zfsroot system."
3286 # Get the number of words in a string separated by white space
3288 function get_word_count
3294 # To verify if the required number of disks is given
3296 function verify_disk_count
3298 typeset -i min=${2:-1}
3300 typeset -i count=$(get_word_count "$1")
3302 if ((count < min)); then
3303 log_untested "A minimum of $min disks is required to run." \
3304 " You specified $count disk(s)"
# Predicates on the 'type' property of a dataset.
3308 function ds_is_volume
3310 typeset type=$(get_prop type $1)
3311 [[ $type = "volume" ]] && return 0
3315 function ds_is_filesystem
3317 typeset type=$(get_prop type $1)
3318 [[ $type = "filesystem" ]] && return 0
3322 function ds_is_snapshot
3324 typeset type=$(get_prop type $1)
3325 [[ $type = "snapshot" ]] && return 0
3330 # Check if Trusted Extensions are installed and enabled
3332 function is_te_enabled
3334 svcs -H -o state labeld 2>/dev/null | grep "enabled"
3335 if (($? != 0)); then
3342 # Utility function to determine if a system has multiple cpus.
3347 elif is_freebsd; then
3348 sysctl -n kern.smp.cpus
3350 (($(psrinfo | wc -l) > 1))
# Report the CPU frequency in MHz for the current platform.
3356 function get_cpu_freq
3359 lscpu | awk '/CPU MHz/ { print $3 }'
3360 elif is_freebsd; then
3361 sysctl -n hw.clockrate
3363 psrinfo -v 0 | awk '/processor operates at/ {print $6}'
3367 # Run the given command as the user provided.
3373 log_note "user: $user"
3376 typeset out=$TEST_BASE_DIR/out
3377 typeset err=$TEST_BASE_DIR/err
# Run via sudo with the caller's PATH; capture stdout/stderr for logging.
3379 sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
3381 log_note "out: $(<$out)"
3382 log_note "err: $(<$err)"
3387 # Check if the pool contains the specified vdevs
3392 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3393 # vdevs is not in the pool, and 2 if pool name is missing.
3395 function vdevs_in_pool
3400 if [[ -z $pool ]]; then
3401 log_note "Missing pool name."
3407 # We could use 'zpool list' to only get the vdevs of the pool but we
3408 # can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
3409 # therefore we use the 'zpool status' output.
3410 typeset tmpfile=$(mktemp)
3411 zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
# Match on the vdev basename so full paths and bare names both work.
3413 grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
3414 [[ $? -ne 0 ]] && return 1
# Running maximum of the arguments.
3428 max=$((max > i ? max : i))
# Running minimum of the arguments.
3440 min=$((min < i ? min : i))
3446 # Write data that can be compressed into a directory
3447 function write_compressible
3451 typeset nfiles=${3:-1}
3452 typeset bs=${4:-1024k}
3453 typeset fname=${5:-file}
3455 [[ -d $dir ]] || log_fail "No directory: $dir"
3457 # Under Linux fio is not currently used since its behavior can
3458 # differ significantly across versions. This includes missing
3459 # command line options and cases where the --buffer_compress_*
3460 # options fail to behave as expected.
3462 typeset file_bytes=$(to_bytes $megs)
3463 typeset bs_bytes=4096
3464 typeset blocks=$(($file_bytes / $bs_bytes))
3466 for (( i = 0; i < $nfiles; i++ )); do
3467 truncate -s $file_bytes $dir/$fname.$i
3469 # Write every third block to get 66% compression.
3470 for (( j = 0; j < $blocks; j += 3 )); do
3471 dd if=/dev/urandom of=$dir/$fname.$i \
3472 seek=$j bs=$bs_bytes count=1 \
3473 conv=notrunc >/dev/null 2>&1
# Other platforms use fio to generate compressible data.
3477 log_must eval "fio \
3482 --buffer_compress_percentage=66 \
3483 --buffer_compress_chunk=4096 \
3490 --filename_format='$fname.\$jobnum' >/dev/null"
3499 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3501 objnum=$(stat -f "%i" $pathname)
3503 objnum=$(stat -c %i $pathname)
3509 # Sync data to the pool
3512 # $2 boolean to force uberblock (and config including zpool cache file) update
3514 function sync_pool #pool <force>
3516 typeset pool=${1:-$TESTPOOL}
3517 typeset force=${2:-false}
3519 if [[ $force == true ]]; then
3520 log_must zpool sync -f $pool
3522 log_must zpool sync $pool
3531 # $1 boolean to force uberblock (and config including zpool cache file) update
3533 function sync_all_pools #<force>
3535 typeset force=${1:-false}
3537 if [[ $force == true ]]; then
3538 log_must zpool sync -f
3547 # Wait for zpool 'freeing' property drops to zero.
3551 function wait_freeing #pool
3553 typeset pool=${1:-$TESTPOOL}
3555 [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3561 # Wait for every device replace operation to complete
3565 function wait_replacing #pool
3567 typeset pool=${1:-$TESTPOOL}
3569 [[ "" == "$(zpool status $pool |
3570 awk '/replacing-[0-9]+/ {print $1}')" ]] && break
#
# Wait for a pool to be scrubbed
#
# $1 pool name (default: $TESTPOOL)
# $2 timeout in seconds (default: 300)
#
function wait_scrubbed #pool timeout
{
	typeset timeout=${2:-300}
	typeset pool=${1:-$TESTPOOL}
	for (( timer = 0; timer < $timeout; timer++ )); do
		is_pool_scrubbed $pool && break;
		sleep 1;
	done
}
# Backup the zed.rc in our test directory so that we can edit it for our test.
#
# Returns: Backup file name.  You will need to pass this to zed_rc_restore().
function zed_rc_backup
{
	zedrc_backup="$(mktemp)"
	cp $ZEDLET_DIR/zed.rc $zedrc_backup
	echo $zedrc_backup
}
# Restore the zed.rc backup taken by zed_rc_backup().
#
# $1 Backup file name as returned by zed_rc_backup().
function zed_rc_restore
{
	mv $1 $ZEDLET_DIR/zed.rc
}
#
# Setup custom environment for the ZED.
#
# $@ Optional list of zedlets to run under zed.
#
function zed_setup
{
	if ! is_linux; then
		log_unsupported "No zed on $(uname)"
	fi

	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	if [[ ! -e $VDEVID_CONF ]]; then
		log_must touch $VDEVID_CONF
	fi

	if [[ -e $VDEVID_CONF_ETC ]]; then
		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
	fi
	EXTRA_ZEDLETS=$@

	# Create a symlink for /etc/zfs/vdev_id.conf file.
	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC

	# Setup minimal ZED configuration.  Individual test cases should
	# add additional ZEDLETs as needed for their specific test.
	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR

	# Scripts must only be user writable.
	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
		saved_umask=$(umask)
		log_must umask 0022
		for i in $EXTRA_ZEDLETS ; do
			log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
		done
		log_must umask $saved_umask
	fi

	# Customize the zed.rc file to enable the full debug log.
	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
}
#
# Cleanup custom ZED environment.
#
# $@ Optional list of zedlets to remove from our test zed.d directory.
#
function zed_cleanup
{
	if ! is_linux; then
		return
	fi
	EXTRA_ZEDLETS=$@

	log_must rm -f ${ZEDLET_DIR}/zed.rc
	log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
	log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
	log_must rm -f ${ZEDLET_DIR}/all-debug.sh
	log_must rm -f ${ZEDLET_DIR}/state

	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
		for i in $EXTRA_ZEDLETS ; do
			log_must rm -f ${ZEDLET_DIR}/$i
		done
	fi
	log_must rm -f $ZED_LOG
	log_must rm -f $ZED_DEBUG_LOG
	log_must rm -f $VDEVID_CONF_ETC
	log_must rm -f $VDEVID_CONF

	log_must rmdir $ZEDLET_DIR
}
#
# Check if ZED is currently running; if so, returns PIDs
#
function zed_check
{
	if ! is_linux; then
		return
	fi
	# Look for both an installed zed and an in-tree (libtool) build.
	zedpids="$(pgrep -x zed)"
	zedpids2="$(pgrep -x lt-zed)"
	echo ${zedpids} ${zedpids2}
}
#
# Check if ZED is currently running, if not start ZED.
#
function zed_start
{
	if ! is_linux; then
		return
	fi

	# ZEDLET_DIR=/var/tmp/zed
	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	# Verify the ZED is not already running.
	zedpids=$(zed_check)
	if [ -n "$zedpids" ]; then
		# We never, ever, really want it to just keep going if zed
		# is already running - usually this implies our test cases
		# will break very strangely because whatever we wanted to
		# configure zed for won't be listening to our changes in the
		# tmpdir
		log_fail "ZED already running - ${zedpids}"
	fi

	log_note "Starting ZED"
	# run ZED in the background and redirect foreground logging
	# output to $ZED_LOG.
	log_must truncate -s 0 $ZED_DEBUG_LOG
	log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
	    "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"

	return 0
}
#
# Stop ZED, waiting until every outstanding ZED process has exited.
#
function zed_stop
{
	if ! is_linux; then
		return "0"
	fi

	log_note "Stopping ZED"
	while true; do
		zedpids=$(zed_check)
		[ ! -n "$zedpids" ] && break
		log_must kill $zedpids
		sleep 1
	done
	return 0
}
#
# Drain all zevents, retrying until the event queue is empty.
#
function zed_events_drain
{
	while [ $(zpool events -H | wc -l) -ne 0 ]; do
		sleep 1
		zpool events -c >/dev/null
	done
}
# Set a variable in zed.rc to something, un-commenting it in the process.
#
# $1 variable name
# $2 new value
function zed_rc_set
{
	var=$1
	val=$2
	# Remove any existing (possibly commented) line for the variable.
	cmd="'/$var/d'"
	eval sed -i $cmd $ZEDLET_DIR/zed.rc

	# Add the new assignment at the end of the file.
	echo "$var=$val" >> $ZEDLET_DIR/zed.rc
}
#
# Check if the provided device is currently being actively used as a swap
# device.  Returns 0 (grep success) when the device is in use.
#
# $1 device path
#
function is_swap_inuse
{
	typeset device=$1

	if [[ -z $device ]] ; then
		log_note "No device specified."
		return 1
	fi

	if is_linux; then
		swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
	elif is_freebsd; then
		swapctl -l | grep -w $device
	else
		swap -l | grep -w $device > /dev/null 2>&1
	fi
}
#
# Setup a swap device using the provided device.
#
# $1 device path
#
function swap_setup
{
	typeset swapdev=$1

	if is_linux; then
		log_must eval "mkswap $swapdev > /dev/null 2>&1"
		log_must swapon $swapdev
	elif is_freebsd; then
		log_must swapctl -a $swapdev
	else
		log_must swap -a $swapdev
	fi

	return 0
}
#
# Cleanup a swap device on the provided device.
#
# $1 device path
#
function swap_cleanup
{
	typeset swapdev=$1

	if is_swap_inuse $swapdev; then
		if is_linux; then
			log_must swapoff $swapdev
		elif is_freebsd; then
			log_must swapoff $swapdev
		else
			log_must swap -d $swapdev
		fi
	fi

	return 0
}
#
# Set a global system tunable (64-bit value)
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable value
#
function set_tunable64
{
	set_tunable_impl "$1" "$2" Z
}
#
# Set a global system tunable (32-bit value)
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable value
#
function set_tunable32
{
	set_tunable_impl "$1" "$2" W
}
function set_tunable_impl
{
	typeset name="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"
	typeset module="${4:-zfs}"

	# Map the generic NAME to this platform's tunable name (tunables.cfg).
	eval "typeset tunable=\$$name"
	case "$tunable" in
	UNSUPPORTED)
		log_unsupported "Tunable '$name' is unsupported on $(uname)"
		;;
	"")
		log_fail "Tunable '$name' must be added to tunables.cfg"
		;;
	*)
		;;
	esac

	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -w "$zfs_tunables/$tunable" ]] || return 1
		cat >"$zfs_tunables/$tunable" <<<"$value"
		return $?
		;;
	FreeBSD)
		sysctl vfs.zfs.$tunable=$value
		;;
	SunOS)
		# Use a string comparison: '-eq' evaluates both operands
		# arithmetically, so non-numeric module names always
		# compared equal.
		[[ "$module" == "zfs" ]] || return 1
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		;;
	esac
}
#
# Get a global system tunable
#
# $1 tunable name (use a NAME defined in tunables.cfg)
#
function get_tunable
{
	get_tunable_impl "$1"
}
function get_tunable_impl
{
	typeset name="$1"
	typeset module="${2:-zfs}"

	# Map the generic NAME to this platform's tunable name (tunables.cfg).
	eval "typeset tunable=\$$name"
	case "$tunable" in
	UNSUPPORTED)
		log_unsupported "Tunable '$name' is unsupported on $(uname)"
		;;
	"")
		log_fail "Tunable '$name' must be added to tunables.cfg"
		;;
	*)
		;;
	esac

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -f "$zfs_tunables/$tunable" ]] || return 1
		cat $zfs_tunables/$tunable
		return $?
		;;
	FreeBSD)
		sysctl -n vfs.zfs.$tunable
		;;
	SunOS)
		# String comparison: '-eq' would evaluate both sides
		# arithmetically and match any non-numeric module name.
		[[ "$module" == "zfs" ]] || return 1
		;;
	esac
}
#
# Prints the current time in seconds since UNIX Epoch.
#
function current_epoch
{
	# ksh93/bash printf date extension; avoids forking date(1).
	printf '%(%s)T\n' now
}
#
# Get decimal value of global uint32_t variable using mdb.
#
# $1 variable name
#
function mdb_get_uint32
{
	typeset variable=$1
	typeset value

	value=$(mdb -k -e "$variable/X | ::eval .=U")
	if [[ $? -ne 0 ]]; then
		log_fail "Failed to get value of '$variable' from mdb."
		return 1
	fi

	echo $value
	return 0
}
#
# Set global uint32_t variable to a decimal value using mdb.
#
# $1 variable name
# $2 decimal value
#
function mdb_set_uint32
{
	typeset variable=$1
	typeset value=$2

	mdb -kw -e "$variable/W 0t$value" > /dev/null
	if [[ $? -ne 0 ]]; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}
#
# Set global scalar integer variable to a hex value using mdb.
# Note: Target should have CTF data loaded.
#
# $1 variable name
# $2 hex value
#
function mdb_ctf_set_int
{
	typeset variable=$1
	typeset value=$2

	mdb -kw -e "$variable/z $value" > /dev/null
	if [[ $? -ne 0 ]]; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}
#
# Compute MD5 digest for given file or stdin if no file given.
# Note: file path must not contain spaces
#
function md5digest
{
	typeset file=$1

	case $(uname) in
	FreeBSD)
		md5 -q $file
		;;
	*)
		md5sum -b $file | awk '{ print $1 }'
		;;
	esac
}
#
# Compute SHA256 digest for given file or stdin if no file given.
# Note: file path must not contain spaces
#
function sha256digest
{
	typeset file=$1

	case $(uname) in
	FreeBSD)
		sha256 -q $file
		;;
	*)
		sha256sum -b $file | awk '{ print $1 }'
		;;
	esac
}
#
# Create a new file system with newfs(8), answering "y" to any prompts
# where the platform's newfs asks for confirmation.
#
function new_fs #<args>
{
	case $(uname) in
	FreeBSD)
		newfs "$@"
		;;
	*)
		echo y | newfs -v "$@"
		;;
	esac
}
#
# Print the size in bytes of the given path.
#
function stat_size #<path>
{
	typeset path=$1

	case $(uname) in
	FreeBSD)
		stat -f %z "$path"
		;;
	*)
		stat -c %s "$path"
		;;
	esac
}
#
# Print the change time (ctime, seconds since Epoch) of the given path.
#
function stat_ctime #<path>
{
	typeset path=$1

	case $(uname) in
	FreeBSD)
		stat -f %c "$path"
		;;
	*)
		stat -c %Z "$path"
		;;
	esac
}
#
# Print the creation time (crtime/birth time) of the given path.
#
function stat_crtime #<path>
{
	typeset path=$1

	case $(uname) in
	FreeBSD)
		stat -f %B "$path"
		;;
	*)
		stat -c %W "$path"
		;;
	esac
}
#
# Print the generation number of the given path.
#
function stat_generation #<path>
{
	typeset path=$1

	case $(uname) in
	Linux)
		# stat(1) cannot report the generation on Linux; use the
		# suite's getversion helper (FS_IOC_GETVERSION ioctl).
		getversion "${path}"
		;;
	*)
		stat -f %v "${path}"
		;;
	esac
}
#
# Run a command as if it was being run in a TTY.
#
# Usage:
#
#    faketty command
#
function faketty
{
	if is_freebsd; then
		script -q /dev/null env "$@"
	else
		script --return --quiet -c "$*" /dev/null
	fi
}
#
# Produce a random permutation of the integers in a given range (inclusive).
#
# $1 first integer in the range
# $2 last integer in the range
#
function range_shuffle # begin end
{
	typeset -i begin=$1
	typeset -i end=$2

	seq ${begin} ${end} | sort -R
}
#
# Cross-platform xattr helpers
#

# Read the value of a user xattr.
function get_xattr # name path
{
	typeset name=$1
	typeset path=$2

	case $(uname) in
	FreeBSD)
		getextattr -qq user "${name}" "${path}"
		;;
	*)
		attr -qg "${name}" "${path}"
		;;
	esac
}
# Set the value of a user xattr.
function set_xattr # name value path
{
	typeset name=$1
	typeset value=$2
	typeset path=$3

	case $(uname) in
	FreeBSD)
		setextattr user "${name}" "${value}" "${path}"
		;;
	*)
		attr -qs "${name}" -V "${value}" "${path}"
		;;
	esac
}
# Set the value of a user xattr, reading the value from stdin.
function set_xattr_stdin # name path
{
	typeset name=$1
	typeset path=$2

	case $(uname) in
	FreeBSD)
		setextattr -i user "${name}" "${path}"
		;;
	*)
		attr -qs "${name}" "${path}"
		;;
	esac
}
# Remove a user xattr.
function rm_xattr # name path
{
	typeset name=$1
	typeset path=$2

	case $(uname) in
	FreeBSD)
		rmextattr -q user "${name}" "${path}"
		;;
	*)
		attr -qr "${name}" "${path}"
		;;
	esac
}
# List the user xattrs on a path.
function ls_xattr # path
{
	typeset path=$1

	case $(uname) in
	FreeBSD)
		lsextattr -qq user "${path}"
		;;
	*)
		attr -ql "${path}"
		;;
	esac
}
#
# Print the contents of the named ZFS kstat; returns non-zero if the
# kstat does not exist on this platform.
#
# $1 kstat name (e.g. "arcstats")
# $2 sysctl flags (FreeBSD only, default "-n")
#
function kstat # stat flags?
{
	typeset stat=$1
	typeset flags=${2-"-n"}

	case $(uname) in
	FreeBSD)
		sysctl $flags kstat.zfs.misc.$stat
		;;
	Linux)
		typeset zfs_kstat="/proc/spl/kstat/zfs/$stat"
		[[ -f "$zfs_kstat" ]] || return 1
		cat $zfs_kstat
		;;
	*)
		false
		;;
	esac
}
#
# Print the current value of the named arcstat.
#
# $1 arcstat name
#
function get_arcstat # stat
{
	typeset stat=$1

	case $(uname) in
	FreeBSD)
		kstat arcstats.$stat
		;;
	Linux)
		kstat arcstats | awk "/$stat/ { print \$3 }"
		;;
	*)
		false
		;;
	esac
}
#
# Punch a hole in a file (deallocate the range without changing file size).
#
# $1 hole offset in bytes
# $2 hole length in bytes
# $3 file path
#
function punch_hole # offset length file
{
	typeset offset=$1
	typeset length=$2
	typeset file=$3

	case $(uname) in
	FreeBSD)
		truncate -d -o $offset -l $length "$file"
		;;
	Linux)
		fallocate --punch-hole --offset $offset --length $length "$file"
		;;
	*)
		false
		;;
	esac
}
#
# Wait for the specified arcstat to reach non-zero quiescence.
# If echo is 1 echo the value after reaching quiescence, otherwise
# if echo is 0 print the arcstat we are waiting on.
#
function arcstat_quiescence # stat echo
{
	typeset stat=$1
	typeset echo=$2
	typeset do_once=true

	if [[ $echo -eq 0 ]]; then
		echo "Waiting for arcstat $1 quiescence."
	fi

	# Loop until two consecutive samples are equal and non-zero.
	while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
		typeset stat1=$(get_arcstat $stat)
		sleep 2
		typeset stat2=$(get_arcstat $stat)
		do_once=false
	done

	if [[ $echo -eq 1 ]]; then
		echo $stat2
	fi
}
# Wait for arcstat quiescence without echoing the final value.
function arcstat_quiescence_noecho # stat
{
	typeset stat=$1
	arcstat_quiescence $stat 0
}
# Wait for arcstat quiescence and echo the final value.
function arcstat_quiescence_echo # stat
{
	typeset stat=$1
	arcstat_quiescence $stat 1
}
#
# Given an array of pids, wait until all processes
# have completed and check their return status.
#
# Returns 0 if every child exited 0, otherwise 1.
#
function wait_for_children #children
{
	rv=0
	children=("$@")
	for child in "${children[@]}"
	do
		child_exit=0
		wait ${child} || child_exit=$?
		if [ $child_exit -ne 0 ]; then
			echo "child ${child} failed with ${child_exit}"
			rv=1
		fi
	done
	return $rv
}
#
# Compare two directory trees recursively in a manner similar to diff(1), but
# using rsync. If there are any discrepancies, a summary of the differences are
# output and a non-zero error is returned.
#
# If you're comparing a directory after a ZIL replay, you should set
# LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause
# directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
# information).
#
function directory_diff # dir_a dir_b
{
	dir_a="$1"
	dir_b="$2"
	zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"

	# If one of the directories doesn't exist, return 2. This is to match the
	# semantics of diff.
	if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
		return 2
	fi

	# Run rsync with --dry-run --itemize-changes to get something akin to diff
	# output, but rsync is far more thorough in detecting differences (diff
	# doesn't compare file metadata, and cannot handle special files).
	#
	# Also make sure to filter out non-user.* xattrs when comparing. On
	# SELinux-enabled systems the copied tree will probably have different
	# SELinux labels.
	args=("-nicaAHX" '--filter=-x! user.*' "--delete")

	# NOTE: Quite a few rsync builds do not support --crtimes which would be
	# necessary to verify that creation times are being maintained properly.
	# Unfortunately because of this we cannot use it unconditionally but we can
	# check if this rsync build supports it and use it then. This check is
	# based on the same check in the rsync test suite (testsuite/crtimes.test).
	#
	# We check ctimes even with zil_replay=1 because the ZIL does store
	# creation times and we should make sure they match (if the creation times
	# do not match there is a "c" entry in one of the columns).
	if ( rsync --version | grep -q "[, ] crtimes" >/dev/null ); then
		args+=("--crtimes")
	else
		echo "NOTE: This rsync package does not support --crtimes (-N)."
	fi

	# If we are testing a ZIL replay, we need to ignore timestamp changes.
	# Unfortunately --no-times doesn't do what we want -- it will still tell
	# you if the timestamps don't match but rsync will set the timestamps to
	# the current time (leading to an itemised change entry). It's simpler to
	# just filter out those lines.
	if [ "$zil_replay" -eq 0 ]; then
		filter=("cat")
	else
		# Different rsync versions have different numbers of columns. So just
		# require that aside from the first two, all other columns must be
		# blank (literal ".") or a timestamp field ("[tT]").
		filter=("grep" "-v" '^\..[.Tt]\+ ')
	fi

	diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
	rv=0
	if [ -n "$diff" ]; then
		echo "$diff"
		rv=1
	fi
	return $rv
}
#
# Compare two directory trees recursively, without checking whether the mtimes
# match (creation times will be checked if the available rsync binary supports
# it). This is necessary for ZIL replay checks (because the ZIL does not
# contain mtimes and thus after a ZIL replay, mtimes won't match).
#
# This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
#
function replay_directory_diff # dir_a dir_b
{
	LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"
}