4 # The contents of this file are subject to the terms of the
5 # Common Development and Distribution License (the "License").
6 # You may not use this file except in compliance with the License.
8 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 # or http://www.opensolaris.org/os/licensing.
10 # See the License for the specific language governing permissions
11 # and limitations under the License.
13 # When distributing Covered Code, include this CDDL HEADER in each
14 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 # If applicable, add the following below this CDDL HEADER, with the
16 # fields enclosed by brackets "[]" replaced with your own identifying
17 # information: Portions Copyright [yyyy] [name of copyright owner]
23 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 # Use is subject to license terms.
25 # Copyright (c) 2012, 2016 by Delphix. All rights reserved.
26 # Copyright 2016 Nexenta Systems, Inc.
27 # Copyright (c) 2017 Lawrence Livermore National Security, LLC.
28 # Copyright (c) 2017 Datto Inc.
# Pull in the STF logging API and the suite's math helpers.
. ${STF_TOOLS}/include/logapi.shlib
. ${STF_SUITE}/include/math.shlib

# Apply constrained path when available. This is required since the
# PATH may have been modified by sudo's secure_path behavior.
if [ -n "$STF_PATH" ]; then
	export PATH=$STF_PATH
fi
#
# Linux kernel version comparison function
#
# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
#
# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
#
function linux_version
{
	typeset ver="$1"

	# Default to the running kernel's leading "X.Y.Z" triple.
	[[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")

	typeset version=$(echo $ver | cut -d '.' -f 1)
	typeset major=$(echo $ver | cut -d '.' -f 2)
	typeset minor=$(echo $ver | cut -d '.' -f 3)

	# Missing components compare as zero (e.g. "4.10" -> 4.10.0).
	[[ -z "$version" ]] && version=0
	[[ -z "$major" ]] && major=0
	[[ -z "$minor" ]] && minor=0

	# Encode as one comparable integer: 2.6.32 -> 20632.
	echo $((version * 10000 + major * 100 + minor))
}
#
# Determine if this is a Linux test system
#
# Return 0 if platform Linux, 1 if otherwise
#
function is_linux
{
	if [[ $(uname -o) == "GNU/Linux" ]]; then
		return 0
	else
		return 1
	fi
}
#
# Determine if this is a 32-bit system
#
# Return 0 if platform is 32-bit, 1 if otherwise
#
function is_32bit
{
	if [[ $(getconf LONG_BIT) == "32" ]]; then
		return 0
	else
		return 1
	fi
}
#
# Determine if kmemleak is enabled
#
# Return 0 if kmemleak is enabled, 1 if otherwise
#
function is_kmemleak
{
	# kmemleak exposes itself via debugfs only when compiled in.
	if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
		return 0
	else
		return 1
	fi
}
#
# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
#
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# A leading '/' means we were handed a mountpoint,
			# otherwise a dataset name.
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs)
			# Solaris df -F; parse "dir (name): ..." output.
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			(($ret != 0)) && return $ret

			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
		ext*)
			# Linux df understands -t <fstype> directly.
			out=$(df -t $fstype $1 2>/dev/null)
			return $?
		;;
		zvol)
			# A zvol is "mounted" when its resolved device node
			# appears as a mount source.
			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
				link=$(readlink -f $ZVOL_DEVDIR/$1)
				[[ -n "$link" ]] && \
					mount | grep -q "^$link" && \
						return 0
			fi
		;;
	esac

	return 1
}
#
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
function mounted
{
	ismounted $1 $2
	(($? == 0)) && return 0
	return 1
}

#
# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
function unmounted
{
	ismounted $1 $2
	(($? == 1)) && return 0
	return 1
}
189 echo $1 | sed "s/,/ /g"
#
# Given a list of disks, setup storage pools and datasets, then
# exit the test with log_pass on success.
#
function default_setup
{
	default_setup_noexit "$@"

	log_pass
}
#
# Given a list of disks, setup storage pools and datasets.
#
# $1 disk list for the pool
# $2 non-empty to also create a container dataset ($TESTCTR/$TESTFS1)
# $3 non-empty to also create a volume ($TESTVOL)
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3

	log_note begin default_setup_noexit
	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		# Local zones cannot create pools; re-use the exported ones.
		reexport_pool
	fi

	rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1 || \
		    log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
		    log_unresolved Could not create $TESTDIR1

		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
			block_device_wait
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true"
}

#
# Given a list of disks, setup a storage pool, file system
# and a volume.
#
function default_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "" "true"
}

#
# Given a list of disks, setup a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true" "true"
}
#
# Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
# filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	[[ -z $snap ]] && \
	    log_fail "Snapshot name is undefined."
	[[ -z $clone ]] && \
	    log_fail "Clone name is undefined."

	log_must zfs clone $snap $clone
}
#
# Create a bookmark of the given snapshot. Defaultly create a bookmark on
# filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
	    log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
#
# Given a pair of disks, set up a mirrored pool and exit with log_pass.
#
function default_mirror_setup
{
	default_mirror_setup_noexit $1 $2 $3

	log_pass
}
#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#   $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	[[ -z $primary ]] && \
	    log_fail "$func: No parameters passed"
	[[ -z $secondary ]] && \
	    log_fail "$func: No secondary partition passed"
	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL mirror $@
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
# create a number of mirrors.
# We create a number($1) of 2 way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted
# @parameters: $1 the number of mirrors to create
#  $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	while ((nmirrors > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}
#
# create a number of raidz pools.
# We create a number($1) of 2 raidz pools using the pairs of disks named
# on the command line. These pools are *not* mounted
# @parameters: $1 the number of pools to create
#  $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	while ((nraidzs > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}
#
# Destroy the configured testpool mirrors.
# the mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	default_cleanup_noexit

	log_pass
}
#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL raidz $1 $2 $3
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests doesn't
# impact the execution of the next set.
#
function default_cleanup
{
	default_cleanup_noexit

	log_pass
}
# NOTE(review): this region is a garbled numbered listing with many original
# lines missing (braces, loop bodies, 'fi'/'done' terminators). Preserved
# byte-for-byte below; annotated only. Intent (from visible code): destroy all
# pools not protected by $KEEP/$NO_POOLS in the global zone, and in a local
# zone destroy/reset the $ZONE_POOL/$ZONE_CTR[0-4] datasets to defaults.
470 function default_cleanup_noexit
475 # Destroying the pool will also destroy any
476 # filesystems it contains.
478 if is_global_zone; then
479 zfs unmount -a > /dev/null 2>&1
# $KEEP is an egrep alternation of pool names that must survive cleanup.
480 exclude=`eval echo \"'(${KEEP})'\"`
481 ALL_POOLS=$(zpool list -H -o name \
482 | grep -v "$NO_POOLS" | egrep -v "$exclude")
483 # Here, we loop through the pools we're allowed to
484 # destroy, only destroying them if it's safe to do
486 while [ ! -z ${ALL_POOLS} ]
488 for pool in ${ALL_POOLS}
490 if safe_to_destroy_pool $pool ;
# Re-read the pool list each pass until nothing destroyable remains.
494 ALL_POOLS=$(zpool list -H -o name \
495 | grep -v "$NO_POOLS" \
496 | egrep -v "$exclude")
# Local-zone path: recursively destroy everything under the zone containers.
503 for fs in $(zfs list -H -o name \
504 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
505 destroy_dataset "$fs" "-Rf"
508 # Need cleanup here to avoid garbage dir left.
509 for fs in $(zfs list -H -o name); do
510 [[ $fs == /$ZONE_POOL ]] && continue
511 [[ -d $fs ]] && log_must rm -rf $fs/*
515 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
518 for fs in $(zfs list -H -o name); do
519 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
520 log_must zfs set reservation=none $fs
521 log_must zfs set recordsize=128K $fs
522 log_must zfs set mountpoint=/$fs $fs
# checksum can only be reset when the dataset is not encrypted.
524 enc=$(get_prop encryption $fs)
525 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
526 [[ "$enc" == "off" ]]; then
527 log_must zfs set checksum=on $fs
529 log_must zfs set compression=off $fs
530 log_must zfs set atime=on $fs
531 log_must zfs set devices=off $fs
532 log_must zfs set exec=on $fs
533 log_must zfs set setuid=on $fs
534 log_must zfs set readonly=off $fs
535 log_must zfs set snapdir=hidden $fs
536 log_must zfs set aclmode=groupmask $fs
537 log_must zfs set aclinherit=secure $fs
542 [[ -d $TESTDIR ]] && \
543 log_must rm -rf $TESTDIR
# NOTE(review): fragment below references $disk1/is_mpath_device; its
# surrounding context was lost in extraction -- confirm against upstream.
546 if is_mpath_device $disk1; then
#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
#
# Common function used to cleanup snapshot of file system or volume. Default to
# delete the file system's snapshot
#
# $1 snapshot name
#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not existed."
	fi

	#
	# For the sake of the value which come from 'get_prop' is not equal
	# to the really mountpoint when the snapshot is unmounted. So, firstly
	# check and make sure this snapshot's been mounted in current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $snap failed."
	fi

	destroy_dataset "$snap"
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}
#
# Common function used to cleanup clone.
#
# $1 clone name
#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not existed."
	fi

	# With the same reason in destroy_snapshot
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $clone failed."
	fi

	destroy_dataset "$clone"
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}
#
# Common function used to cleanup bookmark of file system or volume. Default
# to delete the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fix: original referenced undefined "$bkmarkp" (typo),
		# which printed an empty name in the failure message.
		log_fail "'$bkmark' does not existed."
	fi

	destroy_dataset "$bkmark"
}
#
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name
#
function snapexists
{
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
	return $?
}

#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
	return $?
}
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @parameters:
#	$1 dataset who's property is being set
#	$2 property to set
#	$3 value to set property to
# @returns:
#	0 if the property could be set.
#	non-zero otherwise.
# @uses: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	output=$(zfs set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}
#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#	0 if the dataset has been altered.
#	1 if no pool name was passed in.
#	2 if the dataset could not be found.
#	3 if the dataset could not have it's properties set.
#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	for confset in $(zfs list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
		    return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
		    return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
	(($@)) || log_fail "$@"
}
#
# Function to format partition size of a disk
# Given a disk cxtxdx reduces all partitions
# to 0 size
#
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset i

	if is_linux; then
		# Writing a fresh GPT label implicitly wipes the partitions.
		DSK=$DEV_DSKDIR/$diskname
		DSK=$(echo $DSK | sed -e "s|//|/|g")
		log_must parted $DSK -s -- mklabel gpt
		blockdev --rereadpt $DSK 2>/dev/null
		block_device_wait
	else
		# Slice 2 (whole disk) is intentionally skipped on illumos.
		for i in 0 1 3 4 5 6 7
		do
			log_must set_partition $i "" 0mb $diskname
		done
	fi

	return 0
}
# NOTE(review): garbled numbered listing, many lines missing (variable
# declarations, 'fi' terminators, the is_linux/else split). Preserved
# byte-for-byte; comments only. The Linux half (parted) and the illumos
# half (format -e -s -d) of set_partition are interleaved below.
781 # Given a slice, size and disk, this function
782 # formats the slice to the specified size.
783 # Size should be specified with units as per
784 # the `format` command requirements eg. 100mb 3gb
786 # NOTE: This entire interface is problematic for the Linux parted utilty
787 # which requires the end of the partition to be specified. It would be
788 # best to retire this interface and replace it with something more flexible.
789 # At the moment a best effort is made.
791 function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
793 typeset -i slicenum=$1
799 if [[ -z $size || -z $disk ]]; then
800 log_fail "The size or disk name is unspecified."
# Strip unit suffixes ("m", "mb", "g", "gb") to get a megabyte count.
802 typeset size_mb=${size%%[mMgG]}
804 size_mb=${size_mb%%[mMgG][bB]}
805 if [[ ${size:1:1} == 'g' ]]; then
806 ((size_mb = size_mb * 1024))
809 # Create GPT partition table when setting slice 0 or
810 # when the device doesn't already contain a GPT label.
811 parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
813 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
814 parted $DEV_DSKDIR/$disk -s -- mklabel gpt
815 if [[ $? -ne 0 ]]; then
816 log_note "Failed to create GPT partition table on $disk"
821 # When no start is given align on the first cylinder.
822 if [[ -z "$start" ]]; then
826 # Determine the cylinder size for the device and using
827 # that calculate the end offset in cylinders.
828 typeset -i cly_size_kb=0
829 cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
830 unit cyl print | head -3 | tail -1 | \
831 awk -F '[:k.]' '{print $4}')
832 ((end = (size_mb * 1024 / cly_size_kb) + start))
834 parted $DEV_DSKDIR/$disk -s -- \
835 mkpart part$slicenum ${start}cyl ${end}cyl
836 if [[ $? -ne 0 ]]; then
837 log_note "Failed to create partition $slicenum on $disk"
# Force the kernel to re-read the new partition table.
841 blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
# illumos path: drive the interactive format(1M) utility via a script file.
844 if [[ -z $slicenum || -z $size || -z $disk ]]; then
845 log_fail "The slice, size or disk name is unspecified."
848 typeset format_file=/var/tmp/format_in.$$
850 echo "partition" >$format_file
851 echo "$slicenum" >> $format_file
852 echo "" >> $format_file
853 echo "" >> $format_file
854 echo "$start" >> $format_file
855 echo "$size" >> $format_file
856 echo "label" >> $format_file
857 echo "" >> $format_file
858 echo "q" >> $format_file
859 echo "q" >> $format_file
861 format -e -s -d $disk -f $format_file
866 if [[ $ret_val -ne 0 ]]; then
867 log_note "Unable to format $disk slice $slicenum to $size"
# NOTE(review): garbled numbered listing; loop initializers, 'fi'/'done'
# terminators and the DISKSARRAY population are missing. Preserved
# byte-for-byte; comments only. Two near-identical branches: single-disk
# ($DISK) vs multi-disk ($DISKSARRAY), each removing partitions 1..MAX and
# verifying removal via lsblk.
874 # Delete all partitions on all disks - this is specifically for the use of multipath
875 # devices which currently can only be used in the test suite as raw/un-partitioned
876 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
878 function delete_partitions
882 if [[ -z $DISK_ARRAY_NUM ]]; then
883 DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
885 if [[ -z $DISKSARRAY ]]; then
890 if (( $DISK_ARRAY_NUM == 1 )); then
891 while ((j < MAX_PARTITIONS)); do
892 parted $DEV_DSKDIR/$DISK -s rm $j \
# parted exits 1 when the partition is already gone; confirm via lsblk.
894 if (( $? == 1 )); then
895 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
896 if (( $? == 1 )); then
897 log_note "Partitions for $DISK should be deleted"
899 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
903 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
904 if (( $? == 0 )); then
905 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
# Multi-disk variant of the same removal/verification loop.
911 for disk in `echo $DISKSARRAY`; do
912 while ((j < MAX_PARTITIONS)); do
913 parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
914 if (( $? == 1 )); then
915 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
916 if (( $? == 1 )); then
917 log_note "Partitions for $disk should be deleted"
919 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
923 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
924 if (( $? == 0 )); then
925 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
#
# Get the end cyl of the given slice
#
# $1 disk name
# $2 slice number
#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	if is_linux; then
		# Parse parted's cylinder-unit table for "partN ... <end>cyl".
		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
		    grep "part${slice}" | \
		    awk '{print $3}' | \
		    sed 's,cyl,,')
		((endcyl = (endcyl + 1)))
	else
		disk=${disk#/dev/dsk/}
		disk=${disk#/dev/rdsk/}
		disk=${disk%s*}

		# Sectors-per-cylinder ratio converts prtvtoc sector counts
		# into cylinders.
		typeset -i ratio=0
		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		    grep "sectors\/cylinder" | \
		    awk '{print $2}')

		if ((ratio == 0)); then
			return
		fi

		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		    nawk -v token="$slice" '{if ($1==token) print $6}')

		((endcyl = (endcyl + 1) / ratio))
	fi

	echo $endcyl
}
#
# Given a size,disk and total slice number, this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	while ((i < $total_slices)); do
		if ! is_linux; then
			# Slice 2 is the whole-disk slice on illumos; skip it.
			if ((i == 2)); then
				((i = i + 1))
				continue
			fi
		fi
		log_must set_partition $i "$cyl" $slice_size $disk_name
		# Next slice starts where this one ended.
		cyl=$(get_endslice $disk_name $i)
		((i = i + 1))
	done
}
#
# This function continues to write to a filenum number of files into dirnum
# number of directories until either file_write returns an error or the
# maximum number of files per directory have been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
#		non 0 on error
#
# Where :
#	destdir:    is the directory where everything is to be created under
#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
#	filenum:    the maximum number of files per subdirectory
#	bytes:	    number of bytes to write
#	num_writes: numer of types to write out bytes
#	data:	    the data that will be written
#
#	E.g.
#	file_fs /testdir 20 25 1024 256 0
#
# Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1
	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# dirnum < 0 means no directory limit.
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# Stop on the first write failure (e.g. filesystem full).
		if (($retval != 0)); then
			odirnum=0
		fi
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
#
# Simple function to get the specified property. If unable to
# get the property then exits.
#
# Note property is in 'parsable' format (-p)
#
function get_prop # property dataset
{
	typeset prop_val
	typeset prop=$1
	typeset dataset=$2

	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for dataset " \
		    "$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
#
# Simple function to get the specified property of pool. If unable to
# get the property then exits.
#
# Note property is in 'parsable' format (-p)
#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
		    awk '{print $3}')
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			    "$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo "$prop_val"
	return 0
}
#
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name
#
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	zpool get name "$pool" > /dev/null 2>&1
	return $?
}
#
# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n  dataset name
#
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	while (($# > 0)); do
		zfs get name $1 > /dev/null 2>&1 || \
		    return $?
		shift
	done

	return 0
}

#
# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n  dataset name
#
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	while (($# > 0)); do
		zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
		    && return 1
		shift
	done

	return 0
}
#
# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# Resolve a dataset name to its mountpoint first.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	if is_linux; then
		for mtpt in `share | awk '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	fi

	# illumos share(1M) prints the path in the second column.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
#
# Given a dataset name determine if it is shared via SMB.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared_smb
{
	typeset fs=$1
	typeset mtpt

	if datasetnonexists "$fs" ; then
		return 1
	else
		# Samba usershare names replace '/' with '_'.
		fs=$(echo $fs | sed 's@/@_@g')
	fi

	if is_linux; then
		for mtpt in `net usershare list | awk '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	else
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi
}
#
# Given a mountpoint, determine if it is not shared via NFS.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	typeset fs=$1

	is_shared $fs
	if (($? == 0)); then
		return 1
	fi

	return 0
}

#
# Given a dataset determine if it is not shared via SMB.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared_smb
{
	typeset fs=$1

	is_shared_smb $fs
	if (($? == 0)); then
		return 1
	fi

	return 0
}
#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
	typeset fs=$1

	is_shared $fs || is_shared_smb $fs
	if (($? == 0)); then
		log_must zfs unshare $fs
	fi

	return 0
}
#
# Helper function to share a NFS mountpoint.
#
function share_nfs #fs
{
	typeset fs=$1

	if is_linux; then
		is_shared $fs
		if (($? != 0)); then
			log_must share "*:$fs"
		fi
	else
		is_shared $fs
		if (($? != 0)); then
			log_must share -F nfs $fs
		fi
	fi

	return 0
}
#
# Helper function to unshare a NFS mountpoint.
#
function unshare_nfs #fs
{
	typeset fs=$1

	if is_linux; then
		is_shared $fs
		if (($? == 0)); then
			log_must unshare -u "*:$fs"
		fi
	else
		is_shared $fs
		if (($? == 0)); then
			log_must unshare -F nfs $fs
		fi
	fi

	return 0
}
#
# Helper function to show NFS shares.
#
function showshares_nfs
{
	if is_linux; then
		share -v
	else
		share -F nfs
	fi

	return 0
}

#
# Helper function to show SMB shares.
#
function showshares_smb
{
	if is_linux; then
		net usershare list
	else
		share -F smb
	fi

	return 0
}
# NOTE(review): garbled numbered listing; the while-loop body (sleep/timeout
# decrement) and several 'fi' terminators are missing. Preserved byte-for-byte;
# comments only. Purpose: on illumos, coax svc:/network/nfs/server online by
# sharing and then unsharing a throwaway directory.
1372 # Check NFS server status and trigger it online.
1374 function setup_nfs_server
1376 # Cannot share directory in non-global zone.
1378 if ! is_global_zone; then
1379 log_note "Cannot trigger NFS server by sharing in LZ."
# Presumably the Linux branch: server must already be running -- confirm.
1384 log_note "NFS server must started prior to running test framework."
1388 typeset nfs_fmri="svc:/network/nfs/server:default"
1389 if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1391 # Only really sharing operation can enable NFS server
1392 # to online permanently.
1394 typeset dummy=/tmp/dummy
1396 if [[ -d $dummy ]]; then
1397 log_must rm -rf $dummy
1400 log_must mkdir $dummy
1401 log_must share $dummy
1404 # Waiting for fmri's status to be the final status.
1405 # Otherwise, in transition, an asterisk (*) is appended for
1406 # instances, unshare will reverse status to 'DIS' again.
1408 # Waiting for 1's at least.
1412 while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1419 log_must unshare $dummy
1420 log_must rm -rf $dummy
1423 log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
#
# To verify whether calling process is in global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
	# Linux has no zones; always "global".
	if is_linux; then
		return 0
	else
		typeset cur_zone=$(zonename 2>/dev/null)
		if [[ $cur_zone != "global" ]]; then
			return 1
		fi
		return 0
	fi
}
# NOTE(review): garbled numbered listing; the case statement scaffolding
# ('case $limit in', the 'global'/'local'/'both' arm bodies, 'esac') is
# missing. Preserved byte-for-byte; comments only. Visible arms show: running
# in the global zone rejects limit "local", running in a local zone rejects
# limit "global", unknown limits only warn.
1445 # Verify whether test is permitted to run from
1446 # global zone, local zone, or both
1448 # $1 zone limit, could be "global", "local", or "both"(no limit)
1450 # Return 0 if permitted, otherwise exit with log_unsupported
1452 function verify_runnable # zone limit
1456 [[ -z $limit ]] && return 0
1458 if is_global_zone ; then
1462 local) log_unsupported "Test is unable to run from "\
1465 *) log_note "Warning: unknown limit $limit - " \
1473 global) log_unsupported "Test is unable to run from "\
1476 *) log_note "Warning: unknown limit $limit - " \
#
# Return 0 if create successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
#
function create_pool #pool devs_list
{
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
#
# Return 0 if destroy successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.
#
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd/udev activity can cause attempts
			# to destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			log_must_busy zpool destroy -f $pool

			[[ -d $mtpt ]] && \
			    log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
#
# Return 0 if destroy successfully or the dataset exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
# Destroy dataset with the given parameters.
#
function destroy_dataset #dataset #args
{
	typeset dataset=$1
	typeset mtpt
	typeset args=${2:-""}

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	if is_global_zone ; then
		if datasetexists "$dataset" ; then
			mtpt=$(get_prop mountpoint "$dataset")
			# Retry on transient EBUSY (see destroy_pool).
			log_must_busy zfs destroy $args $dataset

			[[ -d $mtpt ]] && \
			    log_must rm -rf $mtpt
		else
			log_note "Dataset does not exist. ($dataset)"
			return 1
		fi
	fi

	return 0
}
# NOTE(review): garbled numbered listing; counter initialization (cntctr/i),
# loop increments and many 'fi'/'done' terminators are missing. Preserved
# byte-for-byte; comments only. illumos-only: builds $ZONE_POOL with five
# $ZONE_CTR* datasets plus a zvol, writes a zonecfg script, installs and
# boots the zone.
1588 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1589 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1590 # and a zvol device to the zone.
1593 # $2 zone root directory prefix
1596 function zfs_zones_setup #zone_name zone_root zone_ip
1598 typeset zone_name=${1:-$(hostname)-z}
1599 typeset zone_root=${2:-"/zone_root"}
1600 typeset zone_ip=${3:-"10.1.1.10"}
1601 typeset prefix_ctr=$ZONE_CTR
1602 typeset pool_name=$ZONE_POOL
1606 # Create pool and 5 container within it
1608 [[ -d /$pool_name ]] && rm -rf /$pool_name
1609 log_must zpool create -f $pool_name $DISKS
1610 while ((i < cntctr)); do
1611 log_must zfs create $pool_name/$prefix_ctr$i
1616 log_must zfs create -V 1g $pool_name/zone_zvol
1620 # If current system support slog, add slog device for pool
1622 if verify_slog_support ; then
1623 typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
1624 log_must mkfile $MINVDEVSIZE $sdevs
1625 log_must zpool add $pool_name log mirror $sdevs
1628 # this isn't supported just yet.
1629 # Create a filesystem. In order to add this to
1630 # the zone, it must have it's mountpoint set to 'legacy'
1631 # log_must zfs create $pool_name/zfs_filesystem
1632 # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
1634 [[ -d $zone_root ]] && \
1635 log_must rm -rf $zone_root/$zone_name
1636 [[ ! -d $zone_root ]] && \
1637 log_must mkdir -p -m 0700 $zone_root/$zone_name
1639 # Create zone configure file and configure the zone
1641 typeset zone_conf=/tmp/zone_conf.$$
1642 echo "create" > $zone_conf
1643 echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1644 echo "set autoboot=true" >> $zone_conf
1646 while ((i < cntctr)); do
1647 echo "add dataset" >> $zone_conf
1648 echo "set name=$pool_name/$prefix_ctr$i" >> \
1650 echo "end" >> $zone_conf
1654 # add our zvol to the zone
1655 echo "add device" >> $zone_conf
1656 echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1657 echo "end" >> $zone_conf
1659 # add a corresponding zvol rdsk to the zone
1660 echo "add device" >> $zone_conf
1661 echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1662 echo "end" >> $zone_conf
1664 # once it's supported, we'll add our filesystem to the zone
1665 # echo "add fs" >> $zone_conf
1666 # echo "set type=zfs" >> $zone_conf
1667 # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1668 # echo "set dir=/export/zfs_filesystem" >> $zone_conf
1669 # echo "end" >> $zone_conf
1671 echo "verify" >> $zone_conf
1672 echo "commit" >> $zone_conf
1673 log_must zonecfg -z $zone_name -f $zone_conf
1674 log_must rm -f $zone_conf
1677 zoneadm -z $zone_name install
1678 if (($? == 0)); then
1679 log_note "SUCCESS: zoneadm -z $zone_name install"
1681 log_fail "FAIL: zoneadm -z $zone_name install"
1684 # Install sysidcfg file
1686 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1687 echo "system_locale=C" > $sysidcfg
1688 echo "terminal=dtterm" >> $sysidcfg
1689 echo "network_interface=primary {" >> $sysidcfg
1690 echo "hostname=$zone_name" >> $sysidcfg
1691 echo "}" >> $sysidcfg
1692 echo "name_service=NONE" >> $sysidcfg
1693 echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
1694 echo "security_policy=NONE" >> $sysidcfg
1695 echo "timezone=US/Eastern" >> $sysidcfg
1698 log_must zoneadm -z $zone_name boot
1702 # Reexport TESTPOOL & TESTPOOL(1-4)
1704 function reexport_pool
1709 while ((i < cntctr)); do
1711 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1712 if ! ismounted $TESTPOOL; then
1713 log_must zfs mount $TESTPOOL
1716 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1717 if eval ! ismounted \$TESTPOOL$i; then
1718 log_must eval zfs mount \$TESTPOOL$i
1726 # Verify a given disk or pool state
1728 # Return 0 if pool/disk matches expected state, 1 otherwise
1730 function check_state # pool disk state{online,offline,degraded}
1733 typeset disk=${2#$DEV_DSKDIR/}
1736 [[ -z $pool ]] || [[ -z $state ]] \
1737 && log_fail "Arguments invalid or missing"
1739 if [[ -z $disk ]]; then
1740 #check pool state only
1741 zpool get -H -o value health $pool \
1742 | grep -i "$state" > /dev/null 2>&1
1744 zpool status -v $pool | grep "$disk" \
1745 | grep -i "$state" > /dev/null 2>&1
1752 # Cause a scan of all scsi host adapters by default
1754 # $1 optional host number
1756 function scan_scsi_hosts
1758 typeset hostnum=${1}
1761 if [[ -z $hostnum ]]; then
1762 for host in /sys/class/scsi_host/host*; do
1763 log_must eval "echo '- - -' > $host/scan"
1767 "echo /sys/class/scsi_host/host$hostnum/scan" \
1770 "echo '- - -' > /sys/class/scsi_host/host$hostnum/scan"
1775 # Wait for newly created block devices to have their minors created.
1777 function block_device_wait
1786 # Online or offline a disk on the system
1788 # First checks state of disk. Test will fail if disk is not properly onlined
1789 # or offlined. Online is a full rescan of SCSI disks by echoing to every
1792 function on_off_disk # disk state{online,offline} host
1798 [[ -z $disk ]] || [[ -z $state ]] && \
1799 log_fail "Arguments invalid or missing"
1802 if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
1803 dm_name="$(readlink $DEV_DSKDIR/$disk \
1804 | nawk -F / '{print $2}')"
1805 slave="$(ls /sys/block/${dm_name}/slaves \
1806 | nawk '{print $1}')"
1807 while [[ -n $slave ]]; do
1808 #check if disk is online
1809 lsscsi | egrep $slave > /dev/null
1810 if (($? == 0)); then
1811 slave_dir="/sys/block/${dm_name}"
1812 slave_dir+="/slaves/${slave}/device"
1813 ss="${slave_dir}/state"
1814 sd="${slave_dir}/delete"
1815 log_must eval "echo 'offline' > ${ss}"
1816 log_must eval "echo '1' > ${sd}"
1817 lsscsi | egrep $slave > /dev/null
1818 if (($? == 0)); then
1819 log_fail "Offlining" \
1823 slave="$(ls /sys/block/$dm_name/slaves \
1824 2>/dev/null | nawk '{print $1}')"
1826 elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
1827 #check if disk is online
1828 lsscsi | egrep $disk > /dev/null
1829 if (($? == 0)); then
1830 dev_state="/sys/block/$disk/device/state"
1831 dev_delete="/sys/block/$disk/device/delete"
1832 log_must eval "echo 'offline' > ${dev_state}"
1833 log_must eval "echo '1' > ${dev_delete}"
1834 lsscsi | egrep $disk > /dev/null
1835 if (($? == 0)); then
1836 log_fail "Offlining $disk" \
1840 log_note "$disk is already offline"
1842 elif [[ $state == "online" ]]; then
1843 #force a full rescan
1844 scan_scsi_hosts $host
1846 if is_mpath_device $disk; then
1847 dm_name="$(readlink $DEV_DSKDIR/$disk \
1848 | nawk -F / '{print $2}')"
1849 slave="$(ls /sys/block/$dm_name/slaves \
1850 | nawk '{print $1}')"
1851 lsscsi | egrep $slave > /dev/null
1852 if (($? != 0)); then
1853 log_fail "Onlining $disk failed"
1855 elif is_real_device $disk; then
1856 lsscsi | egrep $disk > /dev/null
1857 if (($? != 0)); then
1858 log_fail "Onlining $disk failed"
1861 log_fail "$disk is not a real dev"
1864 log_fail "$disk failed to $state"
1870 # Get the mountpoint of snapshot
1871 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1874 function snapshot_mountpoint
1876 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1878 if [[ $dataset != *@* ]]; then
1879 log_fail "Error name of snapshot '$dataset'."
1882 typeset fs=${dataset%@*}
1883 typeset snap=${dataset#*@}
1885 if [[ -z $fs || -z $snap ]]; then
1886 log_fail "Error name of snapshot '$dataset'."
1889 echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1893 # Given a device and 'ashift' value verify it's correctly set on every label
1895 function verify_ashift # device ashift
1900 zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
1916 # Given a pool and file system, this function will verify the file system
1917 # using the zdb internal tool. Note that the pool is exported and imported
1918 # to ensure it has consistent state.
1920 function verify_filesys # pool filesystem dir
1923 typeset filesys="$2"
1924 typeset zdbout="/tmp/zdbout.$$"
1929 typeset search_path=""
1931 log_note "Calling zdb to verify filesystem '$filesys'"
1932 zfs unmount -a > /dev/null 2>&1
1933 log_must zpool export $pool
1935 if [[ -n $dirs ]] ; then
1936 for dir in $dirs ; do
1937 search_path="$search_path -d $dir"
1941 log_must zpool import $search_path $pool
1943 zdb -cudi $filesys > $zdbout 2>&1
1944 if [[ $? != 0 ]]; then
1945 log_note "Output: zdb -cudi $filesys"
1947 log_fail "zdb detected errors with: '$filesys'"
1950 log_must zfs mount -a
1951 log_must rm -rf $zdbout
1955 # Given a pool, this function lists all disks in the pool
1957 function get_disklist # pool
1961 disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1962 grep -v "\-\-\-\-\-" | \
1963 egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1969 # Given a pool, this function lists all disks in the pool with their full
1970 # path (like "/dev/sda" instead of "sda").
1972 function get_disklist_fullpath # pool
1981 # This function kills a given list of processes after a time period. We use
1982 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1983 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1984 # would be listed as FAIL, which we don't want : we're happy with stress tests
1985 # running for a certain amount of time, then finishing.
1987 # @param $1 the time in seconds after which we should terminate these processes
1988 # @param $2..$n the processes we wish to terminate.
1990 function stress_timeout
1992 typeset -i TIMEOUT=$1
1996 log_note "Waiting for child processes($cpids). " \
1997 "It could last dozens of minutes, please be patient ..."
1998 log_must sleep $TIMEOUT
2000 log_note "Killing child processes after ${TIMEOUT} stress timeout."
2002 for pid in $cpids; do
2003 ps -p $pid > /dev/null 2>&1
2004 if (($? == 0)); then
2005 log_must kill -USR1 $pid
2011 # Verify a given hotspare disk is inuse or avail
2013 # Return 0 if pool/disk matches expected state, 1 otherwise
2015 function check_hotspare_state # pool disk state{inuse,avail}
# Strip an optional leading $DEV_DSKDIR/ so callers may pass a full path
# or a bare disk name interchangeably.
2018 typeset disk=${2#$DEV_DSKDIR/}
# Read the device's current state from the "spares" section of
# 'zpool status' output.
2021 cur_state=$(get_device_state $pool $disk "spares")
# NOTE(review): $pool and $state are presumably assigned from $1/$3 on
# elided lines of this function — confirm against the full source.
2023 if [[ $state != ${cur_state} ]]; then
2030 # Wait until a hotspare transitions to a given state or times out.
#
# $1 pool name
# $2 hotspare disk (full $DEV_DSKDIR path or bare name)
# $3 expected state
# $4 timeout in seconds (default 60)
#
2032 # Return 0 when pool/disk matches expected state, 1 on timeout.
2034 function wait_hotspare_state # pool disk state timeout
# Fix: was ${2#$/DEV_DSKDIR/} — the stray "$/" made the prefix pattern a
# literal string that never matches, so a full /dev path was never
# stripped. Use ${2#$DEV_DSKDIR/} as check_hotspare_state() does.
2037 typeset disk=${2#$DEV_DSKDIR/}
2039 typeset timeout=${4:-60}
# Poll once per iteration until the state matches or $timeout expires.
2042 while [[ $i -lt $timeout ]]; do
2043 if check_hotspare_state $pool $disk $state; then
2055 # Verify a given slog disk is inuse or avail
2057 # Return 0 if pool/disk matches expected state, 1 otherwise
2059 function check_slog_state # pool disk state{online,offline,unavail}
# Accept either a full $DEV_DSKDIR path or a bare disk name.
2062 typeset disk=${2#$DEV_DSKDIR/}
# Read the device's current state from the "logs" section of
# 'zpool status' output.
2065 cur_state=$(get_device_state $pool $disk "logs")
# NOTE(review): $pool and $state presumably come from $1/$3 on elided
# lines — confirm against the full source.
2067 if [[ $state != ${cur_state} ]]; then
2074 # Verify a given vdev disk is in the expected state
#
# $1 pool name
# $2 vdev disk (full $DEV_DSKDIR path or bare name)
# $3 expected state
#
2076 # Return 0 if pool/disk matches expected state, 1 otherwise
2078 function check_vdev_state # pool disk state{online,offline,unavail}
# Fix: was ${2#$/DEV_DSKDIR/} — the stray "$/" made the prefix pattern a
# literal that never matches, so a full /dev path was never stripped.
# Use ${2#$DEV_DSKDIR/} as check_hotspare_state()/check_slog_state() do.
2081 typeset disk=${2#$DEV_DSKDIR/}
2084 cur_state=$(get_device_state $pool $disk)
2086 if [[ $state != ${cur_state} ]]; then
2093 # Wait until a vdev transitions to a given state or times out.
#
# $1 pool name
# $2 vdev disk (full $DEV_DSKDIR path or bare name)
# $3 expected state
# $4 timeout in seconds (default 60)
#
2095 # Return 0 when pool/disk matches expected state, 1 on timeout.
2097 function wait_vdev_state # pool disk state timeout
# Fix: was ${2#$/DEV_DSKDIR/} — the stray "$/" made the prefix pattern a
# literal that never matches, so a full /dev path was never stripped.
2100 typeset disk=${2#$DEV_DSKDIR/}
2102 typeset timeout=${4:-60}
# Poll once per iteration until the state matches or $timeout expires.
2105 while [[ $i -lt $timeout ]]; do
2106 if check_vdev_state $pool $disk $state; then
2118 # Check the output of 'zpool status -v <pool>',
2119 # and see if the content of <token> contains the <keyword> specified.
2121 # Return 0 if it contains the keyword, 1 otherwise
2123 function check_pool_status # pool token keyword <verbose>
2128 typeset verbose=${4:-false}
# Extract the line of 'zpool status -v' whose first field is "<token>:"
# (e.g. token "scan" matches the "scan:" status line).
2130 scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2131 ($1==token) {print $0}')
# NOTE(review): when verbose, the matched line is presumably logged on an
# elided line inside this branch — confirm against the full source.
2132 if [[ $verbose == true ]]; then
# Case-insensitive keyword match decides the return status.
2135 echo $scan | grep -i "$keyword" > /dev/null 2>&1
2141 # These 6 following functions are instances of check_pool_status()
2142 # is_pool_resilvering - to check if the pool is resilver in progress
2143 # is_pool_resilvered - to check if the pool is resilver completed
2144 # is_pool_scrubbing - to check if the pool is scrub in progress
2145 # is_pool_scrubbed - to check if the pool is scrub completed
2146 # is_pool_scrub_stopped - to check if the pool is scrub stopped
2147 # is_pool_scrub_paused - to check if the pool has scrub paused
# Each wrapper passes its optional second argument through as the
# <verbose> flag of check_pool_status().
2149 function is_pool_resilvering #pool <verbose>
2151 check_pool_status "$1" "scan" "resilver in progress since " $2
2155 function is_pool_resilvered #pool <verbose>
2157 check_pool_status "$1" "scan" "resilvered " $2
2161 function is_pool_scrubbing #pool <verbose>
2163 check_pool_status "$1" "scan" "scrub in progress since " $2
2167 function is_pool_scrubbed #pool <verbose>
2169 check_pool_status "$1" "scan" "scrub repaired" $2
2173 function is_pool_scrub_stopped #pool <verbose>
2175 check_pool_status "$1" "scan" "scrub canceled" $2
2179 function is_pool_scrub_paused #pool <verbose>
2181 check_pool_status "$1" "scan" "scrub paused since " $2
2186 # Use create_pool()/destroy_pool() to clean up the information in
2187 # the given disk to avoid slice overlapping.
2189 function cleanup_devices #vdevs
2191 typeset pool="foopool$$"
2193 if poolexists $pool ; then
2197 create_pool $pool $@
2204 # A function to find and locate free disks on a system or from given
2205 # disks as the parameter. It works by locating disks that are in use
2206 # as swap devices and dump devices, and also disks listed in /etc/vfstab
2208 # $@ given disks to find which are free, default is all disks in
2211 # @return a string containing the list of available disks
2215 # Trust provided list, no attempt is made to locate unused devices.
2222 sfi=/tmp/swaplist.$$
2223 dmpi=/tmp/dumpdev.$$
2224 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2227 dumpadm > $dmpi 2>/dev/null
2229 # write an awk script that can process the output of format
2230 # to produce a list of disks we know about. Note that we have
2231 # to escape "$2" so that the shell doesn't interpret it while
2232 # we're creating the awk script.
2233 # -------------------
2234 cat > /tmp/find_disks.awk <<EOF
2243 if (searchdisks && \$2 !~ "^$"){
2249 /^AVAILABLE DISK SELECTIONS:/{
2253 #---------------------
2255 chmod 755 /tmp/find_disks.awk
2256 disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
2257 rm /tmp/find_disks.awk
2260 for disk in $disks; do
2262 grep "${disk}[sp]" /etc/mnttab >/dev/null
2263 (($? == 0)) && continue
2265 grep "${disk}[sp]" $sfi >/dev/null
2266 (($? == 0)) && continue
2267 # check for dump device
2268 grep "${disk}[sp]" $dmpi >/dev/null
2269 (($? == 0)) && continue
2270 # check to see if this disk hasn't been explicitly excluded
2271 # by a user-set environment variable
2272 echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
2273 (($? == 0)) && continue
2274 unused_candidates="$unused_candidates $disk"
2279 # now just check to see if those disks do actually exist
2280 # by looking for a device pointing to the first slice in
2281 # each case. limit the number to max_finddisksnum
2283 for disk in $unused_candidates; do
2284 if [ -b $DEV_DSKDIR/${disk}s0 ]; then
2285 if [ $count -lt $max_finddisksnum ]; then
2286 unused="$unused $disk"
2287 # do not impose limit if $@ is provided
2288 [[ -z $@ ]] && ((count = count + 1))
2293 # finally, return our disk list
2298 # Add specified user to specified group
2302 # $3 base of the homedir (optional)
2304 function add_user #<group_name> <user_name> <basedir>
2308 typeset basedir=${3:-"/var/tmp"}
2310 if ((${#gname} == 0 || ${#uname} == 0)); then
2311 log_fail "group name or user name are not defined."
2314 log_must useradd -g $gname -d $basedir/$uname -m $uname
2315 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.profile
2316 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.bash_profile
2317 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.login
2319 # Add new users to the same group and the command line utils.
2320 # This allows them to be run out of the original users home
2321 # directory as long as it permissioned to be group readable.
2323 cmd_group=$(stat --format="%G" $(which zfs))
2324 log_must usermod -a -G $cmd_group $uname
2331 # Delete the specified user.
2334 # $2 base of the homedir (optional)
2336 function del_user #<logname> <basedir>
2339 typeset basedir=${2:-"/var/tmp"}
2341 if ((${#user} == 0)); then
2342 log_fail "login name is necessary."
2345 if id $user > /dev/null 2>&1; then
2346 log_must_retry "currently used" 5 userdel $user
2349 [[ -d $basedir/$user ]] && rm -fr $basedir/$user
2355 # Select valid gid and create specified group.
2359 function add_group #<group_name>
2363 if ((${#group} == 0)); then
2364 log_fail "group name is necessary."
2367 # Assign 100 as the base gid, a larger value is selected for
2368 # Linux because for many distributions 1000 and under are reserved.
2371 groupadd $group > /dev/null 2>&1
2381 groupadd -g $gid $group > /dev/null 2>&1
2385 # The gid is not unique
2394 # Delete the specified group.
2398 function del_group #<group_name>
2401 if ((${#grp} == 0)); then
2402 log_fail "group name is necessary."
2406 getent group $grp > /dev/null 2>&1
2409 # Group does not exist.
2411 # Name already exists as a group name
2412 0) log_must groupdel $grp ;;
2416 groupmod -n $grp $grp > /dev/null 2>&1
2419 # Group does not exist.
2421 # Name already exists as a group name
2422 9) log_must groupdel $grp ;;
2431 # This function will return true if it's safe to destroy the pool passed
2432 # as argument 1. It checks for pools based on zvols and files, and also
2433 # files contained in a pool that may have a different mountpoint.
2435 function safe_to_destroy_pool { # $1 the pool name
2438 typeset DONT_DESTROY=""
2440 # We check that by deleting the $1 pool, we're not
2441 # going to pull the rug out from other pools. Do this
2442 # by looking at all other pools, ensuring that they
2443 # aren't built from files or zvols contained in this pool.
2445 for pool in $(zpool list -H -o name)
2449 # this is a list of the top-level directories in each of the
2450 # files that make up the path to the files the pool is based on
2451 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2454 # this is a list of the zvols that make up the pool
2455 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2458 # also want to determine if it's a file-based pool using an
2459 # alternate mountpoint...
2460 POOL_FILE_DIRS=$(zpool status -v $pool | \
2461 grep / | awk '{print $1}' | \
2462 awk -F/ '{print $2}' | grep -v "dev")
2464 for pooldir in $POOL_FILE_DIRS
2466 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2467 grep "${pooldir}$" | awk '{print $1}')
2469 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2473 if [ ! -z "$ZVOLPOOL" ]
2476 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2479 if [ ! -z "$FILEPOOL" ]
2482 log_note "Pool $pool is built from $FILEPOOL on $1"
2485 if [ ! -z "$ALTMOUNTPOOL" ]
2488 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2492 if [ -z "${DONT_DESTROY}" ]
2496 log_note "Warning: it is not safe to destroy $1!"
2502 # Get the available ZFS compression options
2503 # $1 option type zfs_set|zfs_compress
2505 function get_compress_opts
2507 typeset COMPRESS_OPTS
2508 typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2509 gzip-6 gzip-7 gzip-8 gzip-9"
2511 if [[ $1 == "zfs_compress" ]] ; then
2512 COMPRESS_OPTS="on lzjb"
2513 elif [[ $1 == "zfs_set" ]] ; then
2514 COMPRESS_OPTS="on off lzjb"
2516 typeset valid_opts="$COMPRESS_OPTS"
2517 zfs get 2>&1 | grep gzip >/dev/null 2>&1
2518 if [[ $? -eq 0 ]]; then
2519 valid_opts="$valid_opts $GZIP_OPTS"
2525 # Verify zfs operation with -p option work as expected
2526 # $1 operation, value could be create, clone or rename
2527 # $2 dataset type, value could be fs or vol
2529 # $4 new dataset name
2531 function verify_opt_p_ops
2536 typeset newdataset=$4
2538 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2539 log_fail "$datatype is not supported."
2542 # check parameters accordingly
2547 if [[ $datatype == "vol" ]]; then
2548 ops="create -V $VOLSIZE"
2552 if [[ -z $newdataset ]]; then
2553 log_fail "newdataset should not be empty" \
2556 log_must datasetexists $dataset
2557 log_must snapexists $dataset
2560 if [[ -z $newdataset ]]; then
2561 log_fail "newdataset should not be empty" \
2564 log_must datasetexists $dataset
2565 log_mustnot snapexists $dataset
2568 log_fail "$ops is not supported."
2572 # make sure the upper level filesystem does not exist
2573 destroy_dataset "${newdataset%/*}" "-rRf"
2575 # without -p option, operation will fail
2576 log_mustnot zfs $ops $dataset $newdataset
2577 log_mustnot datasetexists $newdataset ${newdataset%/*}
2579 # with -p option, operation should succeed
2580 log_must zfs $ops -p $dataset $newdataset
2583 if ! datasetexists $newdataset ; then
2584 log_fail "-p option does not work for $ops"
2587 # when $ops is create or clone, redo the operation still return zero
2588 if [[ $ops != "rename" ]]; then
2589 log_must zfs $ops -p $dataset $newdataset
2596 # Get configuration of pool
2606 if ! poolexists "$pool" ; then
2609 alt_root=$(zpool list -H $pool | awk '{print $NF}')
2610 if [[ $alt_root == "-" ]]; then
2611 value=$(zdb -C $pool | grep "$config:" | awk -F: \
2614 value=$(zdb -e $pool | grep "$config:" | awk -F: \
2617 if [[ -n $value ]] ; then
2627 # Private function. Randomly select one of the items from the arguments.
2632 function _random_get
2639 ((ind = RANDOM % cnt + 1))
2641 typeset ret=$(echo "$str" | cut -f $ind -d ' ')
2646 # Randomly select one item from the arguments, which include the NONE string
2648 function random_get_with_non
2653 _random_get "$cnt" "$@"
2657 # Randomly select one item from the arguments, which don't include the NONE string
2661 _random_get "$#" "$@"
2665 # Detect if the current system supports slog (separate log) devices
2667 function verify_slog_support
2669 typeset dir=/tmp/disk.$$
# NOTE(review): $vdev, $sdev and $pool are presumably assigned on elided
# lines of this function — confirm against the full source.
2675 mkfile $MINVDEVSIZE $vdev $sdev
# Dry-run (-n) pool creation with a separate log vdev; failure means the
# platform/build does not support slog devices.
2678 if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
2687 # The function will generate a dataset name with specific length
2688 # $1, the length of the name
2689 # $2, the base string to construct the name
2691 function gen_dataset_name
2694 typeset basestr="$2"
2695 typeset -i baselen=${#basestr}
2699 if ((len % baselen == 0)); then
2700 ((iter = len / baselen))
2702 ((iter = len / baselen + 1))
2704 while ((iter > 0)); do
2705 l_name="${l_name}$basestr"
2714 # Get cksum tuple of dataset
2717 # sample zdb output:
2718 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2719 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2720 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2721 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2722 function datasetcksum
2726 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2727 | awk -F= '{print $7}')
2738 cksum=$(cksum $1 | awk '{print $1}')
2743 # Get the given disk/slice state from the specific field of the pool
2745 function get_device_state #pool disk field("", "spares","logs")
# Accept either a full $DEV_DSKDIR path or a bare disk name.
2748 typeset disk=${2#$DEV_DSKDIR/}
# Default field is the pool name itself, i.e. the top-level vdev section.
2749 typeset field=${3:-$pool}
# Walk 'zpool status' output: after the "config:" header, look for the
# requested section (pool name, "spares" or "logs"); within that section
# print the STATE column ($2) of the matching device line. Reaching
# another section header ends the search window.
2751 state=$(zpool status -v "$pool" 2>/dev/null | \
2752 nawk -v device=$disk -v pool=$pool -v field=$field \
2753 'BEGIN {startconfig=0; startfield=0; }
2754 /config:/ {startconfig=1}
2755 (startconfig==1) && ($1==field) {startfield=1; next;}
2756 (startfield==1) && ($1==device) {print $2; exit;}
2758 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2764 # print the given directory filesystem type
2772 if [[ -z $dir ]]; then
2773 log_fail "Usage: get_fstype <directory>"
2780 df -n $dir | awk '{print $3}'
2784 # Given a disk, label it to VTOC regardless what label was on the disk
2790 if [[ -z $disk ]]; then
2791 log_fail "The disk name is unspecified."
2793 typeset label_file=/var/tmp/labelvtoc.$$
2794 typeset arch=$(uname -p)
2797 log_note "Currently unsupported by the test framework"
2801 if [[ $arch == "i386" ]]; then
2802 echo "label" > $label_file
2803 echo "0" >> $label_file
2804 echo "" >> $label_file
2805 echo "q" >> $label_file
2806 echo "q" >> $label_file
2808 fdisk -B $disk >/dev/null 2>&1
2809 # wait a while for fdisk finishes
2811 elif [[ $arch == "sparc" ]]; then
2812 echo "label" > $label_file
2813 echo "0" >> $label_file
2814 echo "" >> $label_file
2815 echo "" >> $label_file
2816 echo "" >> $label_file
2817 echo "q" >> $label_file
2819 log_fail "unknown arch type"
2822 format -e -s -d $disk -f $label_file
2823 typeset -i ret_val=$?
2826 # wait the format to finish
2829 if ((ret_val != 0)); then
2830 log_fail "unable to label $disk as VTOC."
2837 # check if the system was installed as zfsroot or not
2838 # return: 0 if true, otherwise false
2842 df -n / | grep zfs > /dev/null 2>&1
2847 # get the root filesystem name if it's zfsroot system.
2849 # return: root filesystem name
2855 rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
2858 if [[ -z "$rootfs" ]]; then
2859 log_fail "Can not get rootfs"
2861 zfs list $rootfs > /dev/null 2>&1
2862 if (($? == 0)); then
2865 log_fail "This is not a zfsroot system."
2870 # get the rootfs's pool name
2874 function get_rootpool
2880 rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
2883 if [[ -z "$rootfs" ]]; then
2884 log_fail "Can not get rootpool"
2886 zfs list $rootfs > /dev/null 2>&1
2887 if (($? == 0)); then
2888 rootpool=`echo $rootfs | awk -F\/ '{print $1}'`
2891 log_fail "This is not a zfsroot system."
2896 # Check if the given device is physical device
2898 function is_physical_device #device
2900 typeset device=${1#$DEV_DSKDIR}
2901 device=${device#$DEV_RDSKDIR}
2904 [[ -b "$DEV_DSKDIR/$device" ]] && \
2905 [[ -f /sys/module/loop/parameters/max_part ]]
2908 echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
2914 # Check if the given device is a real device (ie SCSI device)
2916 function is_real_device #disk
2919 [[ -z $disk ]] && log_fail "No argument for disk given."
2922 lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
2923 egrep disk >/dev/null
2929 # Check if the given device is a loop device
2931 function is_loop_device #disk
2934 [[ -z $disk ]] && log_fail "No argument for disk given."
2937 lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
2938 egrep loop >/dev/null
2944 # Check if the given device is a multipath device and if there is a symbolic
2945 # link to a device mapper and to a disk
2946 # Currently no support for dm devices alone without multipath
2948 function is_mpath_device #disk
2951 [[ -z $disk ]] && log_fail "No argument for disk given."
2954 lsblk $DEV_MPATHDIR/$disk -o TYPE 2>/dev/null | \
2955 egrep mpath >/dev/null
2956 if (($? == 0)); then
2957 readlink $DEV_MPATHDIR/$disk > /dev/null 2>&1
2965 # Set the slice prefix for disk partitioning depending
2966 # on whether the device is a real, multipath, or loop device.
2967 # Currently all disks have to be of the same type, so only
2968 # checks first disk to determine slice prefix.
2970 function set_slice_prefix
2976 while (( i < $DISK_ARRAY_NUM )); do
2977 disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
2978 if ( is_mpath_device $disk ) && [[ -z $(echo $disk | awk 'substr($1,18,1)\
2979 ~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
2980 export SLICE_PREFIX=""
2982 elif ( is_mpath_device $disk || is_loop_device \
2984 export SLICE_PREFIX="p"
2987 log_fail "$disk not supported for partitioning."
2995 # Set the directory path of the listed devices in $DISK_ARRAY_NUM
2996 # Currently all disks have to be of the same type, so only
2997 # checks first disk to determine device directory
2998 # default = /dev (linux)
2999 # real disk = /dev (linux)
3000 # multipath device = /dev/mapper (linux)
3002 function set_device_dir
3008 while (( i < $DISK_ARRAY_NUM )); do
3009 disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
3010 if is_mpath_device $disk; then
3011 export DEV_DSKDIR=$DEV_MPATHDIR
3014 export DEV_DSKDIR=$DEV_RDSKDIR
3020 export DEV_DSKDIR=$DEV_RDSKDIR
3025 # Get the directory path of given device
3027 function get_device_dir #device
3031 if ! $(is_physical_device $device) ; then
3032 if [[ $device != "/" ]]; then
3035 if [[ -b "$DEV_DSKDIR/$device" ]]; then
3036 device="$DEV_DSKDIR"
3045 # Get persistent name for given disk
3047 function get_persistent_disk_name #device
3053 if is_real_device $device; then
3054 dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
3055 | egrep disk/by-id | nawk '{print $2; exit}' \
3056 | nawk -F / '{print $3}')"
3058 elif is_mpath_device $device; then
3059 dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
3060 | egrep disk/by-id/dm-uuid \
3061 | nawk '{print $2; exit}' \
3062 | nawk -F / '{print $3}')"
3073 # Load scsi_debug module with specified parameters
3075 function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
3082 [[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
3083 [[ -z $luns ]] && log_fail "Arguments invalid or missing"
3086 modprobe -n scsi_debug
3087 if (($? != 0)); then
3088 log_unsupported "Platform does not have scsi_debug"
3091 lsmod | egrep scsi_debug > /dev/null
3092 if (($? == 0)); then
3093 log_fail "scsi_debug module already installed"
3095 log_must modprobe scsi_debug dev_size_mb=$devsize \
3096 add_host=$hosts num_tgts=$tgts max_luns=$luns
3098 lsscsi | egrep scsi_debug > /dev/null
3099 if (($? == 1)); then
3100 log_fail "scsi_debug module install failed"
3107 # Get the package name
3109 function get_package_name
3111 typeset dirpath=${1:-$STC_NAME}
3113 echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
3117 # Get the number of words in a string separated by white space
3119 function get_word_count
3125 # Verify that the required number of disks is given
3127 function verify_disk_count
# Minimum disk count defaults to 1 when $2 is omitted.
3129 typeset -i min=${2:-1}
3131 typeset -i count=$(get_word_count "$1")
# Too few disks is an environment problem, not a failure: mark untested.
3133 if ((count < min)); then
3134 log_untested "A minimum of $min disks is required to run." \
3135 " You specified $count disk(s)"
# Return 0 if the given dataset is a volume, non-zero otherwise.
3139 function ds_is_volume
3141 typeset type=$(get_prop type $1)
3142 [[ $type = "volume" ]] && return 0
# Return 0 if the given dataset is a filesystem, non-zero otherwise.
3146 function ds_is_filesystem
3148 typeset type=$(get_prop type $1)
3149 [[ $type = "filesystem" ]] && return 0
# Return 0 if the given dataset is a snapshot, non-zero otherwise.
3153 function ds_is_snapshot
3155 typeset type=$(get_prop type $1)
3156 [[ $type = "snapshot" ]] && return 0
3161 # Check if Trusted Extensions are installed and enabled
3163 function is_te_enabled
3165 svcs -H -o state labeld 2>/dev/null | grep "enabled"
3166 if (($? != 0)); then
3173 # Utility function to determine if a system has multiple cpus.
3179 (($(psrinfo | wc -l) > 1))
3185 function get_cpu_freq
3188 lscpu | awk '/CPU MHz/ { print $3 }'
3190 psrinfo -v 0 | awk '/processor operates at/ {print $6}'
3194 # Run the given command as the user provided.
3200 log_note "user:$user $@"
3201 eval su - \$user -c \"$@\" > /tmp/out 2>/tmp/err
3206 # Check if the pool contains the specified vdevs
3211 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3212 # vdevs is not in the pool, and 2 if pool name is missing.
3214 function vdevs_in_pool
3219 if [[ -z $pool ]]; then
3220 log_note "Missing pool name."
3226 typeset tmpfile=$(mktemp)
3227 zpool list -Hv "$pool" >$tmpfile
3229 grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
3230 [[ $? -ne 0 ]] && return 1
3244 max=$(echo $((max > i ? max : i)))
3256 min=$(echo $((min < i ? min : i)))
3263 # Generate a random number between 1 and the argument.
3268 echo $(( ($RANDOM % $max) + 1 ))
3271 # Write data that can be compressed into a directory
3272 function write_compressible
3276 typeset nfiles=${3:-1}
3277 typeset bs=${4:-1024k}
3278 typeset fname=${5:-file}
3280 [[ -d $dir ]] || log_fail "No directory: $dir"
3282 # Under Linux fio is not currently used since its behavior can
3283 # differ significantly across versions. This includes missing
3284 # command line options and cases where the --buffer_compress_*
3285 # options fail to behave as expected.
3287 typeset file_bytes=$(to_bytes $megs)
3288 typeset bs_bytes=4096
3289 typeset blocks=$(($file_bytes / $bs_bytes))
3291 for (( i = 0; i < $nfiles; i++ )); do
3292 truncate -s $file_bytes $dir/$fname.$i
3294 # Write every third block to get 66% compression.
3295 for (( j = 0; j < $blocks; j += 3 )); do
3296 dd if=/dev/urandom of=$dir/$fname.$i \
3297 seek=$j bs=$bs_bytes count=1 \
3298 conv=notrunc >/dev/null 2>&1
3302 log_must eval "fio \
3307 --buffer_compress_percentage=66 \
3308 --buffer_compress_chunk=4096 \
3315 --filename_format='$fname.\$jobnum' >/dev/null"
3324 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3325 objnum=$(stat -c %i $pathname)
3330 # Sync data to the pool
# $1 pool name (defaults to $TESTPOOL)
3333 # $2 boolean to force uberblock (and config including zpool cache file) update
3335 function sync_pool #pool <force>
3337 typeset pool=${1:-$TESTPOOL}
3338 typeset force=${2:-false}
3340 if [[ $force == true ]]; then
3341 log_must zpool sync -f $pool
3343 log_must zpool sync $pool
3350 # Wait for the zpool 'freeing' property to drop to zero.
3354 function wait_freeing #pool
3356 typeset pool=${1:-$TESTPOOL}
# NOTE(review): this check presumably sits inside a polling loop on
# elided lines — confirm against the full source.
3358 [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3364 # Wait for every device replace operation to complete
3368 function wait_replacing #pool
3370 typeset pool=${1:-$TESTPOOL}
3372 [[ "" == "$(zpool status $pool |
3373 awk '/replacing-[0-9]+/ {print $1}')" ]] && break
3379 # Wait for a pool to be scrubbed
3382 # $2 number of seconds to wait (optional)
3384 # Returns true when pool has been scrubbed, or false if there's a timeout or if
3385 # no scrub was done.
3387 function wait_scrubbed
3389 typeset pool=${1:-$TESTPOOL}
3390 typeset iter=${2:-10}
3391 for i in {1..$iter} ; do
3392 if is_pool_scrubbed $pool ; then
3400 # Backup the zed.rc in our test directory so that we can edit it for our test.
3402 # Returns: Backup file name. You will need to pass this to zed_rc_restore().
3403 function zed_rc_backup
3405 zedrc_backup="$(mktemp)"
3406 cp $ZEDLET_DIR/zed.rc $zedrc_backup
3410 function zed_rc_restore
3412 mv $1 $ZEDLET_DIR/zed.rc
3416 # Setup custom environment for the ZED.
3418 # $@ Optional list of zedlets to run under zed.
# Creates $ZEDLET_DIR, seeds a minimal zed.rc configuration, installs
# any extra zedlets, and links the test vdev_id.conf into /etc/zfs.
# Several closing 'fi'/'done' lines of this function fall in gaps of
# this excerpt.
3425 if [[ ! -d $ZEDLET_DIR ]]; then
3426 log_must mkdir $ZEDLET_DIR
3429 if [[ ! -e $VDEVID_CONF ]]; then
3430 log_must touch $VDEVID_CONF
# Refuse to clobber a real system config with our test symlink below.
3433 if [[ -e $VDEVID_CONF_ETC ]]; then
3434 log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3438 # Create a symlink for /etc/zfs/vdev_id.conf file.
3439 log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3441 # Setup minimal ZED configuration. Individual test cases should
3442 # add additional ZEDLETs as needed for their specific test.
3443 log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3444 log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
3446 # Scripts must only be user writable.
3447 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3448 saved_umask=$(umask)
3450 for i in $EXTRA_ZEDLETS ; do
3451 log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3453 log_must umask $saved_umask
3456 # Customize the zed.rc file to enable the full debug log.
3457 log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
3458 echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3463 # Cleanup custom ZED environment.
3465 # $@ Optional list of zedlets to remove from our test zed.d directory.
3466 function zed_cleanup
# Remove everything zed_setup() put in place: config files, helper
# scripts, per-zedlet state, logs, and the vdev_id.conf symlink.
3473 log_must rm -f ${ZEDLET_DIR}/zed.rc
3474 log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
3475 log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
3476 log_must rm -f ${ZEDLET_DIR}/all-debug.sh
3477 log_must rm -f ${ZEDLET_DIR}/state
3479 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3480 for i in $EXTRA_ZEDLETS ; do
3481 log_must rm -f ${ZEDLET_DIR}/$i
# The matching 'done'/'fi' lines fall in a gap of this excerpt.
3484 log_must rm -f $ZED_LOG
3485 log_must rm -f $ZED_DEBUG_LOG
3486 log_must rm -f $VDEVID_CONF_ETC
3487 log_must rm -f $VDEVID_CONF
3492 # Check if ZED is currently running, if not start ZED.
# The function definition line falls in a gap of this excerpt.
3500 # ZEDLET_DIR=/var/tmp/zed
3501 if [[ ! -d $ZEDLET_DIR ]]; then
3502 log_must mkdir $ZEDLET_DIR
3505 # Verify the ZED is not already running.
3506 pgrep -x zed > /dev/null
3507 if (($? == 0)); then
3508 log_fail "ZED already running"
3511 log_note "Starting ZED"
3512 # run ZED in the background and redirect foreground logging
3513 # output to $ZED_LOG.
# Truncate (not delete) the debug log so the file exists with size 0
# before ZED starts appending to it.
3514 log_must truncate -s 0 $ZED_DEBUG_LOG
3515 log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
3516 "-s $ZEDLET_DIR/state 2>$ZED_LOG &"
# Body of the ZED stop routine: read the pid file, wait for the daemon
# to exit, then remove the pid file. The function definition line and
# the kill/sleep lines fall in gaps of this excerpt.
3530 log_note "Stopping ZED"
3531 if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
3532 zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
# Busy-wait until the process with that pid is gone.
3534 while ps -p $zedpid > /dev/null; do
3537 rm -f ${ZEDLET_DIR}/zed.pid
# Drain all outstanding zpool events so a test starts from a clean
# event queue.
3545 function zed_events_drain
# Repeat 'zpool events -c' until the queue reports zero events; a
# single clear can race with newly arriving events.
3547 while [ $(zpool events -H | wc -l) -ne 0 ]; do
3549 zpool events -c >/dev/null
3553 # Set a variable in zed.rc to something, un-commenting it in the process.
# The function definition line and the sed-command construction (which
# builds $cmd from $var/$val) fall in gaps of this excerpt.
3563 eval sed -i $cmd $ZEDLET_DIR/zed.rc
# Append the assignment so the variable is definitely present/enabled.
3566 echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3571 # Check if the provided device is actively in use as a swap device.
# $1 device path; returns the grep status (0 if in use).
3573 function is_swap_inuse
3577 if [[ -z $device ]] ; then
3578 log_note "No device specified."
# swapon(8) is the Linux query; 'swap -l' below is the illumos one —
# presumably dispatched on is_linux (dispatch lines fall in gaps of
# this excerpt).
3583 swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
3585 swap -l | grep -w $device > /dev/null 2>&1
3592 # Setup a swap device using the provided device.
# The function definition line and the platform-dispatch lines fall in
# gaps of this excerpt; mkswap/swapon is presumably the Linux path and
# 'swap -a' the illumos path — confirm against the full file.
3599 log_must eval "mkswap $swapdev > /dev/null 2>&1"
3600 log_must swapon $swapdev
3602 log_must swap -a $swapdev
3609 # Cleanup a swap device on the provided device.
# $1 device path; only disables swap if the device is actually in use.
3611 function swap_cleanup
3615 if is_swap_inuse $swapdev; then
# swapoff(8) is presumably the Linux path and 'swap -d' the illumos
# path (dispatch lines fall in gaps of this excerpt).
3617 log_must swapoff $swapdev
3619 log_must swap -d $swapdev
3627 # Set a global system tunable (64-bit value)
# $1 tunable name, $2 value. Delegates to set_tunable_impl; 'Z' is the
# mdb 64-bit write format used on the illumos path.
3632 function set_tunable64
3634 set_tunable_impl "$1" "$2" Z
3638 # Set a global system tunable (32-bit value)
# $1 tunable name, $2 value. Delegates to set_tunable_impl; 'W' is the
# mdb 32-bit write format used on the illumos path.
3643 function set_tunable32
3645 set_tunable_impl "$1" "$2" W
# Common implementation for set_tunable32/64.
# $1 tunable name, $2 value, $3 mdb write format (W/Z), $4 module
# (defaults to zfs). Returns 1 on missing args or unwritable tunable.
3648 function set_tunable_impl
3650 typeset tunable="$1"
# The 'typeset value="$2"' line falls in a gap of this excerpt.
3652 typeset mdb_cmd="$3"
3653 typeset module="${4:-zfs}"
3655 [[ -z "$tunable" ]] && return 1
3656 [[ -z "$value" ]] && return 1
3657 [[ -z "$mdb_cmd" ]] && return 1
# Linux path: write the value via the module's sysfs parameter file.
3661 typeset zfs_tunables="/sys/module/$module/parameters"
3662 [[ -w "$zfs_tunables/$tunable" ]] || return 1
3663 echo -n "$value" > "$zfs_tunables/$tunable"
# illumos path: patch the kernel variable in place with mdb -kw.
# NOTE(review): '-eq' is an arithmetic comparison; non-numeric strings
# both evaluate to 0, so this guard is always true. '==' was probably
# intended — confirm and fix upstream.
3667 [[ "$module" -eq "zfs" ]] || return 1
3668 echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
3675 # Get a global system tunable
# $1 tunable name; prints the current value on stdout.
3679 function get_tunable
3681 get_tunable_impl "$1"
# Common implementation for get_tunable.
# $1 tunable name, $2 module (defaults to zfs). Prints the value on
# stdout; returns 1 on missing args or missing tunable.
3684 function get_tunable_impl
3686 typeset tunable="$1"
3687 typeset module="${2:-zfs}"
3689 [[ -z "$tunable" ]] && return 1
# Linux path: read the value from the module's sysfs parameter file.
3693 typeset zfs_tunables="/sys/module/$module/parameters"
3694 [[ -f "$zfs_tunables/$tunable" ]] || return 1
3695 cat $zfs_tunables/$tunable
# illumos path (mdb read lines fall in a gap of this excerpt).
# NOTE(review): '-eq' is an arithmetic comparison; non-numeric strings
# both evaluate to 0, so this guard is always true. '==' was probably
# intended — confirm and fix upstream.
3699 [[ "$module" -eq "zfs" ]] || return 1
3707 # Get actual devices used by the pool (i.e. linux sdb1 not sdb).
3709 function get_pool_devices #testpool #devdir
3716 out=$(zpool status -P $testpool |grep ${devdir} | awk '{print $1}')
3717 out=$(echo $out | sed -e "s|${devdir}/||g" | tr '\n' ' ')