#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# Copyright (c) 2012, 2015 by Delphix. All rights reserved.
#
. ${STF_TOOLS}/include/logapi.shlib
#
# Determine if this is a Linux test system
#
# Return 0 if platform Linux, 1 if otherwise
#
function is_linux
{
	if [[ $($UNAME -o) == "GNU/Linux" ]]; then
		return 0
	else
		return 1
	fi
}
#
# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
#
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			if [[ "$1" == "/"* ]] ; then
				for out in $($ZFS mount | $AWK '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $($ZFS mount | $AWK '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
			;;
		ufs|nfs)
			out=$($DF -F $fstype $1 2>/dev/null)
			ret=$?
			(($ret != 0)) && return $ret

			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
			;;
		ext2)
			out=$($DF -t $fstype $1 2>/dev/null)
			return $?
			;;
		zvol)
			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
				link=$(readlink -f $ZVOL_DEVDIR/$1)
				[[ -n "$link" ]] && \
					$MOUNT | $GREP -q "^$link" && \
						return 0
			fi
			;;
	esac

	return 1
}
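#
# A minimal usage sketch for ismounted; the dataset name "tank/fs" below is
# hypothetical, not one the suite defines:
#
#	if ismounted tank/fs; then
#		log_note "tank/fs is mounted"
#	fi
#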
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
function mounted
{
	ismounted $1 $2
	(($? == 0)) && return 0
	return 1
}

# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
function unmounted
{
	ismounted $1 $2
	(($? == 1)) && return 0
	return 1
}

# split line on ","
#
# $1 - line to split
#
function splitline
{
	$ECHO $1 | $SED "s/,/ /g"
}

function default_setup
{
	default_setup_noexit "$@"

	log_pass
}
#
# Given a list of disks, setup storage pools and datasets.
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3

	if is_global_zone ; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
		log_must $ZPOOL create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	$RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	$MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		$RM -rf $TESTDIR1 || \
			log_unresolved Could not remove $TESTDIR1
		$MKDIR -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		log_must $ZFS create $TESTPOOL/$TESTCTR
		log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
		log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must $ZFS set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			log_must $ZFS create $TESTPOOL/$TESTVOL
		fi
	fi
}
#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true"
}

#
# Given a list of disks, setup a storage pool, file system
# and a volume.
#
function default_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "" "true"
}

#
# Given a list of disks, setup a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true" "true"
}
#
# Create a snapshot on a filesystem or volume. By default, create a snapshot
# on the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must $ZFS snapshot $fs_vol@$snap
}
#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	[[ -z $snap ]] && \
		log_fail "Snapshot name is undefined."
	[[ -z $clone ]] && \
		log_fail "Clone name is undefined."

	log_must $ZFS clone $snap $clone
}
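#
# A usage sketch combining create_snapshot and create_clone; it relies only
# on the suite defaults ($TESTPOOL/$TESTFS@$TESTSNAP and $TESTPOOL/$TESTCLONE):
#
#	create_snapshot		# snapshots $TESTFS at $TESTSNAP
#	create_clone		# clones the default snapshot
#	destroy_clone		# cleanup in reverse order
#	destroy_snapshot
#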
function default_mirror_setup
{
	default_mirror_setup_noexit $1 $2 $3

	log_pass
}

#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#   $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	[[ -z $primary ]] && \
		log_fail "$func: No parameters passed"
	[[ -z $secondary ]] && \
		log_fail "$func: No secondary partition passed"
	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	log_must $ZPOOL create -f $TESTPOOL mirror $@
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
# create a number of mirrors.
# We create a number($1) of 2 way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted
# @parameters: $1 the number of mirrors to create
#   $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	while ((nmirrors > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
		log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}
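#
# A usage sketch, assuming $DISKS holds at least four device names; it builds
# two 2-way mirror pools and tears them down again:
#
#	set -A dsks $DISKS
#	setup_mirrors 2 ${dsks[0]} ${dsks[1]} ${dsks[2]} ${dsks[3]}
#	destroy_mirrors
#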
#
# create a number of raidz pools.
# We create a number($1) of 2-disk raidz pools using the pairs of disks named
# on the command line. These pools are *not* mounted
# @parameters: $1 the number of pools to create
#   $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	while ((nraidzs > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
		log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}

#
# Destroy the configured testpool mirrors.
# the mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	default_cleanup_noexit

	log_pass
}
#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.
#
function default_cleanup
{
	default_cleanup_noexit

	log_pass
}
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone ; then
		$ZFS unmount -a > /dev/null 2>&1
		[[ -z "$KEEP" ]] && KEEP="rpool"
		exclude=`eval $ECHO \"'(${KEEP})'\"`
		ALL_POOLS=$($ZPOOL list -H -o name \
		    | $GREP -v "$NO_POOLS" | $EGREP -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do so.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				ALL_POOLS=$($ZPOOL list -H -o name \
				    | $GREP -v "$NO_POOLS" \
				    | $EGREP -v "$exclude")
			done
		done

		$ZFS mount -a
	else
		typeset fs=""
		for fs in $($ZFS list -H -o name \
		    | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must $ZFS destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $($ZFS list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must $RM -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $($ZFS list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must $ZFS set reservation=none $fs
				log_must $ZFS set recordsize=128K $fs
				log_must $ZFS set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must $ZFS set checksum=on $fs
				fi
				log_must $ZFS set compression=off $fs
				log_must $ZFS set atime=on $fs
				log_must $ZFS set devices=off $fs
				log_must $ZFS set exec=on $fs
				log_must $ZFS set setuid=on $fs
				log_must $ZFS set readonly=off $fs
				log_must $ZFS set snapdir=hidden $fs
				log_must $ZFS set aclmode=groupmask $fs
				log_must $ZFS set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must $RM -rf $TESTDIR
}
#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
	if ! is_global_zone ; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
		log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
		log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
		log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
		log_must $RM -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
#
# Common function used to cleanup a snapshot of a file system or volume.
# Defaults to deleting the file system's snapshot.
#
# $1 snapshot name
#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# For an unmounted snapshot, the value returned by 'get_prop' is not
	# its real mountpoint. So first check that this snapshot is actually
	# mounted on the current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must $ZFS destroy $snap
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must $RM -rf $mtpt
}
#
# Common function used to cleanup a clone.
#
# $1 clone name
#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
	fi

	# Same reasoning as in destroy_snapshot
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must $ZFS destroy $clone
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must $RM -rf $mtpt
}
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name
#
function snapexists
{
	$ZFS list -H -t snapshot "$1" > /dev/null 2>&1
	return $?
}
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @parameters: $1 dataset whose property is being set
#	$2 property to set
#	$3 value to set property to
# @returns:
#	0 if the property could be set.
#	non-zero otherwise.
# @uses: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	output=$($ZFS set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}
#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#	0 if the dataset has been altered.
#	1 if no pool name was passed in.
#	2 if the dataset could not be found.
#	3 if the dataset could not have its properties set.
#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	for confset in $($ZFS list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
	(($@)) || log_fail "$@"
}
#
# Function to format partition size of a disk
# Given a disk cxtxdx reduces all partitions
# to 0 size
#
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset i

	if is_linux; then
		log_must $FORMAT $DEV_DSKDIR/$diskname -s -- mklabel gpt
	else
		for i in 0 1 3 4 5 6 7
		do
			set_partition $i "" 0mb $diskname
		done
	fi
}
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements eg. 100mb 3gb
#
# NOTE: This entire interface is problematic for the Linux parted utility
# which requires the end of the partition to be specified. It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."

	if is_linux; then
		typeset size_mb=${size%%[mMgG]}

		size_mb=${size_mb%%[mMgG][bB]}
		if [[ ${size:1:1} == 'g' ]]; then
			((size_mb = size_mb * 1024))
		fi

		# Create GPT partition table when setting slice 0 or
		# when the device doesn't already contain a GPT label.
		$FORMAT $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
		typeset ret_val=$?
		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
			log_must $FORMAT $DEV_DSKDIR/$disk -s -- mklabel gpt
		fi

		# When no start is given align on the first cylinder.
		if [[ -z "$start" ]]; then
			start=1
		fi

		# Determine the cylinder size for the device and using
		# that calculate the end offset in cylinders.
		typeset -i cly_size_kb=0
		cly_size_kb=$($FORMAT -m $DEV_DSKDIR/$disk -s -- \
		    unit cyl print | $HEAD -3 | $TAIL -1 | \
		    $AWK -F '[:k.]' '{print $4}')
		((end = (size_mb * 1024 / cly_size_kb) + start))

		log_must $FORMAT $DEV_DSKDIR/$disk -s -- \
		    mkpart part$slicenum ${start}cyl ${end}cyl

		$BLOCKDEV --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
		block_device_wait
	else
		typeset format_file=/var/tmp/format_in.$$

		$ECHO "partition" >$format_file
		$ECHO "$slicenum" >> $format_file
		$ECHO "" >> $format_file
		$ECHO "" >> $format_file
		$ECHO "$start" >> $format_file
		$ECHO "$size" >> $format_file
		$ECHO "label" >> $format_file
		$ECHO "" >> $format_file
		$ECHO "q" >> $format_file
		$ECHO "q" >> $format_file

		$FORMAT -e -s -d $disk -f $format_file
	fi
	typeset ret_val=$?
	$RM -f $format_file
	[[ $ret_val -ne 0 ]] && \
	    log_fail "Unable to format $disk slice $slicenum to $size"
	return 0
}
#
# Get the end cyl of the given slice
#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	if is_linux; then
		endcyl=$($FORMAT -s $DEV_DSKDIR/$disk -- unit cyl print | \
		    $GREP "part${slice}" | \
		    $AWK '{print $3}' | \
		    $SED 's,cyl,,')
		((endcyl = (endcyl + 1)))
	else
		disk=${disk#/dev/dsk/}
		disk=${disk#/dev/rdsk/}
		disk=${disk%s*}

		typeset -i ratio=0
		ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
		    $GREP "sectors\/cylinder" | \
		    $AWK '{print $2}')

		if ((ratio == 0)); then
			return
		fi

		typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
		    $NAWK -v token="$slice" '{if ($1==token) print $6}')

		((endcyl = (endcyl + 1) / ratio))
	fi

	$ECHO $endcyl
}
#
# Given a size, disk and total slice number, this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	while ((i < $total_slices)); do
		set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i + 1))
	done
}
#
# This function continues to write to a filenum number of files into dirnum
# number of directories until either $FILE_WRITE returns an error or the
# maximum number of files per directory have been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
#		non 0 on error
#
# Where :
#	destdir:    is the directory where everything is to be created under
#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
#	filenum:    the maximum number of files per subdirectory
#	bytes:	    number of bytes to write
#	num_writes: number of times to write out bytes
#	data:	    the data that will be written
#
#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
#
#	Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1
	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	log_must $MKDIR -p $destdir/$idirnum
	while (($odirnum > 0)); do
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		$FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		if (($retval != 0)); then
			odirnum=0
		fi
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must $MKDIR -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
#
# Simple function to get the specified property. If unable to
# get the property then exits.
#
# Note property is in 'parsable' format (-p)
#
function get_prop # property dataset
{
	typeset prop_val
	typeset prop=$1
	typeset dataset=$2

	prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for dataset " \
		    "$dataset"
		return 1
	fi

	$ECHO $prop_val
	return 0
}
#
# Simple function to get the specified property of pool. If unable to
# get the property then exits.
#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
		    $AWK '{print $3}')
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			    "$pool"
			return 1
		fi
	else
		log_note "Pool $pool does not exist."
		return 1
	fi

	$ECHO $prop_val
	return 0
}
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name
#
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	$ZPOOL get name "$pool" > /dev/null 2>&1
	return $?
}

# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n  dataset name
#
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	while (($# > 0)); do
		$ZFS get name $1 > /dev/null 2>&1 || \
			return $?
		shift
	done

	return 0
}

# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n  dataset name
#
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	while (($# > 0)); do
		$ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null \
		    2>&1 && return 1
		shift
	done

	return 0
}
#
# Given a mountpoint, or a dataset name, determine if it is shared.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	if is_linux; then
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi

	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	for mtpt in `$SHARE | $AWK '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	typeset stat=$($SVCS -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
#
# Given a mountpoint, determine if it is not shared.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	typeset fs=$1

	if is_linux; then
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi

	is_shared $fs
	if (($? == 0)); then
		return 1
	fi

	return 0
}

#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
	typeset fs=$1

	if is_linux; then
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi

	is_shared $fs
	if (($? == 0)); then
		log_must $ZFS unshare $fs
	fi

	return 0
}
#
# Check NFS server status and trigger it online.
#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	if is_linux; then
		log_unsupported "Currently unsupported by the test framework"
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only a real share operation can bring the NFS server
		# online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must $RM -rf $dummy
		fi

		log_must $MKDIR $dummy
		log_must $SHARE $dummy

		#
		# Wait for the fmri to reach its final status. While in
		# transition an asterisk (*) is appended to the instance
		# status, and unsharing then would flip the status back
		# to 'DIS'.
		#
		# Wait for at least 1 second.
		#
		log_must $SLEEP 1
		timeout=10
		while [[ timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must $SLEEP 1
			((timeout = timeout - 1))
		done

		log_must $UNSHARE $dummy
		log_must $RM -rf $dummy
	fi

	log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
}
#
# To verify whether calling process is in global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
	typeset cur_zone=$($ZONENAME 2>/dev/null)
	if [[ $cur_zone != "global" ]]; then
		return 1
	fi
	return 0
}
#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both"(no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
	typeset limit=$1

	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		reexport_pool
	fi

	return 0
}
# Return 0 if created successfully or the pool already exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
#
function create_pool #pool devs_list
{
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && $RM -rf /$pool
		log_must $ZPOOL create -f $pool $@
	fi

	return 0
}
# Return 0 if the pool is destroyed successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.
#
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			while [[ $ret -ne 0 ]]; do
				$must $ZPOOL destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				[[ count++ -ge 7 ]] && must=log_must
				$SLEEP $wait_time
			done

			[[ -d $mtpt ]] && \
				log_must $RM -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
#
# First, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name
# $2 zone root directory prefix
# $3 zone ip
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 containers within it
	#
	[[ -d /$pool_name ]] && $RM -rf /$pool_name
	log_must $ZPOOL create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must $ZFS create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must $ZFS create -V 1g $pool_name/zone_zvol

	#
	# If current system supports slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must $MKFILE 100M $sdevs
		log_must $ZPOOL add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
	# log_must $ZFS create $pool_name/zfs_filesystem
	# log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
		log_must $RM -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must $MKDIR -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	$ECHO "create" > $zone_conf
	$ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
	$ECHO "set autoboot=true" >> $zone_conf
	i=0
	while ((i < cntctr)); do
		$ECHO "add dataset" >> $zone_conf
		$ECHO "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		$ECHO "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=$ZVOL_DEVDIR/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# $ECHO "add fs" >> $zone_conf
	# $ECHO "set type=zfs" >> $zone_conf
	# $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
	# $ECHO "end" >> $zone_conf

	$ECHO "verify" >> $zone_conf
	$ECHO "commit" >> $zone_conf
	log_must $ZONECFG -z $zone_name -f $zone_conf
	log_must $RM -f $zone_conf

	# Install the zone
	$ZONEADM -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: $ZONEADM -z $zone_name install"
	else
		log_fail "FAIL: $ZONEADM -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	$ECHO "system_locale=C" > $sysidcfg
	$ECHO "terminal=dtterm" >> $sysidcfg
	$ECHO "network_interface=primary {" >> $sysidcfg
	$ECHO "hostname=$zone_name" >> $sysidcfg
	$ECHO "}" >> $sysidcfg
	$ECHO "name_service=NONE" >> $sysidcfg
	$ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
	$ECHO "security_policy=NONE" >> $sysidcfg
	$ECHO "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must $ZONEADM -z $zone_name boot
}
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must $ZFS mount $TESTPOOL
			fi
		else
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval $ZFS mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
#
# Verify a given disk is online or offline
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	$ZPOOL status -v $pool | grep "$disk" \
	    | grep -i "$state" > /dev/null 2>&1

	return $?
}
#
# Get the mountpoint of snapshot
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if [[ $dataset != *@* ]]; then
		log_fail "Invalid snapshot name '$dataset'."
	fi

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	if [[ -z $fs || -z $snap ]]; then
		log_fail "Invalid snapshot name '$dataset'."
	fi

	$ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling $ZDB to verify filesystem '$filesys'"
	$ZFS unmount -a > /dev/null 2>&1
	log_must $ZPOOL export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must $ZPOOL import $search_path $pool

	$ZDB -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: $ZDB -cudi $filesys"
		$CAT $zdbout
		log_fail "$ZDB detected errors with: '$filesys'"
	fi

	log_must $ZFS mount -a
	log_must $RM -rf $zdbout
}
#
# Given a pool, list all disks in the pool
#
function get_disklist # pool
{
	typeset disklist=""

	disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR > 4) {print $1}' | \
	    $GREP -v "\-\-\-\-\-" | \
	    $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")

	$ECHO $disklist
}
#
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want : we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
#
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must $SLEEP $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		$PS -p $pid > /dev/null 2>&1
		if (($? == 0)); then
			log_must $KILL -USR1 $pid
		fi
	done
}
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "spares")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Verify a given slog disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "logs")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Verify a given vdev disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
#
# Check the output of 'zpool status -v <pool>'
# to see if the content of <token> contains the specified <keyword>.
#
# Return 0 if it does, 1 otherwise
#
function check_pool_status # pool token keyword
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3

	$ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
		($1==token) {print $0}' \
	    | $GREP -i "$keyword" > /dev/null 2>&1

	return $?
}
#
# The following 5 functions are instances of check_pool_status():
#	is_pool_resilvering - check if the pool resilver is in progress
#	is_pool_resilvered - check if the pool resilver is completed
#	is_pool_scrubbing - check if the pool scrub is in progress
#	is_pool_scrubbed - check if the pool scrub is completed
#	is_pool_scrub_stopped - check if the pool scrub is stopped
#
function is_pool_resilvering #pool
{
	check_pool_status "$1" "scan" "resilver in progress since "
	return $?
}

function is_pool_resilvered #pool
{
	check_pool_status "$1" "scan" "resilvered "
	return $?
}

function is_pool_scrubbing #pool
{
	check_pool_status "$1" "scan" "scrub in progress since "
	return $?
}

function is_pool_scrubbed #pool
{
	check_pool_status "$1" "scan" "scrub repaired"
	return $?
}

function is_pool_scrub_stopped #pool
{
	check_pool_status "$1" "scan" "scrub canceled"
	return $?
}
#
# Use create_pool()/destroy_pool() to clean up the information on
# the given disks to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	create_pool $pool $@
	destroy_pool $pool

	return 0
}
#
# Verify the rsh connectivity to each remote host in RHOSTS.
#
# Return 0 if remote host is accessible; otherwise 1.
# $1 remote host name
# $2 username
#
function verify_rsh_connect #rhost, username
{
	typeset rhost=$1
	typeset username=$2
	typeset rsh_cmd="$RSH -n"
	typeset cur_user=

	$GETENT hosts $rhost >/dev/null 2>&1
	if (($? != 0)); then
		log_note "$rhost cannot be found from" \
			"administrative database."
		return 1
	fi

	$PING $rhost 3 >/dev/null 2>&1
	if (($? != 0)); then
		log_note "$rhost is not reachable."
		return 1
	fi

	if ((${#username} != 0)); then
		rsh_cmd="$rsh_cmd -l $username"
		cur_user="given user \"$username\""
	else
		cur_user="current user \"`$LOGNAME`\""
	fi

	if ! $rsh_cmd $rhost $TRUE; then
		log_note "$RSH to $rhost is not accessible" \
			"with $cur_user."
		return 1
	fi

	return 0
}
#
# Verify the remote host connection via rsh after rebooting
# $1 remote host
#
function verify_remote
{
	rhost=$1

	#
	# The following loop waits for the remote system to reboot.
	# Each iteration waits for 150 seconds, and there are 5
	# iterations in total, so the total timeout for the reboot
	# is about 12.5 minutes. This is an approximate number.
	#
	typeset -i count=0
	while ! verify_rsh_connect $rhost; do
		sleep 150
		((count = count + 1))
		if ((count > 5)); then
			return 1
		fi
	done
	return 0
}
#
# Replacement function for /usr/bin/rsh. This function wraps
# /usr/bin/rsh and additionally returns the execution status of the
# last command.
#
# $1 username passed down to the -l option of /usr/bin/rsh
# $2 remote machine hostname
# $3... command string
#
function rsh_status
{
	typeset ruser=$1
	typeset rhost=$2
	typeset -i ret=0
	typeset cmd_str=""
	typeset rsh_str=""

	shift; shift
	cmd_str="$@"

	err_file=/tmp/${rhost}.$$.err
	if ((${#ruser} == 0)); then
		rsh_str="$RSH -n"
	else
		rsh_str="$RSH -n -l $ruser"
	fi

	$rsh_str $rhost /bin/ksh -c "'$cmd_str; \
		print -u 2 \"status=\$?\"'" \
		>/dev/null 2>$err_file
	ret=$?
	if (($ret != 0)); then
		$CAT $err_file
		$RM -f $std_file $err_file
		log_fail "$RSH itself failed with exit code $ret..."
	fi

	ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
		$CUT -d= -f2)
	(($ret != 0)) && $CAT $err_file >&2

	$RM -f $err_file >/dev/null 2>&1
	return $ret
}
#
# Get the SUNWstc-fs-zfs package installation path in a remote host
# $1 remote host name
#
function get_remote_pkgpath
{
	typeset rhost=$1
	typeset pkgpath=""

	pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
			$CUT -d: -f2")

	$ECHO $pkgpath
}
#
# A function to find and locate free disks on a system or from given
# disks as the parameter. It works by locating disks that are in use
# as swap devices and dump devices, and also disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#
function find_disks
{
	# Trust provided list, no attempt is made to locate unused devices.
	if is_linux; then
		$ECHO "$@"
		return
	fi

	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	$SWAP -l > $sfi
	$DUMPADM > $dmpi 2>/dev/null

	# write an awk script that can process the output of format
	# to produce a list of disks we know about. Note that we have
	# to escape "$2" so that the shell doesn't interpret it while
	# we're creating the awk script.
	# -------------------
	$CAT > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
	BEGIN { FS="."; }

	/^Specify disk/{
		searchdisks=0;
	}

	{
		if (searchdisks && \$2 !~ "^$"){
			split(\$2,arr," ");
			print arr[1];
		}
	}

	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
EOF
	#---------------------

	$CHMOD 755 /tmp/find_disks.awk
	disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
	$RM /tmp/find_disks.awk

	unused=""
	for disk in $disks; do
		# Check for mounted
		$GREP "${disk}[sp]" /etc/mnttab >/dev/null
		(($? == 0)) && continue
		# Check for swap
		$GREP "${disk}[sp]" $sfi >/dev/null
		(($? == 0)) && continue
		# check for dump device
		$GREP "${disk}[sp]" $dmpi >/dev/null
		(($? == 0)) && continue
		# check to see if this disk hasn't been explicitly excluded
		# by a user-set environment variable
		$ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
		(($? == 0)) && continue
		unused_candidates="$unused_candidates $disk"
	done
	$RM $sfi
	$RM $dmpi

	# now just check to see if those disks do actually exist
	# by looking for a device pointing to the first slice in
	# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if [ -b $DEV_DSKDIR/${disk}s0 ]; then
			if [ $count -lt $max_finddisksnum ]; then
				unused="$unused $disk"
				# do not impose limit if $@ is provided
				[[ -z $@ ]] && ((count = count + 1))
			fi
		fi
	done

	# finally, return our disk list
	$ECHO $unused
}
#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	if ((${#gname} == 0 || ${#uname} == 0)); then
		log_fail "group name or user name are not defined."
	fi

	log_must $USERADD -g $gname -d $basedir/$uname -m $uname

	# Add new users to the same group as the command line utils.
	# This allows them to be run out of the original user's home
	# directory as long as it is permissioned to be group readable.
	if is_linux; then
		cmd_group=$(stat --format="%G" $ZFS)
		log_must $USERMOD -a -G $cmd_group $uname
	fi

	return 0
}
#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	if ((${#user} == 0)); then
		log_fail "login name is necessary."
	fi

	if $ID $user > /dev/null 2>&1; then
		log_must $USERDEL $user
	fi

	[[ -d $basedir/$user ]] && $RM -fr $basedir/$user

	return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	# Assign 100 as the base gid, a larger value is selected for
	# Linux because for many distributions 1000 and under are reserved.
	if is_linux; then
		while true; do
			$GROUPADD $group > /dev/null 2>&1
			typeset -i ret=$?
			case $ret in
				0) return 0 ;;
				*) return 1 ;;
			esac
		done
	else
		typeset -i gid=100
		while true; do
			$GROUPADD -g $gid $group > /dev/null 2>&1
			typeset -i ret=$?
			case $ret in
				0) return 0 ;;
				# The gid is not unique
				4) ((gid += 1)) ;;
				*) return 1 ;;
			esac
		done
	fi
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
	typeset grp=$1
	if ((${#grp} == 0)); then
		log_fail "group name is necessary."
	fi

	if is_linux; then
		$GETENT group $grp > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			# Group does not exist.
			2) return 0 ;;
			# Name already exists as a group name
			0) log_must $GROUPDEL $grp ;;
			*) return 1 ;;
		esac
	else
		$GROUPMOD -n $grp $grp > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			# Group does not exist.
			6) return 0 ;;
			# Name already exists as a group name
			9) log_must $GROUPDEL $grp ;;
			*) return 1 ;;
		esac
	fi

	return 0
}
#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name

	typeset pool=""
	typeset DONT_DESTROY=""

	# We check that by deleting the $1 pool, we're not
	# going to pull the rug out from other pools. Do this
	# by looking at all other pools, ensuring that they
	# aren't built from files or zvols contained in this pool.

	for pool in $($ZPOOL list -H -o name)
	do
		ALTMOUNTPOOL=""

		# this is a list of the top-level directories in each of the
		# files that make up the path to the files the pool is based on
		FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
			$AWK '{print $1}')

		# this is a list of the zvols that make up the pool
		ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "$ZVOL_DEVDIR/$1$" \
		    | $AWK '{print $1}')

		# also want to determine if it's a file-based pool using an
		# alternate mountpoint...
		POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
					$GREP / | $AWK '{print $1}' | \
					$AWK -F/ '{print $2}' | $GREP -v "dev")

		for pooldir in $POOL_FILE_DIRS
		do
			OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
					$GREP "${pooldir}$" | $AWK '{print $1}')

			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
		done

		if [ ! -z "$ZVOLPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ZVOLPOOL on $1"
		fi

		if [ ! -z "$FILEPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $FILEPOOL on $1"
		fi

		if [ ! -z "$ALTMOUNTPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
		fi
	done

	if [ -z "${DONT_DESTROY}" ]
	then
		return 0
	else
		log_note "Warning: it is not safe to destroy $1!"
		return 1
	fi
}
#
# Get the available ZFS compression options
# $1 option type zfs_set|zfs_compress
#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"

	if [[ $1 == "zfs_compress" ]] ; then
		COMPRESS_OPTS="on lzjb"
	elif [[ $1 == "zfs_set" ]] ; then
		COMPRESS_OPTS="on off lzjb"
	fi
	typeset valid_opts="$COMPRESS_OPTS"
	$ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
	if [[ $? -eq 0 ]]; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	$ECHO "$valid_opts"
}
#
# Verify that zfs operations with the -p option work as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must $ZFS destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot $ZFS $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must $ZFS $ops -p $dataset $newdataset

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redoing the operation should
	# still return zero
	if [[ $ops != "rename" ]]; then
		log_must $ZFS $ops -p $dataset $newdataset
	fi

	return 0
}
#
# Get configuration of pool
# $1 pool name
# $2 config name
#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
		    '{print $2}')
	else
		value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		# strip any surrounding single quotes
		value=${value#\'}
		value=${value%\'}
	fi
	echo $value

	return 0
}
#
# Private function. Randomly select one of the items from the arguments.
#
# $1 count
# $2-n string
#
function _random_get
{
	typeset cnt=$1
	shift

	typeset str="$@"
	typeset -i ind
	((ind = RANDOM % cnt + 1))

	typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
	$ECHO $ret
}

#
# Randomly select one item from the arguments, including the NONE string
#
function random_get_with_non
{
	typeset -i cnt=$#
	((cnt += 1))

	_random_get "$cnt" "$@"
}

#
# Randomly select one item from the arguments, which don't include the
# NONE string
#
function random_get
{
	_random_get "$#" "$@"
}
#
# Detect if the current system supports slog
#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b

	$MKDIR -p $dir
	$MKFILE 64M $vdev $sdev

	typeset -i ret=0
	if ! $ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1; then
		ret=1
	fi
	$RM -r $dir

	return $ret
}
#
# This function generates a dataset name with a specific length
# $1, the length of the name
# $2, the base string to construct the name
#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i iter=0
	typeset l_name=""

	if ((len % baselen == 0)); then
		((iter = len / baselen))
	else
		((iter = len / baselen + 1))
	fi
	while ((iter > 0)); do
		l_name="${l_name}$basestr"

		((iter -= 1))
	done

	$ECHO $l_name
}
#
# Get cksum tuple of dataset
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
	typeset cksum
	cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
	    | $AWK -F= '{print $7}')
	$ECHO $cksum
}

#
# Get cksum of file
# $1 file path
#
function checksum
{
	typeset cksum
	cksum=$($CKSUM $1 | $AWK '{print $1}')
	$ECHO $cksum
}
#
# Get the given disk/slice state from the specific field of the pool
#
function get_device_state #pool disk field("", "spares","logs")
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset field=${3:-$pool}

	state=$($ZPOOL status -v "$pool" 2>/dev/null | \
		$NAWK -v device=$disk -v pool=$pool -v field=$field \
		'BEGIN {startconfig=0; startfield=0; }
		/config:/ {startconfig=1}
		(startconfig==1) && ($1==field) {startfield=1; next;}
		(startfield==1) && ($1==device) {print $2; exit;}
		(startfield==1) &&
		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
	echo $state
}
#
# print the given directory filesystem type
#
# $1 directory name
#
function get_fstype
{
	typeset dir=$1

	if [[ -z $dir ]]; then
		log_fail "Usage: get_fstype <directory>"
	fi

	$DF -n $dir | $AWK '{print $3}'
}
#
# Given a disk, label it to VTOC regardless of what label was on the disk
# $1 disk
#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$($UNAME -p)

	if is_linux; then
		log_note "Currently unsupported by the test framework"
		return 1
	fi

	if [[ $arch == "i386" ]]; then
		$ECHO "label" > $label_file
		$ECHO "0" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "q" >> $label_file
		$ECHO "q" >> $label_file

		$FDISK -B $disk >/dev/null 2>&1
		# wait a while for fdisk to finish
		$SLEEP 60
	elif [[ $arch == "sparc" ]]; then
		$ECHO "label" > $label_file
		$ECHO "0" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	$FORMAT -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	$RM -f $label_file
	#
	# wait for the format to finish
	#
	$SLEEP 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
#
# check if the system was installed as zfsroot or not
# return: 0 if true, otherwise 1
#
function is_zfsroot
{
	$DF -n / | $GREP zfs > /dev/null 2>&1
}

#
# get the root filesystem name if it's a zfsroot system.
#
# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""
	rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Cannot get rootfs"
	fi
	$ZFS list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		$ECHO $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}

#
# get the rootfs's pool name
# return:
#	rootpool name
#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""
	rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Cannot get rootpool"
	fi
	$ZFS list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		rootpool=`$ECHO $rootfs | awk -F\/ '{print $1}'`
		$ECHO $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
#
# Get the sub string from specified source string
#
# $1 source string
# $2 start position. Count from 1
# $3 offset
#
function get_substr #src_str pos offset
{
	typeset pos offset

	$ECHO $1 | \
		$NAWK -v pos=$2 -v offset=$3 '{print substr($0, pos, offset)}'
}
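#
# A usage sketch; extracts "bcd" (3 characters starting at position 2)
# from the literal string "abcdef":
#
#	get_substr abcdef 2 3
#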
#
# Check if the given device is physical device
#
function is_physical_device #device
{
	typeset device=${1#$DEV_DSKDIR}
	device=${device#$DEV_RDSKDIR}

	if is_linux; then
		[[ -b "$DEV_DSKDIR/$device" ]] && \
		[[ -f /sys/module/loop/parameters/max_part ]]
		return $?
	else
		$ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > \
			/dev/null 2>&1
		return $?
	fi
}
#
# Get the directory path of given device
#
function get_device_dir #device
{
	typeset device=$1

	if ! $(is_physical_device $device) ; then
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		if [[ -b "$DEV_DSKDIR/$device" ]]; then
			device="$DEV_DSKDIR"
		fi
		$ECHO $device
	else
		$ECHO "$DEV_DSKDIR"
	fi
}
#
# Get the package name
#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
}
#
# Get the number of words in a string separated by white space
#
function get_word_count
{
	$ECHO $@ | $WC -w
}

#
# To verify that the required number of disks is given
#
function verify_disk_count
{
	typeset -i min=${2:-1}

	typeset -i count=$(get_word_count "$1")

	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
			" You specified $count disk(s)"
	fi
}
function ds_is_volume
{
	typeset type=$(get_prop type $1)
	[[ $type = "volume" ]] && return 0
	return 1
}

function ds_is_filesystem
{
	typeset type=$(get_prop type $1)
	[[ $type = "filesystem" ]] && return 0
	return 1
}

function ds_is_snapshot
{
	typeset type=$(get_prop type $1)
	[[ $type = "snapshot" ]] && return 0
	return 1
}
#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
	$SVCS -H -o state labeld 2>/dev/null | $GREP "enabled"
	if (($? != 0)); then
		return 1
	else
		return 0
	fi
}
#
# Utility function to determine if a system has multiple cpus.
#
function is_mp
{
	(($($PSRINFO | $WC -l) > 1))

	return $?
}

function get_cpu_freq
{
	if is_linux; then
		lscpu | $AWK '/CPU MHz/ { print $3 }'
	else
		$PSRINFO -v 0 | $AWK '/processor operates at/ {print $6}'
	fi
}
#
# Run the given command as the user provided.
#
function user_run
{
	typeset user=$1
	shift

	log_note "user:$user $@"
	eval \$SU \$user -c \"$@\" > /tmp/out 2>/tmp/err
	return $?
}
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	typeset tmpfile=$($MKTEMP)
	$ZPOOL list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		$GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		[[ $? -ne 0 ]] && return 1
	done

	$RM -f $tmpfile

	return 0
}
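#
# A usage sketch, assuming $DISKS holds the devices $TESTPOOL was built from:
#
#	log_must vdevs_in_pool $TESTPOOL $DISKS
#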
#
# Wait for newly created block devices to have their minors created.
#
function block_device_wait
{
	if is_linux; then
		$UDEVADM trigger
		$UDEVADM settle
	fi
}