4 # The contents of this file are subject to the terms of the
5 # Common Development and Distribution License (the "License").
6 # You may not use this file except in compliance with the License.
8 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 # or https://opensource.org/licenses/CDDL-1.0.
10 # See the License for the specific language governing permissions
11 # and limitations under the License.
13 # When distributing Covered Code, include this CDDL HEADER in each
14 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 # If applicable, add the following below this CDDL HEADER, with the
16 # fields enclosed by brackets "[]" replaced with your own identifying
17 # information: Portions Copyright [yyyy] [name of copyright owner]
23 # Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
24 # Copyright (c) 2012, 2020, Delphix. All rights reserved.
25 # Copyright (c) 2017, Tim Chase. All rights reserved.
26 # Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
27 # Copyright (c) 2017, Lawrence Livermore National Security LLC.
28 # Copyright (c) 2017, Datto Inc. All rights reserved.
29 # Copyright (c) 2017, Open-E Inc. All rights reserved.
30 # Copyright (c) 2021, The FreeBSD Foundation.
31 # Use is subject to license terms.
34 . ${STF_SUITE}/include/tunables.cfg
36 . ${STF_TOOLS}/include/logapi.shlib
37 . ${STF_SUITE}/include/math.shlib
38 . ${STF_SUITE}/include/blkdev.shlib
40 # On AlmaLinux 9 we will see $PWD = '.' instead of the full path. This causes
41 # some tests to fail. Fix it up here.
42 if [ "$PWD" = "." ] ; then
43 PWD="$(readlink -f $PWD)"
47 # Apply constrained path when available. This is required since the
48 # PATH may have been modified by sudo's secure_path behavior.
50 if [ -n "$STF_PATH" ]; then
51 export PATH="$STF_PATH"
55 # Generic dot version comparison function
57 # Returns success when version $1 is greater than or equal to $2.
# Returns success (0) when dotted version $1 is greater than or equal to $2.
function compare_version_gte
{
	# Sort both versions with a natural version sort; $1 >= $2 exactly
	# when $1 sorts last (ties also leave $1 last after tail -n1).
	[ "$(printf "$1\n$2" | sort -V | tail -n1)" = "$1" ]
}
64 # Helper function used by linux_version() and freebsd_version()
# Helper function used by linux_version() and freebsd_version()
#
# $1 optional "major.minor.patch" string; when empty the running kernel's
#    release (first three numeric components of `uname -r`) is used.
# Outputs a single integer encoding the version so callers can compare
# kernels with plain -ge/-lt integer tests.
function kernel_version
{
	# Bug fix vs. the fragment as presented: $ver was read but never
	# assigned from $1, so explicit version arguments were ignored.
	typeset ver="$1"

	[ -z "$ver" ] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")

	typeset version major minor _
	IFS='.' read -r version major minor _ <<<"$ver"

	# Missing components default to zero, e.g. "4.10" -> 4.10.0.
	[ -z "$version" ] && version=0
	[ -z "$major" ] && major=0
	[ -z "$minor" ] && minor=0

	# Encode as version*100000 + major*1000 + minor.
	echo $((version * 100000 + major * 1000 + minor))
}
81 # Linux kernel version comparison function
83 # $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
85 # Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
86 function linux_version {
90 # FreeBSD version comparison function
92 # $1 FreeBSD version ("13.2", "14.0") or blank for installed FreeBSD version
94 # Used for comparison: if [ $(freebsd_version) -ge $(freebsd_version "13.2") ]
95 function freebsd_version {
99 # Determine if this is a Linux test system
101 # Return 0 if platform Linux, 1 if otherwise
105 [ "$UNAME" = "Linux" ]
108 # Determine if this is an illumos test system
110 # Return 0 if platform illumos, 1 if otherwise
113 [ "$UNAME" = "illumos" ]
116 # Determine if this is a FreeBSD test system
118 # Return 0 if platform FreeBSD, 1 if otherwise
122 [ "$UNAME" = "FreeBSD" ]
125 # Determine if this is a 32-bit system
127 # Return 0 if platform is 32-bit, 1 if otherwise
131 [ $(getconf LONG_BIT) = "32" ]
134 # Determine if kmemleak is enabled
136 # Return 0 if kmemleak is enabled, 1 if otherwise
140 is_linux && [ -e /sys/kernel/debug/kmemleak ]
143 # Determine whether a dataset is mounted
146 # $2 filesystem type; optional - defaulted to zfs
148 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
153 [[ -z $fstype ]] && fstype=zfs
158 if [[ "$1" == "/"* ]] ; then
159 ! zfs mount | awk -v fs="$1" '$2 == fs {exit 1}'
161 ! zfs mount | awk -v ds="$1" '$1 == ds {exit 1}'
166 mount -pt $fstype | while read dev dir _t _flags; do
167 [[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
170 out=$(df -F $fstype $1 2>/dev/null) || return
178 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
182 df -t $fstype $1 > /dev/null 2>&1
185 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
186 link=$(readlink -f $ZVOL_DEVDIR/$1)
187 [[ -n "$link" ]] && \
188 mount | grep -q "^$link" && \
198 # Return 0 if a dataset is mounted; 1 otherwise
201 # $2 filesystem type; optional - defaulted to zfs
208 # Return 0 if a dataset is unmounted; 1 otherwise
211 # $2 filesystem type; optional - defaulted to zfs
218 function default_setup
220 default_setup_noexit "$@"
225 function default_setup_no_mountpoint
227 default_setup_noexit "$1" "$2" "$3" "yes"
233 # Given a list of disks, setup storage pools and datasets.
235 function default_setup_noexit
240 typeset no_mountpoint=$4
241 log_note begin default_setup_noexit
243 if is_global_zone; then
244 if poolexists $TESTPOOL ; then
245 destroy_pool $TESTPOOL
247 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
248 log_must zpool create -f $TESTPOOL $disklist
253 rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
254 mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
256 log_must zfs create $TESTPOOL/$TESTFS
257 if [[ -z $no_mountpoint ]]; then
258 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
261 if [[ -n $container ]]; then
262 rm -rf $TESTDIR1 || \
263 log_unresolved Could not remove $TESTDIR1
264 mkdir -p $TESTDIR1 || \
265 log_unresolved Could not create $TESTDIR1
267 log_must zfs create $TESTPOOL/$TESTCTR
268 log_must zfs set canmount=off $TESTPOOL/$TESTCTR
269 log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
270 if [[ -z $no_mountpoint ]]; then
271 log_must zfs set mountpoint=$TESTDIR1 \
272 $TESTPOOL/$TESTCTR/$TESTFS1
276 if [[ -n $volume ]]; then
277 if is_global_zone ; then
278 log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
281 log_must zfs create $TESTPOOL/$TESTVOL
287 # Given a list of disks, setup a storage pool, file system and
290 function default_container_setup
294 default_setup "$disklist" "true"
# Given a list of disks, set up a storage pool and file system
301 function default_volume_setup
305 default_setup "$disklist" "" "true"
# Given a list of disks, set up a storage pool, file system,
310 # a container and a volume.
312 function default_container_volume_setup
316 default_setup "$disklist" "true" "true"
# Create a snapshot on a filesystem or volume. By default, create a snapshot on
323 # $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
324 # $2 snapshot name. Default, $TESTSNAP
326 function create_snapshot
328 typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
329 typeset snap=${2:-$TESTSNAP}
331 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
332 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
334 if snapexists $fs_vol@$snap; then
335 log_fail "$fs_vol@$snap already exists."
337 datasetexists $fs_vol || \
338 log_fail "$fs_vol must exist."
340 log_must zfs snapshot $fs_vol@$snap
344 # Create a clone from a snapshot, default clone name is $TESTCLONE.
346 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
347 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
349 function create_clone # snapshot clone
351 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
352 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
355 log_fail "Snapshot name is undefined."
357 log_fail "Clone name is undefined."
359 log_must zfs clone $snap $clone
# Create a bookmark of the given snapshot. By default, create a bookmark on
366 # $1 Existing filesystem or volume name. Default, $TESTFS
367 # $2 Existing snapshot name. Default, $TESTSNAP
368 # $3 bookmark name. Default, $TESTBKMARK
370 function create_bookmark
372 typeset fs_vol=${1:-$TESTFS}
373 typeset snap=${2:-$TESTSNAP}
374 typeset bkmark=${3:-$TESTBKMARK}
376 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
377 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
378 [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
380 if bkmarkexists $fs_vol#$bkmark; then
381 log_fail "$fs_vol#$bkmark already exists."
383 datasetexists $fs_vol || \
384 log_fail "$fs_vol must exist."
385 snapexists $fs_vol@$snap || \
386 log_fail "$fs_vol@$snap must exist."
388 log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
392 # Create a temporary clone result of an interrupted resumable 'zfs receive'
393 # $1 Destination filesystem name. Must not exist, will be created as the result
394 # of this function along with its %recv temporary clone
395 # $2 Source filesystem name. Must not exist, will be created and destroyed
397 function create_recv_clone
400 typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
401 typeset snap="$sendfs@snap1"
402 typeset incr="$sendfs@snap2"
403 typeset mountpoint="$TESTDIR/create_recv_clone"
404 typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
406 [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
408 datasetexists $recvfs && log_fail "Recv filesystem must not exist."
409 datasetexists $sendfs && log_fail "Send filesystem must not exist."
411 log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
412 log_must zfs snapshot $snap
413 log_must eval "zfs send $snap | zfs recv -u $recvfs"
414 log_must mkfile 1m "$mountpoint/data"
415 log_must zfs snapshot $incr
416 log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
417 iflag=fullblock > $sendfile"
418 log_mustnot eval "zfs recv -su $recvfs < $sendfile"
419 destroy_dataset "$sendfs" "-r"
420 log_must rm -f "$sendfile"
422 if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
423 log_fail "Error creating temporary $recvfs/%recv clone"
427 function default_mirror_setup
429 default_mirror_setup_noexit $1 $2 $3
435 # Given a pair of disks, set up a storage pool and dataset for the mirror
436 # @parameters: $1 the primary side of the mirror
437 # $2 the secondary side of the mirror
438 # @uses: ZPOOL ZFS TESTPOOL TESTFS
439 function default_mirror_setup_noexit
441 readonly func="default_mirror_setup_noexit"
445 [[ -z $primary ]] && \
446 log_fail "$func: No parameters passed"
447 [[ -z $secondary ]] && \
448 log_fail "$func: No secondary partition passed"
449 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
450 log_must zpool create -f $TESTPOOL mirror $@
451 log_must zfs create $TESTPOOL/$TESTFS
452 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
456 # Destroy the configured testpool mirrors.
457 # the mirrors are of the form ${TESTPOOL}{number}
458 # @uses: ZPOOL ZFS TESTPOOL
459 function destroy_mirrors
461 default_cleanup_noexit
466 function default_raidz_setup
468 default_raidz_setup_noexit "$*"
474 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
475 # $1 the list of disks
477 function default_raidz_setup_noexit
479 typeset disklist="$*"
480 disks=(${disklist[*]})
482 if [[ ${#disks[*]} -lt 2 ]]; then
483 log_fail "A raid-z requires a minimum of two disks."
486 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
487 log_must zpool create -f $TESTPOOL raidz $disklist
488 log_must zfs create $TESTPOOL/$TESTFS
489 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
493 # Common function used to cleanup storage pools and datasets.
495 # Invoked at the start of the test suite to ensure the system
496 # is in a known state, and also at the end of each set of
497 # sub-tests to ensure errors from one set of tests doesn't
498 # impact the execution of the next set.
500 function default_cleanup
502 default_cleanup_noexit
508 # Utility function used to list all available pool names.
510 # NOTE: $KEEP is a variable containing pool names, separated by a newline
511 # character, that must be excluded from the returned list.
# Utility function used to list all available pool names.
#
# NOTE: $KEEP is a newline-separated list of pool names excluded by exact
# match (-Fx); $NO_POOLS is treated as a pattern and excluded as well.
function get_all_pools
{
	zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
}
518 function default_cleanup_noexit
522 # Destroying the pool will also destroy any
523 # filesystems it contains.
525 if is_global_zone; then
526 zfs unmount -a > /dev/null 2>&1
527 ALL_POOLS=$(get_all_pools)
528 # Here, we loop through the pools we're allowed to
529 # destroy, only destroying them if it's safe to do
531 while [ ! -z ${ALL_POOLS} ]
533 for pool in ${ALL_POOLS}
535 if safe_to_destroy_pool $pool ;
540 ALL_POOLS=$(get_all_pools)
546 for fs in $(zfs list -H -o name \
547 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
548 destroy_dataset "$fs" "-Rf"
551 # Need cleanup here to avoid garbage dir left.
552 for fs in $(zfs list -H -o name); do
553 [[ $fs == /$ZONE_POOL ]] && continue
554 [[ -d $fs ]] && log_must rm -rf $fs/*
558 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
561 for fs in $(zfs list -H -o name); do
562 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
563 log_must zfs set reservation=none $fs
564 log_must zfs set recordsize=128K $fs
565 log_must zfs set mountpoint=/$fs $fs
566 typeset enc=$(get_prop encryption $fs)
567 if [ -z "$enc" ] || [ "$enc" = "off" ]; then
568 log_must zfs set checksum=on $fs
570 log_must zfs set compression=off $fs
571 log_must zfs set atime=on $fs
572 log_must zfs set devices=off $fs
573 log_must zfs set exec=on $fs
574 log_must zfs set setuid=on $fs
575 log_must zfs set readonly=off $fs
576 log_must zfs set snapdir=hidden $fs
577 log_must zfs set aclmode=groupmask $fs
578 log_must zfs set aclinherit=secure $fs
583 [[ -d $TESTDIR ]] && \
584 log_must rm -rf $TESTDIR
587 if is_mpath_device $disk1; then
591 rm -f $TEST_BASE_DIR/{err,out}
596 # Common function used to cleanup storage pools, file systems
599 function default_container_cleanup
601 if ! is_global_zone; then
605 ismounted $TESTPOOL/$TESTCTR/$TESTFS1 &&
606 log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
608 destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
609 destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
611 [[ -e $TESTDIR1 ]] && \
612 log_must rm -rf $TESTDIR1
618 # Common function used to cleanup snapshot of file system or volume. Default to
619 # delete the file system's snapshot
# Destroy the given snapshot, defaulting to $TESTPOOL/$TESTFS@$TESTSNAP,
# and remove its (now stale) mountpoint directory if one was mounted.
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# The value reported by 'get_prop' may not match the real mountpoint
	# when the snapshot is unmounted, so only query the property while
	# the snapshot is known to be mounted.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
	fi

	destroy_dataset "$snap"
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
647 # Common function used to cleanup clone.
# Destroy the given clone, defaulting to $TESTPOOL/$TESTCLONE, and remove
# its (now stale) mountpoint directory if one was mounted.
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# Bug fix: error message grammar ("does not existed").
		log_fail "'$clone' does not exist."
	fi

	# Same reasoning as in destroy_snapshot: only trust the mountpoint
	# property while the clone is actually mounted.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
	fi

	destroy_dataset "$clone"
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
671 # Common function used to cleanup bookmark of file system or volume. Default
672 # to delete the file system's bookmark.
# Destroy the given bookmark, defaulting to $TESTPOOL/$TESTFS#$TESTBKMARK.
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Bug fix: the message referenced the undefined variable
		# $bkmarkp, which would have expanded to an empty string.
		log_fail "'$bkmark' does not exist."
	fi

	destroy_dataset "$bkmark"
}
687 # Return 0 if a snapshot exists; $? otherwise
693 zfs list -H -t snapshot "$1" > /dev/null 2>&1
697 # Return 0 if a bookmark exists; $? otherwise
701 function bkmarkexists
703 zfs list -H -t bookmark "$1" > /dev/null 2>&1
707 # Return 0 if a hold exists; $? otherwise
714 ! zfs holds "$2" | awk -v t="$1" '$2 ~ t { exit 1 }'
718 # Set a property to a certain value on a dataset.
719 # Sets a property of the dataset to the value as passed in.
721 # $1 dataset who's property is being set
723 # $3 value to set property to
725 # 0 if the property could be set.
726 # non-zero otherwise.
729 function dataset_setprop
731 typeset fn=dataset_setprop
734 log_note "$fn: Insufficient parameters (need 3, had $#)"
738 output=$(zfs set $2=$3 $1 2>&1)
741 log_note "Setting property on $1 failed."
742 log_note "property $2=$3"
743 log_note "Return Code: $rv"
744 log_note "Output: $output"
751 # Check a numeric assertion
752 # @parameter: $@ the assertion to check
753 # @output: big loud notice if assertion failed
758 (($@)) || log_fail "$@"
762 # Function to format partition size of a disk
763 # Given a disk cxtxdx reduces all partitions
766 function zero_partitions #<whole_disk_name>
772 gpart destroy -F $diskname
774 DSK=$DEV_DSKDIR/$diskname
775 DSK=$(echo $DSK | sed -e "s|//|/|g")
776 log_must parted $DSK -s -- mklabel gpt
777 blockdev --rereadpt $DSK 2>/dev/null
780 for i in 0 1 3 4 5 6 7
782 log_must set_partition $i "" 0mb $diskname
790 # Given a slice, size and disk, this function
791 # formats the slice to the specified size.
792 # Size should be specified with units as per
793 # the `format` command requirements eg. 100mb 3gb
795 # NOTE: This entire interface is problematic for the Linux parted utility
796 # which requires the end of the partition to be specified. It would be
797 # best to retire this interface and replace it with something more flexible.
798 # At the moment a best effort is made.
800 # arguments: <slice_num> <slice_start> <size_plus_units> <whole_disk_name>
801 function set_partition
803 typeset -i slicenum=$1
806 typeset disk=${4#$DEV_DSKDIR/}
807 disk=${disk#$DEV_RDSKDIR/}
811 if [[ -z $size || -z $disk ]]; then
812 log_fail "The size or disk name is unspecified."
814 disk=$DEV_DSKDIR/$disk
815 typeset size_mb=${size%%[mMgG]}
817 size_mb=${size_mb%%[mMgG][bB]}
818 if [[ ${size:1:1} == 'g' ]]; then
819 ((size_mb = size_mb * 1024))
822 # Create GPT partition table when setting slice 0 or
823 # when the device doesn't already contain a GPT label.
824 parted $disk -s -- print 1 >/dev/null
826 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
827 if ! parted $disk -s -- mklabel gpt; then
828 log_note "Failed to create GPT partition table on $disk"
833 # When no start is given align on the first cylinder.
834 if [[ -z "$start" ]]; then
838 # Determine the cylinder size for the device and using
839 # that calculate the end offset in cylinders.
840 typeset -i cly_size_kb=0
841 cly_size_kb=$(parted -m $disk -s -- unit cyl print |
842 awk -F '[:k.]' 'NR == 3 {print $4}')
843 ((end = (size_mb * 1024 / cly_size_kb) + start))
846 mkpart part$slicenum ${start}cyl ${end}cyl
848 if [[ $ret_val -ne 0 ]]; then
849 log_note "Failed to create partition $slicenum on $disk"
853 blockdev --rereadpt $disk 2>/dev/null
854 block_device_wait $disk
857 if [[ -z $size || -z $disk ]]; then
858 log_fail "The size or disk name is unspecified."
860 disk=$DEV_DSKDIR/$disk
862 if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
863 gpart destroy -F $disk >/dev/null 2>&1
864 if ! gpart create -s GPT $disk; then
865 log_note "Failed to create GPT partition table on $disk"
870 typeset index=$((slicenum + 1))
872 if [[ -n $start ]]; then
875 gpart add -t freebsd-zfs $start -s $size -i $index $disk
876 if [[ $ret_val -ne 0 ]]; then
877 log_note "Failed to create partition $slicenum on $disk"
881 block_device_wait $disk
884 if [[ -z $slicenum || -z $size || -z $disk ]]; then
885 log_fail "The slice, size or disk name is unspecified."
888 typeset format_file=/var/tmp/format_in.$$
890 echo "partition" >$format_file
891 echo "$slicenum" >> $format_file
892 echo "" >> $format_file
893 echo "" >> $format_file
894 echo "$start" >> $format_file
895 echo "$size" >> $format_file
896 echo "label" >> $format_file
897 echo "" >> $format_file
898 echo "q" >> $format_file
899 echo "q" >> $format_file
901 format -e -s -d $disk -f $format_file
907 if [[ $ret_val -ne 0 ]]; then
908 log_note "Unable to format $disk slice $slicenum to $size"
915 # Delete all partitions on all disks - this is specifically for the use of multipath
916 # devices which currently can only be used in the test suite as raw/un-partitioned
917 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
919 function delete_partitions
923 if [[ -z $DISKSARRAY ]]; then
929 for disk in $DISKSARRAY; do
930 for (( part = 1; part < MAX_PARTITIONS; part++ )); do
931 typeset partition=${disk}${SLICE_PREFIX}${part}
932 parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
933 if lsblk | grep -qF ${partition}; then
934 log_fail "Partition ${partition} not deleted"
936 log_note "Partition ${partition} deleted"
940 elif is_freebsd; then
941 for disk in $DISKSARRAY; do
942 if gpart destroy -F $disk; then
943 log_note "Partitions for ${disk} deleted"
945 log_fail "Partitions for ${disk} not deleted"
952 # Get the end cyl of the given slice
954 function get_endslice #<disk> <slice>
958 if [[ -z $disk || -z $slice ]] ; then
959 log_fail "The disk name or slice number is unspecified."
964 endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
965 awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
966 ((endcyl = (endcyl + 1)))
969 disk=${disk#/dev/zvol/}
972 endcyl=$(gpart show $disk | \
973 awk -v slice=$slice '$3 == slice { print $1 + $2 }')
976 disk=${disk#/dev/dsk/}
977 disk=${disk#/dev/rdsk/}
981 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
982 awk '/sectors\/cylinder/ {print $2}')
984 if ((ratio == 0)); then
988 typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
989 awk -v token="$slice" '$1 == token {print $6}')
991 ((endcyl = (endcyl + 1) / ratio))
# Given a size, disk and total slice number, this function formats the
1001 # disk slices from 0 to the total slice number with the same specified
1004 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
1007 typeset slice_size=$1
1008 typeset disk_name=$2
1009 typeset total_slices=$3
1012 zero_partitions $disk_name
1013 while ((i < $total_slices)); do
1020 log_must set_partition $i "$cyl" $slice_size $disk_name
1021 cyl=$(get_endslice $disk_name $i)
1027 # This function continues to write to a filenum number of files into dirnum
1028 # number of directories until either file_write returns an error or the
1029 # maximum number of files per directory have been written.
1032 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1034 # Return value: 0 on success
1038 # destdir: is the directory where everything is to be created under
1039 # dirnum: the maximum number of subdirectories to use, -1 no limit
1040 # filenum: the maximum number of files per subdirectory
1041 # bytes: number of bytes to write
1042 # num_writes: number of types to write out bytes
1043 # data: the data that will be written
1046 # fill_fs /testdir 20 25 1024 256 0
1048 # Note: bytes * num_writes equals the size of the testfile
1050 function fill_fs # destdir dirnum filenum bytes num_writes data
1052 typeset destdir=${1:-$TESTDIR}
1053 typeset -i dirnum=${2:-50}
1054 typeset -i filenum=${3:-50}
1055 typeset -i bytes=${4:-8192}
1056 typeset -i num_writes=${5:-10240}
1057 typeset data=${6:-0}
1059 mkdir -p $destdir/{1..$dirnum}
1060 for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
1061 file_write -o create -f $f -b $bytes -c $num_writes -d $data \
1066 # Get the specified dataset property in parsable format or fail
1067 function get_prop # property dataset
1072 zfs get -Hpo value "$prop" "$dataset" || log_fail "zfs get $prop $dataset"
1075 # Get the specified pool property in parsable format or fail
1076 function get_pool_prop # property pool
1081 zpool get -Hpo value "$prop" "$pool" || log_fail "zpool get $prop $pool"
1084 # Return 0 if a pool exists; $? otherwise
1092 if [[ -z $pool ]]; then
1093 log_note "No pool name given."
1097 zpool get name "$pool" > /dev/null 2>&1
1100 # Return 0 if all the specified datasets exist; $? otherwise
1103 function datasetexists
1105 if (($# == 0)); then
1106 log_note "No dataset name given."
1110 zfs get name "$@" > /dev/null 2>&1
1113 # return 0 if none of the specified datasets exists, otherwise return 1.
1116 function datasetnonexists
1118 if (($# == 0)); then
1119 log_note "No dataset name given."
1123 while (($# > 0)); do
1124 zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1132 # FreeBSD breaks exports(5) at whitespace and doesn't process escapes
1133 # Solaris just breaks
1135 # cf. https://github.com/openzfs/zfs/pull/13165#issuecomment-1059845807
1137 # Linux can have spaces (which are \OOO-escaped),
1138 # but can't have backslashes because they're parsed recursively
1139 function shares_can_have_whitespace
1144 function is_shared_freebsd
1148 pgrep -q mountd && showmount -E | grep -qx "$fs"
1151 function is_shared_illumos
1156 for mtpt in `share | awk '{print $2}'` ; do
1157 if [[ $mtpt == $fs ]] ; then
1162 typeset stat=$(svcs -H -o STA nfs/server:default)
1163 if [[ $stat != "ON" ]]; then
1164 log_note "Current nfs/server status: $stat"
1170 function is_shared_linux
1173 ! exportfs -s | awk -v fs="${fs//\\/\\\\}" '/^\// && $1 == fs {exit 1}'
1177 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1179 # Returns 0 if shared, 1 otherwise.
1186 if [[ $fs != "/"* ]] ; then
1187 if datasetnonexists "$fs" ; then
1190 mtpt=$(get_prop mountpoint "$fs")
1192 none|legacy|-) return 1
1201 FreeBSD) is_shared_freebsd "$fs" ;;
1202 Linux) is_shared_linux "$fs" ;;
1203 *) is_shared_illumos "$fs" ;;
1207 function is_exported_illumos
1212 while read -r mtpt _; do
1213 [ "$mtpt" = "$fs" ] && return
1214 done < /etc/dfs/sharetab
1219 function is_exported_freebsd
1224 while read -r mtpt _; do
1225 [ "$mtpt" = "$fs" ] && return
1226 done < /etc/zfs/exports
1231 function is_exported_linux
1236 while read -r mtpt _; do
1237 [ "$(printf "$mtpt")" = "$fs" ] && return
1238 done < /etc/exports.d/zfs.exports
1244 # Given a mountpoint, or a dataset name, determine if it is exported via
1245 # the os-specific NFS exports file.
1247 # Returns 0 if exported, 1 otherwise.
1249 function is_exported
1254 if [[ $fs != "/"* ]] ; then
1255 if datasetnonexists "$fs" ; then
1258 mtpt=$(get_prop mountpoint "$fs")
1260 none|legacy|-) return 1
1269 FreeBSD) is_exported_freebsd "$fs" ;;
1270 Linux) is_exported_linux "$fs" ;;
1271 *) is_exported_illumos "$fs" ;;
1276 # Given a dataset name determine if it is shared via SMB.
1278 # Returns 0 if shared, 1 otherwise.
1280 function is_shared_smb
1284 datasetexists "$fs" || return
1287 net usershare list | grep -xFq "${fs//[-\/]/_}"
1289 log_note "SMB on $UNAME currently unsupported by the test framework"
1295 # Given a mountpoint, determine if it is not shared via NFS.
1297 # Returns 0 if not shared, 1 otherwise.
1305 # Given a dataset determine if it is not shared via SMB.
1307 # Returns 0 if not shared, 1 otherwise.
1309 function not_shared_smb
1315 # Helper function to unshare a mountpoint.
1317 function unshare_fs #fs
1321 if is_shared $fs || is_shared_smb $fs; then
1322 log_must zfs unshare $fs
1327 # Helper function to share a NFS mountpoint.
1329 function share_nfs #fs
1333 is_shared "$fs" && return
1337 log_must exportfs "*:$fs"
1341 read -r mountd < /var/run/mountd.pid
1342 log_must eval "printf '%s\t\n' \"$fs\" >> /etc/zfs/exports"
1343 log_must kill -s HUP "$mountd"
1346 log_must share -F nfs "$fs"
1354 # Helper function to unshare a NFS mountpoint.
1356 function unshare_nfs #fs
1360 ! is_shared "$fs" && return
1364 log_must exportfs -u "*:$fs"
1368 read -r mountd < /var/run/mountd.pid
1369 awk -v fs="${fs//\\/\\\\}" '$1 != fs' /etc/zfs/exports > /etc/zfs/exports.$$
1370 log_must mv /etc/zfs/exports.$$ /etc/zfs/exports
1371 log_must kill -s HUP "$mountd"
1374 log_must unshare -F nfs $fs
1382 # Helper function to show NFS shares.
1384 function showshares_nfs
1409 log_unsupported "Unknown platform"
1411 esac || log_unsupported "The NFS utilities are not installed"
1415 # Check NFS server status and trigger it online.
1417 function setup_nfs_server
1419 # Cannot share directory in non-global zone.
1421 if ! is_global_zone; then
1422 log_note "Cannot trigger NFS server by sharing in LZ."
1428 # Re-synchronize /var/lib/nfs/etab with /etc/exports and
1429 # /etc/exports.d./* to provide a clean test environment.
1431 log_must exportfs -r
1433 log_note "NFS server must be started prior to running ZTS."
1435 elif is_freebsd; then
1436 log_must kill -s HUP $(</var/run/mountd.pid)
1438 log_note "NFS server must be started prior to running ZTS."
1442 typeset nfs_fmri="svc:/network/nfs/server:default"
1443 if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1445 # Only really sharing operation can enable NFS server
1446 # to online permanently.
1448 typeset dummy=/tmp/dummy
1450 if [[ -d $dummy ]]; then
1451 log_must rm -rf $dummy
1454 log_must mkdir $dummy
1455 log_must share $dummy
1458 # Waiting for fmri's status to be the final status.
1459 # Otherwise, in transition, an asterisk (*) is appended for
1460 # instances, unshare will reverse status to 'DIS' again.
1462 # Waiting for 1's at least.
1466 while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1473 log_must unshare $dummy
1474 log_must rm -rf $dummy
1477 log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1481 # To verify whether calling process is in global zone
1483 # Return 0 if in global zone, 1 in non-global zone
1485 function is_global_zone
1487 if is_linux || is_freebsd; then
1490 typeset cur_zone=$(zonename 2>/dev/null)
1491 [ $cur_zone = "global" ]
1496 # Verify whether test is permitted to run from
1497 # global zone, local zone, or both
1499 # $1 zone limit, could be "global", "local", or "both"(no limit)
1501 # Return 0 if permitted, otherwise exit with log_unsupported
1503 function verify_runnable # zone limit
1507 [[ -z $limit ]] && return 0
1509 if is_global_zone ; then
1513 local) log_unsupported "Test is unable to run from "\
1516 *) log_note "Warning: unknown limit $limit - " \
1524 global) log_unsupported "Test is unable to run from "\
1527 *) log_note "Warning: unknown limit $limit - " \
1538 # Return 0 if create successfully or the pool exists; $? otherwise
1539 # Note: In local zones, this function should return 0 silently.
1542 # $2-n - [keyword] devs_list
1544 function create_pool #pool devs_list
1546 typeset pool=${1%%/*}
1550 if [[ -z $pool ]]; then
1551 log_note "Missing pool name."
1555 if poolexists $pool ; then
1559 if is_global_zone ; then
1560 [[ -d /$pool ]] && rm -rf /$pool
1561 log_must zpool create -f $pool $@
1567 # Return 0 if destroy successfully or the pool exists; $? otherwise
1568 # Note: In local zones, this function should return 0 silently.
1571 # Destroy pool with the given parameters.
1573 function destroy_pool #pool
1575 typeset pool=${1%%/*}
1578 if [[ -z $pool ]]; then
1579 log_note "No pool name given."
1583 if is_global_zone ; then
1584 if poolexists "$pool" ; then
1585 mtpt=$(get_prop mountpoint "$pool")
1587 # At times, syseventd/udev activity can cause attempts
1588 # to destroy a pool to fail with EBUSY. We retry a few
1589 # times allowing failures before requiring the destroy
1591 log_must_busy zpool destroy -f $pool
1594 log_must rm -rf $mtpt
1596 log_note "Pool does not exist. ($pool)"
1604 # Return 0 if created successfully; $? otherwise
1607 # $2-n - dataset options
1609 function create_dataset #dataset dataset_options
1615 if [[ -z $dataset ]]; then
1616 log_note "Missing dataset name."
1620 if datasetexists $dataset ; then
1621 destroy_dataset $dataset
1624 log_must zfs create $@ $dataset
1629 # Return 0 if destroy successfully or the dataset exists; $? otherwise
1630 # Note: In local zones, this function should return 0 silently.
1633 # $2 - custom arguments for zfs destroy
1634 # Destroy dataset with the given parameters.
1636 function destroy_dataset # dataset [args]
1640 typeset args=${2:-""}
1642 if [[ -z $dataset ]]; then
1643 log_note "No dataset name given."
1647 if is_global_zone ; then
1648 if datasetexists "$dataset" ; then
1649 mtpt=$(get_prop mountpoint "$dataset")
1650 log_must_busy zfs destroy $args $dataset
1652 [ -d $mtpt ] && log_must rm -rf $mtpt
1654 log_note "Dataset does not exist. ($dataset)"
1663 # Reexport TESTPOOL & TESTPOOL(1-4)
1665 function reexport_pool
# Walk the zone containers and ensure each TESTPOOL dataset is mounted.
# 'i' and 'cntctr' are presumably initialized on lines not shown in
# this excerpt — TODO confirm against the full source.
1670 while ((i < cntctr)); do
1672 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1673 if ! ismounted $TESTPOOL; then
1674 log_must zfs mount $TESTPOOL
# The numbered TESTPOOL$i variables are named dynamically, hence eval.
1677 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1678 if eval ! ismounted \$TESTPOOL$i; then
1679 log_must eval zfs mount \$TESTPOOL$i
1687 # Verify a given disk or pool state
1689 # Return 0 is pool/disk matches expected state, 1 otherwise
1691 function check_state # pool disk state{online,offline,degraded}
# Strip an optional $DEV_DSKDIR/ prefix so full device paths match the
# bare names printed by zpool status.
1694 typeset disk=${2#$DEV_DSKDIR/}
1697 [[ -z $pool ]] || [[ -z $state ]] \
1698 && log_fail "Arguments invalid or missing"
1700 if [[ -z $disk ]]; then
1701 #check pool state only
# Case-insensitive match of the pool's health property.
1702 zpool get -H -o value health $pool | grep -qi "$state"
# Otherwise match the expected state on the specific device's line.
1704 zpool status -v $pool | grep "$disk" | grep -qi "$state"
1709 # Get the mountpoint of snapshot
1710 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1713 function snapshot_mountpoint
# Defaults to the suite's standard test snapshot when no arg is given.
1715 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
# A snapshot name must contain '@' separating filesystem and snapshot.
1717 if [[ $dataset != *@* ]]; then
1718 log_fail "Error name of snapshot '$dataset'."
1721 typeset fs=${dataset%@*}
1722 typeset snap=${dataset#*@}
1724 if [[ -z $fs || -z $snap ]]; then
1725 log_fail "Error name of snapshot '$dataset'."
# Snapshots are exposed read-only under the filesystem's hidden
# .zfs/snapshot directory.
1728 echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1732 # Given a device and 'ashift' value verify it's correctly set on every label
1734 function verify_ashift # device ashift
# Dump all vdev labels (-lll) and let awk check the ashift value on
# each one; the awk program body is not shown in this excerpt.
1739 zdb -e -lll $device | awk -v ashift=$ashift '
1752 # Given a pool and file system, this function will verify the file system
1753 # using the zdb internal tool. Note that the pool is exported and imported
1754 # to ensure it has consistent state.
1756 function verify_filesys # pool filesystem dir
1759 typeset filesys="$2"
# Per-process scratch file for zdb output; removed on success below.
1760 typeset zdbout="/tmp/zdbout.$$"
1765 typeset search_path=""
1767 log_note "Calling zdb to verify filesystem '$filesys'"
# Export first so the re-import presents zdb a consistent on-disk view.
1768 zfs unmount -a > /dev/null 2>&1
1769 log_must zpool export $pool
# Optional extra directories ($3...) to search for vdevs on import.
1771 if [[ -n $dirs ]] ; then
1772 for dir in $dirs ; do
1773 search_path="$search_path -d $dir"
1777 log_must zpool import $search_path $pool
1779 if ! zdb -cudi $filesys > $zdbout 2>&1; then
1780 log_note "Output: zdb -cudi $filesys"
1783 log_fail "zdb detected errors with: '$filesys'"
1786 log_must zfs mount -a
1787 log_must rm -rf $zdbout
1791 # Given a pool issue a scrub and verify that no checksum errors are reported.
1793 function verify_pool
1795 typeset pool=${1:-$TESTPOOL}
1797 log_must zpool scrub $pool
1798 log_must wait_scrubbed $pool
# Sum the CKSUM column ($NF) of every vdev line; vdev lines start
# after the header row that ends in "CKSUM".
1800 typeset -i cksum=$(zpool status $pool | awk '
1802 isvdev { errors += $NF }
1803 /CKSUM$/ { isvdev = 1 }
1804 END { print errors }
1806 if [[ $cksum != 0 ]]; then
1807 log_must zpool status -v
1808 log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
1813 # Given a pool, and this function list all disks in the pool
1815 function get_disklist # pool
# Skip the 4-line iostat header, then drop separator lines and vdev
# grouping keywords (mirror/raidz/draid/spare/log/cache/special/dedup,
# including their "-N" numbered forms) so only leaf devices remain.
1817 echo $(zpool iostat -v $1 | awk '(NR > 4) {print $1}' | \
1818 grep -vEe '^-----' -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
1822 # Given a pool, and this function list all disks in the pool with their full
1823 # path (like "/dev/sda" instead of "sda").
1825 function get_disklist_fullpath # pool
# -P asks zpool iostat for full device paths; it is passed through as
# part of get_disklist's single argument.
1827 get_disklist "-P $1"
1833 # This function kills a given list of processes after a time period. We use
1834 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1835 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1836 # would be listed as FAIL, which we don't want : we're happy with stress tests
1837 # running for a certain amount of time, then finishing.
1839 # @param $1 the time in seconds after which we should terminate these processes
1840 # @param $2..$n the processes we wish to terminate.
1842 function stress_timeout
1844 typeset -i TIMEOUT=$1
1848 log_note "Waiting for child processes($cpids). " \
1849 "It could last dozens of minutes, please be patient ..."
1850 log_must sleep $TIMEOUT
1852 log_note "Killing child processes after ${TIMEOUT} stress timeout."
1854 for pid in $cpids; do
# Only signal processes that are still alive; SIGUSR1 lets the stress
# programs terminate themselves rather than being killed hard.
1855 ps -p $pid > /dev/null 2>&1 &&
1856 log_must kill -USR1 $pid
1861 # Verify a given hotspare disk is inuse or avail
1863 # Return 0 is pool/disk matches expected state, 1 otherwise
1865 function check_hotspare_state # pool disk state{inuse,avail}
1868 typeset disk=${2#$DEV_DSKDIR/}
# Look the device up under the "spares" section of zpool status.
1871 cur_state=$(get_device_state $pool $disk "spares")
1873 [ $state = $cur_state ]
1877 # Wait until a hotspare transitions to a given state or times out.
1879 # Return 0 when pool/disk matches expected state, 1 on timeout.
1881 function wait_hotspare_state # pool disk state timeout
1884 typeset disk=${2#*$DEV_DSKDIR/}
# Default: poll for up to 60 iterations.
1886 typeset timeout=${4:-60}
1889 while [[ $i -lt $timeout ]]; do
1890 if check_hotspare_state $pool $disk $state; then
1902 # Verify a given vdev disk is inuse or avail
1904 # Return 0 is pool/disk matches expected state, 1 otherwise
1906 function check_vdev_state # pool disk state{online,offline,unavail,removed}
1909 typeset disk=${2#*$DEV_DSKDIR/}
# No section argument: search the pool's main config section.
1912 cur_state=$(get_device_state $pool $disk)
1914 [ $state = $cur_state ]
1918 # Wait until a vdev transitions to a given state or times out.
1920 # Return 0 when pool/disk matches expected state, 1 on timeout.
1922 function wait_vdev_state # pool disk state timeout
1925 typeset disk=${2#*$DEV_DSKDIR/}
1927 typeset timeout=${4:-60}
1930 while [[ $i -lt $timeout ]]; do
1931 if check_vdev_state $pool $disk $state; then
1943 # Check the output of 'zpool status -v <pool>',
1944 # and to see if the content of <token> contain the <keyword> specified.
1946 # Return 0 is contain, 1 otherwise
1948 function check_pool_status # pool token keyword <verbose>
1953 typeset verbose=${4:-false}
# Select the status line whose first field is "<token>:" (e.g. "scan:").
1955 scan=$(zpool status -v "$pool" 2>/dev/null | awk -v token="$token:" '$1==token')
1956 if [[ $verbose == true ]]; then
# Case-insensitive substring match of the keyword on that line.
1959 echo $scan | grep -qi "$keyword"
1963 # The following functions are instance of check_pool_status()
1964 # is_pool_resilvering - to check if the pool resilver is in progress
1965 # is_pool_resilvered - to check if the pool resilver is completed
1966 # is_pool_scrubbing - to check if the pool scrub is in progress
1967 # is_pool_scrubbed - to check if the pool scrub is completed
1968 # is_pool_scrub_stopped - to check if the pool scrub is stopped
1969 # is_pool_scrub_paused - to check if the pool scrub has paused
1970 # is_pool_removing - to check if the pool removing is a vdev
1971 # is_pool_removed - to check if the pool remove is completed
1972 # is_pool_discarding - to check if the pool checkpoint is being discarded
1973 # is_pool_replacing - to check if the pool is performing a replacement
1975 function is_pool_resilvering #pool <verbose>
# The bracket expression tolerates the varying text zpool prints
# between "resilver" and "in progress since".
1977 check_pool_status "$1" "scan" \
1978 "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
1981 function is_pool_resilvered #pool <verbose>
1983 check_pool_status "$1" "scan" "resilvered " $2
1986 function is_pool_scrubbing #pool <verbose>
1988 check_pool_status "$1" "scan" "scrub in progress since " $2
1991 function is_pool_error_scrubbing #pool <verbose>
# Error scrub state is reported on the "scrub:" line, not "scan:".
1993 check_pool_status "$1" "scrub" "error scrub in progress since " $2
1997 function is_pool_scrubbed #pool <verbose>
1999 check_pool_status "$1" "scan" "scrub repaired" $2
2002 function is_pool_scrub_stopped #pool <verbose>
2004 check_pool_status "$1" "scan" "scrub canceled" $2
2007 function is_pool_error_scrub_stopped #pool <verbose>
2009 check_pool_status "$1" "scrub" "error scrub canceled on " $2
2013 function is_pool_scrub_paused #pool <verbose>
2015 check_pool_status "$1" "scan" "scrub paused since " $2
2018 function is_pool_error_scrub_paused #pool <verbose>
2020 check_pool_status "$1" "scrub" "error scrub paused since " $2
2024 function is_pool_removing #pool
2026 check_pool_status "$1" "remove" "in progress since "
2029 function is_pool_removed #pool
2031 check_pool_status "$1" "remove" "completed on"
2034 function is_pool_discarding #pool
2036 check_pool_status "$1" "checkpoint" "discarding"
2038 function is_pool_replacing #pool
# Replacement is detected by the synthetic "replacing-N" vdev name.
2040 zpool status "$1" | grep -qE 'replacing-[0-9]+'
# Wait until the pool health becomes DEGRADED, or log after timeout.
2043 function wait_for_degraded
# $2 is the timeout in seconds (default 30), measured from t0 using
# the shell's SECONDS counter.
2046 typeset timeout=${2:-30}
2050 [[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
2051 log_note "$pool is not yet degraded."
2053 if ((SECONDS - t0 > $timeout)); then
2054 log_note "$pool not degraded after $timeout seconds."
2063 # Use create_pool()/destroy_pool() to clean up the information in
2064 # in the given disk to avoid slice overlapping.
2066 function cleanup_devices #vdevs
# Throwaway pool name made unique by this shell's PID.
2068 typeset pool="foopool$$"
2071 zero_partitions $vdev
# Creating and then destroying a scratch pool rewrites the labels on
# the given vdevs, wiping stale partition/pool information.
2074 poolexists $pool && destroy_pool $pool
2075 create_pool $pool $@
2082 # A function to find and locate free disks on a system or from given
2083 # disks as the parameter. It works by locating disks that are in use
2084 # as swap devices and dump devices, and also disks listed in /etc/vfstab
2086 # $@ given disks to find which are free, default is all disks in
2089 # @return a string containing the list of available disks
2093 # Trust provided list, no attempt is made to locate unused devices.
2094 if is_linux || is_freebsd; then
# illumos path: build lists of swap and dump devices to exclude.
2100 sfi=/tmp/swaplist.$$
2101 dmpi=/tmp/dumpdev.$$
2102 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2105 dumpadm > $dmpi 2>/dev/null
# Default to all disks reported by format(8) when none were given.
2107 disks=${@:-$(echo "" | format -e 2>/dev/null | awk '
2115 if (searchdisks && $2 !~ "^$"){
2121 /^AVAILABLE DISK SELECTIONS:/{
2127 for disk in $disks; do
# Skip any disk with a mounted slice/partition.
2129 grep -q "${disk}[sp]" /etc/mnttab && continue
2131 grep -q "${disk}[sp]" $sfi && continue
2132 # check for dump device
2133 grep -q "${disk}[sp]" $dmpi && continue
2134 # check to see if this disk hasn't been explicitly excluded
2135 # by a user-set environment variable
2136 echo "${ZFS_HOST_DEVICES_IGNORE}" | grep -q "${disk}" && continue
2137 unused_candidates="$unused_candidates $disk"
2141 # now just check to see if those disks do actually exist
2142 # by looking for a device pointing to the first slice in
2143 # each case. limit the number to max_finddisksnum
2145 for disk in $unused_candidates; do
2146 if is_disk_device $DEV_DSKDIR/${disk}s0 && \
2147 [ $count -lt $max_finddisksnum ]; then
2148 unused="$unused $disk"
2149 # do not impose limit if $@ is provided
2150 [[ -z $@ ]] && ((count = count + 1))
2154 # finally, return our disk list
# FreeBSD implementation of user creation via pw(8).
2158 function add_user_freebsd #<group_name> <user_name> <basedir>
2164 # Check to see if the user exists.
2165 if id $user > /dev/null 2>&1; then
2169 # Assign 1000 as the base uid
2172 pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
2175 # The uid is not unique
# Keep probing higher uids until one succeeds, giving up at 65000.
2179 if [[ $uid == 65000 ]]; then
2180 log_fail "No user id available under 65000 for $user"
# Suppress the login banner for the new account.
2185 touch $basedir/$user/.hushlogin
2191 # Delete the specified user.
2195 function del_user_freebsd #<logname>
2199 if id $user > /dev/null 2>&1; then
2200 log_must pw userdel $user
2207 # Select valid gid and create specified group.
2211 function add_group_freebsd #<group_name>
2215 # See if the group already exists.
2216 if pw groupshow $group >/dev/null 2>&1; then
2220 # Assign 1000 as the base gid
2223 pw groupadd -g $gid -n $group > /dev/null 2>&1
2226 # The gid is not unique
2230 if [[ $gid == 65000 ]]; then
# NOTE(review): message says "user id" but this limit is on the gid —
# presumably should read "group id"; confirm against upstream.
2231 log_fail "No user id available under 65000 for $group"
2237 # Delete the specified group.
2241 function del_group_freebsd #<group_name>
2245 pw groupdel -n $group > /dev/null 2>&1
2247 # Group does not exist, or was deleted successfully.
2249 # Name already exists as a group name
# pw exit status 9 = name exists; retry the delete under log_must.
2250 9) log_must pw groupdel $group ;;
# illumos implementation of user/group management.
2257 function add_user_illumos #<group_name> <user_name> <basedir>
2263 log_must useradd -g $group -d $basedir/$user -m $user
2268 function del_user_illumos #<user_name>
2272 if id $user > /dev/null 2>&1; then
# Retry up to 6 times if userdel reports the account is
# "currently used" (e.g. a lingering login session).
2273 log_must_retry "currently used" 6 userdel $user
2279 function add_group_illumos #<group_name>
2285 groupadd -g $gid $group > /dev/null 2>&1
2288 # The gid is not unique
2295 function del_group_illumos #<group_name>
# groupmod renaming the group to itself is used as an existence probe;
# its exit status is examined by the (unshown) case statement below.
2299 groupmod -n $grp $grp > /dev/null 2>&1
2301 # Group does not exist.
2303 # Name already exists as a group name
2304 9) log_must groupdel $grp ;;
# Linux implementation of user/group management.
2309 function add_user_linux #<group_name> <user_name> <basedir>
2315 log_must useradd -g $group -d $basedir/$user -m $user
2317 # Add new users to the same group and the command line utils.
2318 # This allows them to be run out of the original users home
2319 # directory as long as it permissioned to be group readable.
# Take the group from the installed zfs binary's ownership.
2320 cmd_group=$(stat --format="%G" $(command -v zfs))
2321 log_must usermod -a -G $cmd_group $user
2326 function del_user_linux #<user_name>
2330 if id $user > /dev/null 2>&1; then
# Retry while the account is reported as "currently used".
2331 log_must_retry "currently used" 6 userdel $user
2335 function add_group_linux #<group_name>
2339 # Assign 100 as the base gid, a larger value is selected for
2340 # Linux because for many distributions 1000 and under are reserved.
2342 groupadd $group > /dev/null 2>&1
2350 function del_group_linux #<group_name>
# getent exit status (checked by the unshown case below) tells whether
# the group exists before attempting deletion.
2354 getent group $group > /dev/null 2>&1
2356 # Group does not exist.
2358 # Name already exists as a group name
2359 0) log_must groupdel $group ;;
2367 # Add specified user to specified group
2371 # $3 base of the homedir (optional)
2373 function add_user #<group_name> <user_name> <basedir>
2377 typeset basedir=${3:-"/var/tmp"}
2379 if ((${#group} == 0 || ${#user} == 0)); then
2380 log_fail "group name or user name are not defined."
# Dispatch to the platform-specific implementation.
2385 add_user_freebsd "$group" "$user" "$basedir"
2388 add_user_linux "$group" "$user" "$basedir"
2391 add_user_illumos "$group" "$user" "$basedir"
2399 # Delete the specified user.
2402 # $2 base of the homedir (optional)
2404 function del_user #<logname> <basedir>
2407 typeset basedir=${2:-"/var/tmp"}
2409 if ((${#user} == 0)); then
2410 log_fail "login name is necessary."
2415 del_user_freebsd "$user"
2418 del_user_linux "$user"
2421 del_user_illumos "$user"
# Remove the home directory on every platform.
2425 [[ -d $basedir/$user ]] && rm -fr $basedir/$user
2431 # Select valid gid and create specified group.
2435 function add_group #<group_name>
2439 if ((${#group} == 0)); then
2440 log_fail "group name is necessary."
# Platform dispatch, as in add_user above.
2445 add_group_freebsd "$group"
2448 add_group_linux "$group"
2451 add_group_illumos "$group"
2459 # Delete the specified group.
2463 function del_group #<group_name>
2467 if ((${#group} == 0)); then
2468 log_fail "group name is necessary."
2473 del_group_freebsd "$group"
2476 del_group_linux "$group"
2479 del_group_illumos "$group"
2487 # This function will return true if it's safe to destroy the pool passed
2488 # as argument 1. It checks for pools based on zvols and files, and also
2489 # files contained in a pool that may have a different mountpoint.
2491 function safe_to_destroy_pool { # $1 the pool name
2494 typeset DONT_DESTROY=""
2496 # We check that by deleting the $1 pool, we're not
2497 # going to pull the rug out from other pools. Do this
2498 # by looking at all other pools, ensuring that they
2499 # aren't built from files or zvols contained in this pool.
2501 for pool in $(zpool list -H -o name)
2505 # this is a list of the top-level directories in each of the
2506 # files that make up the path to the files the pool is based on
2507 FILEPOOL=$(zpool status -v $pool | awk -v pool="/$1/" '$0 ~ pool {print $1}')
2509 # this is a list of the zvols that make up the pool
2510 ZVOLPOOL=$(zpool status -v $pool | awk -v zvols="$ZVOL_DEVDIR/$1$" '$0 ~ zvols {print $1}')
2512 # also want to determine if it's a file-based pool using an
2513 # alternate mountpoint...
2514 POOL_FILE_DIRS=$(zpool status -v $pool | \
2515 awk '/\// {print $1}' | \
2516 awk -F/ '!/dev/ {print $2}')
2518 for pooldir in $POOL_FILE_DIRS
# Does any mountpoint of $1's datasets end in this directory name?
2520 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2521 awk -v pd="${pooldir}$" '$0 ~ pd {print $1}')
2523 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
# Any non-empty finding below marks the pool as unsafe to destroy.
2527 if [ ! -z "$ZVOLPOOL" ]
2530 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2533 if [ ! -z "$FILEPOOL" ]
2536 log_note "Pool $pool is built from $FILEPOOL on $1"
2539 if [ ! -z "$ALTMOUNTPOOL" ]
2542 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2546 if [ -z "${DONT_DESTROY}" ]
2550 log_note "Warning: it is not safe to destroy $1!"
2556 # Verify zfs operation with -p option work as expected
2557 # $1 operation, value could be create, clone or rename
2558 # $2 dataset type, value could be fs or vol
2560 # $4 new dataset name
2562 function verify_opt_p_ops
2567 typeset newdataset=$4
2569 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2570 log_fail "$datatype is not supported."
2573 # check parameters accordingly
# For volumes, "create" needs an explicit size argument.
2578 if [[ $datatype == "vol" ]]; then
2579 ops="create -V $VOLSIZE"
2583 if [[ -z $newdataset ]]; then
2584 log_fail "newdataset should not be empty" \
# Clone requires the source snapshot (and its dataset) to exist.
2587 log_must datasetexists $dataset
2588 log_must snapexists $dataset
2591 if [[ -z $newdataset ]]; then
2592 log_fail "newdataset should not be empty" \
2595 log_must datasetexists $dataset
2598 log_fail "$ops is not supported."
2602 # make sure the upper level filesystem does not exist
2603 destroy_dataset "${newdataset%/*}" "-rRf"
2605 # without -p option, operation will fail
2606 log_mustnot zfs $ops $dataset $newdataset
2607 log_mustnot datasetexists $newdataset ${newdataset%/*}
2609 # with -p option, operation should succeed
2610 log_must zfs $ops -p $dataset $newdataset
2613 if ! datasetexists $newdataset ; then
2614 log_fail "-p option does not work for $ops"
2617 # when $ops is create or clone, redo the operation still return zero
2618 if [[ $ops != "rename" ]]; then
2619 log_must zfs $ops -p $dataset $newdataset
2626 # Get configuration of pool
# (Function opening and config-source selection lines are not shown in
# this excerpt.)
2635 if ! poolexists "$pool" ; then
# Pools with cachefile=none cannot be read via the cache file and are
# presumably handled by a different branch — confirm in full source.
2638 if [ "$(get_pool_prop cachefile "$pool")" = "none" ]; then
# Extract the requested "$config:" value, stripping surrounding quotes.
2642 fi | awk -F: -v cfg="$config:" '$0 ~ cfg {sub(/^'\''/, $2); sub(/'\''$/, $2); print $2}'
2646 # Privated function. Random select one of items from arguments.
2651 function _random_get
# Choose a 1-based index into the space-separated argument string.
2658 ((ind = RANDOM % cnt + 1))
2660 echo "$str" | cut -f $ind -d ' '
2664 # Random select one of item from arguments which include NONE string
2666 function random_get_with_non
# cnt presumably includes one extra slot for the implicit NONE entry —
# confirm against the full source.
2671 _random_get "$cnt" "$@"
2675 # Random select one of item from arguments which doesn't include NONE string
2679 _random_get "$#" "$@"
2683 # The function will generate a dataset name with specific length
2684 # $1, the length of the name
2685 # $2, the base string to construct the name
2687 function gen_dataset_name
2690 typeset basestr="$2"
2691 typeset -i baselen=${#basestr}
# Repeat basestr enough times to reach (at least) the requested length.
2695 if ((len % baselen == 0)); then
2696 ((iter = len / baselen))
2698 ((iter = len / baselen + 1))
2700 while ((iter > 0)); do
2701 l_name="${l_name}$basestr"
2710 # Get cksum tuple of dataset
2713 # sample zdb output:
2714 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2715 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2716 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2717 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2718 function datasetcksum
# With -F'=' the cksum tuple lands in field 7 of the matching
# "Dataset <name> [...]" line (see sample above).
2723 zdb -vvv $1 | awk -F= -v ds="^Dataset $1 "'\\[' '$0 ~ ds && /cksum/ {print $7}'
2727 # Get the given disk/slice state from the specific field of the pool
2729 function get_device_state #pool disk field("", "spares","logs")
2732 typeset disk=${2#$DEV_DSKDIR/}
# With no field given, search starts at the pool's own config line.
2733 typeset field=${3:-$pool}
# Scan zpool status: once inside "config:", find the requested section
# header, then print the state column of the matching device line.
2735 zpool status -v "$pool" 2>/dev/null | \
2736 awk -v device=$disk -v pool=$pool -v field=$field \
2737 'BEGIN {startconfig=0; startfield=0; }
2738 /config:/ {startconfig=1}
2739 (startconfig==1) && ($1==field) {startfield=1; next;}
2740 (startfield==1) && ($1==device) {print $2; exit;}
2742 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}'
2746 # get the root filesystem name if it's zfsroot system.
2748 # return: root filesystem name
# FreeBSD: mount -p emits fstab-style lines; pick the zfs "/" entry.
2754 rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
2755 elif ! is_linux; then
# illumos: same fields read from the mount table file (not shown).
2756 rootfs=$(awk '$2 == "/" && $3 == "zfs" {print $1}' \
2759 if [[ -z "$rootfs" ]]; then
2760 log_fail "Can not get rootfs"
2762 if datasetexists $rootfs; then
2765 log_fail "This is not a zfsroot system."
2770 # get the rootfs's pool name
2774 function get_rootpool
# The pool name is the first component of the root dataset's name.
2776 typeset rootfs=$(get_rootfs)
2781 # To verify if the require numbers of disks is given
2783 function verify_disk_count
2785 typeset -i min=${2:-1}
# Count whitespace-separated disk names in $1.
2787 typeset -i count=$(echo "$1" | wc -w)
2789 if ((count < min)); then
2790 log_untested "A minimum of $min disks is required to run." \
2791 " You specified $count disk(s)"
# True if the given dataset is a ZFS volume.
2795 function ds_is_volume
2797 typeset type=$(get_prop type $1)
2798 [ $type = "volume" ]
# True if the given dataset is a ZFS filesystem.
2801 function ds_is_filesystem
2803 typeset type=$(get_prop type $1)
2804 [ $type = "filesystem" ]
2808 # Check if Trusted Extensions are installed and enabled
2810 function is_te_enabled
# The labeld SMF service being "enabled" indicates TX is active.
2812 svcs -H -o state labeld 2>/dev/null | grep -q "enabled"
2815 # Return the number of CPUs (cross-platform)
2816 function get_num_cpus
# Linux: count processor entries; FreeBSD: ask the kernel directly.
2819 grep -c '^processor' /proc/cpuinfo
2820 elif is_freebsd; then
2821 sysctl -n kern.smp.cpus
2827 # Utility function to determine if a system has multiple cpus.
2830 [[ $(get_num_cpus) -gt 1 ]]
# Report the CPU frequency using each platform's native tool.
2833 function get_cpu_freq
2836 lscpu | awk '/CPU MHz/ { print $3 }'
2837 elif is_freebsd; then
2838 sysctl -n hw.clockrate
2840 psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2844 # Run the given command as the user provided.
2850 log_note "user: $user"
# Capture the command's stdout/stderr in files for logging below.
2853 typeset out=$TEST_BASE_DIR/out
2854 typeset err=$TEST_BASE_DIR/err
# Feed the command to a fresh ksh over stdin while preserving PATH
# through sudo (-E keeps the environment; env re-exports PATH).
2856 sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
2858 log_note "out: $(<$out)"
2859 log_note "err: $(<$err)"
2864 # Check if the pool contains the specified vdevs
2869 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2870 # vdevs is not in the pool, and 2 if pool name is missing.
2872 function vdevs_in_pool
2877 if [[ -z $pool ]]; then
2878 log_note "Missing pool name."
2884 # We could use 'zpool list' to only get the vdevs of the pool but we
2885 # can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
2886 # therefore we use the 'zpool status' output.
2887 typeset tmpfile=$(mktemp)
# Keep only the config section (everything after "config:").
2888 zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
2889 for vdev in "$@"; do
# Whole-word match on the basename of each requested vdev.
2890 grep -wq ${vdev##*/} $tmpfile || return 1
# (Fragment of a separate max helper; its surrounding lines are not
# shown in this excerpt.)
2903 max=$((max > i ? max : i))
2909 # Write data that can be compressed into a directory
2910 function write_compressible
2914 typeset nfiles=${3:-1}
2915 typeset bs=${4:-1024k}
2916 typeset fname=${5:-file}
2918 [[ -d $dir ]] || log_fail "No directory: $dir"
2920 # Under Linux fio is not currently used since its behavior can
2921 # differ significantly across versions. This includes missing
2922 # command line options and cases where the --buffer_compress_*
2923 # options fail to behave as expected.
2925 typeset file_bytes=$(to_bytes $megs)
2926 typeset bs_bytes=4096
2927 typeset blocks=$(($file_bytes / $bs_bytes))
2929 for (( i = 0; i < $nfiles; i++ )); do
# Pre-size the sparse file, then fill selected blocks below.
2930 truncate -s $file_bytes $dir/$fname.$i
2932 # Write every third block to get 66% compression.
2933 for (( j = 0; j < $blocks; j += 3 )); do
2934 dd if=/dev/urandom of=$dir/$fname.$i \
2935 seek=$j bs=$bs_bytes count=1 \
2936 conv=notrunc >/dev/null 2>&1
# Non-Linux path: let fio generate ~66% compressible data.
2940 command -v fio > /dev/null || log_unsupported "fio missing"
2946 --buffer_compress_percentage=66 \
2947 --buffer_compress_chunk=4096 \
2948 --directory="$dir" \
2949 --numjobs="$nfiles" \
2950 --nrfiles="$nfiles" \
2953 --filesize="$megs" \
2954 "--filename_format='$fname.\$jobnum' >/dev/null"
# get_objnum: print the object (inode) number of the given path.
2963 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
# FreeBSD stat uses -f "%i"; Linux/coreutils stat uses -c %i.
2965 objnum=$(stat -f "%i" $pathname)
2967 objnum=$(stat -c %i $pathname)
2973 # Sync data to the pool
2976 # $2 boolean to force uberblock (and config including zpool cache file) update
2978 function sync_pool #pool <force>
2980 typeset pool=${1:-$TESTPOOL}
2981 typeset force=${2:-false}
2983 if [[ $force == true ]]; then
2984 log_must zpool sync -f $pool
2986 log_must zpool sync $pool
2995 # $1 boolean to force uberblock (and config including zpool cache file) update
2997 function sync_all_pools #<force>
2999 typeset force=${1:-false}
# With no pool argument, zpool sync flushes every imported pool.
3001 if [[ $force == true ]]; then
3002 log_must zpool sync -f
3011 # Wait for zpool 'freeing' property drops to zero.
3015 function wait_freeing #pool
3017 typeset pool=${1:-$TESTPOOL}
3019 [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3025 # Wait for every device replace operation to complete
3030 function wait_replacing #pool timeout
# Poll once per loop iteration, up to $timeout iterations (default 300).
3032 typeset timeout=${2:-300}
3033 typeset pool=${1:-$TESTPOOL}
3034 for (( timer = 0; timer < $timeout; timer++ )); do
3035 is_pool_replacing $pool || break;
3040 # Wait for a pool to be scrubbed
3045 function wait_scrubbed #pool timeout
3047 typeset timeout=${2:-300}
3048 typeset pool=${1:-$TESTPOOL}
3049 for (( timer = 0; timer < $timeout; timer++ )); do
3050 is_pool_scrubbed $pool && break;
3055 # Backup the zed.rc in our test directory so that we can edit it for our test.
3057 # Returns: Backup file name. You will need to pass this to zed_rc_restore().
3058 function zed_rc_backup
3060 zedrc_backup="$(mktemp)"
3061 cp $ZEDLET_DIR/zed.rc $zedrc_backup
# Restore a zed.rc backup created by zed_rc_backup ($1 = backup path).
3065 function zed_rc_restore
3067 mv $1 $ZEDLET_DIR/zed.rc
3071 # Setup custom environment for the ZED.
3073 # $@ Optional list of zedlets to run under zed.
3077 log_unsupported "No zed on $UNAME"
3080 if [[ ! -d $ZEDLET_DIR ]]; then
3081 log_must mkdir $ZEDLET_DIR
3084 if [[ ! -e $VDEVID_CONF ]]; then
3085 log_must touch $VDEVID_CONF
# Refuse to clobber a pre-existing system vdev_id.conf.
3088 if [[ -e $VDEVID_CONF_ETC ]]; then
3089 log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3093 # Create a symlink for /etc/zfs/vdev_id.conf file.
3094 log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3096 # Setup minimal ZED configuration. Individual test cases should
3097 # add additional ZEDLETs as needed for their specific test.
3098 log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3099 log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
3101 # Scripts must only be user writable.
3102 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3103 saved_umask=$(umask)
3105 for i in $EXTRA_ZEDLETS ; do
3106 log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3108 log_must umask $saved_umask
3111 # Customize the zed.rc file to enable the full debug log.
3112 log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
3113 echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3118 # Cleanup custom ZED environment.
3120 # $@ Optional list of zedlets to remove from our test zed.d directory.
3121 function zed_cleanup
3127 for extra_zedlet; do
3128 log_must rm -f ${ZEDLET_DIR}/$extra_zedlet
3130 log_must rm -fd ${ZEDLET_DIR}/zed.rc ${ZEDLET_DIR}/zed-functions.sh ${ZEDLET_DIR}/all-syslog.sh ${ZEDLET_DIR}/all-debug.sh ${ZEDLET_DIR}/state \
3131 $ZED_LOG $ZED_DEBUG_LOG $VDEVID_CONF_ETC $VDEVID_CONF \
3136 # Check if ZED is currently running; if so, returns PIDs
# Both the installed daemon ("zed") and the libtool-wrapped in-tree
# build ("lt-zed") are checked.
3143 zedpids="$(pgrep -x zed)"
3144 zedpids2="$(pgrep -x lt-zed)"
3145 echo ${zedpids} ${zedpids2}
3149 # Check if ZED is currently running, if not start ZED.
3157 # ZEDLET_DIR=/var/tmp/zed
3158 if [[ ! -d $ZEDLET_DIR ]]; then
3159 log_must mkdir $ZEDLET_DIR
3162 # Verify the ZED is not already running.
3163 zedpids=$(zed_check)
3164 if [ -n "$zedpids" ]; then
3165 # We never, ever, really want it to just keep going if zed
3166 # is already running - usually this implies our test cases
3167 # will break very strangely because whatever we wanted to
3168 # configure zed for won't be listening to our changes in the
3170 log_fail "ZED already running - ${zedpids}"
3172 log_note "Starting ZED"
3173 # run ZED in the background and redirect foreground logging
3174 # output to $ZED_LOG.
3175 log_must truncate -s 0 $ZED_DEBUG_LOG
3176 log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
3177 "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
# zed_stop: terminate any running ZED instances.
3192 log_note "Stopping ZED"
3194 zedpids=$(zed_check)
3195 [ ! -n "$zedpids" ] && break
3197 log_must kill $zedpids
# Drain all outstanding zpool events so a test starts from a clean
# event queue.
3206 function zed_events_drain
3208 while [ $(zpool events -H | wc -l) -ne 0 ]; do
3210 zpool events -c >/dev/null
3214 # Set a variable in zed.rc to something, un-commenting it in the process.
# $cmd is presumably a sed expression built on lines not shown here;
# if no line was rewritten, the assignment is appended instead.
3224 eval sed -i $cmd $ZEDLET_DIR/zed.rc
3227 echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3232 # Check is provided device is being active used as a swap device.
3234 function is_swap_inuse
3238 if [[ -z $device ]] ; then
3239 log_note "No device specified."
# Linux: swapon lists resolved paths, so resolve symlinks first.
3245 swapon -s | grep -wq $(readlink -f $device)
# FreeBSD path.
3248 swapctl -l | grep -wq $device
# illumos path.
3251 swap -l | grep -wq $device
3257 # Setup a swap device using the provided device.
# Linux: initialize with mkswap before enabling.
3265 log_must eval "mkswap $swapdev > /dev/null 2>&1"
3266 log_must swapon $swapdev
3269 log_must swapctl -a $swapdev
3272 log_must swap -a $swapdev
3280 # Cleanup a swap device on the provided device.
3282 function swap_cleanup
3286 if is_swap_inuse $swapdev; then
3288 log_must swapoff $swapdev
3289 elif is_freebsd; then
3290 log_must swapoff $swapdev
3292 log_must swap -d $swapdev
3300 # Set a global system tunable (64-bit value)
3302 # $1 tunable name (use a NAME defined in tunables.cfg)
3305 function set_tunable64
# 'Z' selects mdb's 64-bit write format on illumos.
3307 set_tunable_impl "$1" "$2" Z
3311 # Set a global system tunable (32-bit value)
3313 # $1 tunable name (use a NAME defined in tunables.cfg)
3316 function set_tunable32
# 'W' selects mdb's 32-bit write format on illumos.
3318 set_tunable_impl "$1" "$2" W
3321 function set_tunable_impl
3325 typeset mdb_cmd="$3"
# Map the generic NAME to its platform-specific tunable via the
# variables defined in tunables.cfg (sourced at the top of the file).
3327 eval "typeset tunable=\$$name"
3330 log_unsupported "Tunable '$name' is unsupported on $UNAME"
3333 log_fail "Tunable '$name' must be added to tunables.cfg"
3339 [[ -z "$value" ]] && return 1
3340 [[ -z "$mdb_cmd" ]] && return 1
# Linux: module parameters live in sysfs.
3344 typeset zfs_tunables="/sys/module/zfs/parameters"
3345 echo "$value" >"$zfs_tunables/$tunable"
# FreeBSD: tunables are sysctls under vfs.zfs.
3348 sysctl vfs.zfs.$tunable=$value
# illumos: poke the kernel variable with mdb (W=32-bit, Z=64-bit).
3351 echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
# Save a tunable's current value to $TEST_BASE_DIR/tunable-<NAME>.
3356 function save_tunable
3358 [[ ! -d $TEST_BASE_DIR ]] && return 1
# Refuse to overwrite an existing saved value.
3359 [[ -e $TEST_BASE_DIR/tunable-$1 ]] && return 2
3360 echo "$(get_tunable """$1""")" > "$TEST_BASE_DIR"/tunable-"$1"
# Restore a tunable previously stashed by save_tunable.
3363 function restore_tunable
3365 [[ ! -e $TEST_BASE_DIR/tunable-$1 ]] && return 1
3366 val="$(cat $TEST_BASE_DIR/tunable-"""$1""")"
3367 set_tunable64 "$1" "$val"
3368 rm $TEST_BASE_DIR/tunable-$1
3372 # Get a global system tunable
3374 # $1 tunable name (use a NAME defined in tunables.cfg)
3376 function get_tunable
3378 get_tunable_impl "$1"
3381 function get_tunable_impl
# $2 selects a kernel module other than zfs; $3 non-empty means only
# probe for existence instead of failing hard.
3384 typeset module="${2:-zfs}"
3385 typeset check_only="$3"
3387 eval "typeset tunable=\$$name"
3390 if [ -z "$check_only" ] ; then
3391 log_unsupported "Tunable '$name' is unsupported on $UNAME"
3397 if [ -z "$check_only" ] ; then
3398 log_fail "Tunable '$name' must be added to tunables.cfg"
3409 typeset zfs_tunables="/sys/module/$module/parameters"
3410 cat $zfs_tunables/$tunable
3413 sysctl -n vfs.zfs.$tunable
# NOTE(review): '-eq' is an arithmetic comparison; for comparing the
# module name string this should presumably be '==' — with '-eq' both
# non-numeric operands evaluate to 0 and always compare equal. Confirm
# against upstream before changing.
3416 [[ "$module" -eq "zfs" ]] || return 1
3421 # Does a tunable exist?
3424 function tunable_exists
# check_only=1 suppresses log_unsupported/log_fail in the impl.
3426 get_tunable_impl $1 "zfs" 1
3430 # Compute MD5 digest for given file or stdin if no file given.
3431 # Note: file path must not contain spaces
# 'read' splits the digest off md5sum's "<sum> <file>" output.
3443 read -r sum _ < <(md5sum -b $file)
3450 # Compute SHA256 digest for given file or stdin if no file given.
3451 # Note: file path must not contain spaces
3453 function sha256digest
3463 read -r sum _ < <(sha256sum -b $file)
# Create a new filesystem on the given device, answering 'y' to the
# newfs confirmation prompt.
3469 function new_fs #<args>
3476 echo y | newfs -v "$@"
# Cross-platform stat(1) wrappers; per-platform bodies are not shown
# in this excerpt.
3481 function stat_size #<path>
3495 function stat_mtime #<path>
3509 function stat_ctime #<path>
3523 function stat_crtime #<path>
3537 function stat_generation #<path>
# Linux uses getversion; FreeBSD stat reports the generation via %v.
3543 getversion "${path}"
3546 stat -f %v "${path}"
3551 # Run a command as if it was being run in a TTY.
# Platform-specific invocations of script(1) to provide a pseudo-TTY.
3560 script -q /dev/null env "$@"
3562 script --return --quiet -c "$*" /dev/null
3567 # Produce a random permutation of the integers in a given range (inclusive).
3569 function range_shuffle # begin end
3574 seq ${begin} ${end} | sort -R
3578 # Cross-platform xattr helpers
# Each helper has a FreeBSD (extattr tools) branch and a Linux (attr
# tools) branch; the branch conditions are not shown in this excerpt.
3581 function get_xattr # name path
3588 getextattr -qq user "${name}" "${path}"
3591 attr -qg "${name}" "${path}"
3596 function set_xattr # name value path
3604 setextattr user "${name}" "${value}" "${path}"
3607 attr -qs "${name}" -V "${value}" "${path}"
# Set an xattr whose value is supplied on stdin.
3612 function set_xattr_stdin # name value
3619 setextattr -i user "${name}" "${path}"
3622 attr -qs "${name}" "${path}"
3627 function rm_xattr # name path
3634 rmextattr -q user "${name}" "${path}"
3637 attr -qr "${name}" "${path}"
3642 function ls_xattr # path
3648 lsextattr -qq user "${path}"
# Read a ZFS kernel statistic: FreeBSD via sysctl, Linux via procfs.
3656 function kstat # stat flags?
# Default -n prints only the value (FreeBSD sysctl).
3659 typeset flags=${2-"-n"}
3663 sysctl $flags kstat.zfs.misc.$stat
3666 cat "/proc/spl/kstat/zfs/$stat" 2>/dev/null
# Print the value of a single ARC statistic.
3674 function get_arcstat # stat
# FreeBSD: the kstat path addresses the individual stat directly.
3680 kstat arcstats.$stat
# Linux: grep the named row out of the arcstats table; the value is the
# third column. Note $stat is interpolated into the awk pattern.
3683 kstat arcstats | awk "/$stat/"' { print $3 }'
# Deallocate (punch a hole in) a byte range of a file.
3691 function punch_hole # offset length file
# FreeBSD: truncate -d punches a hole at the given offset/length.
3699 truncate -d -o $offset -l $length "$file"
# Linux: fallocate(1) hole punching.
3702 fallocate --punch-hole --offset $offset --length $length "$file"
# Zero a byte range of a file without changing its size (Linux fallocate
# zero-range; other platforms are handled in the elided lines, if at all).
3710 function zero_range # offset length file
3718 fallocate --zero-range --offset $offset --length $length "$file"
3727 # Wait for the specified arcstat to reach non-zero quiescence.
3728 # If echo is 1 echo the value after reaching quiescence, otherwise
3729 # if echo is 0 print the arcstat we are waiting on.
3731 function arcstat_quiescence # stat echo
# do_once forces at least one iteration of the sampling loop below.
3735 typeset do_once=true
3737 if [[ $echo -eq 0 ]]; then
3738 echo "Waiting for arcstat $1 quiescence."
# Loop until two consecutive samples are equal and non-zero (the sleep
# between the two samples is in the elided lines).
3741 while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
3742 typeset stat1=$(get_arcstat $stat)
3744 typeset stat2=$(get_arcstat $stat)
# Once quiescent, optionally report the settled value.
3748 if [[ $echo -eq 1 ]]; then
# Convenience wrapper: wait for quiescence without echoing the final value.
3753 function arcstat_quiescence_noecho # stat
3756 arcstat_quiescence $stat 0
# Convenience wrapper: wait for quiescence and echo the final value.
3759 function arcstat_quiescence_echo # stat
3762 arcstat_quiescence $stat 1
3766 # Given an array of pids, wait until all processes
3767 # have completed and check their return status.
3769 function wait_for_children #children
3773 for child in "${children[@]}"
# Capture a non-zero exit status without aborting the loop, so every
# child is reaped even if an earlier one failed.
3776 wait ${child} || child_exit=$?
3777 if [ $child_exit -ne 0 ]; then
3778 echo "child ${child} failed with ${child_exit}"
3786 # Compare two directory trees recursively in a manner similar to diff(1), but
3787 # using rsync. If there are any discrepancies, a summary of the differences is
3788 # output and a non-zero error is returned.
3790 # If you're comparing a directory after a ZIL replay, you should set
3791 # LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause
3792 # directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
3795 function directory_diff # dir_a dir_b
# zil_replay defaults to 0 (compare timestamps) unless the caller opts out.
3799 zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"
3801 # If one of the directories doesn't exist, return 2. This is to match the
3802 # semantics of diff.
3803 if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
3807 # Run rsync with --dry-run --itemize-changes to get something akin to diff
3808 # output, but rsync is far more thorough in detecting differences (diff
3809 # doesn't compare file metadata, and cannot handle special files).
3811 # Also make sure to filter out non-user.* xattrs when comparing. On
3812 # SELinux-enabled systems the copied tree will probably have different
# -n=dry-run, -i=itemize, -c=checksum, -aAHX=archive+ACLs+hardlinks+xattrs.
3814 args=("-nicaAHX" '--filter=-x! user.*' "--delete")
3816 # NOTE: Quite a few rsync builds do not support --crtimes which would be
3817 # necessary to verify that creation times are being maintained properly.
3818 # Unfortunately because of this we cannot use it unconditionally but we can
3819 # check if this rsync build supports it and use it then. This check is
3820 # based on the same check in the rsync test suite (testsuite/crtimes.test).
3822 # We check crtimes even with zil_replay=1 because the ZIL does store
3823 # creation times and we should make sure they match (if the creation times
3824 # do not match there is a "c" entry in one of the columns).
3825 if rsync --version | grep -q "[, ] crtimes"; then
3828 log_note "This rsync package does not support --crtimes (-N)."
3831 # If we are testing a ZIL replay, we need to ignore timestamp changes.
3832 # Unfortunately --no-times doesn't do what we want -- it will still tell
3833 # you if the timestamps don't match but rsync will set the timestamps to
3834 # the current time (leading to an itemised change entry). It's simpler to
3835 # just filter out those lines.
3836 if [ "$zil_replay" -eq 0 ]; then
3839 # Different rsync versions have different numbers of columns. So just
3840 # require that aside from the first two, all other columns must be
3841 # blank (literal ".") or a timestamp field ("[tT]").
3842 filter=("grep" "-v" '^\..[.Tt]\+ ')
# Any surviving itemized-change line means the trees differ.
3845 diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
3847 if [ -n "$diff" ]; then
3855 # Compare two directory trees recursively, without checking whether the mtimes
3856 # match (creation times will be checked if the available rsync binary supports
3857 # it). This is necessary for ZIL replay checks (because the ZIL does not
3858 # contain mtimes and thus after a ZIL replay, mtimes won't match).
3860 # This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
3862 function replay_directory_diff # dir_a dir_b
# Set the env var only for this single invocation of directory_diff.
3864 LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"
3868 # Put coredumps into $1/core.{basename}
3870 # Output must be saved and passed to pop_coredump_pattern on cleanup
3872 function push_coredump_pattern # dir
# Linux: emit the current settings (the caller saves this output for
# restore), then install the new pattern and disable PID suffixing.
3877 cat /proc/sys/kernel/core_pattern /proc/sys/kernel/core_uses_pid
3878 echo "$1/core.%e" >/proc/sys/kernel/core_pattern &&
3879 echo 0 >/proc/sys/kernel/core_uses_pid
# FreeBSD: print the old corefile template, then set the new one.
3882 sysctl -n kern.corefile
3883 sysctl kern.corefile="$1/core.%N" >/dev/null
3886 # Nothing to output - set only for this shell
3887 coreadm -p "$1/core.%f"
3893 # Put coredumps back into the default location
# $1: file containing the state saved by push_coredump_pattern.
3895 function pop_coredump_pattern
# An empty/missing state file means there is nothing to restore.
3897 [ -s "$1" ] || return 0
# Linux: the saved state is two lines — pattern, then core_uses_pid.
3901 { read -r pat; read -r pid; } < "$1"
3902 echo "$pat" >/proc/sys/kernel/core_pattern &&
3903 echo "$pid" >/proc/sys/kernel/core_uses_pid
# FreeBSD: the saved state is the old kern.corefile template.
3906 sysctl kern.corefile="$(<"$1")" >/dev/null