1 #
2 # CDDL HEADER START
3 #
4 # The contents of this file are subject to the terms of the
5 # Common Development and Distribution License (the "License").
6 # You may not use this file except in compliance with the License.
7 #
8 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 # or http://www.opensolaris.org/os/licensing.
10 # See the License for the specific language governing permissions
11 # and limitations under the License.
12 #
13 # When distributing Covered Code, include this CDDL HEADER in each
14 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 # If applicable, add the following below this CDDL HEADER, with the
16 # fields enclosed by brackets "[]" replaced with your own identifying
17 # information: Portions Copyright [yyyy] [name of copyright owner]
18 #
19 # CDDL HEADER END
20 #
21
22 #
23 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 # Use is subject to license terms.
25 # Copyright (c) 2012, 2017 by Delphix. All rights reserved.
26 # Copyright (c) 2017 by Tim Chase. All rights reserved.
27 # Copyright (c) 2017 by Nexenta Systems, Inc. All rights reserved.
28 # Copyright (c) 2017 Lawrence Livermore National Security, LLC.
29 # Copyright (c) 2017 Datto Inc.
30 # Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
31 # Use is subject to license terms.
32 #
33
34 . ${STF_TOOLS}/include/logapi.shlib
35 . ${STF_SUITE}/include/math.shlib
36 . ${STF_SUITE}/include/blkdev.shlib
37
38 #
39 # Apply constrained path when available. This is required since the
40 # PATH may have been modified by sudo's secure_path behavior.
41 #
42 if [ -n "$STF_PATH" ]; then
43 PATH="$STF_PATH"
44 fi
45
46 #
47 # Generic dot version comparison function
48 #
49 # Returns success when version $1 is greater than or equal to $2.
50 #
51 function compare_version_gte
52 {
53 if [[ "$(printf "$1\n$2" | sort -V | tail -n1)" == "$1" ]]; then
54 return 0
55 else
56 return 1
57 fi
58 }
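#
# Usage sketch (illustrative only; the version strings below are arbitrary
# examples, not values used elsewhere in the suite):
#
#	if compare_version_gte "$(uname -r)" "4.10.0"; then
#		log_note "kernel is at least 4.10.0"
#	fi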
59
60 # Linux kernel version comparison function
61 #
62 # $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
63 #
64 # Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
65 #
66 function linux_version
67 {
68 typeset ver="$1"
69
70 [[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
71
72 typeset version=$(echo $ver | cut -d '.' -f 1)
73 typeset major=$(echo $ver | cut -d '.' -f 2)
74 typeset minor=$(echo $ver | cut -d '.' -f 3)
75
76 [[ -z "$version" ]] && version=0
77 [[ -z "$major" ]] && major=0
78 [[ -z "$minor" ]] && minor=0
79
80 echo $((version * 10000 + major * 100 + minor))
81 }
82
83 # Determine if this is a Linux test system
84 #
85 # Return 0 if the platform is Linux, 1 otherwise
86
87 function is_linux
88 {
89 if [[ $(uname -o) == "GNU/Linux" ]]; then
90 return 0
91 else
92 return 1
93 fi
94 }
95
96 # Determine if this is a 32-bit system
97 #
98 # Return 0 if the platform is 32-bit, 1 otherwise
99
100 function is_32bit
101 {
102 if [[ $(getconf LONG_BIT) == "32" ]]; then
103 return 0
104 else
105 return 1
106 fi
107 }
108
109 # Determine if kmemleak is enabled
110 #
111 # Return 0 if kmemleak is enabled, 1 otherwise
112
113 function is_kmemleak
114 {
115 if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
116 return 0
117 else
118 return 1
119 fi
120 }
121
122 # Determine whether a dataset is mounted
123 #
124 # $1 dataset name
125 # $2 filesystem type; optional - defaulted to zfs
126 #
127 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
128
129 function ismounted
130 {
131 typeset fstype=$2
132 [[ -z $fstype ]] && fstype=zfs
133 typeset out dir name ret
134
135 case $fstype in
136 zfs)
137 if [[ "$1" == "/"* ]] ; then
138 for out in $(zfs mount | awk '{print $2}'); do
139 [[ $1 == $out ]] && return 0
140 done
141 else
142 for out in $(zfs mount | awk '{print $1}'); do
143 [[ $1 == $out ]] && return 0
144 done
145 fi
146 ;;
147 ufs|nfs)
148 out=$(df -F $fstype $1 2>/dev/null)
149 ret=$?
150 (($ret != 0)) && return $ret
151
152 dir=${out%%\(*}
153 dir=${dir%% *}
154 name=${out##*\(}
155 name=${name%%\)*}
156 name=${name%% *}
157
158 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
159 ;;
160 ext*)
161 out=$(df -t $fstype $1 2>/dev/null)
162 return $?
163 ;;
164 zvol)
165 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
166 link=$(readlink -f $ZVOL_DEVDIR/$1)
167 [[ -n "$link" ]] && \
168 mount | grep -q "^$link" && \
169 return 0
170 fi
171 ;;
172 esac
173
174 return 1
175 }
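#
# Usage sketch (illustrative; assumes the suite's standard $TESTPOOL/$TESTFS
# dataset defined by the test environment):
#
#	if ismounted $TESTPOOL/$TESTFS; then
#		log_must zfs unmount $TESTPOOL/$TESTFS
#	fi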
176
177 # Return 0 if a dataset is mounted; 1 otherwise
178 #
179 # $1 dataset name
180 # $2 filesystem type; optional - defaulted to zfs
181
182 function mounted
183 {
184 ismounted $1 $2
185 (($? == 0)) && return 0
186 return 1
187 }
188
189 # Return 0 if a dataset is unmounted; 1 otherwise
190 #
191 # $1 dataset name
192 # $2 filesystem type; optional - defaulted to zfs
193
194 function unmounted
195 {
196 ismounted $1 $2
197 (($? == 1)) && return 0
198 return 1
199 }
200
201 # split line on ","
202 #
203 # $1 - line to split
204
205 function splitline
206 {
207 echo $1 | sed "s/,/ /g"
208 }
209
210 function default_setup
211 {
212 default_setup_noexit "$@"
213
214 log_pass
215 }
216
217 function default_setup_no_mountpoint
218 {
219 default_setup_noexit "$1" "$2" "$3" "yes"
220
221 log_pass
222 }
223
224 #
225 # Given a list of disks, setup storage pools and datasets.
226 #
227 function default_setup_noexit
228 {
229 typeset disklist=$1
230 typeset container=$2
231 typeset volume=$3
232 typeset no_mountpoint=$4
233 log_note begin default_setup_noexit
234
235 if is_global_zone; then
236 if poolexists $TESTPOOL ; then
237 destroy_pool $TESTPOOL
238 fi
239 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
240 log_must zpool create -f $TESTPOOL $disklist
241 else
242 reexport_pool
243 fi
244
245 rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
246 mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
247
248 log_must zfs create $TESTPOOL/$TESTFS
249 if [[ -z $no_mountpoint ]]; then
250 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
251 fi
252
253 if [[ -n $container ]]; then
254 rm -rf $TESTDIR1 || \
255 log_unresolved Could not remove $TESTDIR1
256 mkdir -p $TESTDIR1 || \
257 log_unresolved Could not create $TESTDIR1
258
259 log_must zfs create $TESTPOOL/$TESTCTR
260 log_must zfs set canmount=off $TESTPOOL/$TESTCTR
261 log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
262 if [[ -z $no_mountpoint ]]; then
263 log_must zfs set mountpoint=$TESTDIR1 \
264 $TESTPOOL/$TESTCTR/$TESTFS1
265 fi
266 fi
267
268 if [[ -n $volume ]]; then
269 if is_global_zone ; then
270 log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
271 block_device_wait
272 else
273 log_must zfs create $TESTPOOL/$TESTVOL
274 fi
275 fi
276 }
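#
# Usage sketch (illustrative; $DISKS comes from the test environment and the
# extra arguments are optional):
#
#	default_setup_noexit "$DISKS"			# pool and file system only
#	default_setup_noexit "$DISKS" "true" "true"	# also container and volume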
277
278 #
279 # Given a list of disks, setup a storage pool, file system and
280 # a container.
281 #
282 function default_container_setup
283 {
284 typeset disklist=$1
285
286 default_setup "$disklist" "true"
287 }
288
289 #
290 # Given a list of disks, setup a storage pool, file system
291 # and a volume.
292 #
293 function default_volume_setup
294 {
295 typeset disklist=$1
296
297 default_setup "$disklist" "" "true"
298 }
299
300 #
301 # Given a list of disks, setup a storage pool, file system,
302 # a container and a volume.
303 #
304 function default_container_volume_setup
305 {
306 typeset disklist=$1
307
308 default_setup "$disklist" "true" "true"
309 }
310
311 #
312 # Create a snapshot on a filesystem or volume. By default, creates a snapshot
313 # on the filesystem.
314 #
315 # $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
316 # $2 snapshot name. Default, $TESTSNAP
317 #
318 function create_snapshot
319 {
320 typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
321 typeset snap=${2:-$TESTSNAP}
322
323 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
324 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
325
326 if snapexists $fs_vol@$snap; then
327 log_fail "$fs_vol@$snap already exists."
328 fi
329 datasetexists $fs_vol || \
330 log_fail "$fs_vol must exist."
331
332 log_must zfs snapshot $fs_vol@$snap
333 }
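#
# Usage sketch (illustrative; falls back to $TESTPOOL/$TESTFS@$TESTSNAP when
# called with no arguments):
#
#	create_snapshot					# default fs and snap name
#	create_snapshot $TESTPOOL/$TESTVOL $TESTSNAP	# snapshot of the volume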
334
335 #
336 # Create a clone from a snapshot, default clone name is $TESTCLONE.
337 #
338 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
339 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
340 #
341 function create_clone # snapshot clone
342 {
343 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
344 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
345
346 [[ -z $snap ]] && \
347 log_fail "Snapshot name is undefined."
348 [[ -z $clone ]] && \
349 log_fail "Clone name is undefined."
350
351 log_must zfs clone $snap $clone
352 }
353
354 #
355 # Create a bookmark of the given snapshot. By default, creates a bookmark on
356 # the filesystem.
357 #
358 # $1 Existing filesystem or volume name. Default, $TESTFS
359 # $2 Existing snapshot name. Default, $TESTSNAP
360 # $3 bookmark name. Default, $TESTBKMARK
361 #
362 function create_bookmark
363 {
364 typeset fs_vol=${1:-$TESTFS}
365 typeset snap=${2:-$TESTSNAP}
366 typeset bkmark=${3:-$TESTBKMARK}
367
368 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
369 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
370 [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
371
372 if bkmarkexists $fs_vol#$bkmark; then
373 log_fail "$fs_vol#$bkmark already exists."
374 fi
375 datasetexists $fs_vol || \
376 log_fail "$fs_vol must exist."
377 snapexists $fs_vol@$snap || \
378 log_fail "$fs_vol@$snap must exist."
379
380 log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
381 }
382
383 #
384 # Create a temporary clone result of an interrupted resumable 'zfs receive'
385 # $1 Destination filesystem name. Must not exist, will be created as the result
386 # of this function along with its %recv temporary clone
387 # $2 Source filesystem name. Must not exist, will be created and destroyed
388 #
389 function create_recv_clone
390 {
391 typeset recvfs="$1"
392 typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
393 typeset snap="$sendfs@snap1"
394 typeset incr="$sendfs@snap2"
395 typeset mountpoint="$TESTDIR/create_recv_clone"
396 typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
397
398 [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
399
400 datasetexists $recvfs && log_fail "Recv filesystem must not exist."
401 datasetexists $sendfs && log_fail "Send filesystem must not exist."
402
403 log_must zfs create -o mountpoint="$mountpoint" $sendfs
404 log_must zfs snapshot $snap
405 log_must eval "zfs send $snap | zfs recv -u $recvfs"
406 log_must mkfile 1m "$mountpoint/data"
407 log_must zfs snapshot $incr
408 log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 > $sendfile"
409 log_mustnot eval "zfs recv -su $recvfs < $sendfile"
410 destroy_dataset "$sendfs" "-r"
411 log_must rm -f "$sendfile"
412
413 if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
414 log_fail "Error creating temporary $recvfs/%recv clone"
415 fi
416 }
417
418 function default_mirror_setup
419 {
420 default_mirror_setup_noexit $1 $2 $3
421
422 log_pass
423 }
424
425 #
426 # Given a pair of disks, set up a storage pool and dataset for the mirror
427 # @parameters: $1 the primary side of the mirror
428 # $2 the secondary side of the mirror
429 # @uses: ZPOOL ZFS TESTPOOL TESTFS
430 function default_mirror_setup_noexit
431 {
432 readonly func="default_mirror_setup_noexit"
433 typeset primary=$1
434 typeset secondary=$2
435
436 [[ -z $primary ]] && \
437 log_fail "$func: No parameters passed"
438 [[ -z $secondary ]] && \
439 log_fail "$func: No secondary partition passed"
440 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
441 log_must zpool create -f $TESTPOOL mirror $@
442 log_must zfs create $TESTPOOL/$TESTFS
443 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
444 }
445
446 #
447 # Create a number of mirrors.
448 # We create a number ($1) of 2-way mirrors using the pairs of disks named
449 # on the command line. These mirrors are *not* mounted.
450 # @parameters: $1 the number of mirrors to create
451 # $... the devices to use to create the mirrors on
452 # @uses: ZPOOL ZFS TESTPOOL
453 function setup_mirrors
454 {
455 typeset -i nmirrors=$1
456
457 shift
458 while ((nmirrors > 0)); do
459 log_must test -n "$1" -a -n "$2"
460 [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
461 log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
462 shift 2
463 ((nmirrors = nmirrors - 1))
464 done
465 }
466
467 #
468 # Create a number of raidz pools.
469 # We create a number ($1) of two-disk raidz pools using the pairs of disks named
470 # on the command line. These pools are *not* mounted.
471 # @parameters: $1 the number of pools to create
472 # $... the devices to use to create the pools on
473 # @uses: ZPOOL ZFS TESTPOOL
474 function setup_raidzs
475 {
476 typeset -i nraidzs=$1
477
478 shift
479 while ((nraidzs > 0)); do
480 log_must test -n "$1" -a -n "$2"
481 [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
482 log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
483 shift 2
484 ((nraidzs = nraidzs - 1))
485 done
486 }
487
488 #
489 # Destroy the configured testpool mirrors.
490 # the mirrors are of the form ${TESTPOOL}{number}
491 # @uses: ZPOOL ZFS TESTPOOL
492 function destroy_mirrors
493 {
494 default_cleanup_noexit
495
496 log_pass
497 }
498
499 #
500 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
501 # $1 the list of disks
502 #
503 function default_raidz_setup
504 {
505 typeset disklist="$*"
506 disks=(${disklist[*]})
507
508 if [[ ${#disks[*]} -lt 2 ]]; then
509 log_fail "A raid-z requires a minimum of two disks."
510 fi
511
512 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
513 log_must zpool create -f $TESTPOOL raidz $disklist
514 log_must zfs create $TESTPOOL/$TESTFS
515 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
516
517 log_pass
518 }
519
520 #
521 # Common function used to cleanup storage pools and datasets.
522 #
523 # Invoked at the start of the test suite to ensure the system
524 # is in a known state, and also at the end of each set of
525 # sub-tests to ensure errors from one set of tests don't
526 # impact the execution of the next set.
527
528 function default_cleanup
529 {
530 default_cleanup_noexit
531
532 log_pass
533 }
534
535 #
536 # Utility function used to list all available pool names.
537 #
538 # NOTE: $KEEP is a variable containing pool names, separated by a newline
539 # character, that must be excluded from the returned list.
540 #
541 function get_all_pools
542 {
543 zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
544 }
545
546 function default_cleanup_noexit
547 {
548 typeset pool=""
549 #
550 # Destroying the pool will also destroy any
551 # filesystems it contains.
552 #
553 if is_global_zone; then
554 zfs unmount -a > /dev/null 2>&1
555 ALL_POOLS=$(get_all_pools)
556 # Here, we loop through the pools we're allowed to
557 # destroy, only destroying them if it's safe to do
558 # so.
559 while [ ! -z ${ALL_POOLS} ]
560 do
561 for pool in ${ALL_POOLS}
562 do
563 if safe_to_destroy_pool $pool ;
564 then
565 destroy_pool $pool
566 fi
567 ALL_POOLS=$(get_all_pools)
568 done
569 done
570
571 zfs mount -a
572 else
573 typeset fs=""
574 for fs in $(zfs list -H -o name \
575 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
576 destroy_dataset "$fs" "-Rf"
577 done
578
579 # Need cleanup here to avoid garbage dir left.
580 for fs in $(zfs list -H -o name); do
581 [[ $fs == /$ZONE_POOL ]] && continue
582 [[ -d $fs ]] && log_must rm -rf $fs/*
583 done
584
585 #
586 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
587 # the default value
588 #
589 for fs in $(zfs list -H -o name); do
590 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
591 log_must zfs set reservation=none $fs
592 log_must zfs set recordsize=128K $fs
593 log_must zfs set mountpoint=/$fs $fs
594 typeset enc=""
595 enc=$(get_prop encryption $fs)
596 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
597 [[ "$enc" == "off" ]]; then
598 log_must zfs set checksum=on $fs
599 fi
600 log_must zfs set compression=off $fs
601 log_must zfs set atime=on $fs
602 log_must zfs set devices=off $fs
603 log_must zfs set exec=on $fs
604 log_must zfs set setuid=on $fs
605 log_must zfs set readonly=off $fs
606 log_must zfs set snapdir=hidden $fs
607 log_must zfs set aclmode=groupmask $fs
608 log_must zfs set aclinherit=secure $fs
609 fi
610 done
611 fi
612
613 [[ -d $TESTDIR ]] && \
614 log_must rm -rf $TESTDIR
615
616 disk1=${DISKS%% *}
617 if is_mpath_device $disk1; then
618 delete_partitions
619 fi
620
621 rm -f $TEST_BASE_DIR/{err,out}
622 }
623
624
625 #
626 # Common function used to cleanup storage pools, file systems
627 # and containers.
628 #
629 function default_container_cleanup
630 {
631 if ! is_global_zone; then
632 reexport_pool
633 fi
634
635 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
636 [[ $? -eq 0 ]] && \
637 log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
638
639 destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
640 destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
641
642 [[ -e $TESTDIR1 ]] && \
643 log_must rm -rf $TESTDIR1 > /dev/null 2>&1
644
645 default_cleanup
646 }
647
648 #
649 # Common function used to clean up a snapshot of a file system or volume.
650 # Defaults to deleting the file system's snapshot.
651 #
652 # $1 snapshot name
653 #
654 function destroy_snapshot
655 {
656 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
657
658 if ! snapexists $snap; then
659 log_fail "'$snap' does not exist."
660 fi
661
662 #
663 # The value returned by 'get_prop' does not reflect the real mountpoint
664 # when the snapshot is unmounted. So, first check and make sure this
665 # snapshot is mounted on the current system.
666 #
667 typeset mtpt=""
668 if ismounted $snap; then
669 mtpt=$(get_prop mountpoint $snap)
670 (($? != 0)) && \
671 log_fail "get_prop mountpoint $snap failed."
672 fi
673
674 destroy_dataset "$snap"
675 [[ $mtpt != "" && -d $mtpt ]] && \
676 log_must rm -rf $mtpt
677 }
678
679 #
680 # Common function used to cleanup clone.
681 #
682 # $1 clone name
683 #
684 function destroy_clone
685 {
686 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
687
688 if ! datasetexists $clone; then
689 log_fail "'$clone' does not exist."
690 fi
691
692 # For the same reason as in destroy_snapshot
693 typeset mtpt=""
694 if ismounted $clone; then
695 mtpt=$(get_prop mountpoint $clone)
696 (($? != 0)) && \
697 log_fail "get_prop mountpoint $clone failed."
698 fi
699
700 destroy_dataset "$clone"
701 [[ $mtpt != "" && -d $mtpt ]] && \
702 log_must rm -rf $mtpt
703 }
704
705 #
706 # Common function used to clean up a bookmark of a file system or volume.
707 # Defaults to deleting the file system's bookmark.
708 #
709 # $1 bookmark name
710 #
711 function destroy_bookmark
712 {
713 typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
714
715 if ! bkmarkexists $bkmark; then
716 log_fail "'$bkmark' does not exist."
717 fi
718
719 destroy_dataset "$bkmark"
720 }
721
722 # Return 0 if a snapshot exists; $? otherwise
723 #
724 # $1 - snapshot name
725
726 function snapexists
727 {
728 zfs list -H -t snapshot "$1" > /dev/null 2>&1
729 return $?
730 }
731
732 #
733 # Return 0 if a bookmark exists; $? otherwise
734 #
735 # $1 - bookmark name
736 #
737 function bkmarkexists
738 {
739 zfs list -H -t bookmark "$1" > /dev/null 2>&1
740 return $?
741 }
742
743 #
744 # Set a property to a certain value on a dataset.
745 # Sets a property of the dataset to the value as passed in.
746 # @param:
747 # $1 dataset whose property is being set
748 # $2 property to set
749 # $3 value to set property to
750 # @return:
751 # 0 if the property could be set.
752 # non-zero otherwise.
753 # @use: ZFS
754 #
755 function dataset_setprop
756 {
757 typeset fn=dataset_setprop
758
759 if (($# < 3)); then
760 log_note "$fn: Insufficient parameters (need 3, had $#)"
761 return 1
762 fi
763 typeset output=
764 output=$(zfs set $2=$3 $1 2>&1)
765 typeset rv=$?
766 if ((rv != 0)); then
767 log_note "Setting property on $1 failed."
768 log_note "property $2=$3"
769 log_note "Return Code: $rv"
770 log_note "Output: $output"
771 return $rv
772 fi
773 return 0
774 }
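#
# Usage sketch (illustrative; the property and value are arbitrary examples):
#
#	dataset_setprop $TESTPOOL/$TESTFS compression on || \
#		log_fail "could not set compression on $TESTPOOL/$TESTFS"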
775
776 #
777 # Assign suite defined dataset properties.
778 # This function is used to apply the suite's defined default set of
779 # properties to a dataset.
780 # @parameters: $1 dataset to use
781 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
782 # @returns:
783 # 0 if the dataset has been altered.
784 # 1 if no pool name was passed in.
785 # 2 if the dataset could not be found.
786 # 3 if the dataset could not have its properties set.
787 #
788 function dataset_set_defaultproperties
789 {
790 typeset dataset="$1"
791
792 [[ -z $dataset ]] && return 1
793
794 typeset confset=
795 typeset -i found=0
796 for confset in $(zfs list); do
797 if [[ $dataset = $confset ]]; then
798 found=1
799 break
800 fi
801 done
802 [[ $found -eq 0 ]] && return 2
803 if [[ -n $COMPRESSION_PROP ]]; then
804 dataset_setprop $dataset compression $COMPRESSION_PROP || \
805 return 3
806 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
807 fi
808 if [[ -n $CHECKSUM_PROP ]]; then
809 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
810 return 3
811 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
812 fi
813 return 0
814 }
815
816 #
817 # Check a numeric assertion
818 # @parameter: $@ the assertion to check
819 # @output: big loud notice if assertion failed
820 # @use: log_fail
821 #
822 function assert
823 {
824 (($@)) || log_fail "$@"
825 }
826
827 #
828 # Function to format partition size of a disk
829 # Given a disk cxtxdx reduces all partitions
830 # to 0 size
831 #
832 function zero_partitions #<whole_disk_name>
833 {
834 typeset diskname=$1
835 typeset i
836
837 if is_linux; then
838 DSK=$DEV_DSKDIR/$diskname
839 DSK=$(echo $DSK | sed -e "s|//|/|g")
840 log_must parted $DSK -s -- mklabel gpt
841 blockdev --rereadpt $DSK 2>/dev/null
842 block_device_wait
843 else
844 for i in 0 1 3 4 5 6 7
845 do
846 log_must set_partition $i "" 0mb $diskname
847 done
848 fi
849
850 return 0
851 }
852
853 #
854 # Given a slice, size and disk, this function
855 # formats the slice to the specified size.
856 # Size should be specified with units as per
857 # the `format` command requirements eg. 100mb 3gb
858 #
859 # NOTE: This entire interface is problematic for the Linux parted utility
860 # which requires the end of the partition to be specified. It would be
861 # best to retire this interface and replace it with something more flexible.
862 # At the moment a best effort is made.
863 #
864 function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
865 {
866 typeset -i slicenum=$1
867 typeset start=$2
868 typeset size=$3
869 typeset disk=$4
870
871 if is_linux; then
872 if [[ -z $size || -z $disk ]]; then
873 log_fail "The size or disk name is unspecified."
874 fi
875 typeset size_mb=${size%%[mMgG]}
876
877 size_mb=${size_mb%%[mMgG][bB]}
878 if [[ ${size:1:1} == 'g' ]]; then
879 ((size_mb = size_mb * 1024))
880 fi
881
882 # Create GPT partition table when setting slice 0 or
883 # when the device doesn't already contain a GPT label.
884 parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
885 typeset ret_val=$?
886 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
887 parted $DEV_DSKDIR/$disk -s -- mklabel gpt
888 if [[ $? -ne 0 ]]; then
889 log_note "Failed to create GPT partition table on $disk"
890 return 1
891 fi
892 fi
893
894 # When no start is given align on the first cylinder.
895 if [[ -z "$start" ]]; then
896 start=1
897 fi
898
899 # Determine the cylinder size for the device and using
900 # that calculate the end offset in cylinders.
901 typeset -i cly_size_kb=0
902 cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
903 unit cyl print | head -3 | tail -1 | \
904 awk -F '[:k.]' '{print $4}')
905 ((end = (size_mb * 1024 / cly_size_kb) + start))
906
907 parted $DEV_DSKDIR/$disk -s -- \
908 mkpart part$slicenum ${start}cyl ${end}cyl
909 if [[ $? -ne 0 ]]; then
910 log_note "Failed to create partition $slicenum on $disk"
911 return 1
912 fi
913
914 blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
915 block_device_wait
916 else
917 if [[ -z $slicenum || -z $size || -z $disk ]]; then
918 log_fail "The slice, size or disk name is unspecified."
919 fi
920
921 typeset format_file=/var/tmp/format_in.$$
922
923 echo "partition" >$format_file
924 echo "$slicenum" >> $format_file
925 echo "" >> $format_file
926 echo "" >> $format_file
927 echo "$start" >> $format_file
928 echo "$size" >> $format_file
929 echo "label" >> $format_file
930 echo "" >> $format_file
931 echo "q" >> $format_file
932 echo "q" >> $format_file
933
934 format -e -s -d $disk -f $format_file
935 fi
936
937 typeset ret_val=$?
938 rm -f $format_file
939 if [[ $ret_val -ne 0 ]]; then
940 log_note "Unable to format $disk slice $slicenum to $size"
941 return 1
942 fi
943 return 0
944 }
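#
# Usage sketch (illustrative; $disk stands for a disk name under $DEV_DSKDIR
# and the size is an arbitrary example):
#
#	log_must set_partition 0 "" 100mb $disk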
945
946 #
947 # Delete all partitions on all disks - this is specifically for the use of multipath
948 # devices which currently can only be used in the test suite as raw/un-partitioned
949 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
950 #
951 function delete_partitions
952 {
953 typeset -i j=1
954
955 if [[ -z $DISK_ARRAY_NUM ]]; then
956 DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
957 fi
958 if [[ -z $DISKSARRAY ]]; then
959 DISKSARRAY=$DISKS
960 fi
961
962 if is_linux; then
963 if (( $DISK_ARRAY_NUM == 1 )); then
964 while ((j < MAX_PARTITIONS)); do
965 parted $DEV_DSKDIR/$DISK -s rm $j \
966 > /dev/null 2>&1
967 if (( $? == 1 )); then
968 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
969 if (( $? == 1 )); then
970 log_note "Partitions for $DISK should be deleted"
971 else
972 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
973 fi
974 return 0
975 else
976 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
977 if (( $? == 0 )); then
978 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
979 fi
980 fi
981 ((j = j+1))
982 done
983 else
984 for disk in `echo $DISKSARRAY`; do
985 while ((j < MAX_PARTITIONS)); do
986 parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
987 if (( $? == 1 )); then
988 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
989 if (( $? == 1 )); then
990 log_note "Partitions for $disk should be deleted"
991 else
992 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
993 fi
994 j=7
995 else
996 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
997 if (( $? == 0 )); then
998 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
999 fi
1000 fi
1001 ((j = j+1))
1002 done
1003 j=1
1004 done
1005 fi
1006 fi
1007 return 0
1008 }
1009
1010 #
1011 # Get the end cyl of the given slice
1012 #
1013 function get_endslice #<disk> <slice>
1014 {
1015 typeset disk=$1
1016 typeset slice=$2
1017 if [[ -z $disk || -z $slice ]] ; then
1018 log_fail "The disk name or slice number is unspecified."
1019 fi
1020
1021 if is_linux; then
1022 endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
1023 grep "part${slice}" | \
1024 awk '{print $3}' | \
1025 sed 's,cyl,,')
1026 ((endcyl = (endcyl + 1)))
1027 else
1028 disk=${disk#/dev/dsk/}
1029 disk=${disk#/dev/rdsk/}
1030 disk=${disk%s*}
1031
1032 typeset -i ratio=0
1033 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
1034 grep "sectors\/cylinder" | \
1035 awk '{print $2}')
1036
1037 if ((ratio == 0)); then
1038 return
1039 fi
1040
1041 typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
1042 nawk -v token="$slice" '{if ($1==token) print $6}')
1043
1044 ((endcyl = (endcyl + 1) / ratio))
1045 fi
1046
1047 echo $endcyl
1048 }
1049
1050
1051 #
1052 # Given a size,disk and total slice number, this function formats the
1053 # disk slices from 0 to the total slice number with the same specified
1054 # size.
1055 #
1056 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
1057 {
1058 typeset -i i=0
1059 typeset slice_size=$1
1060 typeset disk_name=$2
1061 typeset total_slices=$3
1062 typeset cyl
1063
1064 zero_partitions $disk_name
1065 while ((i < $total_slices)); do
1066 if ! is_linux; then
1067 if ((i == 2)); then
1068 ((i = i + 1))
1069 continue
1070 fi
1071 fi
1072 log_must set_partition $i "$cyl" $slice_size $disk_name
1073 cyl=$(get_endslice $disk_name $i)
1074 ((i = i+1))
1075 done
1076 }
1077
1078 #
1079 # This function continues to write to a filenum number of files into dirnum
1080 # number of directories until either file_write returns an error or the
1081 # maximum number of files per directory has been written.
1082 #
1083 # Usage:
1084 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1085 #
1086 # Return value: 0 on success
1087 # non 0 on error
1088 #
1089 # Where :
1090 # destdir: is the directory where everything is to be created under
1091 # dirnum: the maximum number of subdirectories to use, -1 no limit
1092 # filenum: the maximum number of files per subdirectory
1093 # bytes: number of bytes to write
1094 # num_writes: number of times to write out bytes
1095 # data: the data that will be written
1096 #
1097 # E.g.
1098 # fill_fs /testdir 20 25 1024 256 0
1099 #
1100 # Note: bytes * num_writes equals the size of the testfile
1101 #
1102 function fill_fs # destdir dirnum filenum bytes num_writes data
1103 {
1104 typeset destdir=${1:-$TESTDIR}
1105 typeset -i dirnum=${2:-50}
1106 typeset -i filenum=${3:-50}
1107 typeset -i bytes=${4:-8192}
1108 typeset -i num_writes=${5:-10240}
1109 typeset data=${6:-0}
1110
1111 typeset -i odirnum=1
1112 typeset -i idirnum=0
1113 typeset -i fn=0
1114 typeset -i retval=0
1115
1116 mkdir -p $destdir/$idirnum
1117 while (($odirnum > 0)); do
1118 if ((dirnum >= 0 && idirnum >= dirnum)); then
1119 odirnum=0
1120 break
1121 fi
1122 file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
1123 -b $bytes -c $num_writes -d $data
1124 retval=$?
1125 if (($retval != 0)); then
1126 odirnum=0
1127 break
1128 fi
1129 if (($fn >= $filenum)); then
1130 fn=0
1131 ((idirnum = idirnum + 1))
1132 mkdir -p $destdir/$idirnum
1133 else
1134 ((fn = fn + 1))
1135 fi
1136 done
1137 return $retval
1138 }
1139
1140 #
1141 # Simple function to get the specified property. If unable to
1142 # get the property, a note is logged and 1 is returned.
1143 #
1144 # Note property is in 'parsable' format (-p)
1145 #
1146 function get_prop # property dataset
1147 {
1148 typeset prop_val
1149 typeset prop=$1
1150 typeset dataset=$2
1151
1152 prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
1153 if [[ $? -ne 0 ]]; then
1154 log_note "Unable to get $prop property for dataset " \
1155 "$dataset"
1156 return 1
1157 fi
1158
1159 echo "$prop_val"
1160 return 0
1161 }
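#
# Usage sketch (illustrative):
#
#	typeset used
#	used=$(get_prop used $TESTPOOL/$TESTFS) || \
#		log_fail "unable to read 'used' for $TESTPOOL/$TESTFS"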
1162
1163 #
1164 # Simple function to get the specified property of a pool. If unable to
1165 # get the property, a note is logged and 1 is returned.
1166 #
1167 # Note property is in 'parsable' format (-p)
1168 #
1169 function get_pool_prop # property pool
1170 {
1171 typeset prop_val
1172 typeset prop=$1
1173 typeset pool=$2
1174
1175 if poolexists $pool ; then
1176 prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
1177 awk '{print $3}')
1178 if [[ $? -ne 0 ]]; then
1179 log_note "Unable to get $prop property for pool " \
1180 "$pool"
1181 return 1
1182 fi
1183 else
1184 log_note "Pool $pool does not exist."
1185 return 1
1186 fi
1187
1188 echo "$prop_val"
1189 return 0
1190 }
1191
1192 # Return 0 if a pool exists; $? otherwise
1193 #
1194 # $1 - pool name
1195
1196 function poolexists
1197 {
1198 typeset pool=$1
1199
1200 if [[ -z $pool ]]; then
1201 log_note "No pool name given."
1202 return 1
1203 fi
1204
1205 zpool get name "$pool" > /dev/null 2>&1
1206 return $?
1207 }
1208
1209 # Return 0 if all the specified datasets exist; $? otherwise
1210 #
1211 # $1-n dataset name
1212 function datasetexists
1213 {
1214 if (($# == 0)); then
1215 log_note "No dataset name given."
1216 return 1
1217 fi
1218
1219 while (($# > 0)); do
1220 zfs get name $1 > /dev/null 2>&1 || \
1221 return $?
1222 shift
1223 done
1224
1225 return 0
1226 }
1227
1228 # Return 0 if none of the specified datasets exist, otherwise return 1.
1229 #
1230 # $1-n dataset name
1231 function datasetnonexists
1232 {
1233 if (($# == 0)); then
1234 log_note "No dataset name given."
1235 return 1
1236 fi
1237
1238 while (($# > 0)); do
1239 zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1240 && return 1
1241 shift
1242 done
1243
1244 return 0
1245 }
1246
1247 function is_shared_impl
1248 {
1249 typeset fs=$1
1250 typeset mtpt
1251
1252 if is_linux; then
1253 for mtpt in `share | awk '{print $1}'` ; do
1254 if [[ $mtpt == $fs ]] ; then
1255 return 0
1256 fi
1257 done
1258 return 1
1259 fi
1260
1261 for mtpt in `share | awk '{print $2}'` ; do
1262 if [[ $mtpt == $fs ]] ; then
1263 return 0
1264 fi
1265 done
1266
1267 typeset stat=$(svcs -H -o STA nfs/server:default)
1268 if [[ $stat != "ON" ]]; then
1269 log_note "Current nfs/server status: $stat"
1270 fi
1271
1272 return 1
1273 }
1274
1275 #
1276 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1277 #
1278 # Returns 0 if shared, 1 otherwise.
1279 #
1280 function is_shared
1281 {
1282 typeset fs=$1
1283 typeset mtpt
1284
1285 if [[ $fs != "/"* ]] ; then
1286 if datasetnonexists "$fs" ; then
1287 return 1
1288 else
1289 mtpt=$(get_prop mountpoint "$fs")
1290 case $mtpt in
1291 none|legacy|-) return 1
1292 ;;
1293 *) fs=$mtpt
1294 ;;
1295 esac
1296 fi
1297 fi
1298
1299 is_shared_impl "$fs"
1300 }
1301
1302 #
1303 # Given a dataset name determine if it is shared via SMB.
1304 #
1305 # Returns 0 if shared, 1 otherwise.
1306 #
1307 function is_shared_smb
1308 {
1309 typeset fs=$1
1310 typeset mtpt
1311
1312 if datasetnonexists "$fs" ; then
1313 return 1
1314 else
1315 fs=$(echo $fs | sed 's@/@_@g')
1316 fi
1317
1318 if is_linux; then
1319 for mtpt in `net usershare list | awk '{print $1}'` ; do
1320 if [[ $mtpt == $fs ]] ; then
1321 return 0
1322 fi
1323 done
1324 return 1
1325 else
1326 log_unsupported "Currently unsupported by the test framework"
1327 return 1
1328 fi
1329 }
1330
1331 #
1332 # Given a mountpoint, determine if it is not shared via NFS.
1333 #
1334 # Returns 0 if not shared, 1 otherwise.
1335 #
1336 function not_shared
1337 {
1338 typeset fs=$1
1339
1340 is_shared $fs
1341 if (($? == 0)); then
1342 return 1
1343 fi
1344
1345 return 0
1346 }
1347
1348 #
1349 # Given a dataset determine if it is not shared via SMB.
1350 #
1351 # Returns 0 if not shared, 1 otherwise.
1352 #
1353 function not_shared_smb
1354 {
1355 typeset fs=$1
1356
1357 is_shared_smb $fs
1358 if (($? == 0)); then
1359 return 1
1360 fi
1361
1362 return 0
1363 }
1364
1365 #
1366 # Helper function to unshare a mountpoint.
1367 #
1368 function unshare_fs #fs
1369 {
1370 typeset fs=$1
1371
1372 is_shared $fs || is_shared_smb $fs
1373 if (($? == 0)); then
1374 log_must zfs unshare $fs
1375 fi
1376
1377 return 0
1378 }
1379
1380 #
1381 # Helper function to share a NFS mountpoint.
1382 #
1383 function share_nfs #fs
1384 {
1385 typeset fs=$1
1386
1387 if is_linux; then
1388 is_shared $fs
1389 if (($? != 0)); then
1390 log_must share "*:$fs"
1391 fi
1392 else
1393 is_shared $fs
1394 if (($? != 0)); then
1395 log_must share -F nfs $fs
1396 fi
1397 fi
1398
1399 return 0
1400 }
1401
1402 #
1403 # Helper function to unshare a NFS mountpoint.
1404 #
1405 function unshare_nfs #fs
1406 {
1407 typeset fs=$1
1408
1409 if is_linux; then
1410 is_shared $fs
1411 if (($? == 0)); then
1412 log_must unshare -u "*:$fs"
1413 fi
1414 else
1415 is_shared $fs
1416 if (($? == 0)); then
1417 log_must unshare -F nfs $fs
1418 fi
1419 fi
1420
1421 return 0
1422 }
1423
1424 #
1425 # Helper function to show NFS shares.
1426 #
1427 function showshares_nfs
1428 {
1429 if is_linux; then
1430 share -v
1431 else
1432 share -F nfs
1433 fi
1434
1435 return 0
1436 }
1437
1438 #
1439 # Helper function to show SMB shares.
1440 #
1441 function showshares_smb
1442 {
1443 if is_linux; then
1444 net usershare list
1445 else
1446 share -F smb
1447 fi
1448
1449 return 0
1450 }
1451
1452 #
1453 # Check NFS server status and trigger it online.
1454 #
1455 function setup_nfs_server
1456 {
1457 # Cannot share directory in non-global zone.
1458 #
1459 if ! is_global_zone; then
1460 log_note "Cannot trigger NFS server by sharing in LZ."
1461 return
1462 fi
1463
1464 if is_linux; then
1465 #
1466 # Re-synchronize /var/lib/nfs/etab with /etc/exports and
1467 # /etc/exports.d/* to provide a clean test environment.
1468 #
1469 log_must share -r
1470
1471 log_note "NFS server must be started prior to running ZTS."
1472 return
1473 fi
1474
1475 typeset nfs_fmri="svc:/network/nfs/server:default"
1476 if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1477 #
1478 # Only a real share operation can bring the NFS server
1479 # online permanently.
1480 #
1481 typeset dummy=/tmp/dummy
1482
1483 if [[ -d $dummy ]]; then
1484 log_must rm -rf $dummy
1485 fi
1486
1487 log_must mkdir $dummy
1488 log_must share $dummy
1489
1490 #
1491 # Wait for the fmri's status to reach its final state.
1492 # While in transition an asterisk (*) is appended to the instance
1493 # state, and an unshare would revert the status to 'DIS' again.
1494 #
1495 # Wait for at least 1 second.
1496 #
1497 log_must sleep 1
1498 timeout=10
1499 while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1500 do
1501 log_must sleep 1
1502
1503 ((timeout -= 1))
1504 done
1505
1506 log_must unshare $dummy
1507 log_must rm -rf $dummy
1508 fi
1509
1510 log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1511 }
1512
1513 #
1514 # To verify whether calling process is in global zone
1515 #
1516 # Return 0 if in global zone, 1 in non-global zone
1517 #
1518 function is_global_zone
1519 {
1520 if is_linux; then
1521 return 0
1522 else
1523 typeset cur_zone=$(zonename 2>/dev/null)
1524 if [[ $cur_zone != "global" ]]; then
1525 return 1
1526 fi
1527 return 0
1528 fi
1529 }
1530
1531 #
1532 # Verify whether test is permitted to run from
1533 # global zone, local zone, or both
1534 #
1535 # $1 zone limit, could be "global", "local", or "both"(no limit)
1536 #
1537 # Return 0 if permitted, otherwise exit with log_unsupported
1538 #
1539 function verify_runnable # zone limit
1540 {
1541 typeset limit=$1
1542
1543 [[ -z $limit ]] && return 0
1544
1545 if is_global_zone ; then
1546 case $limit in
1547 global|both)
1548 ;;
1549 local) log_unsupported "Test is unable to run from "\
1550 "global zone."
1551 ;;
1552 *) log_note "Warning: unknown limit $limit - " \
1553 "use both."
1554 ;;
1555 esac
1556 else
1557 case $limit in
1558 local|both)
1559 ;;
1560 global) log_unsupported "Test is unable to run from "\
1561 "local zone."
1562 ;;
1563 *) log_note "Warning: unknown limit $limit - " \
1564 "use both."
1565 ;;
1566 esac
1567
1568 reexport_pool
1569 fi
1570
1571 return 0
1572 }
1573
1574 # Return 0 if created successfully or the pool exists; $? otherwise
1575 # Note: In local zones, this function should return 0 silently.
1576 #
1577 # $1 - pool name
1578 # $2-n - [keyword] devs_list
1579
1580 function create_pool #pool devs_list
1581 {
1582 typeset pool=${1%%/*}
1583
1584 shift
1585
1586 if [[ -z $pool ]]; then
1587 log_note "Missing pool name."
1588 return 1
1589 fi
1590
1591 if poolexists $pool ; then
1592 destroy_pool $pool
1593 fi
1594
1595 if is_global_zone ; then
1596 [[ -d /$pool ]] && rm -rf /$pool
1597 log_must zpool create -f $pool $@
1598 fi
1599
1600 return 0
1601 }
1602
1603 # Return 0 if destroyed successfully; $? otherwise
1604 # Note: In local zones, this function should return 0 silently.
1605 #
1606 # $1 - pool name
1607 # Destroy pool with the given parameters.
1608
1609 function destroy_pool #pool
1610 {
1611 typeset pool=${1%%/*}
1612 typeset mtpt
1613
1614 if [[ -z $pool ]]; then
1615 log_note "No pool name given."
1616 return 1
1617 fi
1618
1619 if is_global_zone ; then
1620 if poolexists "$pool" ; then
1621 mtpt=$(get_prop mountpoint "$pool")
1622
1623 # At times, syseventd/udev activity can cause attempts
1624 # to destroy a pool to fail with EBUSY. We retry a few
1625 # times allowing failures before requiring the destroy
1626 # to succeed.
1627 log_must_busy zpool destroy -f $pool
1628
1629 [[ -d $mtpt ]] && \
1630 log_must rm -rf $mtpt
1631 else
1632 log_note "Pool does not exist. ($pool)"
1633 return 1
1634 fi
1635 fi
1636
1637 return 0
1638 }
1639
1640 # Return 0 if created successfully; $? otherwise
1641 #
1642 # $1 - dataset name
1643 # $2-n - dataset options
1644
1645 function create_dataset #dataset dataset_options
1646 {
1647 typeset dataset=$1
1648
1649 shift
1650
1651 if [[ -z $dataset ]]; then
1652 log_note "Missing dataset name."
1653 return 1
1654 fi
1655
1656 if datasetexists $dataset ; then
1657 destroy_dataset $dataset
1658 fi
1659
1660 log_must zfs create $@ $dataset
1661
1662 return 0
1663 }
1664
1665 # Return 0 if destroyed successfully; $? otherwise
1666 # Note: In local zones, this function should return 0 silently.
1667 #
1668 # $1 - dataset name
1669 # $2 - custom arguments for zfs destroy
1670 # Destroy dataset with the given parameters.
1671
1672 function destroy_dataset #dataset #args
1673 {
1674 typeset dataset=$1
1675 typeset mtpt
1676 typeset args=${2:-""}
1677
1678 if [[ -z $dataset ]]; then
1679 log_note "No dataset name given."
1680 return 1
1681 fi
1682
1683 if is_global_zone ; then
1684 if datasetexists "$dataset" ; then
1685 mtpt=$(get_prop mountpoint "$dataset")
1686 log_must_busy zfs destroy $args $dataset
1687
1688 [[ -d $mtpt ]] && \
1689 log_must rm -rf $mtpt
1690 else
1691 log_note "Dataset does not exist. ($dataset)"
1692 return 1
1693 fi
1694 fi
1695
1696 return 0
1697 }
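#
# Usage sketch (illustrative; the "-r" argument is optional and is passed
# straight through to 'zfs destroy'):
#
#	destroy_dataset "$TESTPOOL/$TESTFS" "-r"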
1698
1699 #
1700 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1701 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1702 # and a zvol device to the zone.
1703 #
1704 # $1 zone name
1705 # $2 zone root directory prefix
1706 # $3 zone ip
1707 #
1708 function zfs_zones_setup #zone_name zone_root zone_ip
1709 {
1710 typeset zone_name=${1:-$(hostname)-z}
1711 typeset zone_root=${2:-"/zone_root"}
1712 typeset zone_ip=${3:-"10.1.1.10"}
1713 typeset prefix_ctr=$ZONE_CTR
1714 typeset pool_name=$ZONE_POOL
1715 typeset -i cntctr=5
1716 typeset -i i=0
1717
1718 # Create pool and 5 container within it
1719 #
1720 [[ -d /$pool_name ]] && rm -rf /$pool_name
1721 log_must zpool create -f $pool_name $DISKS
1722 while ((i < cntctr)); do
1723 log_must zfs create $pool_name/$prefix_ctr$i
1724 ((i += 1))
1725 done
1726
1727 # create a zvol
1728 log_must zfs create -V 1g $pool_name/zone_zvol
1729 block_device_wait
1730
1731 #
1732 # If current system support slog, add slog device for pool
1733 #
1734 if verify_slog_support ; then
1735 typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
1736 log_must mkfile $MINVDEVSIZE $sdevs
1737 log_must zpool add $pool_name log mirror $sdevs
1738 fi
1739
1740 # this isn't supported just yet.
1741 # Create a filesystem. In order to add this to
1742 # the zone, it must have its mountpoint set to 'legacy'
1743 # log_must zfs create $pool_name/zfs_filesystem
1744 # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
1745
1746 [[ -d $zone_root ]] && \
1747 log_must rm -rf $zone_root/$zone_name
1748 [[ ! -d $zone_root ]] && \
1749 log_must mkdir -p -m 0700 $zone_root/$zone_name
1750
1751 # Create zone configure file and configure the zone
1752 #
1753 typeset zone_conf=/tmp/zone_conf.$$
1754 echo "create" > $zone_conf
1755 echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1756 echo "set autoboot=true" >> $zone_conf
1757 i=0
1758 while ((i < cntctr)); do
1759 echo "add dataset" >> $zone_conf
1760 echo "set name=$pool_name/$prefix_ctr$i" >> \
1761 $zone_conf
1762 echo "end" >> $zone_conf
1763 ((i += 1))
1764 done
1765
1766 # add our zvol to the zone
1767 echo "add device" >> $zone_conf
1768 echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1769 echo "end" >> $zone_conf
1770
1771 # add a corresponding zvol rdsk to the zone
1772 echo "add device" >> $zone_conf
1773 echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1774 echo "end" >> $zone_conf
1775
1776 # once it's supported, we'll add our filesystem to the zone
1777 # echo "add fs" >> $zone_conf
1778 # echo "set type=zfs" >> $zone_conf
1779 # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1780 # echo "set dir=/export/zfs_filesystem" >> $zone_conf
1781 # echo "end" >> $zone_conf
1782
1783 echo "verify" >> $zone_conf
1784 echo "commit" >> $zone_conf
1785 log_must zonecfg -z $zone_name -f $zone_conf
1786 log_must rm -f $zone_conf
1787
1788 # Install the zone
1789 zoneadm -z $zone_name install
1790 if (($? == 0)); then
1791 log_note "SUCCESS: zoneadm -z $zone_name install"
1792 else
1793 log_fail "FAIL: zoneadm -z $zone_name install"
1794 fi
1795
1796 # Install sysidcfg file
1797 #
1798 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1799 echo "system_locale=C" > $sysidcfg
1800 echo "terminal=dtterm" >> $sysidcfg
1801 echo "network_interface=primary {" >> $sysidcfg
1802 echo "hostname=$zone_name" >> $sysidcfg
1803 echo "}" >> $sysidcfg
1804 echo "name_service=NONE" >> $sysidcfg
1805 echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
1806 echo "security_policy=NONE" >> $sysidcfg
1807 echo "timezone=US/Eastern" >> $sysidcfg
1808
1809 # Boot this zone
1810 log_must zoneadm -z $zone_name boot
1811 }
1812
1813 #
1814 # Reexport TESTPOOL & TESTPOOL(1-4)
1815 #
1816 function reexport_pool
1817 {
1818 typeset -i cntctr=5
1819 typeset -i i=0
1820
1821 while ((i < cntctr)); do
1822 if ((i == 0)); then
1823 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1824 if ! ismounted $TESTPOOL; then
1825 log_must zfs mount $TESTPOOL
1826 fi
1827 else
1828 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1829 if eval ! ismounted \$TESTPOOL$i; then
1830 log_must eval zfs mount \$TESTPOOL$i
1831 fi
1832 fi
1833 ((i += 1))
1834 done
1835 }
1836
1837 #
1838 # Verify a given disk or pool state
1839 #
1840 # Return 0 if the pool/disk matches the expected state, 1 otherwise
1841 #
1842 function check_state # pool disk state{online,offline,degraded}
1843 {
1844 typeset pool=$1
1845 typeset disk=${2#$DEV_DSKDIR/}
1846 typeset state=$3
1847
1848 [[ -z $pool ]] || [[ -z $state ]] \
1849 && log_fail "Arguments invalid or missing"
1850
1851 if [[ -z $disk ]]; then
1852 #check pool state only
1853 zpool get -H -o value health $pool \
1854 | grep -i "$state" > /dev/null 2>&1
1855 else
1856 zpool status -v $pool | grep "$disk" \
1857 | grep -i "$state" > /dev/null 2>&1
1858 fi
1859
1860 return $?
1861 }
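#
# Usage sketch (illustrative; $DISK is supplied by the test environment):
#
#	log_must check_state $TESTPOOL "$DISK" "online"
#	log_must check_state $TESTPOOL "" "degraded"	# pool state only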
1862
1863 #
1864 # Get the mountpoint of snapshot
1865 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1866 # as its mountpoint
1867 #
1868 function snapshot_mountpoint
1869 {
1870 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1871
1872 if [[ $dataset != *@* ]]; then
1873 log_fail "Invalid snapshot name '$dataset'."
1874 fi
1875
1876 typeset fs=${dataset%@*}
1877 typeset snap=${dataset#*@}
1878
1879 if [[ -z $fs || -z $snap ]]; then
1880 log_fail "Invalid snapshot name '$dataset'."
1881 fi
1882
1883 echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1884 }
1885
1886 #
1887 # Given a device and 'ashift' value verify it's correctly set on every label
1888 #
1889 function verify_ashift # device ashift
1890 {
1891 typeset device="$1"
1892 typeset ashift="$2"
1893
1894 zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
1895 if (ashift != $2)
1896 exit 1;
1897 else
1898 count++;
1899 } END {
1900 if (count != 4)
1901 exit 1;
1902 else
1903 exit 0;
1904 }'
1905
1906 return $?
1907 }
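#
# Usage sketch (illustrative; assumes the vdev under test was created with
# ashift=12 and $DISK names the device being checked):
#
#	log_must verify_ashift $DEV_DSKDIR/$DISK 12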
1908
1909 #
1910 # Given a pool and file system, this function will verify the file system
1911 # using the zdb internal tool. Note that the pool is exported and imported
1912 # to ensure it has consistent state.
1913 #
1914 function verify_filesys # pool filesystem dir
1915 {
1916 typeset pool="$1"
1917 typeset filesys="$2"
1918 typeset zdbout="/tmp/zdbout.$$"
1919
1920 shift
1921 shift
1922 typeset dirs=$@
1923 typeset search_path=""
1924
1925 log_note "Calling zdb to verify filesystem '$filesys'"
1926 zfs unmount -a > /dev/null 2>&1
1927 log_must zpool export $pool
1928
1929 if [[ -n $dirs ]] ; then
1930 for dir in $dirs ; do
1931 search_path="$search_path -d $dir"
1932 done
1933 fi
1934
1935 log_must zpool import $search_path $pool
1936
1937 zdb -cudi $filesys > $zdbout 2>&1
1938 if [[ $? != 0 ]]; then
1939 log_note "Output: zdb -cudi $filesys"
1940 cat $zdbout
1941 log_fail "zdb detected errors with: '$filesys'"
1942 fi
1943
1944 log_must zfs mount -a
1945 log_must rm -rf $zdbout
1946 }
1947
1948 #
1949 # Given a pool, issue a scrub and verify that no checksum errors are reported.
1950 #
1951 function verify_pool
1952 {
1953 typeset pool=${1:-$TESTPOOL}
1954
1955 log_must zpool scrub $pool
1956 log_must wait_scrubbed $pool
1957
1958 cksum=$(zpool status $pool | awk 'L{print $NF;L=0} /CKSUM$/{L=1}')
1959 if [[ $cksum != 0 ]]; then
1960 log_must zpool status -v
1961 log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
1962 fi
1963 }
1964
1965 #
1966 # Given a pool, this function lists all disks in the pool.
1967 #
1968 function get_disklist # pool
1969 {
1970 typeset disklist=""
1971
1972 disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1973 grep -v "\-\-\-\-\-" | \
1974 egrep -v -e "^(mirror|raidz[1-3]|spare|log|cache|special|dedup)$")
1975
1976 echo $disklist
1977 }
1978
1979 #
1980 # Given a pool, this function lists all disks in the pool with their full
1981 # path (like "/dev/sda" instead of "sda").
1982 #
1983 function get_disklist_fullpath # pool
1984 {
1985 args="-P $1"
1986 get_disklist $args
1987 }
1988
1989
1990
1991 # /**
1992 # This function kills a given list of processes after a time period. We use
1993 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1994 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1995 # would be listed as FAIL, which we don't want: we're happy with stress tests
1996 # running for a certain amount of time, then finishing.
1997 #
1998 # @param $1 the time in seconds after which we should terminate these processes
1999 # @param $2..$n the processes we wish to terminate.
2000 # */
2001 function stress_timeout
2002 {
2003 typeset -i TIMEOUT=$1
2004 shift
2005 typeset cpids="$@"
2006
2007 log_note "Waiting for child processes($cpids). " \
2008 "It could last dozens of minutes, please be patient ..."
2009 log_must sleep $TIMEOUT
2010
2011 log_note "Killing child processes after ${TIMEOUT} stress timeout."
2012 typeset pid
2013 for pid in $cpids; do
2014 ps -p $pid > /dev/null 2>&1
2015 if (($? == 0)); then
2016 log_must kill -USR1 $pid
2017 fi
2018 done
2019 }
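#
# Usage sketch (illustrative; $pid1 and $pid2 are hypothetical PIDs of
# background workers started by the test):
#
#	stress_timeout 300 $pid1 $pid2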
2020
2021 #
2022 # Verify a given hotspare disk is inuse or avail
2023 #
2024 # Return 0 if the pool/disk matches the expected state, 1 otherwise
2025 #
2026 function check_hotspare_state # pool disk state{inuse,avail}
2027 {
2028 typeset pool=$1
2029 typeset disk=${2#$DEV_DSKDIR/}
2030 typeset state=$3
2031
2032 cur_state=$(get_device_state $pool $disk "spares")
2033
2034 if [[ $state != ${cur_state} ]]; then
2035 return 1
2036 fi
2037 return 0
2038 }
2039
2040 #
2041 # Wait until a hotspare transitions to a given state or times out.
2042 #
2043 # Return 0 when pool/disk matches expected state, 1 on timeout.
2044 #
2045 function wait_hotspare_state # pool disk state timeout
2046 {
2047 typeset pool=$1
2048 typeset disk=${2#*$DEV_DSKDIR/}
2049 typeset state=$3
2050 typeset timeout=${4:-60}
2051 typeset -i i=0
2052
2053 while [[ $i -lt $timeout ]]; do
2054 if check_hotspare_state $pool $disk $state; then
2055 return 0
2056 fi
2057
2058 i=$((i+1))
2059 sleep 1
2060 done
2061
2062 return 1
2063 }
2064
2065 #
2066 # Verify a given slog device is in the expected state
2067 #
2068 # Return 0 if the pool/disk matches the expected state, 1 otherwise
2069 #
2070 function check_slog_state # pool disk state{online,offline,unavail}
2071 {
2072 typeset pool=$1
2073 typeset disk=${2#$DEV_DSKDIR/}
2074 typeset state=$3
2075
2076 cur_state=$(get_device_state $pool $disk "logs")
2077
2078 if [[ $state != ${cur_state} ]]; then
2079 return 1
2080 fi
2081 return 0
2082 }
2083
2084 #
2085 # Verify a given vdev is in the expected state
2086 #
2087 # Return 0 if the pool/disk matches the expected state, 1 otherwise
2088 #
2089 function check_vdev_state # pool disk state{online,offline,unavail}
2090 {
2091 typeset pool=$1
2092 typeset disk=${2#*$DEV_DSKDIR/}
2093 typeset state=$3
2094
2095 cur_state=$(get_device_state $pool $disk)
2096
2097 if [[ $state != ${cur_state} ]]; then
2098 return 1
2099 fi
2100 return 0
2101 }
2102
2103 #
2104 # Wait until a vdev transitions to a given state or times out.
2105 #
2106 # Return 0 when pool/disk matches expected state, 1 on timeout.
2107 #
2108 function wait_vdev_state # pool disk state timeout
2109 {
2110 typeset pool=$1
2111 typeset disk=${2#*$DEV_DSKDIR/}
2112 typeset state=$3
2113 typeset timeout=${4:-60}
2114 typeset -i i=0
2115
2116 while [[ $i -lt $timeout ]]; do
2117 if check_vdev_state $pool $disk $state; then
2118 return 0
2119 fi
2120
2121 i=$((i+1))
2122 sleep 1
2123 done
2124
2125 return 1
2126 }
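#
# Usage sketch (illustrative; waits up to 60 seconds for the vdev to come
# back online):
#
#	log_must wait_vdev_state $TESTPOOL "$DISK" "ONLINE" 60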
2127
2128 #
2129 # Check the output of 'zpool status -v <pool>',
2130 # and see if the content of <token> contains the <keyword> specified.
2131 #
2132 # Return 0 if it does, 1 otherwise
2133 #
2134 function check_pool_status # pool token keyword <verbose>
2135 {
2136 typeset pool=$1
2137 typeset token=$2
2138 typeset keyword=$3
2139 typeset verbose=${4:-false}
2140
2141 scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2142 ($1==token) {print $0}')
2143 if [[ $verbose == true ]]; then
2144 log_note $scan
2145 fi
2146 echo $scan | grep -i "$keyword" > /dev/null 2>&1
2147
2148 return $?
2149 }
2150
2151 #
2152 # The following functions are instances of check_pool_status():
2153 # is_pool_resilvering - to check if the pool is resilver in progress
2154 # is_pool_resilvered - to check if the pool is resilver completed
2155 # is_pool_scrubbing - to check if the pool is scrub in progress
2156 # is_pool_scrubbed - to check if the pool is scrub completed
2157 # is_pool_scrub_stopped - to check if the pool is scrub stopped
2158 # is_pool_scrub_paused - to check if the pool has scrub paused
2159 # is_pool_removing - to check if the pool is removing a vdev
2160 # is_pool_removed - to check if the pool is remove completed
2161 #
2162 function is_pool_resilvering #pool <verbose>
2163 {
2164 check_pool_status "$1" "scan" "resilver in progress since " $2
2165 return $?
2166 }
2167
2168 function is_pool_resilvered #pool <verbose>
2169 {
2170 check_pool_status "$1" "scan" "resilvered " $2
2171 return $?
2172 }
2173
2174 function is_pool_scrubbing #pool <verbose>
2175 {
2176 check_pool_status "$1" "scan" "scrub in progress since " $2
2177 return $?
2178 }
2179
2180 function is_pool_scrubbed #pool <verbose>
2181 {
2182 check_pool_status "$1" "scan" "scrub repaired" $2
2183 return $?
2184 }
2185
2186 function is_pool_scrub_stopped #pool <verbose>
2187 {
2188 check_pool_status "$1" "scan" "scrub canceled" $2
2189 return $?
2190 }
2191
2192 function is_pool_scrub_paused #pool <verbose>
2193 {
2194 check_pool_status "$1" "scan" "scrub paused since " $2
2195 return $?
2196 }
2197
2198 function is_pool_removing #pool
2199 {
2200 check_pool_status "$1" "remove" "in progress since "
2201 return $?
2202 }
2203
2204 function is_pool_removed #pool
2205 {
2206 check_pool_status "$1" "remove" "completed on"
2207 return $?
2208 }
2209
2210 function wait_for_degraded
2211 {
2212 typeset pool=$1
2213 typeset timeout=${2:-30}
2214 typeset t0=$SECONDS
2215
2216 while :; do
2217 [[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
2218 log_note "$pool is not yet degraded."
2219 sleep 1
2220 if ((SECONDS - t0 > $timeout)); then
2221 log_note "$pool not degraded after $timeout seconds."
2222 return 1
2223 fi
2224 done
2225
2226 return 0
2227 }
2228
2229 #
2230 # Use create_pool()/destroy_pool() to clean up the information
2231 # on the given disks to avoid slice overlapping.
2232 #
2233 function cleanup_devices #vdevs
2234 {
2235 typeset pool="foopool$$"
2236
2237 if poolexists $pool ; then
2238 destroy_pool $pool
2239 fi
2240
2241 create_pool $pool $@
2242 destroy_pool $pool
2243
2244 return 0
2245 }
2246
2247 #/**
2248 # A function to find and locate free disks on a system, or from the given
2249 # disks passed as parameters. It works by excluding disks that are in use
2250 # as swap devices and dump devices, and also disks listed in /etc/vfstab.
2251 #
2252 # $@ given disks to find which are free, default is all disks in
2253 # the test system
2254 #
2255 # @return a string containing the list of available disks
2256 #*/
2257 function find_disks
2258 {
2259 # Trust provided list, no attempt is made to locate unused devices.
2260 if is_linux; then
2261 echo "$@"
2262 return
2263 fi
2264
2265
2266 sfi=/tmp/swaplist.$$
2267 dmpi=/tmp/dumpdev.$$
2268 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2269
2270 swap -l > $sfi
2271 dumpadm > $dmpi 2>/dev/null
2272
2273 # write an awk script that can process the output of format
2274 # to produce a list of disks we know about. Note that we have
2275 # to escape "$2" so that the shell doesn't interpret it while
2276 # we're creating the awk script.
2277 # -------------------
2278 cat > /tmp/find_disks.awk <<EOF
2279 #!/bin/nawk -f
2280 BEGIN { FS="."; }
2281
2282 /^Specify disk/{
2283 searchdisks=0;
2284 }
2285
2286 {
2287 if (searchdisks && \$2 !~ "^$"){
2288 split(\$2,arr," ");
2289 print arr[1];
2290 }
2291 }
2292
2293 /^AVAILABLE DISK SELECTIONS:/{
2294 searchdisks=1;
2295 }
2296 EOF
2297 #---------------------
2298
2299 chmod 755 /tmp/find_disks.awk
2300 disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
2301 rm /tmp/find_disks.awk
2302
2303 unused=""
2304 for disk in $disks; do
2305 # Check for mounted
2306 grep "${disk}[sp]" /etc/mnttab >/dev/null
2307 (($? == 0)) && continue
2308 # Check for swap
2309 grep "${disk}[sp]" $sfi >/dev/null
2310 (($? == 0)) && continue
2311 # check for dump device
2312 grep "${disk}[sp]" $dmpi >/dev/null
2313 (($? == 0)) && continue
2314 # check to see if this disk has been explicitly excluded
2315 # by a user-set environment variable
2316 echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
2317 (($? == 0)) && continue
2318 unused_candidates="$unused_candidates $disk"
2319 done
2320 rm $sfi
2321 rm $dmpi
2322
2323 # now just check to see if those disks do actually exist
2324 # by looking for a device pointing to the first slice in
2325 # each case. limit the number to max_finddisksnum
2326 count=0
2327 for disk in $unused_candidates; do
2328 if [ -b $DEV_DSKDIR/${disk}s0 ]; then
2329 if [ $count -lt $max_finddisksnum ]; then
2330 unused="$unused $disk"
2331 # do not impose limit if $@ is provided
2332 [[ -z $@ ]] && ((count = count + 1))
2333 fi
2334 fi
2335 done
2336
2337 # finally, return our disk list
2338 echo $unused
2339 }
2340
2341 #
2342 # Add specified user to specified group
2343 #
2344 # $1 group name
2345 # $2 user name
2346 # $3 base of the homedir (optional)
2347 #
2348 function add_user #<group_name> <user_name> <basedir>
2349 {
2350 typeset gname=$1
2351 typeset uname=$2
2352 typeset basedir=${3:-"/var/tmp"}
2353
2354 if ((${#gname} == 0 || ${#uname} == 0)); then
2355 log_fail "group name or user name is not defined."
2356 fi
2357
2358 log_must useradd -g $gname -d $basedir/$uname -m $uname
2359 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.profile
2360 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.bash_profile
2361 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.login
2362
2363 # Add new users to the same group as the command line utilities.
2364 # This allows them to be run out of the original user's home
2365 # directory as long as it is permissioned to be group readable.
2366 if is_linux; then
2367 cmd_group=$(stat --format="%G" $(which zfs))
2368 log_must usermod -a -G $cmd_group $uname
2369 fi
2370
2371 return 0
2372 }
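
# Illustrative usage of add_user()/del_user() (example only; the group and
# user names are assumptions): create an unprivileged test user, run a
# command as that user, then clean up:
#
#	log_must add_group zfsgrp
#	log_must add_user zfsgrp zfsuser
#	log_must user_run zfsuser "zfs list"
#	log_must del_user zfsuser
#	log_must del_group zfsgrp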
2373
2374 #
2375 # Delete the specified user.
2376 #
2377 # $1 login name
2378 # $2 base of the homedir (optional)
2379 #
2380 function del_user #<logname> <basedir>
2381 {
2382 typeset user=$1
2383 typeset basedir=${2:-"/var/tmp"}
2384
2385 if ((${#user} == 0)); then
2386 log_fail "login name is necessary."
2387 fi
2388
2389 if id $user > /dev/null 2>&1; then
2390 log_must_retry "currently used" 5 userdel $user
2391 fi
2392
2393 [[ -d $basedir/$user ]] && rm -fr $basedir/$user
2394
2395 return 0
2396 }
2397
2398 #
2399 # Select valid gid and create specified group.
2400 #
2401 # $1 group name
2402 #
2403 function add_group #<group_name>
2404 {
2405 typeset group=$1
2406
2407 if ((${#group} == 0)); then
2408 log_fail "group name is necessary."
2409 fi
2410
2411 # Assign 100 as the base gid on non-Linux systems. On Linux the gid is
2412 # left to groupadd because for many distributions 1000 and under are reserved.
2413 if is_linux; then
2414 while true; do
2415 groupadd $group > /dev/null 2>&1
2416 typeset -i ret=$?
2417 case $ret in
2418 0) return 0 ;;
2419 *) return 1 ;;
2420 esac
2421 done
2422 else
2423 typeset -i gid=100
2424 while true; do
2425 groupadd -g $gid $group > /dev/null 2>&1
2426 typeset -i ret=$?
2427 case $ret in
2428 0) return 0 ;;
2429 # The gid is not unique
2430 4) ((gid += 1)) ;;
2431 *) return 1 ;;
2432 esac
2433 done
2434 fi
2435 }
2436
2437 #
2438 # Delete the specified group.
2439 #
2440 # $1 group name
2441 #
2442 function del_group #<group_name>
2443 {
2444 typeset grp=$1
2445 if ((${#grp} == 0)); then
2446 log_fail "group name is necessary."
2447 fi
2448
2449 if is_linux; then
2450 getent group $grp > /dev/null 2>&1
2451 typeset -i ret=$?
2452 case $ret in
2453 # Group does not exist.
2454 2) return 0 ;;
2455 # Name already exists as a group name
2456 0) log_must groupdel $grp ;;
2457 *) return 1 ;;
2458 esac
2459 else
2460 groupmod -n $grp $grp > /dev/null 2>&1
2461 typeset -i ret=$?
2462 case $ret in
2463 # Group does not exist.
2464 6) return 0 ;;
2465 # Name already exists as a group name
2466 9) log_must groupdel $grp ;;
2467 *) return 1 ;;
2468 esac
2469 fi
2470
2471 return 0
2472 }
2473
2474 #
2475 # This function will return true if it's safe to destroy the pool passed
2476 # as argument 1. It checks for pools based on zvols and files, and also
2477 # files contained in a pool that may have a different mountpoint.
2478 #
2479 function safe_to_destroy_pool { # $1 the pool name
2480
2481 typeset pool=""
2482 typeset DONT_DESTROY=""
2483
2484 # We check that by deleting the $1 pool, we're not
2485 # going to pull the rug out from other pools. Do this
2486 # by looking at all other pools, ensuring that they
2487 # aren't built from files or zvols contained in this pool.
2488
2489 for pool in $(zpool list -H -o name)
2490 do
2491 ALTMOUNTPOOL=""
2492
2493 # this is a list of the files that make up $pool and whose paths
2494 # pass through the mountpoint hierarchy of the $1 pool
2495 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2496 awk '{print $1}')
2497
2498 # this is a list of the zvols that make up the pool
2499 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2500 | awk '{print $1}')
2501
2502 # also want to determine if it's a file-based pool using an
2503 # alternate mountpoint...
2504 POOL_FILE_DIRS=$(zpool status -v $pool | \
2505 grep / | awk '{print $1}' | \
2506 awk -F/ '{print $2}' | grep -v "dev")
2507
2508 for pooldir in $POOL_FILE_DIRS
2509 do
2510 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2511 grep "${pooldir}$" | awk '{print $1}')
2512
2513 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2514 done
2515
2516
2517 if [ ! -z "$ZVOLPOOL" ]
2518 then
2519 DONT_DESTROY="true"
2520 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2521 fi
2522
2523 if [ ! -z "$FILEPOOL" ]
2524 then
2525 DONT_DESTROY="true"
2526 log_note "Pool $pool is built from $FILEPOOL on $1"
2527 fi
2528
2529 if [ ! -z "$ALTMOUNTPOOL" ]
2530 then
2531 DONT_DESTROY="true"
2532 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2533 fi
2534 done
2535
2536 if [ -z "${DONT_DESTROY}" ]
2537 then
2538 return 0
2539 else
2540 log_note "Warning: it is not safe to destroy $1!"
2541 return 1
2542 fi
2543 }
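
# Illustrative usage of safe_to_destroy_pool() (example only): only destroy
# the pool when no other pool is built on top of its files or zvols:
#
#	if safe_to_destroy_pool $TESTPOOL; then
#		destroy_pool $TESTPOOL
#	fi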
2544
2545 #
2546 # Get the available ZFS compression options
2547 # $1 option type zfs_set|zfs_compress
2548 #
2549 function get_compress_opts
2550 {
2551 typeset COMPRESS_OPTS
2552 typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2553 gzip-6 gzip-7 gzip-8 gzip-9"
2554
2555 if [[ $1 == "zfs_compress" ]] ; then
2556 COMPRESS_OPTS="on lzjb"
2557 elif [[ $1 == "zfs_set" ]] ; then
2558 COMPRESS_OPTS="on off lzjb"
2559 fi
2560 typeset valid_opts="$COMPRESS_OPTS"
2561 zfs get 2>&1 | grep gzip >/dev/null 2>&1
2562 if [[ $? -eq 0 ]]; then
2563 valid_opts="$valid_opts $GZIP_OPTS"
2564 fi
2565 echo "$valid_opts"
2566 }
2567
2568 #
2569 # Verify zfs operation with the -p option works as expected
2570 # $1 operation, value could be create, clone or rename
2571 # $2 dataset type, value could be fs or vol
2572 # $3 dataset name
2573 # $4 new dataset name
2574 #
2575 function verify_opt_p_ops
2576 {
2577 typeset ops=$1
2578 typeset datatype=$2
2579 typeset dataset=$3
2580 typeset newdataset=$4
2581
2582 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2583 log_fail "$datatype is not supported."
2584 fi
2585
2586 # check parameters accordingly
2587 case $ops in
2588 create)
2589 newdataset=$dataset
2590 dataset=""
2591 if [[ $datatype == "vol" ]]; then
2592 ops="create -V $VOLSIZE"
2593 fi
2594 ;;
2595 clone)
2596 if [[ -z $newdataset ]]; then
2597 log_fail "newdataset should not be empty" \
2598 "when ops is $ops."
2599 fi
2600 log_must datasetexists $dataset
2601 log_must snapexists $dataset
2602 ;;
2603 rename)
2604 if [[ -z $newdataset ]]; then
2605 log_fail "newdataset should not be empty" \
2606 "when ops is $ops."
2607 fi
2608 log_must datasetexists $dataset
2609 ;;
2610 *)
2611 log_fail "$ops is not supported."
2612 ;;
2613 esac
2614
2615 # make sure the upper level filesystem does not exist
2616 destroy_dataset "${newdataset%/*}" "-rRf"
2617
2618 # without -p option, operation will fail
2619 log_mustnot zfs $ops $dataset $newdataset
2620 log_mustnot datasetexists $newdataset ${newdataset%/*}
2621
2622 # with -p option, operation should succeed
2623 log_must zfs $ops -p $dataset $newdataset
2624 block_device_wait
2625
2626 if ! datasetexists $newdataset ; then
2627 log_fail "-p option does not work for $ops"
2628 fi
2629
2630 # when $ops is create or clone, redoing the operation should still succeed
2631 if [[ $ops != "rename" ]]; then
2632 log_must zfs $ops -p $dataset $newdataset
2633 fi
2634
2635 return 0
2636 }
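
# Illustrative usage of verify_opt_p_ops() (example only; the dataset name is
# an assumption): verify that 'zfs create -p' creates the missing parent of a
# nested filesystem:
#
#	log_must verify_opt_p_ops "create" "fs" $TESTPOOL/parent/child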
2637
2638 #
2639 # Get configuration of pool
2640 # $1 pool name
2641 # $2 config name
2642 #
2643 function get_config
2644 {
2645 typeset pool=$1
2646 typeset config=$2
2647 typeset alt_root
2648
2649 if ! poolexists "$pool" ; then
2650 return 1
2651 fi
2652 alt_root=$(zpool list -H $pool | awk '{print $NF}')
2653 if [[ $alt_root == "-" ]]; then
2654 value=$(zdb -C $pool | grep "$config:" | awk -F: \
2655 '{print $2}')
2656 else
2657 value=$(zdb -e $pool | grep "$config:" | awk -F: \
2658 '{print $2}')
2659 fi
2660 if [[ -n $value ]] ; then
2661 value=${value#'}
2662 value=${value%'}
2663 fi
2664 echo $value
2665
2666 return 0
2667 }
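
# Illustrative usage of get_config() (example only): read the active pool
# GUID from the on-disk configuration:
#
#	typeset guid=$(get_config $TESTPOOL pool_guid)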
2668
2669 #
2670 # Private function. Randomly select one of the items from the arguments.
2671 #
2672 # $1 count
2673 # $2-n string
2674 #
2675 function _random_get
2676 {
2677 typeset cnt=$1
2678 shift
2679
2680 typeset str="$@"
2681 typeset -i ind
2682 ((ind = RANDOM % cnt + 1))
2683
2684 typeset ret=$(echo "$str" | cut -f $ind -d ' ')
2685 echo $ret
2686 }
2687
2688 #
2689 # Randomly select one item from the arguments, including an empty (NONE) choice
2690 #
2691 function random_get_with_non
2692 {
2693 typeset -i cnt=$#
2694 ((cnt += 1))
2695
2696 _random_get "$cnt" "$@"
2697 }
2698
2699 #
2700 # Randomly select one item from the arguments, without an empty (NONE) choice
2701 #
2702 function random_get
2703 {
2704 _random_get "$#" "$@"
2705 }
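
# Illustrative usage of random_get()/random_get_with_non() (example only):
# pick a random compression value, optionally allowing an empty result so
# the property can be left untouched:
#
#	typeset opt=$(random_get on off lzjb)
#	typeset maybe=$(random_get_with_non on off lzjb)
#	[[ -n $maybe ]] && log_must zfs set compression=$maybe $TESTPOOL/$TESTFS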
2706
2707 #
2708 # Detect if the current system supports slog
2709 #
2710 function verify_slog_support
2711 {
2712 typeset dir=$TEST_BASE_DIR/disk.$$
2713 typeset pool=foo.$$
2714 typeset vdev=$dir/a
2715 typeset sdev=$dir/b
2716
2717 mkdir -p $dir
2718 mkfile $MINVDEVSIZE $vdev $sdev
2719
2720 typeset -i ret=0
2721 if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
2722 ret=1
2723 fi
2724 rm -r $dir
2725
2726 return $ret
2727 }
2728
2729 #
2730 # The function will generate a dataset name with a specific length
2731 # $1, the length of the name
2732 # $2, the base string to construct the name
2733 #
2734 function gen_dataset_name
2735 {
2736 typeset -i len=$1
2737 typeset basestr="$2"
2738 typeset -i baselen=${#basestr}
2739 typeset -i iter=0
2740 typeset l_name=""
2741
2742 if ((len % baselen == 0)); then
2743 ((iter = len / baselen))
2744 else
2745 ((iter = len / baselen + 1))
2746 fi
2747 while ((iter > 0)); do
2748 l_name="${l_name}$basestr"
2749
2750 ((iter -= 1))
2751 done
2752
2753 echo $l_name
2754 }
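
# Illustrative usage of gen_dataset_name() (example only): build a 200
# character filesystem name to exercise long dataset names:
#
#	typeset longname=$(gen_dataset_name 200 "abcdefgh")
#	log_must zfs create $TESTPOOL/$longname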
2755
2756 #
2757 # Get cksum tuple of dataset
2758 # $1 dataset name
2759 #
2760 # sample zdb output:
2761 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2762 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2763 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2764 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2765 function datasetcksum
2766 {
2767 typeset cksum
2768 sync
2769 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2770 | awk -F= '{print $7}')
2771 echo $cksum
2772 }
2773
2774 #
2775 # Get cksum of file
2776 # $1 file path
2777 #
2778 function checksum
2779 {
2780 typeset cksum
2781 cksum=$(cksum $1 | awk '{print $1}')
2782 echo $cksum
2783 }
2784
2785 #
2786 # Get the given disk/slice state from the specified field of the pool
2787 #
2788 function get_device_state #pool disk field("", "spares","logs")
2789 {
2790 typeset pool=$1
2791 typeset disk=${2#$DEV_DSKDIR/}
2792 typeset field=${3:-$pool}
2793
2794 state=$(zpool status -v "$pool" 2>/dev/null | \
2795 nawk -v device=$disk -v pool=$pool -v field=$field \
2796 'BEGIN {startconfig=0; startfield=0; }
2797 /config:/ {startconfig=1}
2798 (startconfig==1) && ($1==field) {startfield=1; next;}
2799 (startfield==1) && ($1==device) {print $2; exit;}
2800 (startfield==1) &&
2801 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2802 echo $state
2803 }
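
# Illustrative usage of get_device_state() (example only; $DISK1 is an
# assumed variable): verify that a device in the main pool configuration is
# still ONLINE:
#
#	state=$(get_device_state $TESTPOOL $DISK1)
#	[[ $state == "ONLINE" ]] || log_fail "$DISK1 is $state, expected ONLINE"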
2804
2805
2806 #
2807 # print the filesystem type of the given directory
2808 #
2809 # $1 directory name
2810 #
2811 function get_fstype
2812 {
2813 typeset dir=$1
2814
2815 if [[ -z $dir ]]; then
2816 log_fail "Usage: get_fstype <directory>"
2817 fi
2818
2819 #
2820 # $ df -n /
2821 # / : ufs
2822 #
2823 df -n $dir | awk '{print $3}'
2824 }
2825
2826 #
2827 # Given a disk, label it with VTOC regardless of what label was on the disk
2828 # $1 disk
2829 #
2830 function labelvtoc
2831 {
2832 typeset disk=$1
2833 if [[ -z $disk ]]; then
2834 log_fail "The disk name is unspecified."
2835 fi
2836 typeset label_file=/var/tmp/labelvtoc.$$
2837 typeset arch=$(uname -p)
2838
2839 if is_linux; then
2840 log_note "Currently unsupported by the test framework"
2841 return 1
2842 fi
2843
2844 if [[ $arch == "i386" ]]; then
2845 echo "label" > $label_file
2846 echo "0" >> $label_file
2847 echo "" >> $label_file
2848 echo "q" >> $label_file
2849 echo "q" >> $label_file
2850
2851 fdisk -B $disk >/dev/null 2>&1
2852 # wait a while for fdisk to finish
2853 sleep 60
2854 elif [[ $arch == "sparc" ]]; then
2855 echo "label" > $label_file
2856 echo "0" >> $label_file
2857 echo "" >> $label_file
2858 echo "" >> $label_file
2859 echo "" >> $label_file
2860 echo "q" >> $label_file
2861 else
2862 log_fail "unknown arch type"
2863 fi
2864
2865 format -e -s -d $disk -f $label_file
2866 typeset -i ret_val=$?
2867 rm -f $label_file
2868 #
2869 # wait for format to finish
2870 #
2871 sleep 60
2872 if ((ret_val != 0)); then
2873 log_fail "unable to label $disk as VTOC."
2874 fi
2875
2876 return 0
2877 }
2878
2879 #
2880 # check if the system was installed as zfsroot or not
2881 # return: 0 if true, non-zero otherwise
2882 #
2883 function is_zfsroot
2884 {
2885 df -n / | grep zfs > /dev/null 2>&1
2886 return $?
2887 }
2888
2889 #
2890 # get the root filesystem name if it's a zfsroot system.
2891 #
2892 # return: root filesystem name
2893 function get_rootfs
2894 {
2895 typeset rootfs=""
2896
2897 if ! is_linux; then
2898 rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
2899 /etc/mnttab)
2900 fi
2901 if [[ -z "$rootfs" ]]; then
2902 log_fail "Can not get rootfs"
2903 fi
2904 zfs list $rootfs > /dev/null 2>&1
2905 if (($? == 0)); then
2906 echo $rootfs
2907 else
2908 log_fail "This is not a zfsroot system."
2909 fi
2910 }
2911
2912 #
2913 # get the rootfs's pool name
2914 # return:
2915 # rootpool name
2916 #
2917 function get_rootpool
2918 {
2919 typeset rootfs=""
2920 typeset rootpool=""
2921
2922 if ! is_linux; then
2923 rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
2924 /etc/mnttab)
2925 fi
2926 if [[ -z "$rootfs" ]]; then
2927 log_fail "Can not get rootpool"
2928 fi
2929 zfs list $rootfs > /dev/null 2>&1
2930 if (($? == 0)); then
2931 rootpool=`echo $rootfs | awk -F\/ '{print $1}'`
2932 echo $rootpool
2933 else
2934 log_fail "This is not a zfsroot system."
2935 fi
2936 }
2937
2938 #
2939 # Get the package name
2940 #
2941 function get_package_name
2942 {
2943 typeset dirpath=${1:-$STC_NAME}
2944
2945 echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2946 }
2947
2948 #
2949 # Get the number of words in a string separated by white space
2950 #
2951 function get_word_count
2952 {
2953 echo $1 | wc -w
2954 }
2955
2956 #
2957 # To verify that the required number of disks is given
2958 #
2959 function verify_disk_count
2960 {
2961 typeset -i min=${2:-1}
2962
2963 typeset -i count=$(get_word_count "$1")
2964
2965 if ((count < min)); then
2966 log_untested "A minimum of $min disks is required to run." \
2967 " You specified $count disk(s)"
2968 fi
2969 }
2970
2971 function ds_is_volume
2972 {
2973 typeset type=$(get_prop type $1)
2974 [[ $type = "volume" ]] && return 0
2975 return 1
2976 }
2977
2978 function ds_is_filesystem
2979 {
2980 typeset type=$(get_prop type $1)
2981 [[ $type = "filesystem" ]] && return 0
2982 return 1
2983 }
2984
2985 function ds_is_snapshot
2986 {
2987 typeset type=$(get_prop type $1)
2988 [[ $type = "snapshot" ]] && return 0
2989 return 1
2990 }
2991
2992 #
2993 # Check if Trusted Extensions are installed and enabled
2994 #
2995 function is_te_enabled
2996 {
2997 svcs -H -o state labeld 2>/dev/null | grep "enabled"
2998 if (($? != 0)); then
2999 return 1
3000 else
3001 return 0
3002 fi
3003 }
3004
3005 # Utility function to determine if a system has multiple cpus.
3006 function is_mp
3007 {
3008 if is_linux; then
3009 (($(nproc) > 1))
3010 else
3011 (($(psrinfo | wc -l) > 1))
3012 fi
3013
3014 return $?
3015 }
3016
3017 function get_cpu_freq
3018 {
3019 if is_linux; then
3020 lscpu | awk '/CPU MHz/ { print $3 }'
3021 else
3022 psrinfo -v 0 | awk '/processor operates at/ {print $6}'
3023 fi
3024 }
3025
3026 # Run the given command as the user provided.
3027 function user_run
3028 {
3029 typeset user=$1
3030 shift
3031
3032 log_note "user:$user $@"
3033 eval su - \$user -c \"$@\" > $TEST_BASE_DIR/out 2>$TEST_BASE_DIR/err
3034 return $?
3035 }
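
# Illustrative usage of user_run() (example only; the user name is an
# assumption): verify that a delegated user can take a snapshot:
#
#	log_must zfs allow staff1 snapshot $TESTPOOL/$TESTFS
#	log_must user_run staff1 "zfs snapshot $TESTPOOL/$TESTFS@snap1"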
3036
3037 #
3038 # Check if the pool contains the specified vdevs
3039 #
3040 # $1 pool
3041 # $2..n <vdev> ...
3042 #
3043 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3044 # vdevs is not in the pool, and 2 if pool name is missing.
3045 #
3046 function vdevs_in_pool
3047 {
3048 typeset pool=$1
3049 typeset vdev
3050
3051 if [[ -z $pool ]]; then
3052 log_note "Missing pool name."
3053 return 2
3054 fi
3055
3056 shift
3057
3058 # We could use 'zpool list' to only get the vdevs of the pool but we
3059 # can't reference a mirror/raidz vdev using its ID (i.e. mirror-0),
3060 # therefore we use the 'zpool status' output.
3061 typeset tmpfile=$(mktemp)
3062 zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
3063 for vdev in $@; do
3064 grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
3065 [[ $? -ne 0 ]] && return 1
3066 done
3067
3068 rm -f $tmpfile
3069
3070 return 0;
3071 }
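
# Illustrative usage of vdevs_in_pool() (example only; $DISK1 and $DISK2 are
# assumed variables): confirm both disks belong to the pool before detaching
# one of them:
#
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2
#	log_must zpool detach $TESTPOOL $DISK2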
3072
3073 function get_max
3074 {
3075 typeset -l i max=$1
3076 shift
3077
3078 for i in "$@"; do
3079 max=$(echo $((max > i ? max : i)))
3080 done
3081
3082 echo $max
3083 }
3084
3085 function get_min
3086 {
3087 typeset -l i min=$1
3088 shift
3089
3090 for i in "$@"; do
3091 min=$(echo $((min < i ? min : i)))
3092 done
3093
3094 echo $min
3095 }
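
# Illustrative usage of get_max()/get_min() (example only): pick the largest
# of several recordsize candidates:
#
#	typeset largest=$(get_max 8192 131072 4096)	# largest=131072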
3096
3097 #
3098 # Generate a random number between 1 and the argument.
3099 #
3100 function random
3101 {
3102 typeset max=$1
3103 echo $(( ($RANDOM % $max) + 1 ))
3104 }
3105
3106 # Write data that can be compressed into a directory
3107 function write_compressible
3108 {
3109 typeset dir=$1
3110 typeset megs=$2
3111 typeset nfiles=${3:-1}
3112 typeset bs=${4:-1024k}
3113 typeset fname=${5:-file}
3114
3115 [[ -d $dir ]] || log_fail "No directory: $dir"
3116
3117 # Under Linux fio is not currently used since its behavior can
3118 # differ significantly across versions. This includes missing
3119 # command line options and cases where the --buffer_compress_*
3120 # options fail to behave as expected.
3121 if is_linux; then
3122 typeset file_bytes=$(to_bytes $megs)
3123 typeset bs_bytes=4096
3124 typeset blocks=$(($file_bytes / $bs_bytes))
3125
3126 for (( i = 0; i < $nfiles; i++ )); do
3127 truncate -s $file_bytes $dir/$fname.$i
3128
3129 # Write every third block to get 66% compression.
3130 for (( j = 0; j < $blocks; j += 3 )); do
3131 dd if=/dev/urandom of=$dir/$fname.$i \
3132 seek=$j bs=$bs_bytes count=1 \
3133 conv=notrunc >/dev/null 2>&1
3134 done
3135 done
3136 else
3137 log_must eval "fio \
3138 --name=job \
3139 --fallocate=0 \
3140 --minimal \
3141 --randrepeat=0 \
3142 --buffer_compress_percentage=66 \
3143 --buffer_compress_chunk=4096 \
3144 --directory=$dir \
3145 --numjobs=$nfiles \
3146 --nrfiles=$nfiles \
3147 --rw=write \
3148 --bs=$bs \
3149 --filesize=$megs \
3150 --filename_format='$fname.\$jobnum' >/dev/null"
3151 fi
3152 }
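
# Illustrative usage of write_compressible() (example only; the mountpoint
# variable is an assumption): write two files of ~66% compressible data into
# the test filesystem and sync it out:
#
#	log_must zfs set compression=on $TESTPOOL/$TESTFS
#	write_compressible $TESTDIR 64m 2
#	sync_pool $TESTPOOL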
3153
3154 function get_objnum
3155 {
3156 typeset pathname=$1
3157 typeset objnum
3158
3159 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3160 objnum=$(stat -c %i $pathname)
3161 echo $objnum
3162 }
3163
3164 #
3165 # Sync data to the pool
3166 #
3167 # $1 pool name
3168 # $2 boolean to force uberblock (and config including zpool cache file) update
3169 #
3170 function sync_pool #pool <force>
3171 {
3172 typeset pool=${1:-$TESTPOOL}
3173 typeset force=${2:-false}
3174
3175 if [[ $force == true ]]; then
3176 log_must zpool sync -f $pool
3177 else
3178 log_must zpool sync $pool
3179 fi
3180
3181 return 0
3182 }
3183
3184 #
3185 # Wait for the zpool 'freeing' property to drop to zero.
3186 #
3187 # $1 pool name
3188 #
3189 function wait_freeing #pool
3190 {
3191 typeset pool=${1:-$TESTPOOL}
3192 while true; do
3193 [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3194 log_must sleep 1
3195 done
3196 }
3197
3198 #
3199 # Wait for every device replace operation to complete
3200 #
3201 # $1 pool name
3202 #
3203 function wait_replacing #pool
3204 {
3205 typeset pool=${1:-$TESTPOOL}
3206 while true; do
3207 [[ "" == "$(zpool status $pool |
3208 awk '/replacing-[0-9]+/ {print $1}')" ]] && break
3209 log_must sleep 1
3210 done
3211 }
3212
3213 #
3214 # Wait for a pool to be scrubbed
3215 #
3216 # $1 pool name
3217 #
3218 # Blocks until the pool reports a completed scrub; there is no timeout,
3219 # so the caller is responsible for ensuring that a scrub was actually
3220 # started on the pool.
3221 #
3222 function wait_scrubbed
3223 {
3224 typeset pool=${1:-$TESTPOOL}
3225 while true ; do
3226 is_pool_scrubbed $pool && break
3227 log_must sleep 1
3228 done
3229 }
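
# Illustrative usage of wait_scrubbed() (example only): start a scrub and
# block until it has completed:
#
#	log_must zpool scrub $TESTPOOL
#	wait_scrubbed $TESTPOOL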
3230
3231 # Backup the zed.rc in our test directory so that we can edit it for our test.
3232 #
3233 # Returns: Backup file name. You will need to pass this to zed_rc_restore().
3234 function zed_rc_backup
3235 {
3236 zedrc_backup="$(mktemp)"
3237 cp $ZEDLET_DIR/zed.rc $zedrc_backup
3238 echo $zedrc_backup
3239 }
3240
3241 function zed_rc_restore
3242 {
3243 mv $1 $ZEDLET_DIR/zed.rc
3244 }
3245
3246 #
3247 # Setup custom environment for the ZED.
3248 #
3249 # $@ Optional list of zedlets to run under zed.
3250 function zed_setup
3251 {
3252 if ! is_linux; then
3253 return
3254 fi
3255
3256 if [[ ! -d $ZEDLET_DIR ]]; then
3257 log_must mkdir $ZEDLET_DIR
3258 fi
3259
3260 if [[ ! -e $VDEVID_CONF ]]; then
3261 log_must touch $VDEVID_CONF
3262 fi
3263
3264 if [[ -e $VDEVID_CONF_ETC ]]; then
3265 log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3266 fi
3267 EXTRA_ZEDLETS=$@
3268
3269 # Create a symlink for /etc/zfs/vdev_id.conf file.
3270 log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3271
3272 # Setup minimal ZED configuration. Individual test cases should
3273 # add additional ZEDLETs as needed for their specific test.
3274 log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3275 log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
3276
3277 # Scripts must only be user writable.
3278 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3279 saved_umask=$(umask)
3280 log_must umask 0022
3281 for i in $EXTRA_ZEDLETS ; do
3282 log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3283 done
3284 log_must umask $saved_umask
3285 fi
3286
3287 # Customize the zed.rc file to enable the full debug log.
3288 log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
3289 echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3290
3291 }
3292
3293 #
3294 # Cleanup custom ZED environment.
3295 #
3296 # $@ Optional list of zedlets to remove from our test zed.d directory.
3297 function zed_cleanup
3298 {
3299 if ! is_linux; then
3300 return
3301 fi
3302 EXTRA_ZEDLETS=$@
3303
3304 log_must rm -f ${ZEDLET_DIR}/zed.rc
3305 log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
3306 log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
3307 log_must rm -f ${ZEDLET_DIR}/all-debug.sh
3308 log_must rm -f ${ZEDLET_DIR}/state
3309
3310 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3311 for i in $EXTRA_ZEDLETS ; do
3312 log_must rm -f ${ZEDLET_DIR}/$i
3313 done
3314 fi
3315 log_must rm -f $ZED_LOG
3316 log_must rm -f $ZED_DEBUG_LOG
3317 log_must rm -f $VDEVID_CONF_ETC
3318 log_must rm -f $VDEVID_CONF
3319 rmdir $ZEDLET_DIR
3320 }
3321
3322 #
3323 # Check if ZED is currently running; if not, start ZED.
3324 #
3325 function zed_start
3326 {
3327 if ! is_linux; then
3328 return
3329 fi
3330
3331 # ZEDLET_DIR=/var/tmp/zed
3332 if [[ ! -d $ZEDLET_DIR ]]; then
3333 log_must mkdir $ZEDLET_DIR
3334 fi
3335
3336 # Verify the ZED is not already running.
3337 pgrep -x zed > /dev/null
3338 if (($? == 0)); then
3339 log_fail "ZED already running"
3340 fi
3341
3342 log_note "Starting ZED"
3343 # run ZED in the background and redirect foreground logging
3344 # output to $ZED_LOG.
3345 log_must truncate -s 0 $ZED_DEBUG_LOG
3346 log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
3347 "-s $ZEDLET_DIR/state 2>$ZED_LOG &"
3348
3349 return 0
3350 }
3351
3352 #
3353 # Kill ZED process
3354 #
3355 function zed_stop
3356 {
3357 if ! is_linux; then
3358 return
3359 fi
3360
3361 log_note "Stopping ZED"
3362 if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
3363 zedpid=$(<${ZEDLET_DIR}/zed.pid)
3364 kill $zedpid
3365 while ps -p $zedpid > /dev/null; do
3366 sleep 1
3367 done
3368 rm -f ${ZEDLET_DIR}/zed.pid
3369 fi
3370 return 0
3371 }
3372
3373 #
3374 # Drain all zevents
3375 #
3376 function zed_events_drain
3377 {
3378 while [ $(zpool events -H | wc -l) -ne 0 ]; do
3379 sleep 1
3380 zpool events -c >/dev/null
3381 done
3382 }
3383
3384 # Set a variable in zed.rc to something, un-commenting it in the process.
3385 #
3386 # $1 variable
3387 # $2 value
3388 function zed_rc_set
3389 {
3390 var="$1"
3391 val="$2"
3392 # Remove the line
3393 cmd="'/$var/d'"
3394 eval sed -i $cmd $ZEDLET_DIR/zed.rc
3395
3396 # Add it at the end
3397 echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3398 }
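
# Illustrative ZED workflow (example only; the zedlet name and zed.rc
# variable are assumptions): start the daemon with an extra zedlet, adjust a
# zed.rc setting, then tear everything down:
#
#	zed_events_drain
#	log_must zed_setup "all-debug.sh"
#	zedrc_bak=$(zed_rc_backup)
#	zed_rc_set ZED_NOTIFY_INTERVAL_SECS 3
#	log_must zed_start
#	# ... exercise the test case here ...
#	log_must zed_stop
#	zed_rc_restore $zedrc_bak
#	zed_cleanup "all-debug.sh"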
3399
3400
3401 #
3402 # Check if the provided device is actively being used as a swap device.
3403 #
3404 function is_swap_inuse
3405 {
3406 typeset device=$1
3407
3408 if [[ -z $device ]] ; then
3409 log_note "No device specified."
3410 return 1
3411 fi
3412
3413 if is_linux; then
3414 swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
3415 else
3416 swap -l | grep -w $device > /dev/null 2>&1
3417 fi
3418
3419 return $?
3420 }
3421
3422 #
3423 # Setup a swap device using the provided device.
3424 #
3425 function swap_setup
3426 {
3427 typeset swapdev=$1
3428
3429 if is_linux; then
3430 log_must eval "mkswap $swapdev > /dev/null 2>&1"
3431 log_must swapon $swapdev
3432 else
3433 log_must swap -a $swapdev
3434 fi
3435
3436 return 0
3437 }
3438
3439 #
3440 # Cleanup a swap device on the provided device.
3441 #
3442 function swap_cleanup
3443 {
3444 typeset swapdev=$1
3445
3446 if is_swap_inuse $swapdev; then
3447 if is_linux; then
3448 log_must swapoff $swapdev
3449 else
3450 log_must swap -d $swapdev
3451 fi
3452 fi
3453
3454 return 0
3455 }
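
# Illustrative usage of the swap helpers (example only; the zvol name is an
# assumption): use a zvol as a temporary swap device and remove it again:
#
#	log_must zfs create -V 1G $TESTPOOL/swapvol
#	block_device_wait
#	log_must swap_setup $ZVOL_DEVDIR/$TESTPOOL/swapvol
#	log_must swap_cleanup $ZVOL_DEVDIR/$TESTPOOL/swapvol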
3456
3457 #
3458 # Set a global system tunable (64-bit value)
3459 #
3460 # $1 tunable name
3461 # $2 tunable value
3462 #
3463 function set_tunable64
3464 {
3465 set_tunable_impl "$1" "$2" Z
3466 }
3467
3468 #
3469 # Set a global system tunable (32-bit value)
3470 #
3471 # $1 tunable name
3472 # $2 tunable value
3473 #
3474 function set_tunable32
3475 {
3476 set_tunable_impl "$1" "$2" W
3477 }
3478
3479 function set_tunable_impl
3480 {
3481 typeset tunable="$1"
3482 typeset value="$2"
3483 typeset mdb_cmd="$3"
3484 typeset module="${4:-zfs}"
3485
3486 [[ -z "$tunable" ]] && return 1
3487 [[ -z "$value" ]] && return 1
3488 [[ -z "$mdb_cmd" ]] && return 1
3489
3490 case "$(uname)" in
3491 Linux)
3492 typeset zfs_tunables="/sys/module/$module/parameters"
3493 [[ -w "$zfs_tunables/$tunable" ]] || return 1
3494 echo -n "$value" > "$zfs_tunables/$tunable"
3495 return "$?"
3496 ;;
3497 SunOS)
3498 [[ "$module" == "zfs" ]] || return 1
3499 echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
3500 return "$?"
3501 ;;
3502 esac
3503 }
3504
3505 #
3506 # Get a global system tunable
3507 #
3508 # $1 tunable name
3509 #
3510 function get_tunable
3511 {
3512 get_tunable_impl "$1"
3513 }
3514
3515 function get_tunable_impl
3516 {
3517 typeset tunable="$1"
3518 typeset module="${2:-zfs}"
3519
3520 [[ -z "$tunable" ]] && return 1
3521
3522 case "$(uname)" in
3523 Linux)
3524 typeset zfs_tunables="/sys/module/$module/parameters"
3525 [[ -f "$zfs_tunables/$tunable" ]] || return 1
3526 cat $zfs_tunables/$tunable
3527 return "$?"
3528 ;;
3529 SunOS)
3530 [[ "$module" == "zfs" ]] || return 1
3531 ;;
3532 esac
3533
3534 return 1
3535 }
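
# Illustrative usage of the tunable helpers (example only; the module
# parameter shown is an assumption): lower a tunable for the duration of a
# test and restore the saved value afterwards:
#
#	typeset saved=$(get_tunable zfs_txg_timeout)
#	log_must set_tunable64 zfs_txg_timeout 1
#	# ... exercise the test case here ...
#	log_must set_tunable64 zfs_txg_timeout $saved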
3536
3537 #
3538 # Prints the current time in seconds since UNIX Epoch.
3539 #
3540 function current_epoch
3541 {
3542 printf '%(%s)T'
3543 }
3544
3545 #
3546 # Get decimal value of global uint32_t variable using mdb.
3547 #
3548 function mdb_get_uint32
3549 {
3550 typeset variable=$1
3551 typeset value
3552
3553 value=$(mdb -k -e "$variable/X | ::eval .=U")
3554 if [[ $? -ne 0 ]]; then
3555 log_fail "Failed to get value of '$variable' from mdb."
3556 return 1
3557 fi
3558
3559 echo $value
3560 return 0
3561 }
3562
3563 #
3564 # Set global uint32_t variable to a decimal value using mdb.
3565 #
3566 function mdb_set_uint32
3567 {
3568 typeset variable=$1
3569 typeset value=$2
3570
3571 mdb -kw -e "$variable/W 0t$value" > /dev/null
3572 if [[ $? -ne 0 ]]; then
3573 echo "Failed to set '$variable' to '$value' in mdb."
3574 return 1
3575 fi
3576
3577 return 0
3578 }
3579
3580 #
3581 # Set global scalar integer variable to a hex value using mdb.
3582 # Note: Target should have CTF data loaded.
3583 #
3584 function mdb_ctf_set_int
3585 {
3586 typeset variable=$1
3587 typeset value=$2
3588
3589 mdb -kw -e "$variable/z $value" > /dev/null
3590 if [[ $? -ne 0 ]]; then
3591 echo "Failed to set '$variable' to '$value' in mdb."
3592 return 1
3593 fi
3594
3595 return 0
3596 }