]> git.proxmox.com Git - mirror_zfs.git/blob - tests/zfs-tests/include/libtest.shlib
zdb and inuse tests don't pass with real disks
[mirror_zfs.git] / tests / zfs-tests / include / libtest.shlib
1 #!/bin/ksh -p
2 #
3 # CDDL HEADER START
4 #
5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
8 #
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
13 #
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
19 #
20 # CDDL HEADER END
21 #
22
23 #
24 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25 # Use is subject to license terms.
26 # Copyright (c) 2012, 2016 by Delphix. All rights reserved.
27 # Copyright 2016 Nexenta Systems, Inc.
28 # Copyright (c) 2017 Lawrence Livermore National Security, LLC.
29 # Copyright (c) 2017 Datto Inc.
30 # Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
31 #
32
33 . ${STF_TOOLS}/include/logapi.shlib
34 . ${STF_SUITE}/include/math.shlib
35 . ${STF_SUITE}/include/blkdev.shlib
36
37 #
38 # Apply constrained path when available. This is required since the
39 # PATH may have been modified by sudo's secure_path behavior.
40 #
41 if [ -n "$STF_PATH" ]; then
42 PATH="$STF_PATH"
43 fi
44
45 # Linux kernel version comparison function
46 #
47 # $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
48 #
49 # Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
50 #
# Encode a Linux kernel version string as one comparable integer.
#
# $1	version string such as "4.10" or "2.6.32"; when empty, the running
#	kernel's version (from uname -r) is used
#
# Prints X*10000 + Y*100 + Z, so callers can compare numerically:
#	if [ $(linux_version) -ge $(linux_version "2.6.32") ]
#
function linux_version
{
	typeset vstr="$1"

	if [[ -z "$vstr" ]]; then
		vstr=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
	fi

	typeset x=$(echo $vstr | cut -d '.' -f 1)
	typeset y=$(echo $vstr | cut -d '.' -f 2)
	typeset z=$(echo $vstr | cut -d '.' -f 3)

	# Missing components count as zero (e.g. "4.10" -> 4.10.0).
	x=${x:-0}
	y=${y:-0}
	z=${z:-0}

	echo $((x * 10000 + y * 100 + z))
}
67
68 # Determine if this is a Linux test system
69 #
70 # Return 0 if platform Linux, 1 if otherwise
71
# Determine if this is a Linux test system.
#
# Return 0 if the platform is Linux, 1 otherwise.
function is_linux
{
	# The test's own status is the function's return value.
	[[ $(uname -o) == "GNU/Linux" ]]
}
80
81 # Determine if this is a 32-bit system
82 #
83 # Return 0 if platform is 32-bit, 1 if otherwise
84
# Determine if this is a 32-bit system.
#
# Return 0 if the platform is 32-bit, 1 otherwise.
function is_32bit
{
	# The test's own status is the function's return value.
	[[ $(getconf LONG_BIT) == "32" ]]
}
93
94 # Determine if kmemleak is enabled
95 #
96 # Return 0 if kmemleak is enabled, 1 if otherwise
97
# Determine if the kernel memory-leak detector (kmemleak) is enabled.
#
# Return 0 if kmemleak is enabled, 1 otherwise.  Only meaningful on Linux
# kernels built with CONFIG_DEBUG_KMEMLEAK, which exposes the debugfs node
# checked here.
function is_kmemleak
{
	is_linux && [[ -e /sys/kernel/debug/kmemleak ]]
}
106
107 # Determine whether a dataset is mounted
108 #
109 # $1 dataset name
110 # $2 filesystem type; optional - defaulted to zfs
111 #
112 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
113
# Determine whether a dataset is mounted.
#
# $1	dataset name or mountpoint path
# $2	filesystem type; optional - defaults to zfs
#
# Return 0 if mounted; 1 if unmounted; df's status (>0) on lookup error
# for the ufs/nfs case.
function ismounted
{
	typeset fstype=${2:-zfs}
	typeset entry mntdir mntdev status

	case $fstype in
	zfs)
		if [[ "$1" == "/"* ]] ; then
			# A leading '/' means the caller passed a
			# mountpoint: match column 2 of 'zfs mount'.
			for entry in $(zfs mount | awk '{print $2}'); do
				[[ $1 == $entry ]] && return 0
			done
		else
			# Otherwise match the dataset name (column 1).
			for entry in $(zfs mount | awk '{print $1}'); do
				[[ $1 == $entry ]] && return 0
			done
		fi
		;;
	ufs|nfs)
		entry=$(df -F $fstype $1 2>/dev/null)
		status=$?
		((status != 0)) && return $status

		# df output has the form "<dir> (<device>): ..."; accept a
		# match on either the directory or the device.
		mntdir=${entry%%\(*}
		mntdir=${mntdir%% *}
		mntdev=${entry##*\(}
		mntdev=${mntdev%%\)*}
		mntdev=${mntdev%% *}

		[[ "$1" == "$mntdir" || "$1" == "$mntdev" ]] && return 0
		;;
	ext*)
		# df succeeds iff the target is mounted with this fstype.
		df -t $fstype $1 > /dev/null 2>&1
		return $?
		;;
	zvol)
		# Resolve the zvol symlink and look for the real device at
		# the start of a mount table entry.
		if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
			link=$(readlink -f $ZVOL_DEVDIR/$1)
			[[ -n "$link" ]] && \
			    mount | grep -q "^$link" && \
			    return 0
		fi
		;;
	esac

	return 1
}
161
162 # Return 0 if a dataset is mounted; 1 otherwise
163 #
164 # $1 dataset name
165 # $2 filesystem type; optional - defaulted to zfs
166
# Return 0 if a dataset is mounted; 1 otherwise.
#
# $1	dataset name
# $2	filesystem type; optional - defaults to zfs
function mounted
{
	# Collapse ismounted's extended status codes to a plain 0/1.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
173
174 # Return 0 if a dataset is unmounted; 1 otherwise
175 #
176 # $1 dataset name
177 # $2 filesystem type; optional - defaulted to zfs
178
# Return 0 if a dataset is unmounted; 1 otherwise.
#
# $1	dataset name
# $2	filesystem type; optional - defaults to zfs
function unmounted
{
	typeset rv

	ismounted $1 $2
	rv=$?
	# Only the explicit "not mounted" status (1) counts as unmounted;
	# lookup errors (>1) from ismounted report failure here too.
	((rv == 1)) && return 0
	return 1
}
185
186 # split line on ","
187 #
188 # $1 - line to split
189
# Split a line on "," by converting each comma to a space.
#
# $1 - line to split
function splitline
{
	echo $1 | tr ',' ' '
}
194
#
# Standard test entry point: perform the full default setup (pool,
# filesystem, optional container/volume - see default_setup_noexit for
# the argument list) and then terminate the test with a PASS result.
# This function does not return to the caller (log_pass exits).
#
195 function default_setup
196 {
197 default_setup_noexit "$@"
198 
199 log_pass
200 }
201
202 #
203 # Given a list of disks, setup storage pools and datasets.
204 #
#
# Given a list of disks, set up the standard storage pool and datasets:
# $TESTPOOL with $TESTPOOL/$TESTFS mounted at $TESTDIR, plus an optional
# container dataset when $2 is non-empty and an optional volume when $3
# is non-empty.
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3
	log_note begin default_setup_noexit

	# In the global zone, (re)create the pool from scratch; inside a
	# local zone the pool is owned by the global zone and only needs
	# to be re-exported.
	if is_global_zone; then
		poolexists $TESTPOOL && destroy_pool $TESTPOOL
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	# Optional: an unmountable container with one filesystem below it.
	if [[ -n $container ]]; then
		rm -rf $TESTDIR1 || \
		    log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
		    log_unresolved Could not create $TESTDIR1

		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	# Optional: a volume.  Real zvols only exist in the global zone;
	# a local zone gets a plain filesystem of the same name.
	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
			block_device_wait
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
250
251 #
252 # Given a list of disks, setup a storage pool, file system and
253 # a container.
254 #
255 function default_container_setup
256 {
257 typeset disklist=$1
258
259 default_setup "$disklist" "true"
260 }
261
262 #
263 # Given a list of disks, setup a storage pool,file system
264 # and a volume.
265 #
266 function default_volume_setup
267 {
268 typeset disklist=$1
269
270 default_setup "$disklist" "" "true"
271 }
272
273 #
274 # Given a list of disks, setup a storage pool,file system,
275 # a container and a volume.
276 #
277 function default_container_volume_setup
278 {
279 typeset disklist=$1
280
281 default_setup "$disklist" "true" "true"
282 }
283
284 #
285 # Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
286 # filesystem
287 #
288 # $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
289 # $2 snapshot name. Default, $TESTSNAP
290 #
#
# Create a snapshot of a filesystem or volume.
#
# $1 existing filesystem or volume name; defaults to $TESTPOOL/$TESTFS
# $2 snapshot name; defaults to $TESTSNAP
#
function create_snapshot
{
	typeset target=${1:-$TESTPOOL/$TESTFS}
	typeset snapname=${2:-$TESTSNAP}

	if [[ -z $target ]]; then
		log_fail "Filesystem or volume's name is undefined."
	fi
	if [[ -z $snapname ]]; then
		log_fail "Snapshot's name is undefined."
	fi

	snapexists $target@$snapname && \
	    log_fail "$target@$snapname already exists."
	datasetexists $target || \
	    log_fail "$target must exist."

	log_must zfs snapshot $target@$snapname
}
307
308 #
309 # Create a clone from a snapshot, default clone name is $TESTCLONE.
310 #
311 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
312 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
313 #
#
# Create a clone from a snapshot.
#
# $1 existing snapshot; defaults to $TESTPOOL/$TESTFS@$TESTSNAP
# $2 clone name; defaults to $TESTPOOL/$TESTCLONE
#
function create_clone # snapshot clone
{
	typeset src=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset dst=${2:-$TESTPOOL/$TESTCLONE}

	if [[ -z $src ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $dst ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must zfs clone $src $dst
}
326
327 #
328 # Create a bookmark of the given snapshot. Defaultly create a bookmark on
329 # filesystem.
330 #
331 # $1 Existing filesystem or volume name. Default, $TESTFS
332 # $2 Existing snapshot name. Default, $TESTSNAP
333 # $3 bookmark name. Default, $TESTBKMARK
334 #
#
# Create a bookmark of an existing snapshot.
#
# $1 existing filesystem or volume name; defaults to $TESTFS
# $2 existing snapshot name; defaults to $TESTSNAP
# $3 bookmark name; defaults to $TESTBKMARK
#
function create_bookmark
{
	typeset ds=${1:-$TESTFS}
	typeset snapname=${2:-$TESTSNAP}
	typeset mark=${3:-$TESTBKMARK}

	if [[ -z $ds ]]; then
		log_fail "Filesystem or volume's name is undefined."
	fi
	if [[ -z $snapname ]]; then
		log_fail "Snapshot's name is undefined."
	fi
	if [[ -z $mark ]]; then
		log_fail "Bookmark's name is undefined."
	fi

	bkmarkexists $ds#$mark && \
	    log_fail "$ds#$mark already exists."
	datasetexists $ds || \
	    log_fail "$ds must exist."
	snapexists $ds@$snapname || \
	    log_fail "$ds@$snapname must exist."

	log_must zfs bookmark $ds@$snapname $ds#$mark
}
355
356 #
357 # Create a temporary clone result of an interrupted resumable 'zfs receive'
358 # $1 Destination filesystem name. Must not exist, will be created as the result
359 # of this function along with its %recv temporary clone
360 # $2 Source filesystem name. Must not exist, will be created and destroyed
361 #
362 function create_recv_clone
363 {
364 typeset recvfs="$1"
365 typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
366 typeset snap="$sendfs@snap1"
367 typeset incr="$sendfs@snap2"
368 typeset mountpoint="$TESTDIR/create_recv_clone"
369 typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
370
371 [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
372
373 datasetexists $recvfs && log_fail "Recv filesystem must not exist."
374 datasetexists $sendfs && log_fail "Send filesystem must not exist."
375
376 log_must zfs create -o mountpoint="$mountpoint" $sendfs
377 log_must zfs snapshot $snap
378 log_must eval "zfs send $snap | zfs recv -u $recvfs"
379 log_must mkfile 1m "$mountpoint/data"
380 log_must zfs snapshot $incr
381 log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 > $sendfile"
382 log_mustnot eval "zfs recv -su $recvfs < $sendfile"
383 destroy_dataset "$sendfs" "-r"
384 log_must rm -f "$sendfile"
385
386 if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
387 log_fail "Error creating temporary $recvfs/%recv clone"
388 fi
389 }
390
391 function default_mirror_setup
392 {
393 default_mirror_setup_noexit $1 $2 $3
394
395 log_pass
396 }
397
398 #
399 # Given a pair of disks, set up a storage pool and dataset for the mirror
400 # @parameters: $1 the primary side of the mirror
401 # $2 the secondary side of the mirror
402 # @uses: ZPOOL ZFS TESTPOOL TESTFS
#
# Given a pair of disks, set up a mirrored storage pool and dataset.
# @parameters: $1 the primary side of the mirror
#              $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
#
function default_mirror_setup_noexit
{
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	if [[ -z $primary ]]; then
		log_fail "$func: No parameters passed"
	fi
	if [[ -z $secondary ]]; then
		log_fail "$func: No secondary partition passed"
	fi
	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	# Note: every positional parameter joins the mirror, not just $1/$2.
	log_must zpool create -f $TESTPOOL mirror $@
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
418
419 #
420 # create a number of mirrors.
421 # We create a number($1) of 2 way mirrors using the pairs of disks named
422 # on the command line. These mirrors are *not* mounted
423 # @parameters: $1 the number of mirrors to create
424 # $... the devices to use to create the mirrors on
425 # @uses: ZPOOL ZFS TESTPOOL
#
# Create a number of two-way mirror pools from pairs of disks given on
# the command line.  The pools, named ${TESTPOOL}N, are *not* mounted.
# @parameters: $1 the number of mirrors to create
#              $... the devices to use, consumed two at a time
# @uses: ZPOOL ZFS TESTPOOL
#
function setup_mirrors
{
	typeset -i n=$1
	shift

	while ((n > 0)); do
		# Each iteration needs a pair of device names.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$n ]] && rm -rf /$TESTPOOL$n
		log_must zpool create -f $TESTPOOL$n mirror $1 $2
		shift 2
		((n = n - 1))
	done
}
439
440 #
441 # create a number of raidz pools.
442 # We create a number($1) of 2 raidz pools using the pairs of disks named
443 # on the command line. These pools are *not* mounted
444 # @parameters: $1 the number of pools to create
445 # $... the devices to use to create the pools on
446 # @uses: ZPOOL ZFS TESTPOOL
#
# Create a number of raidz pools from pairs of disks given on the
# command line.  The pools, named ${TESTPOOL}N, are *not* mounted.
# @parameters: $1 the number of pools to create
#              $... the devices to use, consumed two at a time
# @uses: ZPOOL ZFS TESTPOOL
#
function setup_raidzs
{
	typeset -i n=$1
	shift

	while ((n > 0)); do
		# Each iteration needs a pair of device names.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$n ]] && rm -rf /$TESTPOOL$n
		log_must zpool create -f $TESTPOOL$n raidz $1 $2
		shift 2
		((n = n - 1))
	done
}
460
461 #
462 # Destroy the configured testpool mirrors.
463 # the mirrors are of the form ${TESTPOOL}{number}
464 # @uses: ZPOOL ZFS TESTPOOL
465 function destroy_mirrors
466 {
467 default_cleanup_noexit
468
469 log_pass
470 }
471
472 #
473 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
474 # $1 the list of disks
475 #
476 function default_raidz_setup
477 {
478 typeset disklist="$*"
479 disks=(${disklist[*]})
480
481 if [[ ${#disks[*]} -lt 2 ]]; then
482 log_fail "A raid-z requires a minimum of two disks."
483 fi
484
485 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
486 log_must zpool create -f $TESTPOOL raidz $disklist
487 log_must zfs create $TESTPOOL/$TESTFS
488 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
489
490 log_pass
491 }
492
493 #
494 # Common function used to cleanup storage pools and datasets.
495 #
496 # Invoked at the start of the test suite to ensure the system
497 # is in a known state, and also at the end of each set of
498 # sub-tests to ensure errors from one set of tests doesn't
499 # impact the execution of the next set.
500
501 function default_cleanup
502 {
503 default_cleanup_noexit
504
505 log_pass
506 }
507
508 #
509 # Utility function used to list all available pool names.
510 #
511 # NOTE: $KEEP is a variable containing pool names, separated by a newline
512 # character, that must be excluded from the returned list.
513 #
# Print every imported pool name, one per line, excluding exact matches
# against the newline-separated $KEEP list and any name matching the
# $NO_POOLS pattern.
# NOTE(review): if $NO_POOLS is empty/unset, 'grep -v ""' matches every
# line and the output is always empty — presumably the suite guarantees
# it is set; verify against callers.
514 function get_all_pools
515 {
516 zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
517 }
518
519 function default_cleanup_noexit
520 {
521 typeset pool=""
522 #
523 # Destroying the pool will also destroy any
524 # filesystems it contains.
525 #
526 if is_global_zone; then
527 zfs unmount -a > /dev/null 2>&1
528 ALL_POOLS=$(get_all_pools)
529 # Here, we loop through the pools we're allowed to
530 # destroy, only destroying them if it's safe to do
531 # so.
532 while [ ! -z ${ALL_POOLS} ]
533 do
534 for pool in ${ALL_POOLS}
535 do
536 if safe_to_destroy_pool $pool ;
537 then
538 destroy_pool $pool
539 fi
540 ALL_POOLS=$(get_all_pools)
541 done
542 done
543
544 zfs mount -a
545 else
546 typeset fs=""
547 for fs in $(zfs list -H -o name \
548 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
549 destroy_dataset "$fs" "-Rf"
550 done
551
552 # Need cleanup here to avoid garbage dir left.
553 for fs in $(zfs list -H -o name); do
554 [[ $fs == /$ZONE_POOL ]] && continue
555 [[ -d $fs ]] && log_must rm -rf $fs/*
556 done
557
558 #
559 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
560 # the default value
561 #
562 for fs in $(zfs list -H -o name); do
563 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
564 log_must zfs set reservation=none $fs
565 log_must zfs set recordsize=128K $fs
566 log_must zfs set mountpoint=/$fs $fs
567 typeset enc=""
568 enc=$(get_prop encryption $fs)
569 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
570 [[ "$enc" == "off" ]]; then
571 log_must zfs set checksum=on $fs
572 fi
573 log_must zfs set compression=off $fs
574 log_must zfs set atime=on $fs
575 log_must zfs set devices=off $fs
576 log_must zfs set exec=on $fs
577 log_must zfs set setuid=on $fs
578 log_must zfs set readonly=off $fs
579 log_must zfs set snapdir=hidden $fs
580 log_must zfs set aclmode=groupmask $fs
581 log_must zfs set aclinherit=secure $fs
582 fi
583 done
584 fi
585
586 [[ -d $TESTDIR ]] && \
587 log_must rm -rf $TESTDIR
588
589 disk1=${DISKS%% *}
590 if is_mpath_device $disk1; then
591 delete_partitions
592 fi
593 }
594
595
596 #
597 # Common function used to cleanup storage pools, file systems
598 # and containers.
599 #
600 function default_container_cleanup
601 {
602 if ! is_global_zone; then
603 reexport_pool
604 fi
605
606 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
607 [[ $? -eq 0 ]] && \
608 log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
609
610 destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
611 destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
612
613 [[ -e $TESTDIR1 ]] && \
614 log_must rm -rf $TESTDIR1 > /dev/null 2>&1
615
616 default_cleanup
617 }
618
619 #
620 # Common function used to cleanup snapshot of file system or volume. Default to
621 # delete the file system's snapshot
622 #
623 # $1 snapshot name
624 #
625 function destroy_snapshot
626 {
627 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
628
629 if ! snapexists $snap; then
630 log_fail "'$snap' does not existed."
631 fi
632
633 #
634 # For the sake of the value which come from 'get_prop' is not equal
635 # to the really mountpoint when the snapshot is unmounted. So, firstly
636 # check and make sure this snapshot's been mounted in current system.
637 #
638 typeset mtpt=""
639 if ismounted $snap; then
640 mtpt=$(get_prop mountpoint $snap)
641 (($? != 0)) && \
642 log_fail "get_prop mountpoint $snap failed."
643 fi
644
645 destroy_dataset "$snap"
646 [[ $mtpt != "" && -d $mtpt ]] && \
647 log_must rm -rf $mtpt
648 }
649
650 #
651 # Common function used to cleanup clone.
652 #
653 # $1 clone name
654 #
655 function destroy_clone
656 {
657 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
658
659 if ! datasetexists $clone; then
660 log_fail "'$clone' does not existed."
661 fi
662
663 # With the same reason in destroy_snapshot
664 typeset mtpt=""
665 if ismounted $clone; then
666 mtpt=$(get_prop mountpoint $clone)
667 (($? != 0)) && \
668 log_fail "get_prop mountpoint $clone failed."
669 fi
670
671 destroy_dataset "$clone"
672 [[ $mtpt != "" && -d $mtpt ]] && \
673 log_must rm -rf $mtpt
674 }
675
676 #
677 # Common function used to cleanup bookmark of file system or volume. Default
678 # to delete the file system's bookmark.
679 #
680 # $1 bookmark name
681 #
#
# Common function used to clean up a bookmark of a file system or volume.
# Defaults to deleting the file system's bookmark.
#
# $1 bookmark name; defaults to $TESTPOOL/$TESTFS#$TESTBKMARK
#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fixed: the message previously expanded the undefined
		# variable $bkmarkp, printing an empty name.
		log_fail "'$bkmark' does not existed."
	fi

	destroy_dataset "$bkmark"
}
692
693 # Return 0 if a snapshot exists; $? otherwise
694 #
695 # $1 - snapshot name
696
# Return 0 if a snapshot exists; non-zero otherwise.
#
# $1 - snapshot name
function snapexists
{
	# zfs list's status propagates as the function's return value.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
702
703 #
704 # Return 0 if a bookmark exists; $? otherwise
705 #
706 # $1 - bookmark name
707 #
#
# Return 0 if a bookmark exists; non-zero otherwise.
#
# $1 - bookmark name
#
function bkmarkexists
{
	# zfs list's status propagates as the function's return value.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
713
714 #
715 # Set a property to a certain value on a dataset.
716 # Sets a property of the dataset to the value as passed in.
717 # @param:
718 # $1 dataset who's property is being set
719 # $2 property to set
720 # $3 value to set property to
721 # @return:
722 # 0 if the property could be set.
723 # non-zero otherwise.
724 # @use: ZFS
725 #
#
# Set a property of a dataset to a given value.
# @param:
#	$1 dataset whose property is being set
#	$2 property to set
#	$3 value to set the property to
# @return:
#	0 if the property could be set; non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	typeset result
	result=$(zfs set $2=$3 $1 2>&1)
	typeset status=$?
	((status == 0)) && return 0

	# The set failed; record the full context for the test log.
	log_note "Setting property on $1 failed."
	log_note "property $2=$3"
	log_note "Return Code: $status"
	log_note "Output: $result"
	return $status
}
746
747 #
748 # Assign suite defined dataset properties.
749 # This function is used to apply the suite's defined default set of
750 # properties to a dataset.
751 # @parameters: $1 dataset to use
752 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
753 # @returns:
754 # 0 if the dataset has been altered.
755 # 1 if no pool name was passed in.
756 # 2 if the dataset could not be found.
757 # 3 if the dataset could not have it's properties set.
758 #
759 function dataset_set_defaultproperties
760 {
761 typeset dataset="$1"
762
763 [[ -z $dataset ]] && return 1
764
765 typeset confset=
766 typeset -i found=0
767 for confset in $(zfs list); do
768 if [[ $dataset = $confset ]]; then
769 found=1
770 break
771 fi
772 done
773 [[ $found -eq 0 ]] && return 2
774 if [[ -n $COMPRESSION_PROP ]]; then
775 dataset_setprop $dataset compression $COMPRESSION_PROP || \
776 return 3
777 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
778 fi
779 if [[ -n $CHECKSUM_PROP ]]; then
780 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
781 return 3
782 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
783 fi
784 return 0
785 }
786
787 #
788 # Check a numeric assertion
789 # @parameter: $@ the assertion to check
790 # @output: big loud notice if assertion failed
791 # @use: log_fail
792 #
#
# Check a numeric assertion.
# @parameter: $@ the arithmetic expression to check
# @output: big loud notice if the assertion failed
# @use: log_fail
#
function assert
{
	if ! (($@)); then
		log_fail "$@"
	fi
}
797
798 #
799 # Function to format partition size of a disk
800 # Given a disk cxtxdx reduces all partitions
801 # to 0 size
802 #
803 function zero_partitions #<whole_disk_name>
804 {
805 typeset diskname=$1
806 typeset i
807
808 if is_linux; then
809 DSK=$DEV_DSKDIR/$diskname
810 DSK=$(echo $DSK | sed -e "s|//|/|g")
811 log_must parted $DSK -s -- mklabel gpt
812 blockdev --rereadpt $DSK 2>/dev/null
813 block_device_wait
814 else
815 for i in 0 1 3 4 5 6 7
816 do
817 log_must set_partition $i "" 0mb $diskname
818 done
819 fi
820
821 return 0
822 }
823
824 #
825 # Given a slice, size and disk, this function
826 # formats the slice to the specified size.
827 # Size should be specified with units as per
828 # the `format` command requirements eg. 100mb 3gb
829 #
830 # NOTE: This entire interface is problematic for the Linux parted utilty
831 # which requires the end of the partition to be specified. It would be
832 # best to retire this interface and replace it with something more flexible.
833 # At the moment a best effort is made.
834 #
#
# Given a slice, start, size and disk, format the slice to the specified
# size.  Size is specified with units as per the `format` command, e.g.
# 100mb, 3gb.
#
# NOTE: This entire interface is problematic for the Linux parted utilty
# which requires the end of the partition to be specified.  It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4

	if is_linux; then
		if [[ -z $size || -z $disk ]]; then
			log_fail "The size or disk name is unspecified."
		fi

		# Convert the size to MiB.  Strip the unit suffix, then
		# scale if a gigabyte unit appears anywhere in the string.
		# (The old "${size:1:1} == 'g'" test inspected only the
		# second character, so multi-digit sizes like "10g" and
		# uppercase "2G" were mis-handled.)
		typeset size_mb=${size%%[mMgG]*}
		if [[ $size == *[gG]* ]]; then
			((size_mb = size_mb * 1024))
		fi

		# Create a GPT partition table when setting slice 0 or
		# when the device doesn't already contain a GPT label.
		parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
		typeset ret_val=$?
		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
			parted $DEV_DSKDIR/$disk -s -- mklabel gpt
			if [[ $? -ne 0 ]]; then
				log_note "Failed to create GPT partition table on $disk"
				return 1
			fi
		fi

		# When no start is given align on the first cylinder.
		if [[ -z "$start" ]]; then
			start=1
		fi

		# Determine the cylinder size for the device and use it to
		# calculate the end offset in cylinders.
		typeset -i cly_size_kb=0
		cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
		    unit cyl print | head -3 | tail -1 | \
		    awk -F '[:k.]' '{print $4}')
		((end = (size_mb * 1024 / cly_size_kb) + start))

		parted $DEV_DSKDIR/$disk -s -- \
		    mkpart part$slicenum ${start}cyl ${end}cyl
		if [[ $? -ne 0 ]]; then
			log_note "Failed to create partition $slicenum on $disk"
			return 1
		fi

		blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
		block_device_wait
	else
		if [[ -z $slicenum || -z $size || -z $disk ]]; then
			log_fail "The slice, size or disk name is unspecified."
		fi

		typeset format_file=/var/tmp/format_in.$$

		echo "partition" >$format_file
		echo "$slicenum" >> $format_file
		echo "" >> $format_file
		echo "" >> $format_file
		echo "$start" >> $format_file
		echo "$size" >> $format_file
		echo "label" >> $format_file
		echo "" >> $format_file
		echo "q" >> $format_file
		echo "q" >> $format_file

		format -e -s -d $disk -f $format_file

		# Capture format's status and clean up here, inside the
		# branch.  The old code sampled $? after the fi, where the
		# Linux path had already clobbered it with the status of
		# block_device_wait and $format_file was unset.
		typeset ret_val=$?
		rm -f $format_file
		if [[ $ret_val -ne 0 ]]; then
			log_note "Unable to format $disk slice $slicenum to $size"
			return 1
		fi
	fi

	return 0
}
916
917 #
918 # Delete all partitions on all disks - this is specifically for the use of multipath
919 # devices which currently can only be used in the test suite as raw/un-partitioned
920 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
921 #
922 function delete_partitions
923 {
924 typeset -i j=1
925
926 if [[ -z $DISK_ARRAY_NUM ]]; then
927 DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
928 fi
929 if [[ -z $DISKSARRAY ]]; then
930 DISKSARRAY=$DISKS
931 fi
932
933 if is_linux; then
934 if (( $DISK_ARRAY_NUM == 1 )); then
935 while ((j < MAX_PARTITIONS)); do
936 parted $DEV_DSKDIR/$DISK -s rm $j \
937 > /dev/null 2>&1
938 if (( $? == 1 )); then
939 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
940 if (( $? == 1 )); then
941 log_note "Partitions for $DISK should be deleted"
942 else
943 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
944 fi
945 return 0
946 else
947 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
948 if (( $? == 0 )); then
949 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
950 fi
951 fi
952 ((j = j+1))
953 done
954 else
955 for disk in `echo $DISKSARRAY`; do
956 while ((j < MAX_PARTITIONS)); do
957 parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
958 if (( $? == 1 )); then
959 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
960 if (( $? == 1 )); then
961 log_note "Partitions for $disk should be deleted"
962 else
963 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
964 fi
965 j=7
966 else
967 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
968 if (( $? == 0 )); then
969 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
970 fi
971 fi
972 ((j = j+1))
973 done
974 j=1
975 done
976 fi
977 fi
978 return 0
979 }
980
981 #
982 # Get the end cyl of the given slice
983 #
984 function get_endslice #<disk> <slice>
985 {
986 typeset disk=$1
987 typeset slice=$2
988 if [[ -z $disk || -z $slice ]] ; then
989 log_fail "The disk name or slice number is unspecified."
990 fi
991
992 if is_linux; then
993 endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
994 grep "part${slice}" | \
995 awk '{print $3}' | \
996 sed 's,cyl,,')
997 ((endcyl = (endcyl + 1)))
998 else
999 disk=${disk#/dev/dsk/}
1000 disk=${disk#/dev/rdsk/}
1001 disk=${disk%s*}
1002
1003 typeset -i ratio=0
1004 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
1005 grep "sectors\/cylinder" | \
1006 awk '{print $2}')
1007
1008 if ((ratio == 0)); then
1009 return
1010 fi
1011
1012 typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
1013 nawk -v token="$slice" '{if ($1==token) print $6}')
1014
1015 ((endcyl = (endcyl + 1) / ratio))
1016 fi
1017
1018 echo $endcyl
1019 }
1020
1021
1022 #
1023 # Given a size,disk and total slice number, this function formats the
1024 # disk slices from 0 to the total slice number with the same specified
1025 # size.
1026 #
1027 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
1028 {
1029 typeset -i i=0
1030 typeset slice_size=$1
1031 typeset disk_name=$2
1032 typeset total_slices=$3
1033 typeset cyl
1034
1035 zero_partitions $disk_name
1036 while ((i < $total_slices)); do
1037 if ! is_linux; then
1038 if ((i == 2)); then
1039 ((i = i + 1))
1040 continue
1041 fi
1042 fi
1043 log_must set_partition $i "$cyl" $slice_size $disk_name
1044 cyl=$(get_endslice $disk_name $i)
1045 ((i = i+1))
1046 done
1047 }
1048
1049 #
1050 # This function continues to write to a filenum number of files into dirnum
1051 # number of directories until either file_write returns an error or the
1052 # maximum number of files per directory have been written.
1053 #
1054 # Usage:
1055 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1056 #
1057 # Return value: 0 on success
1058 # non 0 on error
1059 #
1060 # Where :
1061 # destdir: is the directory where everything is to be created under
1062 # dirnum: the maximum number of subdirectories to use, -1 no limit
1063 # filenum: the maximum number of files per subdirectory
1064 # bytes: number of bytes to write
1065 # num_writes: numer of types to write out bytes
1066 # data: the data that will be written
1067 #
1068 # E.g.
1069 # file_fs /testdir 20 25 1024 256 0
1070 #
1071 # Note: bytes * num_writes equals the size of the testfile
1072 #
1073 function fill_fs # destdir dirnum filenum bytes num_writes data
1074 {
1075 typeset destdir=${1:-$TESTDIR}
1076 typeset -i dirnum=${2:-50}
1077 typeset -i filenum=${3:-50}
1078 typeset -i bytes=${4:-8192}
1079 typeset -i num_writes=${5:-10240}
1080 typeset -i data=${6:-0}
1081
1082 typeset -i odirnum=1
1083 typeset -i idirnum=0
1084 typeset -i fn=0
1085 typeset -i retval=0
1086
1087 log_must mkdir -p $destdir/$idirnum
1088 while (($odirnum > 0)); do
1089 if ((dirnum >= 0 && idirnum >= dirnum)); then
1090 odirnum=0
1091 break
1092 fi
1093 file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
1094 -b $bytes -c $num_writes -d $data
1095 retval=$?
1096 if (($retval != 0)); then
1097 odirnum=0
1098 break
1099 fi
1100 if (($fn >= $filenum)); then
1101 fn=0
1102 ((idirnum = idirnum + 1))
1103 log_must mkdir -p $destdir/$idirnum
1104 else
1105 ((fn = fn + 1))
1106 fi
1107 done
1108 return $retval
1109 }
1110
1111 #
1112 # Simple function to get the specified property. If unable to
1113 # get the property then exits.
1114 #
1115 # Note property is in 'parsable' format (-p)
1116 #
#
# Simple function to get the specified property of a dataset.  Logs a
# note and returns 1 when the property cannot be read.
#
# Note: the property is reported in 'parsable' format (-p).
#
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset value

	if ! value=$(zfs get -pH -o value $prop $dataset 2>/dev/null); then
		log_note "Unable to get $prop property for dataset " \
		    "$dataset"
		return 1
	fi

	echo "$value"
	return 0
}
1133
1134 #
1135 # Simple function to get the specified property of pool. If unable to
1136 # get the property then exits.
1137 #
1138 # Note property is in 'parsable' format (-p)
1139 #
#
# Simple function to get the specified property of a pool.  Logs a note
# and returns 1 when the pool does not exist or the property cannot be
# read.
#
# Note: the property is reported in 'parsable' format (-p).
#
function get_pool_prop # property pool
{
	typeset prop=$1
	typeset pool=$2
	typeset prop_val

	if poolexists $pool ; then
		prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
		    awk '{print $3}')
		# A successful 'zpool get' always yields a non-empty value
		# column.  Testing $? here (as before) only saw awk's exit
		# status, so zpool failures were never detected; test the
		# extracted value instead.
		if [[ -z "$prop_val" ]]; then
			log_note "Unable to get $prop property for pool " \
			    "$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo "$prop_val"
	return 0
}
1162
1163 # Return 0 if a pool exists; $? otherwise
1164 #
1165 # $1 - pool name
1166
# Return 0 if a pool exists; non-zero otherwise.
#
# $1 - pool name
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# zpool's status propagates as the function's return value.
	zpool get name "$pool" > /dev/null 2>&1
}
1179
1180 # Return 0 if all the specified datasets exist; $? otherwise
1181 #
1182 # $1-n dataset name
# Return 0 if all the specified datasets exist; non-zero otherwise.
#
# $1-n dataset names
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		# Bail out with zfs's status on the first missing dataset.
		zfs get name $ds > /dev/null 2>&1 || \
		    return $?
	done

	return 0
}
1198
1199 # return 0 if none of the specified datasets exists, otherwise return 1.
1200 #
1201 # $1-n dataset name
# Return 0 if none of the specified datasets exists; otherwise return 1.
#
# $1-n dataset names
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		# Any dataset that can be listed disproves the claim.
		zfs list -H -t filesystem,snapshot,volume $ds \
		    > /dev/null 2>&1 && return 1
	done

	return 0
}
1217
1218 #
1219 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1220 #
1221 # Returns 0 if shared, 1 otherwise.
1222 #
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# Accept either a dataset name or an absolute mountpoint.  A
	# dataset is resolved to its mountpoint first; datasets mounted
	# as none/legacy/- cannot be shared via their ZFS mountpoint.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	# On Linux the 'share' wrapper lists the exported path in the
	# first column.
	if is_linux; then
		for mtpt in `share | awk '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	fi

	# illumos share(1M) lists the exported path in the second column.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared; report the NFS server service state to aid
	# debugging a possibly-offline server.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1264
1265 #
1266 # Given a dataset name determine if it is shared via SMB.
1267 #
1268 # Returns 0 if shared, 1 otherwise.
1269 #
function is_shared_smb
{
	typeset fs=$1
	typeset share_name

	# A nonexistent dataset can never be shared.
	datasetnonexists "$fs" && return 1

	# Samba usershare names encode '/' as '_'.
	fs=$(echo $fs | sed 's@/@_@g')

	if ! is_linux; then
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi

	for share_name in $(net usershare list | awk '{print $1}'); do
		[[ $share_name == $fs ]] && return 0
	done

	return 1
}
1293
1294 #
1295 # Given a mountpoint, determine if it is not shared via NFS.
1296 #
1297 # Returns 0 if not shared, 1 otherwise.
1298 #
function not_shared
{
	typeset fs=$1

	# Simply invert is_shared's result.
	if is_shared $fs; then
		return 1
	fi

	return 0
}
1310
1311 #
1312 # Given a dataset determine if it is not shared via SMB.
1313 #
1314 # Returns 0 if not shared, 1 otherwise.
1315 #
function not_shared_smb
{
	typeset fs=$1

	# Simply invert is_shared_smb's result.
	if is_shared_smb $fs; then
		return 1
	fi

	return 0
}
1327
1328 #
1329 # Helper function to unshare a mountpoint.
1330 #
function unshare_fs #fs
{
	typeset fs=$1

	# Only invoke 'zfs unshare' when the dataset is currently
	# shared over NFS or SMB.
	if is_shared $fs || is_shared_smb $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1342
1343 #
1344 # Helper function to share a NFS mountpoint.
1345 #
function share_nfs #fs
{
	typeset fs=$1

	# Nothing to do when the filesystem is already exported; the
	# share invocation differs between Linux and illumos.
	if ! is_shared $fs; then
		if is_linux; then
			log_must share "*:$fs"
		else
			log_must share -F nfs $fs
		fi
	fi

	return 0
}
1364
1365 #
1366 # Helper function to unshare a NFS mountpoint.
1367 #
function unshare_nfs #fs
{
	typeset fs=$1

	# Only unshare filesystems that are currently exported; the
	# unshare invocation differs between Linux and illumos.
	if is_shared $fs; then
		if is_linux; then
			log_must unshare -u "*:$fs"
		else
			log_must unshare -F nfs $fs
		fi
	fi

	return 0
}
1386
1387 #
1388 # Helper function to show NFS shares.
1389 #
function showshares_nfs
{
	# Linux and illumos share(1M) take different options to list
	# the active NFS shares.
	if is_linux; then
		share -v
	else
		share -F nfs
	fi

	return 0
}
1400
1401 #
1402 # Helper function to show SMB shares.
1403 #
function showshares_smb
{
	# On Linux SMB shares are managed through Samba usershares;
	# illumos lists them via share(1M).
	if is_linux; then
		net usershare list
	else
		share -F smb
	fi

	return 0
}
1414
1415 #
1416 # Check NFS server status and trigger it online.
1417 #
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	if is_linux; then
		#
		# Re-synchronize /var/lib/nfs/etab with /etc/exports and
		# /etc/exports.d./* to provide a clean test environment.
		#
		log_must share -r

		log_note "NFS server must be started prior to running ZTS."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		# NOTE(review): 'timeout' is not typeset and leaks into the
		# global scope; the bare name works in [[ ]] because -ne
		# evaluates its operands arithmetically, but '$timeout'
		# would be clearer.
		timeout=10
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1475
1476 #
1477 # To verify whether calling process is in global zone
1478 #
1479 # Return 0 if in global zone, 1 in non-global zone
1480 #
function is_global_zone
{
	# Linux has no zones, so it is always treated as global.
	is_linux && return 0

	typeset this_zone=$(zonename 2>/dev/null)
	if [[ $this_zone != "global" ]]; then
		return 1
	fi
	return 0
}
1493
1494 #
1495 # Verify whether test is permitted to run from
1496 # global zone, local zone, or both
1497 #
1498 # $1 zone limit, could be "global", "local", or "both"(no limit)
1499 #
1500 # Return 0 if permitted, otherwise exit with log_unsupported
1501 #
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit means the test may run anywhere.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# In a local zone, make sure the zone's TESTPOOL* datasets
		# are mounted before the test runs.
		reexport_pool
	fi

	return 0
}
1536
# Return 0 if the pool is created successfully; $? otherwise
1538 # Note: In local zones, this function should return 0 silently.
1539 #
1540 # $1 - pool name
1541 # $2-n - [keyword] devs_list
1542
function create_pool #pool devs_list
{
	# Only the portion before the first '/' is a pool name.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Recreate from scratch if the pool already exists.
	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		# Remove a stale default-mountpoint directory first.
		# $@ is intentionally left unquoted so callers may pass
		# the whole vdev specification as a single string.
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1565
# Return 0 if the pool is destroyed successfully; $? otherwise
1567 # Note: In local zones, this function should return 0 silently.
1568 #
1569 # $1 - pool name
1570 # Destroy pool with the given parameters.
1571
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# Pools can only be manipulated from the global zone.
	is_global_zone || return 0

	if ! poolexists "$pool"; then
		log_note "Pool does not exist. ($pool)"
		return 1
	fi

	mtpt=$(get_prop mountpoint "$pool")

	# At times, syseventd/udev activity can cause attempts to
	# destroy a pool to fail with EBUSY. log_must_busy retries a
	# few times before requiring the destroy to succeed.
	log_must_busy zpool destroy -f $pool

	# Clean up any leftover mountpoint directory.
	if [[ -d $mtpt ]]; then
		log_must rm -rf $mtpt
	fi

	return 0
}
1602
# Return 0 if the dataset is destroyed successfully; $? otherwise
1604 # Note: In local zones, this function should return 0 silently.
1605 #
1606 # $1 - dataset name
1607 # $2 - custom arguments for zfs destroy
1608 # Destroy dataset with the given parameters.
1609
function destroy_dataset #dataset #args
{
	typeset dataset=$1
	typeset mtpt
	typeset args=${2:-""}

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	# Datasets can only be manipulated from the global zone.
	is_global_zone || return 0

	if ! datasetexists "$dataset"; then
		log_note "Dataset does not exist. ($dataset)"
		return 1
	fi

	mtpt=$(get_prop mountpoint "$dataset")
	# log_must_busy retries transient EBUSY failures.
	log_must_busy zfs destroy $args $dataset

	# Clean up any leftover mountpoint directory.
	if [[ -d $mtpt ]]; then
		log_must rm -rf $mtpt
	fi

	return 0
}
1636
1637 #
1638 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1639 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1640 # and a zvol device to the zone.
1641 #
1642 # $1 zone name
1643 # $2 zone root directory prefix
1644 # $3 zone ip
1645 #
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	# NOTE(review): zone_ip is accepted but never referenced in the
	# body below — verify whether it is still needed.
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol
	block_device_wait

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	# Reset the zone's directory under zone_root, creating zone_root
	# itself (mode 0700) if needed.
	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the 5 containers to the zone.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo "terminal=dtterm" >> $sysidcfg
	echo "network_interface=primary {" >> $sysidcfg
	echo "hostname=$zone_name" >> $sysidcfg
	echo "}" >> $sysidcfg
	echo "name_service=NONE" >> $sysidcfg
	echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo "security_policy=NONE" >> $sysidcfg
	echo "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1750
1751 #
1752 # Reexport TESTPOOL & TESTPOOL(1-4)
1753 #
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Point TESTPOOL and TESTPOOL1..TESTPOOL4 at the zone's
	# delegated datasets ($ZONE_POOL/$ZONE_CTR<n>) and ensure each
	# is mounted.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# eval is required to assign and dereference the
			# numbered TESTPOOL<i> variables indirectly.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1774
1775 #
1776 # Verify a given disk or pool state
1777 #
# Return 0 if the pool/disk matches the expected state, 1 otherwise
1779 #
function check_state # pool disk state{online,offline,degraded}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	[[ -z $pool ]] || [[ -z $state ]] \
	    && log_fail "Arguments invalid or missing"

	if [[ -z $disk ]]; then
		# No disk given: check the pool's overall health.
		zpool get -H -o value health $pool | grep -qi "$state"
	else
		# Match the state on the disk's line of 'zpool status'.
		zpool status -v $pool | grep "$disk" | grep -qi "$state"
	fi
}
1800
1801 #
1802 # Get the mountpoint of snapshot
1803 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1804 # as its mountpoint
1805 #
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# A snapshot name must contain '@'.
	[[ $dataset == *@* ]] || \
	    log_fail "Error name of snapshot '$dataset'."

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# Both the filesystem and snapshot parts must be non-empty.
	[[ -n $fs && -n $snap ]] || \
	    log_fail "Error name of snapshot '$dataset'."

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1823
1824 #
1825 # Given a device and 'ashift' value verify it's correctly set on every label
1826 #
1827 function verify_ashift # device ashift
1828 {
1829 typeset device="$1"
1830 typeset ashift="$2"
1831
1832 zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
1833 if (ashift != $2)
1834 exit 1;
1835 else
1836 count++;
1837 } END {
1838 if (count != 4)
1839 exit 1;
1840 else
1841 exit 0;
1842 }'
1843
1844 return $?
1845 }
1846
1847 #
1848 # Given a pool and file system, this function will verify the file system
1849 # using the zdb internal tool. Note that the pool is exported and imported
1850 # to ensure it has consistent state.
1851 #
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""
	typeset dir

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Export/import to guarantee zdb sees a consistent on-disk state.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	# Build the '-d <dir>' search arguments for the import.
	for dir in $dirs ; do
		search_path="$search_path -d $dir"
	done

	log_must zpool import $search_path $pool

	if ! zdb -cudi $filesys > $zdbout 2>&1; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1885
1886 #
1887 # Given a pool, and this function list all disks in the pool
1888 #
function get_disklist # pool
{
	typeset disklist=""

	# Forward every argument to 'zpool iostat' so that callers may
	# prepend options (get_disklist_fullpath passes '-P <pool>' as
	# two words); with a single pool argument the behavior is
	# unchanged.  Filter out separator lines and vdev group labels,
	# leaving only the leaf device names.
	disklist=$(zpool iostat -v $@ | nawk '(NR >4) {print $1}' | \
	    grep -v "\-\-\-\-\-" | \
	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")

	echo $disklist
}
1899
1900 #
1901 # Given a pool, and this function list all disks in the pool with their full
1902 # path (like "/dev/sda" instead of "sda").
1903 #
1904 function get_disklist_fullpath # pool
1905 {
1906 args="-P $1"
1907 get_disklist $args
1908 }
1909
1910
1911
1912 # /**
1913 # This function kills a given list of processes after a time period. We use
1914 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1915 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1916 # would be listed as FAIL, which we don't want : we're happy with stress tests
1917 # running for a certain amount of time, then finishing.
1918 #
1919 # @param $1 the time in seconds after which we should terminate these processes
1920 # @param $2..$n the processes we wish to terminate.
1921 # */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		# Signal only processes that are still alive.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1941
1942 #
1943 # Verify a given hotspare disk is inuse or avail
1944 #
# Return 0 if the pool/disk matches the expected state, 1 otherwise
1946 #
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	# typeset cur_state so it no longer leaks into the global scope.
	typeset cur_state

	# Query the spare's state from the pool's 'spares' section.
	cur_state=$(get_device_state $pool $disk "spares")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
1960
1961 #
1962 # Wait until a hotspare transitions to a given state or times out.
1963 #
1964 # Return 0 when pool/disk matches expected state, 1 on timeout.
1965 #
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional $DEV_DSKDIR/ prefix.  The previous pattern
	# '${2#$/DEV_DSKDIR/}' was a typo ('$/' is literal) and never
	# matched anything.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the spare reaches the state or the
	# timeout expires.
	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1985
1986 #
1987 # Verify a given slog disk is inuse or avail
1988 #
# Return 0 if the pool/disk matches the expected state, 1 otherwise
1990 #
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	# typeset cur_state so it no longer leaks into the global scope.
	typeset cur_state

	# Query the log device's state from the pool's 'logs' section.
	cur_state=$(get_device_state $pool $disk "logs")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
2004
2005 #
2006 # Verify a given vdev disk is inuse or avail
2007 #
# Return 0 if the pool/disk matches the expected state, 1 otherwise
2009 #
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	# Strip an optional $DEV_DSKDIR/ prefix.  The previous pattern
	# '${2#$/DEV_DSKDIR/}' was a typo ('$/' is literal) and never
	# matched anything.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	# typeset cur_state so it no longer leaks into the global scope.
	typeset cur_state

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
2023
2024 #
2025 # Wait until a vdev transitions to a given state or times out.
2026 #
2027 # Return 0 when pool/disk matches expected state, 1 on timeout.
2028 #
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional $DEV_DSKDIR/ prefix.  The previous pattern
	# '${2#$/DEV_DSKDIR/}' was a typo ('$/' is literal) and never
	# matched anything.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the vdev reaches the state or the
	# timeout expires.
	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
2048
2049 #
2050 # Check the output of 'zpool status -v <pool>',
2051 # and to see if the content of <token> contain the <keyword> specified.
2052 #
2053 # Return 0 is contain, 1 otherwise
2054 #
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}
	# typeset scan so it no longer leaks into the global scope.
	typeset scan

	# Extract the 'zpool status' line whose first field is
	# '<token>:' (e.g. 'scan:'), then look for the keyword in it.
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
2071
2072 #
2073 # These 6 following functions are instance of check_pool_status()
2074 # is_pool_resilvering - to check if the pool is resilver in progress
2075 # is_pool_resilvered - to check if the pool is resilver completed
2076 # is_pool_scrubbing - to check if the pool is scrub in progress
2077 # is_pool_scrubbed - to check if the pool is scrub completed
2078 # is_pool_scrub_stopped - to check if the pool is scrub stopped
2079 # is_pool_scrub_paused - to check if the pool has scrub paused
2080 #
# Each helper's exit status is that of its last command, so the
# trailing 'return $?' calls were redundant and have been dropped.
function is_pool_resilvering #pool <verbose>
{
	check_pool_status "$1" "scan" "resilver in progress since " $2
}

function is_pool_resilvered #pool <verbose>
{
	check_pool_status "$1" "scan" "resilvered " $2
}

function is_pool_scrubbing #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub in progress since " $2
}

function is_pool_scrubbed #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub repaired" $2
}

function is_pool_scrub_stopped #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub canceled" $2
}

function is_pool_scrub_paused #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub paused since " $2
}
2116
2117 #
# Use create_pool()/destroy_pool() to clean up the information
# on the given disks to avoid slice overlapping.
2120 #
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Remove a stale scratch pool from a previous run, if any.
	poolexists $pool && destroy_pool $pool

	# Creating and destroying a throwaway pool relabels the
	# devices, clearing their previous contents.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
2134
2135 #/**
2136 # A function to find and locate free disks on a system or from given
2137 # disks as the parameter. It works by locating disks that are in use
2138 # as swap devices and dump devices, and also disks listed in /etc/vfstab
2139 #
2140 # $@ given disks to find which are free, default is all disks in
2141 # the test system
2142 #
2143 # @return a string containing the list of available disks
2144 #*/
function find_disks
{
	# Trust provided list, no attempt is made to locate unused devices.
	if is_linux; then
		echo "$@"
		return
	fi


	# Temp files holding the current swap and dump device usage.
	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	swap -l > $sfi
	dumpadm > $dmpi 2>/dev/null

	# write an awk script that can process the output of format
	# to produce a list of disks we know about. Note that we have
	# to escape "$2" so that the shell doesn't interpret it while
	# we're creating the awk script.
	# -------------------
	cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
	BEGIN { FS="."; }

	/^Specify disk/{
		searchdisks=0;
	}

	{
		if (searchdisks && \$2 !~ "^$"){
			split(\$2,arr," ");
			print arr[1];
		}
	}

	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
EOF
	#---------------------

	chmod 755 /tmp/find_disks.awk
	# Default to every disk format(1M) reports when no list is given.
	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
	rm /tmp/find_disks.awk

	unused=""
	for disk in $disks; do
	# Check for mounted
		grep "${disk}[sp]" /etc/mnttab >/dev/null
		(($? == 0)) && continue
	# Check for swap
		grep "${disk}[sp]" $sfi >/dev/null
		(($? == 0)) && continue
	# check for dump device
		grep "${disk}[sp]" $dmpi >/dev/null
		(($? == 0)) && continue
	# check to see if this disk hasn't been explicitly excluded
	# by a user-set environment variable
		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
		(($? == 0)) && continue
		unused_candidates="$unused_candidates $disk"
	done
	rm $sfi
	rm $dmpi

	# now just check to see if those disks do actually exist
	# by looking for a device pointing to the first slice in
	# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if [ -b $DEV_DSKDIR/${disk}s0 ]; then
		if [ $count -lt $max_finddisksnum ]; then
			unused="$unused $disk"
			# do not impose limit if $@ is provided
			[[ -z $@ ]] && ((count = count + 1))
		fi
		fi
	done

	# finally, return our disk list
	echo $unused
}
2228
2229 #
2230 # Add specified user to specified group
2231 #
2232 # $1 group name
2233 # $2 user name
2234 # $3 base of the homedir (optional)
2235 #
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}
	typeset profile

	if ((${#gname} == 0 || ${#uname} == 0)); then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname

	# Export the constrained PATH to every login-shell startup file.
	for profile in .profile .bash_profile .login; do
		echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/$profile
	done

	# Add new users to the same group and the command line utils.
	# This allows them to be run out of the original users home
	# directory as long as it permissioned to be group readable.
	if is_linux; then
		cmd_group=$(stat --format="%G" $(which zfs))
		log_must usermod -a -G $cmd_group $uname
	fi

	return 0
}
2261
2262 #
2263 # Delete the specified user.
2264 #
2265 # $1 login name
2266 # $2 base of the homedir (optional)
2267 #
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	if ((${#user} == 0)); then
		log_fail "login name is necessary."
	fi

	# userdel can transiently report the account as "currently
	# used"; retry up to 5 times in that case.
	id $user > /dev/null 2>&1 && \
	    log_must_retry "currently used" 5 userdel $user

	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
2285
2286 #
2287 # Select valid gid and create specified group.
2288 #
2289 # $1 group name
2290 #
function add_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	if is_linux; then
		# Let the system choose the gid; groupadd either
		# succeeds or fails outright.  (The original wrapped
		# this in a loop that could never iterate.)
		if groupadd $group > /dev/null 2>&1; then
			return 0
		fi
		return 1
	fi

	# Assign 100 as the base gid, a larger value is selected for
	# Linux because for many distributions 1000 and under are reserved.
	typeset -i gid=100
	while true; do
		groupadd -g $gid $group > /dev/null 2>&1
		case $? in
			0) return 0 ;;
			# The gid is not unique
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
2324
2325 #
2326 # Delete the specified group.
2327 #
2328 # $1 group name
2329 #
function del_group #<group_name>
{
	typeset grp=$1
	if ((${#grp} == 0)); then
		log_fail "group name is necessary."
	fi

	if is_linux; then
		# Probe for the group with getent; exit code 2 means
		# "key not found".
		getent group $grp > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			# Group does not exist.
			2) return 0 ;;
			# Name already exists as a group name
			0) log_must groupdel $grp ;;
			*) return 1 ;;
		esac
	else
		# A no-op rename probes for existence on illumos;
		# groupmod exits 6 when the group does not exist and 9
		# when the name is already in use.
		groupmod -n $grp $grp > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			# Group does not exist.
			6) return 0 ;;
			# Name already exists as a group name
			9) log_must groupdel $grp ;;
			*) return 1 ;;
		esac
	fi

	return 0
}
2361
2362 #
2363 # This function will return true if it's safe to destroy the pool passed
2364 # as argument 1. It checks for pools based on zvols and files, and also
2365 # files contained in a pool that may have a different mountpoint.
2366 #
2367 function safe_to_destroy_pool { # $1 the pool name
2368
2369 typeset pool=""
2370 typeset DONT_DESTROY=""
2371
2372 # We check that by deleting the $1 pool, we're not
2373 # going to pull the rug out from other pools. Do this
2374 # by looking at all other pools, ensuring that they
2375 # aren't built from files or zvols contained in this pool.
2376
2377 for pool in $(zpool list -H -o name)
2378 do
2379 ALTMOUNTPOOL=""
2380
2381 # this is a list of the top-level directories in each of the
2382 # files that make up the path to the files the pool is based on
2383 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2384 awk '{print $1}')
2385
2386 # this is a list of the zvols that make up the pool
2387 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2388 | awk '{print $1}')
2389
2390 # also want to determine if it's a file-based pool using an
2391 # alternate mountpoint...
2392 POOL_FILE_DIRS=$(zpool status -v $pool | \
2393 grep / | awk '{print $1}' | \
2394 awk -F/ '{print $2}' | grep -v "dev")
2395
2396 for pooldir in $POOL_FILE_DIRS
2397 do
2398 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2399 grep "${pooldir}$" | awk '{print $1}')
2400
2401 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2402 done
2403
2404
2405 if [ ! -z "$ZVOLPOOL" ]
2406 then
2407 DONT_DESTROY="true"
2408 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2409 fi
2410
2411 if [ ! -z "$FILEPOOL" ]
2412 then
2413 DONT_DESTROY="true"
2414 log_note "Pool $pool is built from $FILEPOOL on $1"
2415 fi
2416
2417 if [ ! -z "$ALTMOUNTPOOL" ]
2418 then
2419 DONT_DESTROY="true"
2420 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2421 fi
2422 done
2423
2424 if [ -z "${DONT_DESTROY}" ]
2425 then
2426 return 0
2427 else
2428 log_note "Warning: it is not safe to destroy $1!"
2429 return 1
2430 fi
2431 }
2432
2433 #
2434 # Get the available ZFS compression options
2435 # $1 option type zfs_set|zfs_compress
2436 #
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
	    gzip-6 gzip-7 gzip-8 gzip-9"

	# 'zfs_compress' yields values valid as compression algorithms;
	# 'zfs_set' additionally includes 'off' for property-set tests.
	if [[ $1 == "zfs_compress" ]] ; then
		COMPRESS_OPTS="on lzjb"
	elif [[ $1 == "zfs_set" ]] ; then
		COMPRESS_OPTS="on off lzjb"
	fi
	typeset valid_opts="$COMPRESS_OPTS"
	# Append the gzip variants only when this zfs build mentions
	# gzip in its usage output.
	zfs get 2>&1 | grep gzip >/dev/null 2>&1
	if [[ $? -eq 0 ]]; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2455
2456 #
2457 # Verify zfs operation with -p option work as expected
2458 # $1 operation, value could be create, clone or rename
2459 # $2 dataset type, value could be fs or vol
2460 # $3 dataset name
2461 # $4 new dataset name
2462 #
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes only the new name; volumes also
			# need a size.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			# clone requires an existing snapshot source.
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			# rename requires an existing, non-snapshot source.
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	destroy_dataset "${newdataset%/*}" "-rRf"

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	block_device_wait

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2526
2527 #
2528 # Get configuration of pool
2529 # $1 pool name
2530 # $2 config name
2531 #
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root
	typeset value

	if ! poolexists "$pool" ; then
		return 1
	fi
	# Last column of "zpool list -H" is the altroot; "-" means the pool
	# was imported normally so its cachefile entry can be used (-C),
	# otherwise fall back to discovery mode (-e).
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	# zdb wraps string values in single quotes; strip one surrounding
	# pair.  The quote characters must be escaped -- a bare ' inside
	# ${...} leaves an unbalanced quote that only parsed at all due to
	# ksh leniency and is a syntax error in POSIX sh.
	if [[ -n $value ]] ; then
		value=${value#\'}
		value=${value%\'}
	fi
	echo $value

	return 0
}
2557
2558 #
2559 # Privated function. Random select one of items from arguments.
2560 #
2561 # $1 count
2562 # $2-n string
2563 #
function _random_get
{
	typeset -i total=$1
	shift
	typeset pool="$@"
	typeset -i pick

	# Choose a 1-based field index uniformly in [1, total].
	((pick = RANDOM % total + 1))

	# Fields are space separated; an index past the last field yields
	# the empty string.
	echo "$pool" | cut -f $pick -d ' '
}
2576
2577 #
2578 # Random select one of item from arguments which include NONE string
2579 #
#
# Randomly select one of the arguments, or the empty string ("NONE").
#
function random_get_with_non
{
	typeset -i cnt=$#

	# Add one extra slot so _random_get can land one past the last
	# argument, in which case it returns the empty string.
	# The original "((cnt =+ 1))" assigned +1 to cnt instead of
	# incrementing it, so the first argument was always selected.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2587
2588 #
2589 # Random select one of item from arguments which doesn't include NONE string
2590 #
function random_get
{
	# Uniform choice among exactly the given arguments (no empty slot).
	_random_get $# "$@"
}
2595
2596 #
2597 # Detect if the current system support slog
2598 #
function verify_slog_support
{
	typeset workdir=$TEST_BASE_DIR/disk.$$
	typeset testpool=foo.$$
	typeset data_vdev=$workdir/a
	typeset log_vdev=$workdir/b
	typeset -i rv=0

	# Build two scratch file vdevs and dry-run (-n) creating a pool
	# with a separate log device; success means slog is supported.
	mkdir -p $workdir
	mkfile $MINVDEVSIZE $data_vdev $log_vdev

	zpool create -n $testpool $data_vdev log $log_vdev \
	    > /dev/null 2>&1 || rv=1

	rm -r $workdir

	return $rv
}
2617
2618 #
2619 # The function will generate a dataset name with specific length
2620 # $1, the length of the name
2621 # $2, the base string to construct the name
2622 #
function gen_dataset_name
{
	typeset -i want=$1
	typeset seed="$2"
	typeset -i seedlen=${#seed}
	typeset -i copies
	typeset result=""

	# Round up so the generated name is at least $want characters
	# long (a whole number of copies of the base string).
	((copies = (want + seedlen - 1) / seedlen))

	while ((copies > 0)); do
		result="${result}${seed}"
		((copies -= 1))
	done

	echo $result
}
2644
2645 #
2646 # Get cksum tuple of dataset
2647 # $1 dataset name
2648 #
2649 # sample zdb output:
2650 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2651 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2652 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2653 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2654 function datasetcksum
2655 {
2656 typeset cksum
2657 sync
2658 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2659 | awk -F= '{print $7}')
2660 echo $cksum
2661 }
2662
2663 #
2664 # Get cksum of file
2665 # #1 file path
2666 #
function checksum
{
	# The first whitespace field of cksum(1) output is the CRC of
	# the file's contents.
	cksum $1 | awk '{print $1}'
}
2673
2674 #
2675 # Get the given disk/slice state from the specific field of the pool
2676 #
2677 function get_device_state #pool disk field("", "spares","logs")
2678 {
2679 typeset pool=$1
2680 typeset disk=${2#$DEV_DSKDIR/}
2681 typeset field=${3:-$pool}
2682
2683 state=$(zpool status -v "$pool" 2>/dev/null | \
2684 nawk -v device=$disk -v pool=$pool -v field=$field \
2685 'BEGIN {startconfig=0; startfield=0; }
2686 /config:/ {startconfig=1}
2687 (startconfig==1) && ($1==field) {startfield=1; next;}
2688 (startfield==1) && ($1==device) {print $2; exit;}
2689 (startfield==1) &&
2690 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2691 echo $state
2692 }
2693
2694
2695 #
2696 # print the given directory filesystem type
2697 #
2698 # $1 directory name
2699 #
function get_fstype
{
	typeset mntdir=$1

	[[ -n $mntdir ]] || log_fail "Usage: get_fstype <directory>"

	#
	# Solaris df -n prints "<dir> : <fstype>", so the filesystem
	# type is the third whitespace-separated field.
	#
	df -n $mntdir | awk '{print $3}'
}
2714
2715 #
2716 # Given a disk, label it to VTOC regardless what label was on the disk
2717 # $1 disk
2718 #
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# VTOC relabeling is a Solaris-only operation.
	if is_linux; then
		log_note "Currently unsupported by the test framework"
		return 1
	fi

	if [[ $arch == "i386" ]]; then
		# format(1M) command script: label partition 0, accept
		# defaults, then quit out of both menus.
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		# x86 disks need an fdisk partition table first.
		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		# SPARC has no fdisk step; the format script answers one
		# extra prompt instead.
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# Drive format(1M) non-interactively from the script built above.
	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2767
2768 #
2769 # check if the system was installed as zfsroot or not
2770 # return: 0 ture, otherwise false
2771 #
function is_zfsroot
{
	# Succeeds when Solaris "df -n /" reports a zfs filesystem type
	# for the root directory; the function's status is grep's status.
	df -n / | grep zfs > /dev/null 2>&1
}
2777
2778 #
2779 # get the root filesystem name if it's zfsroot system.
2780 #
2781 # return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# Only the Solaris mnttab format is parsed here; on Linux rootfs
	# stays empty and the log_fail below fires.
	if ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		    /etc/mnttab)
	fi
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootfs"
	fi
	# Double-check that the mnttab entry is a real ZFS dataset.
	zfs list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2800
2801 #
2802 # get the rootfs's pool name
2803 # return:
2804 # rootpool name
2805 #
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	# Only the Solaris mnttab format is parsed here; on Linux rootfs
	# stays empty and the log_fail below fires.
	if ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		    /etc/mnttab)
	fi
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootpool"
	fi
	# Double-check that the mnttab entry is a real ZFS dataset.
	zfs list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		# The pool is the first '/'-separated component of the
		# dataset name; parameter expansion replaces the old
		# backtick echo|awk pipeline (same result, no forks).
		rootpool=${rootfs%%/*}
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2826
2827 #
2828 # Get the package name
2829 #
function get_package_name
{
	typeset pkgpath=${1:-$STC_NAME}

	# SUNWstc-<path with every '/' replaced by '-'>; parameter
	# expansion does the same substitution the old sed call did.
	echo "SUNWstc-${pkgpath//\//-}"
}
2836
2837 #
2838 # Get the word numbers from a string separated by white space
2839 #
function get_word_count
{
	typeset text="$1"

	# Deliberately unquoted: the expansion collapses runs of
	# whitespace before wc counts the words.
	echo $text | wc -w
}
2844
2845 #
2846 # To verify if the require numbers of disks is given
2847 #
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	# Enough disks supplied: nothing to do.
	((count >= min)) && return

	# Abort the test case as "untested" (not failed) when the
	# caller provided fewer disks than required.
	log_untested "A minimum of $min disks is required to run." \
	    " You specified $count disk(s)"
}
2859
function ds_is_volume
{
	# True when the dataset's "type" property reads "volume".
	[[ $(get_prop type $1) == "volume" ]]
}
2866
function ds_is_filesystem
{
	# True when the dataset's "type" property reads "filesystem".
	[[ $(get_prop type $1) == "filesystem" ]]
}
2873
function ds_is_snapshot
{
	# True when the dataset's "type" property reads "snapshot".
	[[ $(get_prop type $1) == "snapshot" ]]
}
2880
2881 #
2882 # Check if Trusted Extensions are installed and enabled
2883 #
function is_te_enabled
{
	# labeld is the Trusted Extensions service.  Note the matching
	# state line is deliberately left on stdout, as before.
	if svcs -H -o state labeld 2>/dev/null | grep "enabled"; then
		return 0
	fi
	return 1
}
2893
2894 # Utility function to determine if a system has multiple cpus.
# Utility function to determine if a system has multiple cpus.
function is_mp
{
	typeset -i ncpus

	if is_linux; then
		ncpus=$(nproc)
	else
		ncpus=$(psrinfo | wc -l)
	fi

	((ncpus > 1))
}
2905
function get_cpu_freq
{
	if is_linux; then
		# e.g. "CPU MHz:  2400.000" -> "2400.000"
		lscpu | awk '/CPU MHz/ { print $3 }'
		return
	fi

	# Solaris: sixth field of "processor operates at NNNN MHz".
	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
}
2914
2915 # Run the given command as the user provided.
2916 function user_run
2917 {
2918 typeset user=$1
2919 shift
2920
2921 log_note "user:$user $@"
2922 eval su - \$user -c \"$@\" > /tmp/out 2>/tmp/err
2923 return $?
2924 }
2925
2926 #
2927 # Check if the pool contains the specified vdevs
2928 #
2929 # $1 pool
2930 # $2..n <vdev> ...
2931 #
2932 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2933 # vdevs is not in the pool, and 2 if pool name is missing.
2934 #
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	typeset tmpfile

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	# Snapshot the pool's vdev listing once, then search it for each
	# requested vdev (basename only).
	tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		if ! grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1; then
			# Previously the temp file leaked on this path.
			rm -f $tmpfile
			return 1
		fi
	done

	rm -f $tmpfile

	return 0
}
2958
# Print the numerically largest of the given arguments.
function get_max
{
	typeset -l i max=$1
	shift

	# Compare in-shell; the old "max=$(echo $((...)))" forked a
	# subshell (and a useless echo) per argument.
	for i in "$@"; do
		((i > max)) && max=$i
	done

	echo $max
}
2970
# Print the numerically smallest of the given arguments.
function get_min
{
	typeset -l i min=$1
	shift

	# Compare in-shell; the old "min=$(echo $((...)))" forked a
	# subshell (and a useless echo) per argument.
	for i in "$@"; do
		((i < min)) && min=$i
	done

	echo $min
}
2982
2983 #
2984 # Generate a random number between 1 and the argument.
2985 #
function random
{
	typeset -i ceiling=$1

	# RANDOM % ceiling is in [0, ceiling-1]; shift into [1, ceiling].
	echo $(( RANDOM % ceiling + 1 ))
}
2991
2992 # Write data that can be compressed into a directory
# Write data that can be compressed into a directory
function write_compressible
{
	typeset dir=$1		# target directory (must exist)
	typeset megs=$2		# size per file
	typeset nfiles=${3:-1}	# number of files to create
	typeset bs=${4:-1024k}	# write block size (fio path only)
	typeset fname=${5:-file}	# file name prefix

	[[ -d $dir ]] || log_fail "No directory: $dir"

	# Under Linux fio is not currently used since its behavior can
	# differ significantly across versions. This includes missing
	# command line options and cases where the --buffer_compress_*
	# options fail to behave as expected.
	if is_linux; then
		typeset file_bytes=$(to_bytes $megs)
		typeset bs_bytes=4096
		typeset blocks=$(($file_bytes / $bs_bytes))

		for (( i = 0; i < $nfiles; i++ )); do
			# Sparse file of the full size; the dd writes
			# below fill in every third 4K block.
			truncate -s $file_bytes $dir/$fname.$i

			# Write every third block to get 66% compression.
			for (( j = 0; j < $blocks; j += 3 )); do
				dd if=/dev/urandom of=$dir/$fname.$i \
				    seek=$j bs=$bs_bytes count=1 \
				    conv=notrunc >/dev/null 2>&1
			done
		done
	else
		# Non-Linux: let fio generate 66%-compressible buffers
		# directly.
		log_must eval "fio \
		    --name=job \
		    --fallocate=0 \
		    --minimal \
		    --randrepeat=0 \
		    --buffer_compress_percentage=66 \
		    --buffer_compress_chunk=4096 \
		    --directory=$dir \
		    --numjobs=$nfiles \
		    --nrfiles=$nfiles \
		    --rw=write \
		    --bs=$bs \
		    --filesize=$megs \
		    --filename_format='$fname.\$jobnum' >/dev/null"
	fi
}
3039
function get_objnum
{
	typeset pathname=$1

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"

	# The inode number reported by stat is the ZFS object number.
	stat -c %i $pathname
}
3049
3050 #
3051 # Sync data to the pool
3052 #
3053 # $1 pool name
3054 # $2 boolean to force uberblock (and config including zpool cache file) update
3055 #
function sync_pool #pool <force>
{
	typeset pool=${1:-$TESTPOOL}
	typeset force=${2:-false}
	typeset flag=""

	# -f additionally forces an uberblock (and cachefile) update.
	[[ $force == true ]] && flag="-f"

	log_must zpool sync $flag $pool

	return 0
}
3069
3070 #
3071 # Wait for zpool 'freeing' property drops to zero.
3072 #
3073 # $1 pool name
3074 #
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}
	# Poll until the pool's "freeing" property drops to zero, i.e.
	# all background frees from destroys have completed.
	# NOTE(review): no timeout -- this spins forever if freeing
	# stalls; relies on the test harness timeout as a backstop.
	while true; do
		[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
		log_must sleep 1
	done
}
3083
3084 #
3085 # Wait for every device replace operation to complete
3086 #
3087 # $1 pool name
3088 #
function wait_replacing #pool
{
	typeset pool=${1:-$TESTPOOL}
	# Poll until "zpool status" no longer shows any "replacing-N"
	# interim vdev, meaning every device replacement has finished.
	# NOTE(review): no timeout -- relies on the harness timeout.
	while true; do
		[[ "" == "$(zpool status $pool |
		    awk '/replacing-[0-9]+/ {print $1}')" ]] && break
		log_must sleep 1
	done
}
3098
3099 #
3100 # Wait for a pool to be scrubbed
3101 #
3102 # $1 pool name
3103 # $2 number of seconds to wait (optional)
3104 #
3105 # Returns true when pool has been scrubbed, or false if there's a timeout or if
3106 # no scrub was done.
3107 #
function wait_scrubbed
{
	typeset pool=${1:-$TESTPOOL}
	typeset -i iter=${2:-10}
	typeset -i i

	# Arithmetic loop instead of "{1..$iter}": bash performs brace
	# expansion before parameter expansion, so a variable range only
	# worked under ksh93 and silently looped once elsewhere.
	for ((i = 0; i < iter; i++)); do
		if is_pool_scrubbed $pool ; then
			return 0
		fi
		sleep 1
	done
	return 1
}
3120
3121 # Backup the zed.rc in our test directory so that we can edit it for our test.
3122 #
3123 # Returns: Backup file name. You will need to pass this to zed_rc_restore().
function zed_rc_backup
{
	# NOTE: zedrc_backup is intentionally global (matches the
	# original behavior); the path is also echoed for the caller.
	zedrc_backup=$(mktemp)
	cp $ZEDLET_DIR/zed.rc $zedrc_backup
	echo $zedrc_backup
}
3130
# Put the zed.rc backup created by zed_rc_backup() back into place.
#
# $1 backup file name returned by zed_rc_backup
function zed_rc_restore
{
	mv $1 $ZEDLET_DIR/zed.rc
}
3135
3136 #
3137 # Setup custom environment for the ZED.
3138 #
3139 # $@ Optional list of zedlets to run under zed.
function zed_setup
{
	# The ZED only exists on Linux; silently succeed elsewhere.
	if ! is_linux; then
		return
	fi

	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	if [[ ! -e $VDEVID_CONF ]]; then
		log_must touch $VDEVID_CONF
	fi

	# Refuse to clobber a real system config.
	if [[ -e $VDEVID_CONF_ETC ]]; then
		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
	fi
	EXTRA_ZEDLETS=$@

	# Create a symlink for /etc/zfs/vdev_id.conf file.
	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC

	# Setup minimal ZED configuration. Individual test cases should
	# add additional ZEDLETs as needed for their specific test.
	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR

	# Scripts must only be user writable.
	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
		saved_umask=$(umask)
		log_must umask 0022
		for i in $EXTRA_ZEDLETS ; do
			log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
		done
		log_must umask $saved_umask
	fi

	# Customize the zed.rc file to enable the full debug log.
	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc

}
3182
3183 #
3184 # Cleanup custom ZED environment.
3185 #
3186 # $@ Optional list of zedlets to remove from our test zed.d directory.
function zed_cleanup
{
	# The ZED only exists on Linux; silently succeed elsewhere.
	if ! is_linux; then
		return
	fi
	EXTRA_ZEDLETS=$@

	# Remove the files zed_setup() (and common zedlets) installed.
	log_must rm -f ${ZEDLET_DIR}/zed.rc
	log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
	log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
	log_must rm -f ${ZEDLET_DIR}/all-debug.sh
	log_must rm -f ${ZEDLET_DIR}/state

	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
		for i in $EXTRA_ZEDLETS ; do
			log_must rm -f ${ZEDLET_DIR}/$i
		done
	fi
	log_must rm -f $ZED_LOG
	log_must rm -f $ZED_DEBUG_LOG
	log_must rm -f $VDEVID_CONF_ETC
	log_must rm -f $VDEVID_CONF
	# Deliberately unchecked: the directory may legitimately still
	# hold files some test added outside of this framework.
	rmdir $ZEDLET_DIR
}
3211
3212 #
3213 # Check if ZED is currently running, if not start ZED.
3214 #
function zed_start
{
	# The ZED only exists on Linux; silently succeed elsewhere.
	if ! is_linux; then
		return
	fi

	# ZEDLET_DIR=/var/tmp/zed
	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	# Verify the ZED is not already running.
	pgrep -x zed > /dev/null
	if (($? == 0)); then
		log_fail "ZED already running"
	fi

	log_note "Starting ZED"
	# run ZED in the background and redirect foreground logging
	# output to $ZED_LOG.
	log_must truncate -s 0 $ZED_DEBUG_LOG
	log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
	    "-s $ZEDLET_DIR/state 2>$ZED_LOG &"

	return 0
}
3241
3242 #
3243 # Kill ZED process
3244 #
function zed_stop
{
	# The ZED only exists on Linux; silently succeed elsewhere.
	if ! is_linux; then
		return
	fi

	log_note "Stopping ZED"
	if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
		# Send SIGTERM and busy-wait for the process to exit
		# before removing the pid file.
		# NOTE(review): no timeout -- a wedged zed hangs this
		# loop; the harness timeout is the only backstop.
		zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
		kill $zedpid
		while ps -p $zedpid > /dev/null; do
			sleep 1
		done
		rm -f ${ZEDLET_DIR}/zed.pid
	fi
	return 0
}
3262
3263 #
3264 # Drain all zevents
3265 #
function zed_events_drain
{
	# Keep clearing until "zpool events" reports an empty list.
	while (( $(zpool events -H | wc -l) != 0 )); do
		sleep 1
		zpool events -c >/dev/null
	done
}
3273
3274 # Set a variable in zed.rc to something, un-commenting it in the process.
3275 #
3276 # $1 variable
3277 # $2 value
# Set a variable in zed.rc to something, un-commenting it in the process.
#
# $1 variable
# $2 value
function zed_rc_set
{
	typeset var="$1"
	typeset val="$2"

	# Drop every line mentioning the variable (commented or not).
	# A plain double-quoted sed program replaces the previous
	# eval-of-a-pre-quoted-string construction, which re-parsed the
	# pattern a second time and invited quoting bugs.
	sed -i "/$var/d" $ZEDLET_DIR/zed.rc

	# Add it at the end
	echo "$var=$val" >> $ZEDLET_DIR/zed.rc
}
3289
3290
3291 #
3292 # Check is provided device is being active used as a swap device.
3293 #
function is_swap_inuse
{
	typeset device=$1

	if [[ -z $device ]] ; then
		log_note "No device specified."
		return 1
	fi

	# The function's status is the grep status: 0 when the device
	# appears in the active swap listing.  On Linux the device is
	# resolved to its canonical path first.
	if is_linux; then
		swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
	else
		swap -l | grep -w $device > /dev/null 2>&1
	fi
}
3311
3312 #
3313 # Setup a swap device using the provided device.
3314 #
function swap_setup
{
	typeset swapdev=$1

	if is_linux; then
		# Linux requires formatting the device before swapon.
		log_must eval "mkswap $swapdev > /dev/null 2>&1"
		log_must swapon $swapdev
		return 0
	fi

	log_must swap -a $swapdev

	return 0
}
3328
3329 #
3330 # Cleanup a swap device on the provided device.
3331 #
function swap_cleanup
{
	typeset swapdev=$1

	# Nothing to do when the device is not an active swap device.
	if ! is_swap_inuse $swapdev; then
		return 0
	fi

	if is_linux; then
		log_must swapoff $swapdev
	else
		log_must swap -d $swapdev
	fi

	return 0
}
3346
3347 #
3348 # Set a global system tunable (64-bit value)
3349 #
3350 # $1 tunable name
3351 # $2 tunable values
3352 #
3353 function set_tunable64
3354 {
3355 set_tunable_impl "$1" "$2" Z
3356 }
3357
3358 #
3359 # Set a global system tunable (32-bit value)
3360 #
3361 # $1 tunable name
3362 # $2 tunable values
3363 #
3364 function set_tunable32
3365 {
3366 set_tunable_impl "$1" "$2" W
3367 }
3368
# Shared implementation for set_tunable32/64.
#
# $1 tunable name
# $2 tunable value
# $3 mdb write format character (illumos only; unused on Linux)
# $4 kernel module name (default "zfs")
#
# Returns 0 on success, 1 on bad arguments or write failure.
function set_tunable_impl
{
	typeset tunable="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"
	typeset module="${4:-zfs}"

	[[ -z "$tunable" ]] && return 1
	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -w "$zfs_tunables/$tunable" ]] || return 1
		echo -n "$value" > "$zfs_tunables/$tunable"
		return "$?"
		;;
	SunOS)
		# String comparison: the previous "-eq" evaluated both
		# sides arithmetically (non-numeric names become 0), so
		# the guard passed for every module name.
		[[ "$module" == "zfs" ]] || return 1
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		return "$?"
		;;
	esac
}
3394
3395 #
3396 # Get a global system tunable
3397 #
3398 # $1 tunable name
3399 #
3400 function get_tunable
3401 {
3402 get_tunable_impl "$1"
3403 }
3404
# Shared implementation for get_tunable.
#
# $1 tunable name
# $2 kernel module name (default "zfs")
#
# Prints the tunable's value on Linux; returns 1 when the tunable
# cannot be read (always the case on SunOS, which has no read path).
function get_tunable_impl
{
	typeset tunable="$1"
	typeset module="${2:-zfs}"

	[[ -z "$tunable" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -f "$zfs_tunables/$tunable" ]] || return 1
		cat $zfs_tunables/$tunable
		return "$?"
		;;
	SunOS)
		# String comparison: the previous "-eq" evaluated both
		# sides arithmetically (always 0 == 0), so the guard
		# passed for every module name.
		[[ "$module" == "zfs" ]] || return 1
		;;
	esac

	return 1
}