1 #!/bin/ksh -p
2 #
3 # CDDL HEADER START
4 #
5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
8 #
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
13 #
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
19 #
20 # CDDL HEADER END
21 #
22
23 #
24 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25 # Use is subject to license terms.
26 # Copyright (c) 2012, 2016 by Delphix. All rights reserved.
27 # Copyright 2016 Nexenta Systems, Inc.
28 # Copyright (c) 2017 Lawrence Livermore National Security, LLC.
29 # Copyright (c) 2017 Datto Inc.
30 # Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
31 #
32
33 . ${STF_TOOLS}/include/logapi.shlib
34 . ${STF_SUITE}/include/math.shlib
35 . ${STF_SUITE}/include/blkdev.shlib
36
37 #
38 # Apply constrained path when available. This is required since the
39 # PATH may have been modified by sudo's secure_path behavior.
40 #
41 if [ -n "$STF_PATH" ]; then
42 PATH="$STF_PATH"
43 fi
44
45 # Linux kernel version comparison function
46 #
47 # $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
48 #
49 # Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
50 #
51 function linux_version
52 {
53 typeset ver="$1"
54
55 [[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
56
57 typeset version=$(echo $ver | cut -d '.' -f 1)
58 typeset major=$(echo $ver | cut -d '.' -f 2)
59 typeset minor=$(echo $ver | cut -d '.' -f 3)
60
61 [[ -z "$version" ]] && version=0
62 [[ -z "$major" ]] && major=0
63 [[ -z "$minor" ]] && minor=0
64
65 echo $((version * 10000 + major * 100 + minor))
66 }
67
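# Example (illustrative): a sketch of gating a test on the running kernel
# version; log_unsupported comes from logapi.shlib.
#
#	if [ $(linux_version) -lt $(linux_version "4.4") ]; then
#		log_unsupported "Requires kernel 4.4 or newer"
#	fi
#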
68 # Determine if this is a Linux test system
69 #
70 # Return 0 if the platform is Linux, 1 otherwise
71
72 function is_linux
73 {
74 if [[ $(uname -o) == "GNU/Linux" ]]; then
75 return 0
76 else
77 return 1
78 fi
79 }
80
81 # Determine if this is a 32-bit system
82 #
83 # Return 0 if the platform is 32-bit, 1 otherwise
84
85 function is_32bit
86 {
87 if [[ $(getconf LONG_BIT) == "32" ]]; then
88 return 0
89 else
90 return 1
91 fi
92 }
93
94 # Determine if kmemleak is enabled
95 #
96 # Return 0 if kmemleak is enabled, 1 otherwise
97
98 function is_kmemleak
99 {
100 if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
101 return 0
102 else
103 return 1
104 fi
105 }
106
107 # Determine whether a dataset is mounted
108 #
109 # $1 dataset name
110 # $2 filesystem type; optional - defaults to zfs
111 #
112 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
113
114 function ismounted
115 {
116 typeset fstype=$2
117 [[ -z $fstype ]] && fstype=zfs
118 typeset out dir name ret
119
120 case $fstype in
121 zfs)
122 if [[ "$1" == "/"* ]] ; then
123 for out in $(zfs mount | awk '{print $2}'); do
124 [[ $1 == $out ]] && return 0
125 done
126 else
127 for out in $(zfs mount | awk '{print $1}'); do
128 [[ $1 == $out ]] && return 0
129 done
130 fi
131 ;;
132 ufs|nfs)
133 out=$(df -F $fstype $1 2>/dev/null)
134 ret=$?
135 (($ret != 0)) && return $ret
136
137 dir=${out%%\(*}
138 dir=${dir%% *}
139 name=${out##*\(}
140 name=${name%%\)*}
141 name=${name%% *}
142
143 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
144 ;;
145 ext*)
146 out=$(df -t $fstype $1 2>/dev/null)
147 return $?
148 ;;
149 zvol)
150 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
151 link=$(readlink -f $ZVOL_DEVDIR/$1)
152 [[ -n "$link" ]] && \
153 mount | grep -q "^$link" && \
154 return 0
155 fi
156 ;;
157 esac
158
159 return 1
160 }
161
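# Example (illustrative): a typical guard built on ismounted, using the
# suite's default dataset.
#
#	if ismounted $TESTPOOL/$TESTFS; then
#		log_must zfs unmount $TESTPOOL/$TESTFS
#	fi
#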
162 # Return 0 if a dataset is mounted; 1 otherwise
163 #
164 # $1 dataset name
165 # $2 filesystem type; optional - defaults to zfs
166
167 function mounted
168 {
169 ismounted $1 $2
170 (($? == 0)) && return 0
171 return 1
172 }
173
174 # Return 0 if a dataset is unmounted; 1 otherwise
175 #
176 # $1 dataset name
177 # $2 filesystem type; optional - defaults to zfs
178
179 function unmounted
180 {
181 ismounted $1 $2
182 (($? == 1)) && return 0
183 return 1
184 }
185
186 # split line on ","
187 #
188 # $1 - line to split
189
190 function splitline
191 {
192 echo $1 | sed "s/,/ /g"
193 }
194
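# Example (illustrative): iterating over a comma-separated list; the
# device names are hypothetical.
#
#	for dev in $(splitline "sda,sdb,sdc"); do
#		log_note "device: $dev"
#	done
#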
195 function default_setup
196 {
197 default_setup_noexit "$@"
198
199 log_pass
200 }
201
202 #
203 # Given a list of disks, setup storage pools and datasets.
204 #
205 function default_setup_noexit
206 {
207 typeset disklist=$1
208 typeset container=$2
209 typeset volume=$3
210 log_note begin default_setup_noexit
211
212 if is_global_zone; then
213 if poolexists $TESTPOOL ; then
214 destroy_pool $TESTPOOL
215 fi
216 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
217 log_must zpool create -f $TESTPOOL $disklist
218 else
219 reexport_pool
220 fi
221
222 rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
223 mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
224
225 log_must zfs create $TESTPOOL/$TESTFS
226 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
227
228 if [[ -n $container ]]; then
229 rm -rf $TESTDIR1 || \
230 log_unresolved Could not remove $TESTDIR1
231 mkdir -p $TESTDIR1 || \
232 log_unresolved Could not create $TESTDIR1
233
234 log_must zfs create $TESTPOOL/$TESTCTR
235 log_must zfs set canmount=off $TESTPOOL/$TESTCTR
236 log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
237 log_must zfs set mountpoint=$TESTDIR1 \
238 $TESTPOOL/$TESTCTR/$TESTFS1
239 fi
240
241 if [[ -n $volume ]]; then
242 if is_global_zone ; then
243 log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
244 block_device_wait
245 else
246 log_must zfs create $TESTPOOL/$TESTVOL
247 fi
248 fi
249 }
250
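# Example (illustrative): a setup.ksh would typically call the wrapper
# with the first disk from $DISKS.
#
#	DISK=${DISKS%% *}
#	default_setup $DISK
#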
251 #
252 # Given a list of disks, setup a storage pool, file system and
253 # a container.
254 #
255 function default_container_setup
256 {
257 typeset disklist=$1
258
259 default_setup "$disklist" "true"
260 }
261
262 #
263 # Given a list of disks, setup a storage pool,file system
264 # and a volume.
265 #
266 function default_volume_setup
267 {
268 typeset disklist=$1
269
270 default_setup "$disklist" "" "true"
271 }
272
273 #
274 # Given a list of disks, setup a storage pool,file system,
275 # a container and a volume.
276 #
277 function default_container_volume_setup
278 {
279 typeset disklist=$1
280
281 default_setup "$disklist" "true" "true"
282 }
283
284 #
285 # Create a snapshot on a filesystem or volume. By default, create a snapshot
286 # on the filesystem.
287 #
288 # $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
289 # $2 snapshot name. Default, $TESTSNAP
290 #
291 function create_snapshot
292 {
293 typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
294 typeset snap=${2:-$TESTSNAP}
295
296 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
297 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
298
299 if snapexists $fs_vol@$snap; then
300 log_fail "$fs_vol@$snap already exists."
301 fi
302 datasetexists $fs_vol || \
303 log_fail "$fs_vol must exist."
304
305 log_must zfs snapshot $fs_vol@$snap
306 }
307
308 #
309 # Create a clone from a snapshot, default clone name is $TESTCLONE.
310 #
311 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
312 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
313 #
314 function create_clone # snapshot clone
315 {
316 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
317 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
318
319 [[ -z $snap ]] && \
320 log_fail "Snapshot name is undefined."
321 [[ -z $clone ]] && \
322 log_fail "Clone name is undefined."
323
324 log_must zfs clone $snap $clone
325 }
326
327 #
328 # Create a bookmark of the given snapshot. By default, create a bookmark on
329 # the filesystem.
330 #
331 # $1 Existing filesystem or volume name. Default, $TESTFS
332 # $2 Existing snapshot name. Default, $TESTSNAP
333 # $3 bookmark name. Default, $TESTBKMARK
334 #
335 function create_bookmark
336 {
337 typeset fs_vol=${1:-$TESTFS}
338 typeset snap=${2:-$TESTSNAP}
339 typeset bkmark=${3:-$TESTBKMARK}
340
341 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
342 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
343 [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
344
345 if bkmarkexists $fs_vol#$bkmark; then
346 log_fail "$fs_vol#$bkmark already exists."
347 fi
348 datasetexists $fs_vol || \
349 log_fail "$fs_vol must exist."
350 snapexists $fs_vol@$snap || \
351 log_fail "$fs_vol@$snap must exist."
352
353 log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
354 }
355
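# Example (illustrative): a snapshot/bookmark round trip using the suite
# defaults; destroy_bookmark and destroy_snapshot are defined later in
# this file.
#
#	create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
#	create_bookmark $TESTPOOL/$TESTFS $TESTSNAP $TESTBKMARK
#	destroy_bookmark $TESTPOOL/$TESTFS#$TESTBKMARK
#	destroy_snapshot $TESTPOOL/$TESTFS@$TESTSNAP
#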
356 #
357 # Create a temporary clone result of an interrupted resumable 'zfs receive'
358 # $1 Destination filesystem name. Must not exist, will be created as the result
359 # of this function along with its %recv temporary clone
360 # $2 Source filesystem name. Must not exist, will be created and destroyed
361 #
362 function create_recv_clone
363 {
364 typeset recvfs="$1"
365 typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
366 typeset snap="$sendfs@snap1"
367 typeset incr="$sendfs@snap2"
368 typeset mountpoint="$TESTDIR/create_recv_clone"
369 typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
370
371 [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
372
373 datasetexists $recvfs && log_fail "Recv filesystem must not exist."
374 datasetexists $sendfs && log_fail "Send filesystem must not exist."
375
376 log_must zfs create -o mountpoint="$mountpoint" $sendfs
377 log_must zfs snapshot $snap
378 log_must eval "zfs send $snap | zfs recv -u $recvfs"
379 log_must mkfile 1m "$mountpoint/data"
380 log_must zfs snapshot $incr
381 log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 > $sendfile"
382 log_mustnot eval "zfs recv -su $recvfs < $sendfile"
383 destroy_dataset "$sendfs" "-r"
384 log_must rm -f "$sendfile"
385
386 if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
387 log_fail "Error creating temporary $recvfs/%recv clone"
388 fi
389 }
390
391 function default_mirror_setup
392 {
393 default_mirror_setup_noexit $1 $2 $3
394
395 log_pass
396 }
397
398 #
399 # Given a pair of disks, set up a storage pool and dataset for the mirror
400 # @parameters: $1 the primary side of the mirror
401 # $2 the secondary side of the mirror
402 # @uses: ZPOOL ZFS TESTPOOL TESTFS
403 function default_mirror_setup_noexit
404 {
405 readonly func="default_mirror_setup_noexit"
406 typeset primary=$1
407 typeset secondary=$2
408
409 [[ -z $primary ]] && \
410 log_fail "$func: No parameters passed"
411 [[ -z $secondary ]] && \
412 log_fail "$func: No secondary partition passed"
413 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
414 log_must zpool create -f $TESTPOOL mirror $@
415 log_must zfs create $TESTPOOL/$TESTFS
416 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
417 }
418
419 #
420 # create a number of mirrors.
421 # We create a number($1) of 2 way mirrors using the pairs of disks named
422 # on the command line. These mirrors are *not* mounted
423 # @parameters: $1 the number of mirrors to create
424 # $... the devices to use to create the mirrors on
425 # @uses: ZPOOL ZFS TESTPOOL
426 function setup_mirrors
427 {
428 typeset -i nmirrors=$1
429
430 shift
431 while ((nmirrors > 0)); do
432 log_must test -n "$1" -a -n "$2"
433 [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
434 log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
435 shift 2
436 ((nmirrors = nmirrors - 1))
437 done
438 }
439
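# Example (illustrative): create two 2-way mirror pools, $TESTPOOL1 and
# $TESTPOOL2, from four hypothetical devices.
#
#	setup_mirrors 2 sdb sdc sdd sde
#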
440 #
441 # create a number of raidz pools.
442 # We create a number($1) of 2 raidz pools using the pairs of disks named
443 # on the command line. These pools are *not* mounted
444 # @parameters: $1 the number of pools to create
445 # $... the devices to use to create the pools on
446 # @uses: ZPOOL ZFS TESTPOOL
447 function setup_raidzs
448 {
449 typeset -i nraidzs=$1
450
451 shift
452 while ((nraidzs > 0)); do
453 log_must test -n "$1" -a -n "$2"
454 [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
455 log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
456 shift 2
457 ((nraidzs = nraidzs - 1))
458 done
459 }
460
461 #
462 # Destroy the configured testpool mirrors.
463 # the mirrors are of the form ${TESTPOOL}{number}
464 # @uses: ZPOOL ZFS TESTPOOL
465 function destroy_mirrors
466 {
467 default_cleanup_noexit
468
469 log_pass
470 }
471
472 #
473 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
474 # $1 the list of disks
475 #
476 function default_raidz_setup
477 {
478 typeset disklist="$*"
479 disks=(${disklist[*]})
480
481 if [[ ${#disks[*]} -lt 2 ]]; then
482 log_fail "A raid-z requires a minimum of two disks."
483 fi
484
485 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
486 log_must zpool create -f $TESTPOOL raidz $disklist
487 log_must zfs create $TESTPOOL/$TESTFS
488 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
489
490 log_pass
491 }
492
493 #
494 # Common function used to cleanup storage pools and datasets.
495 #
496 # Invoked at the start of the test suite to ensure the system
497 # is in a known state, and also at the end of each set of
498 # sub-tests to ensure errors from one set of tests don't
499 # impact the execution of the next set.
500
501 function default_cleanup
502 {
503 default_cleanup_noexit
504
505 log_pass
506 }
507
508 #
509 # Utility function used to list all available pool names.
510 #
511 # NOTE: $KEEP is a variable containing pool names, separated by a newline
512 # character, that must be excluded from the returned list.
513 #
514 function get_all_pools
515 {
516 zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
517 }
518
519 function default_cleanup_noexit
520 {
521 typeset pool=""
522 #
523 # Destroying the pool will also destroy any
524 # filesystems it contains.
525 #
526 if is_global_zone; then
527 zfs unmount -a > /dev/null 2>&1
528 ALL_POOLS=$(get_all_pools)
529 # Here, we loop through the pools we're allowed to
530 # destroy, only destroying them if it's safe to do
531 # so.
532 while [ -n "${ALL_POOLS}" ]
533 do
534 for pool in ${ALL_POOLS}
535 do
536 if safe_to_destroy_pool $pool ;
537 then
538 destroy_pool $pool
539 fi
540 ALL_POOLS=$(get_all_pools)
541 done
542 done
543
544 zfs mount -a
545 else
546 typeset fs=""
547 for fs in $(zfs list -H -o name \
548 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
549 destroy_dataset "$fs" "-Rf"
550 done
551
552 # Cleanup is needed here to avoid leaving garbage directories behind.
553 for fs in $(zfs list -H -o name); do
554 [[ $fs == /$ZONE_POOL ]] && continue
555 [[ -d $fs ]] && log_must rm -rf $fs/*
556 done
557
558 #
559 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
560 # the default value
561 #
562 for fs in $(zfs list -H -o name); do
563 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
564 log_must zfs set reservation=none $fs
565 log_must zfs set recordsize=128K $fs
566 log_must zfs set mountpoint=/$fs $fs
567 typeset enc=""
568 enc=$(get_prop encryption $fs)
569 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
570 [[ "$enc" == "off" ]]; then
571 log_must zfs set checksum=on $fs
572 fi
573 log_must zfs set compression=off $fs
574 log_must zfs set atime=on $fs
575 log_must zfs set devices=off $fs
576 log_must zfs set exec=on $fs
577 log_must zfs set setuid=on $fs
578 log_must zfs set readonly=off $fs
579 log_must zfs set snapdir=hidden $fs
580 log_must zfs set aclmode=groupmask $fs
581 log_must zfs set aclinherit=secure $fs
582 fi
583 done
584 fi
585
586 [[ -d $TESTDIR ]] && \
587 log_must rm -rf $TESTDIR
588
589 disk1=${DISKS%% *}
590 if is_mpath_device $disk1; then
591 delete_partitions
592 fi
593 }
594
595
596 #
597 # Common function used to cleanup storage pools, file systems
598 # and containers.
599 #
600 function default_container_cleanup
601 {
602 if ! is_global_zone; then
603 reexport_pool
604 fi
605
606 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
607 [[ $? -eq 0 ]] && \
608 log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
609
610 destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
611 destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
612
613 [[ -e $TESTDIR1 ]] && \
614 log_must rm -rf $TESTDIR1 > /dev/null 2>&1
615
616 default_cleanup
617 }
618
619 #
620 # Common function used to clean up a snapshot of a file system or volume.
621 # Defaults to deleting the file system's snapshot.
622 #
623 # $1 snapshot name
624 #
625 function destroy_snapshot
626 {
627 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
628
629 if ! snapexists $snap; then
630 log_fail "'$snap' does not exist."
631 fi
632
633 #
634 # The value returned by 'get_prop' may not match the real mountpoint
635 # when the snapshot is unmounted. So, first check and make sure this
636 # snapshot has been mounted on the current system.
637 #
638 typeset mtpt=""
639 if ismounted $snap; then
640 mtpt=$(get_prop mountpoint $snap)
641 (($? != 0)) && \
642 log_fail "get_prop mountpoint $snap failed."
643 fi
644
645 destroy_dataset "$snap"
646 [[ $mtpt != "" && -d $mtpt ]] && \
647 log_must rm -rf $mtpt
648 }
649
650 #
651 # Common function used to clean up a clone.
652 #
653 # $1 clone name
654 #
655 function destroy_clone
656 {
657 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
658
659 if ! datasetexists $clone; then
660 log_fail "'$clone' does not exist."
661 fi
662
663 # For the same reason as in destroy_snapshot
664 typeset mtpt=""
665 if ismounted $clone; then
666 mtpt=$(get_prop mountpoint $clone)
667 (($? != 0)) && \
668 log_fail "get_prop mountpoint $clone failed."
669 fi
670
671 destroy_dataset "$clone"
672 [[ $mtpt != "" && -d $mtpt ]] && \
673 log_must rm -rf $mtpt
674 }
675
676 #
677 # Common function used to clean up a bookmark of a file system or volume.
678 # Defaults to deleting the file system's bookmark.
679 #
680 # $1 bookmark name
681 #
682 function destroy_bookmark
683 {
684 typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
685
686 if ! bkmarkexists $bkmark; then
687 log_fail "'$bkmark' does not exist."
688 fi
689
690 destroy_dataset "$bkmark"
691 }
692
693 # Return 0 if a snapshot exists; $? otherwise
694 #
695 # $1 - snapshot name
696
697 function snapexists
698 {
699 zfs list -H -t snapshot "$1" > /dev/null 2>&1
700 return $?
701 }
702
703 #
704 # Return 0 if a bookmark exists; $? otherwise
705 #
706 # $1 - bookmark name
707 #
708 function bkmarkexists
709 {
710 zfs list -H -t bookmark "$1" > /dev/null 2>&1
711 return $?
712 }
713
714 #
715 # Set a property to a certain value on a dataset.
716 # Sets a property of the dataset to the value as passed in.
717 # @param:
718 # $1 dataset whose property is being set
719 # $2 property to set
720 # $3 value to set property to
721 # @return:
722 # 0 if the property could be set.
723 # non-zero otherwise.
724 # @use: ZFS
725 #
726 function dataset_setprop
727 {
728 typeset fn=dataset_setprop
729
730 if (($# < 3)); then
731 log_note "$fn: Insufficient parameters (need 3, had $#)"
732 return 1
733 fi
734 typeset output=
735 output=$(zfs set $2=$3 $1 2>&1)
736 typeset rv=$?
737 if ((rv != 0)); then
738 log_note "Setting property on $1 failed."
739 log_note "property $2=$3"
740 log_note "Return Code: $rv"
741 log_note "Output: $output"
742 return $rv
743 fi
744 return 0
745 }
746
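# Example (illustrative): unlike 'log_must zfs set', dataset_setprop lets
# the caller handle failure itself; the property value here is arbitrary.
#
#	if ! dataset_setprop $TESTPOOL/$TESTFS compression lz4; then
#		log_note "compression could not be set"
#	fi
#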
747 #
748 # Assign suite defined dataset properties.
749 # This function is used to apply the suite's defined default set of
750 # properties to a dataset.
751 # @parameters: $1 dataset to use
752 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
753 # @returns:
754 # 0 if the dataset has been altered.
755 # 1 if no pool name was passed in.
756 # 2 if the dataset could not be found.
757 # 3 if the dataset could not have its properties set.
758 #
759 function dataset_set_defaultproperties
760 {
761 typeset dataset="$1"
762
763 [[ -z $dataset ]] && return 1
764
765 typeset confset=
766 typeset -i found=0
767 for confset in $(zfs list); do
768 if [[ $dataset = $confset ]]; then
769 found=1
770 break
771 fi
772 done
773 [[ $found -eq 0 ]] && return 2
774 if [[ -n $COMPRESSION_PROP ]]; then
775 dataset_setprop $dataset compression $COMPRESSION_PROP || \
776 return 3
777 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
778 fi
779 if [[ -n $CHECKSUM_PROP ]]; then
780 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
781 return 3
782 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
783 fi
784 return 0
785 }
786
787 #
788 # Check a numeric assertion
789 # @parameter: $@ the assertion to check
790 # @output: big loud notice if assertion failed
791 # @use: log_fail
792 #
793 function assert
794 {
795 (($@)) || log_fail "$@"
796 }
797
798 #
799 # Function to format partition size of a disk
800 # Given a disk cxtxdx reduces all partitions
801 # to 0 size
802 #
803 function zero_partitions #<whole_disk_name>
804 {
805 typeset diskname=$1
806 typeset i
807
808 if is_linux; then
809 log_must parted $DEV_DSKDIR/$diskname -s -- mklabel gpt
810 else
811 for i in 0 1 3 4 5 6 7
812 do
813 log_must set_partition $i "" 0mb $diskname
814 done
815 fi
816
817 return 0
818 }
819
820 #
821 # Given a slice, size and disk, this function
822 # formats the slice to the specified size.
823 # Size should be specified with units as per
824 # the `format` command requirements eg. 100mb 3gb
825 #
826 # NOTE: This entire interface is problematic for the Linux parted utility
827 # which requires the end of the partition to be specified. It would be
828 # best to retire this interface and replace it with something more flexible.
829 # At the moment a best effort is made.
830 #
831 function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
832 {
833 typeset -i slicenum=$1
834 typeset start=$2
835 typeset size=$3
836 typeset disk=$4
837 [[ -z $slicenum || -z $size || -z $disk ]] && \
838 log_fail "The slice, size or disk name is unspecified."
839
840 if is_linux; then
841 typeset size_mb=${size%%[mMgG]}
842
843 size_mb=${size_mb%%[mMgG][bB]}
844 if [[ $size == *[gG]* ]]; then
845 ((size_mb = size_mb * 1024))
846 fi
847
848 # Create GPT partition table when setting slice 0 or
849 # when the device doesn't already contain a GPT label.
850 parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
851 typeset ret_val=$?
852 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
853 parted $DEV_DSKDIR/$disk -s -- mklabel gpt
854 if [[ $? -ne 0 ]]; then
855 log_note "Failed to create GPT partition table on $disk"
856 return 1
857 fi
858 fi
859
860 # When no start is given, align on the first cylinder.
861 if [[ -z "$start" ]]; then
862 start=1
863 fi
864
865 # Determine the cylinder size for the device and use it to
866 # calculate the end offset in cylinders.
867 typeset -i cly_size_kb=0
868 cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
869 unit cyl print | head -3 | tail -1 | \
870 awk -F '[:k.]' '{print $4}')
871 ((end = (size_mb * 1024 / cly_size_kb) + start))
872
873 parted $DEV_DSKDIR/$disk -s -- \
874 mkpart part$slicenum ${start}cyl ${end}cyl
875 if [[ $? -ne 0 ]]; then
876 log_note "Failed to create partition $slicenum on $disk"
877 return 1
878 fi
879
880 blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
881 block_device_wait
882 else
883 typeset format_file=/var/tmp/format_in.$$
884
885 echo "partition" >$format_file
886 echo "$slicenum" >> $format_file
887 echo "" >> $format_file
888 echo "" >> $format_file
889 echo "$start" >> $format_file
890 echo "$size" >> $format_file
891 echo "label" >> $format_file
892 echo "" >> $format_file
893 echo "q" >> $format_file
894 echo "q" >> $format_file
895
896 format -e -s -d $disk -f $format_file
897 typeset ret_val=$?
898 rm -f $format_file
899 if [[ $ret_val -ne 0 ]]; then
900 log_note "Unable to format $disk slice $slicenum to $size"
901 return 1
902 fi
903 fi
904 
905 return 0
906 }
907
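# Example (illustrative): carve two consecutive 100mb slices on a
# hypothetical $disk, using get_endslice (below) to find the next start.
#
#	log_must set_partition 0 "" 100mb $disk
#	cyl=$(get_endslice $disk 0)
#	log_must set_partition 1 "$cyl" 100mb $disk
#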
908 #
909 # Delete all partitions on all disks - this is specifically for the use of multipath
910 # devices which currently can only be used in the test suite as raw/un-partitioned
911 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
912 #
913 function delete_partitions
914 {
915 typeset -i j=1
916
917 if [[ -z $DISK_ARRAY_NUM ]]; then
918 DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
919 fi
920 if [[ -z $DISKSARRAY ]]; then
921 DISKSARRAY=$DISKS
922 fi
923
924 if is_linux; then
925 if (( $DISK_ARRAY_NUM == 1 )); then
926 while ((j < MAX_PARTITIONS)); do
927 parted $DEV_DSKDIR/$DISK -s rm $j \
928 > /dev/null 2>&1
929 if (( $? == 1 )); then
930 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
931 if (( $? == 1 )); then
932 log_note "Partitions for $DISK should be deleted"
933 else
934 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
935 fi
936 return 0
937 else
938 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
939 if (( $? == 0 )); then
940 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
941 fi
942 fi
943 ((j = j+1))
944 done
945 else
946 for disk in `echo $DISKSARRAY`; do
947 while ((j < MAX_PARTITIONS)); do
948 parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
949 if (( $? == 1 )); then
950 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
951 if (( $? == 1 )); then
952 log_note "Partitions for $disk should be deleted"
953 else
954 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
955 fi
956 j=7
957 else
958 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
959 if (( $? == 0 )); then
960 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
961 fi
962 fi
963 ((j = j+1))
964 done
965 j=1
966 done
967 fi
968 fi
969 return 0
970 }
971
972 #
973 # Get the end cyl of the given slice
974 #
975 function get_endslice #<disk> <slice>
976 {
977 typeset disk=$1
978 typeset slice=$2
979 if [[ -z $disk || -z $slice ]] ; then
980 log_fail "The disk name or slice number is unspecified."
981 fi
982
983 if is_linux; then
984 endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
985 grep "part${slice}" | \
986 awk '{print $3}' | \
987 sed 's,cyl,,')
988 ((endcyl = (endcyl + 1)))
989 else
990 disk=${disk#/dev/dsk/}
991 disk=${disk#/dev/rdsk/}
992 disk=${disk%s*}
993
994 typeset -i ratio=0
995 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
996 grep "sectors\/cylinder" | \
997 awk '{print $2}')
998
999 if ((ratio == 0)); then
1000 return
1001 fi
1002
1003 typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
1004 nawk -v token="$slice" '{if ($1==token) print $6}')
1005
1006 ((endcyl = (endcyl + 1) / ratio))
1007 fi
1008
1009 echo $endcyl
1010 }
1011
1012
1013 #
1014 # Given a size,disk and total slice number, this function formats the
1015 # disk slices from 0 to the total slice number with the same specified
1016 # size.
1017 #
1018 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
1019 {
1020 typeset -i i=0
1021 typeset slice_size=$1
1022 typeset disk_name=$2
1023 typeset total_slices=$3
1024 typeset cyl
1025
1026 zero_partitions $disk_name
1027 while ((i < $total_slices)); do
1028 if ! is_linux; then
1029 if ((i == 2)); then
1030 ((i = i + 1))
1031 continue
1032 fi
1033 fi
1034 log_must set_partition $i "$cyl" $slice_size $disk_name
1035 cyl=$(get_endslice $disk_name $i)
1036 ((i = i+1))
1037 done
1038 }
1039
1040 #
1041 # This function continues to write to a filenum number of files into dirnum
1042 # number of directories until either file_write returns an error or the
1043 # maximum number of files per directory have been written.
1044 #
1045 # Usage:
1046 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1047 #
1048 # Return value: 0 on success
1049 # non 0 on error
1050 #
1051 # Where :
1052 # destdir: is the directory where everything is to be created under
1053 # dirnum: the maximum number of subdirectories to use, -1 no limit
1054 # filenum: the maximum number of files per subdirectory
1055 # bytes: number of bytes to write
1056 # num_writes: number of times to write out bytes
1057 # data: the data that will be written
1058 #
1059 # E.g.
1060 # fill_fs /testdir 20 25 1024 256 0
1061 #
1062 # Note: bytes * num_writes equals the size of the testfile
1063 #
1064 function fill_fs # destdir dirnum filenum bytes num_writes data
1065 {
1066 typeset destdir=${1:-$TESTDIR}
1067 typeset -i dirnum=${2:-50}
1068 typeset -i filenum=${3:-50}
1069 typeset -i bytes=${4:-8192}
1070 typeset -i num_writes=${5:-10240}
1071 typeset -i data=${6:-0}
1072
1073 typeset -i odirnum=1
1074 typeset -i idirnum=0
1075 typeset -i fn=0
1076 typeset -i retval=0
1077
1078 log_must mkdir -p $destdir/$idirnum
1079 while (($odirnum > 0)); do
1080 if ((dirnum >= 0 && idirnum >= dirnum)); then
1081 odirnum=0
1082 break
1083 fi
1084 file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
1085 -b $bytes -c $num_writes -d $data
1086 retval=$?
1087 if (($retval != 0)); then
1088 odirnum=0
1089 break
1090 fi
1091 if (($fn >= $filenum)); then
1092 fn=0
1093 ((idirnum = idirnum + 1))
1094 log_must mkdir -p $destdir/$idirnum
1095 else
1096 ((fn = fn + 1))
1097 fi
1098 done
1099 return $retval
1100 }
1101
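# Example (illustrative): fill $TESTDIR with up to 20 subdirectories of 25
# files, each written as 256 writes of 1024 zero bytes. The return value
# is deliberately not asserted here, since tests often use fill_fs to run
# a pool out of space.
#
#	fill_fs $TESTDIR 20 25 1024 256 0
#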
1102 #
1103 # Simple function to get the specified property. If unable to
1104 # get the property then return 1.
1105 #
1106 # Note property is in 'parsable' format (-p)
1107 #
1108 function get_prop # property dataset
1109 {
1110 typeset prop_val
1111 typeset prop=$1
1112 typeset dataset=$2
1113
1114 prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
1115 if [[ $? -ne 0 ]]; then
1116 log_note "Unable to get $prop property for dataset " \
1117 "$dataset"
1118 return 1
1119 fi
1120
1121 echo "$prop_val"
1122 return 0
1123 }
1124
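# Example (illustrative): capture a parsable property value, failing the
# test if it cannot be read.
#
#	used=$(get_prop used $TESTPOOL/$TESTFS) ||
#	    log_fail "cannot read 'used' of $TESTPOOL/$TESTFS"
#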
1125 #
1126 # Simple function to get the specified property of a pool. If unable to
1127 # get the property then return 1.
1128 #
1129 # Note property is in 'parsable' format (-p)
1130 #
1131 function get_pool_prop # property pool
1132 {
1133 typeset prop_val
1134 typeset prop=$1
1135 typeset pool=$2
1136
1137 if poolexists $pool ; then
1138 prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
1139 awk '{print $3}')
1140 if [[ $? -ne 0 ]]; then
1141 log_note "Unable to get $prop property for pool " \
1142 "$pool"
1143 return 1
1144 fi
1145 else
1146 log_note "Pool $pool does not exist."
1147 return 1
1148 fi
1149
1150 echo "$prop_val"
1151 return 0
1152 }
1153
1154 # Return 0 if a pool exists; $? otherwise
1155 #
1156 # $1 - pool name
1157
1158 function poolexists
1159 {
1160 typeset pool=$1
1161
1162 if [[ -z $pool ]]; then
1163 log_note "No pool name given."
1164 return 1
1165 fi
1166
1167 zpool get name "$pool" > /dev/null 2>&1
1168 return $?
1169 }
1170
1171 # Return 0 if all the specified datasets exist; $? otherwise
1172 #
1173 # $1-n dataset name
1174 function datasetexists
1175 {
1176 if (($# == 0)); then
1177 log_note "No dataset name given."
1178 return 1
1179 fi
1180
1181 while (($# > 0)); do
1182 zfs get name $1 > /dev/null 2>&1 || \
1183 return $?
1184 shift
1185 done
1186
1187 return 0
1188 }
1189
1190 # return 0 if none of the specified datasets exists, otherwise return 1.
1191 #
1192 # $1-n dataset name
1193 function datasetnonexists
1194 {
1195 if (($# == 0)); then
1196 log_note "No dataset name given."
1197 return 1
1198 fi
1199
1200 while (($# > 0)); do
1201 zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1202 && return 1
1203 shift
1204 done
1205
1206 return 0
1207 }
1208
1209 #
1210 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1211 #
1212 # Returns 0 if shared, 1 otherwise.
1213 #
1214 function is_shared
1215 {
1216 typeset fs=$1
1217 typeset mtpt
1218
1219 if [[ $fs != "/"* ]] ; then
1220 if datasetnonexists "$fs" ; then
1221 return 1
1222 else
1223 mtpt=$(get_prop mountpoint "$fs")
1224 case $mtpt in
1225 none|legacy|-) return 1
1226 ;;
1227 *) fs=$mtpt
1228 ;;
1229 esac
1230 fi
1231 fi
1232
1233 if is_linux; then
1234 for mtpt in `share | awk '{print $1}'` ; do
1235 if [[ $mtpt == $fs ]] ; then
1236 return 0
1237 fi
1238 done
1239 return 1
1240 fi
1241
1242 for mtpt in `share | awk '{print $2}'` ; do
1243 if [[ $mtpt == $fs ]] ; then
1244 return 0
1245 fi
1246 done
1247
1248 typeset stat=$(svcs -H -o STA nfs/server:default)
1249 if [[ $stat != "ON" ]]; then
1250 log_note "Current nfs/server status: $stat"
1251 fi
1252
1253 return 1
1254 }
1255
1256 #
1257 # Given a dataset name determine if it is shared via SMB.
1258 #
1259 # Returns 0 if shared, 1 otherwise.
1260 #
1261 function is_shared_smb
1262 {
1263 typeset fs=$1
1264 typeset mtpt
1265
1266 if datasetnonexists "$fs" ; then
1267 return 1
1268 else
1269 fs=$(echo $fs | sed 's@/@_@g')
1270 fi
1271
1272 if is_linux; then
1273 for mtpt in `net usershare list | awk '{print $1}'` ; do
1274 if [[ $mtpt == $fs ]] ; then
1275 return 0
1276 fi
1277 done
1278 return 1
1279 else
1280 log_unsupported "Currently unsupported by the test framework"
1281 return 1
1282 fi
1283 }
1284
1285 #
1286 # Given a mountpoint, determine if it is not shared via NFS.
1287 #
1288 # Returns 0 if not shared, 1 otherwise.
1289 #
1290 function not_shared
1291 {
1292 typeset fs=$1
1293
1294 is_shared $fs
1295 if (($? == 0)); then
1296 return 1
1297 fi
1298
1299 return 0
1300 }
1301
1302 #
1303 # Given a dataset determine if it is not shared via SMB.
1304 #
1305 # Returns 0 if not shared, 1 otherwise.
1306 #
1307 function not_shared_smb
1308 {
1309 typeset fs=$1
1310
1311 is_shared_smb $fs
1312 if (($? == 0)); then
1313 return 1
1314 fi
1315
1316 return 0
1317 }
1318
1319 #
1320 # Helper function to unshare a mountpoint.
1321 #
1322 function unshare_fs #fs
1323 {
1324 typeset fs=$1
1325
1326 is_shared $fs || is_shared_smb $fs
1327 if (($? == 0)); then
1328 log_must zfs unshare $fs
1329 fi
1330
1331 return 0
1332 }
1333
1334 #
1335 # Helper function to share a NFS mountpoint.
1336 #
1337 function share_nfs #fs
1338 {
1339 typeset fs=$1
1340
1341 if is_linux; then
1342 is_shared $fs
1343 if (($? != 0)); then
1344 log_must share "*:$fs"
1345 fi
1346 else
1347 is_shared $fs
1348 if (($? != 0)); then
1349 log_must share -F nfs $fs
1350 fi
1351 fi
1352
1353 return 0
1354 }
1355
1356 #
1357 # Helper function to unshare a NFS mountpoint.
1358 #
1359 function unshare_nfs #fs
1360 {
1361 typeset fs=$1
1362
1363 if is_linux; then
1364 is_shared $fs
1365 if (($? == 0)); then
1366 log_must unshare -u "*:$fs"
1367 fi
1368 else
1369 is_shared $fs
1370 if (($? == 0)); then
1371 log_must unshare -F nfs $fs
1372 fi
1373 fi
1374
1375 return 0
1376 }
1377
1378 #
1379 # Helper function to show NFS shares.
1380 #
1381 function showshares_nfs
1382 {
1383 if is_linux; then
1384 share -v
1385 else
1386 share -F nfs
1387 fi
1388
1389 return 0
1390 }
1391
1392 #
1393 # Helper function to show SMB shares.
1394 #
1395 function showshares_smb
1396 {
1397 if is_linux; then
1398 net usershare list
1399 else
1400 share -F smb
1401 fi
1402
1403 return 0
1404 }
1405
1406 #
1407 # Check the NFS server status and bring it online if necessary.
1408 #
1409 function setup_nfs_server
1410 {
1411 # Cannot share directory in non-global zone.
1412 #
1413 if ! is_global_zone; then
1414 log_note "Cannot trigger NFS server by sharing in LZ."
1415 return
1416 fi
1417
1418 if is_linux; then
1419 #
1420 # Re-synchronize /var/lib/nfs/etab with /etc/exports and
1421 # /etc/exports.d/* to provide a clean test environment.
1422 #
1423 log_must share -r
1424
1425 log_note "NFS server must be started prior to running ZTS."
1426 return
1427 fi
1428
1429 typeset nfs_fmri="svc:/network/nfs/server:default"
1430 if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1431 #
1432 # Only a real share operation can bring the NFS server
1433 # online permanently.
1434 #
1435 typeset dummy=/tmp/dummy
1436
1437 if [[ -d $dummy ]]; then
1438 log_must rm -rf $dummy
1439 fi
1440
1441 log_must mkdir $dummy
1442 log_must share $dummy
1443
1444 #
1445 # Wait for the FMRI's status to reach its final state. While in
1446 # transition an asterisk (*) is appended to the instance status,
1447 # and an unshare would revert the status to 'DIS' again.
1448 #
1449 # Wait for at least 1 second.
1450 #
1451 log_must sleep 1
1452 timeout=10
1453 while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1454 do
1455 log_must sleep 1
1456
1457 ((timeout -= 1))
1458 done
1459
1460 log_must unshare $dummy
1461 log_must rm -rf $dummy
1462 fi
1463
1464 log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1465 }
1466
1467 #
1468 # Verify whether the calling process is in the global zone
1469 #
1470 # Return 0 if in the global zone, 1 if in a non-global zone
1471 #
1472 function is_global_zone
1473 {
1474 if is_linux; then
1475 return 0
1476 else
1477 typeset cur_zone=$(zonename 2>/dev/null)
1478 if [[ $cur_zone != "global" ]]; then
1479 return 1
1480 fi
1481 return 0
1482 fi
1483 }
1484
1485 #
1486 # Verify whether test is permitted to run from
1487 # global zone, local zone, or both
1488 #
1489 # $1 zone limit, could be "global", "local", or "both" (no limit)
1490 #
1491 # Return 0 if permitted, otherwise exit with log_unsupported
1492 #
1493 function verify_runnable # zone limit
1494 {
1495 typeset limit=$1
1496
1497 [[ -z $limit ]] && return 0
1498
1499 if is_global_zone ; then
1500 case $limit in
1501 global|both)
1502 ;;
1503 local) log_unsupported "Test is unable to run from "\
1504 "global zone."
1505 ;;
1506 *) log_note "Warning: unknown limit $limit - " \
1507 "use both."
1508 ;;
1509 esac
1510 else
1511 case $limit in
1512 local|both)
1513 ;;
1514 global) log_unsupported "Test is unable to run from "\
1515 "local zone."
1516 ;;
1517 *) log_note "Warning: unknown limit $limit - " \
1518 "use both."
1519 ;;
1520 esac
1521
1522 reexport_pool
1523 fi
1524
1525 return 0
1526 }
1527
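# Example (illustrative): a test restricted to the global zone starts with
#
#	verify_runnable "global"
#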
1528 # Return 0 if created successfully or the pool already exists; $? otherwise
1529 # Note: In local zones, this function should return 0 silently.
1530 #
1531 # $1 - pool name
1532 # $2-n - [keyword] devs_list
1533
1534 function create_pool #pool devs_list
1535 {
1536 typeset pool=${1%%/*}
1537
1538 shift
1539
1540 if [[ -z $pool ]]; then
1541 log_note "Missing pool name."
1542 return 1
1543 fi
1544
1545 if poolexists $pool ; then
1546 destroy_pool $pool
1547 fi
1548
1549 if is_global_zone ; then
1550 [[ -d /$pool ]] && rm -rf /$pool
1551 log_must zpool create -f $pool $@
1552 fi
1553
1554 return 0
1555 }
1556
1557 # Return 0 if the pool is destroyed successfully; $? otherwise
1558 # Note: In local zones, this function should return 0 silently.
1559 #
1560 # $1 - pool name
1561 # Destroy pool with the given parameters.
1562
1563 function destroy_pool #pool
1564 {
1565 typeset pool=${1%%/*}
1566 typeset mtpt
1567
1568 if [[ -z $pool ]]; then
1569 log_note "No pool name given."
1570 return 1
1571 fi
1572
1573 if is_global_zone ; then
1574 if poolexists "$pool" ; then
1575 mtpt=$(get_prop mountpoint "$pool")
1576
1577 # At times, syseventd/udev activity can cause attempts
1578 # to destroy a pool to fail with EBUSY. We retry a few
1579 # times allowing failures before requiring the destroy
1580 # to succeed.
1581 log_must_busy zpool destroy -f $pool
1582
1583 [[ -d $mtpt ]] && \
1584 log_must rm -rf $mtpt
1585 else
1586 log_note "Pool does not exist. ($pool)"
1587 return 1
1588 fi
1589 fi
1590
1591 return 0
1592 }
1593
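# Example (illustrative): a scratch pool built on a file vdev and cleaned
# up with destroy_pool; $MINVDEVSIZE and $TEST_BASE_DIR are suite defaults,
# the vdev file name is hypothetical.
#
#	log_must mkfile $MINVDEVSIZE $TEST_BASE_DIR/vdev0
#	create_pool $TESTPOOL $TEST_BASE_DIR/vdev0
#	...
#	destroy_pool $TESTPOOL
#	log_must rm -f $TEST_BASE_DIR/vdev0
#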
1594 # Return 0 if the dataset is destroyed successfully; $? otherwise
1595 # Note: In local zones, this function should return 0 silently.
1596 #
1597 # $1 - dataset name
1598 # $2 - custom arguments for zfs destroy
1599 # Destroy dataset with the given parameters.
1600
1601 function destroy_dataset #dataset #args
1602 {
1603 typeset dataset=$1
1604 typeset mtpt
1605 typeset args=${2:-""}
1606
1607 if [[ -z $dataset ]]; then
1608 log_note "No dataset name given."
1609 return 1
1610 fi
1611
1612 if is_global_zone ; then
1613 if datasetexists "$dataset" ; then
1614 mtpt=$(get_prop mountpoint "$dataset")
1615 log_must_busy zfs destroy $args $dataset
1616
1617 [[ -d $mtpt ]] && \
1618 log_must rm -rf $mtpt
1619 else
1620 log_note "Dataset does not exist. ($dataset)"
1621 return 1
1622 fi
1623 fi
1624
1625 return 0
1626 }
1627
1628 #
1629 # First, create a pool with 5 datasets. Then, create a single zone and
1630 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1631 # and a zvol device to the zone.
1632 #
1633 # $1 zone name
1634 # $2 zone root directory prefix
1635 # $3 zone ip
1636 #
1637 function zfs_zones_setup #zone_name zone_root zone_ip
1638 {
1639 typeset zone_name=${1:-$(hostname)-z}
1640 typeset zone_root=${2:-"/zone_root"}
1641 typeset zone_ip=${3:-"10.1.1.10"}
1642 typeset prefix_ctr=$ZONE_CTR
1643 typeset pool_name=$ZONE_POOL
1644 typeset -i cntctr=5
1645 typeset -i i=0
1646
1647 # Create a pool and 5 containers within it
1648 #
1649 [[ -d /$pool_name ]] && rm -rf /$pool_name
1650 log_must zpool create -f $pool_name $DISKS
1651 while ((i < cntctr)); do
1652 log_must zfs create $pool_name/$prefix_ctr$i
1653 ((i += 1))
1654 done
1655
1656 # create a zvol
1657 log_must zfs create -V 1g $pool_name/zone_zvol
1658 block_device_wait
1659
1660 #
1661 # If the current system supports slog, add slog devices to the pool
1662 #
1663 if verify_slog_support ; then
1664 typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
1665 log_must mkfile $MINVDEVSIZE $sdevs
1666 log_must zpool add $pool_name log mirror $sdevs
1667 fi
1668
1669 # this isn't supported just yet.
1670 # Create a filesystem. In order to add this to
1671 # the zone, it must have its mountpoint set to 'legacy'
1672 # log_must zfs create $pool_name/zfs_filesystem
1673 # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
1674
1675 [[ -d $zone_root ]] && \
1676 log_must rm -rf $zone_root/$zone_name
1677 [[ ! -d $zone_root ]] && \
1678 log_must mkdir -p -m 0700 $zone_root/$zone_name
1679
1680 # Create zone configure file and configure the zone
1681 #
1682 typeset zone_conf=/tmp/zone_conf.$$
1683 echo "create" > $zone_conf
1684 echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1685 echo "set autoboot=true" >> $zone_conf
1686 i=0
1687 while ((i < cntctr)); do
1688 echo "add dataset" >> $zone_conf
1689 echo "set name=$pool_name/$prefix_ctr$i" >> \
1690 $zone_conf
1691 echo "end" >> $zone_conf
1692 ((i += 1))
1693 done
1694
1695 # add our zvol to the zone
1696 echo "add device" >> $zone_conf
1697 echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1698 echo "end" >> $zone_conf
1699
1700 # add a corresponding zvol rdsk to the zone
1701 echo "add device" >> $zone_conf
1702 echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1703 echo "end" >> $zone_conf
1704
1705 # once it's supported, we'll add our filesystem to the zone
1706 # echo "add fs" >> $zone_conf
1707 # echo "set type=zfs" >> $zone_conf
1708 # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1709 # echo "set dir=/export/zfs_filesystem" >> $zone_conf
1710 # echo "end" >> $zone_conf
1711
1712 echo "verify" >> $zone_conf
1713 echo "commit" >> $zone_conf
1714 log_must zonecfg -z $zone_name -f $zone_conf
1715 log_must rm -f $zone_conf
1716
1717 # Install the zone
1718 zoneadm -z $zone_name install
1719 if (($? == 0)); then
1720 log_note "SUCCESS: zoneadm -z $zone_name install"
1721 else
1722 log_fail "FAIL: zoneadm -z $zone_name install"
1723 fi
1724
1725 # Install sysidcfg file
1726 #
1727 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1728 echo "system_locale=C" > $sysidcfg
1729 echo "terminal=dtterm" >> $sysidcfg
1730 echo "network_interface=primary {" >> $sysidcfg
1731 echo "hostname=$zone_name" >> $sysidcfg
1732 echo "}" >> $sysidcfg
1733 echo "name_service=NONE" >> $sysidcfg
1734 echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
1735 echo "security_policy=NONE" >> $sysidcfg
1736 echo "timezone=US/Eastern" >> $sysidcfg
1737
1738 # Boot this zone
1739 log_must zoneadm -z $zone_name boot
1740 }
1741
1742 #
1743 # Reexport TESTPOOL & TESTPOOL(1-4)
1744 #
1745 function reexport_pool
1746 {
1747 typeset -i cntctr=5
1748 typeset -i i=0
1749
1750 while ((i < cntctr)); do
1751 if ((i == 0)); then
1752 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1753 if ! ismounted $TESTPOOL; then
1754 log_must zfs mount $TESTPOOL
1755 fi
1756 else
1757 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1758 if eval ! ismounted \$TESTPOOL$i; then
1759 log_must eval zfs mount \$TESTPOOL$i
1760 fi
1761 fi
1762 ((i += 1))
1763 done
1764 }
1765
1766 #
1767 # Verify a given disk or pool state
1768 #
1769 # Return 0 if the pool/disk matches the expected state, 1 otherwise
1770 #
1771 function check_state # pool disk state{online,offline,degraded}
1772 {
1773 typeset pool=$1
1774 typeset disk=${2#$DEV_DSKDIR/}
1775 typeset state=$3
1776
1777 [[ -z $pool ]] || [[ -z $state ]] \
1778 && log_fail "Arguments invalid or missing"
1779
1780 if [[ -z $disk ]]; then
1781 #check pool state only
1782 zpool get -H -o value health $pool \
1783 | grep -i "$state" > /dev/null 2>&1
1784 else
1785 zpool status -v $pool | grep "$disk" \
1786 | grep -i "$state" > /dev/null 2>&1
1787 fi
1788
1789 return $?
1790 }
1791
1792 #
1793 # Get the mountpoint of snapshot
1794 # A snapshot uses <mp_filesystem>/.zfs/snapshot/<snap>
1795 # as its mountpoint
1796 #
1797 function snapshot_mountpoint
1798 {
1799 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1800
1801 if [[ $dataset != *@* ]]; then
1802 log_fail "Invalid snapshot name '$dataset'."
1803 fi
1804
1805 typeset fs=${dataset%@*}
1806 typeset snap=${dataset#*@}
1807
1808 if [[ -z $fs || -z $snap ]]; then
1809 log_fail "Invalid snapshot name '$dataset'."
1810 fi
1811
1812 echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1813 }
1814
1815 #
1816 # Given a device and 'ashift' value verify it's correctly set on every label
1817 #
1818 function verify_ashift # device ashift
1819 {
1820 typeset device="$1"
1821 typeset ashift="$2"
1822
1823 zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
1824 if (ashift != $2)
1825 exit 1;
1826 else
1827 count++;
1828 } END {
1829 if (count != 4)
1830 exit 1;
1831 else
1832 exit 0;
1833 }'
1834
1835 return $?
1836 }
1837
1838 #
1839 # Given a pool and file system, this function will verify the file system
1840 # using the zdb internal tool. Note that the pool is exported and imported
1841 # to ensure it has consistent state.
1842 #
1843 function verify_filesys # pool filesystem dir
1844 {
1845 typeset pool="$1"
1846 typeset filesys="$2"
1847 typeset zdbout="/tmp/zdbout.$$"
1848
1849 shift
1850 shift
1851 typeset dirs=$@
1852 typeset search_path=""
1853
1854 log_note "Calling zdb to verify filesystem '$filesys'"
1855 zfs unmount -a > /dev/null 2>&1
1856 log_must zpool export $pool
1857
1858 if [[ -n $dirs ]] ; then
1859 for dir in $dirs ; do
1860 search_path="$search_path -d $dir"
1861 done
1862 fi
1863
1864 log_must zpool import $search_path $pool
1865
1866 zdb -cudi $filesys > $zdbout 2>&1
1867 if [[ $? != 0 ]]; then
1868 log_note "Output: zdb -cudi $filesys"
1869 cat $zdbout
1870 log_fail "zdb detected errors with: '$filesys'"
1871 fi
1872
1873 log_must zfs mount -a
1874 log_must rm -rf $zdbout
1875 }
1876
1877 #
1878 # Given a pool, this function lists all disks in the pool
1879 #
1880 function get_disklist # pool
1881 {
1882 typeset disklist=""
1883
1884 disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1885 grep -v "\-\-\-\-\-" | \
1886 egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1887
1888 echo $disklist
1889 }
1890
1891 #
1892 # Given a pool, this function lists all disks in the pool with their full
1893 # path (like "/dev/sda" instead of "sda").
1894 #
1895 function get_disklist_fullpath # pool
1896 {
1897 args="-P $1"
1898 get_disklist "$args"
1899 }
1900
1901
1902
1903 # /**
1904 # This function kills a given list of processes after a time period. We use
1905 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1906 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1907 # would be listed as FAIL, which we don't want: we're happy with stress tests
1908 # running for a certain amount of time, then finishing.
1909 #
1910 # @param $1 the time in seconds after which we should terminate these processes
1911 # @param $2..$n the processes we wish to terminate.
1912 # */
1913 function stress_timeout
1914 {
1915 typeset -i TIMEOUT=$1
1916 shift
1917 typeset cpids="$@"
1918
1919 log_note "Waiting for child processes($cpids). " \
1920 "It could last dozens of minutes, please be patient ..."
1921 log_must sleep $TIMEOUT
1922
1923 log_note "Killing child processes after ${TIMEOUT} stress timeout."
1924 typeset pid
1925 for pid in $cpids; do
1926 ps -p $pid > /dev/null 2>&1
1927 if (($? == 0)); then
1928 log_must kill -USR1 $pid
1929 fi
1930 done
1931 }
1932
1933 #
1934 # Verify a given hotspare disk is inuse or avail
1935 #
1936 # Return 0 if the pool/disk matches the expected state, 1 otherwise
1937 #
1938 function check_hotspare_state # pool disk state{inuse,avail}
1939 {
1940 typeset pool=$1
1941 typeset disk=${2#$DEV_DSKDIR/}
1942 typeset state=$3
1943
1944 cur_state=$(get_device_state $pool $disk "spares")
1945
1946 if [[ $state != ${cur_state} ]]; then
1947 return 1
1948 fi
1949 return 0
1950 }
1951
1952 #
1953 # Wait until a hotspare transitions to a given state or times out.
1954 #
1955 # Return 0 when pool/disk matches expected state, 1 on timeout.
1956 #
1957 function wait_hotspare_state # pool disk state timeout
1958 {
1959 typeset pool=$1
1960 typeset disk=${2#$DEV_DSKDIR/}
1961 typeset state=$3
1962 typeset timeout=${4:-60}
1963 typeset -i i=0
1964
1965 while [[ $i -lt $timeout ]]; do
1966 if check_hotspare_state $pool $disk $state; then
1967 return 0
1968 fi
1969
1970 i=$((i+1))
1971 sleep 1
1972 done
1973
1974 return 1
1975 }
1976
1977 #
1978 # Verify a given slog disk is inuse or avail
1979 #
1980 # Return 0 if the pool/disk matches the expected state, 1 otherwise
1981 #
1982 function check_slog_state # pool disk state{online,offline,unavail}
1983 {
1984 typeset pool=$1
1985 typeset disk=${2#$DEV_DSKDIR/}
1986 typeset state=$3
1987
1988 cur_state=$(get_device_state $pool $disk "logs")
1989
1990 if [[ $state != ${cur_state} ]]; then
1991 return 1
1992 fi
1993 return 0
1994 }
1995
1996 #
1997 # Verify a given vdev disk is inuse or avail
1998 #
1999 # Return 0 if the pool/disk matches the expected state, 1 otherwise
2000 #
2001 function check_vdev_state # pool disk state{online,offline,unavail}
2002 {
2003 typeset pool=$1
2004 typeset disk=${2#$DEV_DSKDIR/}
2005 typeset state=$3
2006
2007 cur_state=$(get_device_state $pool $disk)
2008
2009 if [[ $state != ${cur_state} ]]; then
2010 return 1
2011 fi
2012 return 0
2013 }
2014
2015 #
2016 # Wait until a vdev transitions to a given state or times out.
2017 #
2018 # Return 0 when pool/disk matches expected state, 1 on timeout.
2019 #
2020 function wait_vdev_state # pool disk state timeout
2021 {
2022 typeset pool=$1
2023 typeset disk=${2#$DEV_DSKDIR/}
2024 typeset state=$3
2025 typeset timeout=${4:-60}
2026 typeset -i i=0
2027
2028 while [[ $i -lt $timeout ]]; do
2029 if check_vdev_state $pool $disk $state; then
2030 return 0
2031 fi
2032
2033 i=$((i+1))
2034 sleep 1
2035 done
2036
2037 return 1
2038 }
2039
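# Example (illustrative): take a mirror child offline and wait for the
# change to be reported; $DISK2 is a hypothetical pool member.
#
#	log_must zpool offline $TESTPOOL $DISK2
#	log_must wait_vdev_state $TESTPOOL $DISK2 "OFFLINE" 60
#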
2040 #
2041 # Check the output of 'zpool status -v <pool>',
2042 # and see if the content of <token> contains the specified <keyword>.
2043 #
2044 # Return 0 if it does, 1 otherwise
2045 #
2046 function check_pool_status # pool token keyword <verbose>
2047 {
2048 typeset pool=$1
2049 typeset token=$2
2050 typeset keyword=$3
2051 typeset verbose=${4:-false}
2052
2053 scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2054 ($1==token) {print $0}')
2055 if [[ $verbose == true ]]; then
2056 log_note $scan
2057 fi
2058 echo $scan | grep -i "$keyword" > /dev/null 2>&1
2059
2060 return $?
2061 }
2062
2063 #
2064 # The following six functions are instances of check_pool_status():
2065 # is_pool_resilvering - checks if a resilver is in progress
2066 # is_pool_resilvered - checks if a resilver has completed
2067 # is_pool_scrubbing - checks if a scrub is in progress
2068 # is_pool_scrubbed - checks if a scrub has completed
2069 # is_pool_scrub_stopped - checks if a scrub has been stopped
2070 # is_pool_scrub_paused - checks if a scrub is paused
2071 #
2072 function is_pool_resilvering #pool <verbose>
2073 {
2074 check_pool_status "$1" "scan" "resilver in progress since " $2
2075 return $?
2076 }
2077
2078 function is_pool_resilvered #pool <verbose>
2079 {
2080 check_pool_status "$1" "scan" "resilvered " $2
2081 return $?
2082 }
2083
2084 function is_pool_scrubbing #pool <verbose>
2085 {
2086 check_pool_status "$1" "scan" "scrub in progress since " $2
2087 return $?
2088 }
2089
2090 function is_pool_scrubbed #pool <verbose>
2091 {
2092 check_pool_status "$1" "scan" "scrub repaired" $2
2093 return $?
2094 }
2095
2096 function is_pool_scrub_stopped #pool <verbose>
2097 {
2098 check_pool_status "$1" "scan" "scrub canceled" $2
2099 return $?
2100 }
2101
2102 function is_pool_scrub_paused #pool <verbose>
2103 {
2104 check_pool_status "$1" "scan" "scrub paused since " $2
2105 return $?
2106 }
2107
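# Example (illustrative): start a scrub and poll until it is reported
# complete, relying on the wrappers above.
#
#	log_must zpool scrub $TESTPOOL
#	while ! is_pool_scrubbed $TESTPOOL; do
#		sleep 1
#	done
#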
2108 #
2109 # Use create_pool()/destroy_pool() to clean up the information on
2110 # the given disks to avoid slice overlapping.
2111 #
2112 function cleanup_devices #vdevs
2113 {
2114 typeset pool="foopool$$"
2115
2116 if poolexists $pool ; then
2117 destroy_pool $pool
2118 fi
2119
2120 create_pool $pool $@
2121 destroy_pool $pool
2122
2123 return 0
2124 }
2125
2126 #/**
2127 # A function to find and locate free disks on a system or from given
2128 # disks as the parameter. It works by filtering out disks that are in use
2129 # as swap or dump devices, as well as disks listed in /etc/mnttab
2130 #
2131 # $@ given disks to find which are free, default is all disks in
2132 # the test system
2133 #
2134 # @return a string containing the list of available disks
2135 #*/
2136 function find_disks
2137 {
2138 # Trust provided list, no attempt is made to locate unused devices.
2139 if is_linux; then
2140 echo "$@"
2141 return
2142 fi
2143
2144
2145 sfi=/tmp/swaplist.$$
2146 dmpi=/tmp/dumpdev.$$
2147 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2148
2149 swap -l > $sfi
2150 dumpadm > $dmpi 2>/dev/null
2151
2152 # write an awk script that can process the output of format
2153 # to produce a list of disks we know about. Note that we have
2154 # to escape "$2" so that the shell doesn't interpret it while
2155 # we're creating the awk script.
2156 # -------------------
2157 cat > /tmp/find_disks.awk <<EOF
2158 #!/bin/nawk -f
2159 BEGIN { FS="."; }
2160
2161 /^Specify disk/{
2162 searchdisks=0;
2163 }
2164
2165 {
2166 if (searchdisks && \$2 !~ "^$"){
2167 split(\$2,arr," ");
2168 print arr[1];
2169 }
2170 }
2171
2172 /^AVAILABLE DISK SELECTIONS:/{
2173 searchdisks=1;
2174 }
2175 EOF
2176 #---------------------
2177
2178 chmod 755 /tmp/find_disks.awk
2179 disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
2180 rm /tmp/find_disks.awk
2181
2182 unused=""
2183 for disk in $disks; do
2184 # Check for mounted
2185 grep "${disk}[sp]" /etc/mnttab >/dev/null
2186 (($? == 0)) && continue
2187 # Check for swap
2188 grep "${disk}[sp]" $sfi >/dev/null
2189 (($? == 0)) && continue
2190 # check for dump device
2191 grep "${disk}[sp]" $dmpi >/dev/null
2192 (($? == 0)) && continue
2193 # check to see if this disk hasn't been explicitly excluded
2194 # by a user-set environment variable
2195 echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
2196 (($? == 0)) && continue
2197 unused_candidates="$unused_candidates $disk"
2198 done
2199 rm $sfi
2200 rm $dmpi
2201
2202 # now just check to see if those disks do actually exist
2203 # by looking for a device pointing to the first slice in
2204 # each case. limit the number to max_finddisksnum
2205 count=0
2206 for disk in $unused_candidates; do
2207 if [ -b $DEV_DSKDIR/${disk}s0 ]; then
2208 if [ $count -lt $max_finddisksnum ]; then
2209 unused="$unused $disk"
2210 # do not impose limit if $@ is provided
2211 [[ -z $@ ]] && ((count = count + 1))
2212 fi
2213 fi
2214 done
2215
2216 # finally, return our disk list
2217 echo $unused
2218 }
2219
2220 #
2221 # Add specified user to specified group
2222 #
2223 # $1 group name
2224 # $2 user name
2225 # $3 base of the homedir (optional)
2226 #
2227 function add_user #<group_name> <user_name> <basedir>
2228 {
2229 typeset gname=$1
2230 typeset uname=$2
2231 typeset basedir=${3:-"/var/tmp"}
2232
2233 if ((${#gname} == 0 || ${#uname} == 0)); then
2234 log_fail "group name or user name are not defined."
2235 fi
2236
2237 log_must useradd -g $gname -d $basedir/$uname -m $uname
2238 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.profile
2239 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.bash_profile
2240 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.login
2241
2242 # Add new users to the same group as the command line utils.
2243 # This allows them to be run out of the original user's home
2244 # directory as long as it is permissioned to be group readable.
2245 if is_linux; then
2246 cmd_group=$(stat --format="%G" $(which zfs))
2247 log_must usermod -a -G $cmd_group $uname
2248 fi
2249
2250 return 0
2251 }
2252
2253 #
2254 # Delete the specified user.
2255 #
2256 # $1 login name
2257 # $2 base of the homedir (optional)
2258 #
2259 function del_user #<logname> <basedir>
2260 {
2261 typeset user=$1
2262 typeset basedir=${2:-"/var/tmp"}
2263
2264 if ((${#user} == 0)); then
2265 log_fail "login name is required."
2266 fi
2267
2268 if id $user > /dev/null 2>&1; then
2269 log_must_retry "currently used" 5 userdel $user
2270 fi
2271
2272 [[ -d $basedir/$user ]] && rm -fr $basedir/$user
2273
2274 return 0
2275 }
2276
2277 #
2278 # Select valid gid and create specified group.
2279 #
2280 # $1 group name
2281 #
2282 function add_group #<group_name>
2283 {
2284 typeset group=$1
2285
2286 if ((${#group} == 0)); then
2287 log_fail "group name is required."
2288 fi
2289
2290 # On Linux let groupadd select the gid, since for many distributions
2291 # gids of 1000 and under are reserved; elsewhere probe up from gid 100.
2292 if is_linux; then
2293 if groupadd $group > /dev/null 2>&1; then
2294 return 0
2295 else
2296 return 1
2297 fi
2301 else
2302 typeset -i gid=100
2303 while true; do
2304 groupadd -g $gid $group > /dev/null 2>&1
2305 typeset -i ret=$?
2306 case $ret in
2307 0) return 0 ;;
2308 # The gid is not unique
2309 4) ((gid += 1)) ;;
2310 *) return 1 ;;
2311 esac
2312 done
2313 fi
2314 }
2315
2316 #
2317 # Delete the specified group.
2318 #
2319 # $1 group name
2320 #
2321 function del_group #<group_name>
2322 {
2323 typeset grp=$1
2324 if ((${#grp} == 0)); then
2325 log_fail "group name is required."
2326 fi
2327
2328 if is_linux; then
2329 getent group $grp > /dev/null 2>&1
2330 typeset -i ret=$?
2331 case $ret in
2332 # Group does not exist.
2333 2) return 0 ;;
2334 # Name already exists as a group name
2335 0) log_must groupdel $grp ;;
2336 *) return 1 ;;
2337 esac
2338 else
2339 groupmod -n $grp $grp > /dev/null 2>&1
2340 typeset -i ret=$?
2341 case $ret in
2342 # Group does not exist.
2343 6) return 0 ;;
2344 # Name already exists as a group name
2345 9) log_must groupdel $grp ;;
2346 *) return 1 ;;
2347 esac
2348 fi
2349
2350 return 0
2351 }
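
# Illustrative lifecycle for the user/group helpers above (the group and
# user names are examples only):
#
#	log_must add_group zgroup
#	log_must add_user zgroup zuser
#	user_run zuser "zfs list"
#	log_must del_user zuser
#	log_must del_group zgroup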
2352
2353 #
2354 # This function will return true if it's safe to destroy the pool passed
2355 # as argument 1. It checks for pools based on zvols and files, and also
2356 # files contained in a pool that may have a different mountpoint.
2357 #
2358 function safe_to_destroy_pool { # $1 the pool name
2359
2360 typeset pool=""
2361 typeset DONT_DESTROY=""
2362
2363 # We check that by deleting the $1 pool, we're not
2364 # going to pull the rug out from other pools. Do this
2365 # by looking at all other pools, ensuring that they
2366 # aren't built from files or zvols contained in this pool.
2367
2368 for pool in $(zpool list -H -o name)
2369 do
2370 ALTMOUNTPOOL=""
2371
2372 # this is a list of file-backed vdevs in $pool whose paths
2373 # pass through the pool we are checking ($1)
2374 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2375 awk '{print $1}')
2376
2377 # this is a list of zvols from $1 that are used as vdevs of $pool
2378 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2379 | awk '{print $1}')
2380
2381 # also want to determine if it's a file-based pool using an
2382 # alternate mountpoint...
2383 POOL_FILE_DIRS=$(zpool status -v $pool | \
2384 grep / | awk '{print $1}' | \
2385 awk -F/ '{print $2}' | grep -v "dev")
2386
2387 for pooldir in $POOL_FILE_DIRS
2388 do
2389 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2390 grep "${pooldir}$" | awk '{print $1}')
2391
2392 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2393 done
2394
2395
2396 if [ ! -z "$ZVOLPOOL" ]
2397 then
2398 DONT_DESTROY="true"
2399 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2400 fi
2401
2402 if [ ! -z "$FILEPOOL" ]
2403 then
2404 DONT_DESTROY="true"
2405 log_note "Pool $pool is built from $FILEPOOL on $1"
2406 fi
2407
2408 if [ ! -z "$ALTMOUNTPOOL" ]
2409 then
2410 DONT_DESTROY="true"
2411 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2412 fi
2413 done
2414
2415 if [ -z "${DONT_DESTROY}" ]
2416 then
2417 return 0
2418 else
2419 log_note "Warning: it is not safe to destroy $1!"
2420 return 1
2421 fi
2422 }
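
# For example, a cleanup routine might guard its destroy with:
#
#	if safe_to_destroy_pool $TESTPOOL; then
#		destroy_pool $TESTPOOL
#	fi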
2423
2424 #
2425 # Get the available ZFS compression options
2426 # $1 option type zfs_set|zfs_compress
2427 #
2428 function get_compress_opts
2429 {
2430 typeset COMPRESS_OPTS
2431 typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2432 gzip-6 gzip-7 gzip-8 gzip-9"
2433
2434 if [[ $1 == "zfs_compress" ]] ; then
2435 COMPRESS_OPTS="on lzjb"
2436 elif [[ $1 == "zfs_set" ]] ; then
2437 COMPRESS_OPTS="on off lzjb"
2438 fi
2439 typeset valid_opts="$COMPRESS_OPTS"
2440 zfs get 2>&1 | grep gzip >/dev/null 2>&1
2441 if [[ $? -eq 0 ]]; then
2442 valid_opts="$valid_opts $GZIP_OPTS"
2443 fi
2444 echo "$valid_opts"
2445 }
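
# A test might iterate over every settable compression value, e.g.
# (the dataset name is illustrative):
#
#	for opt in $(get_compress_opts zfs_set); do
#		log_must zfs set compression=$opt $TESTPOOL/$TESTFS
#	done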
2446
2447 #
2448 # Verify zfs operation with -p option works as expected
2449 # $1 operation, value could be create, clone or rename
2450 # $2 dataset type, value could be fs or vol
2451 # $3 dataset name
2452 # $4 new dataset name
2453 #
2454 function verify_opt_p_ops
2455 {
2456 typeset ops=$1
2457 typeset datatype=$2
2458 typeset dataset=$3
2459 typeset newdataset=$4
2460
2461 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2462 log_fail "$datatype is not supported."
2463 fi
2464
2465 # check parameters accordingly
2466 case $ops in
2467 create)
2468 newdataset=$dataset
2469 dataset=""
2470 if [[ $datatype == "vol" ]]; then
2471 ops="create -V $VOLSIZE"
2472 fi
2473 ;;
2474 clone)
2475 if [[ -z $newdataset ]]; then
2476 log_fail "newdataset should not be empty" \
2477 "when ops is $ops."
2478 fi
2479 log_must datasetexists $dataset
2480 log_must snapexists $dataset
2481 ;;
2482 rename)
2483 if [[ -z $newdataset ]]; then
2484 log_fail "newdataset should not be empty" \
2485 "when ops is $ops."
2486 fi
2487 log_must datasetexists $dataset
2488 log_mustnot snapexists $dataset
2489 ;;
2490 *)
2491 log_fail "$ops is not supported."
2492 ;;
2493 esac
2494
2495 # make sure the upper level filesystem does not exist
2496 destroy_dataset "${newdataset%/*}" "-rRf"
2497
2498 # without -p option, operation will fail
2499 log_mustnot zfs $ops $dataset $newdataset
2500 log_mustnot datasetexists $newdataset ${newdataset%/*}
2501
2502 # with -p option, operation should succeed
2503 log_must zfs $ops -p $dataset $newdataset
2504 block_device_wait
2505
2506 if ! datasetexists $newdataset ; then
2507 log_fail "-p option does not work for $ops"
2508 fi
2509
2510 # when $ops is create or clone, redoing the operation should still return zero
2511 if [[ $ops != "rename" ]]; then
2512 log_must zfs $ops -p $dataset $newdataset
2513 fi
2514
2515 return 0
2516 }
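
# For example, to check that "zfs create -p" creates the missing middle
# datasets (the dataset path is illustrative):
#
#	verify_opt_p_ops create fs $TESTPOOL/dir1/dir2/$TESTFS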
2517
2518 #
2519 # Get configuration of pool
2520 # $1 pool name
2521 # $2 config name
2522 #
2523 function get_config
2524 {
2525 typeset pool=$1
2526 typeset config=$2
2527 typeset alt_root
2528
2529 if ! poolexists "$pool" ; then
2530 return 1
2531 fi
2532 alt_root=$(zpool list -H $pool | awk '{print $NF}')
2533 if [[ $alt_root == "-" ]]; then
2534 value=$(zdb -C $pool | grep "$config:" | awk -F: \
2535 '{print $2}')
2536 else
2537 value=$(zdb -e $pool | grep "$config:" | awk -F: \
2538 '{print $2}')
2539 fi
2540 if [[ -n $value ]] ; then
2541 value=${value#\'}
2542 value=${value%\'}
2543 fi
2544 echo $value
2545
2546 return 0
2547 }
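
# For example, reading the pool guid out of the cached configuration
# ("pool_guid" is one of the keys zdb reports):
#
#	typeset guid=$(get_config $TESTPOOL pool_guid)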
2548
2549 #
2550 # Private function. Randomly select one item from the arguments.
2551 #
2552 # $1 count
2553 # $2-n string
2554 #
2555 function _random_get
2556 {
2557 typeset cnt=$1
2558 shift
2559
2560 typeset str="$@"
2561 typeset -i ind
2562 ((ind = RANDOM % cnt + 1))
2563
2564 typeset ret=$(echo "$str" | cut -f $ind -d ' ')
2565 echo $ret
2566 }
2567
2568 #
2569 # Randomly select one item from the arguments, allowing an empty (NONE) result
2570 #
2571 function random_get_with_non
2572 {
2573 typeset -i cnt=$#
2574 ((cnt += 1))
2575
2576 _random_get "$cnt" "$@"
2577 }
2578
2579 #
2580 # Randomly select one item from the arguments, never returning an empty result
2581 #
2582 function random_get
2583 {
2584 _random_get "$#" "$@"
2585 }
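
# Worked example: random_get on off lzjb returns one of the three values,
# while random_get_with_non on off lzjb may also return an empty string.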
2586
2587 #
2588 # Detect whether the current system supports slog devices
2589 #
2590 function verify_slog_support
2591 {
2592 typeset dir=$TEST_BASE_DIR/disk.$$
2593 typeset pool=foo.$$
2594 typeset vdev=$dir/a
2595 typeset sdev=$dir/b
2596
2597 mkdir -p $dir
2598 mkfile $MINVDEVSIZE $vdev $sdev
2599
2600 typeset -i ret=0
2601 if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
2602 ret=1
2603 fi
2604 rm -r $dir
2605
2606 return $ret
2607 }
2608
2609 #
2610 # The function will generate a dataset name of at least the specified length
2611 # $1, the length of the name
2612 # $2, the base string to construct the name
2613 #
2614 function gen_dataset_name
2615 {
2616 typeset -i len=$1
2617 typeset basestr="$2"
2618 typeset -i baselen=${#basestr}
2619 typeset -i iter=0
2620 typeset l_name=""
2621
2622 if ((len % baselen == 0)); then
2623 ((iter = len / baselen))
2624 else
2625 ((iter = len / baselen + 1))
2626 fi
2627 while ((iter > 0)); do
2628 l_name="${l_name}$basestr"
2629
2630 ((iter -= 1))
2631 done
2632
2633 echo $l_name
2634 }
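
# Worked example: gen_dataset_name 16 abcd emits "abcdabcdabcdabcd"
# (exactly 16 characters), and gen_dataset_name 10 abcd rounds up to
# 12 characters ("abcdabcdabcd").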
2635
2636 #
2637 # Get cksum tuple of dataset
2638 # $1 dataset name
2639 #
2640 # sample zdb output:
2641 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2642 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2643 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2644 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2645 function datasetcksum
2646 {
2647 typeset cksum
2648 sync
2649 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2650 | awk -F= '{print $7}')
2651 echo $cksum
2652 }
2653
2654 #
2655 # Get cksum of file
2656 # $1 file path
2657 #
2658 function checksum
2659 {
2660 typeset cksum
2661 cksum=$(cksum $1 | awk '{print $1}')
2662 echo $cksum
2663 }
2664
2665 #
2666 # Get the given disk/slice state from the specific field of the pool
2667 #
2668 function get_device_state #pool disk field("", "spares","logs")
2669 {
2670 typeset pool=$1
2671 typeset disk=${2#$DEV_DSKDIR/}
2672 typeset field=${3:-$pool}
2673
2674 state=$(zpool status -v "$pool" 2>/dev/null | \
2675 nawk -v device=$disk -v pool=$pool -v field=$field \
2676 'BEGIN {startconfig=0; startfield=0; }
2677 /config:/ {startconfig=1}
2678 (startconfig==1) && ($1==field) {startfield=1; next;}
2679 (startfield==1) && ($1==device) {print $2; exit;}
2680 (startfield==1) &&
2681 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2682 echo $state
2683 }
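
# Typical check that a vdev is healthy (the disk name is illustrative):
#
#	[[ $(get_device_state $TESTPOOL c1t1d0) == "ONLINE" ]] || \
#	    log_fail "c1t1d0 is not ONLINE"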
2684
2685
2686 #
2687 # print the given directory filesystem type
2688 #
2689 # $1 directory name
2690 #
2691 function get_fstype
2692 {
2693 typeset dir=$1
2694
2695 if [[ -z $dir ]]; then
2696 log_fail "Usage: get_fstype <directory>"
2697 fi
2698
2699 #
2700 # $ df -n /
2701 # / : ufs
2702 #
2703 df -n $dir | awk '{print $3}'
2704 }
2705
2706 #
2707 # Given a disk, label it with VTOC regardless of what label was on the disk
2708 # $1 disk
2709 #
2710 function labelvtoc
2711 {
2712 typeset disk=$1
2713 if [[ -z $disk ]]; then
2714 log_fail "The disk name is unspecified."
2715 fi
2716 typeset label_file=/var/tmp/labelvtoc.$$
2717 typeset arch=$(uname -p)
2718
2719 if is_linux; then
2720 log_note "Currently unsupported by the test framework"
2721 return 1
2722 fi
2723
2724 if [[ $arch == "i386" ]]; then
2725 echo "label" > $label_file
2726 echo "0" >> $label_file
2727 echo "" >> $label_file
2728 echo "q" >> $label_file
2729 echo "q" >> $label_file
2730
2731 fdisk -B $disk >/dev/null 2>&1
2732 # wait a while for fdisk to finish
2733 sleep 60
2734 elif [[ $arch == "sparc" ]]; then
2735 echo "label" > $label_file
2736 echo "0" >> $label_file
2737 echo "" >> $label_file
2738 echo "" >> $label_file
2739 echo "" >> $label_file
2740 echo "q" >> $label_file
2741 else
2742 log_fail "unknown arch type"
2743 fi
2744
2745 format -e -s -d $disk -f $label_file
2746 typeset -i ret_val=$?
2747 rm -f $label_file
2748 #
2749 # wait for format to finish
2750 #
2751 sleep 60
2752 if ((ret_val != 0)); then
2753 log_fail "unable to label $disk as VTOC."
2754 fi
2755
2756 return 0
2757 }
2758
2759 #
2760 # check if the system was installed as zfsroot or not
2761 # return: 0 if true, non-zero otherwise
2762 #
2763 function is_zfsroot
2764 {
2765 df -n / | grep zfs > /dev/null 2>&1
2766 return $?
2767 }
2768
2769 #
2770 # get the root filesystem name if it's a zfsroot system.
2771 #
2772 # return: root filesystem name
2773 function get_rootfs
2774 {
2775 typeset rootfs=""
2776
2777 if ! is_linux; then
2778 rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
2779 /etc/mnttab)
2780 fi
2781 if [[ -z "$rootfs" ]]; then
2782 log_fail "Cannot get rootfs"
2783 fi
2784 zfs list $rootfs > /dev/null 2>&1
2785 if (($? == 0)); then
2786 echo $rootfs
2787 else
2788 log_fail "This is not a zfsroot system."
2789 fi
2790 }
2791
2792 #
2793 # get the rootfs's pool name
2794 # return:
2795 # rootpool name
2796 #
2797 function get_rootpool
2798 {
2799 typeset rootfs=""
2800 typeset rootpool=""
2801
2802 if ! is_linux; then
2803 rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
2804 /etc/mnttab)
2805 fi
2806 if [[ -z "$rootfs" ]]; then
2807 log_fail "Cannot get rootpool"
2808 fi
2809 zfs list $rootfs > /dev/null 2>&1
2810 if (($? == 0)); then
2811 rootpool=`echo $rootfs | awk -F\/ '{print $1}'`
2812 echo $rootpool
2813 else
2814 log_fail "This is not a zfsroot system."
2815 fi
2816 }
2817
2818 #
2819 # Get the package name
2820 #
2821 function get_package_name
2822 {
2823 typeset dirpath=${1:-$STC_NAME}
2824
2825 echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2826 }
2827
2828 #
2829 # Get the number of words in a string separated by white space
2830 #
2831 function get_word_count
2832 {
2833 echo $1 | wc -w
2834 }
2835
2836 #
2837 # Verify that the required number of disks is given
2838 #
2839 function verify_disk_count
2840 {
2841 typeset -i min=${2:-1}
2842
2843 typeset -i count=$(get_word_count "$1")
2844
2845 if ((count < min)); then
2846 log_untested "A minimum of $min disks is required to run." \
2847 " You specified $count disk(s)"
2848 fi
2849 }
2850
2851 function ds_is_volume
2852 {
2853 typeset type=$(get_prop type $1)
2854 [[ $type = "volume" ]] && return 0
2855 return 1
2856 }
2857
2858 function ds_is_filesystem
2859 {
2860 typeset type=$(get_prop type $1)
2861 [[ $type = "filesystem" ]] && return 0
2862 return 1
2863 }
2864
2865 function ds_is_snapshot
2866 {
2867 typeset type=$(get_prop type $1)
2868 [[ $type = "snapshot" ]] && return 0
2869 return 1
2870 }
2871
2872 #
2873 # Check if Trusted Extensions are installed and enabled
2874 #
2875 function is_te_enabled
2876 {
2877 svcs -H -o state labeld 2>/dev/null | grep "enabled"
2878 if (($? != 0)); then
2879 return 1
2880 else
2881 return 0
2882 fi
2883 }
2884
2885 # Utility function to determine if a system has multiple cpus.
2886 function is_mp
2887 {
2888 if is_linux; then
2889 (($(nproc) > 1))
2890 else
2891 (($(psrinfo | wc -l) > 1))
2892 fi
2893
2894 return $?
2895 }
2896
2897 function get_cpu_freq
2898 {
2899 if is_linux; then
2900 lscpu | awk '/CPU MHz/ { print $3 }'
2901 else
2902 psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2903 fi
2904 }
2905
2906 # Run the given command as the user provided.
2907 function user_run
2908 {
2909 typeset user=$1
2910 shift
2911
2912 log_note "user:$user $@"
2913 eval su - \$user -c \"$@\" > /tmp/out 2>/tmp/err
2914 return $?
2915 }
2916
2917 #
2918 # Check if the pool contains the specified vdevs
2919 #
2920 # $1 pool
2921 # $2..n <vdev> ...
2922 #
2923 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2924 # vdevs is not in the pool, and 2 if pool name is missing.
2925 #
2926 function vdevs_in_pool
2927 {
2928 typeset pool=$1
2929 typeset vdev
2930
2931 if [[ -z $pool ]]; then
2932 log_note "Missing pool name."
2933 return 2
2934 fi
2935
2936 shift
2937
2938 typeset tmpfile=$(mktemp)
2939 zpool list -Hv "$pool" >$tmpfile
2940 for vdev in $@; do
2941 grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
2942 [[ $? -ne 0 ]] && rm -f $tmpfile && return 1
2943 done
2944
2945 rm -f $tmpfile
2946
2947 return 0
2948 }
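
# For example, after attaching a device a test might verify membership
# (the device path is illustrative):
#
#	log_must vdevs_in_pool $TESTPOOL $DEV_DSKDIR/c1t1d0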
2949
2950 function get_max
2951 {
2952 typeset -l i max=$1
2953 shift
2954
2955 for i in "$@"; do
2956 max=$((max > i ? max : i))
2957 done
2958
2959 echo $max
2960 }
2961
2962 function get_min
2963 {
2964 typeset -l i min=$1
2965 shift
2966
2967 for i in "$@"; do
2968 min=$((min < i ? min : i))
2969 done
2970
2971 echo $min
2972 }
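
# Worked examples: get_max 1 7 3 prints 7, get_min 1 7 3 prints 1.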
2973
2974 #
2975 # Generate a random number between 1 and the argument.
2976 #
2977 function random
2978 {
2979 typeset max=$1
2980 echo $(( ($RANDOM % $max) + 1 ))
2981 }
2982
2983 # Write data that can be compressed into a directory
2984 function write_compressible
2985 {
2986 typeset dir=$1
2987 typeset megs=$2
2988 typeset nfiles=${3:-1}
2989 typeset bs=${4:-1024k}
2990 typeset fname=${5:-file}
2991
2992 [[ -d $dir ]] || log_fail "No directory: $dir"
2993
2994 # Under Linux fio is not currently used since its behavior can
2995 # differ significantly across versions. This includes missing
2996 # command line options and cases where the --buffer_compress_*
2997 # options fail to behave as expected.
2998 if is_linux; then
2999 typeset file_bytes=$(to_bytes $megs)
3000 typeset bs_bytes=4096
3001 typeset blocks=$(($file_bytes / $bs_bytes))
3002
3003 for (( i = 0; i < $nfiles; i++ )); do
3004 truncate -s $file_bytes $dir/$fname.$i
3005
3006 # Write every third block to get 66% compression.
3007 for (( j = 0; j < $blocks; j += 3 )); do
3008 dd if=/dev/urandom of=$dir/$fname.$i \
3009 seek=$j bs=$bs_bytes count=1 \
3010 conv=notrunc >/dev/null 2>&1
3011 done
3012 done
3013 else
3014 log_must eval "fio \
3015 --name=job \
3016 --fallocate=0 \
3017 --minimal \
3018 --randrepeat=0 \
3019 --buffer_compress_percentage=66 \
3020 --buffer_compress_chunk=4096 \
3021 --directory=$dir \
3022 --numjobs=$nfiles \
3023 --nrfiles=$nfiles \
3024 --rw=write \
3025 --bs=$bs \
3026 --filesize=$megs \
3027 --filename_format='$fname.\$jobnum' >/dev/null"
3028 fi
3029 }
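
# For example, to create four files of mostly compressible data in the
# test directory (the sizes are illustrative):
#
#	write_compressible $TESTDIR 10m 4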
3030
3031 function get_objnum
3032 {
3033 typeset pathname=$1
3034 typeset objnum
3035
3036 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3037 objnum=$(stat -c %i $pathname)
3038 echo $objnum
3039 }
3040
3041 #
3042 # Sync data to the pool
3043 #
3044 # $1 pool name
3045 # $2 boolean to force uberblock (and config including zpool cache file) update
3046 #
3047 function sync_pool #pool <force>
3048 {
3049 typeset pool=${1:-$TESTPOOL}
3050 typeset force=${2:-false}
3051
3052 if [[ $force == true ]]; then
3053 log_must zpool sync -f $pool
3054 else
3055 log_must zpool sync $pool
3056 fi
3057
3058 return 0
3059 }
3060
3061 #
3062 # Wait for the zpool 'freeing' property to drop to zero.
3063 #
3064 # $1 pool name
3065 #
3066 function wait_freeing #pool
3067 {
3068 typeset pool=${1:-$TESTPOOL}
3069 while true; do
3070 [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3071 log_must sleep 1
3072 done
3073 }
3074
3075 #
3076 # Wait for every device replace operation to complete
3077 #
3078 # $1 pool name
3079 #
3080 function wait_replacing #pool
3081 {
3082 typeset pool=${1:-$TESTPOOL}
3083 while true; do
3084 [[ "" == "$(zpool status $pool |
3085 awk '/replacing-[0-9]+/ {print $1}')" ]] && break
3086 log_must sleep 1
3087 done
3088 }
3089
3090 #
3091 # Wait for a pool to be scrubbed
3092 #
3093 # $1 pool name
3094 # $2 number of seconds to wait (optional)
3095 #
3096 # Returns true when pool has been scrubbed, or false if there's a timeout or if
3097 # no scrub was done.
3098 #
3099 function wait_scrubbed
3100 {
3101 typeset pool=${1:-$TESTPOOL}
3102 typeset iter=${2:-10}
3103 for i in {1..$iter} ; do
3104 if is_pool_scrubbed $pool ; then
3105 return 0
3106 fi
3107 sleep 1
3108 done
3109 return 1
3110 }
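
# Typical usage, scrubbing a pool and allowing up to 30 seconds for the
# scrub to complete:
#
#	log_must zpool scrub $TESTPOOL
#	log_must wait_scrubbed $TESTPOOL 30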
3111
3112 # Back up the zed.rc in our test directory so that we can edit it for our test.
3113 #
3114 # Returns: Backup file name. You will need to pass this to zed_rc_restore().
3115 function zed_rc_backup
3116 {
3117 zedrc_backup="$(mktemp)"
3118 cp $ZEDLET_DIR/zed.rc $zedrc_backup
3119 echo $zedrc_backup
3120 }
3121
3122 function zed_rc_restore
3123 {
3124 mv $1 $ZEDLET_DIR/zed.rc
3125 }
3126
3127 #
3128 # Setup custom environment for the ZED.
3129 #
3130 # $@ Optional list of zedlets to run under zed.
3131 function zed_setup
3132 {
3133 if ! is_linux; then
3134 return
3135 fi
3136
3137 if [[ ! -d $ZEDLET_DIR ]]; then
3138 log_must mkdir $ZEDLET_DIR
3139 fi
3140
3141 if [[ ! -e $VDEVID_CONF ]]; then
3142 log_must touch $VDEVID_CONF
3143 fi
3144
3145 if [[ -e $VDEVID_CONF_ETC ]]; then
3146 log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3147 fi
3148 EXTRA_ZEDLETS=$@
3149
3150 # Create a symlink for /etc/zfs/vdev_id.conf file.
3151 log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3152
3153 # Setup minimal ZED configuration. Individual test cases should
3154 # add additional ZEDLETs as needed for their specific test.
3155 log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3156 log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
3157
3158 # Scripts must only be user writable.
3159 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3160 saved_umask=$(umask)
3161 log_must umask 0022
3162 for i in $EXTRA_ZEDLETS ; do
3163 log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3164 done
3165 log_must umask $saved_umask
3166 fi
3167
3168 # Customize the zed.rc file to enable the full debug log.
3169 log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
3170 echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3171
3172 }
3173
3174 #
3175 # Cleanup custom ZED environment.
3176 #
3177 # $@ Optional list of zedlets to remove from our test zed.d directory.
3178 function zed_cleanup
3179 {
3180 if ! is_linux; then
3181 return
3182 fi
3183 EXTRA_ZEDLETS=$@
3184
3185 log_must rm -f ${ZEDLET_DIR}/zed.rc
3186 log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
3187 log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
3188 log_must rm -f ${ZEDLET_DIR}/all-debug.sh
3189 log_must rm -f ${ZEDLET_DIR}/state
3190
3191 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3192 for i in $EXTRA_ZEDLETS ; do
3193 log_must rm -f ${ZEDLET_DIR}/$i
3194 done
3195 fi
3196 log_must rm -f $ZED_LOG
3197 log_must rm -f $ZED_DEBUG_LOG
3198 log_must rm -f $VDEVID_CONF_ETC
3199 log_must rm -f $VDEVID_CONF
3200 rmdir $ZEDLET_DIR
3201 }
3202
3203 #
3204 # Check if ZED is currently running, if not start ZED.
3205 #
3206 function zed_start
3207 {
3208 if ! is_linux; then
3209 return
3210 fi
3211
3212 # ZEDLET_DIR=/var/tmp/zed
3213 if [[ ! -d $ZEDLET_DIR ]]; then
3214 log_must mkdir $ZEDLET_DIR
3215 fi
3216
3217 # Verify the ZED is not already running.
3218 pgrep -x zed > /dev/null
3219 if (($? == 0)); then
3220 log_fail "ZED already running"
3221 fi
3222
3223 log_note "Starting ZED"
3224 # run ZED in the background and redirect foreground logging
3225 # output to $ZED_LOG.
3226 log_must truncate -s 0 $ZED_DEBUG_LOG
3227 log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
3228 "-s $ZEDLET_DIR/state 2>$ZED_LOG &"
3229
3230 return 0
3231 }
3232
3233 #
3234 # Kill ZED process
3235 #
3236 function zed_stop
3237 {
3238 if ! is_linux; then
3239 return
3240 fi
3241
3242 log_note "Stopping ZED"
3243 if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
3244 zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
3245 kill $zedpid
3246 while ps -p $zedpid > /dev/null; do
3247 sleep 1
3248 done
3249 rm -f ${ZEDLET_DIR}/zed.pid
3250 fi
3251 return 0
3252 }
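
# The usual pattern in a test is to bracket ZED interaction with these
# helpers (the zedlet name is an example):
#
#	zed_setup all-syslog.sh
#	zed_start
#	... test body ...
#	zed_stop
#	zed_cleanup all-syslog.sh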
3253
3254 #
3255 # Drain all zevents
3256 #
3257 function zed_events_drain
3258 {
3259 while [ $(zpool events -H | wc -l) -ne 0 ]; do
3260 sleep 1
3261 zpool events -c >/dev/null
3262 done
3263 }
3264
3265 # Set a variable in zed.rc to the given value, replacing any existing
3266 # (possibly commented-out) definition of it.
3266 #
3267 # $1 variable
3268 # $2 value
3269 function zed_rc_set
3270 {
3271 var="$1"
3272 val="$2"
3273 # Remove the line
3274 cmd="'/$var/d'"
3275 eval sed -i $cmd $ZEDLET_DIR/zed.rc
3276
3277 # Add it at the end
3278 echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3279 }
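
# A test that needs to edit zed.rc would typically bracket the change with
# the backup helpers above (the variable name and value are illustrative):
#
#	zedrc_bak=$(zed_rc_backup)
#	zed_rc_set ZED_SYSLOG_SUBCLASS_EXCLUDE "history_event"
#	... test body ...
#	zed_rc_restore $zedrc_bak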
3280
3281
3282 #
3283 # Check if the provided device is actively being used as a swap device.
3284 #
3285 function is_swap_inuse
3286 {
3287 typeset device=$1
3288
3289 if [[ -z $device ]] ; then
3290 log_note "No device specified."
3291 return 1
3292 fi
3293
3294 if is_linux; then
3295 swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
3296 else
3297 swap -l | grep -w $device > /dev/null 2>&1
3298 fi
3299
3300 return $?
3301 }
3302
3303 #
3304 # Setup a swap device using the provided device.
3305 #
3306 function swap_setup
3307 {
3308 typeset swapdev=$1
3309
3310 if is_linux; then
3311 log_must eval "mkswap $swapdev > /dev/null 2>&1"
3312 log_must swapon $swapdev
3313 else
3314 log_must swap -a $swapdev
3315 fi
3316
3317 return 0
3318 }
3319
3320 #
3321 # Cleanup a swap device on the provided device.
3322 #
3323 function swap_cleanup
3324 {
3325 typeset swapdev=$1
3326
3327 if is_swap_inuse $swapdev; then
3328 if is_linux; then
3329 log_must swapoff $swapdev
3330 else
3331 log_must swap -d $swapdev
3332 fi
3333 fi
3334
3335 return 0
3336 }
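
# Typical usage around a test that needs a scratch swap device (the
# device path is illustrative):
#
#	swap_setup $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL
#	... test body ...
#	swap_cleanup $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL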
3337
3338 #
3339 # Set a global system tunable (64-bit value)
3340 #
3341 # $1 tunable name
3342 # $2 tunable value
3343 #
3344 function set_tunable64
3345 {
3346 set_tunable_impl "$1" "$2" Z
3347 }
3348
3349 #
3350 # Set a global system tunable (32-bit value)
3351 #
3352 # $1 tunable name
3353 # $2 tunable value
3354 #
3355 function set_tunable32
3356 {
3357 set_tunable_impl "$1" "$2" W
3358 }
3359
3360 function set_tunable_impl
3361 {
3362 typeset tunable="$1"
3363 typeset value="$2"
3364 typeset mdb_cmd="$3"
3365 typeset module="${4:-zfs}"
3366
3367 [[ -z "$tunable" ]] && return 1
3368 [[ -z "$value" ]] && return 1
3369 [[ -z "$mdb_cmd" ]] && return 1
3370
3371 case "$(uname)" in
3372 Linux)
3373 typeset zfs_tunables="/sys/module/$module/parameters"
3374 [[ -w "$zfs_tunables/$tunable" ]] || return 1
3375 echo -n "$value" > "$zfs_tunables/$tunable"
3376 return "$?"
3377 ;;
3378 SunOS)
3379 [[ "$module" -eq "zfs" ]] || return 1
3380 echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
3381 return "$?"
3382 ;;
3383 esac
3384 }
3385
3386 #
3387 # Get a global system tunable
3388 #
3389 # $1 tunable name
3390 #
3391 function get_tunable
3392 {
3393 get_tunable_impl "$1"
3394 }
3395
3396 function get_tunable_impl
3397 {
3398 typeset tunable="$1"
3399 typeset module="${2:-zfs}"
3400
3401 [[ -z "$tunable" ]] && return 1
3402
3403 case "$(uname)" in
3404 Linux)
3405 typeset zfs_tunables="/sys/module/$module/parameters"
3406 [[ -f "$zfs_tunables/$tunable" ]] || return 1
3407 cat $zfs_tunables/$tunable
3408 return "$?"
3409 ;;
3410 SunOS)
3411 [[ "$module" -eq "zfs" ]] || return 1
3412 ;;
3413 esac
3414
3415 return 1
3416 }
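
# A test would typically save the current value, change it, and restore it
# during cleanup, e.g. (the tunable name and value are illustrative):
#
#	typeset saved=$(get_tunable zfs_deadman_synctime_ms)
#	log_must set_tunable64 zfs_deadman_synctime_ms 60000
#	... test body ...
#	log_must set_tunable64 zfs_deadman_synctime_ms $saved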