]> git.proxmox.com Git - mirror_zfs.git/blob - tests/zfs-tests/include/libtest.shlib
5d8500ddfce1a3a8fcdedc6b9095740edb9adc3c
[mirror_zfs.git] / tests / zfs-tests / include / libtest.shlib
1 #!/bin/ksh -p
2 #
3 # CDDL HEADER START
4 #
5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
8 #
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
13 #
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
19 #
20 # CDDL HEADER END
21 #
22
23 #
24 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25 # Use is subject to license terms.
26 # Copyright (c) 2012, 2016 by Delphix. All rights reserved.
27 # Copyright 2016 Nexenta Systems, Inc.
28 # Copyright (c) 2017 Lawrence Livermore National Security, LLC.
29 # Copyright (c) 2017 Datto Inc.
30 #
31
32 . ${STF_TOOLS}/include/logapi.shlib
33 . ${STF_SUITE}/include/math.shlib
34
35 #
36 # Apply constrained path when available. This is required since the
37 # PATH may have been modified by sudo's secure_path behavior.
38 #
39 if [ -n "$STF_PATH" ]; then
40 PATH="$STF_PATH"
41 fi
42
43 # Determine if this is a Linux test system
44 #
45 # Return 0 if platform Linux, 1 if otherwise
46
function is_linux
{
	# The [[ ]] test itself yields the desired 0/1 status.
	[[ $(uname -o) == "GNU/Linux" ]]
}
55
56 # Determine if this is a 32-bit system
57 #
58 # Return 0 if platform is 32-bit, 1 if otherwise
59
function is_32bit
{
	# LONG_BIT is 32 on 32-bit platforms, 64 otherwise.
	[[ $(getconf LONG_BIT) == "32" ]]
}
68
69 # Determine if kmemleak is enabled
70 #
71 # Return 0 if kmemleak is enabled, 1 if otherwise
72
function is_kmemleak
{
	# kmemleak exposes a debugfs node only when compiled in and enabled.
	is_linux && [[ -e /sys/kernel/debug/kmemleak ]]
}
81
82 # Determine whether a dataset is mounted
83 #
84 # $1 dataset name
85 # $2 filesystem type; optional - defaulted to zfs
86 #
87 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
88
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# A leading '/' means $1 is a mountpoint; otherwise
			# it is a dataset name.  Match against the relevant
			# column of 'zfs mount' output.
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			# df failure is propagated as-is (doc: 2 on error).
			(($ret != 0)) && return $ret

			# Parse "<dir> (<name>) ..." out of the df output.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
		ext2)
			# Presence in 'df -t ext2' output decides the result.
			out=$(df -t $fstype $1 2>/dev/null)
			return $?
		;;
		zvol)
			# Resolve the zvol symlink and look for the backing
			# device at the start of a 'mount' line.
			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
				link=$(readlink -f $ZVOL_DEVDIR/$1)
				[[ -n "$link" ]] && \
					mount | grep -q "^$link" && \
						return 0
			fi
		;;
	esac

	return 1
}
136
137 # Return 0 if a dataset is mounted; 1 otherwise
138 #
139 # $1 dataset name
140 # $2 filesystem type; optional - defaulted to zfs
141
function mounted
{
	# Collapse ismounted's tri-state (0/1/2) to a plain boolean.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
148
149 # Return 0 if a dataset is unmounted; 1 otherwise
150 #
151 # $1 dataset name
152 # $2 filesystem type; optional - defaulted to zfs
153
function unmounted
{
	typeset rc

	ismounted $1 $2
	rc=$?
	# Only an explicit "unmounted" (1) answer counts as success;
	# mounted (0) and error (2) both fail.
	((rc == 1)) && return 0
	return 1
}
160
161 # split line on ","
162 #
163 # $1 - line to split
164
# Turn a comma-separated line into space-separated words on stdout.
#
# $1 - line to split
function splitline
{
	echo $1 | tr ',' ' '
}
169
170 function default_setup
171 {
172 default_setup_noexit "$@"
173
174 log_pass
175 }
176
177 #
178 # Given a list of disks, setup storage pools and datasets.
179 #
# $1 disk list for the pool
# $2 when non-empty, also create container $TESTCTR with $TESTFS1 in it
# $3 when non-empty, also create volume $TESTVOL
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3
	log_note begin default_setup_noexit

	if is_global_zone; then
		# Start from a clean slate: destroy any pre-existing
		# $TESTPOOL and its stale mountpoint directory.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		# Non-global zones cannot create pools; re-export the one
		# delegated by the global zone instead.
		reexport_pool
	fi

	rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1 || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		# canmount=off: the container is a grouping node only.
		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
			block_device_wait
		else
			# Zvols are not usable in a local zone; fall back to
			# a plain filesystem of the same name.
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
225
226 #
227 # Given a list of disks, setup a storage pool, file system and
228 # a container.
229 #
230 function default_container_setup
231 {
232 typeset disklist=$1
233
234 default_setup "$disklist" "true"
235 }
236
237 #
238 # Given a list of disks, setup a storage pool,file system
239 # and a volume.
240 #
241 function default_volume_setup
242 {
243 typeset disklist=$1
244
245 default_setup "$disklist" "" "true"
246 }
247
248 #
249 # Given a list of disks, setup a storage pool,file system,
250 # a container and a volume.
251 #
252 function default_container_volume_setup
253 {
254 typeset disklist=$1
255
256 default_setup "$disklist" "true" "true"
257 }
258
259 #
260 # Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
261 # filesystem
262 #
263 # $1 Existing filesystem or volume name. Default, $TESTFS
264 # $2 snapshot name. Default, $TESTSNAP
265 #
function create_snapshot
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	# The snapshot must not pre-exist, and its parent dataset must.
	snapexists $fs_vol@$snap && log_fail "$fs_vol@$snap already exists."
	datasetexists $fs_vol || log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
282
283 #
284 # Create a clone from a snapshot, default clone name is $TESTCLONE.
285 #
286 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
287 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
288 #
function create_clone # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	# Both names have defaults, so these only fire when the suite
	# variables themselves are empty.
	[[ -z $snap ]] && log_fail "Snapshot name is undefined."
	[[ -z $clone ]] && log_fail "Clone name is undefined."

	log_must zfs clone $snap $clone
}
301
302 #
303 # Create a bookmark of the given snapshot. Defaultly create a bookmark on
304 # filesystem.
305 #
306 # $1 Existing filesystem or volume name. Default, $TESTFS
307 # $2 Existing snapshot name. Default, $TESTSNAP
308 # $3 bookmark name. Default, $TESTBKMARK
309 #
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	# The bookmark must not pre-exist; the dataset and the source
	# snapshot both must.
	bkmarkexists $fs_vol#$bkmark && \
	    log_fail "$fs_vol#$bkmark already exists."
	datasetexists $fs_vol || log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
330
331 function default_mirror_setup
332 {
333 default_mirror_setup_noexit $1 $2 $3
334
335 log_pass
336 }
337
338 #
339 # Given a pair of disks, set up a storage pool and dataset for the mirror
340 # @parameters: $1 the primary side of the mirror
341 # $2 the secondary side of the mirror
342 # @uses: ZPOOL ZFS TESTPOOL TESTFS
343 function default_mirror_setup_noexit
344 {
345 readonly func="default_mirror_setup_noexit"
346 typeset primary=$1
347 typeset secondary=$2
348
349 [[ -z $primary ]] && \
350 log_fail "$func: No parameters passed"
351 [[ -z $secondary ]] && \
352 log_fail "$func: No secondary partition passed"
353 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
354 log_must zpool create -f $TESTPOOL mirror $@
355 log_must zfs create $TESTPOOL/$TESTFS
356 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
357 }
358
359 #
360 # create a number of mirrors.
361 # We create a number($1) of 2 way mirrors using the pairs of disks named
362 # on the command line. These mirrors are *not* mounted
363 # @parameters: $1 the number of mirrors to create
364 # $... the devices to use to create the mirrors on
365 # @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	# Consume the remaining arguments two at a time; pool names count
	# down: $TESTPOOL<n> ... $TESTPOOL1.
	while ((nmirrors > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}
379
380 #
381 # create a number of raidz pools.
382 # We create a number($1) of 2 raidz pools using the pairs of disks named
383 # on the command line. These pools are *not* mounted
384 # @parameters: $1 the number of pools to create
385 # $... the devices to use to create the pools on
386 # @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	# Consume the remaining arguments two at a time; pool names count
	# down: $TESTPOOL<n> ... $TESTPOOL1.
	while ((nraidzs > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}
400
401 #
402 # Destroy the configured testpool mirrors.
403 # the mirrors are of the form ${TESTPOOL}{number}
404 # @uses: ZPOOL ZFS TESTPOOL
405 function destroy_mirrors
406 {
407 default_cleanup_noexit
408
409 log_pass
410 }
411
412 #
413 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
414 # $1 the list of disks
415 #
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	# Use every disk that was passed in, not just the first three, so
	# the raidz spans the whole list as the contract above states.
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
432
433 #
434 # Common function used to cleanup storage pools and datasets.
435 #
436 # Invoked at the start of the test suite to ensure the system
437 # is in a known state, and also at the end of each set of
438 # sub-tests to ensure errors from one set of tests doesn't
439 # impact the execution of the next set.
440
441 function default_cleanup
442 {
443 default_cleanup_noexit
444
445 log_pass
446 }
447
# Destroy every destroyable pool (global zone) or reset the delegated
# datasets (local zone), then remove $TESTDIR and any leftover
# partitions on multipath devices.  Pools matching $KEEP or $NO_POOLS
# are preserved.
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		# Build an egrep alternation "(a|b|...)" from $KEEP.
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Re-list after each destroy; clones can make
				# a pool become destroyable only after its
				# dependents are gone.
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		# Local zone: destroy everything under the delegated
		# $ZONE_POOL/$ZONE_CTR[0-4] containers.
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
			    log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# checksum cannot be reset on encrypted
				# datasets, so only touch it when encryption
				# is absent or off.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
				    [[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
	    log_must rm -rf $TESTDIR

	# Multipath devices must be left un-partitioned for the suite.
	disk1=${DISKS%% *}
	if is_mpath_device $disk1; then
		delete_partitions
	fi
}
529
530
531 #
532 # Common function used to cleanup storage pools, file systems
533 # and containers.
534 #
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount first so the recursive destroys below cannot fail on a
	# busy mountpoint.
	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	# Finish with the generic cleanup, which ends via log_pass.
	default_cleanup
}
556
557 #
558 # Common function used to cleanup snapshot of file system or volume. Default to
559 # delete the file system's snapshot
560 #
561 # $1 snapshot name
562 #
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		# Message grammar fixed (was "does not existed.").
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint reported by get_prop is only meaningful while the
	# snapshot is actually mounted, so record it only in that case and
	# remove the directory once the snapshot has been destroyed.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}
587
588 #
589 # Common function used to cleanup clone.
590 #
591 # $1 clone name
592 #
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# Message grammar fixed (was "does not existed.").
		log_fail "'$clone' does not exist."
	fi

	# As in destroy_snapshot: only record the mountpoint while the
	# clone is mounted, then remove the directory after the destroy.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}
613
614 #
615 # Common function used to cleanup bookmark of file system or volume. Default
616 # to delete the file system's bookmark.
617 #
618 # $1 bookmark name
619 #
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fixed: the message referenced the undefined variable
		# $bkmarkp and used bad grammar ("does not existed.").
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
630
631 # Return 0 if a snapshot exists; $? otherwise
632 #
633 # $1 - snapshot name
634
function snapexists
{
	# The zfs exit status is the function's return value.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
640
641 #
642 # Return 0 if a bookmark exists; $? otherwise
643 #
644 # $1 - bookmark name
645 #
function bkmarkexists
{
	# The zfs exit status is the function's return value.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
651
652 #
653 # Set a property to a certain value on a dataset.
654 # Sets a property of the dataset to the value as passed in.
655 # @param:
656 # $1 dataset who's property is being set
657 # $2 property to set
658 # $3 value to set property to
659 # @return:
660 # 0 if the property could be set.
661 # non-zero otherwise.
662 # @use: ZFS
663 #
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	# Capture stdout and stderr together so the diagnostics below can
	# include whatever 'zfs set' printed on failure.
	output=$(zfs set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}
684
685 #
686 # Assign suite defined dataset properties.
687 # This function is used to apply the suite's defined default set of
688 # properties to a dataset.
689 # @parameters: $1 dataset to use
690 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
691 # @returns:
692 # 0 if the dataset has been altered.
693 # 1 if no pool name was passed in.
694 # 2 if the dataset could not be found.
695 # 3 if the dataset could not have it's properties set.
696 #
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# NOTE(review): $(zfs list) word-splits on all whitespace, so this
	# iterates over every column of the output, not only dataset
	# names; it still matches $dataset when it exists — confirm.
	for confset in $(zfs list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
		    return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
		    return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
724
725 #
726 # Check a numeric assertion
727 # @parameter: $@ the assertion to check
728 # @output: big loud notice if assertion failed
729 # @use: log_fail
730 #
function assert
{
	# Evaluate the arguments arithmetically; a false (zero) result
	# aborts the test with the expression as the failure message.
	if ! (($@)); then
		log_fail "$@"
	fi
}
735
736 #
737 # Function to format partition size of a disk
738 # Given a disk cxtxdx reduces all partitions
739 # to 0 size
740 #
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset i

	if is_linux; then
		# Writing a fresh GPT label discards every partition.
		log_must parted $DEV_DSKDIR/$diskname -s -- mklabel gpt
	else
		# Slice 2 is the traditional whole-disk slice; skip it.
		for i in 0 1 3 4 5 6 7; do
			set_partition $i "" 0mb $diskname
		done
	fi

	return 0
}
757
758 #
759 # Given a slice, size and disk, this function
760 # formats the slice to the specified size.
761 # Size should be specified with units as per
762 # the `format` command requirements eg. 100mb 3gb
763 #
764 # NOTE: This entire interface is problematic for the Linux parted utilty
765 # which requires the end of the partition to be specified. It would be
766 # best to retire this interface and replace it with something more flexible.
767 # At the moment a best effort is made.
768 #
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."

	if is_linux; then
		# Strip a trailing m/M/g/G or mb/MB/gb/GB unit suffix.
		typeset size_mb=${size%%[mMgG]}

		size_mb=${size_mb%%[mMgG][bB]}
		# NOTE(review): ${size:1:1} inspects the SECOND character,
		# so the GB->MB conversion only fires for single-digit
		# sizes such as "3g"; "10g" falls through and is treated
		# as MB — confirm whether callers ever pass that.
		if [[ ${size:1:1} == 'g' ]]; then
			((size_mb = size_mb * 1024))
		fi

		# Create GPT partition table when setting slice 0 or
		# when the device doesn't already contain a GPT label.
		parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
		typeset ret_val=$?
		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
			log_must parted $DEV_DSKDIR/$disk -s -- mklabel gpt
		fi

		# When no start is given align on the first cylinder.
		if [[ -z "$start" ]]; then
			start=1
		fi

		# Determine the cylinder size for the device and using
		# that calculate the end offset in cylinders.
		typeset -i cly_size_kb=0
		cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
		    unit cyl print | head -3 | tail -1 | \
		    awk -F '[:k.]' '{print $4}')
		((end = (size_mb * 1024 / cly_size_kb) + start))

		log_must parted $DEV_DSKDIR/$disk -s -- \
		    mkpart part$slicenum ${start}cyl ${end}cyl

		blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
		block_device_wait
	else
		typeset format_file=/var/tmp/format_in.$$

		# Drive the interactive format(1M) utility with a canned
		# command script: select slice, accept defaults, set
		# start/size, write the label, quit.
		echo "partition" >$format_file
		echo "$slicenum" >> $format_file
		echo "" >> $format_file
		echo "" >> $format_file
		echo "$start" >> $format_file
		echo "$size" >> $format_file
		echo "label" >> $format_file
		echo "" >> $format_file
		echo "q" >> $format_file
		echo "q" >> $format_file

		format -e -s -d $disk -f $format_file
	fi

	# NOTE(review): on the Linux branch this captures the status of
	# block_device_wait (parted failures are already caught by
	# log_must above) and $format_file is unset there, so the rm is
	# a harmless no-op — confirm intended.
	typeset ret_val=$?
	rm -f $format_file
	[[ $ret_val -ne 0 ]] && \
	    log_fail "Unable to format $disk slice $slicenum to $size"
	return 0
}
835
836 #
837 # Delete all partitions on all disks - this is specifically for the use of multipath
838 # devices which currently can only be used in the test suite as raw/un-partitioned
839 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
840 #
function delete_partitions
{
	typeset -i j=1

	if [[ -z $DISK_ARRAY_NUM ]]; then
		DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
	fi
	if [[ -z $DISKSARRAY ]]; then
		DISKSARRAY=$DISKS
	fi

	if is_linux; then
		if (( $DISK_ARRAY_NUM == 1 )); then
			# Single disk: remove partitions 1..MAX_PARTITIONS-1.
			# 'parted rm' returning 1 means partition j did not
			# exist, i.e. we have run past the last one.
			while ((j < MAX_PARTITIONS)); do
				parted $DEV_DSKDIR/$DISK -s rm $j \
				    > /dev/null 2>&1
				if (( $? == 1 )); then
					lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 1 )); then
						log_note "Partitions for $DISK should be deleted"
					else
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
					return 0
				else
					# Removal reported success; verify the
					# kernel no longer exposes the node.
					lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 0 )); then
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
				fi
				((j = j+1))
			done
		else
			# Multiple disks: same procedure per disk.
			for disk in `echo $DISKSARRAY`; do
				while ((j < MAX_PARTITIONS)); do
					parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
					if (( $? == 1 )); then
						lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 1 )); then
							log_note "Partitions for $disk should be deleted"
						else
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
						# Force the inner loop to end
						# for this disk.
						j=7
					else
						lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 0 )); then
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
					fi
					((j = j+1))
				done
				j=1
			done
		fi
	fi
	return 0
}
899
900 #
901 # Get the end cyl of the given slice
902 #
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	if is_linux; then
		# Read the end column of partition "part<slice>" from
		# parted's cylinder-unit listing.
		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
		    grep "part${slice}" | \
		    awk '{print $3}' | \
		    sed 's,cyl,,')
		((endcyl = (endcyl + 1)))
	else
		# Normalise to the bare cXtYdZ name so we can query the
		# whole-disk slice (s2) through /dev/rdsk.
		disk=${disk#/dev/dsk/}
		disk=${disk#/dev/rdsk/}
		disk=${disk%s*}

		# Sectors per cylinder, used to convert the sector offset
		# reported by prtvtoc into cylinders.
		typeset -i ratio=0
		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		    grep "sectors\/cylinder" | \
		    awk '{print $2}')

		if ((ratio == 0)); then
			return
		fi

		# Column 6 of prtvtoc -h is the last sector of the slice.
		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		    nawk -v token="$slice" '{if ($1==token) print $6}')

		((endcyl = (endcyl + 1) / ratio))
	fi

	echo $endcyl
}
939
940
941 #
942 # Given a size,disk and total slice number, this function formats the
943 # disk slices from 0 to the total slice number with the same specified
944 # size.
945 #
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	# Wipe the disk, then lay out consecutive equal-size slices; each
	# new slice starts where the previous one ended.
	zero_partitions $disk_name
	while ((i < $total_slices)); do
		if ! is_linux; then
			# Slice 2 is the reserved whole-disk slice on
			# illumos; leave it untouched.
			if ((i == 2)); then
				((i = i + 1))
				continue
			fi
		fi
		set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}
967
968 #
969 # This function continues to write to a filenum number of files into dirnum
970 # number of directories until either file_write returns an error or the
971 # maximum number of files per directory have been written.
972 #
973 # Usage:
974 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
975 #
976 # Return value: 0 on success
977 # non 0 on error
978 #
979 # Where :
980 # destdir: is the directory where everything is to be created under
981 # dirnum: the maximum number of subdirectories to use, -1 no limit
982 # filenum: the maximum number of files per subdirectory
983 # bytes: number of bytes to write
984 # num_writes: numer of types to write out bytes
985 # data: the data that will be written
986 #
987 # E.g.
988 # file_fs /testdir 20 25 1024 256 0
989 #
990 # Note: bytes * num_writes equals the size of the testfile
991 #
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1
	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# Stop once the directory limit is reached; dirnum == -1
		# means no limit.
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# Any write failure (e.g. out of space) ends the fill and
		# is propagated to the caller via $retval.
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# NOTE(review): fn runs 0..filenum inclusive, so each
		# directory receives filenum + 1 files — confirm intended.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
1029
1030 #
1031 # Simple function to get the specified property. If unable to
1032 # get the property then exits.
1033 #
1034 # Note property is in 'parsable' format (-p)
1035 #
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	# Parsable (-p), scripted (-H) output: the raw value only.
	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if (($? != 0)); then
		log_note "Unable to get $prop property for dataset " \
		    "$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
1052
1053 #
1054 # Simple function to get the specified property of pool. If unable to
1055 # get the property then exits.
1056 #
1057 # Note property is in 'parsable' format (-p)
1058 #
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
		    awk '{print $3}')
		# NOTE(review): $? reflects the last pipeline stage (awk),
		# so zpool failures are unlikely to reach this branch.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			    "$pool"
			return 1
		fi
	else
		# Message grammar fixed (was "Pool $pool not exists.").
		log_note "Pool $pool does not exist."
		return 1
	fi

	echo "$prop_val"
	return 0
}
1081
1082 # Return 0 if a pool exists; $? otherwise
1083 #
1084 # $1 - pool name
1085
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# 'zpool get' succeeds only for an existing pool; its status is
	# the function's return value.
	zpool get name "$pool" > /dev/null 2>&1
}
1098
1099 # Return 0 if all the specified datasets exist; $? otherwise
1100 #
1101 # $1-n dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Fail fast on the first dataset that cannot be queried,
	# propagating that zfs exit status.
	typeset ds
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || return $?
	done

	return 0
}
1117
1118 # return 0 if none of the specified datasets exists, otherwise return 1.
1119 #
1120 # $1-n dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# If any listed dataset exists, the answer is "no" (1).
	typeset ds
	for ds in "$@"; do
		zfs list -H -t filesystem,snapshot,volume $ds \
		    > /dev/null 2>&1 && return 1
	done

	return 0
}
1136
1137 #
1138 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1139 #
1140 # Returns 0 if shared, 1 otherwise.
1141 #
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name (no leading '/') is translated to its
	# mountpoint; unmountable values mean it cannot be shared.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	if is_linux; then
		# On Linux the mountpoint is the first column of the
		# share wrapper's output.
		for mtpt in `share | awk '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	fi

	# illumos share(1M) prints the path in the second column.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared — report the NFS server state as a debugging aid.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1183
1184 #
1185 # Given a dataset name determine if it is shared via SMB.
1186 #
1187 # Returns 0 if shared, 1 otherwise.
1188 #
function is_shared_smb
{
	typeset fs=$1
	typeset mtpt

	if datasetnonexists "$fs" ; then
		return 1
	else
		# Samba usershare names replace '/' with '_'.
		fs=$(echo $fs | sed 's@/@_@g')
	fi

	if is_linux; then
		for mtpt in `net usershare list | awk '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	else
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi
}
1212
1213 #
1214 # Given a mountpoint, determine if it is not shared via NFS.
1215 #
1216 # Returns 0 if not shared, 1 otherwise.
1217 #
function not_shared
{
	typeset fs=$1

	# Invert is_shared: shared -> 1, not shared -> 0.
	if is_shared $fs; then
		return 1
	fi

	return 0
}
1229
1230 #
1231 # Given a dataset determine if it is not shared via SMB.
1232 #
1233 # Returns 0 if not shared, 1 otherwise.
1234 #
function not_shared_smb
{
	typeset fs=$1

	# Invert is_shared_smb: shared -> 1, not shared -> 0.
	if is_shared_smb $fs; then
		return 1
	fi

	return 0
}
1246
1247 #
1248 # Helper function to unshare a mountpoint.
1249 #
function unshare_fs #fs
{
	typeset fs=$1

	# Unshare only if the fs is currently shared over NFS or SMB.
	if is_shared $fs || is_shared_smb $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1261
1262 #
1263 # Helper function to share a NFS mountpoint.
1264 #
function share_nfs #fs
{
	typeset fs=$1

	# Idempotent: only share when not already shared.  The share
	# invocation differs between Linux and illumos.
	if is_linux; then
		if ! is_shared $fs; then
			log_must share "*:$fs"
		fi
	else
		if ! is_shared $fs; then
			log_must share -F nfs $fs
		fi
	fi

	return 0
}
1283
1284 #
1285 # Helper function to unshare a NFS mountpoint.
1286 #
function unshare_nfs #fs
{
	typeset fs=$1

	# Idempotent: only unshare when currently shared.  The unshare
	# invocation differs between Linux and illumos.
	if is_linux; then
		if is_shared $fs; then
			log_must unshare -u "*:$fs"
		fi
	else
		if is_shared $fs; then
			log_must unshare -F nfs $fs
		fi
	fi

	return 0
}
1305
1306 #
1307 # Helper function to show NFS shares.
1308 #
function showshares_nfs
{
	# Linux 'share' takes -v; illumos selects the NFS protocol with -F.
	if is_linux; then
		share -v
	else
		share -F nfs
	fi
	return 0
}
1319
1320 #
1321 # Helper function to show SMB shares.
1322 #
function showshares_smb
{
	# Samba usershares on Linux; illumos uses share -F smb.
	if is_linux; then
		net usershare list
	else
		share -F smb
	fi
	return 0
}
1333
1334 #
1335 # Check NFS server status and trigger it online.
1336 #
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	if is_linux; then
		# On Linux the framework does not manage the NFS service.
		log_note "NFS server must be started prior to running test framework."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		timeout=10
		# Fixed: 'timeout' was referenced without '$', which only
		# worked due to ksh's arithmetic coercion inside [[ ]].
		while [[ $timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1388
1389 #
1390 # To verify whether calling process is in global zone
1391 #
1392 # Return 0 if in global zone, 1 in non-global zone
1393 #
function is_global_zone
{
	# Linux has no zones; treat every caller as global.
	if is_linux; then
		return 0
	fi

	# On illumos, only the global zone reports 'global' from zonename.
	[[ $(zonename 2>/dev/null) == "global" ]]
}
1406
1407 #
1408 # Verify whether test is permitted to run from
1409 # global zone, local zone, or both
1410 #
1411 # $1 zone limit, could be "global", "local", or "both"(no limit)
1412 #
1413 # Return 0 if permitted, otherwise exit with log_unsupported
1414 #
function verify_runnable # zone limit
{
	typeset limit=$1

	# An empty limit means the test may run anywhere.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
		global|both) ;;
		local) log_unsupported "Test is unable to run from "\
			"global zone."
			;;
		*) log_note "Warning: unknown limit $limit - " \
			"use both."
			;;
		esac
	else
		case $limit in
		local|both) ;;
		global) log_unsupported "Test is unable to run from "\
			"local zone."
			;;
		*) log_note "Warning: unknown limit $limit - " \
			"use both."
			;;
		esac

		# Local zones must remount their exported datasets.
		reexport_pool
	fi

	return 0
}
1449
# Return 0 if the pool is created successfully or already exists; $? otherwise
1451 # Note: In local zones, this function should return 0 silently.
1452 #
1453 # $1 - pool name
1454 # $2-n - [keyword] devs_list
1455
function create_pool #pool devs_list
{
	# Only the pool component of a dataset name is used.
	typeset pool=${1%%/*}
	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Recreate from scratch if a pool of this name already exists.
	poolexists $pool && destroy_pool $pool

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1478
# Return 0 if the pool is destroyed successfully; 1 if it does not exist
1480 # Note: In local zones, this function should return 0 silently.
1481 #
1482 # $1 - pool name
1483 # Destroy pool with the given parameters.
1484
function destroy_pool #pool
{
	# Only the pool component of a dataset name is used.
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if ! poolexists "$pool" ; then
			log_note "Pool does not exist. ($pool)"
			return 1
		fi

		mtpt=$(get_prop mountpoint "$pool")

		# At times, syseventd/udev activity can cause attempts
		# to destroy a pool to fail with EBUSY. We retry a few
		# times allowing failures before requiring the destroy
		# to succeed.
		log_must_busy zpool destroy -f $pool

		[[ -d $mtpt ]] && log_must rm -rf $mtpt
	fi

	return 0
}
1515
1516 #
1517 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1518 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1519 # and a zvol device to the zone.
1520 #
1521 # $1 zone name
1522 # $2 zone root directory prefix
1523 # $3 zone ip
1524 #
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	# NOTE(review): zone_ip is accepted but never used in this
	# function — confirm whether network config was intended here.
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol
	block_device_wait

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	# Start each run from a clean zone directory.
	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the 5 container datasets to the zone.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo "terminal=dtterm" >> $sysidcfg
	echo "network_interface=primary {" >> $sysidcfg
	echo "hostname=$zone_name" >> $sysidcfg
	echo "}" >> $sysidcfg
	echo "name_service=NONE" >> $sysidcfg
	echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo "security_policy=NONE" >> $sysidcfg
	echo "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1629
1630 #
1631 # Reexport TESTPOOL & TESTPOOL(1-4)
1632 #
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Point TESTPOOL and TESTPOOL1..TESTPOOL4 at the zone's delegated
	# container datasets and ensure each is mounted.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# eval is required to assign and later expand the
			# dynamically named TESTPOOL<i> variables.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1653
1654 #
1655 # Verify a given disk or pool state
1656 #
# Return 0 if pool/disk matches expected state, 1 otherwise
1658 #
function check_state # pool disk state{online,offline,degraded}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	# Pool name and expected state are mandatory; disk is optional.
	[[ -z $pool ]] || [[ -z $state ]] \
	    && log_fail "Arguments invalid or missing"

	if [[ -z $disk ]]; then
		# No disk given: match against the overall pool health.
		zpool get -H -o value health $pool \
		    | grep -i "$state" > /dev/null 2>&1
	else
		# Match the state on the disk's row of 'zpool status'.
		zpool status -v $pool | grep "$disk" \
		    | grep -i "$state" > /dev/null 2>&1
	fi
}
1679
1680 #
1681 # Cause a scan of all scsi host adapters by default
1682 #
1683 # $1 optional host number
1684 #
function scan_scsi_hosts
{
	typeset hostnum=${1}

	if is_linux; then
		if [[ -z $hostnum ]]; then
			# No host given: rescan every SCSI host adapter.
			for host in /sys/class/scsi_host/host*; do
				log_must eval "echo '- - -' > $host/scan"
			done
		else
			# Rescan only the requested host.  The previous
			# version first ran a no-op command that merely
			# echoed the sysfs path into /dev/null (the
			# redirection applied outside the eval'd string);
			# it has been removed.
			log_must eval \
			    "echo '- - -' > /sys/class/scsi_host/host$hostnum/scan"
		fi
	fi
}
1703 #
1704 # Wait for newly created block devices to have their minors created.
1705 #
function block_device_wait
{
	# Only Linux needs to wait for udev to create the device minors.
	is_linux || return 0

	udevadm trigger
	udevadm settle
}
1713
1714 #
1715 # Online or offline a disk on the system
1716 #
1717 # First checks state of disk. Test will fail if disk is not properly onlined
1718 # or offlined. Online is a full rescan of SCSI disks by echoing to every
1719 # host entry.
1720 #
function on_off_disk # disk state{online,offline} host
{
	typeset disk=$1
	typeset state=$2
	typeset host=$3

	[[ -z $disk ]] || [[ -z $state ]] && \
	    log_fail "Arguments invalid or missing"

	if is_linux; then
		if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
			# Multipath device: offline and delete every slave
			# (underlying SCSI path) of the dm device in turn.
			dm_name="$(readlink $DEV_DSKDIR/$disk \
			    | nawk -F / '{print $2}')"
			slave="$(ls /sys/block/${dm_name}/slaves \
			    | nawk '{print $1}')"
			while [[ -n $slave ]]; do
				#check if disk is online
				lsscsi | egrep $slave > /dev/null
				if (($? == 0)); then
					slave_dir="/sys/block/${dm_name}"
					slave_dir+="/slaves/${slave}/device"
					ss="${slave_dir}/state"
					sd="${slave_dir}/delete"
					log_must eval "echo 'offline' > ${ss}"
					log_must eval "echo '1' > ${sd}"
					# The device must no longer appear
					# in lsscsi once deleted.
					lsscsi | egrep $slave > /dev/null
						if (($? == 0)); then
							log_fail "Offlining" \
							    "$disk failed"
						fi
				fi
				# Pick up the next remaining slave, if any.
				slave="$(ls /sys/block/$dm_name/slaves \
				    2>/dev/null | nawk '{print $1}')"
			done
		elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
			#check if disk is online
			lsscsi | egrep $disk > /dev/null
			if (($? == 0)); then
				dev_state="/sys/block/$disk/device/state"
				dev_delete="/sys/block/$disk/device/delete"
				log_must eval "echo 'offline' > ${dev_state}"
				log_must eval "echo '1' > ${dev_delete}"
				lsscsi | egrep $disk > /dev/null
					if (($? == 0)); then
						log_fail "Offlining $disk" \
						    "failed"
					fi
			else
				log_note "$disk is already offline"
			fi
		elif [[ $state == "online" ]]; then
			#force a full rescan
			scan_scsi_hosts $host
			block_device_wait
			if is_mpath_device $disk; then
				dm_name="$(readlink $DEV_DSKDIR/$disk \
				    | nawk -F / '{print $2}')"
				slave="$(ls /sys/block/$dm_name/slaves \
				    | nawk '{print $1}')"
				# After the rescan the slave must be back.
				lsscsi | egrep $slave > /dev/null
				if (($? != 0)); then
					log_fail "Onlining $disk failed"
				fi
			elif is_real_device $disk; then
				lsscsi | egrep $disk > /dev/null
				if (($? != 0)); then
					log_fail "Onlining $disk failed"
				fi
			else
				log_fail "$disk is not a real dev"
			fi
		else
			log_fail "$disk failed to $state"
		fi
	fi
}
1797
1798 #
1799 # Get the mountpoint of snapshot
1800 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1801 # as its mountpoint
1802 #
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# A snapshot name must contain the '@' separator.
	[[ $dataset == *@* ]] || log_fail "Error name of snapshot '$dataset'."

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# Both the filesystem and snapshot components must be non-empty.
	[[ -n $fs && -n $snap ]] || log_fail "Error name of snapshot '$dataset'."

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1820
1821 #
1822 # Given a device and 'ashift' value verify it's correctly set on every label
1823 #
1824 function verify_ashift # device ashift
1825 {
1826 typeset device="$1"
1827 typeset ashift="$2"
1828
1829 zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
1830 if (ashift != $2)
1831 exit 1;
1832 else
1833 count++;
1834 } END {
1835 if (count != 4)
1836 exit 1;
1837 else
1838 exit 0;
1839 }'
1840
1841 return $?
1842 }
1843
1844 #
1845 # Given a pool and file system, this function will verify the file system
1846 # using the zdb internal tool. Note that the pool is exported and imported
1847 # to ensure it has consistent state.
1848 #
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	# Remaining arguments are extra search directories for the import.
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	zfs unmount -a > /dev/null 2>&1
	# Export/import cycle ensures zdb sees a consistent on-disk state.
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1882
1883 #
1884 # Given a pool, and this function list all disks in the pool
1885 #
1886 function get_disklist # pool
1887 {
1888 typeset disklist=""
1889
1890 disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1891 grep -v "\-\-\-\-\-" | \
1892 egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1893
1894 echo $disklist
1895 }
1896
1897 #
1898 # Given a pool, and this function list all disks in the pool with their full
1899 # path (like "/dev/sda" instead of "sda").
1900 #
1901 function get_disklist_fullpath # pool
1902 {
1903 args="-P $1"
1904 get_disklist $args
1905 }
1906
1907
1908
1909 # /**
1910 # This function kills a given list of processes after a time period. We use
1911 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1912 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1913 # would be listed as FAIL, which we don't want : we're happy with stress tests
1914 # running for a certain amount of time, then finishing.
1915 #
1916 # @param $1 the time in seconds after which we should terminate these processes
1917 # @param $2..$n the processes we wish to terminate.
1918 # */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"
	typeset pid

	log_note "Waiting for child processes($cpids). " \
	    "It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	for pid in $cpids; do
		# Only signal processes that are still running.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1938
1939 #
1940 # Verify a given hotspare disk is inuse or avail
1941 #
# Return 0 if pool/disk matches expected state, 1 otherwise
1943 #
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	# Read the device state from the 'spares' section of zpool status.
	cur_state=$(get_device_state $pool $disk "spares")

	[[ $state == ${cur_state} ]]
}
1957
1958 #
1959 # Verify a given slog disk is inuse or avail
1960 #
# Return 0 if pool/disk matches expected state, 1 otherwise
1962 #
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	# Read the device state from the 'logs' section of zpool status.
	cur_state=$(get_device_state $pool $disk "logs")

	[[ $state == ${cur_state} ]]
}
1976
1977 #
1978 # Verify a given vdev disk is inuse or avail
1979 #
# Return 0 if pool/disk matches expected state, 1 otherwise
1981 #
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	# Strip an optional leading $DEV_DSKDIR/ prefix.  This was
	# previously written as ${2#$/DEV_DSKDIR/} — a typo whose pattern
	# could never match, unlike the identical logic in
	# check_hotspare_state() and check_slog_state().
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
1995
1996 #
1997 # Check the output of 'zpool status -v <pool>',
1998 # and to see if the content of <token> contain the <keyword> specified.
1999 #
2000 # Return 0 is contain, 1 otherwise
2001 #
2002 function check_pool_status # pool token keyword
2003 {
2004 typeset pool=$1
2005 typeset token=$2
2006 typeset keyword=$3
2007
2008 zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2009 ($1==token) {print $0}' \
2010 | grep -i "$keyword" > /dev/null 2>&1
2011
2012 return $?
2013 }
2014
2015 #
2016 # These 5 following functions are instance of check_pool_status()
2017 # is_pool_resilvering - to check if the pool is resilver in progress
2018 # is_pool_resilvered - to check if the pool is resilver completed
2019 # is_pool_scrubbing - to check if the pool is scrub in progress
2020 # is_pool_scrubbed - to check if the pool is scrub completed
2021 # is_pool_scrub_stopped - to check if the pool is scrub stopped
2022 #
function is_pool_resilvering #pool
{
	# Exit status of check_pool_status is propagated as ours.
	check_pool_status "$1" "scan" "resilver in progress since "
}
2028
function is_pool_resilvered #pool
{
	# Exit status of check_pool_status is propagated as ours.
	check_pool_status "$1" "scan" "resilvered "
}
2034
function is_pool_scrubbing #pool
{
	# Exit status of check_pool_status is propagated as ours.
	check_pool_status "$1" "scan" "scrub in progress since "
}
2040
function is_pool_scrubbed #pool
{
	# Exit status of check_pool_status is propagated as ours.
	check_pool_status "$1" "scan" "scrub repaired"
}
2046
function is_pool_scrub_stopped #pool
{
	# Exit status of check_pool_status is propagated as ours.
	check_pool_status "$1" "scan" "scrub canceled"
}
2052
2053 #
# Use create_pool()/destroy_pool() to clean up the information in
# the given disk to avoid slice overlapping.
2056 #
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Remove any leftover scratch pool from an earlier run.
	poolexists $pool && destroy_pool $pool

	# Creating and destroying a scratch pool rewrites the labels.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
2070
2071 #/**
2072 # A function to find and locate free disks on a system or from given
2073 # disks as the parameter. It works by locating disks that are in use
2074 # as swap devices and dump devices, and also disks listed in /etc/vfstab
2075 #
2076 # $@ given disks to find which are free, default is all disks in
2077 # the test system
2078 #
2079 # @return a string containing the list of available disks
2080 #*/
function find_disks
{
	# Trust provided list, no attempt is made to locate unused devices.
	if is_linux; then
		echo "$@"
		return
	fi


	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	# Snapshot current swap and dump device usage for the checks below.
	swap -l > $sfi
	dumpadm > $dmpi 2>/dev/null

	# write an awk script that can process the output of format
	# to produce a list of disks we know about. Note that we have
	# to escape "$2" so that the shell doesn't interpret it while
	# we're creating the awk script.
	# -------------------
	cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
	BEGIN { FS="."; }

	/^Specify disk/{
		searchdisks=0;
	}

	{
		if (searchdisks && \$2 !~ "^$"){
			split(\$2,arr," ");
			print arr[1];
		}
	}

	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
EOF
	#---------------------

	chmod 755 /tmp/find_disks.awk
	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
	rm /tmp/find_disks.awk

	unused=""
	for disk in $disks; do
	# Check for mounted
		grep "${disk}[sp]" /etc/mnttab >/dev/null
		(($? == 0)) && continue
	# Check for swap
		grep "${disk}[sp]" $sfi >/dev/null
		(($? == 0)) && continue
	# check for dump device
		grep "${disk}[sp]" $dmpi >/dev/null
		(($? == 0)) && continue
	# check to see if this disk hasn't been explicitly excluded
	# by a user-set environment variable
		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
		(($? == 0)) && continue
		unused_candidates="$unused_candidates $disk"
	done
	rm $sfi
	rm $dmpi

# now just check to see if those disks do actually exist
# by looking for a device pointing to the first slice in
# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if [ -b $DEV_DSKDIR/${disk}s0 ]; then
		if [ $count -lt $max_finddisksnum ]; then
			unused="$unused $disk"
			# do not impose limit if $@ is provided
			[[ -z $@ ]] && ((count = count + 1))
		fi
		fi
	done

# finally, return our disk list
	echo $unused
}
2164
2165 #
2166 # Add specified user to specified group
2167 #
2168 # $1 group name
2169 # $2 user name
2170 # $3 base of the homedir (optional)
2171 #
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}
	typeset profile

	if ((${#gname} == 0 || ${#uname} == 0)); then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname

	# Force the constrained test PATH on every kind of login shell.
	for profile in .profile .bash_profile .login; do
		echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/$profile
	done

	# Add new users to the same group and the command line utils.
	# This allows them to be run out of the original users home
	# directory as long as it permissioned to be group readable.
	if is_linux; then
		cmd_group=$(stat --format="%G" $(which zfs))
		log_must usermod -a -G $cmd_group $uname
	fi

	return 0
}
2197
2198 #
2199 # Delete the specified user.
2200 #
2201 # $1 login name
2202 # $2 base of the homedir (optional)
2203 #
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	((${#user} == 0)) && log_fail "login name is necessary."

	# Remove the account only when it still exists.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	# Always clean up the home directory.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
2221
2222 #
2223 # Select valid gid and create specified group.
2224 #
2225 # $1 group name
2226 #
function add_group #<group_name>
{
	typeset group=$1

	((${#group} == 0)) && log_fail "group name is necessary."

	# Assign 100 as the base gid, a larger value is selected for
	# Linux because for many distributions 1000 and under are reserved.
	if is_linux; then
		while true; do
			groupadd $group > /dev/null 2>&1
			case $? in
			0) return 0 ;;
			*) return 1 ;;
			esac
		done
	else
		typeset -i gid=100
		while true; do
			groupadd -g $gid $group > /dev/null 2>&1
			case $? in
			0) return 0 ;;
			# The gid is not unique
			4) ((gid += 1)) ;;
			*) return 1 ;;
			esac
		done
	fi
}
2260
2261 #
2262 # Delete the specified group.
2263 #
2264 # $1 group name
2265 #
function del_group #<group_name>
{
	typeset grp=$1
	typeset -i rc

	((${#grp} == 0)) && log_fail "group name is necessary."

	if is_linux; then
		getent group $grp > /dev/null 2>&1
		rc=$?
		case $rc in
		# Group does not exist.
		2) return 0 ;;
		# Name already exists as a group name
		0) log_must groupdel $grp ;;
		*) return 1 ;;
		esac
	else
		groupmod -n $grp $grp > /dev/null 2>&1
		rc=$?
		case $rc in
		# Group does not exist.
		6) return 0 ;;
		# Name already exists as a group name
		9) log_must groupdel $grp ;;
		*) return 1 ;;
		esac
	fi

	return 0
}
2297
2298 #
2299 # This function will return true if it's safe to destroy the pool passed
2300 # as argument 1. It checks for pools based on zvols and files, and also
2301 # files contained in a pool that may have a different mountpoint.
2302 #
2303 function safe_to_destroy_pool { # $1 the pool name
2304
2305 typeset pool=""
2306 typeset DONT_DESTROY=""
2307
2308 # We check that by deleting the $1 pool, we're not
2309 # going to pull the rug out from other pools. Do this
2310 # by looking at all other pools, ensuring that they
2311 # aren't built from files or zvols contained in this pool.
2312
2313 for pool in $(zpool list -H -o name)
2314 do
2315 ALTMOUNTPOOL=""
2316
2317 # this is a list of the top-level directories in each of the
2318 # files that make up the path to the files the pool is based on
2319 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2320 awk '{print $1}')
2321
2322 # this is a list of the zvols that make up the pool
2323 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2324 | awk '{print $1}')
2325
2326 # also want to determine if it's a file-based pool using an
2327 # alternate mountpoint...
2328 POOL_FILE_DIRS=$(zpool status -v $pool | \
2329 grep / | awk '{print $1}' | \
2330 awk -F/ '{print $2}' | grep -v "dev")
2331
2332 for pooldir in $POOL_FILE_DIRS
2333 do
2334 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2335 grep "${pooldir}$" | awk '{print $1}')
2336
2337 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2338 done
2339
2340
2341 if [ ! -z "$ZVOLPOOL" ]
2342 then
2343 DONT_DESTROY="true"
2344 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2345 fi
2346
2347 if [ ! -z "$FILEPOOL" ]
2348 then
2349 DONT_DESTROY="true"
2350 log_note "Pool $pool is built from $FILEPOOL on $1"
2351 fi
2352
2353 if [ ! -z "$ALTMOUNTPOOL" ]
2354 then
2355 DONT_DESTROY="true"
2356 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2357 fi
2358 done
2359
2360 if [ -z "${DONT_DESTROY}" ]
2361 then
2362 return 0
2363 else
2364 log_note "Warning: it is not safe to destroy $1!"
2365 return 1
2366 fi
2367 }
2368
2369 #
2370 # Get the available ZFS compression options
2371 # $1 option type zfs_set|zfs_compress
2372 #
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
	    gzip-6 gzip-7 gzip-8 gzip-9"

	# Base option set depends on whether the caller wants the values
	# accepted by 'zfs set' or the compression algorithms themselves.
	if [[ $1 == "zfs_compress" ]] ; then
		COMPRESS_OPTS="on lzjb"
	elif [[ $1 == "zfs_set" ]] ; then
		COMPRESS_OPTS="on off lzjb"
	fi

	typeset valid_opts="$COMPRESS_OPTS"

	# Only advertise the gzip variants when the zfs utility knows them.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi

	echo "$valid_opts"
}
2391
2392 #
2393 # Verify zfs operation with -p option work as expected
2394 # $1 operation, value could be create, clone or rename
2395 # $2 dataset type, value could be fs or vol
2396 # $3 dataset name
2397 # $4 new dataset name
2398 #
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes no source dataset; the target is
			# the dataset under test.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	block_device_wait

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2464
2465 #
2466 # Get configuration of pool
2467 # $1 pool name
2468 # $2 config name
2469 #
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# The last column of 'zpool list -H' is the altroot ('-' if unset);
	# it selects whether zdb reads the cachefile (-C) or the labels (-e).
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	# Strip surrounding quote characters from the zdb value.
	# NOTE(review): the unusual ${value#'} / ${value%'} expansions are
	# ksh-specific quote stripping — confirm behavior before touching.
	if [[ -n $value ]] ; then
		value=${value#'}
		value=${value%'}
	fi
	echo $value

	return 0
}
2495
2496 #
# Private function. Randomly select one of the items from the arguments.
2498 #
2499 # $1 count
2500 # $2-n string
2501 #
function _random_get
{
	typeset cnt=$1
	shift

	typeset items="$@"
	typeset -i pick

	# Random 1-based index into the first $cnt space-separated items.
	((pick = RANDOM % cnt + 1))

	echo "$items" | cut -f $pick -d ' '
}
2514
2515 #
# Randomly select one item from the arguments, including a NONE (empty) choice
2517 #
function random_get_with_non
{
	typeset -i cnt=$#

	# Count one extra, virtual empty slot so _random_get may pick an
	# index past the last argument, yielding the NONE (empty) string.
	# The original '((cnt =+ 1))' assigned +1 to cnt instead of
	# incrementing it, so only the first argument — and never NONE —
	# could be returned.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2525
2526 #
# Randomly select one item from the arguments, with no NONE (empty) choice
2528 #
function random_get
{
	typeset -i cnt=$#

	# Every argument is a candidate; no empty NONE slot is added.
	_random_get "$cnt" "$@"
}
2533
2534 #
2535 # Detect if the current system support slog
2536 #
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b
	typeset -i rc=0

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# A dry-run pool creation with a log vdev succeeds only when the
	# platform supports slog devices.
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || rc=1

	rm -r $dir

	return $rc
}
2555
2556 #
2557 # The function will generate a dataset name with specific length
2558 # $1, the length of the name
2559 # $2, the base string to construct the name
2560 #
function gen_dataset_name
{
	# Build a name of at least $1 characters by repeating $2.
	# The result may overshoot when $1 is not a multiple of the
	# base string's length.
	typeset -i want=$1
	typeset base="$2"
	typeset -i chunklen=${#base}
	typeset -i reps
	typeset name=""

	((reps = want / chunklen))
	((want % chunklen != 0)) && ((reps += 1))

	while ((reps > 0)); do
		name="${name}${base}"
		((reps -= 1))
	done

	echo $name
}
2582
2583 #
2584 # Get cksum tuple of dataset
2585 # $1 dataset name
2586 #
2587 # sample zdb output:
2588 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2589 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2590 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2591 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2592 function datasetcksum
2593 {
2594 typeset cksum
2595 sync
2596 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2597 | awk -F= '{print $7}')
2598 echo $cksum
2599 }
2600
2601 #
2602 # Get cksum of file
2603 # #1 file path
2604 #
function checksum
{
	# Print only the CRC (first field) of cksum's output for the file.
	cksum $1 | awk '{print $1}'
}
2611
2612 #
2613 # Get the given disk/slice state from the specific field of the pool
2614 #
2615 function get_device_state #pool disk field("", "spares","logs")
2616 {
2617 typeset pool=$1
2618 typeset disk=${2#$DEV_DSKDIR/}
2619 typeset field=${3:-$pool}
2620
2621 state=$(zpool status -v "$pool" 2>/dev/null | \
2622 nawk -v device=$disk -v pool=$pool -v field=$field \
2623 'BEGIN {startconfig=0; startfield=0; }
2624 /config:/ {startconfig=1}
2625 (startconfig==1) && ($1==field) {startfield=1; next;}
2626 (startfield==1) && ($1==device) {print $2; exit;}
2627 (startfield==1) &&
2628 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2629 echo $state
2630 }
2631
2632
2633 #
2634 # print the given directory filesystem type
2635 #
2636 # $1 directory name
2637 #
function get_fstype
{
	typeset path=$1

	[[ -z $path ]] && log_fail "Usage: get_fstype <directory>"

	#
	# 'df -n' output looks like "<mount> : <fstype>"; print the
	# filesystem type field.
	#
	df -n $path | awk '{print $3}'
}
2652
2653 #
2654 # Given a disk, label it to VTOC regardless what label was on the disk
2655 # $1 disk
2656 #
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	# Scripted input fed to format(1M) below.
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	if is_linux; then
		log_note "Currently unsupported by the test framework"
		return 1
	fi

	if [[ $arch == "i386" ]]; then
		# x86: answer format's label prompts (label type 0 = SMI/
		# VTOC — NOTE(review): confirm against format(1M) prompts),
		# and create an fdisk partition table first.
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk to finish
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		# sparc: no fdisk step; format asks additional questions,
		# answered with defaults (empty lines).
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# Run format non-interactively against the scripted answers.
	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait for the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2705
2706 #
2707 # check if the system was installed as zfsroot or not
# return: 0 if true, otherwise false
2709 #
function is_zfsroot
{
	# True (0) when df reports the root filesystem as zfs.
	df -n / | grep zfs > /dev/null 2>&1
}
2715
2716 #
2717 # get the root filesystem name if it's zfsroot system.
2718 #
2719 # return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# Only the illumos mnttab format is parsed here.
	if ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		    /etc/mnttab)
	fi
	[[ -z "$rootfs" ]] && log_fail "Can not get rootfs"

	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2738
2739 #
2740 # get the rootfs's pool name
2741 # return:
2742 # rootpool name
2743 #
function get_rootpool
{
	typeset rootfs=""

	if ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		    /etc/mnttab)
	fi
	[[ -z "$rootfs" ]] && log_fail "Can not get rootpool"

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool name is the first '/'-separated component.
		echo ${rootfs%%/*}
	else
		log_fail "This is not a zfsroot system."
	fi
}
2764
2765 #
# Check if the given device is a physical device
2767 #
function is_physical_device #device
{
	typeset dev=${1#$DEV_DSKDIR}
	dev=${dev#$DEV_RDSKDIR}

	if is_linux; then
		# Must be a block device, and loop partition support must
		# be available on the system.
		[[ -b "$DEV_DSKDIR/$dev" && \
		    -f /sys/module/loop/parameters/max_part ]]
	else
		# illumos cXtYdZ-style device names.
		echo $dev | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
	fi
}
2782
2783 #
2784 # Check if the given device is a real device (ie SCSI device)
2785 #
function is_real_device #disk
{
	typeset dev=$1
	[[ -z $dev ]] && log_fail "No argument for disk given."

	if is_linux; then
		# lsblk reports TYPE "disk" for whole physical disks.
		lsblk $DEV_RDSKDIR/$dev -o TYPE 2>/dev/null | \
		    egrep disk >/dev/null
	fi
}
2797
2798 #
2799 # Check if the given device is a loop device
2800 #
function is_loop_device #disk
{
	typeset dev=$1
	[[ -z $dev ]] && log_fail "No argument for disk given."

	if is_linux; then
		# lsblk reports TYPE "loop" for loopback devices.
		lsblk $DEV_RDSKDIR/$dev -o TYPE 2>/dev/null | \
		    egrep loop >/dev/null
	fi
}
2812
2813 #
# Check if the given device is a multipath device and if there is a symbolic
2815 # link to a device mapper and to a disk
2816 # Currently no support for dm devices alone without multipath
2817 #
function is_mpath_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		# lsblk reports TYPE "mpath" for device-mapper multipath.
		lsblk $DEV_MPATHDIR/$disk -o TYPE 2>/dev/null | \
		    egrep mpath >/dev/null
		if (($? == 0)); then
			# Additionally require a resolvable symlink under
			# $DEV_MPATHDIR pointing at the dm device.
			readlink $DEV_MPATHDIR/$disk > /dev/null 2>&1
			return $?
		else
			# $? here is the status of the failed (($? == 0))
			# test above, i.e. 1 — the function reports failure.
			return $?
		fi
	fi
}
2834
2835 # Set the slice prefix for disk partitioning depending
2836 # on whether the device is a real, multipath, or loop device.
2837 # Currently all disks have to be of the same type, so only
2838 # checks first disk to determine slice prefix.
2839 #
function set_slice_prefix
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			# NOTE(review): 'i' inside the nawk program is an awk
			# variable (uninitialized, 0), not the shell counter,
			# so $(i + 1) is always $1 and only the first disk is
			# examined — consistent with the header comment, but
			# confirm the loop is intentional.
			disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
			# Unpartitioned mpath devices (18th char of the name
			# not a digit) and real disks take no slice prefix;
			# partitioned mpath and loop devices use "p".
			if ( is_mpath_device $disk ) && [[ -z $(echo $disk | awk 'substr($1,18,1)\
			     ~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
				export SLICE_PREFIX=""
				return 0
			elif ( is_mpath_device $disk || is_loop_device \
			    $disk ); then
				export SLICE_PREFIX="p"
				return 0
			else
				log_fail "$disk not supported for partitioning."
			fi
			(( i = i + 1))
		done
	fi
}
2863
2864 #
2865 # Set the directory path of the listed devices in $DISK_ARRAY_NUM
2866 # Currently all disks have to be of the same type, so only
2867 # checks first disk to determine device directory
2868 # default = /dev (linux)
2869 # real disk = /dev (linux)
2870 # multipath device = /dev/mapper (linux)
2871 #
function set_device_dir
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			# NOTE(review): 'i' inside the nawk program is an awk
			# variable (always 0), so this always selects the
			# first disk; both branches also return immediately,
			# making the loop effectively a single check — as the
			# header comment describes.
			disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
			if is_mpath_device $disk; then
				export DEV_DSKDIR=$DEV_MPATHDIR
				return 0
			else
				export DEV_DSKDIR=$DEV_RDSKDIR
				return 0
			fi
			(( i = i + 1))
		done
	else
		export DEV_DSKDIR=$DEV_RDSKDIR
	fi
}
2893
2894 #
2895 # Get the directory path of given device
2896 #
function get_device_dir #device
{
	typeset device=$1

	# Call the predicate directly.  The original form
	# '! $(is_physical_device ...)' ran the check in a subshell and
	# only worked via the obscure rule that an empty command's exit
	# status is that of its last command substitution.
	if ! is_physical_device $device ; then
		# Strip the trailing path component to get the directory.
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		if [[ -b "$DEV_DSKDIR/$device" ]]; then
			device="$DEV_DSKDIR"
		fi
		echo $device
	else
		echo "$DEV_DSKDIR"
	fi
}
2913
2914 #
2915 # Get persistent name for given disk
2916 #
function get_persistent_disk_name #device
{
	typeset device=$1
	typeset dev_id

	if is_linux; then
		if is_real_device $device; then
			# Use the stable /dev/disk/by-id symlink name that
			# udev reports for this block device.
			dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
			    | egrep disk/by-id | nawk '{print $2; exit}' \
			    | nawk -F / '{print $3}')"
			echo $dev_id
		elif is_mpath_device $device; then
			# Multipath devices are identified by their
			# disk/by-id/dm-uuid-* symlink instead.
			dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
			    | egrep disk/by-id/dm-uuid \
			    | nawk '{print $2; exit}' \
			    | nawk -F / '{print $3}')"
			echo $dev_id
		else
			echo $device
		fi
	else
		# Non-Linux platforms: the given name is already persistent.
		echo $device
	fi
}
2941
2942 #
2943 # Load scsi_debug module with specified parameters
2944 #
function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
{
	typeset devsize=$1
	typeset hosts=$2
	typeset tgts=$3
	typeset luns=$4

	[[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
	    [[ -z $luns ]] && log_fail "Arguments invalid or missing"

	if is_linux; then
		# Dry-run probe: does the kernel provide scsi_debug at all?
		modprobe -n scsi_debug
		if (($? != 0)); then
			# The original split this message across two lines
			# without a '\' continuation, so the bare word
			# "module" was executed as a command.
			log_unsupported \
			    "Platform does not have scsi_debug module"
		fi
		lsmod | egrep scsi_debug > /dev/null
		if (($? == 0)); then
			log_fail "scsi_debug module already installed"
		else
			log_must modprobe scsi_debug dev_size_mb=$devsize \
			    add_host=$hosts num_tgts=$tgts max_luns=$luns
			block_device_wait
			# Confirm the emulated SCSI devices showed up.
			lsscsi | egrep scsi_debug > /dev/null
			if (($? == 1)); then
				log_fail "scsi_debug module install failed"
			fi
		fi
	fi
}
2975
2976 #
2977 # Get the package name
2978 #
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	# Convert every '/' to '-' with parameter expansion (no sed).
	echo "SUNWstc-${dirpath//\//-}"
}
2985
2986 #
2987 # Get the word numbers from a string separated by white space
2988 #
function get_word_count
{
	# Re-split the string on whitespace and count the resulting words.
	set -- $1
	echo $#
}
2993
2994 #
# Verify that the required number of disks is given
2996 #
function verify_disk_count
{
	# $1 whitespace-separated disk list, $2 minimum count (default 1).
	typeset -i min=${2:-1}
	typeset -i found=$(get_word_count "$1")

	((found >= min)) && return

	log_untested "A minimum of $min disks is required to run." \
	    " You specified $found disk(s)"
}
3008
function ds_is_volume
{
	# True (0) when the dataset's type property is "volume".
	[[ "$(get_prop type $1)" == "volume" ]]
}
3015
function ds_is_filesystem
{
	# True (0) when the dataset's type property is "filesystem".
	[[ "$(get_prop type $1)" == "filesystem" ]]
}
3022
function ds_is_snapshot
{
	# True (0) when the dataset's type property is "snapshot".
	[[ "$(get_prop type $1)" == "snapshot" ]]
}
3029
3030 #
3031 # Check if Trusted Extensions are installed and enabled
3032 #
function is_te_enabled
{
	# The matched state line (if any) is deliberately left on stdout,
	# exactly as the original pipeline did.
	svcs -H -o state labeld 2>/dev/null | grep "enabled" && return 0
	return 1
}
3042
3043 # Utility function to determine if a system has multiple cpus.
function is_mp
{
	typeset -i ncpus

	if is_linux; then
		ncpus=$(nproc)
	else
		ncpus=$(psrinfo | wc -l)
	fi

	# Success (0) only when more than one CPU is present.
	((ncpus > 1))
}
3054
function get_cpu_freq
{
	# Print the CPU frequency in MHz for the first processor.
	if is_linux; then
		lscpu | awk '/CPU MHz/ { print $3 }'
		return
	fi
	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
}
3063
3064 # Run the given command as the user provided.
3065 function user_run
3066 {
3067 typeset user=$1
3068 shift
3069
3070 log_note "user:$user $@"
3071 eval su - \$user -c \"$@\" > /tmp/out 2>/tmp/err
3072 return $?
3073 }
3074
3075 #
3076 # Check if the pool contains the specified vdevs
3077 #
3078 # $1 pool
3079 # $2..n <vdev> ...
3080 #
3081 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3082 # vdevs is not in the pool, and 2 if pool name is missing.
3083 #
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	# Capture the pool layout once and search it per vdev.
	typeset tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		if ! grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1; then
			# The original leaked $tmpfile on this early
			# return; clean it up on every exit path.
			rm -f $tmpfile
			return 1
		fi
	done

	rm -f $tmpfile

	return 0
}
3107
function get_max
{
	# Print the largest of the integer arguments.
	# Note: the original declared the variables with 'typeset -l'
	# (lowercase conversion in ksh), almost certainly a typo for an
	# integer declaration; plain variables work in both shells.  The
	# useless '$(echo ...)' subshell around the arithmetic is gone.
	typeset i max=$1
	shift

	for i in "$@"; do
		max=$((max > i ? max : i))
	done

	echo $max
}
3119
function get_min
{
	# Print the smallest of the integer arguments.
	# Same fixes as get_max: drop the ksh 'typeset -l' (lowercase
	# conversion) typo and the useless '$(echo ...)' subshell.
	typeset i min=$1
	shift

	for i in "$@"; do
		min=$((min < i ? min : i))
	done

	echo $min
}
3131
3132 #
3133 # Generate a random number between 1 and the argument.
3134 #
function random
{
	# Uniform pick from 1..$1 inclusive.
	echo $(( RANDOM % $1 + 1 ))
}
3140
3141 # Write data that can be compressed into a directory
function write_compressible
{
	# $1 destination directory (must already exist)
	# $2 amount of data per file (passed to to_bytes / fio --filesize)
	# $3 number of files to create (default 1)
	# $4 fio block size (default 1024k; non-Linux path only)
	# $5 file name prefix (default "file")
	typeset dir=$1
	typeset megs=$2
	typeset nfiles=${3:-1}
	typeset bs=${4:-1024k}
	typeset fname=${5:-file}

	[[ -d $dir ]] || log_fail "No directory: $dir"

	# Under Linux fio is not currently used since its behavior can
	# differ significantly across versions. This includes missing
	# command line options and cases where the --buffer_compress_*
	# options fail to behave as expected.
	if is_linux; then
		typeset file_bytes=$(to_bytes $megs)
		typeset bs_bytes=4096
		typeset blocks=$(($file_bytes / $bs_bytes))

		for (( i = 0; i < $nfiles; i++ )); do
			# Sparse file of the full size; only some blocks
			# are filled in below.
			truncate -s $file_bytes $dir/$fname.$i

			# Write every third block to get 66% compression.
			for (( j = 0; j < $blocks; j += 3 )); do
				dd if=/dev/urandom of=$dir/$fname.$i \
				    seek=$j bs=$bs_bytes count=1 \
				    conv=notrunc >/dev/null 2>&1
			done
		done
	else
		log_must eval "fio \
		    --name=job \
		    --fallocate=0 \
		    --minimal \
		    --randrepeat=0 \
		    --buffer_compress_percentage=66 \
		    --buffer_compress_chunk=4096 \
		    --directory=$dir \
		    --numjobs=$nfiles \
		    --nrfiles=$nfiles \
		    --rw=write \
		    --bs=$bs \
		    --filesize=$megs \
		    --filename_format='$fname.\$jobnum' >/dev/null"
	fi
}
3188
function get_objnum
{
	typeset target=$1

	[[ -e $target ]] || log_fail "No such file or directory: $target"
	# Print the inode number, which is the ZFS object number.
	stat -c %i $target
}
3198
3199 #
3200 # Sync data to the pool
3201 #
3202 # $1 pool name
3203 # $2 boolean to force uberblock (and config including zpool cache file) update
3204 #
function sync_pool #pool <force>
{
	typeset pool=${1:-$TESTPOOL}
	typeset force=${2:-false}

	# 'zpool sync -f' additionally forces an uberblock (and config)
	# update; plain 'zpool sync' just flushes dirty data.
	typeset opts=""
	[[ $force == true ]] && opts="-f"
	log_must zpool sync $opts $pool

	return 0
}
3218
3219 #
3220 # Wait for zpool 'freeing' property drops to zero.
3221 #
3222 # $1 pool name
3223 #
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll once a second until the 'freeing' property reaches zero.
	until [[ "$(zpool list -Ho freeing $pool)" == "0" ]]; do
		log_must sleep 1
	done
}
3232
3233 #
3234 # Wait for every device replace operation to complete
3235 #
3236 # $1 pool name
3237 #
function wait_replacing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll until no "replacing-N" vdev remains in the status output.
	until [[ -z "$(zpool status $pool |
	    awk '/replacing-[0-9]+/ {print $1}')" ]]; do
		log_must sleep 1
	done
}
3247
3248 #
3249 # Setup custom environment for the ZED.
3250 #
function zed_setup
{
	# The ZED is Linux-only; a no-op elsewhere.
	if ! is_linux; then
		return
	fi

	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	if [[ ! -e $VDEVID_CONF ]]; then
		log_must touch $VDEVID_CONF
	fi

	# Refuse to run if the real system config exists; the symlink
	# created below would otherwise clobber it.
	if [[ -e $VDEVID_CONF_ETC ]]; then
		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
	fi

	# Create a symlink for /etc/zfs/vdev_id.conf file.
	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC

	# Setup minimal ZED configuration. Individual test cases should
	# add additional ZEDLETs as needed for their specific test.
	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR

	# Customize the zed.rc file to enable the full debug log:
	# drop the commented-out default and append our own setting.
	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
	echo "ZED_DEBUG_LOG=$ZEDLET_DIR/zed.debug.log" >>$ZEDLET_DIR/zed.rc

	log_must cp ${ZEDLET_LIBEXEC_DIR}/all-syslog.sh $ZEDLET_DIR
	log_must cp ${ZEDLET_LIBEXEC_DIR}/all-debug.sh $ZEDLET_DIR
	# Pre-create the debug log so later readers never race its creation.
	log_must touch $ZEDLET_DIR/zed.debug.log
}
3285
3286 #
3287 # Cleanup custom ZED environment.
3288 #
function zed_cleanup
{
	if ! is_linux; then
		return
	fi

	# Remove every file zed_setup/zed_start created, then the
	# directory itself (best-effort: rmdir is not log_must'd).
	typeset f
	for f in zed.rc zed-functions.sh all-syslog.sh all-debug.sh \
	    zed.pid zedlog zed.debug.log state; do
		log_must rm -f ${ZEDLET_DIR}/$f
	done
	log_must rm -f $VDEVID_CONF_ETC
	log_must rm -f $VDEVID_CONF
	rmdir $ZEDLET_DIR
}
3307
3308 #
3309 # Check if ZED is currently running, if not start ZED.
3310 #
function zed_start
{
	# The ZED is Linux-only; a no-op elsewhere.
	if ! is_linux; then
		return
	fi

	# ZEDLET_DIR=/var/tmp/zed
	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	# Verify the ZED is not already running.
	pgrep -x zed > /dev/null
	if (($? == 0)); then
		log_fail "ZED already running"
	fi

	log_note "Starting ZED"
	# run ZED in the background and redirect foreground logging
	# output to zedlog (-F keeps zed in the foreground so the shell's
	# '&' owns the process; -p/-s point its pid and state files at
	# the test-private directory)
	log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid" \
	    "-s $ZEDLET_DIR/state 2>${ZEDLET_DIR}/zedlog &"

	return 0
}
3336
3337 #
3338 # Kill ZED process
3339 #
function zed_stop
{
	# Keep the pid local; the original let zedpid leak into the
	# global namespace.
	typeset zedpid

	if ! is_linux; then
		return
	fi

	log_note "Stopping ZED"
	# Only attempt the kill when zed_start left a pid file behind.
	if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
		zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
		log_must kill $zedpid
	fi

	return 0
}
3354
3355 #
# Check if the provided device is actively being used as a swap device.
3357 #
function is_swap_inuse
{
	typeset dev=$1

	if [[ -z $dev ]] ; then
		log_note "No device specified."
		return 1
	fi

	# On Linux swapon lists the resolved device path, so canonicalize
	# before matching; elsewhere match the name as given.
	if is_linux; then
		swapon -s | grep -w $(readlink -f $dev) > /dev/null 2>&1
	else
		swap -l | grep -w $dev > /dev/null 2>&1
	fi
}
3375
3376 #
3377 # Setup a swap device using the provided device.
3378 #
function swap_setup
{
	typeset dev=$1

	if ! is_linux; then
		log_must swap -a $dev
		return 0
	fi

	# Linux needs the device formatted before it can be enabled.
	log_must mkswap $dev > /dev/null 2>&1
	log_must swapon $dev

	return 0
}
3392
3393 #
3394 # Cleanup a swap device on the provided device.
3395 #
function swap_cleanup
{
	typeset dev=$1

	# Nothing to do unless the device is actively used for swap.
	if is_swap_inuse $dev; then
		if is_linux; then
			log_must swapoff $dev
		else
			log_must swap -d $dev
		fi
	fi

	return 0
}