]> git.proxmox.com Git - mirror_zfs.git/blob - tests/zfs-tests/include/libtest.shlib
Added auto-replace FMA test for the ZFS Test Suite
[mirror_zfs.git] / tests / zfs-tests / include / libtest.shlib
1 #!/bin/ksh -p
2 #
3 # CDDL HEADER START
4 #
5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
8 #
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
13 #
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
19 #
20 # CDDL HEADER END
21 #
22
23 #
24 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25 # Use is subject to license terms.
26 # Copyright (c) 2012, 2015 by Delphix. All rights reserved.
27 # Copyright 2016 Nexenta Systems, Inc.
28 # Copyright (c) 2017 Lawrence Livermore National Security, LLC.
29 #
30
31 . ${STF_TOOLS}/include/logapi.shlib
32
33 # Determine if this is a Linux test system
34 #
35 # Return 0 if platform Linux, 1 if otherwise
36
function is_linux
{
	# Succeed exactly when uname(1) reports a GNU/Linux system.
	[[ $($UNAME -o) == "GNU/Linux" ]]
}
45
46 # Determine if this is a 32-bit system
47 #
48 # Return 0 if platform is 32-bit, 1 if otherwise
49
function is_32bit
{
	# getconf LONG_BIT reports the platform's native word size.
	[[ $(getconf LONG_BIT) == "32" ]]
}
58
59 # Determine if kmemleak is enabled
60 #
61 # Return 0 if kmemleak is enabled, 1 if otherwise
62
function is_kmemleak
{
	# kmemleak exists only on Linux kernels built with
	# CONFIG_DEBUG_KMEMLEAK; probe its debugfs entry.
	is_linux && [[ -e /sys/kernel/debug/kmemleak ]]
}
71
72 # Determine whether a dataset is mounted
73 #
74 # $1 dataset name
75 # $2 filesystem type; optional - defaulted to zfs
76 #
77 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
78
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# $1 may be a mountpoint (leading '/') or a dataset
			# name; compare it against the matching column of
			# 'zfs mount' output.
			if [[ "$1" == "/"* ]] ; then
				for out in $($ZFS mount | $AWK '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $($ZFS mount | $AWK '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			out=$($DF -F $fstype $1 2>/dev/null)
			ret=$?
			# Propagate df's failure (caller treats 2 as error).
			(($ret != 0)) && return $ret

			# df output looks like "<dir> (<device>) ...";
			# carve out the directory and the device name.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
		ext2)
			out=$($DF -t $fstype $1 2>/dev/null)
			return $?
		;;
		zvol)
			# Resolve the zvol symlink and look for the backing
			# device at the start of a 'mount' output line.
			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
				link=$(readlink -f $ZVOL_DEVDIR/$1)
				[[ -n "$link" ]] && \
				    $MOUNT | $GREP -q "^$link" && \
				    return 0
			fi
		;;
	esac

	return 1
}
126
127 # Return 0 if a dataset is mounted; 1 otherwise
128 #
129 # $1 dataset name
130 # $2 filesystem type; optional - defaulted to zfs
131
function mounted
{
	# Thin positive wrapper: succeed iff ismounted reports mounted.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
138
139 # Return 0 if a dataset is unmounted; 1 otherwise
140 #
141 # $1 dataset name
142 # $2 filesystem type; optional - defaulted to zfs
143
function unmounted
{
	# Succeed only when ismounted's status is exactly 1 ("unmounted");
	# its error status (2) still counts as failure here.
	ismounted $1 $2
	[[ $? -eq 1 ]] && return 0
	return 1
}
150
151 # split line on ","
152 #
153 # $1 - line to split
154
function splitline
{
	typeset csv_line=$1

	# Emit the line with every comma replaced by a space.
	$ECHO $csv_line | $SED 's/,/ /g'
}
159
function default_setup
{
	# Perform the standard setup, then record the test as passing.
	default_setup_noexit "$@"
	log_pass
}
166
167 #
168 # Given a list of disks, setup storage pools and datasets.
169 #
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3
	log_note begin default_setup_noexit

	# In the global zone (re)create $TESTPOOL from scratch; in a
	# local zone the pool is imported back via reexport_pool.
	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		# Remove any stale default mountpoint directory.
		[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
		log_note creating pool $TESTPOOL $disklist
		log_must $ZPOOL create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	$RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	$MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	# Optionally add a container dataset ($TESTCTR) holding a
	# second filesystem mounted at $TESTDIR1.
	if [[ -n $container ]]; then
		$RM -rf $TESTDIR1 || \
		    log_unresolved Could not remove $TESTDIR1
		$MKDIR -p $TESTDIR1 || \
		    log_unresolved Could not create $TESTDIR1

		log_must $ZFS create $TESTPOOL/$TESTCTR
		log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
		log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must $ZFS set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	# Optionally add a volume; only the global zone can create a
	# real block device (-V), so local zones get a plain dataset.
	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
			block_device_wait
		else
			log_must $ZFS create $TESTPOOL/$TESTVOL
		fi
	fi
}
216
217 #
218 # Given a list of disks, setup a storage pool, file system and
219 # a container.
220 #
function default_container_setup
{
	typeset pool_disks=$1

	# Standard setup plus a container dataset.
	default_setup "$pool_disks" "true"
}
227
228 #
229 # Given a list of disks, setup a storage pool,file system
230 # and a volume.
231 #
function default_volume_setup
{
	typeset pool_disks=$1

	# Standard setup plus a volume; no container is requested.
	default_setup "$pool_disks" "" "true"
}
238
239 #
240 # Given a list of disks, setup a storage pool,file system,
241 # a container and a volume.
242 #
function default_container_volume_setup
{
	typeset pool_disks=$1

	# Standard setup plus both a container and a volume.
	default_setup "$pool_disks" "true" "true"
}
249
250 #
251 # Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
252 # filesystem
253 #
254 # $1 Existing filesystem or volume name. Default, $TESTFS
255 # $2 snapshot name. Default, $TESTSNAP
256 #
function create_snapshot
{
	typeset target=${1:-$TESTFS}
	typeset snapname=${2:-$TESTSNAP}

	# Both halves of the snapshot name must be known.
	[[ -z $target ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snapname ]] && log_fail "Snapshot's name is undefined."

	# Refuse to clobber an existing snapshot, and insist the
	# dataset being snapshotted actually exists.
	snapexists $target@$snapname && \
	    log_fail "$target@$snapname already exists."
	datasetexists $target || \
	    log_fail "$target must exist."

	log_must $ZFS snapshot $target@$snapname
}
273
274 #
275 # Create a clone from a snapshot, default clone name is $TESTCLONE.
276 #
277 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
278 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
279 #
function create_clone # snapshot clone
{
	typeset src_snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset new_clone=${2:-$TESTPOOL/$TESTCLONE}

	# Guard against empty names (possible when the defaults are unset).
	[[ -z $src_snap ]] && \
	    log_fail "Snapshot name is undefined."
	[[ -z $new_clone ]] && \
	    log_fail "Clone name is undefined."

	log_must $ZFS clone $src_snap $new_clone
}
292
293 #
294 # Create a bookmark of the given snapshot. Defaultly create a bookmark on
295 # filesystem.
296 #
297 # $1 Existing filesystem or volume name. Default, $TESTFS
298 # $2 Existing snapshot name. Default, $TESTSNAP
299 # $3 bookmark name. Default, $TESTBKMARK
300 #
function create_bookmark
{
	typeset target=${1:-$TESTFS}
	typeset snapname=${2:-$TESTSNAP}
	typeset bookmark=${3:-$TESTBKMARK}

	# All three name components must be known.
	[[ -z $target ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snapname ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bookmark ]] && log_fail "Bookmark's name is undefined."

	# A bookmark may be created only once, from an existing
	# snapshot of an existing dataset.
	bkmarkexists $target#$bookmark && \
	    log_fail "$target#$bookmark already exists."
	datasetexists $target || \
	    log_fail "$target must exist."
	snapexists $target@$snapname || \
	    log_fail "$target@$snapname must exist."

	log_must $ZFS bookmark $target@$snapname $target#$bookmark
}
321
function default_mirror_setup
{
	# Build the mirrored pool and dataset, then flag the test as passed.
	default_mirror_setup_noexit $1 $2 $3
	log_pass
}
328
329 #
330 # Given a pair of disks, set up a storage pool and dataset for the mirror
331 # @parameters: $1 the primary side of the mirror
332 # $2 the secondary side of the mirror
333 # @uses: ZPOOL ZFS TESTPOOL TESTFS
334 function default_mirror_setup_noexit
335 {
336 readonly func="default_mirror_setup_noexit"
337 typeset primary=$1
338 typeset secondary=$2
339
340 [[ -z $primary ]] && \
341 log_fail "$func: No parameters passed"
342 [[ -z $secondary ]] && \
343 log_fail "$func: No secondary partition passed"
344 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
345 log_must $ZPOOL create -f $TESTPOOL mirror $@
346 log_must $ZFS create $TESTPOOL/$TESTFS
347 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
348 }
349
350 #
351 # create a number of mirrors.
352 # We create a number($1) of 2 way mirrors using the pairs of disks named
353 # on the command line. These mirrors are *not* mounted
354 # @parameters: $1 the number of mirrors to create
355 # $... the devices to use to create the mirrors on
356 # @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	# Consume the remaining arguments two disks at a time; each
	# pair becomes pool $TESTPOOL<n> built as a 2-way mirror.
	while ((nmirrors > 0)); do
		# Both disks of the pair must be present.
		log_must test -n "$1" -a -n "$2"
		# Remove any stale default mountpoint directory.
		[[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
		log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}
370
371 #
372 # create a number of raidz pools.
373 # We create a number($1) of 2 raidz pools using the pairs of disks named
374 # on the command line. These pools are *not* mounted
375 # @parameters: $1 the number of pools to create
376 # $... the devices to use to create the pools on
377 # @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	# Consume the remaining arguments two disks at a time; each
	# pair becomes pool $TESTPOOL<n> built as a 2-disk raidz.
	while ((nraidzs > 0)); do
		# Both disks of the pair must be present.
		log_must test -n "$1" -a -n "$2"
		# Remove any stale default mountpoint directory.
		[[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
		log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}
391
392 #
393 # Destroy the configured testpool mirrors.
394 # the mirrors are of the form ${TESTPOOL}{number}
395 # @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	# The generic cleanup path already tears down every test pool.
	default_cleanup_noexit
	log_pass
}
402
403 #
404 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
405 # $1 the list of disks
406 #
#
# Given a minimum of two disks, set up a storage pool and dataset for
# the raid-z.
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	# raidz needs at least two member disks.
	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	# Remove any stale default mountpoint directory.
	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	# Use the full disk list; the previous "$1 $2 $3" silently
	# dropped every disk past the third when the disks were passed
	# as separate arguments.
	log_must $ZPOOL create -f $TESTPOOL raidz $disklist
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
423
424 #
425 # Common function used to cleanup storage pools and datasets.
426 #
427 # Invoked at the start of the test suite to ensure the system
428 # is in a known state, and also at the end of each set of
429 # sub-tests to ensure errors from one set of tests doesn't
430 # impact the execution of the next set.
431
function default_cleanup
{
	# Clean up everything, then record a passing result.
	default_cleanup_noexit
	log_pass
}
438
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		$ZFS unmount -a > /dev/null 2>&1
		# Pools named in $KEEP (an egrep alternation) are never
		# destroyed; the root pool is always protected.
		[[ -z "$KEEP" ]] && KEEP="rpool"
		exclude=`eval $ECHO \"'(${KEEP})'\"`
		ALL_POOLS=$($ZPOOL list -H -o name \
		    | $GREP -v "$NO_POOLS" | $EGREP -vw "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so. The list is re-read after every destroy because
		# removing one pool can make others safe to destroy.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				ALL_POOLS=$($ZPOOL list -H -o name \
				    | $GREP -v "$NO_POOLS" \
				    | $EGREP -v "$exclude")
			done
		done

		$ZFS mount -a
	else
		typeset fs=""
		# In a local zone only the delegated datasets below
		# $ZONE_POOL/$ZONE_CTR<n> may be destroyed.
		for fs in $($ZFS list -H -o name \
		    | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
			    log_must $ZFS destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $($ZFS list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must $RM -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $($ZFS list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must $ZFS set reservation=none $fs
				log_must $ZFS set recordsize=128K $fs
				log_must $ZFS set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# checksum cannot be reset on encrypted
				# datasets, so skip it when encryption is
				# active (or undeterminable).
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
				    [[ "$enc" == "off" ]]; then
					log_must $ZFS set checksum=on $fs
				fi
				log_must $ZFS set compression=off $fs
				log_must $ZFS set atime=on $fs
				log_must $ZFS set devices=off $fs
				log_must $ZFS set exec=on $fs
				log_must $ZFS set setuid=on $fs
				log_must $ZFS set readonly=off $fs
				log_must $ZFS set snapdir=hidden $fs
				log_must $ZFS set aclmode=groupmask $fs
				log_must $ZFS set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
	    log_must $RM -rf $TESTDIR

	# Multipath devices must be left un-partitioned for the suite.
	disk1=${DISKS%% *}
	if is_mpath_device $disk1; then
		delete_partitions
	fi
}
521
522
523 #
524 # Common function used to cleanup storage pools, file systems
525 # and containers.
526 #
function default_container_cleanup
{
	# In a local zone the pool must be re-exported before its
	# datasets can be manipulated.
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1

	# Destroy the contained filesystem first, then the container
	# itself (with -f, since it may still hold mounted children).
	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must $RM -rf $TESTDIR1 > /dev/null 2>&1

	# Finish with the generic cleanup (which also calls log_pass).
	default_cleanup
}
548
549 #
550 # Common function used to cleanup snapshot of file system or volume. Default to
551 # delete the file system's snapshot
552 #
553 # $1 snapshot name
554 #
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not existed."
	fi

	#
	# Only query the mountpoint while the snapshot is actually
	# mounted; when it is unmounted, the value reported by
	# 'get_prop' does not correspond to a real directory.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $snap failed."
	fi

	log_must $ZFS destroy $snap
	# Remove the now-orphaned mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must $RM -rf $mtpt
}
579
580 #
581 # Common function used to cleanup clone.
582 #
583 # $1 clone name
584 #
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not existed."
	fi

	# Same reasoning as in destroy_snapshot: only trust the
	# mountpoint property while the clone is actually mounted.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $clone failed."
	fi

	log_must $ZFS destroy $clone
	# Remove the now-orphaned mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must $RM -rf $mtpt
}
605
606 #
607 # Common function used to cleanup bookmark of file system or volume. Default
608 # to delete the file system's bookmark.
609 #
610 # $1 bookmark name
611 #
#
# Common function used to cleanup bookmark of file system or volume.
# Default to delete the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fixed: the message previously expanded the misspelled
		# variable '$bkmarkp', which printed an empty name.
		log_fail "'$bkmark' does not existed."
	fi

	log_must $ZFS destroy $bkmark
}
622
623 # Return 0 if a snapshot exists; $? otherwise
624 #
625 # $1 - snapshot name
626
function snapexists
{
	# Listing the snapshot succeeds exactly when it exists; the
	# command's status is the function's status.
	$ZFS list -H -t snapshot "$1" > /dev/null 2>&1
}
632
633 #
634 # Return 0 if a bookmark exists; $? otherwise
635 #
636 # $1 - bookmark name
637 #
function bkmarkexists
{
	# Listing the bookmark succeeds exactly when it exists; the
	# command's status is the function's status.
	$ZFS list -H -t bookmark "$1" > /dev/null 2>&1
}
643
644 #
645 # Set a property to a certain value on a dataset.
646 # Sets a property of the dataset to the value as passed in.
647 # @param:
648 # $1 dataset who's property is being set
649 # $2 property to set
650 # $3 value to set property to
651 # @return:
652 # 0 if the property could be set.
653 # non-zero otherwise.
654 # @use: ZFS
655 #
function dataset_setprop
{
	typeset fn=dataset_setprop

	# Dataset, property and value are all mandatory.
	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	# Capture combined output so failures can be reported verbatim.
	typeset cmd_out
	cmd_out=$($ZFS set $2=$3 $1 2>&1)
	typeset status=$?
	if ((status != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $status"
		log_note "Output: $cmd_out"
		return $status
	fi
	return 0
}
676
677 #
678 # Assign suite defined dataset properties.
679 # This function is used to apply the suite's defined default set of
680 # properties to a dataset.
681 # @parameters: $1 dataset to use
682 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
683 # @returns:
684 # 0 if the dataset has been altered.
685 # 1 if no pool name was passed in.
686 # 2 if the dataset could not be found.
687 # 3 if the dataset could not have it's properties set.
688 #
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# Verify the dataset appears in 'zfs list' output.
	# NOTE(review): this scans every whitespace-separated word of
	# the listing (header words included) for an exact match.
	for confset in $($ZFS list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	# Apply the suite-wide defaults when they are configured.
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
		    return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
		    return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
716
717 #
718 # Check a numeric assertion
719 # @parameter: $@ the assertion to check
720 # @output: big loud notice if assertion failed
721 # @use: log_fail
722 #
function assert
{
	# Evaluate the arithmetic assertion; complain loudly on failure.
	if ! (($@)); then
		log_fail "$@"
	fi
}
727
728 #
729 # Function to format partition size of a disk
730 # Given a disk cxtxdx reduces all partitions
731 # to 0 size
732 #
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset i

	if is_linux; then
		# Writing a fresh GPT label wipes every partition at once.
		log_must $FORMAT $DEV_DSKDIR/$diskname -s -- mklabel gpt
	else
		# Shrink each slice to zero; slice 2 is skipped because it
		# conventionally represents the whole disk on illumos.
		for i in 0 1 3 4 5 6 7
		do
			set_partition $i "" 0mb $diskname
		done
	fi
}
747
748 #
749 # Given a slice, size and disk, this function
750 # formats the slice to the specified size.
751 # Size should be specified with units as per
752 # the `format` command requirements eg. 100mb 3gb
753 #
754 # NOTE: This entire interface is problematic for the Linux parted utilty
755 # which requires the end of the partition to be specified. It would be
756 # best to retire this interface and replace it with something more flexible.
757 # At the moment a best effort is made.
758 #
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	typeset ret_val=0
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."

	if is_linux; then
		# Strip the unit suffix ("mb"/"gb" in any case mix).
		typeset size_mb=${size%%[mMgG]}

		size_mb=${size_mb%%[mMgG][bB]}
		# Convert gigabytes to megabytes.  The old check looked
		# only at the second character of $size, which failed for
		# any multi-digit size such as "10gb".
		if [[ $size == *[gG]* ]]; then
			((size_mb = size_mb * 1024))
		fi

		# Create GPT partition table when setting slice 0 or
		# when the device doesn't already contain a GPT label.
		$FORMAT $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
		typeset has_label=$?
		if [[ $slicenum -eq 0 || $has_label -ne 0 ]]; then
			log_must $FORMAT $DEV_DSKDIR/$disk -s -- mklabel gpt
		fi

		# When no start is given align on the first cylinder.
		if [[ -z "$start" ]]; then
			start=1
		fi

		# Determine the cylinder size for the device and using
		# that calculate the end offset in cylinders.
		typeset -i cly_size_kb=0
		cly_size_kb=$($FORMAT -m $DEV_DSKDIR/$disk -s -- \
		    unit cyl print | $HEAD -3 | $TAIL -1 | \
		    $AWK -F '[:k.]' '{print $4}')
		((end = (size_mb * 1024 / cly_size_kb) + start))

		log_must $FORMAT $DEV_DSKDIR/$disk -s -- \
		    mkpart part$slicenum ${start}cyl ${end}cyl

		$BLOCKDEV --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
		block_device_wait
	else
		# Drive the interactive format(1M) utility with a
		# scripted command file.
		typeset format_file=/var/tmp/format_in.$$

		$ECHO "partition" >$format_file
		$ECHO "$slicenum" >> $format_file
		$ECHO "" >> $format_file
		$ECHO "" >> $format_file
		$ECHO "$start" >> $format_file
		$ECHO "$size" >> $format_file
		$ECHO "label" >> $format_file
		$ECHO "" >> $format_file
		$ECHO "q" >> $format_file
		$ECHO "q" >> $format_file

		$FORMAT -e -s -d $disk -f $format_file
		# Capture format's status immediately; previously it was
		# read after the branch and on the Linux path reflected
		# an unrelated command.  The temp file is also removed
		# here, where it actually exists.
		ret_val=$?
		$RM -f $format_file
	fi
	[[ $ret_val -ne 0 ]] && \
	    log_fail "Unable to format $disk slice $slicenum to $size"
	return 0
}
824
825 #
826 # Delete all partitions on all disks - this is specifically for the use of multipath
827 # devices which currently can only be used in the test suite as raw/un-partitioned
828 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
829 #
function delete_partitions
{
	typeset -i j=1

	# Derive the disk count and list lazily from $DISKS when the
	# caller has not provided them.
	if [[ -z $DISK_ARRAY_NUM ]]; then
		DISK_ARRAY_NUM=$($ECHO ${DISKS} | $NAWK '{print NF}')
	fi
	if [[ -z $DISKSARRAY ]]; then
		DISKSARRAY=$DISKS
	fi

	if is_linux; then
		if (( $DISK_ARRAY_NUM == 1 )); then
			# Single disk: remove partitions 1..MAX_PARTITIONS-1,
			# cross-checking with lsblk that each is really gone.
			while ((j < MAX_PARTITIONS)); do
				$FORMAT $DEV_DSKDIR/$DISK -s rm $j > /dev/null 2>&1
				if (( $? == 1 )); then
					# parted failed: either there was
					# nothing left to remove (done) or a
					# partition survived (fatal).
					$LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 1 )); then
						log_note "Partitions for $DISK should be deleted"
					else
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
					return 0
				else
					# parted succeeded: verify lsblk no
					# longer shows the partition.
					$LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 0 )); then
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
				fi
				((j = j+1))
			done
		else
			# Multiple disks: same procedure per disk; setting
			# j=7 terminates the inner loop early once a disk is
			# confirmed clean.
			# NOTE(review): early exit assumes MAX_PARTITIONS
			# is 8 — confirm against the suite's configuration.
			for disk in `$ECHO $DISKSARRAY`; do
				while ((j < MAX_PARTITIONS)); do
					$FORMAT $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
					if (( $? == 1 )); then
						$LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 1 )); then
							log_note "Partitions for $disk should be deleted"
						else
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
						j=7
					else
						$LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 0 )); then
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
					fi
					((j = j+1))
				done
				j=1
			done
		fi
	fi
	return 0
}
887
888 #
889 # Get the end cyl of the given slice
890 #
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	if is_linux; then
		# parted prints "... <end>cyl ..." for each partition;
		# strip the unit suffix and advance one cylinder so the
		# next slice can start right after this one.
		endcyl=$($FORMAT -s $DEV_DSKDIR/$disk -- unit cyl print | \
		    $GREP "part${slice}" | \
		    $AWK '{print $3}' | \
		    $SED 's,cyl,,')
		((endcyl = (endcyl + 1)))
	else
		# Normalize /dev/[r]dsk paths down to the bare cXtXdX name.
		disk=${disk#/dev/dsk/}
		disk=${disk#/dev/rdsk/}
		disk=${disk%s*}

		# Sectors per cylinder, needed to convert prtvtoc's
		# sector offsets into cylinders.
		typeset -i ratio=0
		ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
		    $GREP "sectors\/cylinder" | \
		    $AWK '{print $2}')

		if ((ratio == 0)); then
			return
		fi

		# Last sector of the slice, taken from the vtoc table.
		typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
		    $NAWK -v token="$slice" '{if ($1==token) print $6}')

		((endcyl = (endcyl + 1) / ratio))
	fi

	echo $endcyl
}
927
928
929 #
930 # Given a size,disk and total slice number, this function formats the
931 # disk slices from 0 to the total slice number with the same specified
932 # size.
933 #
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
	typeset -i slice=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset -i nslices=$3
	typeset start_cyl

	# Wipe any existing layout, then lay out the requested number
	# of equally sized slices back to back.
	zero_partitions $disk_name
	while ((slice < nslices)); do
		# Slice 2 is reserved for the whole disk on illumos.
		if ! is_linux && ((slice == 2)); then
			((slice = slice + 1))
			continue
		fi
		set_partition $slice "$start_cyl" $slice_size $disk_name
		start_cyl=$(get_endslice $disk_name $slice)
		((slice = slice + 1))
	done
}
955
956 #
957 # This function continues to write to a filenum number of files into dirnum
958 # number of directories until either $FILE_WRITE returns an error or the
959 # maximum number of files per directory have been written.
960 #
961 # Usage:
962 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
963 #
964 # Return value: 0 on success
965 # non 0 on error
966 #
967 # Where :
968 # destdir: is the directory where everything is to be created under
969 # dirnum: the maximum number of subdirectories to use, -1 no limit
970 # filenum: the maximum number of files per subdirectory
971 # bytes: number of bytes to write
972 # num_writes: numer of types to write out bytes
973 # data: the data that will be written
974 #
975 # E.g.
976 # file_fs /testdir 20 25 1024 256 0
977 #
978 # Note: bytes * num_writes equals the size of the testfile
979 #
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1
	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	# Write files into numbered subdirectories until the directory
	# limit is reached or $FILE_WRITE fails (e.g. out of space).
	log_must $MKDIR -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# A negative dirnum means "no limit on directories".
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		$FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		if (($retval != 0)); then
			# Stop on the first write error; its status is
			# returned to the caller.
			odirnum=0
			break
		fi
		if (($fn >= $filenum)); then
			# Directory is full; start the next one.
			fn=0
			((idirnum = idirnum + 1))
			log_must $MKDIR -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
1017
1018 #
1019 # Simple function to get the specified property. If unable to
1020 # get the property then exits.
1021 #
1022 # Note property is in 'parsable' format (-p)
1023 #
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset value

	# -p gives parsable output; -H drops the header line.
	value=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
	if (($? != 0)); then
		log_note "Unable to get $prop property for dataset " \
		    "$dataset"
		return 1
	fi

	$ECHO "$value"
	return 0
}
1040
1041 #
1042 # Simple function to get the specified property of pool. If unable to
1043 # get the property then exits.
1044 #
1045 # Note property is in 'parsable' format (-p)
1046 #
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		# 'zpool get -pH' prints "NAME PROPERTY VALUE SOURCE";
		# keep the VALUE column of the last line.
		prop_val=$($ZPOOL get -pH $prop $pool 2>/dev/null | $TAIL -1 | \
		    $AWK '{print $3}')
		# NOTE(review): $? here reflects the awk stage of the
		# pipeline, not zpool itself — confirm intent.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			    "$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	$ECHO "$prop_val"
	return 0
}
1069
1070 # Return 0 if a pool exists; $? otherwise
1071 #
1072 # $1 - pool name
1073
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# 'zpool get' succeeds only for pools that are present; its
	# status becomes the function's status.
	$ZPOOL get name "$pool" > /dev/null 2>&1
}
1086
1087 # Return 0 if all the specified datasets exist; $? otherwise
1088 #
1089 # $1-n dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Every named dataset must be visible to 'zfs get'; stop at
	# the first miss and propagate its status.
	typeset ds
	for ds in "$@"; do
		$ZFS get name $ds > /dev/null 2>&1 || \
		    return $?
	done

	return 0
}
1105
1106 # return 0 if none of the specified datasets exists, otherwise return 1.
1107 #
1108 # $1-n dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Fail as soon as any of the named datasets turns out to exist.
	typeset ds
	for ds in "$@"; do
		$ZFS list -H -t filesystem,snapshot,volume $ds > /dev/null 2>&1 \
		    && return 1
	done

	return 0
}
1124
1125 #
1126 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1127 #
1128 # Returns 0 if shared, 1 otherwise.
1129 #
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name is first translated to its mountpoint; values
	# that cannot be mounted cannot be shared.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*) fs=$mtpt
					;;
			esac
		fi
	fi

	# On Linux the exported path is the first column of the share
	# listing; on illumos it is the second.
	if is_linux; then
		for mtpt in `$SHARE | $AWK '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	fi

	for mtpt in `$SHARE | $AWK '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared; log the NFS service state to aid debugging.
	typeset stat=$($SVCS -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1171
1172 #
1173 # Given a dataset name determine if it is shared via SMB.
1174 #
1175 # Returns 0 if shared, 1 otherwise.
1176 #
function is_shared_smb
{
	typeset fs=$1
	typeset mtpt

	if datasetnonexists "$fs" ; then
		return 1
	else
		# Samba usershare names replace '/' with '_'.
		fs=$(echo $fs | sed 's@/@_@g')
	fi

	if is_linux; then
		# Look for the translated name in the usershare listing.
		for mtpt in `$NET usershare list | $AWK '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	else
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi
}
1200
1201 #
1202 # Given a mountpoint, determine if it is not shared via NFS.
1203 #
1204 # Returns 0 if not shared, 1 otherwise.
1205 #
function not_shared
{
	typeset fs=$1

	# Simply invert is_shared's verdict.
	if is_shared $fs; then
		return 1
	fi

	return 0
}
1217
1218 #
1219 # Given a dataset determine if it is not shared via SMB.
1220 #
1221 # Returns 0 if not shared, 1 otherwise.
1222 #
function not_shared_smb
{
	typeset fs=$1

	# Simply invert is_shared_smb's verdict.
	if is_shared_smb $fs; then
		return 1
	fi

	return 0
}
1234
1235 #
1236 # Helper function to unshare a mountpoint.
1237 #
function unshare_fs #fs
{
	typeset fs=$1

	# Unshare only when the dataset is currently shared over NFS
	# or SMB.
	if is_shared $fs || is_shared_smb $fs; then
		log_must $ZFS unshare $fs
	fi

	return 0
}
1249
1250 #
1251 # Helper function to share a NFS mountpoint.
1252 #
function share_nfs #fs
{
	typeset fs=$1

	# Share only when not already shared; the share command syntax
	# differs between Linux and illumos.
	if is_linux; then
		if ! is_shared $fs; then
			log_must $SHARE "*:$fs"
		fi
	else
		if ! is_shared $fs; then
			log_must $SHARE -F nfs $fs
		fi
	fi

	return 0
}
1271
1272 #
1273 # Helper function to unshare a NFS mountpoint.
1274 #
function unshare_nfs #fs
{
	typeset fs=$1

	# Unshare only when currently shared; the unshare command
	# syntax differs between Linux and illumos.
	if is_linux; then
		if is_shared $fs; then
			log_must $UNSHARE -u "*:$fs"
		fi
	else
		if is_shared $fs; then
			log_must $UNSHARE -F nfs $fs
		fi
	fi

	return 0
}
1293
1294 #
1295 # Helper function to show NFS shares.
1296 #
function showshares_nfs
{
	# Pick the platform-specific listing flags, then invoke once.
	typeset opts="-F nfs"

	is_linux && opts="-v"
	$SHARE $opts

	return 0
}
1307
1308 #
1309 # Helper function to show SMB shares.
1310 #
function showshares_smb
{
	# SMB shares come from Samba usershares on Linux, share(1M)
	# elsewhere.
	if ! is_linux; then
		$SHARE -F smb
	else
		$NET usershare list
	fi

	return 0
}
1321
1322 #
1323 # Check NFS server status and trigger it online.
1324 #
function setup_nfs_server
{
	#
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	if is_linux; then
		log_note "NFS server must be started prior to running test framework."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only a real sharing operation can bring the NFS server
		# online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must $RM -rf $dummy
		fi

		log_must $MKDIR $dummy
		log_must $SHARE $dummy

		#
		# Wait for the fmri to reach its final status.  While in
		# transition an asterisk (*) is appended to the instance
		# state, and unsharing then would flip it back to 'DIS'.
		# Wait at least 1 second.
		#
		log_must $SLEEP 1
		# Local counter; was previously an untyped global and the
		# loop test relied on ksh evaluating a bare word
		# arithmetically.
		typeset -i timeout=10
		while ((timeout != 0)) && \
		    [[ $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must $SLEEP 1

			((timeout -= 1))
		done

		log_must $UNSHARE $dummy
		log_must $RM -rf $dummy
	fi

	log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
}
1376
1377 #
1378 # To verify whether calling process is in global zone
1379 #
1380 # Return 0 if in global zone, 1 in non-global zone
1381 #
function is_global_zone
{
	# zonename prints "global" in the global zone; propagate the
	# comparison result directly as the function's status.
	typeset cur_zone=$($ZONENAME 2>/dev/null)

	[[ $cur_zone == "global" ]]
}
1390
1391 #
1392 # Verify whether test is permitted to run from
1393 # global zone, local zone, or both
1394 #
1395 # $1 zone limit, could be "global", "local", or "both"(no limit)
1396 #
1397 # Return 0 if permitted, otherwise exit with log_unsupported
1398 #
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit means the test may run anywhere.
	if [[ -z $limit ]]; then
		return 0
	fi

	if ! is_global_zone ; then
		case $limit in
		local|both)
			;;
		global) log_unsupported "Test is unable to run from "\
			"local zone."
			;;
		*) log_note "Warning: unknown limit $limit - " \
			"use both."
			;;
		esac

		reexport_pool
	else
		case $limit in
		global|both)
			;;
		local) log_unsupported "Test is unable to run from "\
			"global zone."
			;;
		*) log_note "Warning: unknown limit $limit - " \
			"use both."
			;;
		esac
	fi

	return 0
}
1433
1434 # Return 0 if create successfully or the pool exists; $? otherwise
1435 # Note: In local zones, this function should return 0 silently.
1436 #
1437 # $1 - pool name
1438 # $2-n - [keyword] devs_list
1439
function create_pool #pool devs_list
{
	# Only the top-level pool component of a dataset name is used.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Recreate from scratch if a pool of that name already exists.
	poolexists $pool && destroy_pool $pool

	if is_global_zone ; then
		[[ -d /$pool ]] && $RM -rf /$pool
		log_must $ZPOOL create -f $pool $@
	fi

	return 0
}
1462
1463 # Return 0 if destroy successfully or the pool exists; $? otherwise
1464 # Note: In local zones, this function should return 0 silently.
1465 #
1466 # $1 - pool name
1467 # Destroy pool with the given parameters.
1468
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			typeset must=""
			while ((ret != 0)); do
				$must $ZPOOL destroy -f $pool
				ret=$?
				((ret == 0)) && break
				log_note "zpool destroy failed with $ret"
				# After several failed tries, run the destroy
				# under log_must so the test aborts cleanly
				# instead of retrying forever.  (Previously
				# written as '[[ count++ -ge 7 ]]', a
				# ksh-only arithmetic-in-test idiom.)
				((count++ >= 7)) && must=log_must
				$SLEEP $wait_time
			done

			[[ -d $mtpt ]] && \
			    log_must $RM -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1508
1509 #
1510 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1511 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1512 # and a zvol device to the zone.
1513 #
1514 # $1 zone name
1515 # $2 zone root directory prefix
1516 # $3 zone ip
1517 #
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && $RM -rf /$pool_name
	log_must $ZPOOL create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must $ZFS create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must $ZFS create -V 1g $pool_name/zone_zvol
	block_device_wait

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must $MKFILE $MINVDEVSIZE $sdevs
		log_must $ZPOOL add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must $ZFS create $pool_name/zfs_filesystem
	# log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem

	# Remove any stale zone root, then create a fresh one with
	# the restrictive permissions zoneadm requires.
	[[ -d $zone_root ]] && \
	    log_must $RM -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
	    log_must $MKDIR -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	$ECHO "create" > $zone_conf
	$ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
	$ECHO "set autoboot=true" >> $zone_conf
	i=0
	while ((i < cntctr)); do
		$ECHO "add dataset" >> $zone_conf
		$ECHO "set name=$pool_name/$prefix_ctr$i" >> \
		    $zone_conf
		$ECHO "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=$ZVOL_DEVDIR/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# $ECHO "add fs" >> $zone_conf
	# $ECHO "set type=zfs" >> $zone_conf
	# $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
	# $ECHO "end" >> $zone_conf

	$ECHO "verify" >> $zone_conf
	$ECHO "commit" >> $zone_conf
	log_must $ZONECFG -z $zone_name -f $zone_conf
	log_must $RM -f $zone_conf

	# Install the zone
	$ZONEADM -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: $ZONEADM -z $zone_name install"
	else
		log_fail "FAIL: $ZONEADM -z $zone_name install"
	fi

	# Install sysidcfg file so the zone boots without interactive
	# system identification prompts.
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	$ECHO "system_locale=C" > $sysidcfg
	$ECHO "terminal=dtterm" >> $sysidcfg
	$ECHO "network_interface=primary {" >> $sysidcfg
	$ECHO "hostname=$zone_name" >> $sysidcfg
	$ECHO "}" >> $sysidcfg
	$ECHO "name_service=NONE" >> $sysidcfg
	$ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
	$ECHO "security_policy=NONE" >> $sysidcfg
	$ECHO "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must $ZONEADM -z $zone_name boot
}
1622
1623 #
1624 # Reexport TESTPOOL & TESTPOOL(1-4)
1625 #
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must $ZFS mount $TESTPOOL
			fi
		else
			# TESTPOOL1..TESTPOOL4 are constructed dynamically,
			# hence the eval indirection to read and write them
			# by name.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval $ZFS mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1646
1647 #
1648 # Verify a given disk or pool state
1649 #
# Returns 0 if the pool/disk matches the expected state, 1 otherwise
1651 #
function check_state # pool disk state{online,offline,degraded}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset output

	if [[ -z $pool || -z $state ]]; then
		log_fail "Arguments invalid or missing"
	fi

	if [[ -z $disk ]]; then
		# No disk given: check the overall pool health property.
		output=$($ZPOOL get -H -o value health $pool)
	else
		# Check the named vdev's line of the status output.
		output=$($ZPOOL status -v $pool | grep "$disk")
	fi

	echo "$output" | grep -i "$state" > /dev/null 2>&1
}
1672
1673 #
1674 # Cause a scan of all scsi host adapters by default
1675 #
1676 # $1 optional host number
1677 #
function scan_scsi_hosts
{
	typeset hostnum=${1}

	if is_linux; then
		if [[ -z $hostnum ]]; then
			# No host given: rescan every SCSI host adapter.
			for host in /sys/class/scsi_host/host*; do
				log_must eval "$ECHO '- - -' > $host/scan"
			done
		else
			# Rescan only the requested host adapter.  (A
			# leftover statement that merely echoed the sysfs
			# path to /dev/null has been removed.)
			log_must eval \
			    "$ECHO '- - -' > /sys/class/scsi_host/host$hostnum/scan"
		fi
	fi
}
1696 #
1697 # Wait for newly created block devices to have their minors created.
1698 #
function block_device_wait
{
	# Only Linux needs to wait for udev to create device minors.
	is_linux || return 0

	$UDEVADM trigger
	$UDEVADM settle
}
1706
1707 #
1708 # Online or offline a disk on the system
1709 #
1710 # First checks state of disk. Test will fail if disk is not properly onlined
1711 # or offlined. Online is a full rescan of SCSI disks by echoing to every
1712 # host entry.
1713 #
function on_off_disk # disk state{online,offline} host
{
	typeset disk=$1
	typeset state=$2
	typeset host=$3

	[[ -z $disk ]] || [[ -z $state ]] && \
	    log_fail "Arguments invalid or missing"

	if is_linux; then
		if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
			# Multipath device: offline and delete each slave
			# (underlying physical path) via sysfs, one at a
			# time, until none remain.
			dm_name="$($READLINK $DEV_DSKDIR/$disk \
			    | $NAWK -F / '{print $2}')"
			slave="$($LS /sys/block/${dm_name}/slaves \
			    | $NAWK '{print $1}')"
			while [[ -n $slave ]]; do
				#check if disk is online
				$LSSCSI | $EGREP $slave > /dev/null
				if (($? == 0)); then
					slave_dir="/sys/block/${dm_name}"
					slave_dir+="/slaves/${slave}/device"
					ss="${slave_dir}/state"
					sd="${slave_dir}/delete"
					log_must eval "$ECHO 'offline' > ${ss}"
					log_must eval "$ECHO '1' > ${sd}"
					# Re-check: the device must be gone
					# from the lsscsi listing.
					$LSSCSI | $EGREP $slave > /dev/null
					if (($? == 0)); then
						log_fail "Offlining" \
						    "$disk failed"
					fi
				fi
				slave="$($LS /sys/block/$dm_name/slaves \
				    2>/dev/null | $NAWK '{print $1}')"
			done
		elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
			#check if disk is online
			$LSSCSI | $EGREP $disk > /dev/null
			if (($? == 0)); then
				dev_state="/sys/block/$disk/device/state"
				dev_delete="/sys/block/$disk/device/delete"
				log_must eval "$ECHO 'offline' > ${dev_state}"
				log_must eval "$ECHO '1' > ${dev_delete}"
				# Re-check: the device must be gone from the
				# lsscsi listing.
				$LSSCSI | $EGREP $disk > /dev/null
				if (($? == 0)); then
					log_fail "Offlining $disk" \
					    "failed"
				fi
			else
				log_note "$disk is already offline"
			fi
		elif [[ $state == "online" ]]; then
			#force a full rescan
			scan_scsi_hosts $host
			block_device_wait
			if is_mpath_device $disk; then
				dm_name="$($READLINK $DEV_DSKDIR/$disk \
				    | $NAWK -F / '{print $2}')"
				slave="$($LS /sys/block/$dm_name/slaves \
				    | $NAWK '{print $1}')"
				# After the rescan, the slave must reappear.
				$LSSCSI | $EGREP $slave > /dev/null
				if (($? != 0)); then
					log_fail "Onlining $disk failed"
				fi
			elif is_real_device $disk; then
				$LSSCSI | $EGREP $disk > /dev/null
				if (($? != 0)); then
					log_fail "Onlining $disk failed"
				fi
			else
				log_fail "$disk is not a real dev"
			fi
		else
			log_fail "$disk failed to $state"
		fi
	fi
}
1790
1791 #
1792 # Get the mountpoint of snapshot
1793 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1794 # as its mountpoint
1795 #
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# The name must contain '@' with non-empty filesystem and
	# snapshot components.
	if [[ $dataset != *@* || -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	$ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1813
1814 #
1815 # Given a pool and file system, this function will verify the file system
1816 # using the zdb internal tool. Note that the pool is exported and imported
1817 # to ensure it has consistent state.
1818 #
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling $ZDB to verify filesystem '$filesys'"
	# Export and re-import so zdb sees a consistent on-disk state.
	$ZFS unmount -a > /dev/null 2>&1
	log_must $ZPOOL export $pool

	# Any extra directories are passed to 'zpool import' as device
	# search paths (-d).
	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must $ZPOOL import $search_path $pool

	$ZDB -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: $ZDB -cudi $filesys"
		$CAT $zdbout
		log_fail "$ZDB detected errors with: '$filesys'"
	fi

	log_must $ZFS mount -a
	log_must $RM -rf $zdbout
}
1852
1853 #
# Given a pool, this function lists all disks in the pool
1855 #
1856 function get_disklist # pool
1857 {
1858 typeset disklist=""
1859
1860 disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \
1861 $GREP -v "\-\-\-\-\-" | \
1862 $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1863
1864 $ECHO $disklist
1865 }
1866
1867 #
# Given a pool, this function lists all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
1870 #
function get_disklist_fullpath # pool
{
	# -P makes 'zpool iostat' print whole device paths.  'args' is
	# now a function-local (it previously leaked as a global).
	typeset args="-P $1"

	get_disklist $args
}
1876
1877
1878
1879 # /**
1880 # This function kills a given list of processes after a time period. We use
1881 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1882 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1883 # would be listed as FAIL, which we don't want : we're happy with stress tests
1884 # running for a certain amount of time, then finishing.
1885 #
1886 # @param $1 the time in seconds after which we should terminate these processes
1887 # @param $2..$n the processes we wish to terminate.
1888 # */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"
	typeset pid

	log_note "Waiting for child processes($cpids). " \
	    "It could last dozens of minutes, please be patient ..."
	log_must $SLEEP $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	for pid in $cpids; do
		# Signal only the children that are still alive.
		if $PS -p $pid > /dev/null 2>&1; then
			log_must $KILL -USR1 $pid
		fi
	done
}
1908
1909 #
1910 # Verify a given hotspare disk is inuse or avail
1911 #
# Returns 0 if the pool/disk matches the expected state, 1 otherwise
1913 #
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	# Query the spare's state and propagate the comparison result.
	cur_state=$(get_device_state $pool $disk "spares")

	[[ $state == $cur_state ]]
}
1927
1928 #
1929 # Verify a given slog disk is inuse or avail
1930 #
# Returns 0 if the pool/disk matches the expected state, 1 otherwise
1932 #
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	# Query the log device's state and propagate the comparison result.
	cur_state=$(get_device_state $pool $disk "logs")

	[[ $state == $cur_state ]]
}
1946
1947 #
1948 # Verify a given vdev disk is inuse or avail
1949 #
# Returns 0 if the pool/disk matches the expected state, 1 otherwise
1951 #
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	# Strip an optional leading "$DEV_DSKDIR/" so both full paths and
	# bare device names are accepted.  (The previous pattern had a
	# stray '$/' typo — '${2#$/DEV_DSKDIR/}' — and never stripped
	# anything.)
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
1965
1966 #
1967 # Check the output of 'zpool status -v <pool>',
1968 # and to see if the content of <token> contain the <keyword> specified.
1969 #
# Returns 0 if it contains the keyword, 1 otherwise
1971 #
1972 function check_pool_status # pool token keyword
1973 {
1974 typeset pool=$1
1975 typeset token=$2
1976 typeset keyword=$3
1977
1978 $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
1979 ($1==token) {print $0}' \
1980 | $GREP -i "$keyword" > /dev/null 2>&1
1981
1982 return $?
1983 }
1984
1985 #
1986 # These 5 following functions are instance of check_pool_status()
1987 # is_pool_resilvering - to check if the pool is resilver in progress
1988 # is_pool_resilvered - to check if the pool is resilver completed
1989 # is_pool_scrubbing - to check if the pool is scrub in progress
1990 # is_pool_scrubbed - to check if the pool is scrub completed
1991 # is_pool_scrub_stopped - to check if the pool is scrub stopped
1992 #
function is_pool_resilvering #pool
{
	# Status propagates directly from check_pool_status.
	check_pool_status "$1" "scan" "resilver in progress since "
}
1998
function is_pool_resilvered #pool
{
	# Status propagates directly from check_pool_status.
	check_pool_status "$1" "scan" "resilvered "
}
2004
function is_pool_scrubbing #pool
{
	# Status propagates directly from check_pool_status.
	check_pool_status "$1" "scan" "scrub in progress since "
}
2010
function is_pool_scrubbed #pool
{
	# Status propagates directly from check_pool_status.
	check_pool_status "$1" "scan" "scrub repaired"
}
2016
function is_pool_scrub_stopped #pool
{
	# Status propagates directly from check_pool_status.
	check_pool_status "$1" "scan" "scrub canceled"
}
2022
2023 #
2024 # Use create_pool()/destroy_pool() to clean up the information in
2025 # in the given disk to avoid slice overlapping.
2026 #
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Scrub any stale labels by cycling a throwaway pool over the
	# given vdevs.
	poolexists $pool && destroy_pool $pool

	create_pool $pool $@
	destroy_pool $pool

	return 0
}
2040
2041 #
2042 # Verify the rsh connectivity to each remote host in RHOSTS.
2043 #
2044 # Return 0 if remote host is accessible; otherwise 1.
2045 # $1 remote host name
2046 # $2 username
2047 #
function verify_rsh_connect #rhost, username
{
	typeset rhost=$1
	typeset username=$2
	typeset rsh_cmd="$RSH -n"
	typeset cur_user=

	# The host must at least resolve in the administrative database.
	if ! $GETENT hosts $rhost >/dev/null 2>&1; then
		log_note "$rhost cannot be found from" \
		    "administrative database."
		return 1
	fi

	# ... and answer pings.
	if ! $PING $rhost 3 >/dev/null 2>&1; then
		log_note "$rhost is not reachable."
		return 1
	fi

	if ((${#username} != 0)); then
		rsh_cmd="$rsh_cmd -l $username"
		cur_user="given user \"$username\""
	else
		# Modernized: $(...) instead of legacy backticks.
		cur_user="current user \"$($LOGNAME)\""
	fi

	# Finally, a trivial remote command must succeed.
	if ! $rsh_cmd $rhost $TRUE; then
		log_note "$RSH to $rhost is not accessible" \
		    "with $cur_user."
		return 1
	fi

	return 0
}
2083
2084 #
2085 # Verify the remote host connection via rsh after rebooting
2086 # $1 remote host
2087 #
function verify_remote
{
	rhost=$1

	#
	# Wait for the remote system to finish rebooting: retry every
	# 150 seconds, up to 5 extra attempts (~12.5 minutes total),
	# which is an approximate upper bound for a reboot.
	#
	typeset -i count=0
	until verify_rsh_connect $rhost; do
		sleep 150
		((count += 1))
		if ((count > 5)); then
			return 1
		fi
	done
	return 0
}
2109
2110 #
2111 # Replacement function for /usr/bin/rsh. This function will include
2112 # the /usr/bin/rsh and meanwhile return the execution status of the
2113 # last command.
2114 #
2115 # $1 usrname passing down to -l option of /usr/bin/rsh
2116 # $2 remote machine hostname
2117 # $3... command string
2118 #
2119
function rsh_status
{
	typeset ruser=$1
	typeset rhost=$2
	typeset -i ret=0
	typeset cmd_str=""
	typeset rsh_str=""

	shift; shift
	cmd_str="$@"

	err_file=/tmp/${rhost}.$$.err
	if ((${#ruser} == 0)); then
		rsh_str="$RSH -n"
	else
		rsh_str="$RSH -n -l $ruser"
	fi

	# Run the command remotely; the remote ksh prints the command's
	# exit status on stderr ("status=N") so it can be recovered below.
	$rsh_str $rhost /bin/ksh -c "'$cmd_str; \
	    print -u 2 \"status=\$?\"'" \
	    >/dev/null 2>$err_file
	ret=$?
	if (($ret != 0)); then
		$CAT $err_file
		# Fixed: previously also removed an undefined $std_file.
		$RM -f $err_file
		log_fail "$RSH itself failed with exit code $ret..."
	fi

	# Extract the remote command's status from the captured stderr.
	ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
	    $CUT -d= -f2)
	(($ret != 0)) && $CAT $err_file >&2

	$RM -f $err_file >/dev/null 2>&1
	return $ret
}
2155
2156 #
2157 # Get the SUNWstc-fs-zfs package installation path in a remote host
2158 # $1 remote host name
2159 #
2160 function get_remote_pkgpath
2161 {
2162 typeset rhost=$1
2163 typeset pkgpath=""
2164
2165 pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
2166 $CUT -d: -f2")
2167
2168 $ECHO $pkgpath
2169 }
2170
2171 #/**
2172 # A function to find and locate free disks on a system or from given
2173 # disks as the parameter. It works by locating disks that are in use
2174 # as swap devices and dump devices, and also disks listed in /etc/vfstab
2175 #
2176 # $@ given disks to find which are free, default is all disks in
2177 # the test system
2178 #
2179 # @return a string containing the list of available disks
2180 #*/
function find_disks
{
	# Trust provided list, no attempt is made to locate unused devices.
	if is_linux; then
		$ECHO "$@"
		return
	fi


	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	# Snapshot current swap and dump device usage for filtering below.
	$SWAP -l > $sfi
	$DUMPADM > $dmpi 2>/dev/null

	# write an awk script that can process the output of format
	# to produce a list of disks we know about. Note that we have
	# to escape "$2" so that the shell doesn't interpret it while
	# we're creating the awk script.
	# -------------------
	$CAT > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
	BEGIN { FS="."; }

	/^Specify disk/{
		searchdisks=0;
	}

	{
		if (searchdisks && \$2 !~ "^$"){
			split(\$2,arr," ");
			print arr[1];
		}
	}

	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
EOF
	#---------------------

	$CHMOD 755 /tmp/find_disks.awk
	# Default to all disks reported by format(1M) when no arguments
	# were supplied.
	disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
	$RM /tmp/find_disks.awk

	unused=""
	for disk in $disks; do
	# Check for mounted
		$GREP "${disk}[sp]" /etc/mnttab >/dev/null
		(($? == 0)) && continue
	# Check for swap
		$GREP "${disk}[sp]" $sfi >/dev/null
		(($? == 0)) && continue
	# check for dump device
		$GREP "${disk}[sp]" $dmpi >/dev/null
		(($? == 0)) && continue
	# check to see if this disk hasn't been explicitly excluded
	# by a user-set environment variable
		$ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
		(($? == 0)) && continue
		unused_candidates="$unused_candidates $disk"
	done
	$RM $sfi
	$RM $dmpi

	# now just check to see if those disks do actually exist
	# by looking for a device pointing to the first slice in
	# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if [ -b $DEV_DSKDIR/${disk}s0 ]; then
		if [ $count -lt $max_finddisksnum ]; then
			unused="$unused $disk"
			# do not impose limit if $@ is provided
			[[ -z $@ ]] && ((count = count + 1))
		fi
		fi
	done

	# finally, return our disk list
	$ECHO $unused
}
2264
2265 #
2266 # Add specified user to specified group
2267 #
2268 # $1 group name
2269 # $2 user name
2270 # $3 base of the homedir (optional)
2271 #
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	log_must $USERADD -g $gname -d $basedir/$uname -m $uname

	# Add new users to the same group and the command line utils.
	# This allows them to be run out of the original users home
	# directory as long as it permissioned to be group readable.
	if is_linux; then
		cmd_group=$(stat --format="%G" $ZFS)
		log_must $USERMOD -a -G $cmd_group $uname
	fi

	return 0
}
2294
2295 #
2296 # Delete the specified user.
2297 #
2298 # $1 login name
2299 # $2 base of the homedir (optional)
2300 #
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -z $user ]] && log_fail "login name is necessary."

	# Remove the account only when it still exists.
	if $ID $user > /dev/null 2>&1; then
		log_must $USERDEL $user
	fi

	# Always clean up the home directory.
	[[ -d $basedir/$user ]] && $RM -fr $basedir/$user

	return 0
}
2318
2319 #
2320 # Select valid gid and create specified group.
2321 #
2322 # $1 group name
2323 #
function add_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	# Linux: let groupadd pick the gid itself, since many
	# distributions reserve 1000 and under.  (The previous
	# 'while true' wrapper here was a no-op — every case arm
	# returned on the first iteration.)
	if is_linux; then
		if $GROUPADD $group > /dev/null 2>&1; then
			return 0
		fi
		return 1
	fi

	# Otherwise probe gids starting at 100 until one is free;
	# groupadd exits 4 when the requested gid is already taken.
	typeset -i gid=100

	while true; do
		$GROUPADD -g $gid $group > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
		0) return 0 ;;
		# The gid is not unique
		4) ((gid += 1)) ;;
		*) return 1 ;;
		esac
	done
}
2358
2359 #
2360 # Delete the specified group.
2361 #
2362 # $1 group name
2363 #
function del_group #<group_name>
{
	typeset grp=$1
	if ((${#grp} == 0)); then
		log_fail "group name is necessary."
	fi

	if is_linux; then
		# Probe existence with getent(1); its exit codes drive
		# the case below.
		$GETENT group $grp > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
		# Group does not exist.
		2) return 0 ;;
		# Name already exists as a group name
		0) log_must $GROUPDEL $grp ;;
		*) return 1 ;;
		esac
	else
		# Probe existence with a self-rename via groupmod(1M);
		# its exit codes drive the case below.
		$GROUPMOD -n $grp $grp > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
		# Group does not exist.
		6) return 0 ;;
		# Name already exists as a group name
		9) log_must $GROUPDEL $grp ;;
		*) return 1 ;;
		esac
	fi

	return 0
}
2395
2396 #
2397 # This function will return true if it's safe to destroy the pool passed
2398 # as argument 1. It checks for pools based on zvols and files, and also
2399 # files contained in a pool that may have a different mountpoint.
2400 #
2401 function safe_to_destroy_pool { # $1 the pool name
2402
2403 typeset pool=""
2404 typeset DONT_DESTROY=""
2405
2406 # We check that by deleting the $1 pool, we're not
2407 # going to pull the rug out from other pools. Do this
2408 # by looking at all other pools, ensuring that they
2409 # aren't built from files or zvols contained in this pool.
2410
2411 for pool in $($ZPOOL list -H -o name)
2412 do
2413 ALTMOUNTPOOL=""
2414
2415 # this is a list of the top-level directories in each of the
2416 # files that make up the path to the files the pool is based on
2417 FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
2418 $AWK '{print $1}')
2419
2420 # this is a list of the zvols that make up the pool
2421 ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "$ZVOL_DEVDIR/$1$" \
2422 | $AWK '{print $1}')
2423
2424 # also want to determine if it's a file-based pool using an
2425 # alternate mountpoint...
2426 POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
2427 $GREP / | $AWK '{print $1}' | \
2428 $AWK -F/ '{print $2}' | $GREP -v "dev")
2429
2430 for pooldir in $POOL_FILE_DIRS
2431 do
2432 OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
2433 $GREP "${pooldir}$" | $AWK '{print $1}')
2434
2435 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2436 done
2437
2438
2439 if [ ! -z "$ZVOLPOOL" ]
2440 then
2441 DONT_DESTROY="true"
2442 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2443 fi
2444
2445 if [ ! -z "$FILEPOOL" ]
2446 then
2447 DONT_DESTROY="true"
2448 log_note "Pool $pool is built from $FILEPOOL on $1"
2449 fi
2450
2451 if [ ! -z "$ALTMOUNTPOOL" ]
2452 then
2453 DONT_DESTROY="true"
2454 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2455 fi
2456 done
2457
2458 if [ -z "${DONT_DESTROY}" ]
2459 then
2460 return 0
2461 else
2462 log_note "Warning: it is not safe to destroy $1!"
2463 return 1
2464 fi
2465 }
2466
2467 #
2468 # Get the available ZFS compression options
2469 # $1 option type zfs_set|zfs_compress
2470 #
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
	    gzip-6 gzip-7 gzip-8 gzip-9"

	# "zfs_set" additionally includes "off", which is a valid
	# property value though not a compression algorithm.
	if [[ $1 == "zfs_compress" ]] ; then
		COMPRESS_OPTS="on lzjb"
	elif [[ $1 == "zfs_set" ]] ; then
		COMPRESS_OPTS="on off lzjb"
	fi
	typeset valid_opts="$COMPRESS_OPTS"
	# Append the gzip variants only when this zfs build supports them.
	$ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
	if [[ $? -eq 0 ]]; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	$ECHO "$valid_opts"
}
2489
2490 #
2491 # Verify zfs operation with -p option work as expected
2492 # $1 operation, value could be create, clone or rename
2493 # $2 dataset type, value could be fs or vol
2494 # $3 dataset name
2495 # $4 new dataset name
2496 #
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes only the target name; volumes
			# need an explicit size.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must $ZFS destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot $ZFS $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must $ZFS $ops -p $dataset $newdataset
	block_device_wait

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must $ZFS $ops -p $dataset $newdataset
	fi

	return 0
}
2562
2563 #
2564 # Get configuration of pool
2565 # $1 pool name
2566 # $2 config name
2567 #
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# The last column of 'zpool list -H' is the pool's altroot; "-"
	# means the pool was imported normally, so zdb may use the
	# cachefile (-C).  Otherwise fall back to on-disk discovery (-e).
	alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
		    '{print $2}')
	else
		value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		# Strip the single quotes zdb prints around string values.
		# NOTE(review): the bare ' inside ${...#} is ksh-specific
		# pattern syntax; do not "fix" the quoting.
		value=${value#'}
		value=${value%'}
	fi
	# NOTE(review): 'value' is not typeset local; it leaks into the
	# caller's scope.
	echo $value

	return 0
}
2593
2594 #
# Private function. Randomly select one of the items from the arguments.
2596 #
2597 # $1 count
2598 # $2-n string
2599 #
function _random_get
{
	# $1 is the number of candidates; the rest are the candidates.
	typeset -i total=$1
	shift
	typeset candidates="$@"

	# Pick a 1-based field index at random.
	typeset -i pick
	((pick = RANDOM % total + 1))

	$ECHO $($ECHO "$candidates" | $CUT -f $pick -d ' ')
}
2612
2613 #
# Randomly select one item from the arguments, including an empty (NONE) choice
2615 #
function random_get_with_non
{
	typeset -i cnt=$#
	# Count one extra (empty) candidate so NONE can be selected.
	# Bug fix: '((cnt =+ 1))' assigned the value +1 to cnt instead of
	# incrementing it, so the first argument was always returned.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2623
2624 #
# Randomly select one item from the arguments, excluding the NONE string
2626 #
function random_get
{
	# Delegate to the private helper with an exact candidate count.
	typeset -i cnt=$#
	_random_get "$cnt" "$@"
}
2631
2632 #
2633 # Detect if the current system support slog
2634 #
function verify_slog_support
{
	# Build two throw-away file vdevs and dry-run a pool creation with
	# a separate log device; success means slog is supported.
	typeset workdir=/tmp/disk.$$
	typeset trypool=foo.$$
	typeset data_vdev=$workdir/a
	typeset log_vdev=$workdir/b
	typeset -i rv=0

	$MKDIR -p $workdir
	$MKFILE $MINVDEVSIZE $data_vdev $log_vdev

	if ! $ZPOOL create -n $trypool $data_vdev log $log_vdev \
	    > /dev/null 2>&1; then
		rv=1
	fi
	$RM -r $workdir

	return $rv
}
2653
2654 #
2655 # The function will generate a dataset name with specific length
2656 # $1, the length of the name
2657 # $2, the base string to construct the name
2658 #
function gen_dataset_name
{
	typeset -i want=$1
	typeset seed="$2"
	typeset -i seedlen=${#seed}
	typeset -i reps
	typeset result=""

	# Repeat the base string ceil(want / seedlen) times; the result is
	# at least 'want' characters long.
	((reps = want / seedlen))
	if ((want % seedlen != 0)); then
		((reps += 1))
	fi

	while ((reps > 0)); do
		result="${result}${seed}"
		((reps -= 1))
	done

	$ECHO $result
}
2680
2681 #
2682 # Get cksum tuple of dataset
2683 # $1 dataset name
2684 #
2685 # sample zdb output:
2686 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2687 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2688 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2689 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
	# Flush outstanding writes so zdb sees the latest rootbp.
	$SYNC
	# The cksum tuple is the 7th '='-separated field of the
	# "Dataset <name> [...]" summary line.
	$ECHO $($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
	    | $AWK -F= '{print $7}')
}
2698
2699 #
2700 # Get cksum of file
# $1 file path
2702 #
function checksum
{
	# First field of cksum(1) output is the checksum itself.
	$ECHO $($CKSUM $1 | $AWK '{print $1}')
}
2709
2710 #
2711 # Get the given disk/slice state from the specific field of the pool
2712 #
2713 function get_device_state #pool disk field("", "spares","logs")
2714 {
2715 typeset pool=$1
2716 typeset disk=${2#$DEV_DSKDIR/}
2717 typeset field=${3:-$pool}
2718
2719 state=$($ZPOOL status -v "$pool" 2>/dev/null | \
2720 $NAWK -v device=$disk -v pool=$pool -v field=$field \
2721 'BEGIN {startconfig=0; startfield=0; }
2722 /config:/ {startconfig=1}
2723 (startconfig==1) && ($1==field) {startfield=1; next;}
2724 (startfield==1) && ($1==device) {print $2; exit;}
2725 (startfield==1) &&
2726 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2727 echo $state
2728 }
2729
2730
2731 #
2732 # print the given directory filesystem type
2733 #
2734 # $1 directory name
2735 #
function get_fstype
{
	typeset mntpath=$1

	[[ -n $mntpath ]] || log_fail "Usage: get_fstype <directory>"

	#
	# 'df -n' prints e.g. "/ : ufs"; the third field is the
	# filesystem type.
	#
	$DF -n $mntpath | $AWK '{print $3}'
}
2750
2751 #
2752 # Given a disk, label it to VTOC regardless what label was on the disk
2753 # $1 disk
2754 #
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	# Temporary command script fed to format(1M)/fdisk(1M).
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$($UNAME -p)

	if is_linux; then
		log_note "Currently unsupported by the test framework"
		return 1
	fi

	if [[ $arch == "i386" ]]; then
		# x86 needs an fdisk partition table before the VTOC label
		# can be written.
		$ECHO "label" > $label_file
		$ECHO "0" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "q" >> $label_file
		$ECHO "q" >> $label_file

		$FDISK -B $disk >/dev/null 2>&1
		# wait a while for fdisk to finish
		$SLEEP 60
	elif [[ $arch == "sparc" ]]; then
		$ECHO "label" > $label_file
		$ECHO "0" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	$FORMAT -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	$RM -f $label_file
	#
	# wait for format to finish
	#
	$SLEEP 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2803
2804 #
2805 # check if the system was installed as zfsroot or not
# return: 0 if true, non-zero otherwise
2807 #
function is_zfsroot
{
	# The function's status is the grep status: 0 when / is on zfs.
	$DF -n / | $GREP zfs > /dev/null 2>&1
}
2813
2814 #
2815 # get the root filesystem name if it's zfsroot system.
2816 #
2817 # return: root filesystem name
function get_rootfs
{
	# The mnttab entry mounted at "/" with type "zfs" names the root
	# dataset.
	typeset fsname
	fsname=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
	    /etc/mnttab)
	[[ -n "$fsname" ]] || log_fail "Can not get rootfs"

	if $ZFS list $fsname > /dev/null 2>&1; then
		$ECHO $fsname
	else
		log_fail "This is not a zfsroot system."
	fi
}
2833
2834 #
2835 # get the rootfs's pool name
2836 # return:
2837 # rootpool name
2838 #
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""
	rootfs=$($AWK '{if ($2 == "/" && $3 =="zfs") print $1}' \
	    /etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootpool"
	fi
	$ZFS list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		# The pool name is the first '/'-separated component of the
		# dataset name.  Consistency fix: use $AWK and $(...) like
		# the rest of this library instead of backticks + raw awk.
		rootpool=$($ECHO $rootfs | $AWK -F\/ '{print $1}')
		$ECHO $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2856
2857 #
2858 # Check if the given device is physical device
2859 #
function is_physical_device #device
{
	# Strip any leading device-directory prefix.
	typeset device=${1#$DEV_DSKDIR}
	device=${device#$DEV_RDSKDIR}

	if is_linux; then
		# Linux: a block device node plus loop partition support.
		[[ -b "$DEV_DSKDIR/$device" ]] && \
		    [[ -f /sys/module/loop/parameters/max_part ]]
	else
		# Illumos: cXtYdZ-style ctd names are physical devices.
		$ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
	fi
}
2874
2875 #
2876 # Check if the given device is a real device (ie SCSI device)
2877 #
function is_real_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		# lsblk reports TYPE "disk" for real (SCSI) devices.
		($LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP disk > /dev/null) \
		    2>/dev/null
	fi
}
2889
2890 #
2891 # Check if the given device is a loop device
2892 #
function is_loop_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		# lsblk reports TYPE "loop" for loopback devices.
		($LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP loop > /dev/null) \
		    2>/dev/null
	fi
}
2904
2905 #
# Check if the given device is a multipath device and if there is a symbolic
2907 # link to a device mapper and to a disk
2908 # Currently no support for dm devices alone without multipath
2909 #
function is_mpath_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		($LSBLK $DEV_MPATHDIR/$disk -o TYPE | $EGREP mpath >/dev/null) \
		    2>/dev/null
		if (($? == 0)); then
			# Also require a resolvable device-mapper symlink.
			$READLINK $DEV_MPATHDIR/$disk > /dev/null 2>&1
			return $?
		else
			# Bug fix: 'return $?' here returned the status of
			# the (($? == 0)) test, which was only accidentally
			# 1; return the failure explicitly.
			return 1
		fi
	fi
}
2926
2927 # Set the slice prefix for disk partitioning depending
2928 # on whether the device is a real, multipath, or loop device.
2929 # Currently all disks have to be of the same type, so only
2930 # checks first disk to determine slice prefix.
2931 #
function set_slice_prefix
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			# NOTE(review): the shell variable 'i' is never
			# passed to awk (no -v), so awk evaluates $(i + 1)
			# as $1 and every iteration inspects the FIRST disk
			# in $DISKS.  This matches the header comment's
			# "only checks first disk" claim — confirm before
			# relying on per-disk behavior.
			disk="$($ECHO $DISKS | $NAWK '{print $(i + 1)}')"
			# mpath devices whose 18th character is not a digit
			# (and real devices) take no slice prefix; other
			# mpath/loop devices use "p" (e.g. loop0p1).
			if ( is_mpath_device $disk ) && [[ -z $($ECHO $disk \
			    | awk 'substr($1,18,1) ~ /^[[:digit:]]+$/') ]] || \
			    ( is_real_device $disk ); then
				export SLICE_PREFIX=""
				return 0
			elif ( is_mpath_device $disk || is_loop_device \
			    $disk ); then
				export SLICE_PREFIX="p"
				return 0
			else
				log_fail "$disk not supported for partitioning."
			fi
			(( i = i + 1))
		done
	fi
}
2956
2957 #
2958 # Set the directory path of the listed devices in $DISK_ARRAY_NUM
2959 # Currently all disks have to be of the same type, so only
2960 # checks first disk to determine device directory
2961 # default = /dev (linux)
2962 # real disk = /dev (linux)
2963 # multipath device = /dev/mapper (linux)
2964 #
function set_device_dir
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			# NOTE(review): 'i' is not passed to awk, so $(i + 1)
			# is $1 and only the first disk in $DISKS is ever
			# examined; both branches below return, so the loop
			# runs at most once.  This matches the header
			# comment's "only checks first disk" claim.
			disk="$($ECHO $DISKS | $NAWK '{print $(i + 1)}')"
			if is_mpath_device $disk; then
				export DEV_DSKDIR=$DEV_MPATHDIR
				return 0
			else
				export DEV_DSKDIR=$DEV_RDSKDIR
				return 0
			fi
			(( i = i + 1))
		done
	else
		export DEV_DSKDIR=$DEV_RDSKDIR
	fi
}
2986
2987 #
2988 # Get the directory path of given device
2989 #
function get_device_dir #device
{
	typeset device=$1

	# Bug fix: the original wrapped the call in $(...), executing the
	# function's (empty) stdout as a command and relying on the command
	# substitution's exit status by accident.  Call it directly.
	if ! is_physical_device $device ; then
		# Not a physical device: report the directory part of the
		# path, or the device directory for bare block devices.
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		if [[ -b "$DEV_DSKDIR/$device" ]]; then
			device="$DEV_DSKDIR"
		fi
		$ECHO $device
	else
		$ECHO "$DEV_DSKDIR"
	fi
}
3006
3007 #
3008 # Get persistent name for given disk
3009 #
function get_persistent_disk_name #device
{
	typeset device=$1
	typeset unique_id

	# Non-Linux platforms: the device name is already persistent.
	if ! is_linux; then
		$ECHO $device
		return
	fi

	if is_real_device $device; then
		# Use the udev disk/by-id symlink name.
		unique_id="$($UDEVADM info -q all -n $DEV_DSKDIR/$device \
		    | $EGREP disk/by-id | $NAWK '{print $2; exit}' \
		    | $NAWK -F / '{print $3}')"
		$ECHO $unique_id
	elif is_mpath_device $device; then
		# Multipath devices carry a dm-uuid based by-id name.
		unique_id="$($UDEVADM info -q all -n $DEV_DSKDIR/$device \
		    | $EGREP disk/by-id/dm-uuid \
		    | $NAWK '{print $2; exit}' \
		    | $NAWK -F / '{print $3}')"
		$ECHO $unique_id
	else
		$ECHO $device
	fi
}
3034
3035 #
3036 # Load scsi_debug module with specified parameters
3037 #
function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
{
	typeset devsize=$1
	typeset hosts=$2
	typeset tgts=$3
	typeset luns=$4

	[[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
	    [[ -z $luns ]] && log_fail "Arguments invalid or missing"

	if is_linux; then
		# Dry-run probe for module availability.
		$MODLOAD -n scsi_debug
		if (($? != 0)); then
			# Bug fix: the message was split across two lines
			# without a continuation, so "module" executed as a
			# separate command.
			log_unsupported "Platform does not have scsi_debug" \
			    "module"
		fi
		# Bug fix: discard output to /dev/null, not /dev/zero.
		$LSMOD | $EGREP scsi_debug > /dev/null
		if (($? == 0)); then
			log_fail "scsi_debug module already installed"
		else
			log_must $MODLOAD scsi_debug dev_size_mb=$devsize \
			    add_host=$hosts num_tgts=$tgts max_luns=$luns
			block_device_wait
			$LSSCSI | $EGREP scsi_debug > /dev/null
			if (($? == 1)); then
				log_fail "scsi_debug module install failed"
			fi
		fi
	fi
}
3068
3069 #
3070 # Get the package name
3071 #
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	# Convert the path separators to dashes and add the STC prefix.
	echo "SUNWstc-${dirpath//\//-}"
}
3078
3079 #
3080 # Get the word numbers from a string separated by white space
3081 #
function get_word_count
{
	# Let the shell word-split the argument, then count the words.
	typeset words="$1"
	$ECHO $words | $WC -w
}
3086
3087 #
# Verify that the required number of disks is given
3089 #
function verify_disk_count
{
	# $1 disk list, $2 minimum required (defaults to 1)
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	((count >= min)) || log_untested \
	    "A minimum of $min disks is required to run." \
	    " You specified $count disk(s)"
}
3101
function ds_is_volume
{
	# True when the dataset's 'type' property is "volume".
	[[ $(get_prop type $1) = "volume" ]]
}
3108
function ds_is_filesystem
{
	# True when the dataset's 'type' property is "filesystem".
	[[ $(get_prop type $1) = "filesystem" ]]
}
3115
function ds_is_snapshot
{
	# True when the dataset's 'type' property is "snapshot".
	[[ $(get_prop type $1) = "snapshot" ]]
}
3122
3123 #
3124 # Check if Trusted Extensions are installed and enabled
3125 #
function is_te_enabled
{
	# The labeld service is enabled iff Trusted Extensions are active;
	# the grep status (and its matched output) is passed through.
	$SVCS -H -o state labeld 2>/dev/null | $GREP "enabled"
}
3135
3136 # Utility function to determine if a system has multiple cpus.
function is_mp
{
	# Compare the online CPU count against 1 on either platform.
	if is_linux; then
		(($($NPROC) > 1))
	else
		(($($PSRINFO | $WC -l) > 1))
	fi
}
3147
function get_cpu_freq
{
	# Report the CPU clock in MHz for the current platform.
	if is_linux; then
		lscpu | $AWK '/CPU MHz/ { print $3 }'
		return
	fi
	$PSRINFO -v 0 | $AWK '/processor operates at/ {print $6}'
}
3156
3157 # Run the given command as the user provided.
3158 function user_run
3159 {
3160 typeset user=$1
3161 shift
3162
3163 log_note "user:$user $@"
3164 eval \$SU \$user -c \"$@\" > /tmp/out 2>/tmp/err
3165 return $?
3166 }
3167
3168 #
3169 # Check if the pool contains the specified vdevs
3170 #
3171 # $1 pool
3172 # $2..n <vdev> ...
3173 #
3174 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3175 # vdevs is not in the pool, and 2 if pool name is missing.
3176 #
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	typeset tmpfile

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	tmpfile=$($MKTEMP)
	$ZPOOL list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		$GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		if (($? != 0)); then
			# Bug fix: remove the temp file on the failure path
			# too (it used to leak here).
			$RM -f $tmpfile
			return 1
		fi
	done

	$RM -f $tmpfile

	return 0
}
3200
function get_max
{
	# NOTE(review): '-l' lowercases in ksh93; '-li' (long integer) was
	# likely intended — the arithmetic below works either way.
	typeset -l i max=$1
	shift

	for i in "$@"; do
		# Fix: assign the arithmetic result directly instead of the
		# useless $(echo $((...))) subshell per iteration.
		max=$((max > i ? max : i))
	done

	echo $max
}
3212
function get_min
{
	# NOTE(review): '-l' lowercases in ksh93; '-li' (long integer) was
	# likely intended — the arithmetic below works either way.
	typeset -l i min=$1
	shift

	for i in "$@"; do
		# Fix: assign the arithmetic result directly instead of the
		# useless $(echo $((...))) subshell per iteration.
		min=$((min < i ? min : i))
	done

	echo $min
}
3224
3225 #
3226 # Synchronize all the data in pool
3227 #
3228 # $1 pool name
3229 #
function sync_pool #pool
{
	typeset pool=${1:-$TESTPOOL}

	log_must $SYNC
	log_must $SLEEP 2
	# Flush all the pool data.
	typeset -i ret
	$ZPOOL scrub $pool >/dev/null 2>&1
	ret=$?
	(( $ret != 0 )) && \
	    log_fail "$ZPOOL scrub $pool failed."

	# Poll until the scrub completes; if a resilver completes instead,
	# the pool was in an unexpected state.
	while ! is_pool_scrubbed $pool; do
		if is_pool_resilvered $pool ; then
			log_fail "$pool should not be resilver completed."
		fi
		log_must $SLEEP 2
	done
}
3250
3251 #
3252 # Wait for zpool 'freeing' property drops to zero.
3253 #
3254 # $1 pool name
3255 #
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll once a second until the 'freeing' property reaches zero.
	while [[ "$($ZPOOL list -Ho freeing $pool)" != "0" ]]; do
		log_must $SLEEP 1
	done
}
3264
3265 #
3266 # Check if ZED is currently running, if not start ZED.
3267 #
function zed_start
{
	if is_linux; then
		# ZEDLET_DIR=/var/tmp/zed
		if [[ ! -d $ZEDLET_DIR ]]; then
			log_must $MKDIR $ZEDLET_DIR
		fi

		# Verify the ZED is not already running.
		$PGREP -x zed > /dev/null
		if (($? == 0)); then
			log_fail "ZED already running"
		fi

		# Install the only zedlet the tests rely on.
		log_must $CP ${ZEDLETDIR}/all-syslog.sh $ZEDLET_DIR

		log_note "Starting ZED"
		# run ZED in the background and redirect foreground logging
		# output to zedlog
		log_must eval "$ZED -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid" \
		    "-s $ZEDLET_DIR/state 2>${ZEDLET_DIR}/zedlog &"
	fi
}
3291
3292 #
3293 # Kill ZED process
3294 #
function zed_stop
{
	# Fix: declare zedpid local so it no longer leaks into the
	# caller's scope.
	typeset zedpid

	if is_linux; then
		# Kill the daemon recorded in the pid file, then remove all
		# of the state zed_start created.
		if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
			zedpid=$($CAT ${ZEDLET_DIR}/zed.pid)
			log_must $KILL $zedpid
		fi
		log_must $RM -f ${ZEDLET_DIR}/all-syslog.sh
		log_must $RM -f ${ZEDLET_DIR}/zed.pid
		log_must $RM -f ${ZEDLET_DIR}/zedlog
		log_must $RM -f ${ZEDLET_DIR}/state
		log_must $RMDIR $ZEDLET_DIR
	fi
}