1#!/bin/ksh -p
2#
3# CDDL HEADER START
4#
5# The contents of this file are subject to the terms of the
6# Common Development and Distribution License (the "License").
7# You may not use this file except in compliance with the License.
8#
9# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10# or http://www.opensolaris.org/os/licensing.
11# See the License for the specific language governing permissions
12# and limitations under the License.
13#
14# When distributing Covered Code, include this CDDL HEADER in each
15# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16# If applicable, add the following below this CDDL HEADER, with the
17# fields enclosed by brackets "[]" replaced with your own identifying
18# information: Portions Copyright [yyyy] [name of copyright owner]
19#
20# CDDL HEADER END
21#
22
23#
24# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25# Use is subject to license terms.
26# Copyright (c) 2012, 2015 by Delphix. All rights reserved.
27# Copyright 2016 Nexenta Systems, Inc.
28# Copyright (c) 2017 Lawrence Livermore National Security, LLC.
29#
30
31. ${STF_TOOLS}/include/logapi.shlib
32
33# Determine if this is a Linux test system
34#
35# Return 0 if the platform is Linux, 1 otherwise
36
37function is_linux
38{
39 if [[ $($UNAME -o) == "GNU/Linux" ]]; then
40 return 0
41 else
42 return 1
43 fi
44}
45
46# Determine if this is a 32-bit system
47#
48# Return 0 if the platform is 32-bit, 1 otherwise
49
50function is_32bit
51{
52 if [[ $(getconf LONG_BIT) == "32" ]]; then
53 return 0
54 else
55 return 1
56 fi
57}
58
59# Determine if kmemleak is enabled
60#
61# Return 0 if kmemleak is enabled, 1 otherwise
62
63function is_kmemleak
64{
65 if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
66 return 0
67 else
68 return 1
69 fi
70}
71
72# Determine whether a dataset is mounted
73#
74# $1 dataset name
75# $2 filesystem type; optional - defaulted to zfs
76#
77# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
78
79function ismounted
80{
81 typeset fstype=$2
82 [[ -z $fstype ]] && fstype=zfs
83 typeset out dir name ret
84
85 case $fstype in
86 zfs)
87 if [[ "$1" == "/"* ]] ; then
88 for out in $($ZFS mount | $AWK '{print $2}'); do
89 [[ $1 == $out ]] && return 0
90 done
91 else
92 for out in $($ZFS mount | $AWK '{print $1}'); do
93 [[ $1 == $out ]] && return 0
94 done
95 fi
96 ;;
97 ufs|nfs)
98 out=$($DF -F $fstype $1 2>/dev/null)
99 ret=$?
100 (($ret != 0)) && return $ret
101
102 dir=${out%%\(*}
103 dir=${dir%% *}
104 name=${out##*\(}
105 name=${name%%\)*}
106 name=${name%% *}
107
108 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
109 ;;
110 ext2)
111 out=$($DF -t $fstype $1 2>/dev/null)
112 return $?
113 ;;
114 zvol)
115 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
116 link=$(readlink -f $ZVOL_DEVDIR/$1)
117 [[ -n "$link" ]] && \
118 $MOUNT | $GREP -q "^$link" && \
119 return 0
120 fi
121 ;;
122 esac
123
124 return 1
125}
126
127# Return 0 if a dataset is mounted; 1 otherwise
128#
129# $1 dataset name
130# $2 filesystem type; optional - defaulted to zfs
131
132function mounted
133{
134 ismounted $1 $2
135 (($? == 0)) && return 0
136 return 1
137}
138
139# Return 0 if a dataset is unmounted; 1 otherwise
140#
141# $1 dataset name
142# $2 filesystem type; optional - defaulted to zfs
143
144function unmounted
145{
146 ismounted $1 $2
147 (($? == 1)) && return 0
148 return 1
149}
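# Example usage of the mount-state helpers above (illustrative only; the
# dataset name is the suite's standard $TESTPOOL/$TESTFS fixture):
#
#	log_must mounted $TESTPOOL/$TESTFS
#	log_must $ZFS unmount $TESTPOOL/$TESTFS
#	log_must unmounted $TESTPOOL/$TESTFS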
150
151# split line on ","
152#
153# $1 - line to split
154
155function splitline
156{
157 $ECHO $1 | $SED "s/,/ /g"
158}
159
160function default_setup
161{
162 default_setup_noexit "$@"
163
164 log_pass
165}
166
167#
168# Given a list of disks, setup storage pools and datasets.
169#
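# E.g. (illustrative sketch; the disk names are hypothetical):
#	default_setup_noexit "sdb sdc"			# pool and file system only
#	default_setup_noexit "sdb sdc" "true"		# also create a container
#	default_setup_noexit "sdb sdc" "" "true"	# also create a volume
#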
170function default_setup_noexit
171{
172 typeset disklist=$1
173 typeset container=$2
174 typeset volume=$3
175 log_note begin default_setup_noexit
176
177 if is_global_zone; then
178 if poolexists $TESTPOOL ; then
179 destroy_pool $TESTPOOL
180 fi
181 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
182 log_note creating pool $TESTPOOL $disklist
183 log_must $ZPOOL create -f $TESTPOOL $disklist
184 else
185 reexport_pool
186 fi
187
188 $RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
189 $MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR
190
191 log_must $ZFS create $TESTPOOL/$TESTFS
192 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
193
194 if [[ -n $container ]]; then
195 $RM -rf $TESTDIR1 || \
196 log_unresolved Could not remove $TESTDIR1
197 $MKDIR -p $TESTDIR1 || \
198 log_unresolved Could not create $TESTDIR1
199
200 log_must $ZFS create $TESTPOOL/$TESTCTR
201 log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
202 log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
203 log_must $ZFS set mountpoint=$TESTDIR1 \
204 $TESTPOOL/$TESTCTR/$TESTFS1
205 fi
206
207 if [[ -n $volume ]]; then
208 if is_global_zone ; then
209 log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
210 block_device_wait
211 else
212 log_must $ZFS create $TESTPOOL/$TESTVOL
213 fi
214 fi
215}
216
217#
218# Given a list of disks, setup a storage pool, file system and
219# a container.
220#
221function default_container_setup
222{
223 typeset disklist=$1
224
225 default_setup "$disklist" "true"
226}
227
228#
229# Given a list of disks, setup a storage pool, file system
230# and a volume.
231#
232function default_volume_setup
233{
234 typeset disklist=$1
235
236 default_setup "$disklist" "" "true"
237}
238
239#
240# Given a list of disks, setup a storage pool, file system,
241# a container and a volume.
242#
243function default_container_volume_setup
244{
245 typeset disklist=$1
246
247 default_setup "$disklist" "true" "true"
248}
249
250#
251# Create a snapshot on a filesystem or volume. By default, create a snapshot
252# on the filesystem.
253#
254# $1 Existing filesystem or volume name. Default, $TESTFS
255# $2 snapshot name. Default, $TESTSNAP
256#
257function create_snapshot
258{
259 typeset fs_vol=${1:-$TESTFS}
260 typeset snap=${2:-$TESTSNAP}
261
262 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
263 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
264
265 if snapexists $fs_vol@$snap; then
266 log_fail "$fs_vol@$snap already exists."
267 fi
268 datasetexists $fs_vol || \
269 log_fail "$fs_vol must exist."
270
271 log_must $ZFS snapshot $fs_vol@$snap
272}
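# Example usage (illustrative; uses the suite's default fixtures):
#
#	create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
#	log_must snapexists $TESTPOOL/$TESTFS@$TESTSNAP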
273
274#
275# Create a clone from a snapshot, default clone name is $TESTCLONE.
276#
277# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
278# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
279#
280function create_clone # snapshot clone
281{
282 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
283 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
284
285 [[ -z $snap ]] && \
286 log_fail "Snapshot name is undefined."
287 [[ -z $clone ]] && \
288 log_fail "Clone name is undefined."
289
290 log_must $ZFS clone $snap $clone
291}
292
293#
294# Create a bookmark of the given snapshot. By default, create a bookmark on
295# the filesystem.
296#
297# $1 Existing filesystem or volume name. Default, $TESTFS
298# $2 Existing snapshot name. Default, $TESTSNAP
299# $3 bookmark name. Default, $TESTBKMARK
300#
301function create_bookmark
302{
303 typeset fs_vol=${1:-$TESTFS}
304 typeset snap=${2:-$TESTSNAP}
305 typeset bkmark=${3:-$TESTBKMARK}
306
307 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
308 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
309 [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
310
311 if bkmarkexists $fs_vol#$bkmark; then
312 log_fail "$fs_vol#$bkmark already exists."
313 fi
314 datasetexists $fs_vol || \
315 log_fail "$fs_vol must exist."
316 snapexists $fs_vol@$snap || \
317 log_fail "$fs_vol@$snap must exist."
318
319 log_must $ZFS bookmark $fs_vol@$snap $fs_vol#$bkmark
320}
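# Example usage (illustrative; assumes the snapshot already exists):
#
#	create_bookmark $TESTPOOL/$TESTFS $TESTSNAP $TESTBKMARK
#	log_must bkmarkexists $TESTPOOL/$TESTFS#$TESTBKMARK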
321
322function default_mirror_setup
323{
324 default_mirror_setup_noexit $1 $2 $3
325
326 log_pass
327}
328
329#
330# Given a pair of disks, set up a storage pool and dataset for the mirror
331# @parameters: $1 the primary side of the mirror
332# $2 the secondary side of the mirror
333# @uses: ZPOOL ZFS TESTPOOL TESTFS
334function default_mirror_setup_noexit
335{
336 readonly func="default_mirror_setup_noexit"
337 typeset primary=$1
338 typeset secondary=$2
339
340 [[ -z $primary ]] && \
341 log_fail "$func: No parameters passed"
342 [[ -z $secondary ]] && \
343 log_fail "$func: No secondary partition passed"
344 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
345 log_must $ZPOOL create -f $TESTPOOL mirror $@
346 log_must $ZFS create $TESTPOOL/$TESTFS
347 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
348}
349
350#
351# Create a number of mirrors.
352# We create a number ($1) of two-way mirrors using the pairs of disks named
353# on the command line. These mirrors are *not* mounted.
354# @parameters: $1 the number of mirrors to create
355# $... the devices to use to create the mirrors on
356# @uses: ZPOOL ZFS TESTPOOL
357function setup_mirrors
358{
359 typeset -i nmirrors=$1
360
361 shift
362 while ((nmirrors > 0)); do
363 log_must test -n "$1" -a -n "$2"
364 [[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
365 log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
366 shift 2
367 ((nmirrors = nmirrors - 1))
368 done
369}
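# E.g. create the two 2-way mirror pools ${TESTPOOL}2 and ${TESTPOOL}1
# (illustrative; the device names are hypothetical):
#
#	setup_mirrors 2 sdb sdc sdd sde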
370
371#
372# create a number of raidz pools.
373# We create a number($1) of 2 raidz pools using the pairs of disks named
374# on the command line. These pools are *not* mounted
375# @parameters: $1 the number of pools to create
376# $... the devices to use to create the pools on
377# @uses: ZPOOL ZFS TESTPOOL
378function setup_raidzs
379{
380 typeset -i nraidzs=$1
381
382 shift
383 while ((nraidzs > 0)); do
384 log_must test -n "$1" -a -n "$2"
385 [[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
386 log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
387 shift 2
388 ((nraidzs = nraidzs - 1))
389 done
390}
391
392#
393# Destroy the configured testpool mirrors.
394# the mirrors are of the form ${TESTPOOL}{number}
395# @uses: ZPOOL ZFS TESTPOOL
396function destroy_mirrors
397{
398 default_cleanup_noexit
399
400 log_pass
401}
402
403#
404# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
405# $1 the list of disks
406#
407function default_raidz_setup
408{
409 typeset disklist="$*"
410 disks=(${disklist[*]})
411
412 if [[ ${#disks[*]} -lt 2 ]]; then
413 log_fail "A raid-z requires a minimum of two disks."
414 fi
415
416 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
417 log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
418 log_must $ZFS create $TESTPOOL/$TESTFS
419 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
420
421 log_pass
422}
423
424#
425# Common function used to cleanup storage pools and datasets.
426#
427# Invoked at the start of the test suite to ensure the system
428# is in a known state, and also at the end of each set of
429# sub-tests to ensure errors from one set of tests don't
430# impact the execution of the next set.
431
432function default_cleanup
433{
434 default_cleanup_noexit
435
436 log_pass
437}
438
439function default_cleanup_noexit
440{
441 typeset exclude=""
442 typeset pool=""
443 #
444 # Destroying the pool will also destroy any
445 # filesystems it contains.
446 #
447 if is_global_zone; then
448 $ZFS unmount -a > /dev/null 2>&1
449 [[ -z "$KEEP" ]] && KEEP="rpool"
450 exclude=`eval $ECHO \"'(${KEEP})'\"`
451 ALL_POOLS=$($ZPOOL list -H -o name \
452 | $GREP -v "$NO_POOLS" | $EGREP -vw "$exclude")
453 # Here, we loop through the pools we're allowed to
454 # destroy, only destroying them if it's safe to do
455 # so.
456 while [ ! -z ${ALL_POOLS} ]
457 do
458 for pool in ${ALL_POOLS}
459 do
460 if safe_to_destroy_pool $pool ;
461 then
462 destroy_pool $pool
463 fi
464 ALL_POOLS=$($ZPOOL list -H -o name \
465 | $GREP -v "$NO_POOLS" \
466 | $EGREP -v "$exclude")
467 done
468 done
469
470 $ZFS mount -a
471 else
472 typeset fs=""
473 for fs in $($ZFS list -H -o name \
474 | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
475 datasetexists $fs && \
476 log_must $ZFS destroy -Rf $fs
477 done
478
479 # Clean up here to avoid leaving garbage directories behind.
480 for fs in $($ZFS list -H -o name); do
481 [[ $fs == /$ZONE_POOL ]] && continue
482 [[ -d $fs ]] && log_must $RM -rf $fs/*
483 done
484
485 #
486 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file system properties to
487 # their default values.
488 #
489 for fs in $($ZFS list -H -o name); do
490 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
491 log_must $ZFS set reservation=none $fs
492 log_must $ZFS set recordsize=128K $fs
493 log_must $ZFS set mountpoint=/$fs $fs
494 typeset enc=""
495 enc=$(get_prop encryption $fs)
496 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
497 [[ "$enc" == "off" ]]; then
498 log_must $ZFS set checksum=on $fs
499 fi
500 log_must $ZFS set compression=off $fs
501 log_must $ZFS set atime=on $fs
502 log_must $ZFS set devices=off $fs
503 log_must $ZFS set exec=on $fs
504 log_must $ZFS set setuid=on $fs
505 log_must $ZFS set readonly=off $fs
506 log_must $ZFS set snapdir=hidden $fs
507 log_must $ZFS set aclmode=groupmask $fs
508 log_must $ZFS set aclinherit=secure $fs
509 fi
510 done
511 fi
512
513 [[ -d $TESTDIR ]] && \
514 log_must $RM -rf $TESTDIR
515
516 disk1=${DISKS%% *}
517 if is_mpath_device $disk1; then
518 delete_partitions
519 fi
520}
521
522
523#
524# Common function used to cleanup storage pools, file systems
525# and containers.
526#
527function default_container_cleanup
528{
529 if ! is_global_zone; then
530 reexport_pool
531 fi
532
533 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
534 [[ $? -eq 0 ]] && \
535 log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1
536
537 datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
538 log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
539
540 datasetexists $TESTPOOL/$TESTCTR && \
541 log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR
542
543 [[ -e $TESTDIR1 ]] && \
544 log_must $RM -rf $TESTDIR1 > /dev/null 2>&1
545
546 default_cleanup
547}
548
549#
550# Common function used to clean up a snapshot of a file system or volume.
551# Defaults to deleting the file system's snapshot.
552#
553# $1 snapshot name
554#
555function destroy_snapshot
556{
557 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
558
559 if ! snapexists $snap; then
560 log_fail "'$snap' does not exist."
561 fi
562
563 #
564 # The value returned by 'get_prop' is not the real mountpoint when the
565 # snapshot is unmounted, so first check that this snapshot is actually
566 # mounted on the current system.
567 #
568 typeset mtpt=""
569 if ismounted $snap; then
570 mtpt=$(get_prop mountpoint $snap)
571 (($? != 0)) && \
572 log_fail "get_prop mountpoint $snap failed."
573 fi
574
575 log_must $ZFS destroy $snap
576 [[ $mtpt != "" && -d $mtpt ]] && \
577 log_must $RM -rf $mtpt
578}
579
580#
581# Common function used to cleanup clone.
582#
583# $1 clone name
584#
585function destroy_clone
586{
587 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
588
589 if ! datasetexists $clone; then
590 log_fail "'$clone' does not exist."
591 fi
592
593 # Same reasoning as in destroy_snapshot.
594 typeset mtpt=""
595 if ismounted $clone; then
596 mtpt=$(get_prop mountpoint $clone)
597 (($? != 0)) && \
598 log_fail "get_prop mountpoint $clone failed."
599 fi
600
601 log_must $ZFS destroy $clone
602 [[ $mtpt != "" && -d $mtpt ]] && \
603 log_must $RM -rf $mtpt
604}
605
606#
607# Common function used to clean up a bookmark of a file system or volume.
608# Defaults to deleting the file system's bookmark.
609#
610# $1 bookmark name
611#
612function destroy_bookmark
613{
614 typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
615
616 if ! bkmarkexists $bkmark; then
617 log_fail "'$bkmark' does not exist."
618 fi
619
620 log_must $ZFS destroy $bkmark
621}
622
623# Return 0 if a snapshot exists; $? otherwise
624#
625# $1 - snapshot name
626
627function snapexists
628{
629 $ZFS list -H -t snapshot "$1" > /dev/null 2>&1
630 return $?
631}
632
633#
634# Return 0 if a bookmark exists; $? otherwise
635#
636# $1 - bookmark name
637#
638function bkmarkexists
639{
640 $ZFS list -H -t bookmark "$1" > /dev/null 2>&1
641 return $?
642}
643
644#
645# Set a property to a certain value on a dataset.
646# Sets a property of the dataset to the value as passed in.
647# @param:
648# $1 dataset whose property is being set
649# $2 property to set
650# $3 value to set property to
651# @return:
652# 0 if the property could be set.
653# non-zero otherwise.
654# @use: ZFS
655#
656function dataset_setprop
657{
658 typeset fn=dataset_setprop
659
660 if (($# < 3)); then
661 log_note "$fn: Insufficient parameters (need 3, had $#)"
662 return 1
663 fi
664 typeset output=
665 output=$($ZFS set $2=$3 $1 2>&1)
666 typeset rv=$?
667 if ((rv != 0)); then
668 log_note "Setting property on $1 failed."
669 log_note "property $2=$3"
670 log_note "Return Code: $rv"
671 log_note "Output: $output"
672 return $rv
673 fi
674 return 0
675}
676
677#
678# Assign suite defined dataset properties.
679# This function is used to apply the suite's defined default set of
680# properties to a dataset.
681# @parameters: $1 dataset to use
682# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
683# @returns:
684# 0 if the dataset has been altered.
685# 1 if no pool name was passed in.
686# 2 if the dataset could not be found.
687# 3 if the dataset could not have its properties set.
688#
689function dataset_set_defaultproperties
690{
691 typeset dataset="$1"
692
693 [[ -z $dataset ]] && return 1
694
695 typeset confset=
696 typeset -i found=0
697 for confset in $($ZFS list); do
698 if [[ $dataset = $confset ]]; then
699 found=1
700 break
701 fi
702 done
703 [[ $found -eq 0 ]] && return 2
704 if [[ -n $COMPRESSION_PROP ]]; then
705 dataset_setprop $dataset compression $COMPRESSION_PROP || \
706 return 3
707 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
708 fi
709 if [[ -n $CHECKSUM_PROP ]]; then
710 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
711 return 3
712 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
713 fi
714 return 0
715}
716
717#
718# Check a numeric assertion
719# @parameter: $@ the assertion to check
720# @output: big loud notice if assertion failed
721# @use: log_fail
722#
723function assert
724{
725 (($@)) || log_fail "$@"
726}
727
728#
729# Function to format partition size of a disk
730# Given a disk cxtxdx, reduce all of its partitions
731# to size 0.
732#
733function zero_partitions #<whole_disk_name>
734{
735 typeset diskname=$1
736 typeset i
737
738 if is_linux; then
739 log_must $FORMAT $DEV_DSKDIR/$diskname -s -- mklabel gpt
740 else
741 for i in 0 1 3 4 5 6 7
742 do
743 set_partition $i "" 0mb $diskname
744 done
745 fi
746}
747
748#
749# Given a slice, size and disk, this function
750# formats the slice to the specified size.
751# Size should be specified with units as per
752# the `format` command requirements eg. 100mb 3gb
753#
754# NOTE: This entire interface is problematic for the Linux parted utility
755# which requires the end of the partition to be specified. It would be
756# best to retire this interface and replace it with something more flexible.
757# At the moment a best effort is made.
758#
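# E.g. create a 100mb slice 0 at the start of a hypothetical disk "sdb"
# (illustrative only):
#	set_partition 0 "" 100mb sdb
#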
759function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
760{
761 typeset -i slicenum=$1
762 typeset start=$2
763 typeset size=$3
764 typeset disk=$4
765 [[ -z $slicenum || -z $size || -z $disk ]] && \
766 log_fail "The slice, size or disk name is unspecified."
767
768 if is_linux; then
769 typeset size_mb=${size%%[mMgG]}
770
771 size_mb=${size_mb%%[mMgG][bB]}
772 if [[ ${size:1:1} == 'g' ]]; then
773 ((size_mb = size_mb * 1024))
774 fi
775
776 # Create GPT partition table when setting slice 0 or
777 # when the device doesn't already contain a GPT label.
778 $FORMAT $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
779 typeset ret_val=$?
780 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
781 log_must $FORMAT $DEV_DSKDIR/$disk -s -- mklabel gpt
782 fi
783
784 # When no start is given align on the first cylinder.
785 if [[ -z "$start" ]]; then
786 start=1
787 fi
788
789 # Determine the cylinder size for the device and using
790 # that calculate the end offset in cylinders.
791 typeset -i cly_size_kb=0
792 cly_size_kb=$($FORMAT -m $DEV_DSKDIR/$disk -s -- \
793 unit cyl print | $HEAD -3 | $TAIL -1 | \
794 $AWK -F '[:k.]' '{print $4}')
795 ((end = (size_mb * 1024 / cly_size_kb) + start))
796
797 log_must $FORMAT $DEV_DSKDIR/$disk -s -- \
798 mkpart part$slicenum ${start}cyl ${end}cyl
799
800 $BLOCKDEV --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
801 block_device_wait
802 else
803 typeset format_file=/var/tmp/format_in.$$
804
805 $ECHO "partition" >$format_file
806 $ECHO "$slicenum" >> $format_file
807 $ECHO "" >> $format_file
808 $ECHO "" >> $format_file
809 $ECHO "$start" >> $format_file
810 $ECHO "$size" >> $format_file
811 $ECHO "label" >> $format_file
812 $ECHO "" >> $format_file
813 $ECHO "q" >> $format_file
814 $ECHO "q" >> $format_file
815
816 $FORMAT -e -s -d $disk -f $format_file
817 fi
818 typeset ret_val=$?
819 $RM -f $format_file
820 [[ $ret_val -ne 0 ]] && \
821 log_fail "Unable to format $disk slice $slicenum to $size"
822 return 0
823}
824
825#
826# Delete all partitions on all disks - this is specifically for the use of multipath
827# devices which currently can only be used in the test suite as raw/un-partitioned
828# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
829#
830function delete_partitions
831{
832 typeset -i j=1
833
834 if [[ -z $DISK_ARRAY_NUM ]]; then
835 DISK_ARRAY_NUM=$($ECHO ${DISKS} | $NAWK '{print NF}')
836 fi
837 if [[ -z $DISKSARRAY ]]; then
838 DISKSARRAY=$DISKS
839 fi
840
841 if is_linux; then
842 if (( $DISK_ARRAY_NUM == 1 )); then
843 while ((j < MAX_PARTITIONS)); do
844 $FORMAT $DEV_DSKDIR/$DISK -s rm $j > /dev/null 2>&1
845 if (( $? == 1 )); then
846 $LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null
847 if (( $? == 1 )); then
848 log_note "Partitions for $DISK should be deleted"
849 else
850 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
851 fi
852 return 0
853 else
854 $LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null
855 if (( $? == 0 )); then
856 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
857 fi
858 fi
859 ((j = j+1))
860 done
861 else
862 for disk in `$ECHO $DISKSARRAY`; do
863 while ((j < MAX_PARTITIONS)); do
864 $FORMAT $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
865 if (( $? == 1 )); then
866 $LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null
867 if (( $? == 1 )); then
868 log_note "Partitions for $disk should be deleted"
869 else
870 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
871 fi
872 j=7
873 else
874 $LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null
875 if (( $? == 0 )); then
876 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
877 fi
878 fi
879 ((j = j+1))
880 done
881 j=1
882 done
883 fi
884 fi
885 return 0
886}
887
888#
889# Get the end cyl of the given slice
890#
891function get_endslice #<disk> <slice>
892{
893 typeset disk=$1
894 typeset slice=$2
895 if [[ -z $disk || -z $slice ]] ; then
896 log_fail "The disk name or slice number is unspecified."
897 fi
898
899 if is_linux; then
900 endcyl=$($FORMAT -s $DEV_DSKDIR/$disk -- unit cyl print | \
901 $GREP "part${slice}" | \
902 $AWK '{print $3}' | \
903 $SED 's,cyl,,')
904 ((endcyl = (endcyl + 1)))
905 else
906 disk=${disk#/dev/dsk/}
907 disk=${disk#/dev/rdsk/}
908 disk=${disk%s*}
909
910 typeset -i ratio=0
911 ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
912 $GREP "sectors\/cylinder" | \
913 $AWK '{print $2}')
914
915 if ((ratio == 0)); then
916 return
917 fi
918
919 typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
920 $NAWK -v token="$slice" '{if ($1==token) print $6}')
921
922 ((endcyl = (endcyl + 1) / ratio))
923 fi
924
925 echo $endcyl
926}
927
928
929#
930# Given a size, disk and total slice number, this function formats the
931# disk slices from 0 to the total slice number with the same specified
932# size.
933#
934function partition_disk #<slice_size> <whole_disk_name> <total_slices>
935{
936 typeset -i i=0
937 typeset slice_size=$1
938 typeset disk_name=$2
939 typeset total_slices=$3
940 typeset cyl
941
942 zero_partitions $disk_name
943 while ((i < $total_slices)); do
944 if ! is_linux; then
945 if ((i == 2)); then
946 ((i = i + 1))
947 continue
948 fi
949 fi
950 set_partition $i "$cyl" $slice_size $disk_name
951 cyl=$(get_endslice $disk_name $i)
952 ((i = i+1))
953 done
954}
955
956#
957# This function continues to write to a filenum number of files into dirnum
958# number of directories until either $FILE_WRITE returns an error or the
959# maximum number of files per directory have been written.
960#
961# Usage:
962# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
963#
964# Return value: 0 on success
965# non 0 on error
966#
967# Where :
968# destdir: is the directory where everything is to be created under
969# dirnum: the maximum number of subdirectories to use, -1 no limit
970# filenum: the maximum number of files per subdirectory
971# bytes: number of bytes to write
972# num_writes: number of times to write out bytes
973# data: the data that will be written
974#
975# E.g.
976# fill_fs /testdir 20 25 1024 256 0
977#
978# Note: bytes * num_writes equals the size of the testfile
979#
980function fill_fs # destdir dirnum filenum bytes num_writes data
981{
982 typeset destdir=${1:-$TESTDIR}
983 typeset -i dirnum=${2:-50}
984 typeset -i filenum=${3:-50}
985 typeset -i bytes=${4:-8192}
986 typeset -i num_writes=${5:-10240}
987 typeset -i data=${6:-0}
988
989 typeset -i odirnum=1
990 typeset -i idirnum=0
991 typeset -i fn=0
992 typeset -i retval=0
993
994 log_must $MKDIR -p $destdir/$idirnum
995 while (($odirnum > 0)); do
996 if ((dirnum >= 0 && idirnum >= dirnum)); then
997 odirnum=0
998 break
999 fi
1000 $FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \
1001 -b $bytes -c $num_writes -d $data
1002 retval=$?
1003 if (($retval != 0)); then
1004 odirnum=0
1005 break
1006 fi
1007 if (($fn >= $filenum)); then
1008 fn=0
1009 ((idirnum = idirnum + 1))
1010 log_must $MKDIR -p $destdir/$idirnum
1011 else
1012 ((fn = fn + 1))
1013 fi
1014 done
1015 return $retval
1016}
1017
1018#
1019# Simple function to get the specified property. If unable to
1020# get the property, return 1.
1021#
1022# Note property is in 'parsable' format (-p)
1023#
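# E.g. (illustrative):
#	used=$(get_prop used $TESTPOOL/$TESTFS) || log_fail "get_prop failed"
#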
1024function get_prop # property dataset
1025{
1026 typeset prop_val
1027 typeset prop=$1
1028 typeset dataset=$2
1029
1030 prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
1031 if [[ $? -ne 0 ]]; then
1032 log_note "Unable to get $prop property for dataset " \
1033 "$dataset"
1034 return 1
1035 fi
1036
1037 $ECHO "$prop_val"
1038 return 0
1039}
1040
1041#
1042# Simple function to get the specified property of a pool. If unable to
1043# get the property, return 1.
1044#
1045# Note property is in 'parsable' format (-p)
1046#
1047function get_pool_prop # property pool
1048{
1049 typeset prop_val
1050 typeset prop=$1
1051 typeset pool=$2
1052
1053 if poolexists $pool ; then
1054 prop_val=$($ZPOOL get -pH $prop $pool 2>/dev/null | $TAIL -1 | \
1055 $AWK '{print $3}')
1056 if [[ $? -ne 0 ]]; then
1057 log_note "Unable to get $prop property for pool " \
1058 "$pool"
1059 return 1
1060 fi
1061 else
1062 log_note "Pool $pool does not exist."
1063 return 1
1064 fi
1065
1066 $ECHO "$prop_val"
1067 return 0
1068}
1069
1070# Return 0 if a pool exists; $? otherwise
1071#
1072# $1 - pool name
1073
1074function poolexists
1075{
1076 typeset pool=$1
1077
1078 if [[ -z $pool ]]; then
1079 log_note "No pool name given."
1080 return 1
1081 fi
1082
1083 $ZPOOL get name "$pool" > /dev/null 2>&1
1084 return $?
1085}
1086
1087# Return 0 if all the specified datasets exist; $? otherwise
1088#
1089# $1-n dataset name
1090function datasetexists
1091{
1092 if (($# == 0)); then
1093 log_note "No dataset name given."
1094 return 1
1095 fi
1096
1097 while (($# > 0)); do
1098 $ZFS get name $1 > /dev/null 2>&1 || \
1099 return $?
1100 shift
1101 done
1102
1103 return 0
1104}
1105
1106# return 0 if none of the specified datasets exists, otherwise return 1.
1107#
1108# $1-n dataset name
1109function datasetnonexists
1110{
1111 if (($# == 0)); then
1112 log_note "No dataset name given."
1113 return 1
1114 fi
1115
1116 while (($# > 0)); do
1117 $ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1118 && return 1
1119 shift
1120 done
1121
1122 return 0
1123}
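# Example usage of the existence helpers above (illustrative):
#
#	datasetexists $TESTPOOL/$TESTFS || log_fail "$TESTPOOL/$TESTFS is missing"
#	datasetnonexists $TESTPOOL/$TESTCLONE && log_note "clone is gone"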
1124
1125#
1126# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1127#
1128# Returns 0 if shared, 1 otherwise.
1129#
1130function is_shared
1131{
1132 typeset fs=$1
1133 typeset mtpt
1134
1135 if [[ $fs != "/"* ]] ; then
1136 if datasetnonexists "$fs" ; then
1137 return 1
1138 else
1139 mtpt=$(get_prop mountpoint "$fs")
1140 case $mtpt in
1141 none|legacy|-) return 1
1142 ;;
1143 *) fs=$mtpt
1144 ;;
1145 esac
1146 fi
1147 fi
1148
1149 if is_linux; then
1150 for mtpt in `$SHARE | $AWK '{print $1}'` ; do
1151 if [[ $mtpt == $fs ]] ; then
1152 return 0
1153 fi
1154 done
1155 return 1
1156 fi
1157
1158 for mtpt in `$SHARE | $AWK '{print $2}'` ; do
1159 if [[ $mtpt == $fs ]] ; then
1160 return 0
1161 fi
1162 done
1163
1164 typeset stat=$($SVCS -H -o STA nfs/server:default)
1165 if [[ $stat != "ON" ]]; then
1166 log_note "Current nfs/server status: $stat"
1167 fi
1168
1169 return 1
1170}
1171
1172#
1173# Given a dataset name determine if it is shared via SMB.
1174#
1175# Returns 0 if shared, 1 otherwise.
1176#
1177function is_shared_smb
1178{
1179 typeset fs=$1
1180 typeset mtpt
1181
1182 if datasetnonexists "$fs" ; then
1183 return 1
1184 else
1185 fs=$(echo $fs | sed 's@/@_@g')
1186 fi
1187
1188 if is_linux; then
1189 for mtpt in `$NET usershare list | $AWK '{print $1}'` ; do
1190 if [[ $mtpt == $fs ]] ; then
1191 return 0
1192 fi
1193 done
1194 return 1
1195 else
1196 log_unsupported "Currently unsupported by the test framework"
1197 return 1
1198 fi
1199}
1200
1201#
1202# Given a mountpoint, determine if it is not shared via NFS.
1203#
1204# Returns 0 if not shared, 1 otherwise.
1205#
1206function not_shared
1207{
1208 typeset fs=$1
1209
1210 is_shared $fs
1211 if (($? == 0)); then
1212 return 1
1213 fi
1214
1215 return 0
1216}
1217
1218#
1219# Given a dataset determine if it is not shared via SMB.
1220#
1221# Returns 0 if not shared, 1 otherwise.
1222#
1223function not_shared_smb
1224{
1225 typeset fs=$1
1226
1227 is_shared_smb $fs
1228 if (($? == 0)); then
1229 return 1
1230 fi
1231
1232 return 0
1233}
1234
1235#
1236# Helper function to unshare a mountpoint.
1237#
1238function unshare_fs #fs
1239{
1240 typeset fs=$1
1241
1242 is_shared $fs || is_shared_smb $fs
1243 if (($? == 0)); then
1244 log_must $ZFS unshare $fs
1245 fi
1246
1247 return 0
1248}
1249
1250#
1251# Helper function to share a NFS mountpoint.
1252#
1253function share_nfs #fs
1254{
1255 typeset fs=$1
1256
1257 if is_linux; then
1258 is_shared $fs
1259 if (($? != 0)); then
1260 log_must $SHARE "*:$fs"
1261 fi
1262 else
1263 is_shared $fs
1264 if (($? != 0)); then
1265 log_must $SHARE -F nfs $fs
1266 fi
1267 fi
1268
1269 return 0
1270}
1271
1272#
1273# Helper function to unshare a NFS mountpoint.
1274#
1275function unshare_nfs #fs
1276{
1277 typeset fs=$1
1278
1279 if is_linux; then
1280 is_shared $fs
1281 if (($? == 0)); then
1282 log_must $UNSHARE -u "*:$fs"
1283 fi
1284 else
1285 is_shared $fs
1286 if (($? == 0)); then
1287 log_must $UNSHARE -F nfs $fs
1288 fi
1289 fi
1290
1291 return 0
1292}
1293
1294#
1295# Helper function to show NFS shares.
1296#
1297function showshares_nfs
1298{
1299 if is_linux; then
1300 $SHARE -v
1301 else
1302 $SHARE -F nfs
1303 fi
1304
1305 return 0
1306}
1307
1308#
1309# Helper function to show SMB shares.
1310#
1311function showshares_smb
1312{
1313 if is_linux; then
1314 $NET usershare list
1315 else
1316 $SHARE -F smb
1317 fi
1318
1319 return 0
1320}
1321
1322#
1323# Check NFS server status and trigger it online.
1324#
1325function setup_nfs_server
1326{
1327 # Cannot share directory in non-global zone.
1328 #
1329 if ! is_global_zone; then
1330 log_note "Cannot trigger NFS server by sharing in LZ."
1331 return
1332 fi
1333
1334 if is_linux; then
1335 log_note "NFS server must be started prior to running the test framework."
1336 return
1337 fi
1338
1339 typeset nfs_fmri="svc:/network/nfs/server:default"
1340 if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
1341 #
1342 # Only really sharing operation can enable NFS server
1343 # to online permanently.
1344 #
1345 typeset dummy=/tmp/dummy
1346
1347 if [[ -d $dummy ]]; then
1348 log_must $RM -rf $dummy
1349 fi
1350
1351 log_must $MKDIR $dummy
1352 log_must $SHARE $dummy
1353
1354 #
1355 # Wait for the fmri's status to reach its final state.
1356 # While in transition an asterisk (*) is appended to the
1357 # instance status, and unsharing would flip the status back
1358 # to 'DIS'.
1359 # Wait for at least 1 second.
1360 #
1361 log_must $SLEEP 1
1362 timeout=10
1363 while [[ timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
1364 do
1365 log_must $SLEEP 1
1366
1367 ((timeout -= 1))
1368 done
1369
1370 log_must $UNSHARE $dummy
1371 log_must $RM -rf $dummy
1372 fi
1373
1374 log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
1375}
1376
1377#
1378# To verify whether calling process is in global zone
1379#
1380# Return 0 if in global zone, 1 in non-global zone
1381#
1382function is_global_zone
1383{
1384 typeset cur_zone=$($ZONENAME 2>/dev/null)
1385 if [[ $cur_zone != "global" ]]; then
1386 return 1
1387 fi
1388 return 0
1389}
1390
1391#
1392# Verify whether test is permitted to run from
1393# global zone, local zone, or both
1394#
1395# $1 zone limit, could be "global", "local", or "both"(no limit)
1396#
1397# Return 0 if permitted, otherwise exit with log_unsupported
1398#
1399function verify_runnable # zone limit
1400{
1401 typeset limit=$1
1402
1403 [[ -z $limit ]] && return 0
1404
1405 if is_global_zone ; then
1406 case $limit in
1407 global|both)
1408 ;;
1409 local) log_unsupported "Test is unable to run from "\
1410 "global zone."
1411 ;;
1412 *) log_note "Warning: unknown limit $limit - " \
1413 "use both."
1414 ;;
1415 esac
1416 else
1417 case $limit in
1418 local|both)
1419 ;;
1420 global) log_unsupported "Test is unable to run from "\
1421 "local zone."
1422 ;;
1423 *) log_note "Warning: unknown limit $limit - " \
1424 "use both."
1425 ;;
1426 esac
1427
1428 reexport_pool
1429 fi
1430
1431 return 0
1432}
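# E.g. a test that may only run in the global zone starts with (illustrative):
#	verify_runnable "global"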
1433
1434# Return 0 if created successfully or the pool already exists; $? otherwise
1435# Note: In local zones, this function should return 0 silently.
1436#
1437# $1 - pool name
1438# $2-n - [keyword] devs_list
1439
1440function create_pool #pool devs_list
1441{
1442 typeset pool=${1%%/*}
1443
1444 shift
1445
1446 if [[ -z $pool ]]; then
1447 log_note "Missing pool name."
1448 return 1
1449 fi
1450
1451 if poolexists $pool ; then
1452 destroy_pool $pool
1453 fi
1454
1455 if is_global_zone ; then
1456 [[ -d /$pool ]] && $RM -rf /$pool
1457 log_must $ZPOOL create -f $pool $@
1458 fi
1459
1460 return 0
1461}
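# E.g. (illustrative; the device names are hypothetical):
#	create_pool $TESTPOOL mirror sdb sdc
#	destroy_pool $TESTPOOL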
1462
1463# Return 0 if destroyed successfully; $? otherwise
1464# Note: In local zones, this function should return 0 silently.
1465#
1466# $1 - pool name
1467# Destroy pool with the given parameters.
1468
1469function destroy_pool #pool
1470{
1471 typeset pool=${1%%/*}
1472 typeset mtpt
1473
1474 if [[ -z $pool ]]; then
1475 log_note "No pool name given."
1476 return 1
1477 fi
1478
1479 if is_global_zone ; then
1480 if poolexists "$pool" ; then
1481 mtpt=$(get_prop mountpoint "$pool")
1482
1483 # At times, syseventd activity can cause attempts to
1484 # destroy a pool to fail with EBUSY. We retry a few
1485 # times allowing failures before requiring the destroy
1486 # to succeed.
1487 typeset -i wait_time=10 ret=1 count=0
1488 must=""
1489 while [[ $ret -ne 0 ]]; do
1490 $must $ZPOOL destroy -f $pool
1491 ret=$?
1492 [[ $ret -eq 0 ]] && break
1493 log_note "zpool destroy failed with $ret"
1494 [[ count++ -ge 7 ]] && must=log_must
1495 $SLEEP $wait_time
1496 done
1497
1498 [[ -d $mtpt ]] && \
1499 log_must $RM -rf $mtpt
1500 else
1501 log_note "Pool does not exist. ($pool)"
1502 return 1
1503 fi
1504 fi
1505
1506 return 0
1507}
1508
1509#
1510# Firstly, create a pool with 5 datasets. Then, create a single zone and
1511# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1512# and a zvol device to the zone.
1513#
1514# $1 zone name
1515# $2 zone root directory prefix
1516# $3 zone ip
1517#
1518function zfs_zones_setup #zone_name zone_root zone_ip
1519{
1520 typeset zone_name=${1:-$(hostname)-z}
1521 typeset zone_root=${2:-"/zone_root"}
1522 typeset zone_ip=${3:-"10.1.1.10"}
1523 typeset prefix_ctr=$ZONE_CTR
1524 typeset pool_name=$ZONE_POOL
1525 typeset -i cntctr=5
1526 typeset -i i=0
1527
1528 # Create pool and 5 container within it
1529 #
1530 [[ -d /$pool_name ]] && $RM -rf /$pool_name
1531 log_must $ZPOOL create -f $pool_name $DISKS
1532 while ((i < cntctr)); do
1533 log_must $ZFS create $pool_name/$prefix_ctr$i
1534 ((i += 1))
1535 done
1536
1537 # create a zvol
1538 log_must $ZFS create -V 1g $pool_name/zone_zvol
1539 block_device_wait
1540
1541 #
1542 # If current system support slog, add slog device for pool
1543 #
1544 if verify_slog_support ; then
1545 typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
1546 log_must $MKFILE $MINVDEVSIZE $sdevs
1547 log_must $ZPOOL add $pool_name log mirror $sdevs
1548 fi
1549
1550 # this isn't supported just yet.
1551 # Create a filesystem. In order to add this to
1552 # the zone, it must have its mountpoint set to 'legacy'
1553 # log_must $ZFS create $pool_name/zfs_filesystem
1554 # log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem
1555
1556 [[ -d $zone_root ]] && \
1557 log_must $RM -rf $zone_root/$zone_name
1558 [[ ! -d $zone_root ]] && \
1559 log_must $MKDIR -p -m 0700 $zone_root/$zone_name
1560
1561 # Create zone configure file and configure the zone
1562 #
1563 typeset zone_conf=/tmp/zone_conf.$$
1564 $ECHO "create" > $zone_conf
1565 $ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
1566 $ECHO "set autoboot=true" >> $zone_conf
1567 i=0
1568 while ((i < cntctr)); do
1569 $ECHO "add dataset" >> $zone_conf
1570 $ECHO "set name=$pool_name/$prefix_ctr$i" >> \
1571 $zone_conf
1572 $ECHO "end" >> $zone_conf
1573 ((i += 1))
1574 done
1575
1576 # add our zvol to the zone
1577 $ECHO "add device" >> $zone_conf
1578 $ECHO "set match=$ZVOL_DEVDIR/$pool_name/zone_zvol" >> $zone_conf
1579 $ECHO "end" >> $zone_conf
1580
1581 # add a corresponding zvol rdsk to the zone
1582 $ECHO "add device" >> $zone_conf
1583 $ECHO "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1584 $ECHO "end" >> $zone_conf
1585
1586 # once it's supported, we'll add our filesystem to the zone
1587 # $ECHO "add fs" >> $zone_conf
1588 # $ECHO "set type=zfs" >> $zone_conf
1589 # $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
1590 # $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
1591 # $ECHO "end" >> $zone_conf
1592
1593 $ECHO "verify" >> $zone_conf
1594 $ECHO "commit" >> $zone_conf
1595 log_must $ZONECFG -z $zone_name -f $zone_conf
1596 log_must $RM -f $zone_conf
1597
1598 # Install the zone
1599 $ZONEADM -z $zone_name install
1600 if (($? == 0)); then
1601 log_note "SUCCESS: $ZONEADM -z $zone_name install"
1602 else
1603 log_fail "FAIL: $ZONEADM -z $zone_name install"
1604 fi
1605
1606 # Install sysidcfg file
1607 #
1608 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1609 $ECHO "system_locale=C" > $sysidcfg
1610 $ECHO "terminal=dtterm" >> $sysidcfg
1611 $ECHO "network_interface=primary {" >> $sysidcfg
1612 $ECHO "hostname=$zone_name" >> $sysidcfg
1613 $ECHO "}" >> $sysidcfg
1614 $ECHO "name_service=NONE" >> $sysidcfg
1615 $ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
1616 $ECHO "security_policy=NONE" >> $sysidcfg
1617 $ECHO "timezone=US/Eastern" >> $sysidcfg
1618
1619 # Boot this zone
1620 log_must $ZONEADM -z $zone_name boot
1621}
1622
1623#
1624# Reexport TESTPOOL & TESTPOOL(1-4)
1625#
1626function reexport_pool
1627{
1628 typeset -i cntctr=5
1629 typeset -i i=0
1630
1631 while ((i < cntctr)); do
1632 if ((i == 0)); then
1633 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1634 if ! ismounted $TESTPOOL; then
1635 log_must $ZFS mount $TESTPOOL
1636 fi
1637 else
1638 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1639 if eval ! ismounted \$TESTPOOL$i; then
1640 log_must eval $ZFS mount \$TESTPOOL$i
1641 fi
1642 fi
1643 ((i += 1))
1644 done
1645}
1646
1647#
1648# Verify a given disk or pool state
1649#
1650# Return 0 if pool/disk matches expected state, 1 otherwise
1651#
1652function check_state # pool disk state{online,offline,degraded}
1653{
1654 typeset pool=$1
1655 typeset disk=${2#$DEV_DSKDIR/}
1656 typeset state=$3
1657
1658 [[ -z $pool ]] || [[ -z $state ]] \
1659 && log_fail "Arguments invalid or missing"
1660
1661 if [[ -z $disk ]]; then
1662 #check pool state only
1663 $ZPOOL get -H -o value health $pool \
1664 | grep -i "$state" > /dev/null 2>&1
1665 else
1666 $ZPOOL status -v $pool | grep "$disk" \
1667 | grep -i "$state" > /dev/null 2>&1
1668 fi
1669
1670 return $?
1671}
1672
1673#
1674# Cause a scan of all scsi host adapters by default
1675#
1676# $1 optional host number
1677#
1678function scan_scsi_hosts
1679{
1680 typeset hostnum=${1}
1681
1682 if [[ -z $hostnum ]]; then
1683 for host in /sys/class/scsi_host/host*; do
1684 echo '- - -' > $host/scan
1685 done
1686 else
1687 echo "/sys/class/scsi_host/host$hostnum/scan"
1688 echo '- - -' > "/sys/class/scsi_host/host$hostnum/scan"
1689 fi
1690}
1691#
1692# Wait for newly created block devices to have their minors created.
1693#
1694function block_device_wait
1695{
1696 if is_linux; then
1697 $UDEVADM trigger
1698 $UDEVADM settle
1699 fi
1700}
1701
1702#
1703# Online or offline a disk on the system
1704#
1705# First checks state of disk. Test will fail if disk is not properly onlined
1706# or offlined. Online is a full rescan of SCSI disks by echoing to every
1707# host entry.
1708#
1709function on_off_disk # disk state{online,offline} host
1710{
1711 typeset disk=$1
1712 typeset state=$2
1713 typeset host=$3
1714
1715 [[ -z $disk ]] || [[ -z $state ]] && \
1716 log_fail "Arguments invalid or missing"
1717
1718 if is_linux; then
1719 if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
1720 dm_name="$($READLINK $DEV_DSKDIR/$disk \
1721 | $NAWK -F / '{print $2}')"
1722 slave="$($LS /sys/block/${dm_name}/slaves \
1723 | $NAWK '{print $1}')"
1724 while [[ -n $slave ]]; do
1725 #check if disk is online
1726 $LSSCSI | $EGREP $slave > /dev/null
1727 if (($? == 0)); then
1728 slave_dir="/sys/block/${dm_name}"
1729 slave_dir+="/slaves/${slave}/device"
1730 ss="${slave_dir}/state"
1731 sd="${slave_dir}/delete"
1732 log_must eval "$ECHO 'offline' > ${ss}"
1733 log_must eval "$ECHO '1' > ${sd}"
1734 $LSSCSI | $EGREP $slave > /dev/null
1735 if (($? == 0)); then
1736 log_fail "Offlining" \
1737 "$disk failed"
1738 fi
1739 fi
1740 slave="$($LS /sys/block/$dm_name/slaves \
1741 2>/dev/null | $NAWK '{print $1}')"
1742 done
1743 elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
1744 #check if disk is online
1745 $LSSCSI | $EGREP $disk > /dev/null
1746 if (($? == 0)); then
1747 dev_state="/sys/block/$disk/device/state"
1748 dev_delete="/sys/block/$disk/device/delete"
1749 log_must eval "$ECHO 'offline' > ${dev_state}"
1750 log_must eval "$ECHO '1' > ${dev_delete}"
1751 $LSSCSI | $EGREP $disk > /dev/null
1752 if (($? == 0)); then
1753 log_fail "Offlining $disk" \
1754 "failed"
1755 fi
1756 else
1757 log_note "$disk is already offline"
1758 fi
1759 elif [[ $state == "online" ]]; then
1760 #force a full rescan
1761 log_must scan_scsi_hosts $host
1762 block_device_wait
1763 if is_mpath_device $disk; then
1764 dm_name="$($READLINK $DEV_DSKDIR/$disk \
1765 | $NAWK -F / '{print $2}')"
1766 slave="$($LS /sys/block/$dm_name/slaves \
1767 | $NAWK '{print $1}')"
1768 $LSSCSI | $EGREP $slave > /dev/null
1769 if (($? != 0)); then
1770 log_fail "Onlining $disk failed"
1771 fi
1772 elif is_real_device $disk; then
1773 $LSSCSI | $EGREP $disk > /dev/null
1774 if (($? != 0)); then
1775 log_fail "Onlining $disk failed"
1776 fi
1777 else
1778 log_fail "$disk is not a real dev"
1779 fi
1780 else
1781 log_fail "$disk failed to $state"
1782 fi
1783 fi
1784}
1785
1786#
1787# Get the mountpoint of snapshot
1788# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1789# as its mountpoint
1790#
1791function snapshot_mountpoint
1792{
1793 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1794
1795 if [[ $dataset != *@* ]]; then
1796 log_fail "Invalid snapshot name '$dataset'."
1797 fi
1798
1799 typeset fs=${dataset%@*}
1800 typeset snap=${dataset#*@}
1801
1802 if [[ -z $fs || -z $snap ]]; then
1803 log_fail "Invalid snapshot name '$dataset'."
1804 fi
1805
1806 $ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1807}
1808
1809#
1810# Given a pool and file system, this function will verify the file system
1811# using the zdb internal tool. Note that the pool is exported and imported
1812# to ensure it has consistent state.
1813#
1814function verify_filesys # pool filesystem dir
1815{
1816 typeset pool="$1"
1817 typeset filesys="$2"
1818 typeset zdbout="/tmp/zdbout.$$"
1819
1820 shift
1821 shift
1822 typeset dirs=$@
1823 typeset search_path=""
1824
1825 log_note "Calling $ZDB to verify filesystem '$filesys'"
1826 $ZFS unmount -a > /dev/null 2>&1
1827 log_must $ZPOOL export $pool
1828
1829 if [[ -n $dirs ]] ; then
1830 for dir in $dirs ; do
1831 search_path="$search_path -d $dir"
1832 done
1833 fi
1834
1835 log_must $ZPOOL import $search_path $pool
1836
1837 $ZDB -cudi $filesys > $zdbout 2>&1
1838 if [[ $? != 0 ]]; then
1839 log_note "Output: $ZDB -cudi $filesys"
1840 $CAT $zdbout
1841 log_fail "$ZDB detected errors with: '$filesys'"
1842 fi
1843
1844 log_must $ZFS mount -a
1845 log_must $RM -rf $zdbout
1846}
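# E.g. verify $TESTPOOL/$TESTFS, letting the re-import search $TESTDIR for
# backing vdev files (illustrative):
#	verify_filesys $TESTPOOL $TESTPOOL/$TESTFS $TESTDIR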
1847
1848#
1849# Given a pool, list all disks in the pool.
1850#
1851function get_disklist # pool
1852{
1853 typeset disklist=""
1854
1855 disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \
1856 $GREP -v "\-\-\-\-\-" | \
1857 $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1858
1859 $ECHO $disklist
1860}
1861
1862#
1863# Given a pool, list all disks in the pool with their full
1864# path (like "/dev/sda" instead of "sda").
1865#
1866function get_disklist_fullpath # pool
1867{
1868 args="-P $1"
1869 get_disklist $args
1870}
1871
1872
1873
1874# /**
1875# This function kills a given list of processes after a time period. We use
1876# this in the stress tests instead of STF_TIMEOUT so that we can have processes
1877# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1878# would be listed as FAIL, which we don't want : we're happy with stress tests
1879# running for a certain amount of time, then finishing.
1880#
1881# @param $1 the time in seconds after which we should terminate these processes
1882# @param $2..$n the processes we wish to terminate.
1883# */
1884function stress_timeout
1885{
1886 typeset -i TIMEOUT=$1
1887 shift
1888 typeset cpids="$@"
1889
1890 log_note "Waiting for child processes($cpids). " \
1891 "It could last dozens of minutes, please be patient ..."
1892 log_must $SLEEP $TIMEOUT
1893
1894 log_note "Killing child processes after ${TIMEOUT} stress timeout."
1895 typeset pid
1896 for pid in $cpids; do
1897 $PS -p $pid > /dev/null 2>&1
1898 if (($? == 0)); then
1899 log_must $KILL -USR1 $pid
1900 fi
1901 done
1902}
1903
1904#
1905# Verify a given hotspare disk is inuse or avail
1906#
1907# Return 0 if pool/disk matches expected state, 1 otherwise
1908#
1909function check_hotspare_state # pool disk state{inuse,avail}
1910{
1911 typeset pool=$1
1912 typeset disk=${2#$DEV_DSKDIR/}
1913 typeset state=$3
1914
1915 cur_state=$(get_device_state $pool $disk "spares")
1916
1917 if [[ $state != ${cur_state} ]]; then
1918 return 1
1919 fi
1920 return 0
1921}
1922
1923#
1924# Verify a given slog disk is inuse or avail
1925#
1926# Return 0 if pool/disk matches expected state, 1 otherwise
1927#
1928function check_slog_state # pool disk state{online,offline,unavail}
1929{
1930 typeset pool=$1
1931 typeset disk=${2#$DEV_DSKDIR/}
1932 typeset state=$3
1933
1934 cur_state=$(get_device_state $pool $disk "logs")
1935
1936 if [[ $state != ${cur_state} ]]; then
1937 return 1
1938 fi
1939 return 0
1940}
1941
1942#
1943# Verify a given vdev disk is inuse or avail
1944#
1945# Return 0 if pool/disk matches expected state, 1 otherwise
1946#
1947function check_vdev_state # pool disk state{online,offline,unavail}
1948{
1949 typeset pool=$1
1950 typeset disk=${2#$DEV_DSKDIR/}
1951 typeset state=$3
1952
1953 cur_state=$(get_device_state $pool $disk)
1954
1955 if [[ $state != ${cur_state} ]]; then
1956 return 1
1957 fi
1958 return 0
1959}
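# Example usage of the state checks above (illustrative; the device names
# are hypothetical and the expected states follow 'zpool status' output):
#
#	log_must check_vdev_state $TESTPOOL sdb "ONLINE"
#	log_must check_hotspare_state $TESTPOOL sdc "AVAIL"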
1960
1961#
1962# Check the output of 'zpool status -v <pool>'
1963# to see if the content of <token> contains the specified <keyword>.
1964#
1965# Return 0 if it does, 1 otherwise
1966#
1967function check_pool_status # pool token keyword
1968{
1969 typeset pool=$1
1970 typeset token=$2
1971 typeset keyword=$3
1972
1973 $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
1974 ($1==token) {print $0}' \
1975 | $GREP -i "$keyword" > /dev/null 2>&1
1976
1977 return $?
1978}
1979
1980#
1981# The following five functions are instances of check_pool_status():
1982# is_pool_resilvering - to check if the pool is resilver in progress
1983# is_pool_resilvered - to check if the pool is resilver completed
1984# is_pool_scrubbing - to check if the pool is scrub in progress
1985# is_pool_scrubbed - to check if the pool is scrub completed
1986# is_pool_scrub_stopped - to check if the pool is scrub stopped
1987#
1988function is_pool_resilvering #pool
1989{
1990 check_pool_status "$1" "scan" "resilver in progress since "
1991 return $?
1992}
1993
1994function is_pool_resilvered #pool
1995{
1996 check_pool_status "$1" "scan" "resilvered "
1997 return $?
1998}
1999
2000function is_pool_scrubbing #pool
2001{
2002 check_pool_status "$1" "scan" "scrub in progress since "
2003 return $?
2004}
2005
2006function is_pool_scrubbed #pool
2007{
2008 check_pool_status "$1" "scan" "scrub repaired"
2009 return $?
2010}
2011
2012function is_pool_scrub_stopped #pool
2013{
2014 check_pool_status "$1" "scan" "scrub canceled"
2015 return $?
2016}
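# E.g. wait for an in-progress scrub to finish (illustrative):
#
#	while is_pool_scrubbing $TESTPOOL; do
#		$SLEEP 1
#	done
#	log_must is_pool_scrubbed $TESTPOOL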
2017
2018#
2019# Use create_pool()/destroy_pool() to clean up the information
2020# on the given disks to avoid slice overlapping.
2021#
2022function cleanup_devices #vdevs
2023{
2024 typeset pool="foopool$$"
2025
2026 if poolexists $pool ; then
2027 destroy_pool $pool
2028 fi
2029
2030 create_pool $pool $@
2031 destroy_pool $pool
2032
2033 return 0
2034}
2035
2036#
2037# Verify the rsh connectivity to each remote host in RHOSTS.
2038#
2039# Return 0 if remote host is accessible; otherwise 1.
2040# $1 remote host name
2041# $2 username
2042#
2043function verify_rsh_connect #rhost, username
2044{
2045 typeset rhost=$1
2046 typeset username=$2
2047 typeset rsh_cmd="$RSH -n"
2048 typeset cur_user=
2049
2050 $GETENT hosts $rhost >/dev/null 2>&1
2051 if (($? != 0)); then
2052 log_note "$rhost cannot be found in the" \
2053 "administrative database."
2054 return 1
2055 fi
2056
2057 $PING $rhost 3 >/dev/null 2>&1
2058 if (($? != 0)); then
2059 log_note "$rhost is not reachable."
2060 return 1
2061 fi
2062
2063 if ((${#username} != 0)); then
2064 rsh_cmd="$rsh_cmd -l $username"
2065 cur_user="given user \"$username\""
2066 else
2067 cur_user="current user \"`$LOGNAME`\""
2068 fi
2069
2070 if ! $rsh_cmd $rhost $TRUE; then
2071 log_note "$RSH to $rhost is not accessible" \
2072 "with $cur_user."
2073 return 1
2074 fi
2075
2076 return 0
2077}
2078
2079#
2080# Verify the remote host connection via rsh after rebooting
2081# $1 remote host
2082#
2083function verify_remote
2084{
2085 rhost=$1
2086
2087 #
2088 # The following loop waits for the remote system to reboot.
2089 # Each iteration waits for 150 seconds. There are
2090 # 5 iterations in total, so the total timeout value is
2091 # 12.5 minutes for the system to reboot. This number
2092 # is approximate.
2093 #
2094 typeset -i count=0
2095 while ! verify_rsh_connect $rhost; do
2096 sleep 150
2097 ((count = count + 1))
2098 if ((count > 5)); then
2099 return 1
2100 fi
2101 done
2102 return 0
2103}
2104
2105#
2106# Replacement function for /usr/bin/rsh. This function wraps
2107# /usr/bin/rsh and additionally returns the execution status of the
2108# last remote command.
2109#
2110# $1 username passed down to the -l option of /usr/bin/rsh
2111# $2 remote machine hostname
2112# $3... command string
2113#
2114
2115function rsh_status
2116{
2117 typeset ruser=$1
2118 typeset rhost=$2
2119 typeset -i ret=0
2120 typeset cmd_str=""
2121 typeset rsh_str=""
2122
2123 shift; shift
2124 cmd_str="$@"
2125
2126 err_file=/tmp/${rhost}.$$.err
2127 if ((${#ruser} == 0)); then
2128 rsh_str="$RSH -n"
2129 else
2130 rsh_str="$RSH -n -l $ruser"
2131 fi
2132
2133 $rsh_str $rhost /bin/ksh -c "'$cmd_str; \
2134 print -u 2 \"status=\$?\"'" \
2135 >/dev/null 2>$err_file
2136 ret=$?
2137 if (($ret != 0)); then
2138 $CAT $err_file
2139 $RM -f $std_file $err_file
2140 log_fail "$RSH itself failed with exit code $ret..."
2141 fi
2142
2143 ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
2144 $CUT -d= -f2)
2145 (($ret != 0)) && $CAT $err_file >&2
2146
2147 $RM -f $err_file >/dev/null 2>&1
2148 return $ret
2149}
2150
2151#
2152# Get the SUNWstc-fs-zfs package installation path in a remote host
2153# $1 remote host name
2154#
2155function get_remote_pkgpath
2156{
2157 typeset rhost=$1
2158 typeset pkgpath=""
2159
2160 pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
2161 $CUT -d: -f2")
2162
2163 $ECHO $pkgpath
2164}
2165
2166#/**
2167# A function to find free disks on a system, or among the disks given
2168# as parameters. It works by excluding disks that are in use
2169# as swap or dump devices, as well as disks listed in /etc/vfstab
2170#
2171# $@ given disks to find which are free, default is all disks in
2172# the test system
2173#
2174# @return a string containing the list of available disks
2175#*/
2176function find_disks
2177{
2178 # Trust provided list, no attempt is made to locate unused devices.
2179 if is_linux; then
2180 $ECHO "$@"
2181 return
2182 fi
2183
2184
2185 sfi=/tmp/swaplist.$$
2186 dmpi=/tmp/dumpdev.$$
2187 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2188
2189 $SWAP -l > $sfi
2190 $DUMPADM > $dmpi 2>/dev/null
2191
2192# write an awk script that can process the output of format
2193# to produce a list of disks we know about. Note that we have
2194# to escape "$2" so that the shell doesn't interpret it while
2195# we're creating the awk script.
2196# -------------------
2197 $CAT > /tmp/find_disks.awk <<EOF
2198#!/bin/nawk -f
2199 BEGIN { FS="."; }
2200
2201 /^Specify disk/{
2202 searchdisks=0;
2203 }
2204
2205 {
2206 if (searchdisks && \$2 !~ "^$"){
2207 split(\$2,arr," ");
2208 print arr[1];
2209 }
2210 }
2211
2212 /^AVAILABLE DISK SELECTIONS:/{
2213 searchdisks=1;
2214 }
2215EOF
2216#---------------------
2217
2218 $CHMOD 755 /tmp/find_disks.awk
2219 disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
2220 $RM /tmp/find_disks.awk
2221
2222 unused=""
2223 for disk in $disks; do
2224 # Check for mounted
2225 $GREP "${disk}[sp]" /etc/mnttab >/dev/null
2226 (($? == 0)) && continue
2227 # Check for swap
2228 $GREP "${disk}[sp]" $sfi >/dev/null
2229 (($? == 0)) && continue
2230 # check for dump device
2231 $GREP "${disk}[sp]" $dmpi >/dev/null
2232 (($? == 0)) && continue
2233 # check to see if this disk hasn't been explicitly excluded
2234 # by a user-set environment variable
2235 $ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
2236 (($? == 0)) && continue
2237 unused_candidates="$unused_candidates $disk"
2238 done
2239 $RM $sfi
2240 $RM $dmpi
2241
2242# now just check to see if those disks do actually exist
2243# by looking for a device pointing to the first slice in
2244# each case. limit the number to max_finddisksnum
2245 count=0
2246 for disk in $unused_candidates; do
2247 if [ -b $DEV_DSKDIR/${disk}s0 ]; then
2248 if [ $count -lt $max_finddisksnum ]; then
2249 unused="$unused $disk"
2250 # do not impose limit if $@ is provided
2251 [[ -z $@ ]] && ((count = count + 1))
2252 fi
2253 fi
2254 done
2255
2256# finally, return our disk list
2257 $ECHO $unused
2258}
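
#
# Usage sketch, assuming $DISKS holds the candidate device list exported by
# the test framework: filter the candidates down to devices that are not
# mounted, swap, or dump devices before building pools on them.
#
#	FREE_DISKS=$(find_disks $DISKS)
#	verify_disk_count "$FREE_DISKS" 2
#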
2259
2260#
2261# Add specified user to specified group
2262#
2263# $1 group name
2264# $2 user name
2265# $3 base of the homedir (optional)
2266#
2267function add_user #<group_name> <user_name> <basedir>
2268{
2269 typeset gname=$1
2270 typeset uname=$2
2271 typeset basedir=${3:-"/var/tmp"}
2272
2273 if ((${#gname} == 0 || ${#uname} == 0)); then
2274 log_fail "group name or user name are not defined."
2275 fi
2276
2277 log_must $USERADD -g $gname -d $basedir/$uname -m $uname
2278
2279 # Add new users to the same group as the command line utils.
2280 # This allows the utils to be run out of the original user's home
2281 # directory as long as it is permissioned to be group readable.
2282 if is_linux; then
2283 cmd_group=$(stat --format="%G" $ZFS)
2284 log_must $USERMOD -a -G $cmd_group $uname
2285 fi
2286
2287 return 0
2288}
2289
2290#
2291# Delete the specified user.
2292#
2293# $1 login name
2294# $2 base of the homedir (optional)
2295#
2296function del_user #<logname> <basedir>
2297{
2298 typeset user=$1
2299 typeset basedir=${2:-"/var/tmp"}
2300
2301 if ((${#user} == 0)); then
2302 log_fail "login name is necessary."
2303 fi
2304
2305 if $ID $user > /dev/null 2>&1; then
2306 log_must $USERDEL $user
2307 fi
2308
2309 [[ -d $basedir/$user ]] && $RM -fr $basedir/$user
2310
2311 return 0
2312}
2313
2314#
2315# Select valid gid and create specified group.
2316#
2317# $1 group name
2318#
2319function add_group #<group_name>
2320{
2321 typeset group=$1
2322
2323 if ((${#group} == 0)); then
2324 log_fail "group name is necessary."
2325 fi
2326
2327 # On Linux, let groupadd pick a free gid, since many distributions
2328 # reserve gids of 1000 and below. Elsewhere, start from gid 100.
2329 if is_linux; then
2330 while true; do
2331 $GROUPADD $group > /dev/null 2>&1
2332 typeset -i ret=$?
2333 case $ret in
2334 0) return 0 ;;
2335 *) return 1 ;;
2336 esac
2337 done
2338 else
2339 typeset -i gid=100
2340
2341 while true; do
2342 $GROUPADD -g $gid $group > /dev/null 2>&1
2343 typeset -i ret=$?
2344 case $ret in
2345 0) return 0 ;;
2346 # The gid is not unique
2347 4) ((gid += 1)) ;;
2348 *) return 1 ;;
2349 esac
2350 done
2351 fi
2352}
2353
2354#
2355# Delete the specified group.
2356#
2357# $1 group name
2358#
2359function del_group #<group_name>
2360{
2361 typeset grp=$1
2362 if ((${#grp} == 0)); then
2363 log_fail "group name is necessary."
2364 fi
2365
2366 if is_linux; then
2367 $GETENT group $grp > /dev/null 2>&1
2368 typeset -i ret=$?
2369 case $ret in
2370 # Group does not exist.
2371 2) return 0 ;;
2372 # Name already exists as a group name
2373 0) log_must $GROUPDEL $grp ;;
2374 *) return 1 ;;
2375 esac
2376 else
2377 $GROUPMOD -n $grp $grp > /dev/null 2>&1
2378 typeset -i ret=$?
2379 case $ret in
2380 # Group does not exist.
2381 6) return 0 ;;
2382 # Name already exists as a group name
2383 9) log_must $GROUPDEL $grp ;;
2384 *) return 1 ;;
2385 esac
2386 fi
2387
2388 return 0
2389}
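
#
# Usage sketch (hypothetical group/user names): a typical setup/cleanup
# sequence for tests that need an unprivileged user.
#
#	log_must add_group zfsgrp
#	log_must add_user zfsgrp zfsuser
#	...
#	log_must del_user zfsuser
#	log_must del_group zfsgrp
#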
2390
2391#
2392# This function will return true if it's safe to destroy the pool passed
2393# as argument 1. It checks for pools based on zvols and files, and also
2394# files contained in a pool that may have a different mountpoint.
2395#
2396function safe_to_destroy_pool { # $1 the pool name
2397
2398 typeset pool=""
2399 typeset DONT_DESTROY=""
2400
2401 # We check that by deleting the $1 pool, we're not
2402 # going to pull the rug out from other pools. Do this
2403 # by looking at all other pools, ensuring that they
2404 # aren't built from files or zvols contained in this pool.
2405
2406 for pool in $($ZPOOL list -H -o name)
2407 do
2408 ALTMOUNTPOOL=""
2409
2410 # this is a list of the file vdevs in this pool whose paths
2411 # include the pool we have been asked to destroy
2412 FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
2413 $AWK '{print $1}')
2414
2415 # this is a list of the zvols that make up the pool
2416 ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "$ZVOL_DEVDIR/$1$" \
2417 | $AWK '{print $1}')
2418
2419 # also want to determine if it's a file-based pool using an
2420 # alternate mountpoint...
2421 POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
2422 $GREP / | $AWK '{print $1}' | \
2423 $AWK -F/ '{print $2}' | $GREP -v "dev")
2424
2425 for pooldir in $POOL_FILE_DIRS
2426 do
2427 OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
2428 $GREP "${pooldir}$" | $AWK '{print $1}')
2429
2430 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2431 done
2432
2433
2434 if [ ! -z "$ZVOLPOOL" ]
2435 then
2436 DONT_DESTROY="true"
2437 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2438 fi
2439
2440 if [ ! -z "$FILEPOOL" ]
2441 then
2442 DONT_DESTROY="true"
2443 log_note "Pool $pool is built from $FILEPOOL on $1"
2444 fi
2445
2446 if [ ! -z "$ALTMOUNTPOOL" ]
2447 then
2448 DONT_DESTROY="true"
2449 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2450 fi
2451 done
2452
2453 if [ -z "${DONT_DESTROY}" ]
2454 then
2455 return 0
2456 else
2457 log_note "Warning: it is not safe to destroy $1!"
2458 return 1
2459 fi
2460}
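
#
# Usage sketch: only destroy a pool once it is known that no other pool is
# backed by files or zvols stored inside it.
#
#	if safe_to_destroy_pool $TESTPOOL; then
#		log_must $ZPOOL destroy -f $TESTPOOL
#	fi
#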
2461
2462#
2463# Get the available ZFS compression options
2464# $1 option type zfs_set|zfs_compress
2465#
2466function get_compress_opts
2467{
2468 typeset COMPRESS_OPTS
2469 typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2470 gzip-6 gzip-7 gzip-8 gzip-9"
2471
2472 if [[ $1 == "zfs_compress" ]] ; then
2473 COMPRESS_OPTS="on lzjb"
2474 elif [[ $1 == "zfs_set" ]] ; then
2475 COMPRESS_OPTS="on off lzjb"
2476 fi
2477 typeset valid_opts="$COMPRESS_OPTS"
2478 $ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
2479 if [[ $? -eq 0 ]]; then
2480 valid_opts="$valid_opts $GZIP_OPTS"
2481 fi
2482 $ECHO "$valid_opts"
2483}
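
#
# Usage sketch: iterate over every settable compression value, including the
# gzip levels when the running zfs build advertises gzip support.
#
#	for opt in $(get_compress_opts zfs_set); do
#		log_must $ZFS set compression=$opt $TESTPOOL/$TESTFS
#	done
#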
2484
2485#
2486# Verify that zfs operations with the -p option work as expected
2487# $1 operation, value could be create, clone or rename
2488# $2 dataset type, value could be fs or vol
2489# $3 dataset name
2490# $4 new dataset name
2491#
2492function verify_opt_p_ops
2493{
2494 typeset ops=$1
2495 typeset datatype=$2
2496 typeset dataset=$3
2497 typeset newdataset=$4
2498
2499 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2500 log_fail "$datatype is not supported."
2501 fi
2502
2503 # check parameters accordingly
2504 case $ops in
2505 create)
2506 newdataset=$dataset
2507 dataset=""
2508 if [[ $datatype == "vol" ]]; then
2509 ops="create -V $VOLSIZE"
2510 fi
2511 ;;
2512 clone)
2513 if [[ -z $newdataset ]]; then
2514 log_fail "newdataset should not be empty" \
2515 "when ops is $ops."
2516 fi
2517 log_must datasetexists $dataset
2518 log_must snapexists $dataset
2519 ;;
2520 rename)
2521 if [[ -z $newdataset ]]; then
2522 log_fail "newdataset should not be empty" \
2523 "when ops is $ops."
2524 fi
2525 log_must datasetexists $dataset
2526 log_mustnot snapexists $dataset
2527 ;;
2528 *)
2529 log_fail "$ops is not supported."
2530 ;;
2531 esac
2532
2533 # make sure the upper level filesystem does not exist
2534 if datasetexists ${newdataset%/*} ; then
2535 log_must $ZFS destroy -rRf ${newdataset%/*}
2536 fi
2537
2538 # without -p option, operation will fail
2539 log_mustnot $ZFS $ops $dataset $newdataset
2540 log_mustnot datasetexists $newdataset ${newdataset%/*}
2541
2542 # with -p option, operation should succeed
2543 log_must $ZFS $ops -p $dataset $newdataset
2544 block_device_wait
2545
2546 if ! datasetexists $newdataset ; then
2547 log_fail "-p option does not work for $ops"
2548 fi
2549
2550 # when $ops is create or clone, redoing the operation should still succeed
2551 if [[ $ops != "rename" ]]; then
2552 log_must $ZFS $ops -p $dataset $newdataset
2553 fi
2554
2555 return 0
2556}
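
#
# Usage sketch (hypothetical dataset names, assuming the clone/rename source
# datasets and snapshot already exist): exercise 'zfs <op> -p' for the three
# supported operations; each call creates any missing parent datasets.
#
#	verify_opt_p_ops create fs $TESTPOOL/a/b/c
#	verify_opt_p_ops clone fs $TESTPOOL/fs@snap $TESTPOOL/d/e/clone
#	verify_opt_p_ops rename fs $TESTPOOL/fs $TESTPOOL/f/g/renamed
#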
2557
2558#
2559# Get configuration of pool
2560# $1 pool name
2561# $2 config name
2562#
2563function get_config
2564{
2565 typeset pool=$1
2566 typeset config=$2
2567 typeset alt_root
2568
2569 if ! poolexists "$pool" ; then
2570 return 1
2571 fi
2572 alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
2573 if [[ $alt_root == "-" ]]; then
2574 value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
2575 '{print $2}')
2576 else
2577 value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
2578 '{print $2}')
2579 fi
2580 if [[ -n $value ]] ; then
2581 value=${value#\'}
2582 value=${value%\'}
2583 fi
2584 echo $value
2585
2586 return 0
2587}
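
#
# Usage sketch: read a single top-level label value from the zdb output of
# an imported pool, e.g. the pool guid.
#
#	guid=$(get_config $TESTPOOL pool_guid)
#	log_note "pool_guid for $TESTPOOL is $guid"
#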
2588
2589#
2590# Private function. Randomly select one of the items from the arguments.
2591#
2592# $1 count
2593# $2-n string
2594#
2595function _random_get
2596{
2597 typeset cnt=$1
2598 shift
2599
2600 typeset str="$@"
2601 typeset -i ind
2602 ((ind = RANDOM % cnt + 1))
2603
2604 typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
2605 $ECHO $ret
2606}
2607
2608#
2609# Randomly select one of the arguments; may also return an empty (NONE) result
2610#
2611function random_get_with_non
2612{
2613 typeset -i cnt=$#
2614 ((cnt += 1))
2615
2616 _random_get "$cnt" "$@"
2617}
2618
2619#
2620# Randomly select one of the arguments; never returns an empty (NONE) result
2621#
2622function random_get
2623{
2624 _random_get "$#" "$@"
2625}
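
#
# Usage sketch: pick a random checksum value for a property test; the
# *_with_non variant may also return an empty string, meaning "set nothing".
#
#	typeset csum=$(random_get on off fletcher2 fletcher4 sha256)
#	typeset maybe=$(random_get_with_non on off sha256)
#	[[ -n $maybe ]] && log_must $ZFS set checksum=$maybe $TESTPOOL/$TESTFS
#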
2626
2627#
2628# Detect whether the current system supports a separate log (slog) device
2629#
2630function verify_slog_support
2631{
2632 typeset dir=/tmp/disk.$$
2633 typeset pool=foo.$$
2634 typeset vdev=$dir/a
2635 typeset sdev=$dir/b
2636
2637 $MKDIR -p $dir
2638 $MKFILE $MINVDEVSIZE $vdev $sdev
2639
2640 typeset -i ret=0
2641 if ! $ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1; then
2642 ret=1
2643 fi
2644 $RM -r $dir
2645
2646 return $ret
2647}
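
#
# Usage sketch: skip a log-device test early when the platform cannot create
# a pool with a separate intent log.
#
#	verify_slog_support || \
#	    log_untested "This system does not support slog devices"
#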
2648
2649#
2650# The function will generate a dataset name of the specified length
2651# $1, the length of the name
2652# $2, the base string to construct the name
2653#
2654function gen_dataset_name
2655{
2656 typeset -i len=$1
2657 typeset basestr="$2"
2658 typeset -i baselen=${#basestr}
2659 typeset -i iter=0
2660 typeset l_name=""
2661
2662 if ((len % baselen == 0)); then
2663 ((iter = len / baselen))
2664 else
2665 ((iter = len / baselen + 1))
2666 fi
2667 while ((iter > 0)); do
2668 l_name="${l_name}$basestr"
2669
2670 ((iter -= 1))
2671 done
2672
2673 $ECHO $l_name
2674}
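
#
# Usage sketch: build a long dataset name by repeating a base string; the
# result length is the requested length rounded up to a whole number of
# base-string repeats.
#
#	typeset longname=$(gen_dataset_name 200 "abcdefgh")
#	log_must $ZFS create $TESTPOOL/$longname
#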
2675
2676#
2677# Get cksum tuple of dataset
2678# $1 dataset name
2679#
2680# sample zdb output:
2681# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2682# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2683# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2684# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2685function datasetcksum
2686{
2687 typeset cksum
2688 $SYNC
2689 cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
2690 | $AWK -F= '{print $7}')
2691 $ECHO $cksum
2692}
2693
2694#
2695# Get cksum of file
2696# $1 file path
2697#
2698function checksum
2699{
2700 typeset cksum
2701 cksum=$($CKSUM $1 | $AWK '{print $1}')
2702 $ECHO $cksum
2703}
2704
2705#
2706# Get the given disk/slice state from the specific field of the pool
2707#
2708function get_device_state #pool disk field("", "spares","logs")
2709{
2710 typeset pool=$1
2711 typeset disk=${2#$DEV_DSKDIR/}
2712 typeset field=${3:-$pool}
2713
2714 state=$($ZPOOL status -v "$pool" 2>/dev/null | \
2715 $NAWK -v device=$disk -v pool=$pool -v field=$field \
2716 'BEGIN {startconfig=0; startfield=0; }
2717 /config:/ {startconfig=1}
2718 (startconfig==1) && ($1==field) {startfield=1; next;}
2719 (startfield==1) && ($1==device) {print $2; exit;}
2720 (startfield==1) &&
2721 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2722 echo $state
2723}
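
#
# Usage sketch (hypothetical $DISK1 device name): confirm a top-level vdev
# is still ONLINE after an error-injection test.
#
#	state=$(get_device_state $TESTPOOL $DISK1)
#	[[ $state == "ONLINE" ]] || log_fail "$DISK1 is $state, expected ONLINE"
#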
2724
2725
2726#
2727# print the given directory filesystem type
2728#
2729# $1 directory name
2730#
2731function get_fstype
2732{
2733 typeset dir=$1
2734
2735 if [[ -z $dir ]]; then
2736 log_fail "Usage: get_fstype <directory>"
2737 fi
2738
2739 #
2740 # $ df -n /
2741 # / : ufs
2742 #
2743 $DF -n $dir | $AWK '{print $3}'
2744}
2745
2746#
2747# Given a disk, label it to VTOC regardless of what label was on the disk
2748# $1 disk
2749#
2750function labelvtoc
2751{
2752 typeset disk=$1
2753 if [[ -z $disk ]]; then
2754 log_fail "The disk name is unspecified."
2755 fi
2756 typeset label_file=/var/tmp/labelvtoc.$$
2757 typeset arch=$($UNAME -p)
2758
2759 if is_linux; then
2760 log_note "Currently unsupported by the test framework"
2761 return 1
2762 fi
2763
2764 if [[ $arch == "i386" ]]; then
2765 $ECHO "label" > $label_file
2766 $ECHO "0" >> $label_file
2767 $ECHO "" >> $label_file
2768 $ECHO "q" >> $label_file
2769 $ECHO "q" >> $label_file
2770
2771 $FDISK -B $disk >/dev/null 2>&1
2772 # wait a while for fdisk to finish
2773 $SLEEP 60
2774 elif [[ $arch == "sparc" ]]; then
2775 $ECHO "label" > $label_file
2776 $ECHO "0" >> $label_file
2777 $ECHO "" >> $label_file
2778 $ECHO "" >> $label_file
2779 $ECHO "" >> $label_file
2780 $ECHO "q" >> $label_file
2781 else
2782 log_fail "unknown arch type"
2783 fi
2784
2785 $FORMAT -e -s -d $disk -f $label_file
2786 typeset -i ret_val=$?
2787 $RM -f $label_file
2788 #
2789 # wait for the format to finish
2790 #
2791 $SLEEP 60
2792 if ((ret_val != 0)); then
2793 log_fail "unable to label $disk as VTOC."
2794 fi
2795
2796 return 0
2797}
2798
2799#
2800# check if the system was installed as zfsroot or not
2801# return: 0 if true, otherwise 1
2802#
2803function is_zfsroot
2804{
2805 $DF -n / | $GREP zfs > /dev/null 2>&1
2806 return $?
2807}
2808
2809#
2810# get the root filesystem name if it's a zfsroot system.
2811#
2812# return: root filesystem name
2813function get_rootfs
2814{
2815 typeset rootfs=""
2816 rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
2817 /etc/mnttab)
2818 if [[ -z "$rootfs" ]]; then
2819 log_fail "Can not get rootfs"
2820 fi
2821 $ZFS list $rootfs > /dev/null 2>&1
2822 if (($? == 0)); then
2823 $ECHO $rootfs
2824 else
2825 log_fail "This is not a zfsroot system."
2826 fi
2827}
2828
2829#
2830# get the rootfs's pool name
2831# return:
2832# rootpool name
2833#
2834function get_rootpool
2835{
2836 typeset rootfs=""
2837 typeset rootpool=""
2838 rootfs=$($AWK '{if ($2 == "/" && $3 =="zfs") print $1}' \
2839 /etc/mnttab)
2840 if [[ -z "$rootfs" ]]; then
2841 log_fail "Can not get rootpool"
2842 fi
2843 $ZFS list $rootfs > /dev/null 2>&1
2844 if (($? == 0)); then
2845 rootpool=`$ECHO $rootfs | awk -F\/ '{print $1}'`
2846 $ECHO $rootpool
2847 else
2848 log_fail "This is not a zfsroot system."
2849 fi
2850}
2851
2852#
2853# Check if the given device is a physical device
2854#
2855function is_physical_device #device
2856{
2857 typeset device=${1#$DEV_DSKDIR}
2858 device=${device#$DEV_RDSKDIR}
2859
2860 if is_linux; then
2861 [[ -b "$DEV_DSKDIR/$device" ]] && \
2862 [[ -f /sys/module/loop/parameters/max_part ]]
2863 return $?
2864 else
2865 $ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
2866 return $?
2867 fi
2868}
2869
2870#
2871# Check if the given device is a real device (i.e. a SCSI device)
2872#
2873function is_real_device #disk
2874{
2875 typeset disk=$1
2876 [[ -z $disk ]] && log_fail "No argument for disk given."
2877
2878 if is_linux; then
2879 ($LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP disk > /dev/null) \
2880 2>/dev/null
2881 return $?
2882 fi
2883}
2884
2885#
2886# Check if the given device is a loop device
2887#
2888function is_loop_device #disk
2889{
2890 typeset disk=$1
2891 [[ -z $disk ]] && log_fail "No argument for disk given."
2892
2893 if is_linux; then
2894 ($LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP loop > /dev/null) \
2895 2>/dev/null
2896 return $?
2897 fi
2898}
2899
2900#
2901# Check if the given device is a multipath device and if there is a symbolic
2902# link to a device mapper and to a disk
2903# Currently no support for dm devices alone without multipath
2904#
2905function is_mpath_device #disk
2906{
2907 typeset disk=$1
2908 [[ -z $disk ]] && log_fail "No argument for disk given."
2909
2910 if is_linux; then
2911 ($LSBLK $DEV_MPATHDIR/$disk -o TYPE | $EGREP mpath >/dev/null) \
2912 2>/dev/null
2913 if (($? == 0)); then
2914 $READLINK $DEV_MPATHDIR/$disk > /dev/null 2>&1
2915 return $?
2916 else
2917 return $?
2918 fi
2919 fi
2920}
2921
2922# Set the slice prefix for disk partitioning depending
2923# on whether the device is a real, multipath, or loop device.
2924# Currently all disks have to be of the same type, so only
2925# checks first disk to determine slice prefix.
2926#
2927function set_slice_prefix
2928{
2929 typeset disk
2930 typeset -i i=0
2931
2932 if is_linux; then
2933 while (( i < $DISK_ARRAY_NUM )); do
2934 disk="$($ECHO $DISKS | $NAWK '{print $(i + 1)}')"
2935 if ( is_mpath_device $disk ) && [[ -z $($ECHO $disk \
2936 | awk 'substr($1,18,1) ~ /^[[:digit:]]+$/') ]] || \
2937 ( is_real_device $disk ); then
2938 export SLICE_PREFIX=""
2939 return 0
2940 elif ( is_mpath_device $disk || is_loop_device \
2941 $disk ); then
2942 export SLICE_PREFIX="p"
2943 return 0
2944 else
2945 log_fail "$disk not supported for partitioning."
2946 fi
2947 (( i = i + 1))
2948 done
2949 fi
2950}
2951
2952#
2953# Set the directory path of the listed devices in $DISK_ARRAY_NUM
2954# Currently all disks have to be of the same type, so only
2955# checks first disk to determine device directory
2956# default = /dev (linux)
2957# real disk = /dev (linux)
2958# multipath device = /dev/mapper (linux)
2959#
2960function set_device_dir
2961{
2962 typeset disk
2963 typeset -i i=0
2964
2965 if is_linux; then
2966 while (( i < $DISK_ARRAY_NUM )); do
2967 disk="$($ECHO $DISKS | $NAWK '{print $(i + 1)}')"
2968 if is_mpath_device $disk; then
2969 export DEV_DSKDIR=$DEV_MPATHDIR
2970 return 0
2971 else
2972 export DEV_DSKDIR=$DEV_RDSKDIR
2973 return 0
2974 fi
2975 (( i = i + 1))
2976 done
2977 else
2978 export DEV_DSKDIR=$DEV_RDSKDIR
2979 fi
2980}
2981
2982#
2983# Get the directory path of given device
2984#
2985function get_device_dir #device
2986{
2987 typeset device=$1
2988
2989 if ! $(is_physical_device $device) ; then
2990 if [[ $device != "/" ]]; then
2991 device=${device%/*}
2992 fi
2993 if [[ -b "$DEV_DSKDIR/$device" ]]; then
2994 device="$DEV_DSKDIR"
2995 fi
2996 $ECHO $device
2997 else
2998 $ECHO "$DEV_DSKDIR"
2999 fi
3000}
3001
3002#
3003# Get persistent name for given disk
3004#
3005function get_persistent_disk_name #device
3006{
3007 typeset device=$1
3008 typeset dev_id
3009
3010 if is_linux; then
3011 if is_real_device $device; then
3012 dev_id="$($UDEVADM info -q all -n $DEV_DSKDIR/$device \
3013 | $EGREP disk/by-id | $NAWK '{print $2; exit}' \
3014 | $NAWK -F / '{print $3}')"
3015 $ECHO $dev_id
3016 elif is_mpath_device $device; then
3017 dev_id="$($UDEVADM info -q all -n $DEV_DSKDIR/$device \
3018 | $EGREP disk/by-id/dm-uuid \
3019 | $NAWK '{print $2; exit}' \
3020 | $NAWK -F / '{print $3}')"
3021 $ECHO $dev_id
3022 else
3023 $ECHO $device
3024 fi
3025 else
3026 $ECHO $device
3027 fi
3028}
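
#
# Usage sketch (hypothetical short device name): translate a device name
# into its persistent /dev/disk/by-id name before building a pool on it.
#
#	typeset byid=$(get_persistent_disk_name sdb)
#	log_note "using persistent name $byid for sdb"
#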
3029
3030#
3031# Get the package name
3032#
3033function get_package_name
3034{
3035 typeset dirpath=${1:-$STC_NAME}
3036
3037 echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
3038}
3039
3040#
3041# Get the number of words in a whitespace-separated string
3042#
3043function get_word_count
3044{
3045 $ECHO $1 | $WC -w
3046}
3047
3048#
3049# Verify that the required number of disks is given
3050#
3051function verify_disk_count
3052{
3053 typeset -i min=${2:-1}
3054
3055 typeset -i count=$(get_word_count "$1")
3056
3057 if ((count < min)); then
3058 log_untested "A minimum of $min disks is required to run." \
3059 " You specified $count disk(s)"
3060 fi
3061}
3062
3063function ds_is_volume
3064{
3065 typeset type=$(get_prop type $1)
3066 [[ $type = "volume" ]] && return 0
3067 return 1
3068}
3069
3070function ds_is_filesystem
3071{
3072 typeset type=$(get_prop type $1)
3073 [[ $type = "filesystem" ]] && return 0
3074 return 1
3075}
3076
3077function ds_is_snapshot
3078{
3079 typeset type=$(get_prop type $1)
3080 [[ $type = "snapshot" ]] && return 0
3081 return 1
3082}
3083
3084#
3085# Check if Trusted Extensions are installed and enabled
3086#
3087function is_te_enabled
3088{
3089 $SVCS -H -o state labeld 2>/dev/null | $GREP "enabled"
3090 if (($? != 0)); then
3091 return 1
3092 else
3093 return 0
3094 fi
3095}
3096
3097# Utility function to determine if a system has multiple cpus.
3098function is_mp
3099{
3100 if is_linux; then
3101 (($($NPROC) > 1))
3102 else
3103 (($($PSRINFO | $WC -l) > 1))
3104 fi
3105
3106 return $?
3107}
3108
3109function get_cpu_freq
3110{
3111 if is_linux; then
3112 lscpu | $AWK '/CPU MHz/ { print $3 }'
3113 else
3114 $PSRINFO -v 0 | $AWK '/processor operates at/ {print $6}'
3115 fi
3116}
3117
3118# Run the given command as the user provided.
3119function user_run
3120{
3121 typeset user=$1
3122 shift
3123
3124 log_note "user:$user $@"
3125 eval \$SU \$user -c \"$@\" > /tmp/out 2>/tmp/err
3126 return $?
3127}
3128
3129#
3130# Check if the pool contains the specified vdevs
3131#
3132# $1 pool
3133# $2..n <vdev> ...
3134#
3135# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3136# vdevs is not in the pool, and 2 if pool name is missing.
3137#
3138function vdevs_in_pool
3139{
3140 typeset pool=$1
3141 typeset vdev
3142
3143 if [[ -z $pool ]]; then
3144 log_note "Missing pool name."
3145 return 2
3146 fi
3147
3148 shift
3149
3150 typeset tmpfile=$($MKTEMP)
3151 $ZPOOL list -Hv "$pool" >$tmpfile
3152 for vdev in $@; do
3153 $GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1 || \
3154 { $RM -f $tmpfile; return 1; }
3155 done
3156
3157 $RM -f $tmpfile
3158
3159 return 0;
3160}
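
#
# Usage sketch (hypothetical $DISK1/$DISK2 device names): verify that both
# mirror sides are still part of the pool after a replace operation.
#
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2
#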
3161
3162function get_max
3163{
3164 typeset -l i max=$1
3165 shift
3166
3167 for i in "$@"; do
3168 max=$(echo $((max > i ? max : i)))
3169 done
3170
3171 echo $max
3172}
3173
3174function get_min
3175{
3176 typeset -l i min=$1
3177 shift
3178
3179 for i in "$@"; do
3180 min=$(echo $((min < i ? min : i)))
3181 done
3182
3183 echo $min
3184}
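
#
# Usage sketch: clamp a test file size to the smallest of several limits.
#
#	typeset -i size=$(get_min $MINVDEVSIZE 134217728 67108864)
#	log_note "using file size $size"
#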
3185
3186#
3187# Synchronize all the data in pool
3188#
3189# $1 pool name
3190#
3191function sync_pool #pool
3192{
3193 typeset pool=${1:-$TESTPOOL}
3194
3195 log_must $SYNC
3196 log_must $SLEEP 2
3197 # Flush all the pool data.
3198 typeset -i ret
3199 $ZPOOL scrub $pool >/dev/null 2>&1
3200 ret=$?
3201 (( $ret != 0 )) && \
3202 log_fail "$ZPOOL scrub $pool failed."
3203
3204 while ! is_pool_scrubbed $pool; do
3205 if is_pool_resilvered $pool ; then
3206 log_fail "$pool should not have completed a resilver."
3207 fi
3208 log_must $SLEEP 2
3209 done
3210}
3211
3212#
3213# Wait for the zpool 'freeing' property to drop to zero.
3214#
3215# $1 pool name
3216#
3217function wait_freeing #pool
3218{
3219 typeset pool=${1:-$TESTPOOL}
3220 while true; do
3221 [[ "0" == "$($ZPOOL list -Ho freeing $pool)" ]] && break
3222 log_must $SLEEP 1
3223 done
3224}