1 #!/bin/ksh -p
2 #
3 # CDDL HEADER START
4 #
5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
8 #
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
13 #
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
19 #
20 # CDDL HEADER END
21 #
22
23 #
24 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25 # Use is subject to license terms.
26 #
27
28 #
29 # Copyright (c) 2012, 2015 by Delphix. All rights reserved.
30 #
31
32 . ${STF_TOOLS}/include/logapi.shlib
33
34 # Determine if this is a Linux test system
35 #
36 # Return 0 if platform Linux, 1 if otherwise
37
38 function is_linux
39 {
40 if [[ $($UNAME -o) == "GNU/Linux" ]]; then
41 return 0
42 else
43 return 1
44 fi
45 }
46
47 # Determine whether a dataset is mounted
48 #
49 # $1 dataset name
50 # $2 filesystem type; optional - defaulted to zfs
51 #
52 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
53
54 function ismounted
55 {
56 typeset fstype=$2
57 [[ -z $fstype ]] && fstype=zfs
58 typeset out dir name ret
59
60 case $fstype in
61 zfs)
62 if [[ "$1" == "/"* ]] ; then
63 for out in $($ZFS mount | $AWK '{print $2}'); do
64 [[ $1 == $out ]] && return 0
65 done
66 else
67 for out in $($ZFS mount | $AWK '{print $1}'); do
68 [[ $1 == $out ]] && return 0
69 done
70 fi
71 ;;
72 ufs|nfs)
73 out=$($DF -F $fstype $1 2>/dev/null)
74 ret=$?
75 (($ret != 0)) && return $ret
76
77 dir=${out%%\(*}
78 dir=${dir%% *}
79 name=${out##*\(}
80 name=${name%%\)*}
81 name=${name%% *}
82
83 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
84 ;;
85 ext2)
86 out=$($DF -t $fstype $1 2>/dev/null)
87 return $?
88 ;;
89 zvol)
90 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
91 link=$(readlink -f $ZVOL_DEVDIR/$1)
92 [[ -n "$link" ]] && \
93 $MOUNT | $GREP -q "^$link" && \
94 return 0
95 fi
96 ;;
97 esac
98
99 return 1
100 }
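
#
# E.g. a test might guard an unmount with the following; this is only an
# illustrative sketch using the suite's usual $TESTPOOL/$TESTFS dataset:
#
#	ismounted $TESTPOOL/$TESTFS && log_must $ZFS unmount $TESTPOOL/$TESTFS
#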
101
102 # Return 0 if a dataset is mounted; 1 otherwise
103 #
104 # $1 dataset name
105 # $2 filesystem type; optional - defaulted to zfs
106
107 function mounted
108 {
109 ismounted $1 $2
110 (($? == 0)) && return 0
111 return 1
112 }
113
114 # Return 0 if a dataset is unmounted; 1 otherwise
115 #
116 # $1 dataset name
117 # $2 filesystem type; optional - defaulted to zfs
118
119 function unmounted
120 {
121 ismounted $1 $2
122 (($? == 1)) && return 0
123 return 1
124 }
125
126 # split line on ","
127 #
128 # $1 - line to split
129
130 function splitline
131 {
132 $ECHO $1 | $SED "s/,/ /g"
133 }
134
135 function default_setup
136 {
137 default_setup_noexit "$@"
138
139 log_pass
140 }
141
142 #
143 # Given a list of disks, setup storage pools and datasets.
144 #
145 function default_setup_noexit
146 {
147 typeset disklist=$1
148 typeset container=$2
149 typeset volume=$3
150
151 if is_global_zone; then
152 if poolexists $TESTPOOL ; then
153 destroy_pool $TESTPOOL
154 fi
155 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
156 log_must $ZPOOL create -f $TESTPOOL $disklist
157 else
158 reexport_pool
159 fi
160
161 $RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
162 $MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR
163
164 log_must $ZFS create $TESTPOOL/$TESTFS
165 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
166
167 if [[ -n $container ]]; then
168 $RM -rf $TESTDIR1 || \
169 log_unresolved Could not remove $TESTDIR1
170 $MKDIR -p $TESTDIR1 || \
171 log_unresolved Could not create $TESTDIR1
172
173 log_must $ZFS create $TESTPOOL/$TESTCTR
174 log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
175 log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
176 log_must $ZFS set mountpoint=$TESTDIR1 \
177 $TESTPOOL/$TESTCTR/$TESTFS1
178 fi
179
180 if [[ -n $volume ]]; then
181 if is_global_zone ; then
182 log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
183 block_device_wait
184 else
185 log_must $ZFS create $TESTPOOL/$TESTVOL
186 fi
187 fi
188 }
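
#
# E.g. a typical setup script might just run the following, assuming the
# test environment exports $DISKS; this creates $TESTPOOL and mounts
# $TESTPOOL/$TESTFS at $TESTDIR before exiting via log_pass:
#
#	default_setup "$DISKS"
#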
189
190 #
191 # Given a list of disks, setup a storage pool, file system and
192 # a container.
193 #
194 function default_container_setup
195 {
196 typeset disklist=$1
197
198 default_setup "$disklist" "true"
199 }
200
201 #
202 # Given a list of disks, setup a storage pool, file system
203 # and a volume.
204 #
205 function default_volume_setup
206 {
207 typeset disklist=$1
208
209 default_setup "$disklist" "" "true"
210 }
211
212 #
213 # Given a list of disks, setup a storage pool, file system,
214 # a container and a volume.
215 #
216 function default_container_volume_setup
217 {
218 typeset disklist=$1
219
220 default_setup "$disklist" "true" "true"
221 }
222
223 #
224 # Create a snapshot on a filesystem or volume. By default, create a snapshot
225 # on the filesystem
226 #
227 # $1 Existing filesystem or volume name. Default, $TESTFS
228 # $2 snapshot name. Default, $TESTSNAP
229 #
230 function create_snapshot
231 {
232 typeset fs_vol=${1:-$TESTFS}
233 typeset snap=${2:-$TESTSNAP}
234
235 [[ -z $fs_vol ]] && log_fail "Filesystem or volume name is undefined."
236 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
237
238 if snapexists $fs_vol@$snap; then
239 log_fail "$fs_vol@$snap already exists."
240 fi
241 datasetexists $fs_vol || \
242 log_fail "$fs_vol must exist."
243
244 log_must $ZFS snapshot $fs_vol@$snap
245 }
246
247 #
248 # Create a clone from a snapshot, default clone name is $TESTCLONE.
249 #
250 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
251 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
252 #
253 function create_clone # snapshot clone
254 {
255 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
256 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
257
258 [[ -z $snap ]] && \
259 log_fail "Snapshot name is undefined."
260 [[ -z $clone ]] && \
261 log_fail "Clone name is undefined."
262
263 log_must $ZFS clone $snap $clone
264 }
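
#
# E.g. the two helpers above are commonly chained; a sketch using the
# suite's default names:
#
#	create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
#	create_clone $TESTPOOL/$TESTFS@$TESTSNAP $TESTPOOL/$TESTCLONE
#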
265
266 function default_mirror_setup
267 {
268 default_mirror_setup_noexit $1 $2 $3
269
270 log_pass
271 }
272
273 #
274 # Given a pair of disks, set up a storage pool and dataset for the mirror
275 # @parameters: $1 the primary side of the mirror
276 # $2 the secondary side of the mirror
277 # @uses: ZPOOL ZFS TESTPOOL TESTFS
278 function default_mirror_setup_noexit
279 {
280 readonly func="default_mirror_setup_noexit"
281 typeset primary=$1
282 typeset secondary=$2
283
284 [[ -z $primary ]] && \
285 log_fail "$func: No parameters passed"
286 [[ -z $secondary ]] && \
287 log_fail "$func: No secondary partition passed"
288 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
289 log_must $ZPOOL create -f $TESTPOOL mirror $@
290 log_must $ZFS create $TESTPOOL/$TESTFS
291 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
292 }
293
294 #
295 # create a number of mirrors.
296 # We create a number ($1) of 2-way mirrors using the pairs of disks named
297 # on the command line. These mirrors are *not* mounted
298 # @parameters: $1 the number of mirrors to create
299 # $... the devices to use to create the mirrors on
300 # @uses: ZPOOL ZFS TESTPOOL
301 function setup_mirrors
302 {
303 typeset -i nmirrors=$1
304
305 shift
306 while ((nmirrors > 0)); do
307 log_must test -n "$1" -a -n "$2"
308 [[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
309 log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
310 shift 2
311 ((nmirrors = nmirrors - 1))
312 done
313 }
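
#
# E.g. the following creates the pools $TESTPOOL2 (disk1+disk2) and
# $TESTPOOL1 (disk3+disk4), each a 2-way mirror; the disk names are
# placeholders:
#
#	setup_mirrors 2 disk1 disk2 disk3 disk4
#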
314
315 #
316 # create a number of raidz pools.
317 # We create a number ($1) of 2-disk raidz pools using the pairs of disks named
318 # on the command line. These pools are *not* mounted
319 # @parameters: $1 the number of pools to create
320 # $... the devices to use to create the pools on
321 # @uses: ZPOOL ZFS TESTPOOL
322 function setup_raidzs
323 {
324 typeset -i nraidzs=$1
325
326 shift
327 while ((nraidzs > 0)); do
328 log_must test -n "$1" -a -n "$2"
329 [[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
330 log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
331 shift 2
332 ((nraidzs = nraidzs - 1))
333 done
334 }
335
336 #
337 # Destroy the configured testpool mirrors.
338 # The mirrors are of the form ${TESTPOOL}{number}
339 # @uses: ZPOOL ZFS TESTPOOL
340 function destroy_mirrors
341 {
342 default_cleanup_noexit
343
344 log_pass
345 }
346
347 #
348 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
349 # $1 the list of disks
350 #
351 function default_raidz_setup
352 {
353 typeset disklist="$*"
354 disks=(${disklist[*]})
355
356 if [[ ${#disks[*]} -lt 2 ]]; then
357 log_fail "A raid-z requires a minimum of two disks."
358 fi
359
360 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
361 log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
362 log_must $ZFS create $TESTPOOL/$TESTFS
363 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
364
365 log_pass
366 }
367
368 #
369 # Common function used to cleanup storage pools and datasets.
370 #
371 # Invoked at the start of the test suite to ensure the system
372 # is in a known state, and also at the end of each set of
373 # sub-tests to ensure errors from one set of tests don't
374 # impact the execution of the next set.
375
376 function default_cleanup
377 {
378 default_cleanup_noexit
379
380 log_pass
381 }
382
383 function default_cleanup_noexit
384 {
385 typeset exclude=""
386 typeset pool=""
387 #
388 # Destroying the pool will also destroy any
389 # filesystems it contains.
390 #
391 if is_global_zone; then
392 $ZFS unmount -a > /dev/null 2>&1
393 [[ -z "$KEEP" ]] && KEEP="rpool"
394 exclude=`eval $ECHO \"'(${KEEP})'\"`
395 ALL_POOLS=$($ZPOOL list -H -o name \
396 | $GREP -v "$NO_POOLS" | $EGREP -v "$exclude")
397 # Here, we loop through the pools we're allowed to
398 # destroy, only destroying them if it's safe to do
399 # so.
400 while [ ! -z "${ALL_POOLS}" ]
401 do
402 for pool in ${ALL_POOLS}
403 do
404 if safe_to_destroy_pool $pool ;
405 then
406 destroy_pool $pool
407 fi
408 ALL_POOLS=$($ZPOOL list -H -o name \
409 | $GREP -v "$NO_POOLS" \
410 | $EGREP -v "$exclude")
411 done
412 done
413
414 $ZFS mount -a
415 else
416 typeset fs=""
417 for fs in $($ZFS list -H -o name \
418 | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
419 datasetexists $fs && \
420 log_must $ZFS destroy -Rf $fs
421 done
422
423 # Cleanup is needed here to avoid leaving garbage directories behind.
424 for fs in $($ZFS list -H -o name); do
425 [[ $fs == /$ZONE_POOL ]] && continue
426 [[ -d $fs ]] && log_must $RM -rf $fs/*
427 done
428
429 #
430 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
431 # the default value
432 #
433 for fs in $($ZFS list -H -o name); do
434 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
435 log_must $ZFS set reservation=none $fs
436 log_must $ZFS set recordsize=128K $fs
437 log_must $ZFS set mountpoint=/$fs $fs
438 typeset enc=""
439 enc=$(get_prop encryption $fs)
440 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
441 [[ "$enc" == "off" ]]; then
442 log_must $ZFS set checksum=on $fs
443 fi
444 log_must $ZFS set compression=off $fs
445 log_must $ZFS set atime=on $fs
446 log_must $ZFS set devices=off $fs
447 log_must $ZFS set exec=on $fs
448 log_must $ZFS set setuid=on $fs
449 log_must $ZFS set readonly=off $fs
450 log_must $ZFS set snapdir=hidden $fs
451 log_must $ZFS set aclmode=groupmask $fs
452 log_must $ZFS set aclinherit=secure $fs
453 fi
454 done
455 fi
456
457 [[ -d $TESTDIR ]] && \
458 log_must $RM -rf $TESTDIR
459 }
460
461
462 #
463 # Common function used to cleanup storage pools, file systems
464 # and containers.
465 #
466 function default_container_cleanup
467 {
468 if ! is_global_zone; then
469 reexport_pool
470 fi
471
472 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
473 [[ $? -eq 0 ]] && \
474 log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1
475
476 datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
477 log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
478
479 datasetexists $TESTPOOL/$TESTCTR && \
480 log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR
481
482 [[ -e $TESTDIR1 ]] && \
483 log_must $RM -rf $TESTDIR1 > /dev/null 2>&1
484
485 default_cleanup
486 }
487
488 #
489 # Common function used to clean up a snapshot of a file system or volume.
490 # Defaults to deleting the file system's snapshot.
491 #
492 # $1 snapshot name
493 #
494 function destroy_snapshot
495 {
496 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
497
498 if ! snapexists $snap; then
499 log_fail "'$snap' does not exist."
500 fi
501
502 #
503 # The value returned by 'get_prop' is not the real mountpoint when
504 # the snapshot is unmounted. So, first check and make sure this
505 # snapshot is currently mounted on the system.
506 #
507 typeset mtpt=""
508 if ismounted $snap; then
509 mtpt=$(get_prop mountpoint $snap)
510 (($? != 0)) && \
511 log_fail "get_prop mountpoint $snap failed."
512 fi
513
514 log_must $ZFS destroy $snap
515 [[ $mtpt != "" && -d $mtpt ]] && \
516 log_must $RM -rf $mtpt
517 }
518
519 #
520 # Common function used to clean up a clone.
521 #
522 # $1 clone name
523 #
524 function destroy_clone
525 {
526 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
527
528 if ! datasetexists $clone; then
529 log_fail "'$clone' does not exist."
530 fi
531
532 # For the same reason as in destroy_snapshot
533 typeset mtpt=""
534 if ismounted $clone; then
535 mtpt=$(get_prop mountpoint $clone)
536 (($? != 0)) && \
537 log_fail "get_prop mountpoint $clone failed."
538 fi
539
540 log_must $ZFS destroy $clone
541 [[ $mtpt != "" && -d $mtpt ]] && \
542 log_must $RM -rf $mtpt
543 }
544
545 # Return 0 if a snapshot exists; $? otherwise
546 #
547 # $1 - snapshot name
548
549 function snapexists
550 {
551 $ZFS list -H -t snapshot "$1" > /dev/null 2>&1
552 return $?
553 }
554
555 #
556 # Set a property to a certain value on a dataset.
557 # Sets a property of the dataset to the value as passed in.
558 # @param:
559 # $1 dataset whose property is being set
560 # $2 property to set
561 # $3 value to set property to
562 # @return:
563 # 0 if the property could be set.
564 # non-zero otherwise.
565 # @use: ZFS
566 #
567 function dataset_setprop
568 {
569 typeset fn=dataset_setprop
570
571 if (($# < 3)); then
572 log_note "$fn: Insufficient parameters (need 3, had $#)"
573 return 1
574 fi
575 typeset output=
576 output=$($ZFS set $2=$3 $1 2>&1)
577 typeset rv=$?
578 if ((rv != 0)); then
579 log_note "Setting property on $1 failed."
580 log_note "property $2=$3"
581 log_note "Return Code: $rv"
582 log_note "Output: $output"
583 return $rv
584 fi
585 return 0
586 }
587
588 #
589 # Assign suite defined dataset properties.
590 # This function is used to apply the suite's defined default set of
591 # properties to a dataset.
592 # @parameters: $1 dataset to use
593 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
594 # @returns:
595 # 0 if the dataset has been altered.
596 # 1 if no pool name was passed in.
597 # 2 if the dataset could not be found.
598 # 3 if the dataset could not have its properties set.
599 #
600 function dataset_set_defaultproperties
601 {
602 typeset dataset="$1"
603
604 [[ -z $dataset ]] && return 1
605
606 typeset confset=
607 typeset -i found=0
608 for confset in $($ZFS list); do
609 if [[ $dataset = $confset ]]; then
610 found=1
611 break
612 fi
613 done
614 [[ $found -eq 0 ]] && return 2
615 if [[ -n $COMPRESSION_PROP ]]; then
616 dataset_setprop $dataset compression $COMPRESSION_PROP || \
617 return 3
618 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
619 fi
620 if [[ -n $CHECKSUM_PROP ]]; then
621 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
622 return 3
623 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
624 fi
625 return 0
626 }
627
628 #
629 # Check a numeric assertion
630 # @parameter: $@ the assertion to check
631 # @output: big loud notice if assertion failed
632 # @use: log_fail
633 #
634 function assert
635 {
636 (($@)) || log_fail "$@"
637 }
638
639 #
640 # Function to format partition size of a disk
641 # Given a disk cxtxdx reduces all partitions
642 # to 0 size
643 #
644 function zero_partitions #<whole_disk_name>
645 {
646 typeset diskname=$1
647 typeset i
648
649 if is_linux; then
650 log_must $FORMAT $DEV_DSKDIR/$diskname -s -- mklabel gpt
651 else
652 for i in 0 1 3 4 5 6 7
653 do
654 set_partition $i "" 0mb $diskname
655 done
656 fi
657 }
658
659 #
660 # Given a slice, size and disk, this function
661 # formats the slice to the specified size.
662 # Size should be specified with units as per
663 # the `format` command requirements eg. 100mb 3gb
664 #
665 # NOTE: This entire interface is problematic for the Linux parted utility
666 # which requires the end of the partition to be specified. It would be
667 # best to retire this interface and replace it with something more flexible.
668 # At the moment a best effort is made.
669 #
670 function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
671 {
672 typeset -i slicenum=$1
673 typeset start=$2
674 typeset size=$3
675 typeset disk=$4
676 [[ -z $slicenum || -z $size || -z $disk ]] && \
677 log_fail "The slice, size or disk name is unspecified."
678
679 if is_linux; then
680 typeset size_mb=${size%%[mMgG]}
681
682 size_mb=${size_mb%%[mMgG][bB]}
683 if [[ $size == *[gG]* ]]; then
684 ((size_mb = size_mb * 1024))
685 fi
686
687 # Create GPT partition table when setting slice 0 or
688 # when the device doesn't already contain a GPT label.
689 $FORMAT $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
690 typeset ret_val=$?
691 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
692 log_must $FORMAT $DEV_DSKDIR/$disk -s -- mklabel gpt
693 fi
694
695 # When no start is given align on the first cylinder.
696 if [[ -z "$start" ]]; then
697 start=1
698 fi
699
700 # Determine the cylinder size for the device and using
701 # that calculate the end offset in cylinders.
702 typeset -i cly_size_kb=0
703 cly_size_kb=$($FORMAT -m $DEV_DSKDIR/$disk -s -- \
704 unit cyl print | $HEAD -3 | $TAIL -1 | \
705 $AWK -F '[:k.]' '{print $4}')
706 ((end = (size_mb * 1024 / cly_size_kb) + start))
707
708 log_must $FORMAT $DEV_DSKDIR/$disk -s -- \
709 mkpart part$slicenum ${start}cyl ${end}cyl
710
711 $BLOCKDEV --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
712 block_device_wait
713 else
714 typeset format_file=/var/tmp/format_in.$$
715
716 $ECHO "partition" >$format_file
717 $ECHO "$slicenum" >> $format_file
718 $ECHO "" >> $format_file
719 $ECHO "" >> $format_file
720 $ECHO "$start" >> $format_file
721 $ECHO "$size" >> $format_file
722 $ECHO "label" >> $format_file
723 $ECHO "" >> $format_file
724 $ECHO "q" >> $format_file
725 $ECHO "q" >> $format_file
726
727 $FORMAT -e -s -d $disk -f $format_file
728 fi
729 typeset ret_val=$?
730 $RM -f $format_file
731 [[ $ret_val -ne 0 ]] && \
732 log_fail "Unable to format $disk slice $slicenum to $size"
733 return 0
734 }
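
#
# E.g. to create a 100mb partition for slice 0 at the default starting
# offset; the disk name is a placeholder:
#
#	set_partition 0 "" 100mb disk1
#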
735
736 #
737 # Get the end cyl of the given slice
738 #
739 function get_endslice #<disk> <slice>
740 {
741 typeset disk=$1
742 typeset slice=$2
743 if [[ -z $disk || -z $slice ]] ; then
744 log_fail "The disk name or slice number is unspecified."
745 fi
746
747 if is_linux; then
748 endcyl=$($FORMAT -s $DEV_DSKDIR/$disk -- unit cyl print | \
749 $GREP "part${slice}" | \
750 $AWK '{print $3}' | \
751 $SED 's,cyl,,')
752 ((endcyl = (endcyl + 1)))
753 else
754 disk=${disk#/dev/dsk/}
755 disk=${disk#/dev/rdsk/}
756 disk=${disk%s*}
757
758 typeset -i ratio=0
759 ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
760 $GREP "sectors\/cylinder" | \
761 $AWK '{print $2}')
762
763 if ((ratio == 0)); then
764 return
765 fi
766
767 typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
768 $NAWK -v token="$slice" '{if ($1==token) print $6}')
769
770 ((endcyl = (endcyl + 1) / ratio))
771 fi
772
773 echo $endcyl
774 }
775
776
777 #
778 # Given a size,disk and total slice number, this function formats the
779 # disk slices from 0 to the total slice number with the same specified
780 # size.
781 #
782 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
783 {
784 typeset -i i=0
785 typeset slice_size=$1
786 typeset disk_name=$2
787 typeset total_slices=$3
788 typeset cyl
789
790 zero_partitions $disk_name
791 while ((i < $total_slices)); do
792 if ! is_linux; then
793 if ((i == 2)); then
794 ((i = i + 1))
795 continue
796 fi
797 fi
798 set_partition $i "$cyl" $slice_size $disk_name
799 cyl=$(get_endslice $disk_name $i)
800 ((i = i+1))
801 done
802 }
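
#
# E.g. to cut a placeholder disk into 7 slices of 200mb each (slice 2
# is skipped on non-Linux platforms, as noted above):
#
#	partition_disk 200mb disk1 7
#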
803
804 #
805 # This function continues to write filenum files into each of dirnum
806 # directories until either $FILE_WRITE returns an error or the
807 # maximum number of files per directory has been written.
808 #
809 # Usage:
810 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
811 #
812 # Return value: 0 on success
813 # non 0 on error
814 #
815 # Where :
816 # destdir: is the directory where everything is to be created under
817 # dirnum: the maximum number of subdirectories to use, -1 no limit
818 # filenum: the maximum number of files per subdirectory
819 # bytes: number of bytes to write
820 # num_writes: number of times to write out bytes
821 # data: the data that will be written
822 #
823 # E.g.
824 # fill_fs /testdir 20 25 1024 256 0
825 #
826 # Note: bytes * num_writes equals the size of the testfile
827 #
828 function fill_fs # destdir dirnum filenum bytes num_writes data
829 {
830 typeset destdir=${1:-$TESTDIR}
831 typeset -i dirnum=${2:-50}
832 typeset -i filenum=${3:-50}
833 typeset -i bytes=${4:-8192}
834 typeset -i num_writes=${5:-10240}
835 typeset -i data=${6:-0}
836
837 typeset -i odirnum=1
838 typeset -i idirnum=0
839 typeset -i fn=0
840 typeset -i retval=0
841
842 log_must $MKDIR -p $destdir/$idirnum
843 while (($odirnum > 0)); do
844 if ((dirnum >= 0 && idirnum >= dirnum)); then
845 odirnum=0
846 break
847 fi
848 $FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \
849 -b $bytes -c $num_writes -d $data
850 retval=$?
851 if (($retval != 0)); then
852 odirnum=0
853 break
854 fi
855 if (($fn >= $filenum)); then
856 fn=0
857 ((idirnum = idirnum + 1))
858 log_must $MKDIR -p $destdir/$idirnum
859 else
860 ((fn = fn + 1))
861 fi
862 done
863 return $retval
864 }
865
866 #
867 # Simple function to get the specified property. If unable to
868 # get the property, log a note and return 1.
869 #
870 # Note property is in 'parsable' format (-p)
871 #
872 function get_prop # property dataset
873 {
874 typeset prop_val
875 typeset prop=$1
876 typeset dataset=$2
877
878 prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
879 if [[ $? -ne 0 ]]; then
880 log_note "Unable to get $prop property for dataset " \
881 "$dataset"
882 return 1
883 fi
884
885 $ECHO $prop_val
886 return 0
887 }
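
#
# E.g. a sketch of capturing a property value in a test, with an
# explicit failure check:
#
#	mntpt=$(get_prop mountpoint $TESTPOOL/$TESTFS) || \
#	    log_fail "cannot get mountpoint of $TESTPOOL/$TESTFS"
#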
888
889 #
890 # Simple function to get the specified property of a pool. If unable
891 # to get the property, log a note and return 1.
892 #
893 function get_pool_prop # property pool
894 {
895 typeset prop_val
896 typeset prop=$1
897 typeset pool=$2
898
899 if poolexists $pool ; then
900 prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
901 $AWK '{print $3}')
902 if [[ $? -ne 0 ]]; then
903 log_note "Unable to get $prop property for pool " \
904 "$pool"
905 return 1
906 fi
907 else
908 log_note "Pool $pool does not exist."
909 return 1
910 fi
911
912 $ECHO $prop_val
913 return 0
914 }
915
916 # Return 0 if a pool exists; $? otherwise
917 #
918 # $1 - pool name
919
920 function poolexists
921 {
922 typeset pool=$1
923
924 if [[ -z $pool ]]; then
925 log_note "No pool name given."
926 return 1
927 fi
928
929 $ZPOOL get name "$pool" > /dev/null 2>&1
930 return $?
931 }
932
933 # Return 0 if all the specified datasets exist; $? otherwise
934 #
935 # $1-n dataset name
936 function datasetexists
937 {
938 if (($# == 0)); then
939 log_note "No dataset name given."
940 return 1
941 fi
942
943 while (($# > 0)); do
944 $ZFS get name $1 > /dev/null 2>&1 || \
945 return $?
946 shift
947 done
948
949 return 0
950 }
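
#
# E.g. the common cleanup idiom built on this helper:
#
#	datasetexists $TESTPOOL/$TESTFS@$TESTSNAP && \
#	    log_must $ZFS destroy $TESTPOOL/$TESTFS@$TESTSNAP
#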
951
952 # return 0 if none of the specified datasets exists, otherwise return 1.
953 #
954 # $1-n dataset name
955 function datasetnonexists
956 {
957 if (($# == 0)); then
958 log_note "No dataset name given."
959 return 1
960 fi
961
962 while (($# > 0)); do
963 $ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
964 && return 1
965 shift
966 done
967
968 return 0
969 }
970
971 #
972 # Given a mountpoint, or a dataset name, determine if it is shared.
973 #
974 # Returns 0 if shared, 1 otherwise.
975 #
976 function is_shared
977 {
978 typeset fs=$1
979 typeset mtpt
980
981 if is_linux; then
982 log_unsupported "Currently unsupported by the test framework"
983 return 1
984 fi
985
986 if [[ $fs != "/"* ]] ; then
987 if datasetnonexists "$fs" ; then
988 return 1
989 else
990 mtpt=$(get_prop mountpoint "$fs")
991 case $mtpt in
992 none|legacy|-) return 1
993 ;;
994 *) fs=$mtpt
995 ;;
996 esac
997 fi
998 fi
999
1000 for mtpt in `$SHARE | $AWK '{print $2}'` ; do
1001 if [[ $mtpt == $fs ]] ; then
1002 return 0
1003 fi
1004 done
1005
1006 typeset stat=$($SVCS -H -o STA nfs/server:default)
1007 if [[ $stat != "ON" ]]; then
1008 log_note "Current nfs/server status: $stat"
1009 fi
1010
1011 return 1
1012 }
1013
1014 #
1015 # Given a mountpoint, determine if it is not shared.
1016 #
1017 # Returns 0 if not shared, 1 otherwise.
1018 #
1019 function not_shared
1020 {
1021 typeset fs=$1
1022
1023 if is_linux; then
1024 log_unsupported "Currently unsupported by the test framework"
1025 return 1
1026 fi
1027
1028 is_shared $fs
1029 if (($? == 0)); then
1030 return 1
1031 fi
1032
1033 return 0
1034 }
1035
1036 #
1037 # Helper function to unshare a mountpoint.
1038 #
1039 function unshare_fs #fs
1040 {
1041 typeset fs=$1
1042
1043 if is_linux; then
1044 log_unsupported "Currently unsupported by the test framework"
1045 return 1
1046 fi
1047
1048 is_shared $fs
1049 if (($? == 0)); then
1050 log_must $ZFS unshare $fs
1051 fi
1052
1053 return 0
1054 }
1055
1056 #
1057 # Check NFS server status and trigger it online.
1058 #
1059 function setup_nfs_server
1060 {
1061 # Cannot share directory in non-global zone.
1062 #
1063 if ! is_global_zone; then
1064 log_note "Cannot trigger NFS server by sharing in LZ."
1065 return
1066 fi
1067
1068 if is_linux; then
1069 log_unsupported "Currently unsupported by the test framework"
1070 return
1071 fi
1072
1073 typeset nfs_fmri="svc:/network/nfs/server:default"
1074 if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
1075 #
1076 # Only a real sharing operation can bring the NFS server
1077 # online permanently.
1078 #
1079 typeset dummy=/tmp/dummy
1080
1081 if [[ -d $dummy ]]; then
1082 log_must $RM -rf $dummy
1083 fi
1084
1085 log_must $MKDIR $dummy
1086 log_must $SHARE $dummy
1087
1088 #
1089 # Wait for the fmri's status to reach its final state. While in
1090 # transition an asterisk (*) is appended to the instance status,
1091 # and unsharing at that point would revert the status to 'DIS'.
1092 #
1093 # Wait for at least 1 second.
1094 #
1095 log_must $SLEEP 1
1096 timeout=10
1097 while [[ $timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
1098 do
1099 log_must $SLEEP 1
1100
1101 ((timeout -= 1))
1102 done
1103
1104 log_must $UNSHARE $dummy
1105 log_must $RM -rf $dummy
1106 fi
1107
1108 log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
1109 }
1110
1111 #
1112 # To verify whether the calling process is in the global zone
1113 #
1114 # Return 0 if in global zone, 1 in non-global zone
1115 #
1116 function is_global_zone
1117 {
1118 typeset cur_zone=$($ZONENAME 2>/dev/null)
1119 if [[ $cur_zone != "global" ]]; then
1120 return 1
1121 fi
1122 return 0
1123 }
1124
1125 #
1126 # Verify whether test is permitted to run from
1127 # global zone, local zone, or both
1128 #
1129 # $1 zone limit, could be "global", "local", or "both"(no limit)
1130 #
1131 # Return 0 if permitted, otherwise exit with log_unsupported
1132 #
1133 function verify_runnable # zone limit
1134 {
1135 typeset limit=$1
1136
1137 [[ -z $limit ]] && return 0
1138
1139 if is_global_zone ; then
1140 case $limit in
1141 global|both)
1142 ;;
1143 local) log_unsupported "Test is unable to run from "\
1144 "global zone."
1145 ;;
1146 *) log_note "Warning: unknown limit $limit - " \
1147 "use both."
1148 ;;
1149 esac
1150 else
1151 case $limit in
1152 local|both)
1153 ;;
1154 global) log_unsupported "Test is unable to run from "\
1155 "local zone."
1156 ;;
1157 *) log_note "Warning: unknown limit $limit - " \
1158 "use both."
1159 ;;
1160 esac
1161
1162 reexport_pool
1163 fi
1164
1165 return 0
1166 }
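
#
# E.g. a test that only makes sense in the global zone starts with:
#
#	verify_runnable "global"
#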
1167
1168 # Return 0 if the pool is created successfully; $? otherwise
1169 # Note: In local zones, this function should return 0 silently.
1170 #
1171 # $1 - pool name
1172 # $2-n - [keyword] devs_list
1173
1174 function create_pool #pool devs_list
1175 {
1176 typeset pool=${1%%/*}
1177
1178 shift
1179
1180 if [[ -z $pool ]]; then
1181 log_note "Missing pool name."
1182 return 1
1183 fi
1184
1185 if poolexists $pool ; then
1186 destroy_pool $pool
1187 fi
1188
1189 if is_global_zone ; then
1190 [[ -d /$pool ]] && $RM -rf /$pool
1191 log_must $ZPOOL create -f $pool $@
1192 fi
1193
1194 return 0
1195 }
1196
1197 # Return 0 if the pool is destroyed successfully; 1 otherwise
1198 # Note: In local zones, this function should return 0 silently.
1199 #
1200 # $1 - pool name
1201 # Destroy pool with the given parameters.
1202
1203 function destroy_pool #pool
1204 {
1205 typeset pool=${1%%/*}
1206 typeset mtpt
1207
1208 if [[ -z $pool ]]; then
1209 log_note "No pool name given."
1210 return 1
1211 fi
1212
1213 if is_global_zone ; then
1214 if poolexists "$pool" ; then
1215 mtpt=$(get_prop mountpoint "$pool")
1216
1217 # At times, syseventd activity can cause attempts to
1218 # destroy a pool to fail with EBUSY. We retry a few
1219 # times allowing failures before requiring the destroy
1220 # to succeed.
1221 typeset -i wait_time=10 ret=1 count=0
1222 must=""
1223 while [[ $ret -ne 0 ]]; do
1224 $must $ZPOOL destroy -f $pool
1225 ret=$?
1226 [[ $ret -eq 0 ]] && break
1227 log_note "zpool destroy failed with $ret"
1228 [[ count++ -ge 7 ]] && must=log_must
1229 $SLEEP $wait_time
1230 done
1231
1232 [[ -d $mtpt ]] && \
1233 log_must $RM -rf $mtpt
1234 else
1235 log_note "Pool does not exist. ($pool)"
1236 return 1
1237 fi
1238 fi
1239
1240 return 0
1241 }
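
#
# E.g. the create/destroy pair above is the usual way to cycle a scratch
# pool; the device names are placeholders:
#
#	create_pool $TESTPOOL disk1 disk2
#	destroy_pool $TESTPOOL
#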
1242
1243 #
1244 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1245 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1246 # and a zvol device to the zone.
1247 #
1248 # $1 zone name
1249 # $2 zone root directory prefix
1250 # $3 zone ip
1251 #
1252 function zfs_zones_setup #zone_name zone_root zone_ip
1253 {
1254 typeset zone_name=${1:-$(hostname)-z}
1255 typeset zone_root=${2:-"/zone_root"}
1256 typeset zone_ip=${3:-"10.1.1.10"}
1257 typeset prefix_ctr=$ZONE_CTR
1258 typeset pool_name=$ZONE_POOL
1259 typeset -i cntctr=5
1260 typeset -i i=0
1261
1262 # Create a pool and 5 containers within it
1263 #
1264 [[ -d /$pool_name ]] && $RM -rf /$pool_name
1265 log_must $ZPOOL create -f $pool_name $DISKS
1266 while ((i < cntctr)); do
1267 log_must $ZFS create $pool_name/$prefix_ctr$i
1268 ((i += 1))
1269 done
1270
1271 # create a zvol
1272 log_must $ZFS create -V 1g $pool_name/zone_zvol
1273 block_device_wait
1274
1275 #
1276 # If the current system supports slog, add slog devices to the pool
1277 #
1278 if verify_slog_support ; then
1279 typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
1280 log_must $MKFILE 100M $sdevs
1281 log_must $ZPOOL add $pool_name log mirror $sdevs
1282 fi
1283
1284 # this isn't supported just yet.
1285 # Create a filesystem. In order to add this to
1286 # the zone, it must have its mountpoint set to 'legacy'
1287 # log_must $ZFS create $pool_name/zfs_filesystem
1288 # log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem
1289
1290 [[ -d $zone_root ]] && \
1291 log_must $RM -rf $zone_root/$zone_name
1292 [[ ! -d $zone_root ]] && \
1293 log_must $MKDIR -p -m 0700 $zone_root/$zone_name
1294
1295 # Create zone configure file and configure the zone
1296 #
1297 typeset zone_conf=/tmp/zone_conf.$$
1298 $ECHO "create" > $zone_conf
1299 $ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
1300 $ECHO "set autoboot=true" >> $zone_conf
1301 i=0
1302 while ((i < cntctr)); do
1303 $ECHO "add dataset" >> $zone_conf
1304 $ECHO "set name=$pool_name/$prefix_ctr$i" >> \
1305 $zone_conf
1306 $ECHO "end" >> $zone_conf
1307 ((i += 1))
1308 done
1309
1310 # add our zvol to the zone
1311 $ECHO "add device" >> $zone_conf
1312 $ECHO "set match=$ZVOL_DEVDIR/$pool_name/zone_zvol" >> $zone_conf
1313 $ECHO "end" >> $zone_conf
1314
1315 # add a corresponding zvol rdsk to the zone
1316 $ECHO "add device" >> $zone_conf
1317 $ECHO "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1318 $ECHO "end" >> $zone_conf
1319
1320 # once it's supported, we'll add our filesystem to the zone
1321 # $ECHO "add fs" >> $zone_conf
1322 # $ECHO "set type=zfs" >> $zone_conf
1323 # $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
1324 # $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
1325 # $ECHO "end" >> $zone_conf
1326
1327 $ECHO "verify" >> $zone_conf
1328 $ECHO "commit" >> $zone_conf
1329 log_must $ZONECFG -z $zone_name -f $zone_conf
1330 log_must $RM -f $zone_conf
1331
1332 # Install the zone
1333 $ZONEADM -z $zone_name install
1334 if (($? == 0)); then
1335 log_note "SUCCESS: $ZONEADM -z $zone_name install"
1336 else
1337 log_fail "FAIL: $ZONEADM -z $zone_name install"
1338 fi
1339
1340 # Install sysidcfg file
1341 #
1342 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1343 $ECHO "system_locale=C" > $sysidcfg
1344 $ECHO "terminal=dtterm" >> $sysidcfg
1345 $ECHO "network_interface=primary {" >> $sysidcfg
1346 $ECHO "hostname=$zone_name" >> $sysidcfg
1347 $ECHO "}" >> $sysidcfg
1348 $ECHO "name_service=NONE" >> $sysidcfg
1349 $ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
1350 $ECHO "security_policy=NONE" >> $sysidcfg
1351 $ECHO "timezone=US/Eastern" >> $sysidcfg
1352
1353 # Boot this zone
1354 log_must $ZONEADM -z $zone_name boot
1355 }
1356
1357 #
1358 # Reexport TESTPOOL & TESTPOOL(1-4)
1359 #
1360 function reexport_pool
1361 {
1362 typeset -i cntctr=5
1363 typeset -i i=0
1364
1365 while ((i < cntctr)); do
1366 if ((i == 0)); then
1367 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1368 if ! ismounted $TESTPOOL; then
1369 log_must $ZFS mount $TESTPOOL
1370 fi
1371 else
1372 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1373 if eval ! ismounted \$TESTPOOL$i; then
1374 log_must eval $ZFS mount \$TESTPOOL$i
1375 fi
1376 fi
1377 ((i += 1))
1378 done
1379 }
1380
1381 #
1382 # Verify a given disk is online or offline
1383 #
1384 # Return 0 if pool/disk matches expected state, 1 otherwise
1385 #
1386 function check_state # pool disk state{online,offline}
1387 {
1388 typeset pool=$1
1389 typeset disk=${2#$DEV_DSKDIR/}
1390 typeset state=$3
1391
1392 $ZPOOL status -v $pool | grep "$disk" \
1393 | grep -i "$state" > /dev/null 2>&1
1394
1395 return $?
1396 }
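
#
# E.g. after offlining a device a test might assert the new state; the
# disk name is a placeholder:
#
#	log_must $ZPOOL offline $TESTPOOL disk1
#	log_must check_state $TESTPOOL disk1 "offline"
#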
1397
1398 #
1399 # Get the mountpoint of a snapshot.
1400 # A snapshot uses <mp_filesystem>/.zfs/snapshot/<snap>
1401 # as its mountpoint
1402 #
1403 function snapshot_mountpoint
1404 {
1405 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1406
1407 if [[ $dataset != *@* ]]; then
1408 log_fail "Invalid snapshot name '$dataset'."
1409 fi
1410
1411 typeset fs=${dataset%@*}
1412 typeset snap=${dataset#*@}
1413
1414 if [[ -z $fs || -z $snap ]]; then
1415 log_fail "Invalid snapshot name '$dataset'."
1416 fi
1417
1418 $ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1419 }
1420
1421 #
1422 # Given a pool and file system, this function will verify the file system
1423 # using the zdb internal tool. Note that the pool is exported and imported
1424 # to ensure it has consistent state.
1425 #
1426 function verify_filesys # pool filesystem dir
1427 {
1428 typeset pool="$1"
1429 typeset filesys="$2"
1430 typeset zdbout="/tmp/zdbout.$$"
1431
1432 shift
1433 shift
1434 typeset dirs=$@
1435 typeset search_path=""
1436
1437 log_note "Calling $ZDB to verify filesystem '$filesys'"
1438 $ZFS unmount -a > /dev/null 2>&1
1439 log_must $ZPOOL export $pool
1440
1441 if [[ -n $dirs ]] ; then
1442 for dir in $dirs ; do
1443 search_path="$search_path -d $dir"
1444 done
1445 fi
1446
1447 log_must $ZPOOL import $search_path $pool
1448
1449 $ZDB -cudi $filesys > $zdbout 2>&1
1450 if [[ $? != 0 ]]; then
1451 log_note "Output: $ZDB -cudi $filesys"
1452 $CAT $zdbout
1453 log_fail "$ZDB detected errors with: '$filesys'"
1454 fi
1455
1456 log_must $ZFS mount -a
1457 log_must $RM -rf $zdbout
1458 }
1459
1460 #
1461 # Given a pool, this function lists all disks in the pool
1462 #
1463 function get_disklist # pool
1464 {
1465 typeset disklist=""
1466
1467 disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \
1468 $GREP -v "\-\-\-\-\-" | \
1469 $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1470
1471 $ECHO $disklist
1472 }
1473
1474 # /**
1475 # This function kills a given list of processes after a time period. We use
1476 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1477 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1478 # would be listed as FAIL, which we don't want : we're happy with stress tests
1479 # running for a certain amount of time, then finishing.
1480 #
1481 # @param $1 the time in seconds after which we should terminate these processes
1482 # @param $2..$n the processes we wish to terminate.
1483 # */
1484 function stress_timeout
1485 {
1486 typeset -i TIMEOUT=$1
1487 shift
1488 typeset cpids="$@"
1489
1490 log_note "Waiting for child processes($cpids). " \
1491 "It could last dozens of minutes, please be patient ..."
1492 log_must $SLEEP $TIMEOUT
1493
1494 log_note "Killing child processes after ${TIMEOUT} stress timeout."
1495 typeset pid
1496 for pid in $cpids; do
1497 $PS -p $pid > /dev/null 2>&1
1498 if (($? == 0)); then
1499 log_must $KILL -USR1 $pid
1500 fi
1501 done
1502 }
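
#
# E.g. a stress test that backgrounded two workers might end with the
# following; the pids are illustrative:
#
#	stress_timeout 300 $pid1 $pid2
#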
1503
1504 #
1505 # Verify a given hotspare disk is inuse or avail
1506 #
1507 # Return 0 if pool/disk matches expected state, 1 otherwise
1508 #
1509 function check_hotspare_state # pool disk state{inuse,avail}
1510 {
1511 typeset pool=$1
1512 typeset disk=${2#$DEV_DSKDIR/}
1513 typeset state=$3
1514
1515 cur_state=$(get_device_state $pool $disk "spares")
1516
1517 if [[ $state != ${cur_state} ]]; then
1518 return 1
1519 fi
1520 return 0
1521 }
1522
1523 #
1524 # Verify a given slog disk is inuse or avail
1525 #
1526 # Return 0 if pool/disk matches expected state, 1 otherwise
1527 #
1528 function check_slog_state # pool disk state{online,offline,unavail}
1529 {
1530 typeset pool=$1
1531 typeset disk=${2#$DEV_DSKDIR/}
1532 typeset state=$3
1533
1534 cur_state=$(get_device_state $pool $disk "logs")
1535
1536 if [[ $state != ${cur_state} ]]; then
1537 return 1
1538 fi
1539 return 0
1540 }
1541
1542 #
1543 # Verify a given vdev disk is inuse or avail
1544 #
1545 # Return 0 if pool/disk matches expected state, 1 otherwise
1546 #
1547 function check_vdev_state # pool disk state{online,offline,unavail}
1548 {
1549 typeset pool=$1
1550 typeset disk=${2#$DEV_DSKDIR/}
1551 typeset state=$3
1552
1553 cur_state=$(get_device_state $pool $disk)
1554
1555 if [[ $state != ${cur_state} ]]; then
1556 return 1
1557 fi
1558 return 0
1559 }
1560
1561 #
1562 # Check the output of 'zpool status -v <pool>' to see if the content
1563 # of <token> contains the <keyword> specified.
1564 #
1565 # Return 0 if it contains the keyword, 1 otherwise
1566 #
1567 function check_pool_status # pool token keyword
1568 {
1569 typeset pool=$1
1570 typeset token=$2
1571 typeset keyword=$3
1572
1573 $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
1574 ($1==token) {print $0}' \
1575 | $GREP -i "$keyword" > /dev/null 2>&1
1576
1577 return $?
1578 }
1579
1580 #
1581 # The following 5 functions are instances of check_pool_status():
1582 # is_pool_resilvering - to check if the pool resilver is in progress
1583 # is_pool_resilvered - to check if the pool resilver is completed
1584 # is_pool_scrubbing - to check if the pool scrub is in progress
1585 # is_pool_scrubbed - to check if the pool scrub is completed
1586 # is_pool_scrub_stopped - to check if the pool scrub is stopped
1587 #
1588 function is_pool_resilvering #pool
1589 {
1590 check_pool_status "$1" "scan" "resilver in progress since "
1591 return $?
1592 }
1593
1594 function is_pool_resilvered #pool
1595 {
1596 check_pool_status "$1" "scan" "resilvered "
1597 return $?
1598 }
1599
1600 function is_pool_scrubbing #pool
1601 {
1602 check_pool_status "$1" "scan" "scrub in progress since "
1603 return $?
1604 }
1605
1606 function is_pool_scrubbed #pool
1607 {
1608 check_pool_status "$1" "scan" "scrub repaired"
1609 return $?
1610 }
1611
1612 function is_pool_scrub_stopped #pool
1613 {
1614 check_pool_status "$1" "scan" "scrub canceled"
1615 return $?
1616 }
1617
1618 #
1619 # Use create_pool()/destroy_pool() to clean up the information
1620 # on the given disks to avoid slice overlapping.
1621 #
1622 function cleanup_devices #vdevs
1623 {
1624 typeset pool="foopool$$"
1625
1626 if poolexists $pool ; then
1627 destroy_pool $pool
1628 fi
1629
1630 create_pool $pool $@
1631 destroy_pool $pool
1632
1633 return 0
1634 }
1635
1636 #
1637 # Verify the rsh connectivity to each remote host in RHOSTS.
1638 #
1639 # Return 0 if remote host is accessible; otherwise 1.
1640 # $1 remote host name
1641 # $2 username
1642 #
1643 function verify_rsh_connect #rhost, username
1644 {
1645 typeset rhost=$1
1646 typeset username=$2
1647 typeset rsh_cmd="$RSH -n"
1648 typeset cur_user=
1649
1650 $GETENT hosts $rhost >/dev/null 2>&1
1651 if (($? != 0)); then
1652 log_note "$rhost cannot be found in the" \
1653 "administrative database."
1654 return 1
1655 fi
1656
1657 $PING $rhost 3 >/dev/null 2>&1
1658 if (($? != 0)); then
1659 log_note "$rhost is not reachable."
1660 return 1
1661 fi
1662
1663 if ((${#username} != 0)); then
1664 rsh_cmd="$rsh_cmd -l $username"
1665 cur_user="given user \"$username\""
1666 else
1667 cur_user="current user \"`$LOGNAME`\""
1668 fi
1669
1670 if ! $rsh_cmd $rhost $TRUE; then
1671 log_note "$RSH to $rhost is not accessible" \
1672 "with $cur_user."
1673 return 1
1674 fi
1675
1676 return 0
1677 }
1678
1679 #
1680 # Verify the remote host connection via rsh after rebooting
1681 # $1 remote host
1682 #
1683 function verify_remote
1684 {
1685 rhost=$1
1686
1687 #
1688 # The following loop waits for the remote system rebooting.
1689 # Each iteration will wait for 150 seconds. There are
1690 # 5 iterations in total, so the total timeout value will
1691 # be 12.5 minutes for the system rebooting. This number
1692 # is approximate.
1693 #
1694 typeset -i count=0
1695 while ! verify_rsh_connect $rhost; do
1696 sleep 150
1697 ((count = count + 1))
1698 if ((count > 5)); then
1699 return 1
1700 fi
1701 done
1702 return 0
1703 }
1704
1705 #
1706 # Replacement function for /usr/bin/rsh. This function wraps
1707 # /usr/bin/rsh and additionally returns the execution status of the
1708 # last remote command.
1709 #
1710 # $1 username passed down to the -l option of /usr/bin/rsh
1711 # $2 remote machine hostname
1712 # $3... command string
1713 #
1714
1715 function rsh_status
1716 {
1717 typeset ruser=$1
1718 typeset rhost=$2
1719 typeset -i ret=0
1720 typeset cmd_str=""
1721 typeset rsh_str=""
1722
1723 shift; shift
1724 cmd_str="$@"
1725
1726 err_file=/tmp/${rhost}.$$.err
1727 if ((${#ruser} == 0)); then
1728 rsh_str="$RSH -n"
1729 else
1730 rsh_str="$RSH -n -l $ruser"
1731 fi
1732
1733 $rsh_str $rhost /bin/ksh -c "'$cmd_str; \
1734 print -u 2 \"status=\$?\"'" \
1735 >/dev/null 2>$err_file
1736 ret=$?
1737 if (($ret != 0)); then
1738 $CAT $err_file
1739 $RM -f $err_file
1740 log_fail "$RSH itself failed with exit code $ret..."
1741 fi
1742
1743 ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
1744 $CUT -d= -f2)
1745 (($ret != 0)) && $CAT $err_file >&2
1746
1747 $RM -f $err_file >/dev/null 2>&1
1748 return $ret
1749 }
1750
1751 #
1752 # Get the SUNWstc-fs-zfs package installation path in a remote host
1753 # $1 remote host name
1754 #
1755 function get_remote_pkgpath
1756 {
1757 typeset rhost=$1
1758 typeset pkgpath=""
1759
1760 pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
1761 $CUT -d: -f2")
1762
1763 $ECHO $pkgpath
1764 }
1765
1766 #/**
1767 # A function to find and locate free disks on a system or from given
1768 # disks as the parameter. It works by excluding disks that are in use
1769 # as swap devices or dump devices, and disks that appear in /etc/mnttab
1770 #
1771 # $@ given disks to find which are free, default is all disks in
1772 # the test system
1773 #
1774 # @return a string containing the list of available disks
1775 #*/
1776 function find_disks
1777 {
1778 # Trust provided list, no attempt is made to locate unused devices.
1779 if is_linux; then
1780 $ECHO "$@"
1781 return
1782 fi
1783
1784
1785 sfi=/tmp/swaplist.$$
1786 dmpi=/tmp/dumpdev.$$
1787 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1788
1789 $SWAP -l > $sfi
1790 $DUMPADM > $dmpi 2>/dev/null
1791
1792 # write an awk script that can process the output of format
1793 # to produce a list of disks we know about. Note that we have
1794 # to escape "$2" so that the shell doesn't interpret it while
1795 # we're creating the awk script.
1796 # -------------------
1797 $CAT > /tmp/find_disks.awk <<EOF
1798 #!/bin/nawk -f
1799 BEGIN { FS="."; }
1800
1801 /^Specify disk/{
1802 searchdisks=0;
1803 }
1804
1805 {
1806 if (searchdisks && \$2 !~ "^$"){
1807 split(\$2,arr," ");
1808 print arr[1];
1809 }
1810 }
1811
1812 /^AVAILABLE DISK SELECTIONS:/{
1813 searchdisks=1;
1814 }
1815 EOF
1816 #---------------------
1817
1818 $CHMOD 755 /tmp/find_disks.awk
1819 disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
1820 $RM /tmp/find_disks.awk
1821
1822 unused=""
1823 for disk in $disks; do
1824 # Check for mounted
1825 $GREP "${disk}[sp]" /etc/mnttab >/dev/null
1826 (($? == 0)) && continue
1827 # Check for swap
1828 $GREP "${disk}[sp]" $sfi >/dev/null
1829 (($? == 0)) && continue
1830 # check for dump device
1831 $GREP "${disk}[sp]" $dmpi >/dev/null
1832 (($? == 0)) && continue
1833 # check to see if this disk hasn't been explicitly excluded
1834 # by a user-set environment variable
1835 $ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
1836 (($? == 0)) && continue
1837 unused_candidates="$unused_candidates $disk"
1838 done
1839 $RM $sfi
1840 $RM $dmpi
1841
1842 # now just check to see if those disks do actually exist
1843 # by looking for a device pointing to the first slice in
1844 # each case. limit the number to max_finddisksnum
1845 count=0
1846 for disk in $unused_candidates; do
1847 if [ -b $DEV_DSKDIR/${disk}s0 ]; then
1848 if [ $count -lt $max_finddisksnum ]; then
1849 unused="$unused $disk"
1850 # do not impose limit if $@ is provided
1851 [[ -z $@ ]] && ((count = count + 1))
1852 fi
1853 fi
1854 done
1855
1856 # finally, return our disk list
1857 $ECHO $unused
1858 }
1859
1860 #
1861 # Add specified user to specified group
1862 #
1863 # $1 group name
1864 # $2 user name
1865 # $3 base of the homedir (optional)
1866 #
1867 function add_user #<group_name> <user_name> <basedir>
1868 {
1869 typeset gname=$1
1870 typeset uname=$2
1871 typeset basedir=${3:-"/var/tmp"}
1872
1873 if ((${#gname} == 0 || ${#uname} == 0)); then
1874 log_fail "group name or user name is not defined."
1875 fi
1876
1877 log_must $USERADD -g $gname -d $basedir/$uname -m $uname
1878
1879 # Add new users to the same group and the command line utils.
1880 # This allows them to be run out of the original user's home
1881 # directory as long as it is permissioned to be group readable.
1882 if is_linux; then
1883 cmd_group=$(stat --format="%G" $ZFS)
1884 log_must $USERMOD -a -G $cmd_group $uname
1885 fi
1886
1887 return 0
1888 }
1889
1890 #
1891 # Delete the specified user.
1892 #
1893 # $1 login name
1894 # $2 base of the homedir (optional)
1895 #
1896 function del_user #<logname> <basedir>
1897 {
1898 typeset user=$1
1899 typeset basedir=${2:-"/var/tmp"}
1900
1901 if ((${#user} == 0)); then
1902 log_fail "login name is necessary."
1903 fi
1904
1905 if $ID $user > /dev/null 2>&1; then
1906 log_must $USERDEL $user
1907 fi
1908
1909 [[ -d $basedir/$user ]] && $RM -fr $basedir/$user
1910
1911 return 0
1912 }
1913
1914 #
1915 # Select valid gid and create specified group.
1916 #
1917 # $1 group name
1918 #
1919 function add_group #<group_name>
1920 {
1921 typeset group=$1
1922
1923 if ((${#group} == 0)); then
1924 log_fail "group name is necessary."
1925 fi
1926
1927 # Assign 100 as the base gid, a larger value is selected for
1928 # Linux because for many distributions 1000 and under are reserved.
1929 if is_linux; then
1930 while true; do
1931 $GROUPADD $group > /dev/null 2>&1
1932 typeset -i ret=$?
1933 case $ret in
1934 0) return 0 ;;
1935 *) return 1 ;;
1936 esac
1937 done
1938 else
1939 typeset -i gid=100
1940
1941 while true; do
1942 $GROUPADD -g $gid $group > /dev/null 2>&1
1943 typeset -i ret=$?
1944 case $ret in
1945 0) return 0 ;;
1946 # The gid is not unique
1947 4) ((gid += 1)) ;;
1948 *) return 1 ;;
1949 esac
1950 done
1951 fi
1952 }
1953
1954 #
1955 # Delete the specified group.
1956 #
1957 # $1 group name
1958 #
1959 function del_group #<group_name>
1960 {
1961 typeset grp=$1
1962 if ((${#grp} == 0)); then
1963 log_fail "group name is necessary."
1964 fi
1965
1966 if is_linux; then
1967 $GETENT group $grp > /dev/null 2>&1
1968 typeset -i ret=$?
1969 case $ret in
1970 # Group does not exist.
1971 2) return 0 ;;
1972 # Name already exists as a group name
1973 0) log_must $GROUPDEL $grp ;;
1974 *) return 1 ;;
1975 esac
1976 else
1977 $GROUPMOD -n $grp $grp > /dev/null 2>&1
1978 typeset -i ret=$?
1979 case $ret in
1980 # Group does not exist.
1981 6) return 0 ;;
1982 # Name already exists as a group name
1983 9) log_must $GROUPDEL $grp ;;
1984 *) return 1 ;;
1985 esac
1986 fi
1987
1988 return 0
1989 }
1990
1991 #
1992 # This function will return true if it's safe to destroy the pool passed
1993 # as argument 1. It checks for pools based on zvols and files, and also
1994 # files contained in a pool that may have a different mountpoint.
1995 #
1996 function safe_to_destroy_pool { # $1 the pool name
1997
1998 typeset pool=""
1999 typeset DONT_DESTROY=""
2000
2001 # We check that by deleting the $1 pool, we're not
2002 # going to pull the rug out from other pools. Do this
2003 # by looking at all other pools, ensuring that they
2004 # aren't built from files or zvols contained in this pool.
2005
2006 for pool in $($ZPOOL list -H -o name)
2007 do
2008 ALTMOUNTPOOL=""
2009
2010 # this is a list of the file vdevs backing $pool whose paths
2011 # lie under /$1/, i.e. files contained in the pool being checked
2012 FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
2013 $AWK '{print $1}')
2014
2015 # this is a list of the zvols that make up the pool
2016 ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "$ZVOL_DEVDIR/$1$" \
2017 | $AWK '{print $1}')
2018
2019 # also want to determine if it's a file-based pool using an
2020 # alternate mountpoint...
2021 POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
2022 $GREP / | $AWK '{print $1}' | \
2023 $AWK -F/ '{print $2}' | $GREP -v "dev")
2024
2025 for pooldir in $POOL_FILE_DIRS
2026 do
2027 OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
2028 $GREP "${pooldir}$" | $AWK '{print $1}')
2029
2030 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2031 done
2032
2033
2034 if [ ! -z "$ZVOLPOOL" ]
2035 then
2036 DONT_DESTROY="true"
2037 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2038 fi
2039
2040 if [ ! -z "$FILEPOOL" ]
2041 then
2042 DONT_DESTROY="true"
2043 log_note "Pool $pool is built from $FILEPOOL on $1"
2044 fi
2045
2046 if [ ! -z "$ALTMOUNTPOOL" ]
2047 then
2048 DONT_DESTROY="true"
2049 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2050 fi
2051 done
2052
2053 if [ -z "${DONT_DESTROY}" ]
2054 then
2055 return 0
2056 else
2057 log_note "Warning: it is not safe to destroy $1!"
2058 return 1
2059 fi
2060 }
2061
2062 #
2063 # Get the available ZFS compression options
2064 # $1 option type zfs_set|zfs_compress
2065 #
2066 function get_compress_opts
2067 {
2068 typeset COMPRESS_OPTS
2069 typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2070 gzip-6 gzip-7 gzip-8 gzip-9"
2071
2072 if [[ $1 == "zfs_compress" ]] ; then
2073 COMPRESS_OPTS="on lzjb"
2074 elif [[ $1 == "zfs_set" ]] ; then
2075 COMPRESS_OPTS="on off lzjb"
2076 fi
2077 typeset valid_opts="$COMPRESS_OPTS"
2078 $ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
2079 if [[ $? -eq 0 ]]; then
2080 valid_opts="$valid_opts $GZIP_OPTS"
2081 fi
2082 $ECHO "$valid_opts"
2083 }
2084
2085 #
2086 # Verify zfs operation with -p option work as expected
2087 # $1 operation, value could be create, clone or rename
2088 # $2 dataset type, value could be fs or vol
2089 # $3 dataset name
2090 # $4 new dataset name
2091 #
2092 function verify_opt_p_ops
2093 {
2094 typeset ops=$1
2095 typeset datatype=$2
2096 typeset dataset=$3
2097 typeset newdataset=$4
2098
2099 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2100 log_fail "$datatype is not supported."
2101 fi
2102
2103 # check parameters accordingly
2104 case $ops in
2105 create)
2106 newdataset=$dataset
2107 dataset=""
2108 if [[ $datatype == "vol" ]]; then
2109 ops="create -V $VOLSIZE"
2110 fi
2111 ;;
2112 clone)
2113 if [[ -z $newdataset ]]; then
2114 log_fail "newdataset should not be empty" \
2115 "when ops is $ops."
2116 fi
2117 log_must datasetexists $dataset
2118 log_must snapexists $dataset
2119 ;;
2120 rename)
2121 if [[ -z $newdataset ]]; then
2122 log_fail "newdataset should not be empty" \
2123 "when ops is $ops."
2124 fi
2125 log_must datasetexists $dataset
2126 log_mustnot snapexists $dataset
2127 ;;
2128 *)
2129 log_fail "$ops is not supported."
2130 ;;
2131 esac
2132
2133 # make sure the upper level filesystem does not exist
2134 if datasetexists ${newdataset%/*} ; then
2135 log_must $ZFS destroy -rRf ${newdataset%/*}
2136 fi
2137
2138 # without -p option, operation will fail
2139 log_mustnot $ZFS $ops $dataset $newdataset
2140 log_mustnot datasetexists $newdataset ${newdataset%/*}
2141
2142 # with -p option, operation should succeed
2143 log_must $ZFS $ops -p $dataset $newdataset
2144 block_device_wait
2145
2146 if ! datasetexists $newdataset ; then
2147 log_fail "-p option does not work for $ops"
2148 fi
2149
2150 # when $ops is create or clone, redoing the operation should still return zero
2151 if [[ $ops != "rename" ]]; then
2152 log_must $ZFS $ops -p $dataset $newdataset
2153 fi
2154
2155 return 0
2156 }
2157
2158 #
2159 # Get configuration of pool
2160 # $1 pool name
2161 # $2 config name
2162 #
2163 function get_config
2164 {
2165 typeset pool=$1
2166 typeset config=$2
2167 typeset alt_root
2168
2169 if ! poolexists "$pool" ; then
2170 return 1
2171 fi
2172 alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
2173 if [[ $alt_root == "-" ]]; then
2174 value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
2175 '{print $2}')
2176 else
2177 value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
2178 '{print $2}')
2179 fi
2180 if [[ -n $value ]] ; then
2181 value=${value#\'}
2182 value=${value%\'}
2183 fi
2184 echo $value
2185
2186 return 0
2187 }
2188
2189 #
2190 # Private function. Randomly select one of the items from the arguments.
2191 #
2192 # $1 count
2193 # $2-n string
2194 #
2195 function _random_get
2196 {
2197 typeset cnt=$1
2198 shift
2199
2200 typeset str="$@"
2201 typeset -i ind
2202 ((ind = RANDOM % cnt + 1))
2203
2204 typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
2205 $ECHO $ret
2206 }
2207
2208 #
2209 # Randomly select one item from the arguments, allowing a NONE (empty) result
2210 #
2211 function random_get_with_non
2212 {
2213 typeset -i cnt=$#
2214 ((cnt += 1))
2215
2216 _random_get "$cnt" "$@"
2217 }
2218
2219 #
2220 # Randomly select one item from the arguments, excluding the NONE string
2221 #
2222 function random_get
2223 {
2224 _random_get "$#" "$@"
2225 }
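
#
# E.g. a sketch of picking a random compression value from the valid
# options returned by get_compress_opts (defined earlier in this file):
#
#	comp=$(random_get $(get_compress_opts zfs_set))
#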
2226
2227 #
2228 # Detect if the current system support slog
2229 #
2230 function verify_slog_support
2231 {
2232 typeset dir=/tmp/disk.$$
2233 typeset pool=foo.$$
2234 typeset vdev=$dir/a
2235 typeset sdev=$dir/b
2236
2237 $MKDIR -p $dir
2238 $MKFILE 64M $vdev $sdev
2239
2240 typeset -i ret=0
2241 if ! $ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1; then
2242 ret=1
2243 fi
2244 $RM -r $dir
2245
2246 return $ret
2247 }
2248
2249 #
2250 # The function will generate a dataset name with a specific length
2251 # $1, the length of the name
2252 # $2, the base string to construct the name
2253 #
2254 function gen_dataset_name
2255 {
2256 typeset -i len=$1
2257 typeset basestr="$2"
2258 typeset -i baselen=${#basestr}
2259 typeset -i iter=0
2260 typeset l_name=""
2261
2262 if ((len % baselen == 0)); then
2263 ((iter = len / baselen))
2264 else
2265 ((iter = len / baselen + 1))
2266 fi
2267 while ((iter > 0)); do
2268 l_name="${l_name}$basestr"
2269
2270 ((iter -= 1))
2271 done
2272
2273 $ECHO $l_name
2274 }
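#
# Usage sketch: build a dataset name at least 200 characters long; note the
# result is rounded up to a whole number of copies of the base string:
#
#	typeset long_name=$(gen_dataset_name 200 "abcdefg")
#	log_must $ZFS create $TESTPOOL/$long_name
#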
2275
2276 #
2277 # Get cksum tuple of dataset
2278 # $1 dataset name
2279 #
2280 # sample zdb output:
2281 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2282 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2283 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2284 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2285 function datasetcksum
2286 {
2287 typeset cksum
2288 $SYNC
2289 cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
2290 | $AWK -F= '{print $7}')
2291 $ECHO $cksum
2292 }
2293
2294 #
2295 # Get cksum of file
2296 # $1 file path
2297 #
2298 function checksum
2299 {
2300 typeset cksum
2301 cksum=$($CKSUM $1 | $AWK '{print $1}')
2302 $ECHO $cksum
2303 }
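#
# Usage sketch: verify that file contents survive an operation by comparing
# checksums before and after (the path is illustrative):
#
#	typeset before=$(checksum /$TESTPOOL/$TESTFS/testfile)
#	# ... perform the operation under test ...
#	typeset after=$(checksum /$TESTPOOL/$TESTFS/testfile)
#	[[ $before == $after ]] || log_fail "checksum changed: $before != $after"
#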
2304
2305 #
2306 # Get the given disk/slice state from the specified field of the pool
2307 #
2308 function get_device_state #pool disk field("", "spares","logs")
2309 {
2310 typeset pool=$1
2311 typeset disk=${2#$DEV_DSKDIR/}
2312 typeset field=${3:-$pool}
2313
2314 state=$($ZPOOL status -v "$pool" 2>/dev/null | \
2315 $NAWK -v device=$disk -v pool=$pool -v field=$field \
2316 'BEGIN {startconfig=0; startfield=0; }
2317 /config:/ {startconfig=1}
2318 (startconfig==1) && ($1==field) {startfield=1; next;}
2319 (startfield==1) && ($1==device) {print $2; exit;}
2320 (startfield==1) &&
2321 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2322 echo $state
2323 }
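#
# Usage sketch: confirm a hot spare is available; $SPARE_DISK is a
# hypothetical variable naming the spare device:
#
#	typeset state=$(get_device_state $TESTPOOL $SPARE_DISK "spares")
#	[[ $state == "AVAIL" ]] || log_fail "spare state is '$state'"
#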
2324
2325
2326 #
2327 # Print the filesystem type of the given directory
2328 #
2329 # $1 directory name
2330 #
2331 function get_fstype
2332 {
2333 typeset dir=$1
2334
2335 if [[ -z $dir ]]; then
2336 log_fail "Usage: get_fstype <directory>"
2337 fi
2338
2339 #
2340 # $ df -n /
2341 # / : ufs
2342 #
2343 $DF -n $dir | $AWK '{print $3}'
2344 }
2345
2346 #
2347 # Given a disk, label it to VTOC regardless of what label was on the disk
2348 # $1 disk
2349 #
2350 function labelvtoc
2351 {
2352 typeset disk=$1
2353 if [[ -z $disk ]]; then
2354 log_fail "The disk name is unspecified."
2355 fi
2356 typeset label_file=/var/tmp/labelvtoc.$$
2357 typeset arch=$($UNAME -p)
2358
2359 if is_linux; then
2360 log_note "Currently unsupported by the test framework"
2361 return 1
2362 fi
2363
2364 if [[ $arch == "i386" ]]; then
2365 $ECHO "label" > $label_file
2366 $ECHO "0" >> $label_file
2367 $ECHO "" >> $label_file
2368 $ECHO "q" >> $label_file
2369 $ECHO "q" >> $label_file
2370
2371 $FDISK -B $disk >/dev/null 2>&1
2372 # wait a while for fdisk to finish
2373 $SLEEP 60
2374 elif [[ $arch == "sparc" ]]; then
2375 $ECHO "label" > $label_file
2376 $ECHO "0" >> $label_file
2377 $ECHO "" >> $label_file
2378 $ECHO "" >> $label_file
2379 $ECHO "" >> $label_file
2380 $ECHO "q" >> $label_file
2381 else
2382 log_fail "unknown arch type"
2383 fi
2384
2385 $FORMAT -e -s -d $disk -f $label_file
2386 typeset -i ret_val=$?
2387 $RM -f $label_file
2388 #
2389 # wait for format to finish
2390 #
2391 $SLEEP 60
2392 if ((ret_val != 0)); then
2393 log_fail "unable to label $disk as VTOC."
2394 fi
2395
2396 return 0
2397 }
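#
# Usage sketch ($DISK stands for a disk under test; on Linux the function
# simply returns 1):
#
#	if ! is_linux; then
#		log_must labelvtoc $DISK
#	fi
#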
2398
2399 #
2400 # Check whether the system was installed with a ZFS root
2401 # return: 0 if true, non-zero otherwise
2402 #
2403 function is_zfsroot
2404 {
2405 $DF -n / | $GREP zfs > /dev/null 2>&1
2406 return $?
2407 }
2408
2409 #
2410 # Get the root filesystem name if it's a zfsroot system.
2411 #
2412 # return: root filesystem name
2413 function get_rootfs
2414 {
2415 typeset rootfs=""
2416 rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
2417 /etc/mnttab)
2418 if [[ -z "$rootfs" ]]; then
2419 log_fail "Cannot get rootfs"
2420 fi
2421 $ZFS list $rootfs > /dev/null 2>&1
2422 if (($? == 0)); then
2423 $ECHO $rootfs
2424 else
2425 log_fail "This is not a zfsroot system."
2426 fi
2427 }
2428
2429 #
2430 # get the rootfs's pool name
2431 # return:
2432 # rootpool name
2433 #
2434 function get_rootpool
2435 {
2436 typeset rootfs=""
2437 typeset rootpool=""
2438 rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
2439 /etc/mnttab)
2440 if [[ -z "$rootfs" ]]; then
2441 log_fail "Cannot get rootpool"
2442 fi
2443 $ZFS list $rootfs > /dev/null 2>&1
2444 if (($? == 0)); then
2445 rootpool=$($ECHO $rootfs | $AWK -F/ '{print $1}')
2446 $ECHO $rootpool
2447 else
2448 log_fail "This is not a zfsroot system."
2449 fi
2450 }
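#
# Usage sketch: both helpers log_fail on non-zfsroot systems, so guard the
# calls with is_zfsroot:
#
#	if is_zfsroot; then
#		typeset rpool=$(get_rootpool)
#		log_note "root pool is $rpool"
#	fi
#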
2451
2452 #
2453 # Get a substring of the specified source string
2454 #
2455 # $1 source string
2456 # $2 start position. Count from 1
2457 # $3 offset
2458 #
2459 function get_substr #src_str pos offset
2460 {
2461 typeset pos offset
2462
2463 $ECHO $1 | \
2464 $NAWK -v pos=$2 -v offset=$3 '{print substr($0, pos, offset)}'
2465 }
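#
# Example: get_substr "abcdef" 2 3 prints "bcd" (positions count from 1).
#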
2466
2467 #
2468 # Check if the given device is a physical device
2469 #
2470 function is_physical_device #device
2471 {
2472 typeset device=${1#$DEV_DSKDIR}
2473 device=${device#$DEV_RDSKDIR}
2474
2475 if is_linux; then
2476 [[ -b "$DEV_DSKDIR/$device" ]] && \
2477 [[ -f /sys/module/loop/parameters/max_part ]]
2478 return $?
2479 else
2480 $ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
2481 return $?
2482 fi
2483 }
2484
2485 #
2486 # Get the directory path of given device
2487 #
2488 function get_device_dir #device
2489 {
2490 typeset device=$1
2491
2492 if ! is_physical_device $device ; then
2493 if [[ $device != "/" ]]; then
2494 device=${device%/*}
2495 fi
2496 if [[ -b "$DEV_DSKDIR/$device" ]]; then
2497 device="$DEV_DSKDIR"
2498 fi
2499 $ECHO $device
2500 else
2501 $ECHO "$DEV_DSKDIR"
2502 fi
2503 }
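#
# Usage sketch; c0t0d0 is an illustrative Solaris-style device name:
#
#	if is_physical_device c0t0d0; then
#		typeset devdir=$(get_device_dir c0t0d0)
#	fi
#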
2504
2505 #
2506 # Get the package name
2507 #
2508 function get_package_name
2509 {
2510 typeset dirpath=${1:-$STC_NAME}
2511
2512 echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2513 }
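#
# Example: get_package_name "fs/zfs" prints "SUNWstc-fs-zfs".
#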
2514
2515 #
2516 # Get the number of words in a whitespace-separated string
2517 #
2518 function get_word_count
2519 {
2520 $ECHO $1 | $WC -w
2521 }
2522
2523 #
2524 # Verify that the required number of disks is given
2525 #
2526 function verify_disk_count
2527 {
2528 typeset -i min=${2:-1}
2529
2530 typeset -i count=$(get_word_count "$1")
2531
2532 if ((count < min)); then
2533 log_untested "A minimum of $min disks is required to run." \
2534 " You specified $count disk(s)"
2535 fi
2536 }
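#
# Usage sketch: require at least two disks before running; $DISKS is the
# framework's space-separated list of disks under test:
#
#	verify_disk_count "$DISKS" 2
#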
2537
2538 function ds_is_volume
2539 {
2540 typeset type=$(get_prop type $1)
2541 [[ $type = "volume" ]] && return 0
2542 return 1
2543 }
2544
2545 function ds_is_filesystem
2546 {
2547 typeset type=$(get_prop type $1)
2548 [[ $type = "filesystem" ]] && return 0
2549 return 1
2550 }
2551
2552 function ds_is_snapshot
2553 {
2554 typeset type=$(get_prop type $1)
2555 [[ $type = "snapshot" ]] && return 0
2556 return 1
2557 }
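#
# Usage sketch:
#
#	ds_is_volume $TESTPOOL/$TESTVOL && log_note "$TESTVOL is a volume"
#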
2558
2559 #
2560 # Check if Trusted Extensions are installed and enabled
2561 #
2562 function is_te_enabled
2563 {
2564 $SVCS -H -o state labeld 2>/dev/null | $GREP "enabled"
2565 if (($? != 0)); then
2566 return 1
2567 else
2568 return 0
2569 fi
2570 }
2571
2572 # Utility function to determine if a system has multiple cpus.
2573 function is_mp
2574 {
2575 if is_linux; then
2576 (($($NPROC) > 1))
2577 else
2578 (($($PSRINFO | $WC -l) > 1))
2579 fi
2580
2581 return $?
2582 }
2583
2584 function get_cpu_freq
2585 {
2586 if is_linux; then
2587 lscpu | $AWK '/CPU MHz/ { print $3 }'
2588 else
2589 $PSRINFO -v 0 | $AWK '/processor operates at/ {print $6}'
2590 fi
2591 }
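#
# Usage sketch:
#
#	is_mp && log_note "multiprocessor system, CPU 0 at $(get_cpu_freq) MHz"
#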
2592
2593 # Run the given command as the specified user.
2594 function user_run
2595 {
2596 typeset user=$1
2597 shift
2598
2599 log_note "user:$user $@"
2600 eval \$SU \$user -c \"$@\" > /tmp/out 2>/tmp/err
2601 return $?
2602 }
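#
# Usage sketch: exercise a delegated permission as an unprivileged user;
# $STAFF1 is a hypothetical test user:
#
#	log_must $ZFS allow $STAFF1 snapshot $TESTPOOL/$TESTFS
#	log_must user_run $STAFF1 "$ZFS snapshot $TESTPOOL/$TESTFS@snap"
#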
2603
2604 #
2605 # Check if the pool contains the specified vdevs
2606 #
2607 # $1 pool
2608 # $2..n <vdev> ...
2609 #
2610 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2611 # vdevs is not in the pool, and 2 if pool name is missing.
2612 #
2613 function vdevs_in_pool
2614 {
2615 typeset pool=$1
2616 typeset vdev
2617
2618 if [[ -z $pool ]]; then
2619 log_note "Missing pool name."
2620 return 2
2621 fi
2622
2623 shift
2624
2625 typeset tmpfile=$($MKTEMP)
2626 $ZPOOL list -Hv "$pool" >$tmpfile
2627 typeset -i ret=0
2628 for vdev in $@; do
2629 $GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1 || ret=1
2630 done
2631
2632 $RM -f $tmpfile
2633
2634 return $ret
2635 }
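#
# Usage sketch: confirm both sides of a mirror are present; $DISK1 and
# $DISK2 are hypothetical device variables:
#
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2
#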
2636
2637 #
2638 # Wait for newly created block devices to have their minors created.
2639 #
2640 function block_device_wait
2641 {
2642 if is_linux; then
2643 $UDEVADM trigger
2644 $UDEVADM settle
2645 fi
2646 }
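#
# Usage sketch: wait for a new zvol's device node before using it:
#
#	log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
#	block_device_wait
#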