1 #!/bin/ksh -p
2 #
3 # CDDL HEADER START
4 #
5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
8 #
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
13 #
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
19 #
20 # CDDL HEADER END
21 #
22
23 #
24 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25 # Use is subject to license terms.
26 #
27
28 #
29 # Copyright (c) 2012, 2015 by Delphix. All rights reserved.
30 #
31
32 . ${STF_TOOLS}/include/logapi.shlib
33
34 # Determine if this is a Linux test system
35 #
# Return 0 if the platform is Linux, 1 otherwise
37
38 function is_linux
39 {
40 if [[ $($UNAME -o) == "GNU/Linux" ]]; then
41 return 0
42 else
43 return 1
44 fi
45 }
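
# Example usage (a minimal sketch; callers branch on the exit status):
#
#	if is_linux; then
#		log_note "Running on a Linux test system"
#	fi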
46
47 # Determine whether a dataset is mounted
48 #
49 # $1 dataset name
# $2 filesystem type; optional, defaults to zfs
51 #
52 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
53
54 function ismounted
55 {
56 typeset fstype=$2
57 [[ -z $fstype ]] && fstype=zfs
58 typeset out dir name ret
59
60 case $fstype in
61 zfs)
62 if [[ "$1" == "/"* ]] ; then
63 for out in $($ZFS mount | $AWK '{print $2}'); do
64 [[ $1 == $out ]] && return 0
65 done
66 else
67 for out in $($ZFS mount | $AWK '{print $1}'); do
68 [[ $1 == $out ]] && return 0
69 done
70 fi
71 ;;
72 ufs|nfs)
73 out=$($DF -F $fstype $1 2>/dev/null)
74 ret=$?
75 (($ret != 0)) && return $ret
76
77 dir=${out%%\(*}
78 dir=${dir%% *}
79 name=${out##*\(}
80 name=${name%%\)*}
81 name=${name%% *}
82
83 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
84 ;;
85 ext2)
86 out=$($DF -t $fstype $1 2>/dev/null)
87 return $?
88 ;;
89 zvol)
90 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
91 link=$(readlink -f $ZVOL_DEVDIR/$1)
92 [[ -n "$link" ]] && \
93 $MOUNT | $GREP -q "^$link" && \
94 return 0
95 fi
96 ;;
97 esac
98
99 return 1
100 }
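
# Example usage (a minimal sketch; $TESTPOOL/$TESTFS is the suite's
# default dataset):
#
#	if ismounted $TESTPOOL/$TESTFS; then
#		log_must $ZFS unmount $TESTPOOL/$TESTFS
#	fi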
101
102 # Return 0 if a dataset is mounted; 1 otherwise
103 #
104 # $1 dataset name
# $2 filesystem type; optional, defaults to zfs
106
107 function mounted
108 {
109 ismounted $1 $2
110 (($? == 0)) && return 0
111 return 1
112 }
113
114 # Return 0 if a dataset is unmounted; 1 otherwise
115 #
116 # $1 dataset name
# $2 filesystem type; optional, defaults to zfs
118
119 function unmounted
120 {
121 ismounted $1 $2
122 (($? == 1)) && return 0
123 return 1
124 }
125
# Split a comma-delimited line into space-separated fields
127 #
128 # $1 - line to split
129
130 function splitline
131 {
132 $ECHO $1 | $SED "s/,/ /g"
133 }
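
# Example (hypothetical input): "c0t0d0,c0t1d0" prints "c0t0d0 c0t1d0":
#
#	for dev in $(splitline "c0t0d0,c0t1d0"); do
#		log_note "$dev"
#	done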
134
135 function default_setup
136 {
137 default_setup_noexit "$@"
138
139 log_pass
140 }
141
142 #
# Given a list of disks, set up storage pools and datasets.
144 #
145 function default_setup_noexit
146 {
147 typeset disklist=$1
148 typeset container=$2
149 typeset volume=$3
150
151 if is_global_zone; then
152 if poolexists $TESTPOOL ; then
153 destroy_pool $TESTPOOL
154 fi
155 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
156 log_must $ZPOOL create -f $TESTPOOL $disklist
157 else
158 reexport_pool
159 fi
160
161 $RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
162 $MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR
163
164 log_must $ZFS create $TESTPOOL/$TESTFS
165 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
166
167 if [[ -n $container ]]; then
168 $RM -rf $TESTDIR1 || \
169 log_unresolved Could not remove $TESTDIR1
170 $MKDIR -p $TESTDIR1 || \
171 log_unresolved Could not create $TESTDIR1
172
173 log_must $ZFS create $TESTPOOL/$TESTCTR
174 log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
175 log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
176 log_must $ZFS set mountpoint=$TESTDIR1 \
177 $TESTPOOL/$TESTCTR/$TESTFS1
178 fi
179
180 if [[ -n $volume ]]; then
181 if is_global_zone ; then
182 log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
183 block_device_wait
184 else
185 log_must $ZFS create $TESTPOOL/$TESTVOL
186 fi
187 fi
188 }
189
190 #
# Given a list of disks, set up a storage pool, a file system, and
# a container.
193 #
194 function default_container_setup
195 {
196 typeset disklist=$1
197
198 default_setup "$disklist" "true"
199 }
200
201 #
# Given a list of disks, set up a storage pool, a file system,
# and a volume.
204 #
205 function default_volume_setup
206 {
207 typeset disklist=$1
208
209 default_setup "$disklist" "" "true"
210 }
211
212 #
# Given a list of disks, set up a storage pool, a file system,
# a container, and a volume.
215 #
216 function default_container_volume_setup
217 {
218 typeset disklist=$1
219
220 default_setup "$disklist" "true" "true"
221 }
222
223 #
# Create a snapshot on a filesystem or volume. By default, create a snapshot
# on the filesystem.
#
# $1 Existing filesystem or volume name. Default: $TESTFS
# $2 Snapshot name. Default: $TESTSNAP
229 #
230 function create_snapshot
231 {
232 typeset fs_vol=${1:-$TESTFS}
233 typeset snap=${2:-$TESTSNAP}
234
235 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
236 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
237
238 if snapexists $fs_vol@$snap; then
239 log_fail "$fs_vol@$snap already exists."
240 fi
241 datasetexists $fs_vol || \
242 log_fail "$fs_vol must exist."
243
244 log_must $ZFS snapshot $fs_vol@$snap
245 }
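
# Example usage (suite defaults assumed to exist):
#
#	create_snapshot $TESTPOOL/$TESTFS $TESTSNAP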
246
247 #
248 # Create a clone from a snapshot, default clone name is $TESTCLONE.
249 #
250 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
251 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
252 #
253 function create_clone # snapshot clone
254 {
255 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
256 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
257
258 [[ -z $snap ]] && \
259 log_fail "Snapshot name is undefined."
260 [[ -z $clone ]] && \
261 log_fail "Clone name is undefined."
262
263 log_must $ZFS clone $snap $clone
264 }
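
# Example usage (suite defaults assumed; the snapshot must already exist):
#
#	create_clone $TESTPOOL/$TESTFS@$TESTSNAP $TESTPOOL/$TESTCLONE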
265
266 function default_mirror_setup
267 {
268 default_mirror_setup_noexit $1 $2 $3
269
270 log_pass
271 }
272
273 #
274 # Given a pair of disks, set up a storage pool and dataset for the mirror
275 # @parameters: $1 the primary side of the mirror
276 # $2 the secondary side of the mirror
277 # @uses: ZPOOL ZFS TESTPOOL TESTFS
278 function default_mirror_setup_noexit
279 {
280 readonly func="default_mirror_setup_noexit"
281 typeset primary=$1
282 typeset secondary=$2
283
284 [[ -z $primary ]] && \
285 log_fail "$func: No parameters passed"
286 [[ -z $secondary ]] && \
287 log_fail "$func: No secondary partition passed"
288 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
289 log_must $ZPOOL create -f $TESTPOOL mirror $@
290 log_must $ZFS create $TESTPOOL/$TESTFS
291 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
292 }
293
294 #
# Create a number of mirrors.
# We create a number ($1) of 2-way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted.
298 # @parameters: $1 the number of mirrors to create
299 # $... the devices to use to create the mirrors on
300 # @uses: ZPOOL ZFS TESTPOOL
301 function setup_mirrors
302 {
303 typeset -i nmirrors=$1
304
305 shift
306 while ((nmirrors > 0)); do
307 log_must test -n "$1" -a -n "$2"
308 [[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
309 log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
310 shift 2
311 ((nmirrors = nmirrors - 1))
312 done
313 }
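
# Example usage (hypothetical device names): create two 2-way mirror pools,
# $TESTPOOL2 (first pair) and $TESTPOOL1 (second pair):
#
#	setup_mirrors 2 /dev/sda /dev/sdb /dev/sdc /dev/sdd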
314
315 #
# Create a number of raidz pools.
# We create a number ($1) of 2-disk raidz pools using the pairs of disks
# named on the command line. These pools are *not* mounted.
319 # @parameters: $1 the number of pools to create
320 # $... the devices to use to create the pools on
321 # @uses: ZPOOL ZFS TESTPOOL
322 function setup_raidzs
323 {
324 typeset -i nraidzs=$1
325
326 shift
327 while ((nraidzs > 0)); do
328 log_must test -n "$1" -a -n "$2"
329 [[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
330 log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
331 shift 2
332 ((nraidzs = nraidzs - 1))
333 done
334 }
335
336 #
# Destroy the configured testpool mirrors.
# The mirrors are of the form ${TESTPOOL}{number}.
339 # @uses: ZPOOL ZFS TESTPOOL
340 function destroy_mirrors
341 {
342 default_cleanup_noexit
343
344 log_pass
345 }
346
347 #
348 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
349 # $1 the list of disks
350 #
351 function default_raidz_setup
352 {
353 typeset disklist="$*"
354 disks=(${disklist[*]})
355
356 if [[ ${#disks[*]} -lt 2 ]]; then
357 log_fail "A raid-z requires a minimum of two disks."
358 fi
359
360 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
361 log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
362 log_must $ZFS create $TESTPOOL/$TESTFS
363 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
364
365 log_pass
366 }
367
368 #
369 # Common function used to cleanup storage pools and datasets.
370 #
371 # Invoked at the start of the test suite to ensure the system
372 # is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
374 # impact the execution of the next set.
375
376 function default_cleanup
377 {
378 default_cleanup_noexit
379
380 log_pass
381 }
382
383 function default_cleanup_noexit
384 {
385 typeset exclude=""
386 typeset pool=""
387 #
388 # Destroying the pool will also destroy any
389 # filesystems it contains.
390 #
391 if is_global_zone; then
392 $ZFS unmount -a > /dev/null 2>&1
393 [[ -z "$KEEP" ]] && KEEP="rpool"
394 exclude=`eval $ECHO \"'(${KEEP})'\"`
395 ALL_POOLS=$($ZPOOL list -H -o name \
396 | $GREP -v "$NO_POOLS" | $EGREP -v "$exclude")
397 # Here, we loop through the pools we're allowed to
398 # destroy, only destroying them if it's safe to do
399 # so.
		while [[ -n "$ALL_POOLS" ]]
401 do
402 for pool in ${ALL_POOLS}
403 do
404 if safe_to_destroy_pool $pool ;
405 then
406 destroy_pool $pool
407 fi
408 ALL_POOLS=$($ZPOOL list -H -o name \
409 | $GREP -v "$NO_POOLS" \
410 | $EGREP -v "$exclude")
411 done
412 done
413
414 $ZFS mount -a
415 else
416 typeset fs=""
417 for fs in $($ZFS list -H -o name \
418 | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
419 datasetexists $fs && \
420 log_must $ZFS destroy -Rf $fs
421 done
422
		# Clean up here to avoid leaving garbage directories behind.
424 for fs in $($ZFS list -H -o name); do
425 [[ $fs == /$ZONE_POOL ]] && continue
426 [[ -d $fs ]] && log_must $RM -rf $fs/*
427 done
428
429 #
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file system properties
		# to their default values
432 #
433 for fs in $($ZFS list -H -o name); do
434 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
435 log_must $ZFS set reservation=none $fs
436 log_must $ZFS set recordsize=128K $fs
437 log_must $ZFS set mountpoint=/$fs $fs
438 typeset enc=""
439 enc=$(get_prop encryption $fs)
440 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
441 [[ "$enc" == "off" ]]; then
442 log_must $ZFS set checksum=on $fs
443 fi
444 log_must $ZFS set compression=off $fs
445 log_must $ZFS set atime=on $fs
446 log_must $ZFS set devices=off $fs
447 log_must $ZFS set exec=on $fs
448 log_must $ZFS set setuid=on $fs
449 log_must $ZFS set readonly=off $fs
450 log_must $ZFS set snapdir=hidden $fs
451 log_must $ZFS set aclmode=groupmask $fs
452 log_must $ZFS set aclinherit=secure $fs
453 fi
454 done
455 fi
456
457 [[ -d $TESTDIR ]] && \
458 log_must $RM -rf $TESTDIR
459
460 disk1=${DISKS%% *}
461 if is_mpath_device $disk1; then
462 delete_partitions
463 fi
464 }
465
466
467 #
468 # Common function used to cleanup storage pools, file systems
469 # and containers.
470 #
471 function default_container_cleanup
472 {
473 if ! is_global_zone; then
474 reexport_pool
475 fi
476
477 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
478 [[ $? -eq 0 ]] && \
479 log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1
480
481 datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
482 log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
483
484 datasetexists $TESTPOOL/$TESTCTR && \
485 log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR
486
487 [[ -e $TESTDIR1 ]] && \
488 log_must $RM -rf $TESTDIR1 > /dev/null 2>&1
489
490 default_cleanup
491 }
492
493 #
# Common function used to clean up a snapshot of a file system or volume.
# Defaults to deleting the file system's snapshot.
496 #
497 # $1 snapshot name
498 #
499 function destroy_snapshot
500 {
501 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
502
503 if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
505 fi
506
	#
	# The value returned by 'get_prop' may not match the real mountpoint
	# when the snapshot is unmounted. So, first check and make sure this
	# snapshot is mounted on the current system.
	#
512 typeset mtpt=""
513 if ismounted $snap; then
514 mtpt=$(get_prop mountpoint $snap)
515 (($? != 0)) && \
516 log_fail "get_prop mountpoint $snap failed."
517 fi
518
519 log_must $ZFS destroy $snap
520 [[ $mtpt != "" && -d $mtpt ]] && \
521 log_must $RM -rf $mtpt
522 }
523
524 #
525 # Common function used to cleanup clone.
526 #
527 # $1 clone name
528 #
529 function destroy_clone
530 {
531 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
532
533 if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
535 fi
536
	# For the same reason as in destroy_snapshot
538 typeset mtpt=""
539 if ismounted $clone; then
540 mtpt=$(get_prop mountpoint $clone)
541 (($? != 0)) && \
542 log_fail "get_prop mountpoint $clone failed."
543 fi
544
545 log_must $ZFS destroy $clone
546 [[ $mtpt != "" && -d $mtpt ]] && \
547 log_must $RM -rf $mtpt
548 }
549
550 # Return 0 if a snapshot exists; $? otherwise
551 #
552 # $1 - snapshot name
553
554 function snapexists
555 {
556 $ZFS list -H -t snapshot "$1" > /dev/null 2>&1
557 return $?
558 }
559
560 #
# Set a property of a dataset to a given value.
# @param:
#	$1 dataset whose property is being set
#	$2 property to set
#	$3 value to set property to
567 # @return:
568 # 0 if the property could be set.
569 # non-zero otherwise.
570 # @use: ZFS
571 #
572 function dataset_setprop
573 {
574 typeset fn=dataset_setprop
575
576 if (($# < 3)); then
577 log_note "$fn: Insufficient parameters (need 3, had $#)"
578 return 1
579 fi
580 typeset output=
581 output=$($ZFS set $2=$3 $1 2>&1)
582 typeset rv=$?
583 if ((rv != 0)); then
584 log_note "Setting property on $1 failed."
585 log_note "property $2=$3"
586 log_note "Return Code: $rv"
587 log_note "Output: $output"
588 return $rv
589 fi
590 return 0
591 }
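
# Example usage (a minimal sketch; compression is just an illustrative
# property):
#
#	dataset_setprop $TESTPOOL/$TESTFS compression on || \
#	    log_fail "could not set compression"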
592
593 #
594 # Assign suite defined dataset properties.
595 # This function is used to apply the suite's defined default set of
596 # properties to a dataset.
597 # @parameters: $1 dataset to use
598 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
599 # @returns:
600 # 0 if the dataset has been altered.
601 # 1 if no pool name was passed in.
602 # 2 if the dataset could not be found.
#	3 if the dataset could not have its properties set.
604 #
605 function dataset_set_defaultproperties
606 {
607 typeset dataset="$1"
608
609 [[ -z $dataset ]] && return 1
610
611 typeset confset=
612 typeset -i found=0
613 for confset in $($ZFS list); do
614 if [[ $dataset = $confset ]]; then
615 found=1
616 break
617 fi
618 done
619 [[ $found -eq 0 ]] && return 2
620 if [[ -n $COMPRESSION_PROP ]]; then
621 dataset_setprop $dataset compression $COMPRESSION_PROP || \
622 return 3
623 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
624 fi
625 if [[ -n $CHECKSUM_PROP ]]; then
626 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
627 return 3
628 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
629 fi
630 return 0
631 }
632
633 #
634 # Check a numeric assertion
635 # @parameter: $@ the assertion to check
636 # @output: big loud notice if assertion failed
637 # @use: log_fail
638 #
639 function assert
640 {
641 (($@)) || log_fail "$@"
642 }
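
# Example usage (the argument is evaluated arithmetically):
#
#	typeset -i nfiles=3
#	assert "nfiles == 3"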
643
644 #
# Function to format the partition sizes of a disk.
# Given a disk cxtxdx, reduces all of its partitions
# to size 0.
648 #
649 function zero_partitions #<whole_disk_name>
650 {
651 typeset diskname=$1
652 typeset i
653
654 if is_linux; then
655 log_must $FORMAT $DEV_DSKDIR/$diskname -s -- mklabel gpt
656 else
657 for i in 0 1 3 4 5 6 7
658 do
659 set_partition $i "" 0mb $diskname
660 done
661 fi
662 }
663
664 #
665 # Given a slice, size and disk, this function
666 # formats the slice to the specified size.
667 # Size should be specified with units as per
668 # the `format` command requirements eg. 100mb 3gb
669 #
# NOTE: This entire interface is problematic for the Linux parted utility
671 # which requires the end of the partition to be specified. It would be
672 # best to retire this interface and replace it with something more flexible.
673 # At the moment a best effort is made.
674 #
675 function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
676 {
677 typeset -i slicenum=$1
678 typeset start=$2
679 typeset size=$3
680 typeset disk=$4
681 [[ -z $slicenum || -z $size || -z $disk ]] && \
682 log_fail "The slice, size or disk name is unspecified."
683
684 if is_linux; then
685 typeset size_mb=${size%%[mMgG]}
686
687 size_mb=${size_mb%%[mMgG][bB]}
		if [[ $size == *[gG]* ]]; then
689 ((size_mb = size_mb * 1024))
690 fi
691
692 # Create GPT partition table when setting slice 0 or
693 # when the device doesn't already contain a GPT label.
694 $FORMAT $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
695 typeset ret_val=$?
696 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
697 log_must $FORMAT $DEV_DSKDIR/$disk -s -- mklabel gpt
698 fi
699
700 # When no start is given align on the first cylinder.
701 if [[ -z "$start" ]]; then
702 start=1
703 fi
704
705 # Determine the cylinder size for the device and using
706 # that calculate the end offset in cylinders.
707 typeset -i cly_size_kb=0
708 cly_size_kb=$($FORMAT -m $DEV_DSKDIR/$disk -s -- \
709 unit cyl print | $HEAD -3 | $TAIL -1 | \
710 $AWK -F '[:k.]' '{print $4}')
711 ((end = (size_mb * 1024 / cly_size_kb) + start))
712
713 log_must $FORMAT $DEV_DSKDIR/$disk -s -- \
714 mkpart part$slicenum ${start}cyl ${end}cyl
715
716 $BLOCKDEV --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
717 block_device_wait
718 else
719 typeset format_file=/var/tmp/format_in.$$
720
721 $ECHO "partition" >$format_file
722 $ECHO "$slicenum" >> $format_file
723 $ECHO "" >> $format_file
724 $ECHO "" >> $format_file
725 $ECHO "$start" >> $format_file
726 $ECHO "$size" >> $format_file
727 $ECHO "label" >> $format_file
728 $ECHO "" >> $format_file
729 $ECHO "q" >> $format_file
730 $ECHO "q" >> $format_file
731
		$FORMAT -e -s -d $disk -f $format_file
		typeset ret_val=$?
		$RM -f $format_file
		[[ $ret_val -ne 0 ]] && \
		    log_fail "Unable to format $disk slice $slicenum to $size"
	fi
	return 0
739 }
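
# Example usage (hypothetical disk name): create a 100mb slice 0 starting
# at the default offset:
#
#	set_partition 0 "" 100mb c1t0d0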
740
741 #
742 # Delete all partitions on all disks - this is specifically for the use of multipath
743 # devices which currently can only be used in the test suite as raw/un-partitioned
744 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
745 #
746 function delete_partitions
747 {
748 typeset -i j=1
749
750 if [[ -z $DISK_ARRAY_NUM ]]; then
751 DISK_ARRAY_NUM=$($ECHO ${DISKS} | $NAWK '{print NF}')
752 fi
753 if [[ -z $DISKSARRAY ]]; then
754 DISKSARRAY=$DISKS
755 fi
756
757 if is_linux; then
758 if (( $DISK_ARRAY_NUM == 1 )); then
759 while ((j < MAX_PARTITIONS)); do
760 $FORMAT $DEV_DSKDIR/$DISK -s rm $j > /dev/null 2>&1
761 if (( $? == 1 )); then
762 $LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null
763 if (( $? == 1 )); then
764 log_note "Partitions for $DISK should be deleted"
765 else
766 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
767 fi
768 return 0
769 else
770 $LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null
771 if (( $? == 0 )); then
772 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
773 fi
774 fi
775 ((j = j+1))
776 done
777 else
778 for disk in `$ECHO $DISKSARRAY`; do
779 while ((j < MAX_PARTITIONS)); do
780 $FORMAT $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
781 if (( $? == 1 )); then
782 $LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null
783 if (( $? == 1 )); then
784 log_note "Partitions for $disk should be deleted"
785 else
786 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
787 fi
788 j=7
789 else
790 $LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null
791 if (( $? == 0 )); then
792 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
793 fi
794 fi
795 ((j = j+1))
796 done
797 j=1
798 done
799 fi
800 fi
801 return 0
802 }
803
804 #
805 # Get the end cyl of the given slice
806 #
807 function get_endslice #<disk> <slice>
808 {
809 typeset disk=$1
810 typeset slice=$2
811 if [[ -z $disk || -z $slice ]] ; then
812 log_fail "The disk name or slice number is unspecified."
813 fi
814
815 if is_linux; then
816 endcyl=$($FORMAT -s $DEV_DSKDIR/$disk -- unit cyl print | \
817 $GREP "part${slice}" | \
818 $AWK '{print $3}' | \
819 $SED 's,cyl,,')
820 ((endcyl = (endcyl + 1)))
821 else
822 disk=${disk#/dev/dsk/}
823 disk=${disk#/dev/rdsk/}
824 disk=${disk%s*}
825
826 typeset -i ratio=0
827 ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
828 $GREP "sectors\/cylinder" | \
829 $AWK '{print $2}')
830
831 if ((ratio == 0)); then
832 return
833 fi
834
835 typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
836 $NAWK -v token="$slice" '{if ($1==token) print $6}')
837
838 ((endcyl = (endcyl + 1) / ratio))
839 fi
840
841 echo $endcyl
842 }
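
# Example usage (hypothetical disk name):
#
#	typeset endcyl=$(get_endslice c1t0d0 0)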
843
844
845 #
846 # Given a size,disk and total slice number, this function formats the
847 # disk slices from 0 to the total slice number with the same specified
848 # size.
849 #
850 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
851 {
852 typeset -i i=0
853 typeset slice_size=$1
854 typeset disk_name=$2
855 typeset total_slices=$3
856 typeset cyl
857
858 zero_partitions $disk_name
859 while ((i < $total_slices)); do
860 if ! is_linux; then
861 if ((i == 2)); then
862 ((i = i + 1))
863 continue
864 fi
865 fi
866 set_partition $i "$cyl" $slice_size $disk_name
867 cyl=$(get_endslice $disk_name $i)
868 ((i = i+1))
869 done
870 }
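
# Example usage (hypothetical disk name): format the first slices of the
# disk to 100mb each (slice 2 is skipped on non-Linux platforms):
#
#	partition_disk 100mb c1t0d0 4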
871
872 #
873 # This function continues to write to a filenum number of files into dirnum
874 # number of directories until either $FILE_WRITE returns an error or the
# maximum number of files per directory has been written.
876 #
877 # Usage:
878 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
879 #
880 # Return value: 0 on success
881 # non 0 on error
882 #
883 # Where :
# destdir: the directory under which everything is to be created
885 # dirnum: the maximum number of subdirectories to use, -1 no limit
886 # filenum: the maximum number of files per subdirectory
887 # bytes: number of bytes to write
# num_writes: number of times to write out bytes
# data: the data that will be written
#
# E.g.
# fill_fs /testdir 20 25 1024 256 0
893 #
894 # Note: bytes * num_writes equals the size of the testfile
895 #
896 function fill_fs # destdir dirnum filenum bytes num_writes data
897 {
898 typeset destdir=${1:-$TESTDIR}
899 typeset -i dirnum=${2:-50}
900 typeset -i filenum=${3:-50}
901 typeset -i bytes=${4:-8192}
902 typeset -i num_writes=${5:-10240}
903 typeset -i data=${6:-0}
904
905 typeset -i odirnum=1
906 typeset -i idirnum=0
907 typeset -i fn=0
908 typeset -i retval=0
909
910 log_must $MKDIR -p $destdir/$idirnum
911 while (($odirnum > 0)); do
912 if ((dirnum >= 0 && idirnum >= dirnum)); then
913 odirnum=0
914 break
915 fi
916 $FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \
917 -b $bytes -c $num_writes -d $data
918 retval=$?
919 if (($retval != 0)); then
920 odirnum=0
921 break
922 fi
923 if (($fn >= $filenum)); then
924 fn=0
925 ((idirnum = idirnum + 1))
926 log_must $MKDIR -p $destdir/$idirnum
927 else
928 ((fn = fn + 1))
929 fi
930 done
931 return $retval
932 }
933
934 #
# Simple function to get the specified property. If unable to
# get the property then a note is logged and 1 is returned.
937 #
938 # Note property is in 'parsable' format (-p)
939 #
940 function get_prop # property dataset
941 {
942 typeset prop_val
943 typeset prop=$1
944 typeset dataset=$2
945
946 prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
947 if [[ $? -ne 0 ]]; then
948 log_note "Unable to get $prop property for dataset " \
949 "$dataset"
950 return 1
951 fi
952
953 $ECHO $prop_val
954 return 0
955 }
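
# Example usage (suite defaults assumed):
#
#	typeset mntpt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
#	[[ -n $mntpt ]] || log_fail "get_prop mountpoint failed"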
956
957 #
# Simple function to get the specified property of a pool. If unable to
# get the property then a note is logged and 1 is returned.
960 #
961 function get_pool_prop # property pool
962 {
963 typeset prop_val
964 typeset prop=$1
965 typeset pool=$2
966
967 if poolexists $pool ; then
968 prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
969 $AWK '{print $3}')
970 if [[ $? -ne 0 ]]; then
971 log_note "Unable to get $prop property for pool " \
972 "$pool"
973 return 1
974 fi
975 else
		log_note "Pool $pool does not exist."
977 return 1
978 fi
979
980 $ECHO $prop_val
981 return 0
982 }
983
984 # Return 0 if a pool exists; $? otherwise
985 #
986 # $1 - pool name
987
988 function poolexists
989 {
990 typeset pool=$1
991
992 if [[ -z $pool ]]; then
993 log_note "No pool name given."
994 return 1
995 fi
996
997 $ZPOOL get name "$pool" > /dev/null 2>&1
998 return $?
999 }
1000
1001 # Return 0 if all the specified datasets exist; $? otherwise
1002 #
1003 # $1-n dataset name
1004 function datasetexists
1005 {
1006 if (($# == 0)); then
1007 log_note "No dataset name given."
1008 return 1
1009 fi
1010
1011 while (($# > 0)); do
1012 $ZFS get name $1 > /dev/null 2>&1 || \
1013 return $?
1014 shift
1015 done
1016
1017 return 0
1018 }
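
# Example usage (suite defaults assumed):
#
#	datasetexists $TESTPOOL/$TESTFS && \
#	    log_must $ZFS destroy -r $TESTPOOL/$TESTFS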
1019
# Return 0 if none of the specified datasets exists, otherwise return 1.
1021 #
1022 # $1-n dataset name
1023 function datasetnonexists
1024 {
1025 if (($# == 0)); then
1026 log_note "No dataset name given."
1027 return 1
1028 fi
1029
1030 while (($# > 0)); do
1031 $ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1032 && return 1
1033 shift
1034 done
1035
1036 return 0
1037 }
1038
1039 #
1040 # Given a mountpoint, or a dataset name, determine if it is shared.
1041 #
1042 # Returns 0 if shared, 1 otherwise.
1043 #
1044 function is_shared
1045 {
1046 typeset fs=$1
1047 typeset mtpt
1048
1049 if is_linux; then
1050 log_unsupported "Currently unsupported by the test framework"
1051 return 1
1052 fi
1053
1054 if [[ $fs != "/"* ]] ; then
1055 if datasetnonexists "$fs" ; then
1056 return 1
1057 else
1058 mtpt=$(get_prop mountpoint "$fs")
1059 case $mtpt in
1060 none|legacy|-) return 1
1061 ;;
1062 *) fs=$mtpt
1063 ;;
1064 esac
1065 fi
1066 fi
1067
1068 for mtpt in `$SHARE | $AWK '{print $2}'` ; do
1069 if [[ $mtpt == $fs ]] ; then
1070 return 0
1071 fi
1072 done
1073
1074 typeset stat=$($SVCS -H -o STA nfs/server:default)
1075 if [[ $stat != "ON" ]]; then
1076 log_note "Current nfs/server status: $stat"
1077 fi
1078
1079 return 1
1080 }
1081
1082 #
1083 # Given a mountpoint, determine if it is not shared.
1084 #
1085 # Returns 0 if not shared, 1 otherwise.
1086 #
1087 function not_shared
1088 {
1089 typeset fs=$1
1090
1091 if is_linux; then
1092 log_unsupported "Currently unsupported by the test framework"
1093 return 1
1094 fi
1095
1096 is_shared $fs
1097 if (($? == 0)); then
1098 return 1
1099 fi
1100
1101 return 0
1102 }
1103
1104 #
1105 # Helper function to unshare a mountpoint.
1106 #
1107 function unshare_fs #fs
1108 {
1109 typeset fs=$1
1110
1111 if is_linux; then
1112 log_unsupported "Currently unsupported by the test framework"
1113 return 1
1114 fi
1115
1116 is_shared $fs
1117 if (($? == 0)); then
1118 log_must $ZFS unshare $fs
1119 fi
1120
1121 return 0
1122 }
1123
1124 #
1125 # Check NFS server status and trigger it online.
1126 #
1127 function setup_nfs_server
1128 {
1129 # Cannot share directory in non-global zone.
1130 #
1131 if ! is_global_zone; then
1132 log_note "Cannot trigger NFS server by sharing in LZ."
1133 return
1134 fi
1135
1136 if is_linux; then
1137 log_unsupported "Currently unsupported by the test framework"
1138 return
1139 fi
1140
1141 typeset nfs_fmri="svc:/network/nfs/server:default"
1142 if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
1143 #
		# Only a real share operation can bring the NFS server
		# online permanently.
1146 #
1147 typeset dummy=/tmp/dummy
1148
1149 if [[ -d $dummy ]]; then
1150 log_must $RM -rf $dummy
1151 fi
1152
1153 log_must $MKDIR $dummy
1154 log_must $SHARE $dummy
1155
		#
		# Wait for the FMRI's status to reach its final state.
		# While in transition, an asterisk (*) is appended to the
		# instance status, and an unshare would flip the status
		# back to 'DIS' again.
		#
		# Wait for at least 1 second.
		#
1163 log_must $SLEEP 1
1164 timeout=10
		while [[ $timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
1166 do
1167 log_must $SLEEP 1
1168
1169 ((timeout -= 1))
1170 done
1171
1172 log_must $UNSHARE $dummy
1173 log_must $RM -rf $dummy
1174 fi
1175
1176 log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
1177 }
1178
1179 #
# Verify whether the calling process is in the global zone
#
# Return 0 if in the global zone, 1 if in a non-global zone
1183 #
1184 function is_global_zone
1185 {
1186 typeset cur_zone=$($ZONENAME 2>/dev/null)
1187 if [[ $cur_zone != "global" ]]; then
1188 return 1
1189 fi
1190 return 0
1191 }
1192
1193 #
1194 # Verify whether test is permitted to run from
1195 # global zone, local zone, or both
1196 #
1197 # $1 zone limit, could be "global", "local", or "both"(no limit)
1198 #
1199 # Return 0 if permitted, otherwise exit with log_unsupported
1200 #
1201 function verify_runnable # zone limit
1202 {
1203 typeset limit=$1
1204
1205 [[ -z $limit ]] && return 0
1206
1207 if is_global_zone ; then
1208 case $limit in
1209 global|both)
1210 ;;
1211 local) log_unsupported "Test is unable to run from "\
1212 "global zone."
1213 ;;
1214 *) log_note "Warning: unknown limit $limit - " \
1215 "use both."
1216 ;;
1217 esac
1218 else
1219 case $limit in
1220 local|both)
1221 ;;
1222 global) log_unsupported "Test is unable to run from "\
1223 "local zone."
1224 ;;
1225 *) log_note "Warning: unknown limit $limit - " \
1226 "use both."
1227 ;;
1228 esac
1229
1230 reexport_pool
1231 fi
1232
1233 return 0
1234 }
1235
# Return 0 if the pool was created successfully; $? otherwise
1237 # Note: In local zones, this function should return 0 silently.
1238 #
1239 # $1 - pool name
1240 # $2-n - [keyword] devs_list
1241
1242 function create_pool #pool devs_list
1243 {
1244 typeset pool=${1%%/*}
1245
1246 shift
1247
1248 if [[ -z $pool ]]; then
1249 log_note "Missing pool name."
1250 return 1
1251 fi
1252
1253 if poolexists $pool ; then
1254 destroy_pool $pool
1255 fi
1256
1257 if is_global_zone ; then
1258 [[ -d /$pool ]] && $RM -rf /$pool
1259 log_must $ZPOOL create -f $pool $@
1260 fi
1261
1262 return 0
1263 }
1264
# Return 0 if the pool was destroyed successfully; $? otherwise
1266 # Note: In local zones, this function should return 0 silently.
1267 #
1268 # $1 - pool name
1269 # Destroy pool with the given parameters.
1270
1271 function destroy_pool #pool
1272 {
1273 typeset pool=${1%%/*}
1274 typeset mtpt
1275
1276 if [[ -z $pool ]]; then
1277 log_note "No pool name given."
1278 return 1
1279 fi
1280
1281 if is_global_zone ; then
1282 if poolexists "$pool" ; then
1283 mtpt=$(get_prop mountpoint "$pool")
1284
1285 # At times, syseventd activity can cause attempts to
1286 # destroy a pool to fail with EBUSY. We retry a few
1287 # times allowing failures before requiring the destroy
1288 # to succeed.
1289 typeset -i wait_time=10 ret=1 count=0
1290 must=""
1291 while [[ $ret -ne 0 ]]; do
1292 $must $ZPOOL destroy -f $pool
1293 ret=$?
1294 [[ $ret -eq 0 ]] && break
1295 log_note "zpool destroy failed with $ret"
1296 [[ count++ -ge 7 ]] && must=log_must
1297 $SLEEP $wait_time
1298 done
1299
1300 [[ -d $mtpt ]] && \
1301 log_must $RM -rf $mtpt
1302 else
1303 log_note "Pool does not exist. ($pool)"
1304 return 1
1305 fi
1306 fi
1307
1308 return 0
1309 }
1310
1311 #
# First, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, add a ZFS filesystem
# and a zvol device to the zone.
1315 #
1316 # $1 zone name
1317 # $2 zone root directory prefix
1318 # $3 zone ip
1319 #
1320 function zfs_zones_setup #zone_name zone_root zone_ip
1321 {
1322 typeset zone_name=${1:-$(hostname)-z}
1323 typeset zone_root=${2:-"/zone_root"}
1324 typeset zone_ip=${3:-"10.1.1.10"}
1325 typeset prefix_ctr=$ZONE_CTR
1326 typeset pool_name=$ZONE_POOL
1327 typeset -i cntctr=5
1328 typeset -i i=0
1329
	# Create a pool and 5 containers within it
1331 #
1332 [[ -d /$pool_name ]] && $RM -rf /$pool_name
1333 log_must $ZPOOL create -f $pool_name $DISKS
1334 while ((i < cntctr)); do
1335 log_must $ZFS create $pool_name/$prefix_ctr$i
1336 ((i += 1))
1337 done
1338
1339 # create a zvol
1340 log_must $ZFS create -V 1g $pool_name/zone_zvol
1341 block_device_wait
1342
1343 #
	# If the current system supports slog, add a slog device to the pool
1345 #
1346 if verify_slog_support ; then
1347 typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
1348 log_must $MKFILE 100M $sdevs
1349 log_must $ZPOOL add $pool_name log mirror $sdevs
1350 fi
1351
1352 # this isn't supported just yet.
1353 # Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
1355 # log_must $ZFS create $pool_name/zfs_filesystem
1356 # log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem
1357
1358 [[ -d $zone_root ]] && \
1359 log_must $RM -rf $zone_root/$zone_name
1360 [[ ! -d $zone_root ]] && \
1361 log_must $MKDIR -p -m 0700 $zone_root/$zone_name
1362
1363 # Create zone configure file and configure the zone
1364 #
1365 typeset zone_conf=/tmp/zone_conf.$$
1366 $ECHO "create" > $zone_conf
1367 $ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
1368 $ECHO "set autoboot=true" >> $zone_conf
1369 i=0
1370 while ((i < cntctr)); do
1371 $ECHO "add dataset" >> $zone_conf
1372 $ECHO "set name=$pool_name/$prefix_ctr$i" >> \
1373 $zone_conf
1374 $ECHO "end" >> $zone_conf
1375 ((i += 1))
1376 done
1377
1378 # add our zvol to the zone
1379 $ECHO "add device" >> $zone_conf
1380 $ECHO "set match=$ZVOL_DEVDIR/$pool_name/zone_zvol" >> $zone_conf
1381 $ECHO "end" >> $zone_conf
1382
1383 # add a corresponding zvol rdsk to the zone
1384 $ECHO "add device" >> $zone_conf
1385 $ECHO "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1386 $ECHO "end" >> $zone_conf
1387
1388 # once it's supported, we'll add our filesystem to the zone
1389 # $ECHO "add fs" >> $zone_conf
1390 # $ECHO "set type=zfs" >> $zone_conf
1391 # $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
1392 # $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
1393 # $ECHO "end" >> $zone_conf
1394
1395 $ECHO "verify" >> $zone_conf
1396 $ECHO "commit" >> $zone_conf
1397 log_must $ZONECFG -z $zone_name -f $zone_conf
1398 log_must $RM -f $zone_conf
1399
1400 # Install the zone
1401 $ZONEADM -z $zone_name install
1402 if (($? == 0)); then
1403 log_note "SUCCESS: $ZONEADM -z $zone_name install"
1404 else
1405 log_fail "FAIL: $ZONEADM -z $zone_name install"
1406 fi
1407
1408 # Install sysidcfg file
1409 #
1410 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1411 $ECHO "system_locale=C" > $sysidcfg
1412 $ECHO "terminal=dtterm" >> $sysidcfg
1413 $ECHO "network_interface=primary {" >> $sysidcfg
1414 $ECHO "hostname=$zone_name" >> $sysidcfg
1415 $ECHO "}" >> $sysidcfg
1416 $ECHO "name_service=NONE" >> $sysidcfg
1417 $ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
1418 $ECHO "security_policy=NONE" >> $sysidcfg
1419 $ECHO "timezone=US/Eastern" >> $sysidcfg
1420
1421 # Boot this zone
1422 log_must $ZONEADM -z $zone_name boot
1423 }
1424
1425 #
1426 # Reexport TESTPOOL & TESTPOOL(1-4)
1427 #
1428 function reexport_pool
1429 {
1430 typeset -i cntctr=5
1431 typeset -i i=0
1432
1433 while ((i < cntctr)); do
1434 if ((i == 0)); then
1435 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1436 if ! ismounted $TESTPOOL; then
1437 log_must $ZFS mount $TESTPOOL
1438 fi
1439 else
1440 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1441 if eval ! ismounted \$TESTPOOL$i; then
1442 log_must eval $ZFS mount \$TESTPOOL$i
1443 fi
1444 fi
1445 ((i += 1))
1446 done
1447 }
1448
1449 #
1450 # Verify a given disk is online or offline
1451 #
# Return 0 if the pool/disk matches the expected state, 1 otherwise
1453 #
1454 function check_state # pool disk state{online,offline}
1455 {
1456 typeset pool=$1
1457 typeset disk=${2#$DEV_DSKDIR/}
1458 typeset state=$3
1459
1460 $ZPOOL status -v $pool | grep "$disk" \
1461 | grep -i "$state" > /dev/null 2>&1
1462
1463 return $?
1464 }
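
# Example usage (hypothetical device name):
#
#	check_state $TESTPOOL c1t0d0 online || \
#	    log_fail "c1t0d0 is not online"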
1465
1466 #
# Get the mountpoint of a snapshot. A snapshot uses
# <mp_filesystem>/.zfs/snapshot/<snap> as its mountpoint.
1470 #
1471 function snapshot_mountpoint
1472 {
1473 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1474
1475 if [[ $dataset != *@* ]]; then
		log_fail "Invalid snapshot name '$dataset'."
1477 fi
1478
1479 typeset fs=${dataset%@*}
1480 typeset snap=${dataset#*@}
1481
1482 if [[ -z $fs || -z $snap ]]; then
		log_fail "Invalid snapshot name '$dataset'."
1484 fi
1485
1486 $ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1487 }
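
# Example usage (suite defaults assumed):
#
#	typeset snap_mtpt=$(snapshot_mountpoint $TESTPOOL/$TESTFS@$TESTSNAP)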
1488
1489 #
1490 # Given a pool and file system, this function will verify the file system
1491 # using the zdb internal tool. Note that the pool is exported and imported
1492 # to ensure it has consistent state.
1493 #
1494 function verify_filesys # pool filesystem dir
1495 {
1496 typeset pool="$1"
1497 typeset filesys="$2"
1498 typeset zdbout="/tmp/zdbout.$$"
1499
1500 shift
1501 shift
1502 typeset dirs=$@
1503 typeset search_path=""
1504
1505 log_note "Calling $ZDB to verify filesystem '$filesys'"
1506 $ZFS unmount -a > /dev/null 2>&1
1507 log_must $ZPOOL export $pool
1508
1509 if [[ -n $dirs ]] ; then
1510 for dir in $dirs ; do
1511 search_path="$search_path -d $dir"
1512 done
1513 fi
1514
1515 log_must $ZPOOL import $search_path $pool
1516
1517 $ZDB -cudi $filesys > $zdbout 2>&1
1518 if [[ $? != 0 ]]; then
1519 log_note "Output: $ZDB -cudi $filesys"
1520 $CAT $zdbout
1521 log_fail "$ZDB detected errors with: '$filesys'"
1522 fi
1523
1524 log_must $ZFS mount -a
1525 log_must $RM -rf $zdbout
1526 }
1527
1528 #
# Given a pool, list all the disks in the pool
1530 #
1531 function get_disklist # pool
1532 {
1533 typeset disklist=""
1534
1535 disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \
1536 $GREP -v "\-\-\-\-\-" | \
1537 $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1538
1539 $ECHO $disklist
1540 }
1541
1542 # /**
1543 # This function kills a given list of processes after a time period. We use
1544 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1545 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want: we're happy with stress tests
1547 # running for a certain amount of time, then finishing.
1548 #
1549 # @param $1 the time in seconds after which we should terminate these processes
1550 # @param $2..$n the processes we wish to terminate.
1551 # */
1552 function stress_timeout
1553 {
1554 typeset -i TIMEOUT=$1
1555 shift
1556 typeset cpids="$@"
1557
	log_note "Waiting for child processes ($cpids). " \
		"This could take dozens of minutes, please be patient ..."
1560 log_must $SLEEP $TIMEOUT
1561
1562 log_note "Killing child processes after ${TIMEOUT} stress timeout."
1563 typeset pid
1564 for pid in $cpids; do
1565 $PS -p $pid > /dev/null 2>&1
1566 if (($? == 0)); then
1567 log_must $KILL -USR1 $pid
1568 fi
1569 done
1570 }
1571
1572 #
1573 # Verify a given hotspare disk is inuse or avail
1574 #
# Return 0 if the pool/disk matches the expected state, 1 otherwise
1576 #
1577 function check_hotspare_state # pool disk state{inuse,avail}
1578 {
1579 typeset pool=$1
1580 typeset disk=${2#$DEV_DSKDIR/}
1581 typeset state=$3
1582
1583 cur_state=$(get_device_state $pool $disk "spares")
1584
1585 if [[ $state != ${cur_state} ]]; then
1586 return 1
1587 fi
1588 return 0
1589 }
1590
1591 #
1592 # Verify a given slog disk is inuse or avail
1593 #
# Return 0 if the pool/disk matches the expected state, 1 otherwise
1595 #
1596 function check_slog_state # pool disk state{online,offline,unavail}
1597 {
1598 typeset pool=$1
1599 typeset disk=${2#$DEV_DSKDIR/}
1600 typeset state=$3
1601
1602 cur_state=$(get_device_state $pool $disk "logs")
1603
1604 if [[ $state != ${cur_state} ]]; then
1605 return 1
1606 fi
1607 return 0
1608 }
1609
1610 #
1611 # Verify a given vdev disk is inuse or avail
1612 #
# Return 0 if the pool/disk matches the expected state, 1 otherwise
1614 #
1615 function check_vdev_state # pool disk state{online,offline,unavail}
1616 {
1617 typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
1619 typeset state=$3
1620
1621 cur_state=$(get_device_state $pool $disk)
1622
1623 if [[ $state != ${cur_state} ]]; then
1624 return 1
1625 fi
1626 return 0
1627 }
1628
1629 #
# Check the output of 'zpool status -v <pool>'
# to see if the content of <token> contains the specified <keyword>.
#
# Return 0 if it does, 1 otherwise
1634 #
1635 function check_pool_status # pool token keyword
1636 {
1637 typeset pool=$1
1638 typeset token=$2
1639 typeset keyword=$3
1640
1641 $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
1642 ($1==token) {print $0}' \
1643 | $GREP -i "$keyword" > /dev/null 2>&1
1644
1645 return $?
1646 }
1647
1648 #
# The following five functions are instances of check_pool_status():
#	is_pool_resilvering - check if a pool resilver is in progress
#	is_pool_resilvered - check if a pool resilver has completed
#	is_pool_scrubbing - check if a pool scrub is in progress
#	is_pool_scrubbed - check if a pool scrub has completed
#	is_pool_scrub_stopped - check if a pool scrub has been stopped
1655 #
1656 function is_pool_resilvering #pool
1657 {
1658 check_pool_status "$1" "scan" "resilver in progress since "
1659 return $?
1660 }
1661
1662 function is_pool_resilvered #pool
1663 {
1664 check_pool_status "$1" "scan" "resilvered "
1665 return $?
1666 }
1667
1668 function is_pool_scrubbing #pool
1669 {
1670 check_pool_status "$1" "scan" "scrub in progress since "
1671 return $?
1672 }
1673
1674 function is_pool_scrubbed #pool
1675 {
1676 check_pool_status "$1" "scan" "scrub repaired"
1677 return $?
1678 }
1679
1680 function is_pool_scrub_stopped #pool
1681 {
1682 check_pool_status "$1" "scan" "scrub canceled"
1683 return $?
1684 }
1685
1686 #
# Use create_pool()/destroy_pool() to clean up the information
# on the given disks to avoid slice overlapping.
1689 #
1690 function cleanup_devices #vdevs
1691 {
1692 typeset pool="foopool$$"
1693
1694 if poolexists $pool ; then
1695 destroy_pool $pool
1696 fi
1697
1698 create_pool $pool $@
1699 destroy_pool $pool
1700
1701 return 0
1702 }
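
# Example usage (hypothetical device names):
#
#	cleanup_devices c1t0d0 c1t1d0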
1703
1704 #
# Verify rsh connectivity to the given remote host.
#
# Return 0 if the remote host is accessible; otherwise 1.
# $1 remote host name
# $2 username
1710 #
1711 function verify_rsh_connect #rhost, username
1712 {
1713 typeset rhost=$1
1714 typeset username=$2
1715 typeset rsh_cmd="$RSH -n"
1716 typeset cur_user=
1717
1718 $GETENT hosts $rhost >/dev/null 2>&1
1719 if (($? != 0)); then
		log_note "$rhost cannot be found in the" \
		    "administrative database."
1722 return 1
1723 fi
1724
1725 $PING $rhost 3 >/dev/null 2>&1
1726 if (($? != 0)); then
1727 log_note "$rhost is not reachable."
1728 return 1
1729 fi
1730
1731 if ((${#username} != 0)); then
1732 rsh_cmd="$rsh_cmd -l $username"
1733 cur_user="given user \"$username\""
1734 else
1735 cur_user="current user \"`$LOGNAME`\""
1736 fi
1737
1738 if ! $rsh_cmd $rhost $TRUE; then
1739 log_note "$RSH to $rhost is not accessible" \
1740 "with $cur_user."
1741 return 1
1742 fi
1743
1744 return 0
1745 }
1746
1747 #
1748 # Verify the remote host connection via rsh after rebooting
1749 # $1 remote host
1750 #
1751 function verify_remote
1752 {
1753 rhost=$1
1754
	#
	# The following loop waits for the remote system to reboot.
	# Each iteration waits for 150 seconds. There are at most 5
	# iterations, so the total timeout for the system to reboot
	# is about 12.5 minutes. This is an approximate number.
	#
1762 typeset -i count=0
1763 while ! verify_rsh_connect $rhost; do
1764 sleep 150
1765 ((count = count + 1))
1766 if ((count > 5)); then
1767 return 1
1768 fi
1769 done
1770 return 0
1771 }
1772
1773 #
# Replacement function for /usr/bin/rsh. This function wraps
# /usr/bin/rsh and returns the exit status of the
# last remote command.
#
# $1 username passed down to the -l option of /usr/bin/rsh
1779 # $2 remote machine hostname
1780 # $3... command string
1781 #
1782
1783 function rsh_status
1784 {
1785 typeset ruser=$1
1786 typeset rhost=$2
1787 typeset -i ret=0
1788 typeset cmd_str=""
1789 typeset rsh_str=""
1790
1791 shift; shift
1792 cmd_str="$@"
1793
1794 err_file=/tmp/${rhost}.$$.err
1795 if ((${#ruser} == 0)); then
1796 rsh_str="$RSH -n"
1797 else
1798 rsh_str="$RSH -n -l $ruser"
1799 fi
1800
1801 $rsh_str $rhost /bin/ksh -c "'$cmd_str; \
1802 print -u 2 \"status=\$?\"'" \
1803 >/dev/null 2>$err_file
1804 ret=$?
1805 if (($ret != 0)); then
1806 $CAT $err_file
		$RM -f $err_file
1808 log_fail "$RSH itself failed with exit code $ret..."
1809 fi
1810
1811 ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
1812 $CUT -d= -f2)
1813 (($ret != 0)) && $CAT $err_file >&2
1814
1815 $RM -f $err_file >/dev/null 2>&1
1816 return $ret
1817 }
1818
1819 #
1820 # Get the SUNWstc-fs-zfs package installation path in a remote host
1821 # $1 remote host name
1822 #
1823 function get_remote_pkgpath
1824 {
1825 typeset rhost=$1
1826 typeset pkgpath=""
1827
1828 pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
1829 $CUT -d: -f2")
1830
1831 $ECHO $pkgpath
1832 }
1833
#/**
# A function to find free disks on a system, or from the disks given
# as parameters. It works by excluding disks that are in use
# as swap or dump devices, as well as disks listed in /etc/vfstab.
1838 #
1839 # $@ given disks to find which are free, default is all disks in
1840 # the test system
1841 #
1842 # @return a string containing the list of available disks
1843 #*/
1844 function find_disks
1845 {
1846 # Trust provided list, no attempt is made to locate unused devices.
1847 if is_linux; then
1848 $ECHO "$@"
1849 return
1850 fi
1851
1852
1853 sfi=/tmp/swaplist.$$
1854 dmpi=/tmp/dumpdev.$$
1855 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1856
1857 $SWAP -l > $sfi
1858 $DUMPADM > $dmpi 2>/dev/null
1859
1860 # write an awk script that can process the output of format
1861 # to produce a list of disks we know about. Note that we have
1862 # to escape "$2" so that the shell doesn't interpret it while
1863 # we're creating the awk script.
1864 # -------------------
1865 $CAT > /tmp/find_disks.awk <<EOF
1866 #!/bin/nawk -f
1867 BEGIN { FS="."; }
1868
1869 /^Specify disk/{
1870 searchdisks=0;
1871 }
1872
1873 {
1874 if (searchdisks && \$2 !~ "^$"){
1875 split(\$2,arr," ");
1876 print arr[1];
1877 }
1878 }
1879
1880 /^AVAILABLE DISK SELECTIONS:/{
1881 searchdisks=1;
1882 }
1883 EOF
1884 #---------------------
1885
1886 $CHMOD 755 /tmp/find_disks.awk
1887 disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
1888 $RM /tmp/find_disks.awk
1889
1890 unused=""
1891 for disk in $disks; do
1892 # Check for mounted
1893 $GREP "${disk}[sp]" /etc/mnttab >/dev/null
1894 (($? == 0)) && continue
1895 # Check for swap
1896 $GREP "${disk}[sp]" $sfi >/dev/null
1897 (($? == 0)) && continue
1898 # check for dump device
1899 $GREP "${disk}[sp]" $dmpi >/dev/null
1900 (($? == 0)) && continue
1901 # check to see if this disk hasn't been explicitly excluded
1902 # by a user-set environment variable
1903 $ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
1904 (($? == 0)) && continue
1905 unused_candidates="$unused_candidates $disk"
1906 done
1907 $RM $sfi
1908 $RM $dmpi
1909
1910 # now just check to see if those disks do actually exist
1911 # by looking for a device pointing to the first slice in
1912 # each case. limit the number to max_finddisksnum
1913 count=0
1914 for disk in $unused_candidates; do
1915 if [ -b $DEV_DSKDIR/${disk}s0 ]; then
1916 if [ $count -lt $max_finddisksnum ]; then
1917 unused="$unused $disk"
1918 # do not impose limit if $@ is provided
1919 [[ -z $@ ]] && ((count = count + 1))
1920 fi
1921 fi
1922 done
1923
1924 # finally, return our disk list
1925 $ECHO $unused
1926 }
1927
1928 #
1929 # Add specified user to specified group
1930 #
1931 # $1 group name
1932 # $2 user name
1933 # $3 base of the homedir (optional)
1934 #
1935 function add_user #<group_name> <user_name> <basedir>
1936 {
1937 typeset gname=$1
1938 typeset uname=$2
1939 typeset basedir=${3:-"/var/tmp"}
1940
1941 if ((${#gname} == 0 || ${#uname} == 0)); then
		log_fail "group name or user name is not defined."
1943 fi
1944
1945 log_must $USERADD -g $gname -d $basedir/$uname -m $uname
1946
	# Add new users to the same group as the command line utils.
	# This allows them to be run out of the original user's home
	# directory as long as it is permissioned to be group readable.
1950 if is_linux; then
1951 cmd_group=$(stat --format="%G" $ZFS)
1952 log_must $USERMOD -a -G $cmd_group $uname
1953 fi
1954
1955 return 0
1956 }
1957
1958 #
1959 # Delete the specified user.
1960 #
1961 # $1 login name
1962 # $2 base of the homedir (optional)
1963 #
1964 function del_user #<logname> <basedir>
1965 {
1966 typeset user=$1
1967 typeset basedir=${2:-"/var/tmp"}
1968
1969 if ((${#user} == 0)); then
1970 log_fail "login name is necessary."
1971 fi
1972
1973 if $ID $user > /dev/null 2>&1; then
1974 log_must $USERDEL $user
1975 fi
1976
1977 [[ -d $basedir/$user ]] && $RM -fr $basedir/$user
1978
1979 return 0
1980 }
1981
1982 #
1983 # Select valid gid and create specified group.
1984 #
1985 # $1 group name
1986 #
1987 function add_group #<group_name>
1988 {
1989 typeset group=$1
1990
1991 if ((${#group} == 0)); then
1992 log_fail "group name is necessary."
1993 fi
1994
	# Assign 100 as the base gid. On Linux, let groupadd select the
	# gid because many distributions reserve values of 1000 and under.
1997 if is_linux; then
1998 while true; do
1999 $GROUPADD $group > /dev/null 2>&1
2000 typeset -i ret=$?
2001 case $ret in
2002 0) return 0 ;;
2003 *) return 1 ;;
2004 esac
2005 done
2006 else
2007 typeset -i gid=100
2008
2009 while true; do
2010 $GROUPADD -g $gid $group > /dev/null 2>&1
2011 typeset -i ret=$?
2012 case $ret in
2013 0) return 0 ;;
2014 # The gid is not unique
2015 4) ((gid += 1)) ;;
2016 *) return 1 ;;
2017 esac
2018 done
2019 fi
2020 }
2021
2022 #
2023 # Delete the specified group.
2024 #
2025 # $1 group name
2026 #
2027 function del_group #<group_name>
2028 {
2029 typeset grp=$1
2030 if ((${#grp} == 0)); then
2031 log_fail "group name is necessary."
2032 fi
2033
2034 if is_linux; then
2035 $GETENT group $grp > /dev/null 2>&1
2036 typeset -i ret=$?
2037 case $ret in
2038 # Group does not exist.
2039 2) return 0 ;;
2040 # Name already exists as a group name
2041 0) log_must $GROUPDEL $grp ;;
2042 *) return 1 ;;
2043 esac
2044 else
2045 $GROUPMOD -n $grp $grp > /dev/null 2>&1
2046 typeset -i ret=$?
2047 case $ret in
2048 # Group does not exist.
2049 6) return 0 ;;
2050 # Name already exists as a group name
2051 9) log_must $GROUPDEL $grp ;;
2052 *) return 1 ;;
2053 esac
2054 fi
2055
2056 return 0
2057 }
2058
2059 #
2060 # This function will return true if it's safe to destroy the pool passed
2061 # as argument 1. It checks for pools based on zvols and files, and also
2062 # files contained in a pool that may have a different mountpoint.
2063 #
2064 function safe_to_destroy_pool { # $1 the pool name
2065
2066 typeset pool=""
2067 typeset DONT_DESTROY=""
2068
2069 # We check that by deleting the $1 pool, we're not
2070 # going to pull the rug out from other pools. Do this
2071 # by looking at all other pools, ensuring that they
2072 # aren't built from files or zvols contained in this pool.
2073
2074 for pool in $($ZPOOL list -H -o name)
2075 do
2076 ALTMOUNTPOOL=""
2077
		# This is a list of the file-based vdevs in $pool whose
		# paths pass through the pool given in $1.
2080 FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
2081 $AWK '{print $1}')
2082
2083 # this is a list of the zvols that make up the pool
2084 ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "$ZVOL_DEVDIR/$1$" \
2085 | $AWK '{print $1}')
2086
2087 # also want to determine if it's a file-based pool using an
2088 # alternate mountpoint...
2089 POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
2090 $GREP / | $AWK '{print $1}' | \
2091 $AWK -F/ '{print $2}' | $GREP -v "dev")
2092
2093 for pooldir in $POOL_FILE_DIRS
2094 do
2095 OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
2096 $GREP "${pooldir}$" | $AWK '{print $1}')
2097
2098 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2099 done
2100
2101
2102 if [ ! -z "$ZVOLPOOL" ]
2103 then
2104 DONT_DESTROY="true"
2105 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2106 fi
2107
2108 if [ ! -z "$FILEPOOL" ]
2109 then
2110 DONT_DESTROY="true"
2111 log_note "Pool $pool is built from $FILEPOOL on $1"
2112 fi
2113
2114 if [ ! -z "$ALTMOUNTPOOL" ]
2115 then
2116 DONT_DESTROY="true"
2117 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2118 fi
2119 done
2120
2121 if [ -z "${DONT_DESTROY}" ]
2122 then
2123 return 0
2124 else
2125 log_note "Warning: it is not safe to destroy $1!"
2126 return 1
2127 fi
2128 }
2129
2130 #
2131 # Get the available ZFS compression options
2132 # $1 option type zfs_set|zfs_compress
2133 #
2134 function get_compress_opts
2135 {
2136 typeset COMPRESS_OPTS
2137 typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2138 gzip-6 gzip-7 gzip-8 gzip-9"
2139
2140 if [[ $1 == "zfs_compress" ]] ; then
2141 COMPRESS_OPTS="on lzjb"
2142 elif [[ $1 == "zfs_set" ]] ; then
2143 COMPRESS_OPTS="on off lzjb"
2144 fi
2145 typeset valid_opts="$COMPRESS_OPTS"
2146 $ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
2147 if [[ $? -eq 0 ]]; then
2148 valid_opts="$valid_opts $GZIP_OPTS"
2149 fi
2150 $ECHO "$valid_opts"
2151 }
2152
2153 #
# Verify that a zfs operation with the -p option works as expected
2155 # $1 operation, value could be create, clone or rename
2156 # $2 dataset type, value could be fs or vol
2157 # $3 dataset name
2158 # $4 new dataset name
2159 #
2160 function verify_opt_p_ops
2161 {
2162 typeset ops=$1
2163 typeset datatype=$2
2164 typeset dataset=$3
2165 typeset newdataset=$4
2166
2167 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2168 log_fail "$datatype is not supported."
2169 fi
2170
2171 # check parameters accordingly
2172 case $ops in
2173 create)
2174 newdataset=$dataset
2175 dataset=""
2176 if [[ $datatype == "vol" ]]; then
2177 ops="create -V $VOLSIZE"
2178 fi
2179 ;;
2180 clone)
2181 if [[ -z $newdataset ]]; then
2182 log_fail "newdataset should not be empty" \
2183 "when ops is $ops."
2184 fi
2185 log_must datasetexists $dataset
2186 log_must snapexists $dataset
2187 ;;
2188 rename)
2189 if [[ -z $newdataset ]]; then
2190 log_fail "newdataset should not be empty" \
2191 "when ops is $ops."
2192 fi
2193 log_must datasetexists $dataset
2194 log_mustnot snapexists $dataset
2195 ;;
2196 *)
2197 log_fail "$ops is not supported."
2198 ;;
2199 esac
2200
2201 # make sure the upper level filesystem does not exist
2202 if datasetexists ${newdataset%/*} ; then
2203 log_must $ZFS destroy -rRf ${newdataset%/*}
2204 fi
2205
2206 # without -p option, operation will fail
2207 log_mustnot $ZFS $ops $dataset $newdataset
2208 log_mustnot datasetexists $newdataset ${newdataset%/*}
2209
2210 # with -p option, operation should succeed
2211 log_must $ZFS $ops -p $dataset $newdataset
2212 block_device_wait
2213
2214 if ! datasetexists $newdataset ; then
2215 log_fail "-p option does not work for $ops"
2216 fi
2217
	# when $ops is create or clone, redoing the operation should still succeed
2219 if [[ $ops != "rename" ]]; then
2220 log_must $ZFS $ops -p $dataset $newdataset
2221 fi
2222
2223 return 0
2224 }
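
#
# Illustrative usage of verify_opt_p_ops (dataset names are hypothetical):
#
#       verify_opt_p_ops create fs $TESTPOOL/dir1/$TESTFS
#       verify_opt_p_ops rename fs $TESTPOOL/$TESTFS $TESTPOOL/dir2/newfs
#
# Each call checks that the operation fails without -p when the parent
# does not exist, and succeeds once -p is supplied.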
2225
2226 #
2227 # Get configuration of pool
2228 # $1 pool name
2229 # $2 config name
2230 #
2231 function get_config
2232 {
2233 typeset pool=$1
2234 typeset config=$2
2235 typeset alt_root
2236
2237 if ! poolexists "$pool" ; then
2238 return 1
2239 fi
2240 alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
2241 if [[ $alt_root == "-" ]]; then
2242 value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
2243 '{print $2}')
2244 else
2245 value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
2246 '{print $2}')
2247 fi
2248 if [[ -n $value ]] ; then
2249 value=${value#\'}
2250 value=${value%\'}
2251 fi
2252 echo $value
2253
2254 return 0
2255 }
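
#
# Illustrative sketch: read a single value out of the cached pool config,
# e.g. the pool GUID (the pool name and key below are examples only).
#
#       typeset guid=$(get_config $TESTPOOL pool_guid)
#       [[ -n $guid ]] || log_fail "cannot read pool_guid of $TESTPOOL"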
2256
2257 #
2258 # Private function. Randomly select one of the items from the arguments.
2259 #
2260 # $1 count
2261 # $2-n string
2262 #
2263 function _random_get
2264 {
2265 typeset cnt=$1
2266 shift
2267
2268 typeset str="$@"
2269 typeset -i ind
2270 ((ind = RANDOM % cnt + 1))
2271
2272 typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
2273 $ECHO $ret
2274 }
2275
2276 #
2277 # Randomly select one of the arguments, possibly returning an empty (NONE) result
2278 #
2279 function random_get_with_non
2280 {
2281 typeset -i cnt=$#
2282 ((cnt += 1))
2283
2284 _random_get "$cnt" "$@"
2285 }
2286
2287 #
2288 # Randomly select one of the arguments, never returning an empty (NONE) result
2289 #
2290 function random_get
2291 {
2292 _random_get "$#" "$@"
2293 }
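
#
# Illustrative sketch: pick a random checksum value for a set command.
# random_get_with_non may instead return an empty string, which a caller
# can treat as "leave the property alone" (names here are examples).
#
#       typeset ck=$(random_get fletcher2 fletcher4 sha256)
#       log_must $ZFS set checksum=$ck $TESTPOOL/$TESTFS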
2294
2295 #
2296 # Detect whether the current system supports slog devices
2297 #
2298 function verify_slog_support
2299 {
2300 typeset dir=/tmp/disk.$$
2301 typeset pool=foo.$$
2302 typeset vdev=$dir/a
2303 typeset sdev=$dir/b
2304
2305 $MKDIR -p $dir
2306 $MKFILE 64M $vdev $sdev
2307
2308 typeset -i ret=0
2309 if ! $ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1; then
2310 ret=1
2311 fi
2312 $RM -r $dir
2313
2314 return $ret
2315 }
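
#
# Typical (illustrative) use in a setup script: skip the test group when
# separate log devices are not supported on this system.
#
#       verify_slog_support || log_unsupported "This system doesn't support slog"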
2316
2317 #
2318 # Generate a dataset name of at least the specified length by repeating
2319 # the base string; the result is rounded up to a whole number of copies.
2320 # $1, the length of the name; $2, the base string to construct the name
2321 #
2322 function gen_dataset_name
2323 {
2324 typeset -i len=$1
2325 typeset basestr="$2"
2326 typeset -i baselen=${#basestr}
2327 typeset -i iter=0
2328 typeset l_name=""
2329
2330 if ((len % baselen == 0)); then
2331 ((iter = len / baselen))
2332 else
2333 ((iter = len / baselen + 1))
2334 fi
2335 while ((iter > 0)); do
2336 l_name="${l_name}$basestr"
2337
2338 ((iter -= 1))
2339 done
2340
2341 $ECHO $l_name
2342 }
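
#
# Illustrative sketch: create a dataset with a long name. Note the result
# is rounded up to whole copies of the base string, so it may be slightly
# longer than requested (the length used here is an example).
#
#       typeset long_name=$(gen_dataset_name 200 dataset)
#       log_must $ZFS create $TESTPOOL/$long_name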
2343
2344 #
2345 # Get cksum tuple of dataset
2346 # $1 dataset name
2347 #
2348 # sample zdb output:
2349 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2350 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2351 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2352 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2353 function datasetcksum
2354 {
2355 typeset cksum
2356 $SYNC
2357 cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
2358 | $AWK -F= '{print $7}')
2359 $ECHO $cksum
2360 }
2361
2362 #
2363 # Get cksum of file
2364 # $1 file path
2365 #
2366 function checksum
2367 {
2368 typeset cksum
2369 cksum=$($CKSUM $1 | $AWK '{print $1}')
2370 $ECHO $cksum
2371 }
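
#
# Illustrative sketch: compare a file checksum across a migration
# (paths are hypothetical).
#
#       typeset sum_before=$(checksum /$TESTPOOL/$TESTFS/file.bin)
#       # ... migrate, send/receive, or remount ...
#       typeset sum_after=$(checksum /$TESTPOOL/received/file.bin)
#       [[ $sum_before == $sum_after ]] || log_fail "checksum mismatch"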
2372
2373 #
2374 # Get the given disk/slice state from the specific field of the pool
2375 #
2376 function get_device_state #pool disk field("", "spares","logs")
2377 {
2378 typeset pool=$1
2379 typeset disk=${2#$DEV_DSKDIR/}
2380 typeset field=${3:-$pool}
2381
2382 state=$($ZPOOL status -v "$pool" 2>/dev/null | \
2383 $NAWK -v device=$disk -v pool=$pool -v field=$field \
2384 'BEGIN {startconfig=0; startfield=0; }
2385 /config:/ {startconfig=1}
2386 (startconfig==1) && ($1==field) {startfield=1; next;}
2387 (startfield==1) && ($1==device) {print $2; exit;}
2388 (startfield==1) &&
2389 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2390 echo $state
2391 }
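
#
# Illustrative sketch (hypothetical helper, not used by the suite): poll
# get_device_state until a disk reports ONLINE, e.g. after a zpool replace.
#
function example_wait_device_online
{
        typeset pool=$1
        typeset disk=$2
        typeset -i i=0

        while ((i < 10)); do
                [[ $(get_device_state $pool $disk) == "ONLINE" ]] && return 0
                $SLEEP 5
                ((i += 1))
        done
        return 1
}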
2392
2393
2394 #
2395 # print the given directory filesystem type
2396 #
2397 # $1 directory name
2398 #
2399 function get_fstype
2400 {
2401 typeset dir=$1
2402
2403 if [[ -z $dir ]]; then
2404 log_fail "Usage: get_fstype <directory>"
2405 fi
2406
2407 #
2408 # $ df -n /
2409 # / : ufs
2410 #
2411 $DF -n $dir | $AWK '{print $3}'
2412 }
2413
2414 #
2415 # Given a disk, label it to VTOC regardless of what label was on the disk
2416 # $1 disk
2417 #
2418 function labelvtoc
2419 {
2420 typeset disk=$1
2421 if [[ -z $disk ]]; then
2422 log_fail "The disk name is unspecified."
2423 fi
2424 typeset label_file=/var/tmp/labelvtoc.$$
2425 typeset arch=$($UNAME -p)
2426
2427 if is_linux; then
2428 log_note "Currently unsupported by the test framework"
2429 return 1
2430 fi
2431
2432 if [[ $arch == "i386" ]]; then
2433 $ECHO "label" > $label_file
2434 $ECHO "0" >> $label_file
2435 $ECHO "" >> $label_file
2436 $ECHO "q" >> $label_file
2437 $ECHO "q" >> $label_file
2438
2439 $FDISK -B $disk >/dev/null 2>&1
2440 # wait a while for fdisk to finish
2441 $SLEEP 60
2442 elif [[ $arch == "sparc" ]]; then
2443 $ECHO "label" > $label_file
2444 $ECHO "0" >> $label_file
2445 $ECHO "" >> $label_file
2446 $ECHO "" >> $label_file
2447 $ECHO "" >> $label_file
2448 $ECHO "q" >> $label_file
2449 else
2450 log_fail "unknown arch type"
2451 fi
2452
2453 $FORMAT -e -s -d $disk -f $label_file
2454 typeset -i ret_val=$?
2455 $RM -f $label_file
2456 #
2457 # wait for format to finish
2458 #
2459 $SLEEP 60
2460 if ((ret_val != 0)); then
2461 log_fail "unable to label $disk as VTOC."
2462 fi
2463
2464 return 0
2465 }
2466
2467 #
2468 # check if the system was installed as zfsroot or not
2469 # return: 0 if true, otherwise non-zero
2470 #
2471 function is_zfsroot
2472 {
2473 $DF -n / | $GREP zfs > /dev/null 2>&1
2474 return $?
2475 }
2476
2477 #
2478 # get the root filesystem name if it's a zfsroot system.
2479 #
2480 # return: root filesystem name
2481 function get_rootfs
2482 {
2483 typeset rootfs=""
2484 rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
2485 /etc/mnttab)
2486 if [[ -z "$rootfs" ]]; then
2487 log_fail "Cannot get rootfs"
2488 fi
2489 $ZFS list $rootfs > /dev/null 2>&1
2490 if (($? == 0)); then
2491 $ECHO $rootfs
2492 else
2493 log_fail "This is not a zfsroot system."
2494 fi
2495 }
2496
2497 #
2498 # get the rootfs's pool name
2499 # return:
2500 # rootpool name
2501 #
2502 function get_rootpool
2503 {
2504 typeset rootfs=""
2505 typeset rootpool=""
2506 rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
2507 /etc/mnttab)
2508 if [[ -z "$rootfs" ]]; then
2509 log_fail "Cannot get rootpool"
2510 fi
2511 $ZFS list $rootfs > /dev/null 2>&1
2512 if (($? == 0)); then
2513 rootpool=$($ECHO $rootfs | $AWK -F/ '{print $1}')
2514 $ECHO $rootpool
2515 else
2516 log_fail "This is not a zfsroot system."
2517 fi
2518 }
2519
2520 #
2521 # Get the sub string from specified source string
2522 #
2523 # $1 source string
2524 # $2 start position. Count from 1
2525 # $3 offset
2526 #
2527 function get_substr #src_str pos offset
2528 {
2529 typeset pos offset
2530
2531 $ECHO $1 | \
2532 $NAWK -v pos=$2 -v offset=$3 '{print substr($0, pos, offset)}'
2533 }
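
#
# Example of get_substr: extract the controller part of a Solaris-style
# disk name (the values below are illustrative).
#
#       get_substr c0t0d0s0 1 4         # prints "c0t0"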
2534
2535 #
2536 # Check if the given device is a physical device
2537 #
2538 function is_physical_device #device
2539 {
2540 typeset device=${1#$DEV_DSKDIR}
2541 device=${device#$DEV_RDSKDIR}
2542
2543 if is_linux; then
2544 [[ -b "$DEV_DSKDIR/$device" ]] && \
2545 [[ -f /sys/module/loop/parameters/max_part ]]
2546 return $?
2547 else
2548 $ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
2549 return $?
2550 fi
2551 }
2552
2553 #
2554 # Check if the given device is a real device (i.e. a SCSI device)
2555 #
2556 function is_real_device #disk
2557 {
2558 typeset disk=$1
2559 [[ -z $disk ]] && log_fail "No argument for disk given."
2560
2561 if is_linux; then
2562 $LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP disk > /dev/null 2>&1
2563 return $?
2564 fi
2565 }
2566
2567 #
2568 # Check if the given device is a loop device
2569 #
2570 function is_loop_device #disk
2571 {
2572 typeset disk=$1
2573 [[ -z $disk ]] && log_fail "No argument for disk given."
2574
2575 if is_linux; then
2576 $LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP loop > /dev/null 2>&1
2577 return $?
2578 fi
2579 }
2580
2581 #
2582 # Check if the given device is a multipath device and if there is a symbolic
2583 # link to a device mapper and to a disk
2584 # Currently no support for dm devices alone without multipath
2585 #
2586 function is_mpath_device #disk
2587 {
2588 typeset disk=$1
2589 [[ -z $disk ]] && log_fail "No argument for disk given."
2590
2591 if is_linux; then
2592 $LSBLK $DEV_MPATHDIR/$disk -o TYPE | $EGREP mpath > /dev/null 2>&1
2593 if (($? == 0)); then
2594 $READLINK $DEV_MPATHDIR/$disk > /dev/null 2>&1
2595 return $?
2596 else
2597 return 1
2598 fi
2599 fi
2600 }
2601
2602 # Set the slice prefix for disk partitioning depending
2603 # on whether the device is a real, multipath, or loop device.
2604 # Currently all disks have to be of the same type, so only the
2605 # first disk is checked to determine the slice prefix.
2606 #
2607 function set_slice_prefix
2608 {
2609 typeset disk
2610 typeset -i i=0
2611
2612 if is_linux; then
2613 while (( i < $DISK_ARRAY_NUM )); do
2614 disk="$($ECHO $DISKS | $NAWK -v i=$i '{print $(i + 1)}')"
2615 if ( is_mpath_device $disk ) && [[ -z $($ECHO $disk | $AWK 'substr($1,18,1)\
2616 ~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
2617 export SLICE_PREFIX=""
2618 return 0
2619 elif ( is_mpath_device $disk || is_loop_device $disk ); then
2620 export SLICE_PREFIX="p"
2621 return 0
2622 else
2623 log_fail "$disk not supported for partitioning."
2624 fi
2625 (( i = i + 1))
2626 done
2627 fi
2628 }
2629
2630 #
2631 # Set the directory path of the listed devices in $DISK_ARRAY_NUM
2632 # Currently all disks have to be of the same type, so only the
2633 # first disk is checked to determine the device directory.
2634 # default = /dev (linux)
2635 # real disk = /dev (linux)
2636 # multipath device = /dev/mapper (linux)
2637 #
2638 function set_device_dir
2639 {
2640 typeset disk
2641 typeset -i i=0
2642
2643 if is_linux; then
2644 while (( i < $DISK_ARRAY_NUM )); do
2645 disk="$($ECHO $DISKS | $NAWK -v i=$i '{print $(i + 1)}')"
2646 if is_mpath_device $disk; then
2647 export DEV_DSKDIR=$DEV_MPATHDIR
2648 return 0
2649 else
2650 export DEV_DSKDIR=$DEV_RDSKDIR
2651 return 0
2652 fi
2653 (( i = i + 1))
2654 done
2655 else
2656 export DEV_DSKDIR=$DEV_RDSKDIR
2657 fi
2658 }
2659
2660 #
2661 # Get the directory path of given device
2662 #
2663 function get_device_dir #device
2664 {
2665 typeset device=$1
2666
2667 if ! is_physical_device $device ; then
2668 if [[ $device != "/" ]]; then
2669 device=${device%/*}
2670 fi
2671 if [[ -b "$DEV_DSKDIR/$device" ]]; then
2672 device="$DEV_DSKDIR"
2673 fi
2674 $ECHO $device
2675 else
2676 $ECHO "$DEV_DSKDIR"
2677 fi
2678 }
2679
2680 #
2681 # Get the package name
2682 #
2683 function get_package_name
2684 {
2685 typeset dirpath=${1:-$STC_NAME}
2686
2687 $ECHO "SUNWstc-${dirpath}" | $SED -e "s/\//-/g"
2688 }
2689
2690 #
2691 # Count the number of whitespace-separated words in a string
2692 #
2693 function get_word_count
2694 {
2695 $ECHO $1 | $WC -w
2696 }
2697
2698 #
2699 # Verify that the required number of disks is given
2700 #
2701 function verify_disk_count
2702 {
2703 typeset -i min=${2:-1}
2704
2705 typeset -i count=$(get_word_count "$1")
2706
2707 if ((count < min)); then
2708 log_untested "A minimum of $min disks is required to run." \
2709 " You specified $count disk(s)"
2710 fi
2711 }
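
#
# Typical (illustrative) use at the top of a setup script: require at
# least two disks from $DISKS before running.
#
#       verify_disk_count "$DISKS" 2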
2712
2713 function ds_is_volume
2714 {
2715 typeset type=$(get_prop type $1)
2716 [[ $type = "volume" ]] && return 0
2717 return 1
2718 }
2719
2720 function ds_is_filesystem
2721 {
2722 typeset type=$(get_prop type $1)
2723 [[ $type = "filesystem" ]] && return 0
2724 return 1
2725 }
2726
2727 function ds_is_snapshot
2728 {
2729 typeset type=$(get_prop type $1)
2730 [[ $type = "snapshot" ]] && return 0
2731 return 1
2732 }
2733
2734 #
2735 # Check if Trusted Extensions are installed and enabled
2736 #
2737 function is_te_enabled
2738 {
2739 $SVCS -H -o state labeld 2>/dev/null | $GREP "enabled" > /dev/null
2740 if (($? != 0)); then
2741 return 1
2742 else
2743 return 0
2744 fi
2745 }
2746
2747 # Utility function to determine if a system has multiple cpus.
2748 function is_mp
2749 {
2750 if is_linux; then
2751 (($($NPROC) > 1))
2752 else
2753 (($($PSRINFO | $WC -l) > 1))
2754 fi
2755
2756 return $?
2757 }
2758
2759 function get_cpu_freq
2760 {
2761 if is_linux; then
2762 lscpu | $AWK '/CPU MHz/ { print $3 }'
2763 else
2764 $PSRINFO -v 0 | $AWK '/processor operates at/ {print $6}'
2765 fi
2766 }
2767
2768 # Run the given command as the user provided.
2769 function user_run
2770 {
2771 typeset user=$1
2772 shift
2773
2774 log_note "user:$user $@"
2775 eval \$SU \$user -c \"$@\" > /tmp/out 2>/tmp/err
2776 return $?
2777 }
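
#
# Illustrative sketch of user_run (the user name and permission set are
# hypothetical): run a delegated zfs command as an unprivileged user.
#
#       log_must $ZFS allow staff1 snapshot $TESTPOOL/$TESTFS
#       user_run staff1 "$ZFS snapshot $TESTPOOL/$TESTFS@snap" || \
#           log_fail "delegated snapshot failed"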
2778
2779 #
2780 # Check if the pool contains the specified vdevs
2781 #
2782 # $1 pool
2783 # $2..n <vdev> ...
2784 #
2785 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2786 # vdevs is not in the pool, and 2 if pool name is missing.
2787 #
2788 function vdevs_in_pool
2789 {
2790 typeset pool=$1
2791 typeset vdev
2792
2793 if [[ -z $pool ]]; then
2794 log_note "Missing pool name."
2795 return 2
2796 fi
2797
2798 shift
2799
2800 typeset tmpfile=$($MKTEMP)
2801 $ZPOOL list -Hv "$pool" >$tmpfile
2802 for vdev in $@; do
2803 $GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1
2804 [[ $? -ne 0 ]] && { $RM -f $tmpfile; return 1; }
2805 done
2806
2807 $RM -f $tmpfile
2808
2809 return 0
2810 }
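
#
# Illustrative usage of vdevs_in_pool (device names are hypothetical):
# confirm that both sides of a mirror really ended up in the pool.
#
#       log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2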
2811
2812 function get_max
2813 {
2814 typeset -l i max=$1
2815 shift
2816
2817 for i in "$@"; do
2818 max=$((max > i ? max : i))
2819 done
2820
2821 echo $max
2822 }
2823
2824 function get_min
2825 {
2826 typeset -l i min=$1
2827 shift
2828
2829 for i in "$@"; do
2830 min=$((min < i ? min : i))
2831 done
2832
2833 echo $min
2834 }
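
#
# Examples of get_max and get_min (values are illustrative):
#
#       get_max 13 7 42 5       # prints 42
#       get_min 13 7 42 5       # prints 5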
2835
2836 #
2837 # Wait for newly created block devices to have their minors created.
2838 #
2839 function block_device_wait
2840 {
2841 if is_linux; then
2842 $UDEVADM trigger
2843 $UDEVADM settle
2844 fi
2845 }