1 #!/bin/ksh -p
2 #
3 # CDDL HEADER START
4 #
5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
8 #
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
13 #
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
19 #
20 # CDDL HEADER END
21 #
22
23 #
24 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25 # Use is subject to license terms.
26 #
27
28 #
29 # Copyright (c) 2012, 2015 by Delphix. All rights reserved.
30 #
31
32 . ${STF_TOOLS}/include/logapi.shlib
33
34 # Determine if this is a Linux test system
35 #
36 # Return 0 if the platform is Linux, 1 otherwise
37
38 function is_linux
39 {
40 if [[ $($UNAME -o) == "GNU/Linux" ]]; then
41 return 0
42 else
43 return 1
44 fi
45 }
46
47 # Determine if this is a 32-bit system
48 #
49 # Return 0 if the platform is 32-bit, 1 otherwise
50
51 function is_32bit
52 {
53 if [[ $(getconf LONG_BIT) == "32" ]]; then
54 return 0
55 else
56 return 1
57 fi
58 }
59
60 # Determine if kmemleak is enabled
61 #
62 # Return 0 if kmemleak is enabled, 1 otherwise
63
64 function is_kmemleak
65 {
66 if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
67 return 0
68 else
69 return 1
70 fi
71 }
72
73 # Determine whether a dataset is mounted
74 #
75 # $1 dataset name
76 # $2 filesystem type; optional - defaulted to zfs
77 #
78 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
79
80 function ismounted
81 {
82 typeset fstype=$2
83 [[ -z $fstype ]] && fstype=zfs
84 typeset out dir name ret
85
86 case $fstype in
87 zfs)
88 if [[ "$1" == "/"* ]] ; then
89 for out in $($ZFS mount | $AWK '{print $2}'); do
90 [[ $1 == $out ]] && return 0
91 done
92 else
93 for out in $($ZFS mount | $AWK '{print $1}'); do
94 [[ $1 == $out ]] && return 0
95 done
96 fi
97 ;;
98 ufs|nfs)
99 out=$($DF -F $fstype $1 2>/dev/null)
100 ret=$?
101 (($ret != 0)) && return $ret
102
103 dir=${out%%\(*}
104 dir=${dir%% *}
105 name=${out##*\(}
106 name=${name%%\)*}
107 name=${name%% *}
108
109 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
110 ;;
111 ext2)
112 out=$($DF -t $fstype $1 2>/dev/null)
113 return $?
114 ;;
115 zvol)
116 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
117 link=$(readlink -f $ZVOL_DEVDIR/$1)
118 [[ -n "$link" ]] && \
119 $MOUNT | $GREP -q "^$link" && \
120 return 0
121 fi
122 ;;
123 esac
124
125 return 1
126 }
127
128 # Return 0 if a dataset is mounted; 1 otherwise
129 #
130 # $1 dataset name
131 # $2 filesystem type; optional - defaulted to zfs
132
133 function mounted
134 {
135 ismounted $1 $2
136 (($? == 0)) && return 0
137 return 1
138 }
139
140 # Return 0 if a dataset is unmounted; 1 otherwise
141 #
142 # $1 dataset name
143 # $2 filesystem type; optional - defaulted to zfs
144
145 function unmounted
146 {
147 ismounted $1 $2
148 (($? == 1)) && return 0
149 return 1
150 }
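
#
# Usage sketch (illustrative, not called anywhere in this library): a
# test's cleanup path might check the mount state before unmounting.
# $TESTPOOL/$TESTFS are the suite's default pool/file system names.
#
#	if ismounted $TESTPOOL/$TESTFS; then
#		log_must $ZFS unmount $TESTPOOL/$TESTFS
#	fi
#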
151
152 # split line on ","
153 #
154 # $1 - line to split
155
156 function splitline
157 {
158 $ECHO $1 | $SED "s/,/ /g"
159 }
160
161 function default_setup
162 {
163 default_setup_noexit "$@"
164
165 log_pass
166 }
167
168 #
169 # Given a list of disks, setup storage pools and datasets.
170 #
171 function default_setup_noexit
172 {
173 typeset disklist=$1
174 typeset container=$2
175 typeset volume=$3
176 log_note begin default_setup_noexit
177
178 if is_global_zone; then
179 if poolexists $TESTPOOL ; then
180 destroy_pool $TESTPOOL
181 fi
182 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
183 log_note creating pool $TESTPOOL $disklist
184 log_must $ZPOOL create -f $TESTPOOL $disklist
185 else
186 reexport_pool
187 fi
188
189 $RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
190 $MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR
191
192 log_must $ZFS create $TESTPOOL/$TESTFS
193 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
194
195 if [[ -n $container ]]; then
196 $RM -rf $TESTDIR1 || \
197 log_unresolved Could not remove $TESTDIR1
198 $MKDIR -p $TESTDIR1 || \
199 log_unresolved Could not create $TESTDIR1
200
201 log_must $ZFS create $TESTPOOL/$TESTCTR
202 log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
203 log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
204 log_must $ZFS set mountpoint=$TESTDIR1 \
205 $TESTPOOL/$TESTCTR/$TESTFS1
206 fi
207
208 if [[ -n $volume ]]; then
209 if is_global_zone ; then
210 log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
211 block_device_wait
212 else
213 log_must $ZFS create $TESTPOOL/$TESTVOL
214 fi
215 fi
216 }
217
218 #
219 # Given a list of disks, setup a storage pool, file system and
220 # a container.
221 #
222 function default_container_setup
223 {
224 typeset disklist=$1
225
226 default_setup "$disklist" "true"
227 }
228
229 #
230 # Given a list of disks, setup a storage pool, file system
231 # and a volume.
232 #
233 function default_volume_setup
234 {
235 typeset disklist=$1
236
237 default_setup "$disklist" "" "true"
238 }
239
240 #
241 # Given a list of disks, setup a storage pool, file system,
242 # a container and a volume.
243 #
244 function default_container_volume_setup
245 {
246 typeset disklist=$1
247
248 default_setup "$disklist" "true" "true"
249 }
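
#
# Usage sketch (illustrative): a test's setup script would typically call
# one of the wrappers above with the space-separated $DISKS list provided
# by the test framework.
#
#	default_setup "$DISKS"			# pool plus $TESTFS
#	default_container_setup "$DISKS"	# also creates $TESTCTR/$TESTFS1
#	default_volume_setup "$DISKS"		# also creates $TESTVOL
#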
250
251 #
252 # Create a snapshot on a filesystem or volume. By default, create a snapshot
253 # on the filesystem
254 #
255 # $1 Existing filesystem or volume name. Default, $TESTFS
256 # $2 snapshot name. Default, $TESTSNAP
257 #
258 function create_snapshot
259 {
260 typeset fs_vol=${1:-$TESTFS}
261 typeset snap=${2:-$TESTSNAP}
262
263 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
264 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
265
266 if snapexists $fs_vol@$snap; then
267 log_fail "$fs_vol@$snap already exists."
268 fi
269 datasetexists $fs_vol || \
270 log_fail "$fs_vol must exist."
271
272 log_must $ZFS snapshot $fs_vol@$snap
273 }
274
275 #
276 # Create a clone from a snapshot, default clone name is $TESTCLONE.
277 #
278 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
279 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
280 #
281 function create_clone # snapshot clone
282 {
283 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
284 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
285
286 [[ -z $snap ]] && \
287 log_fail "Snapshot name is undefined."
288 [[ -z $clone ]] && \
289 log_fail "Clone name is undefined."
290
291 log_must $ZFS clone $snap $clone
292 }
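
#
# Usage sketch (illustrative): snapshot the default file system and clone
# it, relying on the $TESTSNAP and $TESTCLONE defaults built into the two
# helpers above.
#
#	create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
#	create_clone $TESTPOOL/$TESTFS@$TESTSNAP $TESTPOOL/$TESTCLONE
#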
293
294 function default_mirror_setup
295 {
296 default_mirror_setup_noexit $1 $2 $3
297
298 log_pass
299 }
300
301 #
302 # Given a pair of disks, set up a storage pool and dataset for the mirror
303 # @parameters: $1 the primary side of the mirror
304 # $2 the secondary side of the mirror
305 # @uses: ZPOOL ZFS TESTPOOL TESTFS
306 function default_mirror_setup_noexit
307 {
308 readonly func="default_mirror_setup_noexit"
309 typeset primary=$1
310 typeset secondary=$2
311
312 [[ -z $primary ]] && \
313 log_fail "$func: No parameters passed"
314 [[ -z $secondary ]] && \
315 log_fail "$func: No secondary partition passed"
316 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
317 log_must $ZPOOL create -f $TESTPOOL mirror $@
318 log_must $ZFS create $TESTPOOL/$TESTFS
319 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
320 }
321
322 #
323 # create a number of mirrors.
324 # We create a number ($1) of 2-way mirrors using the pairs of disks named
325 # on the command line. These mirrors are *not* mounted
326 # @parameters: $1 the number of mirrors to create
327 # $... the devices to use to create the mirrors on
328 # @uses: ZPOOL ZFS TESTPOOL
329 function setup_mirrors
330 {
331 typeset -i nmirrors=$1
332
333 shift
334 while ((nmirrors > 0)); do
335 log_must test -n "$1" -a -n "$2"
336 [[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
337 log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
338 shift 2
339 ((nmirrors = nmirrors - 1))
340 done
341 }
342
343 #
344 # create a number of raidz pools.
345 # We create a number ($1) of raidz pools, each using a pair of disks named
346 # on the command line. These pools are *not* mounted
347 # @parameters: $1 the number of pools to create
348 # $... the devices to use to create the pools on
349 # @uses: ZPOOL ZFS TESTPOOL
350 function setup_raidzs
351 {
352 typeset -i nraidzs=$1
353
354 shift
355 while ((nraidzs > 0)); do
356 log_must test -n "$1" -a -n "$2"
357 [[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
358 log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
359 shift 2
360 ((nraidzs = nraidzs - 1))
361 done
362 }
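
#
# Usage sketch (illustrative): build two 2-way mirror pools, named
# ${TESTPOOL}2 and ${TESTPOOL}1, from four devices. DISK1..DISK4 are
# placeholders for real device names, not variables defined here.
#
#	setup_mirrors 2 $DISK1 $DISK2 $DISK3 $DISK4
#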
363
364 #
365 # Destroy the configured testpool mirrors.
366 # the mirrors are of the form ${TESTPOOL}{number}
367 # @uses: ZPOOL ZFS TESTPOOL
368 function destroy_mirrors
369 {
370 default_cleanup_noexit
371
372 log_pass
373 }
374
375 #
376 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
377 # $1 the list of disks
378 #
379 function default_raidz_setup
380 {
381 typeset disklist="$*"
382 disks=(${disklist[*]})
383
384 if [[ ${#disks[*]} -lt 2 ]]; then
385 log_fail "A raid-z requires a minimum of two disks."
386 fi
387
388 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
389 log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
390 log_must $ZFS create $TESTPOOL/$TESTFS
391 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
392
393 log_pass
394 }
395
396 #
397 # Common function used to cleanup storage pools and datasets.
398 #
399 # Invoked at the start of the test suite to ensure the system
400 # is in a known state, and also at the end of each set of
401 # sub-tests to ensure errors from one set of tests don't
402 # impact the execution of the next set.
403
404 function default_cleanup
405 {
406 default_cleanup_noexit
407
408 log_pass
409 }
410
411 function default_cleanup_noexit
412 {
413 typeset exclude=""
414 typeset pool=""
415 #
416 # Destroying the pool will also destroy any
417 # filesystems it contains.
418 #
419 if is_global_zone; then
420 $ZFS unmount -a > /dev/null 2>&1
421 [[ -z "$KEEP" ]] && KEEP="rpool"
422 exclude=`eval $ECHO \"'(${KEEP})'\"`
423 ALL_POOLS=$($ZPOOL list -H -o name \
424 | $GREP -v "$NO_POOLS" | $EGREP -v "$exclude")
425 # Here, we loop through the pools we're allowed to
426 # destroy, only destroying them if it's safe to do
427 # so.
428 while [ ! -z ${ALL_POOLS} ]
429 do
430 for pool in ${ALL_POOLS}
431 do
432 if safe_to_destroy_pool $pool ;
433 then
434 destroy_pool $pool
435 fi
436 ALL_POOLS=$($ZPOOL list -H -o name \
437 | $GREP -v "$NO_POOLS" \
438 | $EGREP -v "$exclude")
439 done
440 done
441
442 $ZFS mount -a
443 else
444 typeset fs=""
445 for fs in $($ZFS list -H -o name \
446 | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
447 datasetexists $fs && \
448 log_must $ZFS destroy -Rf $fs
449 done
450
451 # Clean up here to avoid leaving garbage directories behind.
452 for fs in $($ZFS list -H -o name); do
453 [[ $fs == /$ZONE_POOL ]] && continue
454 [[ -d $fs ]] && log_must $RM -rf $fs/*
455 done
456
457 #
458 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems' properties to
459 # their default values
460 #
461 for fs in $($ZFS list -H -o name); do
462 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
463 log_must $ZFS set reservation=none $fs
464 log_must $ZFS set recordsize=128K $fs
465 log_must $ZFS set mountpoint=/$fs $fs
466 typeset enc=""
467 enc=$(get_prop encryption $fs)
468 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
469 [[ "$enc" == "off" ]]; then
470 log_must $ZFS set checksum=on $fs
471 fi
472 log_must $ZFS set compression=off $fs
473 log_must $ZFS set atime=on $fs
474 log_must $ZFS set devices=off $fs
475 log_must $ZFS set exec=on $fs
476 log_must $ZFS set setuid=on $fs
477 log_must $ZFS set readonly=off $fs
478 log_must $ZFS set snapdir=hidden $fs
479 log_must $ZFS set aclmode=groupmask $fs
480 log_must $ZFS set aclinherit=secure $fs
481 fi
482 done
483 fi
484
485 [[ -d $TESTDIR ]] && \
486 log_must $RM -rf $TESTDIR
487
488 disk1=${DISKS%% *}
489 if is_mpath_device $disk1; then
490 delete_partitions
491 fi
492 }
493
494
495 #
496 # Common function used to cleanup storage pools, file systems
497 # and containers.
498 #
499 function default_container_cleanup
500 {
501 if ! is_global_zone; then
502 reexport_pool
503 fi
504
505 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
506 [[ $? -eq 0 ]] && \
507 log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1
508
509 datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
510 log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
511
512 datasetexists $TESTPOOL/$TESTCTR && \
513 log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR
514
515 [[ -e $TESTDIR1 ]] && \
516 log_must $RM -rf $TESTDIR1 > /dev/null 2>&1
517
518 default_cleanup
519 }
520
521 #
522 # Common function used to clean up a snapshot of a file system or volume. By
523 # default, delete the file system's snapshot
524 #
525 # $1 snapshot name
526 #
527 function destroy_snapshot
528 {
529 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
530
531 if ! snapexists $snap; then
532 log_fail "'$snap' does not exist."
533 fi
534
535 #
536 # The value returned by 'get_prop' is not the real mountpoint when the
537 # snapshot is unmounted. So, first check and make sure this snapshot
538 # is mounted on the current system.
539 #
540 typeset mtpt=""
541 if ismounted $snap; then
542 mtpt=$(get_prop mountpoint $snap)
543 (($? != 0)) && \
544 log_fail "get_prop mountpoint $snap failed."
545 fi
546
547 log_must $ZFS destroy $snap
548 [[ $mtpt != "" && -d $mtpt ]] && \
549 log_must $RM -rf $mtpt
550 }
551
552 #
553 # Common function used to cleanup clone.
554 #
555 # $1 clone name
556 #
557 function destroy_clone
558 {
559 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
560
561 if ! datasetexists $clone; then
562 log_fail "'$clone' does not exist."
563 fi
564
565 # For the same reason as in destroy_snapshot
566 typeset mtpt=""
567 if ismounted $clone; then
568 mtpt=$(get_prop mountpoint $clone)
569 (($? != 0)) && \
570 log_fail "get_prop mountpoint $clone failed."
571 fi
572
573 log_must $ZFS destroy $clone
574 [[ $mtpt != "" && -d $mtpt ]] && \
575 log_must $RM -rf $mtpt
576 }
577
578 # Return 0 if a snapshot exists; $? otherwise
579 #
580 # $1 - snapshot name
581
582 function snapexists
583 {
584 $ZFS list -H -t snapshot "$1" > /dev/null 2>&1
585 return $?
586 }
587
588 #
589 # Set a property to a certain value on a dataset.
590 # Sets a property of the dataset to the value as passed in.
591 # @param:
592 # $1 dataset whose property is being set
593 # $2 property to set
594 # $3 value to set property to
595 # @return:
596 # 0 if the property could be set.
597 # non-zero otherwise.
598 # @use: ZFS
599 #
600 function dataset_setprop
601 {
602 typeset fn=dataset_setprop
603
604 if (($# < 3)); then
605 log_note "$fn: Insufficient parameters (need 3, had $#)"
606 return 1
607 fi
608 typeset output=
609 output=$($ZFS set $2=$3 $1 2>&1)
610 typeset rv=$?
611 if ((rv != 0)); then
612 log_note "Setting property on $1 failed."
613 log_note "property $2=$3"
614 log_note "Return Code: $rv"
615 log_note "Output: $output"
616 return $rv
617 fi
618 return 0
619 }
620
621 #
622 # Assign suite defined dataset properties.
623 # This function is used to apply the suite's defined default set of
624 # properties to a dataset.
625 # @parameters: $1 dataset to use
626 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
627 # @returns:
628 # 0 if the dataset has been altered.
629 # 1 if no pool name was passed in.
630 # 2 if the dataset could not be found.
631 # 3 if the dataset could not have its properties set.
632 #
633 function dataset_set_defaultproperties
634 {
635 typeset dataset="$1"
636
637 [[ -z $dataset ]] && return 1
638
639 typeset confset=
640 typeset -i found=0
641 for confset in $($ZFS list); do
642 if [[ $dataset = $confset ]]; then
643 found=1
644 break
645 fi
646 done
647 [[ $found -eq 0 ]] && return 2
648 if [[ -n $COMPRESSION_PROP ]]; then
649 dataset_setprop $dataset compression $COMPRESSION_PROP || \
650 return 3
651 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
652 fi
653 if [[ -n $CHECKSUM_PROP ]]; then
654 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
655 return 3
656 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
657 fi
658 return 0
659 }
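
#
# Usage sketch (illustrative): apply the suite defaults to a freshly
# created dataset and treat any of the non-zero return codes documented
# above as fatal.
#
#	dataset_set_defaultproperties $TESTPOOL/$TESTFS || \
#		log_fail "Could not set default properties on $TESTPOOL/$TESTFS"
#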
660
661 #
662 # Check a numeric assertion
663 # @parameter: $@ the assertion to check
664 # @output: big loud notice if assertion failed
665 # @use: log_fail
666 #
667 function assert
668 {
669 (($@)) || log_fail "$@"
670 }
671
672 #
673 # Function to format the partition sizes of a disk
674 # Given a disk cxtxdx, reduce all partitions
675 # to size 0
676 #
677 function zero_partitions #<whole_disk_name>
678 {
679 typeset diskname=$1
680 typeset i
681
682 if is_linux; then
683 log_must $FORMAT $DEV_DSKDIR/$diskname -s -- mklabel gpt
684 else
685 for i in 0 1 3 4 5 6 7
686 do
687 set_partition $i "" 0mb $diskname
688 done
689 fi
690 }
691
692 #
693 # Given a slice, size and disk, this function
694 # formats the slice to the specified size.
695 # Size should be specified with units as per
696 # the `format` command requirements eg. 100mb 3gb
697 #
698 # NOTE: This entire interface is problematic for the Linux parted utility
699 # which requires the end of the partition to be specified. It would be
700 # best to retire this interface and replace it with something more flexible.
701 # At the moment a best effort is made.
702 #
703 function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
704 {
705 typeset -i slicenum=$1
706 typeset start=$2
707 typeset size=$3
708 typeset disk=$4
709 [[ -z $slicenum || -z $size || -z $disk ]] && \
710 log_fail "The slice, size or disk name is unspecified."
711
712 if is_linux; then
713 typeset size_mb=${size%%[mMgG]}
714
715 size_mb=${size_mb%%[mMgG][bB]}
716 if [[ ${size:1:1} == 'g' ]]; then
717 ((size_mb = size_mb * 1024))
718 fi
719
720 # Create GPT partition table when setting slice 0 or
721 # when the device doesn't already contain a GPT label.
722 $FORMAT $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
723 typeset ret_val=$?
724 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
725 log_must $FORMAT $DEV_DSKDIR/$disk -s -- mklabel gpt
726 fi
727
728 # When no start is given align on the first cylinder.
729 if [[ -z "$start" ]]; then
730 start=1
731 fi
732
733 # Determine the cylinder size for the device and using
734 # that calculate the end offset in cylinders.
735 typeset -i cly_size_kb=0
736 cly_size_kb=$($FORMAT -m $DEV_DSKDIR/$disk -s -- \
737 unit cyl print | $HEAD -3 | $TAIL -1 | \
738 $AWK -F '[:k.]' '{print $4}')
739 ((end = (size_mb * 1024 / cly_size_kb) + start))
740
741 log_must $FORMAT $DEV_DSKDIR/$disk -s -- \
742 mkpart part$slicenum ${start}cyl ${end}cyl
743
744 $BLOCKDEV --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
745 block_device_wait
746 else
747 typeset format_file=/var/tmp/format_in.$$
748
749 $ECHO "partition" >$format_file
750 $ECHO "$slicenum" >> $format_file
751 $ECHO "" >> $format_file
752 $ECHO "" >> $format_file
753 $ECHO "$start" >> $format_file
754 $ECHO "$size" >> $format_file
755 $ECHO "label" >> $format_file
756 $ECHO "" >> $format_file
757 $ECHO "q" >> $format_file
758 $ECHO "q" >> $format_file
759
760 $FORMAT -e -s -d $disk -f $format_file
761 fi
762 typeset ret_val=$?
763 $RM -f $format_file
764 [[ $ret_val -ne 0 ]] && \
765 log_fail "Unable to format $disk slice $slicenum to $size"
766 return 0
767 }
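
#
# Usage sketch (illustrative): give a whole disk a single 100mb slice 0
# and later wipe its partitions again. $DISK stands in for one of the
# device names from $DISKS.
#
#	set_partition 0 "" 100mb $DISK
#	...
#	zero_partitions $DISK
#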
768
769 #
770 # Delete all partitions on all disks - this is specifically for the use of multipath
771 # devices which currently can only be used in the test suite as raw/un-partitioned
772 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
773 #
774 function delete_partitions
775 {
776 typeset -i j=1
777
778 if [[ -z $DISK_ARRAY_NUM ]]; then
779 DISK_ARRAY_NUM=$($ECHO ${DISKS} | $NAWK '{print NF}')
780 fi
781 if [[ -z $DISKSARRAY ]]; then
782 DISKSARRAY=$DISKS
783 fi
784
785 if is_linux; then
786 if (( $DISK_ARRAY_NUM == 1 )); then
787 while ((j < MAX_PARTITIONS)); do
788 $FORMAT $DEV_DSKDIR/$DISK -s rm $j > /dev/null 2>&1
789 if (( $? == 1 )); then
790 $LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null
791 if (( $? == 1 )); then
792 log_note "Partitions for $DISK should be deleted"
793 else
794 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
795 fi
796 return 0
797 else
798 $LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null
799 if (( $? == 0 )); then
800 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
801 fi
802 fi
803 ((j = j+1))
804 done
805 else
806 for disk in `$ECHO $DISKSARRAY`; do
807 while ((j < MAX_PARTITIONS)); do
808 $FORMAT $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
809 if (( $? == 1 )); then
810 $LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null
811 if (( $? == 1 )); then
812 log_note "Partitions for $disk should be deleted"
813 else
814 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
815 fi
816 j=7
817 else
818 $LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null
819 if (( $? == 0 )); then
820 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
821 fi
822 fi
823 ((j = j+1))
824 done
825 j=1
826 done
827 fi
828 fi
829 return 0
830 }
831
832 #
833 # Get the end cyl of the given slice
834 #
835 function get_endslice #<disk> <slice>
836 {
837 typeset disk=$1
838 typeset slice=$2
839 if [[ -z $disk || -z $slice ]] ; then
840 log_fail "The disk name or slice number is unspecified."
841 fi
842
843 if is_linux; then
844 endcyl=$($FORMAT -s $DEV_DSKDIR/$disk -- unit cyl print | \
845 $GREP "part${slice}" | \
846 $AWK '{print $3}' | \
847 $SED 's,cyl,,')
848 ((endcyl = (endcyl + 1)))
849 else
850 disk=${disk#/dev/dsk/}
851 disk=${disk#/dev/rdsk/}
852 disk=${disk%s*}
853
854 typeset -i ratio=0
855 ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
856 $GREP "sectors\/cylinder" | \
857 $AWK '{print $2}')
858
859 if ((ratio == 0)); then
860 return
861 fi
862
863 typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
864 $NAWK -v token="$slice" '{if ($1==token) print $6}')
865
866 ((endcyl = (endcyl + 1) / ratio))
867 fi
868
869 echo $endcyl
870 }
871
872
873 #
874 # Given a size,disk and total slice number, this function formats the
875 # disk slices from 0 to the total slice number with the same specified
876 # size.
877 #
878 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
879 {
880 typeset -i i=0
881 typeset slice_size=$1
882 typeset disk_name=$2
883 typeset total_slices=$3
884 typeset cyl
885
886 zero_partitions $disk_name
887 while ((i < $total_slices)); do
888 if ! is_linux; then
889 if ((i == 2)); then
890 ((i = i + 1))
891 continue
892 fi
893 fi
894 set_partition $i "$cyl" $slice_size $disk_name
895 cyl=$(get_endslice $disk_name $i)
896 ((i = i+1))
897 done
898 }
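
#
# Usage sketch (illustrative): create 1gb slices across a disk. Note that
# on non-Linux platforms the loop above skips slice 2, which is reserved
# for the whole disk. $DISK is a placeholder device name.
#
#	partition_disk 1gb $DISK 6
#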
899
900 #
901 # This function continues to write filenum files into each of dirnum
902 # directories until either $FILE_WRITE returns an error or the
903 # maximum number of files per directory has been written.
904 #
905 # Usage:
906 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
907 #
908 # Return value: 0 on success
909 # non 0 on error
910 #
911 # Where :
912 # destdir: the directory under which everything is to be created
913 # dirnum: the maximum number of subdirectories to use, -1 no limit
914 # filenum: the maximum number of files per subdirectory
915 # bytes: number of bytes to write
916 # num_writes: number of times to write out bytes
917 # data: the data that will be written
918 #
919 # E.g.
920 # fill_fs /testdir 20 25 1024 256 0
921 #
922 # Note: bytes * num_writes equals the size of the testfile
923 #
924 function fill_fs # destdir dirnum filenum bytes num_writes data
925 {
926 typeset destdir=${1:-$TESTDIR}
927 typeset -i dirnum=${2:-50}
928 typeset -i filenum=${3:-50}
929 typeset -i bytes=${4:-8192}
930 typeset -i num_writes=${5:-10240}
931 typeset -i data=${6:-0}
932
933 typeset -i odirnum=1
934 typeset -i idirnum=0
935 typeset -i fn=0
936 typeset -i retval=0
937
938 log_must $MKDIR -p $destdir/$idirnum
939 while (($odirnum > 0)); do
940 if ((dirnum >= 0 && idirnum >= dirnum)); then
941 odirnum=0
942 break
943 fi
944 $FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \
945 -b $bytes -c $num_writes -d $data
946 retval=$?
947 if (($retval != 0)); then
948 odirnum=0
949 break
950 fi
951 if (($fn >= $filenum)); then
952 fn=0
953 ((idirnum = idirnum + 1))
954 log_must $MKDIR -p $destdir/$idirnum
955 else
956 ((fn = fn + 1))
957 fi
958 done
959 return $retval
960 }
961
962 #
963 # Simple function to get the specified property. If unable to
964 # get the property, log a note and return 1.
965 #
966 # Note property is in 'parsable' format (-p)
967 #
968 function get_prop # property dataset
969 {
970 typeset prop_val
971 typeset prop=$1
972 typeset dataset=$2
973
974 prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
975 if [[ $? -ne 0 ]]; then
976 log_note "Unable to get $prop property for dataset " \
977 "$dataset"
978 return 1
979 fi
980
981 $ECHO $prop_val
982 return 0
983 }
984
985 #
986 # Simple function to get the specified property of pool. If unable to
987 # get the property, log a note and return 1.
988 #
989 function get_pool_prop # property pool
990 {
991 typeset prop_val
992 typeset prop=$1
993 typeset pool=$2
994
995 if poolexists $pool ; then
996 prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
997 $AWK '{print $3}')
998 if [[ $? -ne 0 ]]; then
999 log_note "Unable to get $prop property for pool " \
1000 "$pool"
1001 return 1
1002 fi
1003 else
1004 log_note "Pool $pool does not exist."
1005 return 1
1006 fi
1007
1008 $ECHO $prop_val
1009 return 0
1010 }
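
#
# Usage sketch (illustrative): read a dataset property and a pool property
# and check them. Because get_prop() uses the parsable (-p) format, sizes
# are returned in bytes.
#
#	typeset recsize=$(get_prop recordsize $TESTPOOL/$TESTFS)
#	typeset health=$(get_pool_prop health $TESTPOOL)
#	[[ $health == "ONLINE" ]] || log_fail "Pool is not healthy: $health"
#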
1011
1012 # Return 0 if a pool exists; $? otherwise
1013 #
1014 # $1 - pool name
1015
1016 function poolexists
1017 {
1018 typeset pool=$1
1019
1020 if [[ -z $pool ]]; then
1021 log_note "No pool name given."
1022 return 1
1023 fi
1024
1025 $ZPOOL get name "$pool" > /dev/null 2>&1
1026 return $?
1027 }
1028
1029 # Return 0 if all the specified datasets exist; $? otherwise
1030 #
1031 # $1-n dataset name
1032 function datasetexists
1033 {
1034 if (($# == 0)); then
1035 log_note "No dataset name given."
1036 return 1
1037 fi
1038
1039 while (($# > 0)); do
1040 $ZFS get name $1 > /dev/null 2>&1 || \
1041 return $?
1042 shift
1043 done
1044
1045 return 0
1046 }
1047
1048 # return 0 if none of the specified datasets exists, otherwise return 1.
1049 #
1050 # $1-n dataset name
1051 function datasetnonexists
1052 {
1053 if (($# == 0)); then
1054 log_note "No dataset name given."
1055 return 1
1056 fi
1057
1058 while (($# > 0)); do
1059 $ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1060 && return 1
1061 shift
1062 done
1063
1064 return 0
1065 }
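
#
# Usage sketch (illustrative): guard setup and cleanup steps with the
# existence checks above so that a missing pool or dataset is not treated
# as a fatal error.
#
#	poolexists $TESTPOOL || log_must $ZPOOL create -f $TESTPOOL $DISKS
#	datasetexists $TESTPOOL/$TESTFS && \
#		log_must $ZFS destroy -r $TESTPOOL/$TESTFS
#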
1066
1067 #
1068 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1069 #
1070 # Returns 0 if shared, 1 otherwise.
1071 #
1072 function is_shared
1073 {
1074 typeset fs=$1
1075 typeset mtpt
1076
1077 if [[ $fs != "/"* ]] ; then
1078 if datasetnonexists "$fs" ; then
1079 return 1
1080 else
1081 mtpt=$(get_prop mountpoint "$fs")
1082 case $mtpt in
1083 none|legacy|-) return 1
1084 ;;
1085 *) fs=$mtpt
1086 ;;
1087 esac
1088 fi
1089 fi
1090
1091 if is_linux; then
1092 for mtpt in `$SHARE | $AWK '{print $1}'` ; do
1093 if [[ $mtpt == $fs ]] ; then
1094 return 0
1095 fi
1096 done
1097 return 1
1098 fi
1099
1100 for mtpt in `$SHARE | $AWK '{print $2}'` ; do
1101 if [[ $mtpt == $fs ]] ; then
1102 return 0
1103 fi
1104 done
1105
1106 typeset stat=$($SVCS -H -o STA nfs/server:default)
1107 if [[ $stat != "ON" ]]; then
1108 log_note "Current nfs/server status: $stat"
1109 fi
1110
1111 return 1
1112 }
1113
1114 #
1115 # Given a dataset name determine if it is shared via SMB.
1116 #
1117 # Returns 0 if shared, 1 otherwise.
1118 #
1119 function is_shared_smb
1120 {
1121 typeset fs=$1
1122 typeset mtpt
1123
1124 if datasetnonexists "$fs" ; then
1125 return 1
1126 else
1127 fs=$(echo $fs | sed 's@/@_@g')
1128 fi
1129
1130 if is_linux; then
1131 for mtpt in `$NET usershare list | $AWK '{print $1}'` ; do
1132 if [[ $mtpt == $fs ]] ; then
1133 return 0
1134 fi
1135 done
1136 return 1
1137 else
1138 log_unsupported "Currently unsupported by the test framework"
1139 return 1
1140 fi
1141 }
1142
1143 #
1144 # Given a mountpoint, determine if it is not shared via NFS.
1145 #
1146 # Returns 0 if not shared, 1 otherwise.
1147 #
1148 function not_shared
1149 {
1150 typeset fs=$1
1151
1152 is_shared $fs
1153 if (($? == 0)); then
1154 return 1
1155 fi
1156
1157 return 0
1158 }
1159
1160 #
1161 # Given a dataset determine if it is not shared via SMB.
1162 #
1163 # Returns 0 if not shared, 1 otherwise.
1164 #
1165 function not_shared_smb
1166 {
1167 typeset fs=$1
1168
1169 is_shared_smb $fs
1170 if (($? == 0)); then
1171 return 1
1172 fi
1173
1174 return 0
1175 }
1176
1177 #
1178 # Helper function to unshare a mountpoint.
1179 #
1180 function unshare_fs #fs
1181 {
1182 typeset fs=$1
1183
1184 is_shared $fs || is_shared_smb $fs
1185 if (($? == 0)); then
1186 log_must $ZFS unshare $fs
1187 fi
1188
1189 return 0
1190 }
1191
1192 #
1193 # Helper function to share a NFS mountpoint.
1194 #
1195 function share_nfs #fs
1196 {
1197 typeset fs=$1
1198
1199 if is_linux; then
1200 is_shared $fs
1201 if (($? != 0)); then
1202 log_must $SHARE "*:$fs"
1203 fi
1204 else
1205 is_shared $fs
1206 if (($? != 0)); then
1207 log_must $SHARE -F nfs $fs
1208 fi
1209 fi
1210
1211 return 0
1212 }
1213
1214 #
1215 # Helper function to unshare a NFS mountpoint.
1216 #
1217 function unshare_nfs #fs
1218 {
1219 typeset fs=$1
1220
1221 if is_linux; then
1222 is_shared $fs
1223 if (($? == 0)); then
1224 log_must $UNSHARE -u "*:$fs"
1225 fi
1226 else
1227 is_shared $fs
1228 if (($? == 0)); then
1229 log_must $UNSHARE -F nfs $fs
1230 fi
1231 fi
1232
1233 return 0
1234 }
1235
1236 #
1237 # Helper function to show NFS shares.
1238 #
1239 function showshares_nfs
1240 {
1241 if is_linux; then
1242 $SHARE -v
1243 else
1244 $SHARE -F nfs
1245 fi
1246
1247 return 0
1248 }
1249
1250 #
1251 # Helper function to show SMB shares.
1252 #
1253 function showshares_smb
1254 {
1255 if is_linux; then
1256 $NET usershare list
1257 else
1258 $SHARE -F smb
1259 fi
1260
1261 return 0
1262 }
1263
1264 #
1265 # Check the NFS server status and bring it online if needed.
1266 #
1267 function setup_nfs_server
1268 {
1269 # Cannot share directory in non-global zone.
1270 #
1271 if ! is_global_zone; then
1272 log_note "Cannot trigger NFS server by sharing in LZ."
1273 return
1274 fi
1275
1276 if is_linux; then
1277 log_note "NFS server must be started prior to running the test framework."
1278 return
1279 fi
1280
1281 typeset nfs_fmri="svc:/network/nfs/server:default"
1282 if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
1283 #
1284 # Only a real share operation can bring the NFS server
1285 # online permanently.
1286 #
1287 typeset dummy=/tmp/dummy
1288
1289 if [[ -d $dummy ]]; then
1290 log_must $RM -rf $dummy
1291 fi
1292
1293 log_must $MKDIR $dummy
1294 log_must $SHARE $dummy
1295
1296 #
1297 # Wait for the fmri's status to reach its final state. While in
1298 # transition, an asterisk (*) is appended to the instance state,
1299 # and unsharing would revert the status to 'DIS' again.
1300 #
1301 # Wait at least 1 second.
1302 #
1303 log_must $SLEEP 1
1304 timeout=10
1305 while [[ timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
1306 do
1307 log_must $SLEEP 1
1308
1309 ((timeout -= 1))
1310 done
1311
1312 log_must $UNSHARE $dummy
1313 log_must $RM -rf $dummy
1314 fi
1315
1316 log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
1317 }
1318
1319 #
1320 # To verify whether calling process is in global zone
1321 #
1322 # Return 0 if in global zone, 1 in non-global zone
1323 #
1324 function is_global_zone
1325 {
1326 typeset cur_zone=$($ZONENAME 2>/dev/null)
1327 if [[ $cur_zone != "global" ]]; then
1328 return 1
1329 fi
1330 return 0
1331 }
1332
1333 #
1334 # Verify whether test is permitted to run from
1335 # global zone, local zone, or both
1336 #
1337 # $1 zone limit, could be "global", "local", or "both"(no limit)
1338 #
1339 # Return 0 if permitted, otherwise exit with log_unsupported
1340 #
1341 function verify_runnable # zone limit
1342 {
1343 typeset limit=$1
1344
1345 [[ -z $limit ]] && return 0
1346
1347 if is_global_zone ; then
1348 case $limit in
1349 global|both)
1350 ;;
1351 local) log_unsupported "Test is unable to run from "\
1352 "global zone."
1353 ;;
1354 *) log_note "Warning: unknown limit $limit - " \
1355 "use both."
1356 ;;
1357 esac
1358 else
1359 case $limit in
1360 local|both)
1361 ;;
1362 global) log_unsupported "Test is unable to run from "\
1363 "local zone."
1364 ;;
1365 *) log_note "Warning: unknown limit $limit - " \
1366 "use both."
1367 ;;
1368 esac
1369
1370 reexport_pool
1371 fi
1372
1373 return 0
1374 }
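
#
# Usage sketch (illustrative): a test script normally calls
# verify_runnable near the top, before doing any work, so that tests
# restricted to one zone type are reported as unsupported elsewhere.
#
#	verify_runnable "global"
#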
1375
1376 # Return 0 if created successfully or the pool exists; $? otherwise
1377 # Note: In local zones, this function should return 0 silently.
1378 #
1379 # $1 - pool name
1380 # $2-n - [keyword] devs_list
1381
1382 function create_pool #pool devs_list
1383 {
1384 typeset pool=${1%%/*}
1385
1386 shift
1387
1388 if [[ -z $pool ]]; then
1389 log_note "Missing pool name."
1390 return 1
1391 fi
1392
1393 if poolexists $pool ; then
1394 destroy_pool $pool
1395 fi
1396
1397 if is_global_zone ; then
1398 [[ -d /$pool ]] && $RM -rf /$pool
1399 log_must $ZPOOL create -f $pool $@
1400 fi
1401
1402 return 0
1403 }
1404
1405 # Return 0 if destroyed successfully; 1 otherwise
1406 # Note: In local zones, this function should return 0 silently.
1407 #
1408 # $1 - pool name
1409 # Destroy pool with the given parameters.
1410
1411 function destroy_pool #pool
1412 {
1413 typeset pool=${1%%/*}
1414 typeset mtpt
1415
1416 if [[ -z $pool ]]; then
1417 log_note "No pool name given."
1418 return 1
1419 fi
1420
1421 if is_global_zone ; then
1422 if poolexists "$pool" ; then
1423 mtpt=$(get_prop mountpoint "$pool")
1424
1425 # At times, syseventd activity can cause attempts to
1426 # destroy a pool to fail with EBUSY. We retry a few
1427 # times allowing failures before requiring the destroy
1428 # to succeed.
1429 typeset -i wait_time=10 ret=1 count=0
1430 must=""
1431 while [[ $ret -ne 0 ]]; do
1432 $must $ZPOOL destroy -f $pool
1433 ret=$?
1434 [[ $ret -eq 0 ]] && break
1435 log_note "zpool destroy failed with $ret"
1436 [[ count++ -ge 7 ]] && must=log_must
1437 $SLEEP $wait_time
1438 done
1439
1440 [[ -d $mtpt ]] && \
1441 log_must $RM -rf $mtpt
1442 else
1443 log_note "Pool does not exist. ($pool)"
1444 return 1
1445 fi
1446 fi
1447
1448 return 0
1449 }
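
#
# Usage sketch (illustrative): create a scratch pool on the test disks,
# exercise it, then tear it down using the retry logic in destroy_pool().
#
#	create_pool $TESTPOOL $DISKS
#	...
#	destroy_pool $TESTPOOL
#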
1450
1451 #
1452 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1453 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1454 # and a zvol device to the zone.
1455 #
1456 # $1 zone name
1457 # $2 zone root directory prefix
1458 # $3 zone ip
1459 #
1460 function zfs_zones_setup #zone_name zone_root zone_ip
1461 {
1462 typeset zone_name=${1:-$(hostname)-z}
1463 typeset zone_root=${2:-"/zone_root"}
1464 typeset zone_ip=${3:-"10.1.1.10"}
1465 typeset prefix_ctr=$ZONE_CTR
1466 typeset pool_name=$ZONE_POOL
1467 typeset -i cntctr=5
1468 typeset -i i=0
1469
1470 # Create a pool and 5 containers within it
1471 #
1472 [[ -d /$pool_name ]] && $RM -rf /$pool_name
1473 log_must $ZPOOL create -f $pool_name $DISKS
1474 while ((i < cntctr)); do
1475 log_must $ZFS create $pool_name/$prefix_ctr$i
1476 ((i += 1))
1477 done
1478
1479 # create a zvol
1480 log_must $ZFS create -V 1g $pool_name/zone_zvol
1481 block_device_wait
1482
1483 #
1484 # If the current system supports slog, add a slog device to the pool
1485 #
1486 if verify_slog_support ; then
1487 typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
1488 log_must $MKFILE 100M $sdevs
1489 log_must $ZPOOL add $pool_name log mirror $sdevs
1490 fi
1491
1492 # this isn't supported just yet.
1493 # Create a filesystem. In order to add this to
1494 # the zone, it must have its mountpoint set to 'legacy'
1495 # log_must $ZFS create $pool_name/zfs_filesystem
1496 # log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem
1497
1498 [[ -d $zone_root ]] && \
1499 log_must $RM -rf $zone_root/$zone_name
1500 [[ ! -d $zone_root ]] && \
1501 log_must $MKDIR -p -m 0700 $zone_root/$zone_name
1502
1503 # Create zone configure file and configure the zone
1504 #
1505 typeset zone_conf=/tmp/zone_conf.$$
1506 $ECHO "create" > $zone_conf
1507 $ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
1508 $ECHO "set autoboot=true" >> $zone_conf
1509 i=0
1510 while ((i < cntctr)); do
1511 $ECHO "add dataset" >> $zone_conf
1512 $ECHO "set name=$pool_name/$prefix_ctr$i" >> \
1513 $zone_conf
1514 $ECHO "end" >> $zone_conf
1515 ((i += 1))
1516 done
1517
1518 # add our zvol to the zone
1519 $ECHO "add device" >> $zone_conf
1520 $ECHO "set match=$ZVOL_DEVDIR/$pool_name/zone_zvol" >> $zone_conf
1521 $ECHO "end" >> $zone_conf
1522
1523 # add a corresponding zvol rdsk to the zone
1524 $ECHO "add device" >> $zone_conf
1525 $ECHO "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1526 $ECHO "end" >> $zone_conf
1527
1528 # once it's supported, we'll add our filesystem to the zone
1529 # $ECHO "add fs" >> $zone_conf
1530 # $ECHO "set type=zfs" >> $zone_conf
1531 # $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
1532 # $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
1533 # $ECHO "end" >> $zone_conf
1534
1535 $ECHO "verify" >> $zone_conf
1536 $ECHO "commit" >> $zone_conf
1537 log_must $ZONECFG -z $zone_name -f $zone_conf
1538 log_must $RM -f $zone_conf
1539
1540 # Install the zone
1541 $ZONEADM -z $zone_name install
1542 if (($? == 0)); then
1543 log_note "SUCCESS: $ZONEADM -z $zone_name install"
1544 else
1545 log_fail "FAIL: $ZONEADM -z $zone_name install"
1546 fi
1547
1548 # Install sysidcfg file
1549 #
1550 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1551 $ECHO "system_locale=C" > $sysidcfg
1552 $ECHO "terminal=dtterm" >> $sysidcfg
1553 $ECHO "network_interface=primary {" >> $sysidcfg
1554 $ECHO "hostname=$zone_name" >> $sysidcfg
1555 $ECHO "}" >> $sysidcfg
1556 $ECHO "name_service=NONE" >> $sysidcfg
1557 $ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
1558 $ECHO "security_policy=NONE" >> $sysidcfg
1559 $ECHO "timezone=US/Eastern" >> $sysidcfg
1560
1561 # Boot this zone
1562 log_must $ZONEADM -z $zone_name boot
1563 }
1564
1565 #
1566 # Reexport TESTPOOL & TESTPOOL(1-4)
1567 #
1568 function reexport_pool
1569 {
1570 typeset -i cntctr=5
1571 typeset -i i=0
1572
1573 while ((i < cntctr)); do
1574 if ((i == 0)); then
1575 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1576 if ! ismounted $TESTPOOL; then
1577 log_must $ZFS mount $TESTPOOL
1578 fi
1579 else
1580 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1581 if eval ! ismounted \$TESTPOOL$i; then
1582 log_must eval $ZFS mount \$TESTPOOL$i
1583 fi
1584 fi
1585 ((i += 1))
1586 done
1587 }
1588
1589 #
1590 # Verify a given disk is online or offline
1591 #
1592 # Return 0 if the pool/disk matches the expected state, 1 otherwise
1593 #
1594 function check_state # pool disk state{online,offline}
1595 {
1596 typeset pool=$1
1597 typeset disk=${2#$DEV_DSKDIR/}
1598 typeset state=$3
1599
1600 $ZPOOL status -v $pool | grep "$disk" \
1601 | grep -i "$state" > /dev/null 2>&1
1602
1603 return $?
1604 }
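
#
# Usage sketch (illustrative): after offlining a device, confirm the pool
# reports the new state. $DISK is a placeholder for a pool member.
#
#	log_must $ZPOOL offline $TESTPOOL $DISK
#	log_must check_state $TESTPOOL $DISK "offline"
#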
1605
1606 #
1607 # Get the mountpoint of a snapshot
1608 # A snapshot uses <mp_filesystem>/.zfs/snapshot/<snap>
1609 # as its mountpoint
1610 #
1611 function snapshot_mountpoint
1612 {
1613 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1614
1615 if [[ $dataset != *@* ]]; then
1616 log_fail "Invalid snapshot name '$dataset'."
1617 fi
1618
1619 typeset fs=${dataset%@*}
1620 typeset snap=${dataset#*@}
1621
1622 if [[ -z $fs || -z $snap ]]; then
1623 log_fail "Invalid snapshot name '$dataset'."
1624 fi
1625
1626 $ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1627 }
1628
1629 #
1630 # Given a pool and file system, this function will verify the file system
1631 # using the zdb internal tool. Note that the pool is exported and imported
1632 # to ensure it has consistent state.
1633 #
1634 function verify_filesys # pool filesystem dir
1635 {
1636 typeset pool="$1"
1637 typeset filesys="$2"
1638 typeset zdbout="/tmp/zdbout.$$"
1639
1640 shift
1641 shift
1642 typeset dirs=$@
1643 typeset search_path=""
1644
1645 log_note "Calling $ZDB to verify filesystem '$filesys'"
1646 $ZFS unmount -a > /dev/null 2>&1
1647 log_must $ZPOOL export $pool
1648
1649 if [[ -n $dirs ]] ; then
1650 for dir in $dirs ; do
1651 search_path="$search_path -d $dir"
1652 done
1653 fi
1654
1655 log_must $ZPOOL import $search_path $pool
1656
1657 $ZDB -cudi $filesys > $zdbout 2>&1
1658 if [[ $? != 0 ]]; then
1659 log_note "Output: $ZDB -cudi $filesys"
1660 $CAT $zdbout
1661 log_fail "$ZDB detected errors with: '$filesys'"
1662 fi
1663
1664 log_must $ZFS mount -a
1665 log_must $RM -rf $zdbout
1666 }
1667
1668 #
1669 # Given a pool, list all disks in the pool
1670 #
1671 function get_disklist # pool
1672 {
1673 typeset disklist=""
1674
1675 disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \
1676 $GREP -v "\-\-\-\-\-" | \
1677 $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1678
1679 $ECHO $disklist
1680 }
1681
1682 #
1683 # Given a pool, list all disks in the pool with their full
1684 # path (like "/dev/sda" instead of "sda").
1685 #
1686 function get_disklist_fullpath # pool
1687 {
1688 args="-P $1"
1689 get_disklist $args
1690 }
1691
1692
1693
1694 # /**
1695 # This function kills a given list of processes after a time period. We use
1696 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1697 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1698 # would be listed as FAIL, which we don't want : we're happy with stress tests
1699 # running for a certain amount of time, then finishing.
1700 #
1701 # @param $1 the time in seconds after which we should terminate these processes
1702 # @param $2..$n the processes we wish to terminate.
1703 # */
1704 function stress_timeout
1705 {
1706 typeset -i TIMEOUT=$1
1707 shift
1708 typeset cpids="$@"
1709
1710 log_note "Waiting for child processes($cpids). " \
1711 "It could last dozens of minutes, please be patient ..."
1712 log_must $SLEEP $TIMEOUT
1713
1714 log_note "Killing child processes after ${TIMEOUT} stress timeout."
1715 typeset pid
1716 for pid in $cpids; do
1717 $PS -p $pid > /dev/null 2>&1
1718 if (($? == 0)); then
1719 log_must $KILL -USR1 $pid
1720 fi
1721 done
1722 }
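
#
# Usage sketch (illustrative): start background workloads, then let
# stress_timeout() reap them after a fixed run time. "run_workload" is a
# placeholder for whatever stress command a test launches, and the 300
# second budget is an arbitrary example value.
#
#	run_workload $TESTDIR/file1 &
#	typeset pids=$!
#	run_workload $TESTDIR/file2 &
#	pids="$pids $!"
#	stress_timeout 300 $pids
#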
1723
1724 #
1725 # Verify a given hotspare disk is inuse or avail
1726 #
1727 # Return 0 if the pool/disk matches the expected state, 1 otherwise
1728 #
1729 function check_hotspare_state # pool disk state{inuse,avail}
1730 {
1731 typeset pool=$1
1732 typeset disk=${2#$DEV_DSKDIR/}
1733 typeset state=$3
1734
1735 cur_state=$(get_device_state $pool $disk "spares")
1736
1737 if [[ $state != ${cur_state} ]]; then
1738 return 1
1739 fi
1740 return 0
1741 }
1742
1743 #
1744 # Verify a given slog disk is in the expected state
1745 #
1746 # Return 0 if the pool/disk matches the expected state, 1 otherwise
1747 #
1748 function check_slog_state # pool disk state{online,offline,unavail}
1749 {
1750 typeset pool=$1
1751 typeset disk=${2#$DEV_DSKDIR/}
1752 typeset state=$3
1753
1754 cur_state=$(get_device_state $pool $disk "logs")
1755
1756 if [[ $state != ${cur_state} ]]; then
1757 return 1
1758 fi
1759 return 0
1760 }
1761
1762 #
1763 # Verify a given vdev disk is in the expected state
1764 #
1765 # Return 0 if the pool/disk matches the expected state, 1 otherwise
1766 #
1767 function check_vdev_state # pool disk state{online,offline,unavail}
1768 {
1769 typeset pool=$1
1770 typeset disk=${2#$DEV_DSKDIR/}
1771 typeset state=$3
1772
1773 cur_state=$(get_device_state $pool $disk)
1774
1775 if [[ $state != ${cur_state} ]]; then
1776 return 1
1777 fi
1778 return 0
1779 }
1780
1781 #
1782 # Check the output of 'zpool status -v <pool>',
1783 # and see if the content of <token> contains the specified <keyword>.
1784 #
1785 # Return 0 if it does, 1 otherwise
1786 #
1787 function check_pool_status # pool token keyword
1788 {
1789 typeset pool=$1
1790 typeset token=$2
1791 typeset keyword=$3
1792
1793 $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
1794 ($1==token) {print $0}' \
1795 | $GREP -i "$keyword" > /dev/null 2>&1
1796
1797 return $?
1798 }
1799
1800 #
1801 # The following 5 functions are instances of check_pool_status()
1802 # is_pool_resilvering - check if a resilver is in progress on the pool
1803 # is_pool_resilvered - check if a resilver has completed on the pool
1804 # is_pool_scrubbing - check if a scrub is in progress on the pool
1805 # is_pool_scrubbed - check if a scrub has completed on the pool
1806 # is_pool_scrub_stopped - check if a scrub has been stopped on the pool
1807 #
1808 function is_pool_resilvering #pool
1809 {
1810 check_pool_status "$1" "scan" "resilver in progress since "
1811 return $?
1812 }
1813
1814 function is_pool_resilvered #pool
1815 {
1816 check_pool_status "$1" "scan" "resilvered "
1817 return $?
1818 }
1819
1820 function is_pool_scrubbing #pool
1821 {
1822 check_pool_status "$1" "scan" "scrub in progress since "
1823 return $?
1824 }
1825
1826 function is_pool_scrubbed #pool
1827 {
1828 check_pool_status "$1" "scan" "scrub repaired"
1829 return $?
1830 }
1831
1832 function is_pool_scrub_stopped #pool
1833 {
1834 check_pool_status "$1" "scan" "scrub canceled"
1835 return $?
1836 }
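
#
# Usage sketch (illustrative): kick off a scrub and poll with the wrappers
# above until it is no longer reported as in progress.
#
#	log_must $ZPOOL scrub $TESTPOOL
#	while is_pool_scrubbing $TESTPOOL; do
#		$SLEEP 1
#	done
#	log_must is_pool_scrubbed $TESTPOOL
#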
1837
1838 #
1839 # Use create_pool()/destroy_pool() to clean up the information on
1840 # the given disks to avoid slice overlap.
1841 #
1842 function cleanup_devices #vdevs
1843 {
1844 typeset pool="foopool$$"
1845
1846 if poolexists $pool ; then
1847 destroy_pool $pool
1848 fi
1849
1850 create_pool $pool $@
1851 destroy_pool $pool
1852
1853 return 0
1854 }
1855
1856 #
1857 # Verify the rsh connectivity to each remote host in RHOSTS.
1858 #
1859 # Return 0 if remote host is accessible; otherwise 1.
1860 # $1 remote host name
1861 # $2 username
1862 #
1863 function verify_rsh_connect #rhost, username
1864 {
1865 typeset rhost=$1
1866 typeset username=$2
1867 typeset rsh_cmd="$RSH -n"
1868 typeset cur_user=
1869
1870 $GETENT hosts $rhost >/dev/null 2>&1
1871 if (($? != 0)); then
1872 log_note "$rhost cannot be found from" \
1873 "administrative database."
1874 return 1
1875 fi
1876
1877 $PING $rhost 3 >/dev/null 2>&1
1878 if (($? != 0)); then
1879 log_note "$rhost is not reachable."
1880 return 1
1881 fi
1882
1883 if ((${#username} != 0)); then
1884 rsh_cmd="$rsh_cmd -l $username"
1885 cur_user="given user \"$username\""
1886 else
1887 cur_user="current user \"`$LOGNAME`\""
1888 fi
1889
1890 if ! $rsh_cmd $rhost $TRUE; then
1891 log_note "$RSH to $rhost is not accessible" \
1892 "with $cur_user."
1893 return 1
1894 fi
1895
1896 return 0
1897 }
1898
1899 #
1900 # Verify the remote host connection via rsh after rebooting
1901 # $1 remote host
1902 #
1903 function verify_remote
1904 {
1905 rhost=$1
1906
1907 #
1908 # The following loop waits for the remote system to reboot.
1909 # Each iteration waits for 150 seconds. There are
1910 # 5 iterations in total, so the total timeout value will
1911 # be 12.5 minutes for the system to reboot. This number
1912 # is approximate.
1913 #
1914 typeset -i count=0
1915 while ! verify_rsh_connect $rhost; do
1916 sleep 150
1917 ((count = count + 1))
1918 if ((count > 5)); then
1919 return 1
1920 fi
1921 done
1922 return 0
1923 }
1924
1925 #
1926 # Replacement function for /usr/bin/rsh. This function wraps
1927 # /usr/bin/rsh and also returns the exit status of the
1928 # last remote command.
1929 #
1930 # $1 username passed down to the -l option of /usr/bin/rsh
1931 # $2 remote machine hostname
1932 # $3... command string
1933 #
1934
1935 function rsh_status
1936 {
1937 typeset ruser=$1
1938 typeset rhost=$2
1939 typeset -i ret=0
1940 typeset cmd_str=""
1941 typeset rsh_str=""
1942
1943 shift; shift
1944 cmd_str="$@"
1945
1946 err_file=/tmp/${rhost}.$$.err
1947 if ((${#ruser} == 0)); then
1948 rsh_str="$RSH -n"
1949 else
1950 rsh_str="$RSH -n -l $ruser"
1951 fi
1952
1953 $rsh_str $rhost /bin/ksh -c "'$cmd_str; \
1954 print -u 2 \"status=\$?\"'" \
1955 >/dev/null 2>$err_file
1956 ret=$?
1957 if (($ret != 0)); then
1958 $CAT $err_file
1959 $RM -f $std_file $err_file
1960 log_fail "$RSH itself failed with exit code $ret..."
1961 fi
1962
1963 ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
1964 $CUT -d= -f2)
1965 (($ret != 0)) && $CAT $err_file >&2
1966
1967 $RM -f $err_file >/dev/null 2>&1
1968 return $ret
1969 }
1970
1971 #
1972 # Get the SUNWstc-fs-zfs package installation path in a remote host
1973 # $1 remote host name
1974 #
1975 function get_remote_pkgpath
1976 {
1977 typeset rhost=$1
1978 typeset pkgpath=""
1979
1980 pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
1981 $CUT -d: -f2")
1982
1983 $ECHO $pkgpath
1984 }
1985
1986 #/**
1987 # A function to find free disks on a system, or among the disks given
1988 # as parameters. It works by excluding disks that are in use
1989 # as swap or dump devices, as well as disks listed in /etc/vfstab
1990 #
1991 # $@ given disks to find which are free, default is all disks in
1992 # the test system
1993 #
1994 # @return a string containing the list of available disks
1995 #*/
1996 function find_disks
1997 {
1998 # Trust provided list, no attempt is made to locate unused devices.
1999 if is_linux; then
2000 $ECHO "$@"
2001 return
2002 fi
2003
2004
2005 sfi=/tmp/swaplist.$$
2006 dmpi=/tmp/dumpdev.$$
2007 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2008
2009 $SWAP -l > $sfi
2010 $DUMPADM > $dmpi 2>/dev/null
2011
2012 # write an awk script that can process the output of format
2013 # to produce a list of disks we know about. Note that we have
2014 # to escape "$2" so that the shell doesn't interpret it while
2015 # we're creating the awk script.
2016 # -------------------
2017 $CAT > /tmp/find_disks.awk <<EOF
2018 #!/bin/nawk -f
2019 BEGIN { FS="."; }
2020
2021 /^Specify disk/{
2022 searchdisks=0;
2023 }
2024
2025 {
2026 if (searchdisks && \$2 !~ "^$"){
2027 split(\$2,arr," ");
2028 print arr[1];
2029 }
2030 }
2031
2032 /^AVAILABLE DISK SELECTIONS:/{
2033 searchdisks=1;
2034 }
2035 EOF
2036 #---------------------
2037
2038 $CHMOD 755 /tmp/find_disks.awk
2039 disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
2040 $RM /tmp/find_disks.awk
2041
2042 unused=""
2043 for disk in $disks; do
2044 # Check for mounted
2045 $GREP "${disk}[sp]" /etc/mnttab >/dev/null
2046 (($? == 0)) && continue
2047 # Check for swap
2048 $GREP "${disk}[sp]" $sfi >/dev/null
2049 (($? == 0)) && continue
2050 # check for dump device
2051 $GREP "${disk}[sp]" $dmpi >/dev/null
2052 (($? == 0)) && continue
2053 # check to see if this disk hasn't been explicitly excluded
2054 # by a user-set environment variable
2055 $ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
2056 (($? == 0)) && continue
2057 unused_candidates="$unused_candidates $disk"
2058 done
2059 $RM $sfi
2060 $RM $dmpi
2061
2062 # now just check to see if those disks do actually exist
2063 # by looking for a device pointing to the first slice in
2064 # each case. limit the number to max_finddisksnum
2065 count=0
2066 for disk in $unused_candidates; do
2067 if [ -b $DEV_DSKDIR/${disk}s0 ]; then
2068 if [ $count -lt $max_finddisksnum ]; then
2069 unused="$unused $disk"
2070 # do not impose limit if $@ is provided
2071 [[ -z $@ ]] && ((count = count + 1))
2072 fi
2073 fi
2074 done
2075
2076 # finally, return our disk list
2077 $ECHO $unused
2078 }
2079
2080 #
2081 # Add specified user to specified group
2082 #
2083 # $1 group name
2084 # $2 user name
2085 # $3 base of the homedir (optional)
2086 #
2087 function add_user #<group_name> <user_name> <basedir>
2088 {
2089 typeset gname=$1
2090 typeset uname=$2
2091 typeset basedir=${3:-"/var/tmp"}
2092
2093 if ((${#gname} == 0 || ${#uname} == 0)); then
2094 log_fail "group name or user name are not defined."
2095 fi
2096
2097 log_must $USERADD -g $gname -d $basedir/$uname -m $uname
2098
2099 # Add new users to the same group as the command line utilities.
2100 # This allows them to be run out of the original user's home
2101 # directory as long as it is permissioned to be group readable.
2102 if is_linux; then
2103 cmd_group=$(stat --format="%G" $ZFS)
2104 log_must $USERMOD -a -G $cmd_group $uname
2105 fi
2106
2107 return 0
2108 }
2109
2110 #
2111 # Delete the specified user.
2112 #
2113 # $1 login name
2114 # $2 base of the homedir (optional)
2115 #
2116 function del_user #<logname> <basedir>
2117 {
2118 typeset user=$1
2119 typeset basedir=${2:-"/var/tmp"}
2120
2121 if ((${#user} == 0)); then
2122 log_fail "login name is necessary."
2123 fi
2124
2125 if $ID $user > /dev/null 2>&1; then
2126 log_must $USERDEL $user
2127 fi
2128
2129 [[ -d $basedir/$user ]] && $RM -fr $basedir/$user
2130
2131 return 0
2132 }
2133
2134 #
2135 # Select valid gid and create specified group.
2136 #
2137 # $1 group name
2138 #
2139 function add_group #<group_name>
2140 {
2141 typeset group=$1
2142
2143 if ((${#group} == 0)); then
2144 log_fail "group name is necessary."
2145 fi
2146
2147 # Assign 100 as the base gid, a larger value is selected for
2148 # Linux because for many distributions 1000 and under are reserved.
2149 if is_linux; then
2150 while true; do
2151 $GROUPADD $group > /dev/null 2>&1
2152 typeset -i ret=$?
2153 case $ret in
2154 0) return 0 ;;
2155 *) return 1 ;;
2156 esac
2157 done
2158 else
2159 typeset -i gid=100
2160
2161 while true; do
2162 $GROUPADD -g $gid $group > /dev/null 2>&1
2163 typeset -i ret=$?
2164 case $ret in
2165 0) return 0 ;;
2166 # The gid is not unique
2167 4) ((gid += 1)) ;;
2168 *) return 1 ;;
2169 esac
2170 done
2171 fi
2172 }
2173
2174 #
2175 # Delete the specified group.
2176 #
2177 # $1 group name
2178 #
2179 function del_group #<group_name>
2180 {
2181 typeset grp=$1
2182 if ((${#grp} == 0)); then
2183 log_fail "group name is necessary."
2184 fi
2185
2186 if is_linux; then
2187 $GETENT group $grp > /dev/null 2>&1
2188 typeset -i ret=$?
2189 case $ret in
2190 # Group does not exist.
2191 2) return 0 ;;
2192 # Name already exists as a group name
2193 0) log_must $GROUPDEL $grp ;;
2194 *) return 1 ;;
2195 esac
2196 else
2197 $GROUPMOD -n $grp $grp > /dev/null 2>&1
2198 typeset -i ret=$?
2199 case $ret in
2200 # Group does not exist.
2201 6) return 0 ;;
2202 # Name already exists as a group name
2203 9) log_must $GROUPDEL $grp ;;
2204 *) return 1 ;;
2205 esac
2206 fi
2207
2208 return 0
2209 }
2210
2211 #
2212 # This function will return true if it's safe to destroy the pool passed
2213 # as argument 1. It checks for pools based on zvols and files, and also
2214 # files contained in a pool that may have a different mountpoint.
2215 #
2216 function safe_to_destroy_pool { # $1 the pool name
2217
2218 typeset pool=""
2219 typeset DONT_DESTROY=""
2220
2221 # We check that by deleting the $1 pool, we're not
2222 # going to pull the rug out from other pools. Do this
2223 # by looking at all other pools, ensuring that they
2224 # aren't built from files or zvols contained in this pool.
2225
2226 for pool in $($ZPOOL list -H -o name)
2227 do
2228 ALTMOUNTPOOL=""
2229
2230 # this is a list of the top-level directories in the paths of
2231 # the files that the pool is based on
2232 FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
2233 $AWK '{print $1}')
2234
2235 # this is a list of the zvols that make up the pool
2236 ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "$ZVOL_DEVDIR/$1$" \
2237 | $AWK '{print $1}')
2238
2239 # also want to determine if it's a file-based pool using an
2240 # alternate mountpoint...
2241 POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
2242 $GREP / | $AWK '{print $1}' | \
2243 $AWK -F/ '{print $2}' | $GREP -v "dev")
2244
2245 for pooldir in $POOL_FILE_DIRS
2246 do
2247 OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
2248 $GREP "${pooldir}$" | $AWK '{print $1}')
2249
2250 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2251 done
2252
2253
2254 if [ ! -z "$ZVOLPOOL" ]
2255 then
2256 DONT_DESTROY="true"
2257 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2258 fi
2259
2260 if [ ! -z "$FILEPOOL" ]
2261 then
2262 DONT_DESTROY="true"
2263 log_note "Pool $pool is built from $FILEPOOL on $1"
2264 fi
2265
2266 if [ ! -z "$ALTMOUNTPOOL" ]
2267 then
2268 DONT_DESTROY="true"
2269 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2270 fi
2271 done
2272
2273 if [ -z "${DONT_DESTROY}" ]
2274 then
2275 return 0
2276 else
2277 log_note "Warning: it is not safe to destroy $1!"
2278 return 1
2279 fi
2280 }
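#
# Example usage (illustrative sketch; guards a cleanup path so that a pool
# other pools depend on is not torn down):
#
#	if safe_to_destroy_pool $TESTPOOL; then
#		log_must $ZPOOL destroy -f $TESTPOOL
#	fi
#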
2281
2282 #
2283 # Get the available ZFS compression options
2284 # $1 option type zfs_set|zfs_compress
2285 #
2286 function get_compress_opts
2287 {
2288 typeset COMPRESS_OPTS
2289 typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2290 gzip-6 gzip-7 gzip-8 gzip-9"
2291
2292 if [[ $1 == "zfs_compress" ]] ; then
2293 COMPRESS_OPTS="on lzjb"
2294 elif [[ $1 == "zfs_set" ]] ; then
2295 COMPRESS_OPTS="on off lzjb"
2296 fi
2297 typeset valid_opts="$COMPRESS_OPTS"
2298 $ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
2299 if [[ $? -eq 0 ]]; then
2300 valid_opts="$valid_opts $GZIP_OPTS"
2301 fi
2302 $ECHO "$valid_opts"
2303 }
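#
# Example usage (illustrative sketch; assumes the suite's default
# $TESTPOOL/$TESTFS dataset):
#
#	for opt in $(get_compress_opts zfs_set); do
#		log_must $ZFS set compression=$opt $TESTPOOL/$TESTFS
#	done
#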
2304
2305 #
2306 # Verify that the zfs operation with the -p option works as expected
2307 # $1 operation, value could be create, clone or rename
2308 # $2 dataset type, value could be fs or vol
2309 # $3 dataset name
2310 # $4 new dataset name
2311 #
2312 function verify_opt_p_ops
2313 {
2314 typeset ops=$1
2315 typeset datatype=$2
2316 typeset dataset=$3
2317 typeset newdataset=$4
2318
2319 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2320 log_fail "$datatype is not supported."
2321 fi
2322
2323 # check parameters accordingly
2324 case $ops in
2325 create)
2326 newdataset=$dataset
2327 dataset=""
2328 if [[ $datatype == "vol" ]]; then
2329 ops="create -V $VOLSIZE"
2330 fi
2331 ;;
2332 clone)
2333 if [[ -z $newdataset ]]; then
2334 log_fail "newdataset should not be empty" \
2335 "when ops is $ops."
2336 fi
2337 log_must datasetexists $dataset
2338 log_must snapexists $dataset
2339 ;;
2340 rename)
2341 if [[ -z $newdataset ]]; then
2342 log_fail "newdataset should not be empty" \
2343 "when ops is $ops."
2344 fi
2345 log_must datasetexists $dataset
2346 log_mustnot snapexists $dataset
2347 ;;
2348 *)
2349 log_fail "$ops is not supported."
2350 ;;
2351 esac
2352
2353 # make sure the parent filesystem does not exist
2354 if datasetexists ${newdataset%/*} ; then
2355 log_must $ZFS destroy -rRf ${newdataset%/*}
2356 fi
2357
2358 # without -p option, operation will fail
2359 log_mustnot $ZFS $ops $dataset $newdataset
2360 log_mustnot datasetexists $newdataset ${newdataset%/*}
2361
2362 # with -p option, operation should succeed
2363 log_must $ZFS $ops -p $dataset $newdataset
2364 block_device_wait
2365
2366 if ! datasetexists $newdataset ; then
2367 log_fail "-p option does not work for $ops"
2368 fi
2369
2370 # when $ops is create or clone, redoing the operation should still succeed
2371 if [[ $ops != "rename" ]]; then
2372 log_must $ZFS $ops -p $dataset $newdataset
2373 fi
2374
2375 return 0
2376 }
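#
# Example usage (illustrative sketch; the dataset names are hypothetical):
#
#	verify_opt_p_ops create fs $TESTPOOL/dir1/dir2/newfs
#	verify_opt_p_ops create vol $TESTPOOL/dir1/dir2/newvol
#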
2377
2378 #
2379 # Get configuration of pool
2380 # $1 pool name
2381 # $2 config name
2382 #
2383 function get_config
2384 {
2385 typeset pool=$1
2386 typeset config=$2
2387 typeset alt_root
2388
2389 if ! poolexists "$pool" ; then
2390 return 1
2391 fi
2392 alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
2393 if [[ $alt_root == "-" ]]; then
2394 value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
2395 '{print $2}')
2396 else
2397 value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
2398 '{print $2}')
2399 fi
2400 if [[ -n $value ]] ; then
2401 value=${value#\'}
2402 value=${value%\'}
2403 fi
2404 echo $value
2405
2406 return 0
2407 }
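#
# Example usage (illustrative sketch; "pool_guid" is one of the fields
# reported by "zdb -C"):
#
#	guid=$(get_config $TESTPOOL pool_guid)
#	[[ -n $guid ]] || log_fail "cannot read pool_guid of $TESTPOOL"
#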
2408
2409 #
2410 # Private function. Randomly select one of the items from the arguments.
2411 #
2412 # $1 count
2413 # $2-n string
2414 #
2415 function _random_get
2416 {
2417 typeset cnt=$1
2418 shift
2419
2420 typeset str="$@"
2421 typeset -i ind
2422 ((ind = RANDOM % cnt + 1))
2423
2424 typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
2425 $ECHO $ret
2426 }
2427
2428 #
2429 # Randomly select one item from the arguments; an empty ("none") result is also possible
2430 #
2431 function random_get_with_non
2432 {
2433 typeset -i cnt=$#
2434 ((cnt += 1))
2435
2436 _random_get "$cnt" "$@"
2437 }
2438
2439 #
2440 # Randomly select one item from the arguments; "none" is never returned
2441 #
2442 function random_get
2443 {
2444 _random_get "$#" "$@"
2445 }
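#
# Example usage (illustrative sketch; picks a random checksum property value,
# where random_get_with_non may also return an empty string meaning "none"):
#
#	ckprop=$(random_get on off fletcher2 fletcher4 sha256)
#	maybe_ckprop=$(random_get_with_non on off fletcher2 fletcher4 sha256)
#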
2446
2447 #
2448 # Detect whether the current system supports slog devices
2449 #
2450 function verify_slog_support
2451 {
2452 typeset dir=/tmp/disk.$$
2453 typeset pool=foo.$$
2454 typeset vdev=$dir/a
2455 typeset sdev=$dir/b
2456
2457 $MKDIR -p $dir
2458 $MKFILE 64M $vdev $sdev
2459
2460 typeset -i ret=0
2461 if ! $ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1; then
2462 ret=1
2463 fi
2464 $RM -r $dir
2465
2466 return $ret
2467 }
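#
# Example usage (illustrative sketch; assumes the logapi log_unsupported
# helper to skip the test):
#
#	verify_slog_support || log_unsupported "This system doesn't support slog"
#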
2468
2469 #
2470 # Generate a dataset name of the specified length
2471 # $1, the length of the name
2472 # $2, the base string to construct the name
2473 #
2474 function gen_dataset_name
2475 {
2476 typeset -i len=$1
2477 typeset basestr="$2"
2478 typeset -i baselen=${#basestr}
2479 typeset -i iter=0
2480 typeset l_name=""
2481
2482 if ((len % baselen == 0)); then
2483 ((iter = len / baselen))
2484 else
2485 ((iter = len / baselen + 1))
2486 fi
2487 while ((iter > 0)); do
2488 l_name="${l_name}$basestr"
2489
2490 ((iter -= 1))
2491 done
2492
2493 $ECHO $l_name
2494 }
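#
# Example (illustrative; the base string is repeated until the requested
# length is reached or exceeded):
#
#	$ gen_dataset_name 10 abc
#	abcabcabcabc
#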
2495
2496 #
2497 # Get cksum tuple of dataset
2498 # $1 dataset name
2499 #
2500 # sample zdb output:
2501 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2502 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2503 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2504 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2505 function datasetcksum
2506 {
2507 typeset cksum
2508 $SYNC
2509 cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
2510 | $AWK -F= '{print $7}')
2511 $ECHO $cksum
2512 }
2513
2514 #
2515 # Get cksum of file
2516 # $1 file path
2517 #
2518 function checksum
2519 {
2520 typeset cksum
2521 cksum=$($CKSUM $1 | $AWK '{print $1}')
2522 $ECHO $cksum
2523 }
2524
2525 #
2526 # Get the state of the given disk/slice from the specified section of the pool status
2527 #
2528 function get_device_state #pool disk field("", "spares","logs")
2529 {
2530 typeset pool=$1
2531 typeset disk=${2#$DEV_DSKDIR/}
2532 typeset field=${3:-$pool}
2533
2534 state=$($ZPOOL status -v "$pool" 2>/dev/null | \
2535 $NAWK -v device=$disk -v pool=$pool -v field=$field \
2536 'BEGIN {startconfig=0; startfield=0; }
2537 /config:/ {startconfig=1}
2538 (startconfig==1) && ($1==field) {startfield=1; next;}
2539 (startfield==1) && ($1==device) {print $2; exit;}
2540 (startfield==1) &&
2541 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2542 echo $state
2543 }
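#
# Example usage (illustrative sketch; $DISK1 is a hypothetical disk variable):
#
#	state=$(get_device_state $TESTPOOL $DISK1)
#	[[ $state == "ONLINE" ]] || log_fail "$DISK1 is $state, expected ONLINE"
#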
2544
2545
2546 #
2547 # print the filesystem type of the given directory
2548 #
2549 # $1 directory name
2550 #
2551 function get_fstype
2552 {
2553 typeset dir=$1
2554
2555 if [[ -z $dir ]]; then
2556 log_fail "Usage: get_fstype <directory>"
2557 fi
2558
2559 #
2560 # $ df -n /
2561 # / : ufs
2562 #
2563 $DF -n $dir | $AWK '{print $3}'
2564 }
2565
2566 #
2567 # Given a disk, label it with a VTOC label regardless of what label it had
2568 # $1 disk
2569 #
2570 function labelvtoc
2571 {
2572 typeset disk=$1
2573 if [[ -z $disk ]]; then
2574 log_fail "The disk name is unspecified."
2575 fi
2576 typeset label_file=/var/tmp/labelvtoc.$$
2577 typeset arch=$($UNAME -p)
2578
2579 if is_linux; then
2580 log_note "Currently unsupported by the test framework"
2581 return 1
2582 fi
2583
2584 if [[ $arch == "i386" ]]; then
2585 $ECHO "label" > $label_file
2586 $ECHO "0" >> $label_file
2587 $ECHO "" >> $label_file
2588 $ECHO "q" >> $label_file
2589 $ECHO "q" >> $label_file
2590
2591 $FDISK -B $disk >/dev/null 2>&1
2592 # wait a while for fdisk to finish
2593 $SLEEP 60
2594 elif [[ $arch == "sparc" ]]; then
2595 $ECHO "label" > $label_file
2596 $ECHO "0" >> $label_file
2597 $ECHO "" >> $label_file
2598 $ECHO "" >> $label_file
2599 $ECHO "" >> $label_file
2600 $ECHO "q" >> $label_file
2601 else
2602 log_fail "unknown arch type"
2603 fi
2604
2605 $FORMAT -e -s -d $disk -f $label_file
2606 typeset -i ret_val=$?
2607 $RM -f $label_file
2608 #
2609 # wait for the format to finish
2610 #
2611 $SLEEP 60
2612 if ((ret_val != 0)); then
2613 log_fail "unable to label $disk as VTOC."
2614 fi
2615
2616 return 0
2617 }
2618
2619 #
2620 # check if the system was installed as zfsroot or not
2621 # return: 0 if true, non-zero otherwise
2622 #
2623 function is_zfsroot
2624 {
2625 $DF -n / | $GREP zfs > /dev/null 2>&1
2626 return $?
2627 }
2628
2629 #
2630 # get the root filesystem name if it's a zfsroot system.
2631 #
2632 # return: root filesystem name
2633 function get_rootfs
2634 {
2635 typeset rootfs=""
2636 rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
2637 /etc/mnttab)
2638 if [[ -z "$rootfs" ]]; then
2639 log_fail "Can not get rootfs"
2640 fi
2641 $ZFS list $rootfs > /dev/null 2>&1
2642 if (($? == 0)); then
2643 $ECHO $rootfs
2644 else
2645 log_fail "This is not a zfsroot system."
2646 fi
2647 }
2648
2649 #
2650 # get the rootfs's pool name
2651 # return:
2652 # rootpool name
2653 #
2654 function get_rootpool
2655 {
2656 typeset rootfs=""
2657 typeset rootpool=""
2658 rootfs=$($AWK '{if ($2 == "/" && $3 =="zfs") print $1}' \
2659 /etc/mnttab)
2660 if [[ -z "$rootfs" ]]; then
2661 log_fail "Can not get rootpool"
2662 fi
2663 $ZFS list $rootfs > /dev/null 2>&1
2664 if (($? == 0)); then
2665 rootpool=$($ECHO $rootfs | $AWK -F/ '{print $1}')
2666 $ECHO $rootpool
2667 else
2668 log_fail "This is not a zfsroot system."
2669 fi
2670 }
2671
2672 #
2673 # Get a substring of the specified source string
2674 #
2675 # $1 source string
2676 # $2 start position. Count from 1
2677 # $3 offset
2678 #
2679 function get_substr #src_str pos offset
2680 {
2681 typeset pos offset
2682
2683 $ECHO $1 | \
2684 $NAWK -v pos=$2 -v offset=$3 '{print substr($0, pos, offset)}'
2685 }
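#
# Example (illustrative):
#
#	$ get_substr abcdefg 3 2
#	cd
#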
2686
2687 #
2688 # Check if the given device is a physical device
2689 #
2690 function is_physical_device #device
2691 {
2692 typeset device=${1#$DEV_DSKDIR}
2693 device=${device#$DEV_RDSKDIR}
2694
2695 if is_linux; then
2696 [[ -b "$DEV_DSKDIR/$device" ]] && \
2697 [[ -f /sys/module/loop/parameters/max_part ]]
2698 return $?
2699 else
2700 $ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
2701 return $?
2702 fi
2703 }
2704
2705 #
2706 # Check if the given device is a real device (i.e. a SCSI device)
2707 #
2708 function is_real_device #disk
2709 {
2710 typeset disk=$1
2711 [[ -z $disk ]] && log_fail "No argument for disk given."
2712
2713 if is_linux; then
2714 $LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP disk > /dev/null 2>&1
2715 return $?
2716 fi
2717 }
2718
2719 #
2720 # Check if the given device is a loop device
2721 #
2722 function is_loop_device #disk
2723 {
2724 typeset disk=$1
2725 [[ -z $disk ]] && log_fail "No argument for disk given."
2726
2727 if is_linux; then
2728 $LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP loop > /dev/null 2>&1
2729 return $?
2730 fi
2731 }
2732
2733 #
2734 # Check if the given device is a multipath device and if there is a symbolic
2735 # link to a device mapper and to a disk
2736 # Currently no support for dm devices alone without multipath
2737 #
2738 function is_mpath_device #disk
2739 {
2740 typeset disk=$1
2741 [[ -z $disk ]] && log_fail "No argument for disk given."
2742
2743 if is_linux; then
2744 $LSBLK $DEV_MPATHDIR/$disk -o TYPE | $EGREP mpath > /dev/null 2>&1
2745 if (($? == 0)); then
2746 $READLINK $DEV_MPATHDIR/$disk > /dev/null 2>&1
2747 return $?
2748 else
2749 return $?
2750 fi
2751 fi
2752 }
2753
2754 # Set the slice prefix for disk partitioning depending
2755 # on whether the device is a real, multipath, or loop device.
2756 # Currently all disks have to be of the same type, so only
2757 # checks first disk to determine slice prefix.
2758 #
2759 function set_slice_prefix
2760 {
2761 typeset disk
2762 typeset -i i=0
2763
2764 if is_linux; then
2765 while (( i < $DISK_ARRAY_NUM )); do
2766 disk="$($ECHO $DISKS | $NAWK '{print $(i + 1)}')"
2767 if ( is_mpath_device $disk ) && [[ -z $($ECHO $disk | $AWK \
2768 'substr($1,18,1) ~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
2769 export SLICE_PREFIX=""
2770 return 0
2771 elif ( is_mpath_device $disk || is_loop_device $disk ); then
2772 export SLICE_PREFIX="p"
2773 return 0
2774 else
2775 log_fail "$disk not supported for partitioning."
2776 fi
2777 (( i = i + 1))
2778 done
2779 fi
2780 }
2781
2782 #
2783 # Set the directory path of the listed devices in $DISK_ARRAY_NUM
2784 # Currently all disks have to be of the same type, so only
2785 # checks first disk to determine device directory
2786 # default = /dev (linux)
2787 # real disk = /dev (linux)
2788 # multipath device = /dev/mapper (linux)
2789 #
2790 function set_device_dir
2791 {
2792 typeset disk
2793 typeset -i i=0
2794
2795 if is_linux; then
2796 while (( i < $DISK_ARRAY_NUM )); do
2797 disk="$($ECHO $DISKS | $NAWK '{print $(i + 1)}')"
2798 if is_mpath_device $disk; then
2799 export DEV_DSKDIR=$DEV_MPATHDIR
2800 return 0
2801 else
2802 export DEV_DSKDIR=$DEV_RDSKDIR
2803 return 0
2804 fi
2805 (( i = i + 1))
2806 done
2807 else
2808 export DEV_DSKDIR=$DEV_RDSKDIR
2809 fi
2810 }
2811
2812 #
2813 # Get the directory path of given device
2814 #
2815 function get_device_dir #device
2816 {
2817 typeset device=$1
2818
2819 if ! is_physical_device $device ; then
2820 if [[ $device != "/" ]]; then
2821 device=${device%/*}
2822 fi
2823 if [[ -b "$DEV_DSKDIR/$device" ]]; then
2824 device="$DEV_DSKDIR"
2825 fi
2826 $ECHO $device
2827 else
2828 $ECHO "$DEV_DSKDIR"
2829 fi
2830 }
2831
2832 #
2833 # Get the package name
2834 #
2835 function get_package_name
2836 {
2837 typeset dirpath=${1:-$STC_NAME}
2838
2839 echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2840 }
2841
2842 #
2843 # Get the number of words in a whitespace-separated string
2844 #
2845 function get_word_count
2846 {
2847 $ECHO $1 | $WC -w
2848 }
2849
2850 #
2851 # Verify that the required number of disks is given
2852 #
2853 function verify_disk_count
2854 {
2855 typeset -i min=${2:-1}
2856
2857 typeset -i count=$(get_word_count "$1")
2858
2859 if ((count < min)); then
2860 log_untested "A minimum of $min disks is required to run." \
2861 " You specified $count disk(s)"
2862 fi
2863 }
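#
# Example usage (illustrative sketch; marks the test untested unless at least
# two disks were supplied in $DISKS):
#
#	verify_disk_count "$DISKS" 2
#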
2864
2865 function ds_is_volume
2866 {
2867 typeset type=$(get_prop type $1)
2868 [[ $type = "volume" ]] && return 0
2869 return 1
2870 }
2871
2872 function ds_is_filesystem
2873 {
2874 typeset type=$(get_prop type $1)
2875 [[ $type = "filesystem" ]] && return 0
2876 return 1
2877 }
2878
2879 function ds_is_snapshot
2880 {
2881 typeset type=$(get_prop type $1)
2882 [[ $type = "snapshot" ]] && return 0
2883 return 1
2884 }
2885
2886 #
2887 # Check if Trusted Extensions are installed and enabled
2888 #
2889 function is_te_enabled
2890 {
2891 $SVCS -H -o state labeld 2>/dev/null | $GREP "enabled"
2892 if (($? != 0)); then
2893 return 1
2894 else
2895 return 0
2896 fi
2897 }
2898
2899 # Utility function to determine if a system has multiple cpus.
2900 function is_mp
2901 {
2902 if is_linux; then
2903 (($($NPROC) > 1))
2904 else
2905 (($($PSRINFO | $WC -l) > 1))
2906 fi
2907
2908 return $?
2909 }
2910
2911 function get_cpu_freq
2912 {
2913 if is_linux; then
2914 lscpu | $AWK '/CPU MHz/ { print $3 }'
2915 else
2916 $PSRINFO -v 0 | $AWK '/processor operates at/ {print $6}'
2917 fi
2918 }
2919
2920 # Run the given command as the user provided.
2921 function user_run
2922 {
2923 typeset user=$1
2924 shift
2925
2926 log_note "user:$user $@"
2927 eval \$SU \$user -c \"$@\" > /tmp/out 2>/tmp/err
2928 return $?
2929 }
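#
# Example usage (illustrative sketch; "zfsuser" and the snapshot name are
# hypothetical):
#
#	log_must user_run zfsuser "$ZFS snapshot $TESTPOOL/$TESTFS@snap"
#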
2930
2931 #
2932 # Check if the pool contains the specified vdevs
2933 #
2934 # $1 pool
2935 # $2..n <vdev> ...
2936 #
2937 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2938 # vdevs is not in the pool, and 2 if pool name is missing.
2939 #
2940 function vdevs_in_pool
2941 {
2942 typeset pool=$1
2943 typeset vdev
2944
2945 if [[ -z $pool ]]; then
2946 log_note "Missing pool name."
2947 return 2
2948 fi
2949
2950 shift
2951
2952 typeset tmpfile=$($MKTEMP)
2953 $ZPOOL list -Hv "$pool" >$tmpfile
2954 for vdev in $@; do
2955 $GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1
2956 [[ $? -ne 0 ]] && { $RM -f $tmpfile; return 1; }
2957 done
2958
2959 $RM -f $tmpfile
2960
2961 return 0
2962 }
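#
# Example usage (illustrative sketch; $DISK1 and $DISK2 are hypothetical
# disk variables):
#
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2
#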
2963
2964 function get_max
2965 {
2966 typeset -l i max=$1
2967 shift
2968
2969 for i in "$@"; do
2970 max=$((max > i ? max : i))
2971 done
2972
2973 echo $max
2974 }
2975
2976 function get_min
2977 {
2978 typeset -l i min=$1
2979 shift
2980
2981 for i in "$@"; do
2982 min=$((min < i ? min : i))
2983 done
2984
2985 echo $min
2986 }
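#
# Examples (illustrative):
#
#	$ get_max 3 7 5
#	7
#	$ get_min 3 7 5
#	3
#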
2987
2988 #
2989 # Wait for newly created block devices to have their minors created.
2990 #
2991 function block_device_wait
2992 {
2993 if is_linux; then
2994 $UDEVADM trigger
2995 $UDEVADM settle
2996 fi
2997 }
2998
2999 #
3000 # Synchronize all the data in pool
3001 #
3002 # $1 pool name
3003 #
3004 function sync_pool #pool
3005 {
3006 typeset pool=${1:-$TESTPOOL}
3007
3008 log_must $SYNC
3009 log_must $SLEEP 2
3010 # Flush all the pool data.
3011 typeset -i ret
3012 $ZPOOL scrub $pool >/dev/null 2>&1
3013 ret=$?
3014 (( $ret != 0 )) && \
3015 log_fail "$ZPOOL scrub $pool failed."
3016
3017 while ! is_pool_scrubbed $pool; do
3018 if is_pool_resilvered $pool ; then
3019 log_fail "$pool should not be resilver completed."
3020 fi
3021 log_must $SLEEP 2
3022 done
3023 }