1 #!/bin/ksh -p
2 #
3 # CDDL HEADER START
4 #
5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
8 #
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
13 #
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
19 #
20 # CDDL HEADER END
21 #
22
23 #
24 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25 # Use is subject to license terms.
26 # Copyright (c) 2012, 2016 by Delphix. All rights reserved.
27 # Copyright 2016 Nexenta Systems, Inc.
28 # Copyright (c) 2017 Lawrence Livermore National Security, LLC.
29 # Copyright (c) 2017 Datto Inc.
30 #
31
32 . ${STF_TOOLS}/include/logapi.shlib
33 . ${STF_SUITE}/include/math.shlib
34
35 #
36 # Apply constrained path when available. This is required since the
37 # PATH may have been modified by sudo's secure_path behavior.
38 #
39 if [ -n "$STF_PATH" ]; then
40 PATH="$STF_PATH"
41 fi
42
43 # Linux kernel version comparison function
44 #
45 # $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
46 #
47 # Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
48 #
49 function linux_version
50 {
51 typeset ver="$1"
52
53 [[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
54
55 typeset version=$(echo $ver | cut -d '.' -f 1)
56 typeset major=$(echo $ver | cut -d '.' -f 2)
57 typeset minor=$(echo $ver | cut -d '.' -f 3)
58
59 [[ -z "$version" ]] && version=0
60 [[ -z "$major" ]] && major=0
61 [[ -z "$minor" ]] && minor=0
62
63 echo $((version * 10000 + major * 100 + minor))
64 }
65
66 # Determine if this is a Linux test system
67 #
68 # Return 0 if the platform is Linux, 1 otherwise
69
70 function is_linux
71 {
72 if [[ $(uname -o) == "GNU/Linux" ]]; then
73 return 0
74 else
75 return 1
76 fi
77 }
78
79 # Determine if this is a 32-bit system
80 #
81 # Return 0 if the platform is 32-bit, 1 otherwise
82
83 function is_32bit
84 {
85 if [[ $(getconf LONG_BIT) == "32" ]]; then
86 return 0
87 else
88 return 1
89 fi
90 }
91
92 # Determine if kmemleak is enabled
93 #
94 # Return 0 if kmemleak is enabled, 1 otherwise
95
96 function is_kmemleak
97 {
98 if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
99 return 0
100 else
101 return 1
102 fi
103 }
104
105 # Determine whether a dataset is mounted
106 #
107 # $1 dataset name
108 # $2 filesystem type; optional - defaulted to zfs
109 #
110 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
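#
# Example (a sketch; names are illustrative):
#   ismounted $TESTPOOL/$TESTFS && echo "dataset is mounted"
#   ismounted $TESTDIR || echo "mountpoint is not mounted"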
111
112 function ismounted
113 {
114 typeset fstype=$2
115 [[ -z $fstype ]] && fstype=zfs
116 typeset out dir name ret
117
118 case $fstype in
119 zfs)
120 if [[ "$1" == "/"* ]] ; then
121 for out in $(zfs mount | awk '{print $2}'); do
122 [[ $1 == $out ]] && return 0
123 done
124 else
125 for out in $(zfs mount | awk '{print $1}'); do
126 [[ $1 == $out ]] && return 0
127 done
128 fi
129 ;;
130 ufs|nfs)
131 out=$(df -F $fstype $1 2>/dev/null)
132 ret=$?
133 (($ret != 0)) && return $ret
134
135 dir=${out%%\(*}
136 dir=${dir%% *}
137 name=${out##*\(}
138 name=${name%%\)*}
139 name=${name%% *}
140
141 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
142 ;;
143 ext*)
144 out=$(df -t $fstype $1 2>/dev/null)
145 return $?
146 ;;
147 zvol)
148 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
149 link=$(readlink -f $ZVOL_DEVDIR/$1)
150 [[ -n "$link" ]] && \
151 mount | grep -q "^$link" && \
152 return 0
153 fi
154 ;;
155 esac
156
157 return 1
158 }
159
160 # Return 0 if a dataset is mounted; 1 otherwise
161 #
162 # $1 dataset name
163 # $2 filesystem type; optional - defaulted to zfs
164
165 function mounted
166 {
167 ismounted $1 $2
168 (($? == 0)) && return 0
169 return 1
170 }
171
172 # Return 0 if a dataset is unmounted; 1 otherwise
173 #
174 # $1 dataset name
175 # $2 filesystem type; optional - defaulted to zfs
176
177 function unmounted
178 {
179 ismounted $1 $2
180 (($? == 1)) && return 0
181 return 1
182 }
183
184 # split line on ","
185 #
186 # $1 - line to split
187
188 function splitline
189 {
190 echo $1 | sed "s/,/ /g"
191 }
192
193 function default_setup
194 {
195 default_setup_noexit "$@"
196
197 log_pass
198 }
199
200 #
201 # Given a list of disks, set up storage pools and datasets.
202 #
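# Example (a sketch; disk names are illustrative):
#   default_setup_noexit "sdb sdc"                # pool and file system
#   default_setup_noexit "sdb sdc" "true" "true"  # plus container and volume
#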
203 function default_setup_noexit
204 {
205 typeset disklist=$1
206 typeset container=$2
207 typeset volume=$3
208 log_note begin default_setup_noexit
209
210 if is_global_zone; then
211 if poolexists $TESTPOOL ; then
212 destroy_pool $TESTPOOL
213 fi
214 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
215 log_must zpool create -f $TESTPOOL $disklist
216 else
217 reexport_pool
218 fi
219
220 rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
221 mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
222
223 log_must zfs create $TESTPOOL/$TESTFS
224 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
225
226 if [[ -n $container ]]; then
227 rm -rf $TESTDIR1 || \
228 log_unresolved Could not remove $TESTDIR1
229 mkdir -p $TESTDIR1 || \
230 log_unresolved Could not create $TESTDIR1
231
232 log_must zfs create $TESTPOOL/$TESTCTR
233 log_must zfs set canmount=off $TESTPOOL/$TESTCTR
234 log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
235 log_must zfs set mountpoint=$TESTDIR1 \
236 $TESTPOOL/$TESTCTR/$TESTFS1
237 fi
238
239 if [[ -n $volume ]]; then
240 if is_global_zone ; then
241 log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
242 block_device_wait
243 else
244 log_must zfs create $TESTPOOL/$TESTVOL
245 fi
246 fi
247 }
248
249 #
250 # Given a list of disks, set up a storage pool, file system and
251 # a container.
252 #
253 function default_container_setup
254 {
255 typeset disklist=$1
256
257 default_setup "$disklist" "true"
258 }
259
260 #
261 # Given a list of disks, set up a storage pool, file system
262 # and a volume.
263 #
264 function default_volume_setup
265 {
266 typeset disklist=$1
267
268 default_setup "$disklist" "" "true"
269 }
270
271 #
272 # Given a list of disks, set up a storage pool, file system,
273 # a container and a volume.
274 #
275 function default_container_volume_setup
276 {
277 typeset disklist=$1
278
279 default_setup "$disklist" "true" "true"
280 }
281
282 #
283 # Create a snapshot on a filesystem or volume. By default, the snapshot
284 # is created on the filesystem.
285 #
286 # $1 Existing filesystem or volume name. Default, $TESTFS
287 # $2 snapshot name. Default, $TESTSNAP
288 #
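# Example (illustrative names):
#   create_snapshot $TESTPOOL/$TESTFS snap1   # creates $TESTPOOL/$TESTFS@snap1
#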
289 function create_snapshot
290 {
291 typeset fs_vol=${1:-$TESTFS}
292 typeset snap=${2:-$TESTSNAP}
293
294 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
295 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
296
297 if snapexists $fs_vol@$snap; then
298 log_fail "$fs_vol@$snap already exists."
299 fi
300 datasetexists $fs_vol || \
301 log_fail "$fs_vol must exist."
302
303 log_must zfs snapshot $fs_vol@$snap
304 }
305
306 #
307 # Create a clone from a snapshot, default clone name is $TESTCLONE.
308 #
309 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
310 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
311 #
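# Example (illustrative names):
#   create_clone $TESTPOOL/$TESTFS@snap1 $TESTPOOL/clone1
#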
312 function create_clone # snapshot clone
313 {
314 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
315 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
316
317 [[ -z $snap ]] && \
318 log_fail "Snapshot name is undefined."
319 [[ -z $clone ]] && \
320 log_fail "Clone name is undefined."
321
322 log_must zfs clone $snap $clone
323 }
324
325 #
326 # Create a bookmark of the given snapshot. By default, the bookmark
327 # is created on the filesystem.
328 #
329 # $1 Existing filesystem or volume name. Default, $TESTFS
330 # $2 Existing snapshot name. Default, $TESTSNAP
331 # $3 bookmark name. Default, $TESTBKMARK
332 #
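# Example (illustrative names):
#   create_bookmark $TESTPOOL/$TESTFS snap1 bm1   # creates $TESTPOOL/$TESTFS#bm1
#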
333 function create_bookmark
334 {
335 typeset fs_vol=${1:-$TESTFS}
336 typeset snap=${2:-$TESTSNAP}
337 typeset bkmark=${3:-$TESTBKMARK}
338
339 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
340 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
341 [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
342
343 if bkmarkexists $fs_vol#$bkmark; then
344 log_fail "$fs_vol#$bkmark already exists."
345 fi
346 datasetexists $fs_vol || \
347 log_fail "$fs_vol must exist."
348 snapexists $fs_vol@$snap || \
349 log_fail "$fs_vol@$snap must exist."
350
351 log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
352 }
353
354 function default_mirror_setup
355 {
356 default_mirror_setup_noexit $1 $2 $3
357
358 log_pass
359 }
360
361 #
362 # Given a pair of disks, set up a storage pool and dataset for the mirror
363 # @parameters: $1 the primary side of the mirror
364 # $2 the secondary side of the mirror
365 # @uses: ZPOOL ZFS TESTPOOL TESTFS
366 function default_mirror_setup_noexit
367 {
368 readonly func="default_mirror_setup_noexit"
369 typeset primary=$1
370 typeset secondary=$2
371
372 [[ -z $primary ]] && \
373 log_fail "$func: No parameters passed"
374 [[ -z $secondary ]] && \
375 log_fail "$func: No secondary partition passed"
376 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
377 log_must zpool create -f $TESTPOOL mirror $@
378 log_must zfs create $TESTPOOL/$TESTFS
379 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
380 }
381
382 #
383 # Create a number of mirrors.
384 # We create a number ($1) of two-way mirrors using the pairs of disks named
385 # on the command line. These mirrors are *not* mounted.
386 # @parameters: $1 the number of mirrors to create
387 # $... the devices to use to create the mirrors on
388 # @uses: ZPOOL ZFS TESTPOOL
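# Example (illustrative disks): create two 2-way mirrored pools,
# ${TESTPOOL}1 and ${TESTPOOL}2:
#   setup_mirrors 2 sdb sdc sdd sde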
389 function setup_mirrors
390 {
391 typeset -i nmirrors=$1
392
393 shift
394 while ((nmirrors > 0)); do
395 log_must test -n "$1" -a -n "$2"
396 [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
397 log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
398 shift 2
399 ((nmirrors = nmirrors - 1))
400 done
401 }
402
403 #
404 # Create a number of raidz pools.
405 # We create a number ($1) of raidz pools, each using a pair of disks named
406 # on the command line. These pools are *not* mounted.
407 # @parameters: $1 the number of pools to create
408 # $... the devices to use to create the pools on
409 # @uses: ZPOOL ZFS TESTPOOL
410 function setup_raidzs
411 {
412 typeset -i nraidzs=$1
413
414 shift
415 while ((nraidzs > 0)); do
416 log_must test -n "$1" -a -n "$2"
417 [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
418 log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
419 shift 2
420 ((nraidzs = nraidzs - 1))
421 done
422 }
423
424 #
425 # Destroy the configured testpool mirrors.
426 # the mirrors are of the form ${TESTPOOL}{number}
427 # @uses: ZPOOL ZFS TESTPOOL
428 function destroy_mirrors
429 {
430 default_cleanup_noexit
431
432 log_pass
433 }
434
435 #
436 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
437 # $1 the list of disks
438 #
439 function default_raidz_setup
440 {
441 typeset disklist="$*"
442 disks=(${disklist[*]})
443
444 if [[ ${#disks[*]} -lt 2 ]]; then
445 log_fail "A raid-z requires a minimum of two disks."
446 fi
447
448 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
449 log_must zpool create -f $TESTPOOL raidz $1 $2 $3
450 log_must zfs create $TESTPOOL/$TESTFS
451 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
452
453 log_pass
454 }
455
456 #
457 # Common function used to cleanup storage pools and datasets.
458 #
459 # Invoked at the start of the test suite to ensure the system
460 # is in a known state, and also at the end of each set of
461 # sub-tests to ensure errors from one set of tests don't
462 # impact the execution of the next set.
463
464 function default_cleanup
465 {
466 default_cleanup_noexit
467
468 log_pass
469 }
470
471 function default_cleanup_noexit
472 {
473 typeset exclude=""
474 typeset pool=""
475 #
476 # Destroying the pool will also destroy any
477 # filesystems it contains.
478 #
479 if is_global_zone; then
480 zfs unmount -a > /dev/null 2>&1
481 exclude=`eval echo \"'(${KEEP})'\"`
482 ALL_POOLS=$(zpool list -H -o name \
483 | grep -v "$NO_POOLS" | egrep -v "$exclude")
484 # Here, we loop through the pools we're allowed to
485 # destroy, only destroying them if it's safe to do
486 # so.
487 while [ ! -z "${ALL_POOLS}" ]
488 do
489 for pool in ${ALL_POOLS}
490 do
491 if safe_to_destroy_pool $pool ;
492 then
493 destroy_pool $pool
494 fi
495 ALL_POOLS=$(zpool list -H -o name \
496 | grep -v "$NO_POOLS" \
497 | egrep -v "$exclude")
498 done
499 done
500
501 zfs mount -a
502 else
503 typeset fs=""
504 for fs in $(zfs list -H -o name \
505 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
506 datasetexists $fs && \
507 log_must zfs destroy -Rf $fs
508 done
509
510 # Cleanup is needed here to avoid leaving garbage directories behind.
511 for fs in $(zfs list -H -o name); do
512 [[ $fs == /$ZONE_POOL ]] && continue
513 [[ -d $fs ]] && log_must rm -rf $fs/*
514 done
515
516 #
517 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
518 # the default value
519 #
520 for fs in $(zfs list -H -o name); do
521 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
522 log_must zfs set reservation=none $fs
523 log_must zfs set recordsize=128K $fs
524 log_must zfs set mountpoint=/$fs $fs
525 typeset enc=""
526 enc=$(get_prop encryption $fs)
527 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
528 [[ "$enc" == "off" ]]; then
529 log_must zfs set checksum=on $fs
530 fi
531 log_must zfs set compression=off $fs
532 log_must zfs set atime=on $fs
533 log_must zfs set devices=off $fs
534 log_must zfs set exec=on $fs
535 log_must zfs set setuid=on $fs
536 log_must zfs set readonly=off $fs
537 log_must zfs set snapdir=hidden $fs
538 log_must zfs set aclmode=groupmask $fs
539 log_must zfs set aclinherit=secure $fs
540 fi
541 done
542 fi
543
544 [[ -d $TESTDIR ]] && \
545 log_must rm -rf $TESTDIR
546
547 disk1=${DISKS%% *}
548 if is_mpath_device $disk1; then
549 delete_partitions
550 fi
551 }
552
553
554 #
555 # Common function used to cleanup storage pools, file systems
556 # and containers.
557 #
558 function default_container_cleanup
559 {
560 if ! is_global_zone; then
561 reexport_pool
562 fi
563
564 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
565 [[ $? -eq 0 ]] && \
566 log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
567
568 datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
569 log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
570
571 datasetexists $TESTPOOL/$TESTCTR && \
572 log_must zfs destroy -Rf $TESTPOOL/$TESTCTR
573
574 [[ -e $TESTDIR1 ]] && \
575 log_must rm -rf $TESTDIR1 > /dev/null 2>&1
576
577 default_cleanup
578 }
579
580 #
581 # Common function used to clean up a snapshot of a file system or volume.
582 # Defaults to deleting the file system's snapshot.
583 #
584 # $1 snapshot name
585 #
586 function destroy_snapshot
587 {
588 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
589
590 if ! snapexists $snap; then
591 log_fail "'$snap' does not exist."
592 fi
593
594 #
595 # The value returned by 'get_prop' does not match the real mountpoint
596 # when the snapshot is unmounted. So, first check and make sure this
597 # snapshot is mounted on the current system.
598 #
599 typeset mtpt=""
600 if ismounted $snap; then
601 mtpt=$(get_prop mountpoint $snap)
602 (($? != 0)) && \
603 log_fail "get_prop mountpoint $snap failed."
604 fi
605
606 log_must zfs destroy $snap
607 [[ $mtpt != "" && -d $mtpt ]] && \
608 log_must rm -rf $mtpt
609 }
610
611 #
612 # Common function used to cleanup clone.
613 #
614 # $1 clone name
615 #
616 function destroy_clone
617 {
618 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
619
620 if ! datasetexists $clone; then
621 log_fail "'$clone' does not exist."
622 fi
623
624 # For the same reason as in destroy_snapshot
625 typeset mtpt=""
626 if ismounted $clone; then
627 mtpt=$(get_prop mountpoint $clone)
628 (($? != 0)) && \
629 log_fail "get_prop mountpoint $clone failed."
630 fi
631
632 log_must zfs destroy $clone
633 [[ $mtpt != "" && -d $mtpt ]] && \
634 log_must rm -rf $mtpt
635 }
636
637 #
638 # Common function used to clean up a bookmark of a file system or volume.
639 # Defaults to deleting the file system's bookmark.
640 #
641 # $1 bookmark name
642 #
643 function destroy_bookmark
644 {
645 typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
646
647 if ! bkmarkexists $bkmark; then
648 log_fail "'$bkmark' does not exist."
649 fi
650
651 log_must zfs destroy $bkmark
652 }
653
654 # Return 0 if a snapshot exists; $? otherwise
655 #
656 # $1 - snapshot name
657
658 function snapexists
659 {
660 zfs list -H -t snapshot "$1" > /dev/null 2>&1
661 return $?
662 }
663
664 #
665 # Return 0 if a bookmark exists; $? otherwise
666 #
667 # $1 - bookmark name
668 #
669 function bkmarkexists
670 {
671 zfs list -H -t bookmark "$1" > /dev/null 2>&1
672 return $?
673 }
674
675 #
676 # Set a property to a certain value on a dataset.
677 # Sets a property of the dataset to the value as passed in.
678 # @param:
679 # $1 dataset whose property is being set
680 # $2 property to set
681 # $3 value to set property to
682 # @return:
683 # 0 if the property could be set.
684 # non-zero otherwise.
685 # @use: ZFS
686 #
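# Example (illustrative):
#   dataset_setprop $TESTPOOL/$TESTFS compression on || log_fail "set failed"
#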
687 function dataset_setprop
688 {
689 typeset fn=dataset_setprop
690
691 if (($# < 3)); then
692 log_note "$fn: Insufficient parameters (need 3, had $#)"
693 return 1
694 fi
695 typeset output=
696 output=$(zfs set $2=$3 $1 2>&1)
697 typeset rv=$?
698 if ((rv != 0)); then
699 log_note "Setting property on $1 failed."
700 log_note "property $2=$3"
701 log_note "Return Code: $rv"
702 log_note "Output: $output"
703 return $rv
704 fi
705 return 0
706 }
707
708 #
709 # Assign suite defined dataset properties.
710 # This function is used to apply the suite's defined default set of
711 # properties to a dataset.
712 # @parameters: $1 dataset to use
713 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
714 # @returns:
715 # 0 if the dataset has been altered.
716 # 1 if no pool name was passed in.
717 # 2 if the dataset could not be found.
718 # 3 if the dataset could not have its properties set.
719 #
720 function dataset_set_defaultproperties
721 {
722 typeset dataset="$1"
723
724 [[ -z $dataset ]] && return 1
725
726 typeset confset=
727 typeset -i found=0
728 for confset in $(zfs list); do
729 if [[ $dataset = $confset ]]; then
730 found=1
731 break
732 fi
733 done
734 [[ $found -eq 0 ]] && return 2
735 if [[ -n $COMPRESSION_PROP ]]; then
736 dataset_setprop $dataset compression $COMPRESSION_PROP || \
737 return 3
738 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
739 fi
740 if [[ -n $CHECKSUM_PROP ]]; then
741 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
742 return 3
743 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
744 fi
745 return 0
746 }
747
748 #
749 # Check a numeric assertion
750 # @parameter: $@ the assertion to check
751 # @output: big loud notice if assertion failed
752 # @use: log_fail
753 #
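# Example (illustrative): fail loudly unless i is below 10:
#   assert "$i < 10"
#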
754 function assert
755 {
756 (($@)) || log_fail "$@"
757 }
758
759 #
760 # Function to format partition size of a disk
761 # Given a disk cxtxdx reduces all partitions
762 # to 0 size
763 #
764 function zero_partitions #<whole_disk_name>
765 {
766 typeset diskname=$1
767 typeset i
768
769 if is_linux; then
770 DSK=$DEV_DSKDIR/$diskname
771 DSK=$(echo $DSK | sed -e "s|//|/|g")
772 log_must parted $DSK -s -- mklabel gpt
773 blockdev --rereadpt $DSK 2>/dev/null
774 block_device_wait
775 else
776 for i in 0 1 3 4 5 6 7
777 do
778 log_must set_partition $i "" 0mb $diskname
779 done
780 fi
781
782 return 0
783 }
784
785 #
786 # Given a slice, size and disk, this function
787 # formats the slice to the specified size.
788 # Size should be specified with units as per
789 # the `format` command requirements eg. 100mb 3gb
790 #
791 # NOTE: This entire interface is problematic for the Linux parted utility
792 # which requires the end of the partition to be specified. It would be
793 # best to retire this interface and replace it with something more flexible.
794 # At the moment a best effort is made.
795 #
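# Example (a sketch; the disk name is illustrative):
#   set_partition 0 "" 100mb sdb   # 100 MB partition 0 at the default start
#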
796 function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
797 {
798 typeset -i slicenum=$1
799 typeset start=$2
800 typeset size=$3
801 typeset disk=$4
802
803 if is_linux; then
804 if [[ -z $size || -z $disk ]]; then
805 log_fail "The size or disk name is unspecified."
806 fi
807 typeset size_mb=${size%%[mMgG]}
808
809 size_mb=${size_mb%%[mMgG][bB]}
810 if [[ $size == *[gG]* ]]; then
811 ((size_mb = size_mb * 1024))
812 fi
813
814 # Create GPT partition table when setting slice 0 or
815 # when the device doesn't already contain a GPT label.
816 parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
817 typeset ret_val=$?
818 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
819 parted $DEV_DSKDIR/$disk -s -- mklabel gpt
820 if [[ $? -ne 0 ]]; then
821 log_note "Failed to create GPT partition table on $disk"
822 return 1
823 fi
824 fi
825
826 # When no start is given, align on the first cylinder.
827 if [[ -z "$start" ]]; then
828 start=1
829 fi
830
831 # Determine the cylinder size for the device and, using
832 # that, calculate the end offset in cylinders.
833 typeset -i cly_size_kb=0
834 cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
835 unit cyl print | head -3 | tail -1 | \
836 awk -F '[:k.]' '{print $4}')
837 ((end = (size_mb * 1024 / cly_size_kb) + start))
838
839 parted $DEV_DSKDIR/$disk -s -- \
840 mkpart part$slicenum ${start}cyl ${end}cyl
841 if [[ $? -ne 0 ]]; then
842 log_note "Failed to create partition $slicenum on $disk"
843 return 1
844 fi
845
846 blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
847 block_device_wait
848 else
849 if [[ -z $slicenum || -z $size || -z $disk ]]; then
850 log_fail "The slice, size or disk name is unspecified."
851 fi
852
853 typeset format_file=/var/tmp/format_in.$$
854
855 echo "partition" >$format_file
856 echo "$slicenum" >> $format_file
857 echo "" >> $format_file
858 echo "" >> $format_file
859 echo "$start" >> $format_file
860 echo "$size" >> $format_file
861 echo "label" >> $format_file
862 echo "" >> $format_file
863 echo "q" >> $format_file
864 echo "q" >> $format_file
865
866 format -e -s -d $disk -f $format_file
867 fi
868
869 typeset ret_val=$?
870 rm -f $format_file
871 if [[ $ret_val -ne 0 ]]; then
872 log_note "Unable to format $disk slice $slicenum to $size"
873 return 1
874 fi
875 return 0
876 }
877
878 #
879 # Delete all partitions on all disks - this is specifically for the use of multipath
880 # devices which currently can only be used in the test suite as raw/un-partitioned
881 # devices (i.e. a zpool cannot be created on a whole mpath device that has partitions)
882 #
883 function delete_partitions
884 {
885 typeset -i j=1
886
887 if [[ -z $DISK_ARRAY_NUM ]]; then
888 DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
889 fi
890 if [[ -z $DISKSARRAY ]]; then
891 DISKSARRAY=$DISKS
892 fi
893
894 if is_linux; then
895 if (( $DISK_ARRAY_NUM == 1 )); then
896 while ((j < MAX_PARTITIONS)); do
897 parted $DEV_DSKDIR/$DISK -s rm $j \
898 > /dev/null 2>&1
899 if (( $? == 1 )); then
900 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
901 if (( $? == 1 )); then
902 log_note "Partitions for $DISK should be deleted"
903 else
904 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
905 fi
906 return 0
907 else
908 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
909 if (( $? == 0 )); then
910 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
911 fi
912 fi
913 ((j = j+1))
914 done
915 else
916 for disk in `echo $DISKSARRAY`; do
917 while ((j < MAX_PARTITIONS)); do
918 parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
919 if (( $? == 1 )); then
920 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
921 if (( $? == 1 )); then
922 log_note "Partitions for $disk should be deleted"
923 else
924 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
925 fi
926 j=7
927 else
928 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
929 if (( $? == 0 )); then
930 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
931 fi
932 fi
933 ((j = j+1))
934 done
935 j=1
936 done
937 fi
938 fi
939 return 0
940 }
941
942 #
943 # Get the end cyl of the given slice
944 #
945 function get_endslice #<disk> <slice>
946 {
947 typeset disk=$1
948 typeset slice=$2
949 if [[ -z $disk || -z $slice ]] ; then
950 log_fail "The disk name or slice number is unspecified."
951 fi
952
953 if is_linux; then
954 endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
955 grep "part${slice}" | \
956 awk '{print $3}' | \
957 sed 's,cyl,,')
958 ((endcyl = (endcyl + 1)))
959 else
960 disk=${disk#/dev/dsk/}
961 disk=${disk#/dev/rdsk/}
962 disk=${disk%s*}
963
964 typeset -i ratio=0
965 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
966 grep "sectors\/cylinder" | \
967 awk '{print $2}')
968
969 if ((ratio == 0)); then
970 return
971 fi
972
973 typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
974 nawk -v token="$slice" '{if ($1==token) print $6}')
975
976 ((endcyl = (endcyl + 1) / ratio))
977 fi
978
979 echo $endcyl
980 }
981
982
983 #
984 # Given a size,disk and total slice number, this function formats the
985 # disk slices from 0 to the total slice number with the same specified
986 # size.
987 #
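# Example (illustrative): create six 200mb slices on disk sdb:
#   partition_disk 200mb sdb 6
#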
988 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
989 {
990 typeset -i i=0
991 typeset slice_size=$1
992 typeset disk_name=$2
993 typeset total_slices=$3
994 typeset cyl
995
996 zero_partitions $disk_name
997 while ((i < $total_slices)); do
998 if ! is_linux; then
999 if ((i == 2)); then
1000 ((i = i + 1))
1001 continue
1002 fi
1003 fi
1004 log_must set_partition $i "$cyl" $slice_size $disk_name
1005 cyl=$(get_endslice $disk_name $i)
1006 ((i = i+1))
1007 done
1008 }
1009
1010 #
1011 # This function continues to write a filenum number of files into dirnum
1012 # number of directories until either file_write returns an error or the
1013 # maximum number of files per directory has been written.
1014 #
1015 # Usage:
1016 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1017 #
1018 # Return value: 0 on success
1019 # non 0 on error
1020 #
1021 # Where :
1022 # destdir: is the directory where everything is to be created under
1023 # dirnum: the maximum number of subdirectories to use, -1 no limit
1024 # filenum: the maximum number of files per subdirectory
1025 # bytes: number of bytes to write
1026 # num_writes: number of times to write out bytes
1027 # data: the data that will be written
1028 #
1029 # E.g.
1030 # fill_fs /testdir 20 25 1024 256 0
1031 #
1032 # Note: bytes * num_writes equals the size of the testfile
1033 #
1034 function fill_fs # destdir dirnum filenum bytes num_writes data
1035 {
1036 typeset destdir=${1:-$TESTDIR}
1037 typeset -i dirnum=${2:-50}
1038 typeset -i filenum=${3:-50}
1039 typeset -i bytes=${4:-8192}
1040 typeset -i num_writes=${5:-10240}
1041 typeset -i data=${6:-0}
1042
1043 typeset -i odirnum=1
1044 typeset -i idirnum=0
1045 typeset -i fn=0
1046 typeset -i retval=0
1047
1048 log_must mkdir -p $destdir/$idirnum
1049 while (($odirnum > 0)); do
1050 if ((dirnum >= 0 && idirnum >= dirnum)); then
1051 odirnum=0
1052 break
1053 fi
1054 file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
1055 -b $bytes -c $num_writes -d $data
1056 retval=$?
1057 if (($retval != 0)); then
1058 odirnum=0
1059 break
1060 fi
1061 if (($fn >= $filenum)); then
1062 fn=0
1063 ((idirnum = idirnum + 1))
1064 log_must mkdir -p $destdir/$idirnum
1065 else
1066 ((fn = fn + 1))
1067 fi
1068 done
1069 return $retval
1070 }
1071
1072 #
1073 # Simple function to get the specified property. If unable to
1074 # get the property, log a note and return 1.
1075 #
1076 # Note property is in 'parsable' format (-p)
1077 #
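# Example (illustrative):
#   used=$(get_prop used $TESTPOOL/$TESTFS) || log_fail "get_prop failed"
#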
1078 function get_prop # property dataset
1079 {
1080 typeset prop_val
1081 typeset prop=$1
1082 typeset dataset=$2
1083
1084 prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
1085 if [[ $? -ne 0 ]]; then
1086 log_note "Unable to get $prop property for dataset " \
1087 "$dataset"
1088 return 1
1089 fi
1090
1091 echo "$prop_val"
1092 return 0
1093 }
1094
1095 #
1096 # Simple function to get the specified property of a pool. If unable to
1097 # get the property, log a note and return 1.
1098 #
1099 # Note property is in 'parsable' format (-p)
1100 #
1101 function get_pool_prop # property pool
1102 {
1103 typeset prop_val
1104 typeset prop=$1
1105 typeset pool=$2
1106
1107 if poolexists $pool ; then
1108 prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
1109 awk '{print $3}')
1110 if [[ $? -ne 0 ]]; then
1111 log_note "Unable to get $prop property for pool " \
1112 "$pool"
1113 return 1
1114 fi
1115 else
1116 log_note "Pool $pool does not exist."
1117 return 1
1118 fi
1119
1120 echo "$prop_val"
1121 return 0
1122 }
1123
1124 # Return 0 if a pool exists; $? otherwise
1125 #
1126 # $1 - pool name
1127
1128 function poolexists
1129 {
1130 typeset pool=$1
1131
1132 if [[ -z $pool ]]; then
1133 log_note "No pool name given."
1134 return 1
1135 fi
1136
1137 zpool get name "$pool" > /dev/null 2>&1
1138 return $?
1139 }
1140
1141 # Return 0 if all the specified datasets exist; $? otherwise
1142 #
1143 # $1-n dataset name
1144 function datasetexists
1145 {
1146 if (($# == 0)); then
1147 log_note "No dataset name given."
1148 return 1
1149 fi
1150
1151 while (($# > 0)); do
1152 zfs get name $1 > /dev/null 2>&1 || \
1153 return $?
1154 shift
1155 done
1156
1157 return 0
1158 }
1159
1160 # Return 0 if none of the specified datasets exists, otherwise return 1.
1161 #
1162 # $1-n dataset name
1163 function datasetnonexists
1164 {
1165 if (($# == 0)); then
1166 log_note "No dataset name given."
1167 return 1
1168 fi
1169
1170 while (($# > 0)); do
1171 zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1172 && return 1
1173 shift
1174 done
1175
1176 return 0
1177 }
1178
1179 #
1180 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1181 #
1182 # Returns 0 if shared, 1 otherwise.
1183 #
1184 function is_shared
1185 {
1186 typeset fs=$1
1187 typeset mtpt
1188
1189 if [[ $fs != "/"* ]] ; then
1190 if datasetnonexists "$fs" ; then
1191 return 1
1192 else
1193 mtpt=$(get_prop mountpoint "$fs")
1194 case $mtpt in
1195 none|legacy|-) return 1
1196 ;;
1197 *) fs=$mtpt
1198 ;;
1199 esac
1200 fi
1201 fi
1202
1203 if is_linux; then
1204 for mtpt in `share | awk '{print $1}'` ; do
1205 if [[ $mtpt == $fs ]] ; then
1206 return 0
1207 fi
1208 done
1209 return 1
1210 fi
1211
1212 for mtpt in `share | awk '{print $2}'` ; do
1213 if [[ $mtpt == $fs ]] ; then
1214 return 0
1215 fi
1216 done
1217
1218 typeset stat=$(svcs -H -o STA nfs/server:default)
1219 if [[ $stat != "ON" ]]; then
1220 log_note "Current nfs/server status: $stat"
1221 fi
1222
1223 return 1
1224 }
1225
1226 #
1227 # Given a dataset name determine if it is shared via SMB.
1228 #
1229 # Returns 0 if shared, 1 otherwise.
1230 #
1231 function is_shared_smb
1232 {
1233 typeset fs=$1
1234 typeset mtpt
1235
1236 if datasetnonexists "$fs" ; then
1237 return 1
1238 else
1239 fs=$(echo $fs | sed 's@/@_@g')
1240 fi
1241
1242 if is_linux; then
1243 for mtpt in `net usershare list | awk '{print $1}'` ; do
1244 if [[ $mtpt == $fs ]] ; then
1245 return 0
1246 fi
1247 done
1248 return 1
1249 else
1250 log_unsupported "Currently unsupported by the test framework"
1251 return 1
1252 fi
1253 }
1254
1255 #
1256 # Given a mountpoint, determine if it is not shared via NFS.
1257 #
1258 # Returns 0 if not shared, 1 otherwise.
1259 #
1260 function not_shared
1261 {
1262 typeset fs=$1
1263
1264 is_shared $fs
1265 if (($? == 0)); then
1266 return 1
1267 fi
1268
1269 return 0
1270 }
1271
1272 #
1273 # Given a dataset determine if it is not shared via SMB.
1274 #
1275 # Returns 0 if not shared, 1 otherwise.
1276 #
1277 function not_shared_smb
1278 {
1279 typeset fs=$1
1280
1281 is_shared_smb $fs
1282 if (($? == 0)); then
1283 return 1
1284 fi
1285
1286 return 0
1287 }
1288
1289 #
1290 # Helper function to unshare a mountpoint.
1291 #
1292 function unshare_fs #fs
1293 {
1294 typeset fs=$1
1295
1296 is_shared $fs || is_shared_smb $fs
1297 if (($? == 0)); then
1298 log_must zfs unshare $fs
1299 fi
1300
1301 return 0
1302 }
1303
1304 #
1305 # Helper function to share a NFS mountpoint.
1306 #
1307 function share_nfs #fs
1308 {
1309 typeset fs=$1
1310
1311 if is_linux; then
1312 is_shared $fs
1313 if (($? != 0)); then
1314 log_must share "*:$fs"
1315 fi
1316 else
1317 is_shared $fs
1318 if (($? != 0)); then
1319 log_must share -F nfs $fs
1320 fi
1321 fi
1322
1323 return 0
1324 }
1325
1326 #
1327 # Helper function to unshare a NFS mountpoint.
1328 #
1329 function unshare_nfs #fs
1330 {
1331 typeset fs=$1
1332
1333 if is_linux; then
1334 is_shared $fs
1335 if (($? == 0)); then
1336 log_must unshare -u "*:$fs"
1337 fi
1338 else
1339 is_shared $fs
1340 if (($? == 0)); then
1341 log_must unshare -F nfs $fs
1342 fi
1343 fi
1344
1345 return 0
1346 }
1347
1348 #
1349 # Helper function to show NFS shares.
1350 #
1351 function showshares_nfs
1352 {
1353 if is_linux; then
1354 share -v
1355 else
1356 share -F nfs
1357 fi
1358
1359 return 0
1360 }
1361
1362 #
1363 # Helper function to show SMB shares.
1364 #
1365 function showshares_smb
1366 {
1367 if is_linux; then
1368 net usershare list
1369 else
1370 share -F smb
1371 fi
1372
1373 return 0
1374 }
1375
1376 #
1377 # Check NFS server status and trigger it online.
1378 #
1379 function setup_nfs_server
1380 {
1381 # Cannot share directory in non-global zone.
1382 #
1383 if ! is_global_zone; then
1384 log_note "Cannot trigger NFS server by sharing in LZ."
1385 return
1386 fi
1387
1388 if is_linux; then
1389 log_note "NFS server must be started prior to running the test framework."
1390 return
1391 fi
1392
1393 typeset nfs_fmri="svc:/network/nfs/server:default"
1394 if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1395 #
1396 # Only a real share operation can bring the NFS server
1397 # online permanently.
1398 #
1399 typeset dummy=/tmp/dummy
1400
1401 if [[ -d $dummy ]]; then
1402 log_must rm -rf $dummy
1403 fi
1404
1405 log_must mkdir $dummy
1406 log_must share $dummy
1407
1408 #
1409 # Wait for the FMRI to reach its final status. While in
1410 # transition an asterisk (*) is appended to the instance
1411 # status, and unsharing would revert the status to 'DIS'.
1412 #
1413 # Wait at least 1 second.
1414 #
1415 log_must sleep 1
1416 timeout=10
1417 while [[ $timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1418 do
1419 log_must sleep 1
1420
1421 ((timeout -= 1))
1422 done
1423
1424 log_must unshare $dummy
1425 log_must rm -rf $dummy
1426 fi
1427
1428 log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1429 }
1430
1431 #
1432 # To verify whether calling process is in global zone
1433 #
1434 # Return 0 if in global zone, 1 in non-global zone
1435 #
1436 function is_global_zone
1437 {
1438 if is_linux; then
1439 return 0
1440 else
1441 typeset cur_zone=$(zonename 2>/dev/null)
1442 if [[ $cur_zone != "global" ]]; then
1443 return 1
1444 fi
1445 return 0
1446 fi
1447 }
1448
1449 #
1450 # Verify whether test is permitted to run from
1451 # global zone, local zone, or both
1452 #
1453 # $1 zone limit, could be "global", "local", or "both"(no limit)
1454 #
1455 # Return 0 if permitted, otherwise exit with log_unsupported
1456 #
1457 function verify_runnable # zone limit
1458 {
1459 typeset limit=$1
1460
1461 [[ -z $limit ]] && return 0
1462
1463 if is_global_zone ; then
1464 case $limit in
1465 global|both)
1466 ;;
1467 local) log_unsupported "Test is unable to run from "\
1468 "global zone."
1469 ;;
1470 *) log_note "Warning: unknown limit $limit - " \
1471 "use both."
1472 ;;
1473 esac
1474 else
1475 case $limit in
1476 local|both)
1477 ;;
1478 global) log_unsupported "Test is unable to run from "\
1479 "local zone."
1480 ;;
1481 *) log_note "Warning: unknown limit $limit - " \
1482 "use both."
1483 ;;
1484 esac
1485
1486 reexport_pool
1487 fi
1488
1489 return 0
1490 }
1491
1492 # Return 0 if created successfully or the pool already exists; $? otherwise
1493 # Note: In local zones, this function should return 0 silently.
1494 #
1495 # $1 - pool name
1496 # $2-n - [keyword] devs_list
1497
1498 function create_pool #pool devs_list
1499 {
1500 typeset pool=${1%%/*}
1501
1502 shift
1503
1504 if [[ -z $pool ]]; then
1505 log_note "Missing pool name."
1506 return 1
1507 fi
1508
1509 if poolexists $pool ; then
1510 destroy_pool $pool
1511 fi
1512
1513 if is_global_zone ; then
1514 [[ -d /$pool ]] && rm -rf /$pool
1515 log_must zpool create -f $pool $@
1516 fi
1517
1518 return 0
1519 }
1520
1521 # Return 0 if destroyed successfully; 1 if the pool does not exist
1522 # Note: In local zones, this function should return 0 silently.
1523 #
1524 # $1 - pool name
1525 # Destroy pool with the given parameters.
1526
1527 function destroy_pool #pool
1528 {
1529 typeset pool=${1%%/*}
1530 typeset mtpt
1531
1532 if [[ -z $pool ]]; then
1533 log_note "No pool name given."
1534 return 1
1535 fi
1536
1537 if is_global_zone ; then
1538 if poolexists "$pool" ; then
1539 mtpt=$(get_prop mountpoint "$pool")
1540
1541 # At times, syseventd/udev activity can cause attempts
1542 # to destroy a pool to fail with EBUSY. We retry a few
1543 # times allowing failures before requiring the destroy
1544 # to succeed.
1545 log_must_busy zpool destroy -f $pool
1546
1547 [[ -d $mtpt ]] && \
1548 log_must rm -rf $mtpt
1549 else
1550 log_note "Pool does not exist. ($pool)"
1551 return 1
1552 fi
1553 fi
1554
1555 return 0
1556 }
1557
1558 #
1559 # First, create a pool with 5 datasets. Then, create a single zone and
1560 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1561 # and a zvol device to the zone.
1562 #
1563 # $1 zone name
1564 # $2 zone root directory prefix
1565 # $3 zone ip
1566 #
1567 function zfs_zones_setup #zone_name zone_root zone_ip
1568 {
1569 typeset zone_name=${1:-$(hostname)-z}
1570 typeset zone_root=${2:-"/zone_root"}
1571 typeset zone_ip=${3:-"10.1.1.10"}
1572 typeset prefix_ctr=$ZONE_CTR
1573 typeset pool_name=$ZONE_POOL
1574 typeset -i cntctr=5
1575 typeset -i i=0
1576
1577 # Create a pool and 5 containers within it
1578 #
1579 [[ -d /$pool_name ]] && rm -rf /$pool_name
1580 log_must zpool create -f $pool_name $DISKS
1581 while ((i < cntctr)); do
1582 log_must zfs create $pool_name/$prefix_ctr$i
1583 ((i += 1))
1584 done
1585
1586 # create a zvol
1587 log_must zfs create -V 1g $pool_name/zone_zvol
1588 block_device_wait
1589
1590 #
1591 # If the current system supports slog, add a slog device to the pool
1592 #
1593 if verify_slog_support ; then
1594 typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
1595 log_must mkfile $MINVDEVSIZE $sdevs
1596 log_must zpool add $pool_name log mirror $sdevs
1597 fi
1598
1599 # this isn't supported just yet.
1600 # Create a filesystem. In order to add this to
1601 # the zone, it must have its mountpoint set to 'legacy'
1602 # log_must zfs create $pool_name/zfs_filesystem
1603 # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
1604
1605 [[ -d $zone_root ]] && \
1606 log_must rm -rf $zone_root/$zone_name
1607 [[ ! -d $zone_root ]] && \
1608 log_must mkdir -p -m 0700 $zone_root/$zone_name
1609
1610 # Create zone configure file and configure the zone
1611 #
1612 typeset zone_conf=/tmp/zone_conf.$$
1613 echo "create" > $zone_conf
1614 echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1615 echo "set autoboot=true" >> $zone_conf
1616 i=0
1617 while ((i < cntctr)); do
1618 echo "add dataset" >> $zone_conf
1619 echo "set name=$pool_name/$prefix_ctr$i" >> \
1620 $zone_conf
1621 echo "end" >> $zone_conf
1622 ((i += 1))
1623 done
1624
1625 # add our zvol to the zone
1626 echo "add device" >> $zone_conf
1627 echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1628 echo "end" >> $zone_conf
1629
1630 # add a corresponding zvol rdsk to the zone
1631 echo "add device" >> $zone_conf
1632 echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1633 echo "end" >> $zone_conf
1634
1635 # once it's supported, we'll add our filesystem to the zone
1636 # echo "add fs" >> $zone_conf
1637 # echo "set type=zfs" >> $zone_conf
1638 # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1639 # echo "set dir=/export/zfs_filesystem" >> $zone_conf
1640 # echo "end" >> $zone_conf
1641
1642 echo "verify" >> $zone_conf
1643 echo "commit" >> $zone_conf
1644 log_must zonecfg -z $zone_name -f $zone_conf
1645 log_must rm -f $zone_conf
1646
1647 # Install the zone
1648 zoneadm -z $zone_name install
1649 if (($? == 0)); then
1650 log_note "SUCCESS: zoneadm -z $zone_name install"
1651 else
1652 log_fail "FAIL: zoneadm -z $zone_name install"
1653 fi
1654
1655 # Install sysidcfg file
1656 #
1657 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1658 echo "system_locale=C" > $sysidcfg
1659 echo "terminal=dtterm" >> $sysidcfg
1660 echo "network_interface=primary {" >> $sysidcfg
1661 echo "hostname=$zone_name" >> $sysidcfg
1662 echo "}" >> $sysidcfg
1663 echo "name_service=NONE" >> $sysidcfg
1664 echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
1665 echo "security_policy=NONE" >> $sysidcfg
1666 echo "timezone=US/Eastern" >> $sysidcfg
1667
1668 # Boot this zone
1669 log_must zoneadm -z $zone_name boot
1670 }
1671
1672 #
1673 # Reexport TESTPOOL & TESTPOOL(1-4)
1674 #
1675 function reexport_pool
1676 {
1677 typeset -i cntctr=5
1678 typeset -i i=0
1679
1680 while ((i < cntctr)); do
1681 if ((i == 0)); then
1682 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1683 if ! ismounted $TESTPOOL; then
1684 log_must zfs mount $TESTPOOL
1685 fi
1686 else
1687 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1688 if eval ! ismounted \$TESTPOOL$i; then
1689 log_must eval zfs mount \$TESTPOOL$i
1690 fi
1691 fi
1692 ((i += 1))
1693 done
1694 }
1695
1696 #
1697 # Verify a given disk or pool state
1698 #
1699 # Return 0 if the pool/disk matches the expected state, 1 otherwise
1700 #
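# Example (illustrative): verify the pool itself reports ONLINE:
#   check_state $TESTPOOL "" "online" || log_fail "$TESTPOOL is not online"
#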
1701 function check_state # pool disk state{online,offline,degraded}
1702 {
1703 typeset pool=$1
1704 typeset disk=${2#$DEV_DSKDIR/}
1705 typeset state=$3
1706
1707 [[ -z $pool ]] || [[ -z $state ]] \
1708 && log_fail "Arguments invalid or missing"
1709
1710 if [[ -z $disk ]]; then
1711 #check pool state only
1712 zpool get -H -o value health $pool \
1713 | grep -i "$state" > /dev/null 2>&1
1714 else
1715 zpool status -v $pool | grep "$disk" \
1716 | grep -i "$state" > /dev/null 2>&1
1717 fi
1718
1719 return $?
1720 }
1721
1722 #
1723 # Cause a scan of all scsi host adapters by default
1724 #
1725 # $1 optional host number
1726 #
1727 function scan_scsi_hosts
1728 {
1729 typeset hostnum=${1}
1730
1731 if is_linux; then
1732 if [[ -z $hostnum ]]; then
1733 for host in /sys/class/scsi_host/host*; do
1734 log_must eval "echo '- - -' > $host/scan"
1735 done
1736 else
1737 log_must eval \
1738 "echo /sys/class/scsi_host/host$hostnum/scan" \
1739 > /dev/null
1740 log_must eval \
1741 "echo '- - -' > /sys/class/scsi_host/host$hostnum/scan"
1742 fi
1743 fi
1744 }
1745 #
1746 # Wait for newly created block devices to have their minors created.
1747 #
1748 function block_device_wait
1749 {
1750 if is_linux; then
1751 udevadm trigger
1752 udevadm settle
1753 fi
1754 }
1755
1756 #
1757 # Online or offline a disk on the system
1758 #
1759 # First checks state of disk. Test will fail if disk is not properly onlined
1760 # or offlined. Online is a full rescan of SCSI disks by echoing to every
1761 # host entry.
1762 #
1763 function on_off_disk # disk state{online,offline} host
1764 {
1765 typeset disk=$1
1766 typeset state=$2
1767 typeset host=$3
1768
1769 [[ -z $disk ]] || [[ -z $state ]] && \
1770 log_fail "Arguments invalid or missing"
1771
1772 if is_linux; then
1773 if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
1774 dm_name="$(readlink $DEV_DSKDIR/$disk \
1775 | nawk -F / '{print $2}')"
1776 slave="$(ls /sys/block/${dm_name}/slaves \
1777 | nawk '{print $1}')"
1778 while [[ -n $slave ]]; do
1779 #check if disk is online
1780 lsscsi | egrep $slave > /dev/null
1781 if (($? == 0)); then
1782 slave_dir="/sys/block/${dm_name}"
1783 slave_dir+="/slaves/${slave}/device"
1784 ss="${slave_dir}/state"
1785 sd="${slave_dir}/delete"
1786 log_must eval "echo 'offline' > ${ss}"
1787 log_must eval "echo '1' > ${sd}"
1788 lsscsi | egrep $slave > /dev/null
1789 if (($? == 0)); then
1790 log_fail "Offlining" \
1791 "$disk failed"
1792 fi
1793 fi
1794 slave="$(ls /sys/block/$dm_name/slaves \
1795 2>/dev/null | nawk '{print $1}')"
1796 done
1797 elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
1798 #check if disk is online
1799 lsscsi | egrep $disk > /dev/null
1800 if (($? == 0)); then
1801 dev_state="/sys/block/$disk/device/state"
1802 dev_delete="/sys/block/$disk/device/delete"
1803 log_must eval "echo 'offline' > ${dev_state}"
1804 log_must eval "echo '1' > ${dev_delete}"
1805 lsscsi | egrep $disk > /dev/null
1806 if (($? == 0)); then
1807 log_fail "Offlining $disk" \
1808 "failed"
1809 fi
1810 else
1811 log_note "$disk is already offline"
1812 fi
1813 elif [[ $state == "online" ]]; then
1814 #force a full rescan
1815 scan_scsi_hosts $host
1816 block_device_wait
1817 if is_mpath_device $disk; then
1818 dm_name="$(readlink $DEV_DSKDIR/$disk \
1819 | nawk -F / '{print $2}')"
1820 slave="$(ls /sys/block/$dm_name/slaves \
1821 | nawk '{print $1}')"
1822 lsscsi | egrep $slave > /dev/null
1823 if (($? != 0)); then
1824 log_fail "Onlining $disk failed"
1825 fi
1826 elif is_real_device $disk; then
1827 lsscsi | egrep $disk > /dev/null
1828 if (($? != 0)); then
1829 log_fail "Onlining $disk failed"
1830 fi
1831 else
1832 log_fail "$disk is not a real dev"
1833 fi
1834 else
1835 log_fail "$disk failed to $state"
1836 fi
1837 fi
1838 }
1839
1840 #
1841 # Get the mountpoint of snapshot
1842 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1843 # as its mountpoint
1844 #
1845 function snapshot_mountpoint
1846 {
1847 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1848
1849 if [[ $dataset != *@* ]]; then
1850 log_fail "Invalid snapshot name '$dataset'."
1851 fi
1852
1853 typeset fs=${dataset%@*}
1854 typeset snap=${dataset#*@}
1855
1856 if [[ -z $fs || -z $snap ]]; then
1857 log_fail "Invalid snapshot name '$dataset'."
1858 fi
1859
1860 echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1861 }
1862
1863 #
1864 # Given a device and 'ashift' value verify it's correctly set on every label
1865 #
1866 function verify_ashift # device ashift
1867 {
1868 typeset device="$1"
1869 typeset ashift="$2"
1870
1871 zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
1872 if (ashift != $2)
1873 exit 1;
1874 else
1875 count++;
1876 } END {
1877 if (count != 4)
1878 exit 1;
1879 else
1880 exit 0;
1881 }'
1882
1883 return $?
1884 }
1885
1886 #
1887 # Given a pool and file system, this function will verify the file system
1888 # using the zdb internal tool. Note that the pool is exported and imported
1889 # to ensure it has consistent state.
1890 #
1891 function verify_filesys # pool filesystem dir
1892 {
1893 typeset pool="$1"
1894 typeset filesys="$2"
1895 typeset zdbout="/tmp/zdbout.$$"
1896
1897 shift
1898 shift
1899 typeset dirs=$@
1900 typeset search_path=""
1901
1902 log_note "Calling zdb to verify filesystem '$filesys'"
1903 zfs unmount -a > /dev/null 2>&1
1904 log_must zpool export $pool
1905
1906 if [[ -n $dirs ]] ; then
1907 for dir in $dirs ; do
1908 search_path="$search_path -d $dir"
1909 done
1910 fi
1911
1912 log_must zpool import $search_path $pool
1913
1914 zdb -cudi $filesys > $zdbout 2>&1
1915 if [[ $? != 0 ]]; then
1916 log_note "Output: zdb -cudi $filesys"
1917 cat $zdbout
1918 log_fail "zdb detected errors with: '$filesys'"
1919 fi
1920
1921 log_must zfs mount -a
1922 log_must rm -rf $zdbout
1923 }
1924
1925 #
1926 # Given a pool, this function lists all disks in the pool
1927 #
1928 function get_disklist # pool
1929 {
1930 typeset disklist=""
1931
1932 disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1933 grep -v "\-\-\-\-\-" | \
1934 egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1935
1936 echo $disklist
1937 }
1938
1939 #
1940 # Given a pool, this function lists all disks in the pool with their full
1941 # path (like "/dev/sda" instead of "sda").
1942 #
1943 function get_disklist_fullpath # pool
1944 {
1945 args="-P $1"
1946 get_disklist $args
1947 }
1948
1949
1950
1951 # /**
1952 # This function kills a given list of processes after a time period. We use
1953 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1954 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1955 # would be listed as FAIL, which we don't want: we're happy with stress tests
1956 # running for a certain amount of time, then finishing.
1957 #
1958 # @param $1 the time in seconds after which we should terminate these processes
1959 # @param $2..$n the processes we wish to terminate.
1960 # */
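# Example (illustrative): let two workers run for 5 minutes, then stop them:
#   stress_timeout 300 $pid1 $pid2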
1961 function stress_timeout
1962 {
1963 typeset -i TIMEOUT=$1
1964 shift
1965 typeset cpids="$@"
1966
1967 log_note "Waiting for child processes($cpids). " \
1968 "It could last dozens of minutes, please be patient ..."
1969 log_must sleep $TIMEOUT
1970
1971 log_note "Killing child processes after ${TIMEOUT} stress timeout."
1972 typeset pid
1973 for pid in $cpids; do
1974 ps -p $pid > /dev/null 2>&1
1975 if (($? == 0)); then
1976 log_must kill -USR1 $pid
1977 fi
1978 done
1979 }
1980
1981 #
1982 # Verify a given hotspare disk is inuse or avail
1983 #
1984 # Return 0 if the pool/disk matches the expected state, 1 otherwise
1985 #
1986 function check_hotspare_state # pool disk state{inuse,avail}
1987 {
1988 typeset pool=$1
1989 typeset disk=${2#$DEV_DSKDIR/}
1990 typeset state=$3
1991
1992 cur_state=$(get_device_state $pool $disk "spares")
1993
1994 if [[ $state != ${cur_state} ]]; then
1995 return 1
1996 fi
1997 return 0
1998 }
1999
2000 #
2001 # Wait until a hotspare transitions to a given state or times out.
2002 #
2003 # Return 0 when pool/disk matches expected state, 1 on timeout.
2004 #
2005 function wait_hotspare_state # pool disk state timeout
2006 {
2007 typeset pool=$1
2008 typeset disk=${2#$DEV_DSKDIR/}
2009 typeset state=$3
2010 typeset timeout=${4:-60}
2011 typeset -i i=0
2012
2013 while [[ $i -lt $timeout ]]; do
2014 if check_hotspare_state $pool $disk $state; then
2015 return 0
2016 fi
2017
2018 i=$((i+1))
2019 sleep 1
2020 done
2021
2022 return 1
2023 }
2024
2025 #
2026 # Verify a given slog disk is inuse or avail
2027 #
2028 # Return 0 if the pool/disk matches the expected state, 1 otherwise
2029 #
2030 function check_slog_state # pool disk state{online,offline,unavail}
2031 {
2032 typeset pool=$1
2033 typeset disk=${2#$DEV_DSKDIR/}
2034 typeset state=$3
2035
2036 cur_state=$(get_device_state $pool $disk "logs")
2037
2038 if [[ $state != ${cur_state} ]]; then
2039 return 1
2040 fi
2041 return 0
2042 }
2043
2044 #
2045 # Verify a given vdev disk is inuse or avail
2046 #
2047 # Return 0 if the pool/disk matches the expected state, 1 otherwise
2048 #
2049 function check_vdev_state # pool disk state{online,offline,unavail}
2050 {
2051 typeset pool=$1
2052 typeset disk=${2#$DEV_DSKDIR/}
2053 typeset state=$3
2054
2055 cur_state=$(get_device_state $pool $disk)
2056
2057 if [[ $state != ${cur_state} ]]; then
2058 return 1
2059 fi
2060 return 0
2061 }
2062
2063 #
2064 # Wait until a vdev transitions to a given state or times out.
2065 #
2066 # Return 0 when pool/disk matches expected state, 1 on timeout.
2067 #
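# Example (illustrative disk name):
#   wait_vdev_state $TESTPOOL sdb "ONLINE" 30 || log_fail "sdb never onlined"
#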
2068 function wait_vdev_state # pool disk state timeout
2069 {
2070 typeset pool=$1
2071 typeset disk=${2#$DEV_DSKDIR/}
2072 typeset state=$3
2073 typeset timeout=${4:-60}
2074 typeset -i i=0
2075
2076 while [[ $i -lt $timeout ]]; do
2077 if check_vdev_state $pool $disk $state; then
2078 return 0
2079 fi
2080
2081 i=$((i+1))
2082 sleep 1
2083 done
2084
2085 return 1
2086 }
2087
2088 #
2089 # Check the output of 'zpool status -v <pool>',
2090 # to see if the content of <token> contains the specified <keyword>.
2091 #
2092 # Return 0 if it does, 1 otherwise
2093 #
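# Example (illustrative): check the 'errors' line of the status output:
#   check_pool_status $TESTPOOL "errors" "No known data errors"
#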
2094 function check_pool_status # pool token keyword <verbose>
2095 {
2096 typeset pool=$1
2097 typeset token=$2
2098 typeset keyword=$3
2099 typeset verbose=${4:-false}
2100
2101 scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2102 ($1==token) {print $0}')
2103 if [[ $verbose == true ]]; then
2104 log_note $scan
2105 fi
2106 echo $scan | grep -i "$keyword" > /dev/null 2>&1
2107
2108 return $?
2109 }
2110
2111 #
2112 # The following 6 functions are instances of check_pool_status():
2113 # is_pool_resilvering - to check if the pool is resilver in progress
2114 # is_pool_resilvered - to check if the pool is resilver completed
2115 # is_pool_scrubbing - to check if the pool is scrub in progress
2116 # is_pool_scrubbed - to check if the pool is scrub completed
2117 # is_pool_scrub_stopped - to check if the pool is scrub stopped
2118 # is_pool_scrub_paused - to check if the pool has scrub paused
2119 #
2120 function is_pool_resilvering #pool <verbose>
2121 {
2122 check_pool_status "$1" "scan" "resilver in progress since " $2
2123 return $?
2124 }
2125
2126 function is_pool_resilvered #pool <verbose>
2127 {
2128 check_pool_status "$1" "scan" "resilvered " $2
2129 return $?
2130 }
2131
2132 function is_pool_scrubbing #pool <verbose>
2133 {
2134 check_pool_status "$1" "scan" "scrub in progress since " $2
2135 return $?
2136 }
2137
2138 function is_pool_scrubbed #pool <verbose>
2139 {
2140 check_pool_status "$1" "scan" "scrub repaired" $2
2141 return $?
2142 }
2143
2144 function is_pool_scrub_stopped #pool <verbose>
2145 {
2146 check_pool_status "$1" "scan" "scrub canceled" $2
2147 return $?
2148 }
2149
2150 function is_pool_scrub_paused #pool <verbose>
2151 {
2152 check_pool_status "$1" "scan" "scrub paused since " $2
2153 return $?
2154 }
2155
2156 #
2157 # Use create_pool()/destroy_pool() to clean up the information in
2158 # the given disks to avoid slice overlapping.
2159 #
2160 function cleanup_devices #vdevs
2161 {
2162 typeset pool="foopool$$"
2163
2164 if poolexists $pool ; then
2165 destroy_pool $pool
2166 fi
2167
2168 create_pool $pool $@
2169 destroy_pool $pool
2170
2171 return 0
2172 }
2173
2174 #/**
2175 # A function to find and locate free disks on a system or from given
2176 # disks as the parameter. It works by locating disks that are in use
2177 # as swap devices and dump devices, and also disks listed in /etc/vfstab
2178 #
2179 # $@ given disks to find which are free, default is all disks in
2180 # the test system
2181 #
2182 # @return a string containing the list of available disks
2183 #*/
2184 function find_disks
2185 {
2186 # Trust provided list, no attempt is made to locate unused devices.
2187 if is_linux; then
2188 echo "$@"
2189 return
2190 fi
2191
2192
2193 sfi=/tmp/swaplist.$$
2194 dmpi=/tmp/dumpdev.$$
2195 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2196
2197 swap -l > $sfi
2198 dumpadm > $dmpi 2>/dev/null
2199
2200 # write an awk script that can process the output of format
2201 # to produce a list of disks we know about. Note that we have
2202 # to escape "$2" so that the shell doesn't interpret it while
2203 # we're creating the awk script.
2204 # -------------------
2205 cat > /tmp/find_disks.awk <<EOF
2206 #!/bin/nawk -f
2207 BEGIN { FS="."; }
2208
2209 /^Specify disk/{
2210 searchdisks=0;
2211 }
2212
2213 {
2214 if (searchdisks && \$2 !~ "^$"){
2215 split(\$2,arr," ");
2216 print arr[1];
2217 }
2218 }
2219
2220 /^AVAILABLE DISK SELECTIONS:/{
2221 searchdisks=1;
2222 }
2223 EOF
2224 #---------------------
2225
2226 chmod 755 /tmp/find_disks.awk
2227 disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
2228 rm /tmp/find_disks.awk
2229
2230 unused=""
2231 for disk in $disks; do
2232 # Check for mounted
2233 grep "${disk}[sp]" /etc/mnttab >/dev/null
2234 (($? == 0)) && continue
2235 # Check for swap
2236 grep "${disk}[sp]" $sfi >/dev/null
2237 (($? == 0)) && continue
2238 # check for dump device
2239 grep "${disk}[sp]" $dmpi >/dev/null
2240 (($? == 0)) && continue
2241 # check to see if this disk hasn't been explicitly excluded
2242 # by a user-set environment variable
2243 echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
2244 (($? == 0)) && continue
2245 unused_candidates="$unused_candidates $disk"
2246 done
2247 rm $sfi
2248 rm $dmpi
2249
2250 # now just check to see if those disks do actually exist
2251 # by looking for a device pointing to the first slice in
2252 # each case. limit the number to max_finddisksnum
2253 count=0
2254 for disk in $unused_candidates; do
2255 if [ -b $DEV_DSKDIR/${disk}s0 ]; then
2256 if [ $count -lt $max_finddisksnum ]; then
2257 unused="$unused $disk"
2258 # do not impose limit if $@ is provided
2259 [[ -z $@ ]] && ((count = count + 1))
2260 fi
2261 fi
2262 done
2263
2264 # finally, return our disk list
2265 echo $unused
2266 }
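
#
# Illustrative usage of find_disks(); a sketch assuming $DISKS holds the
# candidate devices handed to the test run:
#
#	typeset free_disks=$(find_disks $DISKS)
#	verify_disk_count "$free_disks" 2
#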
2267
2268 #
2269 # Add specified user to specified group
2270 #
2271 # $1 group name
2272 # $2 user name
2273 # $3 base of the homedir (optional)
2274 #
2275 function add_user #<group_name> <user_name> <basedir>
2276 {
2277 typeset gname=$1
2278 typeset uname=$2
2279 typeset basedir=${3:-"/var/tmp"}
2280
2281 if ((${#gname} == 0 || ${#uname} == 0)); then
2282 log_fail "group name or user name are not defined."
2283 fi
2284
2285 log_must useradd -g $gname -d $basedir/$uname -m $uname
2286 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.profile
2287 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.bash_profile
2288 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.login
2289
2290 # Add new users to the same group as the command line utilities.
2291 # This allows them to be run out of the original user's home
2292 # directory as long as it is permissioned to be group readable.
2293 if is_linux; then
2294 cmd_group=$(stat --format="%G" $(which zfs))
2295 log_must usermod -a -G $cmd_group $uname
2296 fi
2297
2298 return 0
2299 }
2300
2301 #
2302 # Delete the specified user.
2303 #
2304 # $1 login name
2305 # $2 base of the homedir (optional)
2306 #
2307 function del_user #<logname> <basedir>
2308 {
2309 typeset user=$1
2310 typeset basedir=${2:-"/var/tmp"}
2311
2312 if ((${#user} == 0)); then
2313 log_fail "login name is necessary."
2314 fi
2315
2316 if id $user > /dev/null 2>&1; then
2317 log_must_retry "currently used" 5 userdel $user
2318 fi
2319
2320 [[ -d $basedir/$user ]] && rm -fr $basedir/$user
2321
2322 return 0
2323 }
2324
2325 #
2326 # Select valid gid and create specified group.
2327 #
2328 # $1 group name
2329 #
2330 function add_group #<group_name>
2331 {
2332 typeset group=$1
2333
2334 if ((${#group} == 0)); then
2335 log_fail "group name is necessary."
2336 fi
2337
2338 # Assign 100 as the base gid on non-Linux platforms; under Linux let
2339 # groupadd select the gid, since many distributions reserve 1000 and under.
2340 if is_linux; then
2341 groupadd $group > /dev/null 2>&1 && return 0
2342 return 1
2349 else
2350 typeset -i gid=100
2351 while true; do
2352 groupadd -g $gid $group > /dev/null 2>&1
2353 typeset -i ret=$?
2354 case $ret in
2355 0) return 0 ;;
2356 # The gid is not unique
2357 4) ((gid += 1)) ;;
2358 *) return 1 ;;
2359 esac
2360 done
2361 fi
2362 }
2363
2364 #
2365 # Delete the specified group.
2366 #
2367 # $1 group name
2368 #
2369 function del_group #<group_name>
2370 {
2371 typeset grp=$1
2372 if ((${#grp} == 0)); then
2373 log_fail "group name is necessary."
2374 fi
2375
2376 if is_linux; then
2377 getent group $grp > /dev/null 2>&1
2378 typeset -i ret=$?
2379 case $ret in
2380 # Group does not exist.
2381 2) return 0 ;;
2382 # Group exists; remove it.
2383 0) log_must groupdel $grp ;;
2384 *) return 1 ;;
2385 esac
2386 else
2387 groupmod -n $grp $grp > /dev/null 2>&1
2388 typeset -i ret=$?
2389 case $ret in
2390 # Group does not exist.
2391 6) return 0 ;;
2392 # Name already exists as a group name
2393 9) log_must groupdel $grp ;;
2394 *) return 1 ;;
2395 esac
2396 fi
2397
2398 return 0
2399 }
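
#
# Illustrative lifecycle of the user/group helpers above; a sketch using
# hypothetical group and user names:
#
#	log_must add_group zfsgrp
#	log_must add_user zfsgrp zfsuser
#	user_run zfsuser "zfs list"
#	log_must del_user zfsuser
#	log_must del_group zfsgrp
#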
2400
2401 #
2402 # This function will return true if it's safe to destroy the pool passed
2403 # as argument 1. It checks for pools based on zvols and files, and also
2404 # files contained in a pool that may have a different mountpoint.
2405 #
2406 function safe_to_destroy_pool { # $1 the pool name
2407
2408 typeset pool=""
2409 typeset DONT_DESTROY=""
2410
2411 # We check that by deleting the $1 pool, we're not
2412 # going to pull the rug out from other pools. Do this
2413 # by looking at all other pools, ensuring that they
2414 # aren't built from files or zvols contained in this pool.
2415
2416 for pool in $(zpool list -H -o name)
2417 do
2418 ALTMOUNTPOOL=""
2419
2420 # this is a list of the file vdevs in $pool whose paths
2421 # contain /$1/, i.e. vdev files stored in the pool being checked
2422 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2423 awk '{print $1}')
2424
2425 # this is a list of the zvols that make up the pool
2426 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2427 | awk '{print $1}')
2428
2429 # also want to determine if it's a file-based pool using an
2430 # alternate mountpoint...
2431 POOL_FILE_DIRS=$(zpool status -v $pool | \
2432 grep / | awk '{print $1}' | \
2433 awk -F/ '{print $2}' | grep -v "dev")
2434
2435 for pooldir in $POOL_FILE_DIRS
2436 do
2437 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2438 grep "${pooldir}$" | awk '{print $1}')
2439
2440 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2441 done
2442
2443
2444 if [ ! -z "$ZVOLPOOL" ]
2445 then
2446 DONT_DESTROY="true"
2447 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2448 fi
2449
2450 if [ ! -z "$FILEPOOL" ]
2451 then
2452 DONT_DESTROY="true"
2453 log_note "Pool $pool is built from $FILEPOOL on $1"
2454 fi
2455
2456 if [ ! -z "$ALTMOUNTPOOL" ]
2457 then
2458 DONT_DESTROY="true"
2459 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2460 fi
2461 done
2462
2463 if [ -z "${DONT_DESTROY}" ]
2464 then
2465 return 0
2466 else
2467 log_note "Warning: it is not safe to destroy $1!"
2468 return 1
2469 fi
2470 }
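
#
# Illustrative usage; a sketch assuming a test pool named $TESTPOOL:
#
#	if safe_to_destroy_pool $TESTPOOL; then
#		destroy_pool $TESTPOOL
#	fi
#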
2471
2472 #
2473 # Get the available ZFS compression options
2474 # $1 option type zfs_set|zfs_compress
2475 #
2476 function get_compress_opts
2477 {
2478 typeset COMPRESS_OPTS
2479 typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2480 gzip-6 gzip-7 gzip-8 gzip-9"
2481
2482 if [[ $1 == "zfs_compress" ]] ; then
2483 COMPRESS_OPTS="on lzjb"
2484 elif [[ $1 == "zfs_set" ]] ; then
2485 COMPRESS_OPTS="on off lzjb"
2486 fi
2487 typeset valid_opts="$COMPRESS_OPTS"
2488 zfs get 2>&1 | grep gzip >/dev/null 2>&1
2489 if [[ $? -eq 0 ]]; then
2490 valid_opts="$valid_opts $GZIP_OPTS"
2491 fi
2492 echo "$valid_opts"
2493 }
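
#
# Illustrative usage; a sketch assuming a hypothetical dataset
# $TESTPOOL/$TESTFS:
#
#	for opt in $(get_compress_opts zfs_set); do
#		log_must zfs set compression=$opt $TESTPOOL/$TESTFS
#	done
#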
2494
2495 #
2496 # Verify zfs operation with -p option works as expected
2497 # $1 operation, value could be create, clone or rename
2498 # $2 dataset type, value could be fs or vol
2499 # $3 dataset name
2500 # $4 new dataset name
2501 #
2502 function verify_opt_p_ops
2503 {
2504 typeset ops=$1
2505 typeset datatype=$2
2506 typeset dataset=$3
2507 typeset newdataset=$4
2508
2509 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2510 log_fail "$datatype is not supported."
2511 fi
2512
2513 # check parameters accordingly
2514 case $ops in
2515 create)
2516 newdataset=$dataset
2517 dataset=""
2518 if [[ $datatype == "vol" ]]; then
2519 ops="create -V $VOLSIZE"
2520 fi
2521 ;;
2522 clone)
2523 if [[ -z $newdataset ]]; then
2524 log_fail "newdataset should not be empty" \
2525 "when ops is $ops."
2526 fi
2527 log_must datasetexists $dataset
2528 log_must snapexists $dataset
2529 ;;
2530 rename)
2531 if [[ -z $newdataset ]]; then
2532 log_fail "newdataset should not be empty" \
2533 "when ops is $ops."
2534 fi
2535 log_must datasetexists $dataset
2536 log_mustnot snapexists $dataset
2537 ;;
2538 *)
2539 log_fail "$ops is not supported."
2540 ;;
2541 esac
2542
2543 # make sure the upper level filesystem does not exist
2544 if datasetexists ${newdataset%/*} ; then
2545 log_must zfs destroy -rRf ${newdataset%/*}
2546 fi
2547
2548 # without -p option, operation will fail
2549 log_mustnot zfs $ops $dataset $newdataset
2550 log_mustnot datasetexists $newdataset ${newdataset%/*}
2551
2552 # with -p option, operation should succeed
2553 log_must zfs $ops -p $dataset $newdataset
2554 block_device_wait
2555
2556 if ! datasetexists $newdataset ; then
2557 log_fail "-p option does not work for $ops"
2558 fi
2559
2560 # when $ops is create or clone, redoing the operation should still return zero
2561 if [[ $ops != "rename" ]]; then
2562 log_must zfs $ops -p $dataset $newdataset
2563 fi
2564
2565 return 0
2566 }
2567
2568 #
2569 # Get configuration of pool
2570 # $1 pool name
2571 # $2 config name
2572 #
2573 function get_config
2574 {
2575 typeset pool=$1
2576 typeset config=$2
2577 typeset alt_root
2578
2579 if ! poolexists "$pool" ; then
2580 return 1
2581 fi
2582 alt_root=$(zpool list -H $pool | awk '{print $NF}')
2583 if [[ $alt_root == "-" ]]; then
2584 value=$(zdb -C $pool | grep "$config:" | awk -F: \
2585 '{print $2}')
2586 else
2587 value=$(zdb -e $pool | grep "$config:" | awk -F: \
2588 '{print $2}')
2589 fi
2590 if [[ -n $value ]] ; then
2591 value=${value#\'}
2592 value=${value%\'}
2593 fi
2594 echo $value
2595
2596 return 0
2597 }
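
#
# Illustrative usage; a sketch reading the pool guid of a hypothetical
# pool $TESTPOOL from its cached configuration:
#
#	typeset guid=$(get_config $TESTPOOL pool_guid)
#	log_note "pool guid: $guid"
#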
2598
2599 #
2600 # Private function. Randomly select one item from the arguments.
2601 #
2602 # $1 count
2603 # $2-n string
2604 #
2605 function _random_get
2606 {
2607 typeset cnt=$1
2608 shift
2609
2610 typeset str="$@"
2611 typeset -i ind
2612 ((ind = RANDOM % cnt + 1))
2613
2614 typeset ret=$(echo "$str" | cut -f $ind -d ' ')
2615 echo $ret
2616 }
2617
2618 #
2619 # Randomly select one item from the arguments, including the NONE string
2620 #
2621 function random_get_with_non
2622 {
2623 typeset -i cnt=$#
2624 ((cnt += 1))
2625
2626 _random_get "$cnt" "$@"
2627 }
2628
2629 #
2630 # Randomly select one item from the arguments, excluding the NONE string
2631 #
2632 function random_get
2633 {
2634 _random_get "$#" "$@"
2635 }
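
#
# Illustrative usage of the random selection helpers; a sketch:
#
#	typeset opt=$(random_get $(get_compress_opts zfs_set))
#	typeset maybe_none=$(random_get_with_non $(get_compress_opts zfs_set))
#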
2636
2637 #
2638 # Detect if the current system supports slog
2639 #
2640 function verify_slog_support
2641 {
2642 typeset dir=/tmp/disk.$$
2643 typeset pool=foo.$$
2644 typeset vdev=$dir/a
2645 typeset sdev=$dir/b
2646
2647 mkdir -p $dir
2648 mkfile $MINVDEVSIZE $vdev $sdev
2649
2650 typeset -i ret=0
2651 if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
2652 ret=1
2653 fi
2654 rm -r $dir
2655
2656 return $ret
2657 }
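
#
# Illustrative usage; a sketch for skipping a test on systems without
# slog support:
#
#	verify_slog_support || log_unsupported "This system doesn't support slog"
#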
2658
2659 #
2660 # The function will generate a dataset name of at least the specified length
2661 # $1, the length of the name
2662 # $2, the base string to construct the name
2663 #
2664 function gen_dataset_name
2665 {
2666 typeset -i len=$1
2667 typeset basestr="$2"
2668 typeset -i baselen=${#basestr}
2669 typeset -i iter=0
2670 typeset l_name=""
2671
2672 if ((len % baselen == 0)); then
2673 ((iter = len / baselen))
2674 else
2675 ((iter = len / baselen + 1))
2676 fi
2677 while ((iter > 0)); do
2678 l_name="${l_name}$basestr"
2679
2680 ((iter -= 1))
2681 done
2682
2683 echo $l_name
2684 }
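
#
# Illustrative usage; a sketch building a long dataset name (the result is
# padded up to a whole multiple of the base string) under a hypothetical
# pool $TESTPOOL:
#
#	typeset long_name=$(gen_dataset_name 200 "abcdefg")
#	log_must zfs create $TESTPOOL/$long_name
#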
2685
2686 #
2687 # Get cksum tuple of dataset
2688 # $1 dataset name
2689 #
2690 # sample zdb output:
2691 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2692 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2693 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2694 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2695 function datasetcksum
2696 {
2697 typeset cksum
2698 sync
2699 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2700 | awk -F= '{print $7}')
2701 echo $cksum
2702 }
2703
2704 #
2705 # Get cksum of file
2706 # $1 file path
2707 #
2708 function checksum
2709 {
2710 typeset cksum
2711 cksum=$(cksum $1 | awk '{print $1}')
2712 echo $cksum
2713 }
2714
2715 #
2716 # Get the given disk/slice state from the specific field of the pool
2717 #
2718 function get_device_state #pool disk field("", "spares","logs")
2719 {
2720 typeset pool=$1
2721 typeset disk=${2#$DEV_DSKDIR/}
2722 typeset field=${3:-$pool}
2723
2724 state=$(zpool status -v "$pool" 2>/dev/null | \
2725 nawk -v device=$disk -v pool=$pool -v field=$field \
2726 'BEGIN {startconfig=0; startfield=0; }
2727 /config:/ {startconfig=1}
2728 (startconfig==1) && ($1==field) {startfield=1; next;}
2729 (startfield==1) && ($1==device) {print $2; exit;}
2730 (startfield==1) &&
2731 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2732 echo $state
2733 }
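
#
# Illustrative usage; a sketch assuming a hypothetical pool $TESTPOOL with
# $DISK1 configured as a spare:
#
#	state=$(get_device_state $TESTPOOL $DISK1 "spares")
#	[[ $state == "AVAIL" ]] || log_fail "unexpected spare state: $state"
#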
2734
2735
2736 #
2737 # print the given directory filesystem type
2738 #
2739 # $1 directory name
2740 #
2741 function get_fstype
2742 {
2743 typeset dir=$1
2744
2745 if [[ -z $dir ]]; then
2746 log_fail "Usage: get_fstype <directory>"
2747 fi
2748
2749 #
2750 # $ df -n /
2751 # / : ufs
2752 #
2753 df -n $dir | awk '{print $3}'
2754 }
2755
2756 #
2757 # Given a disk, label it to VTOC regardless of what label was on the disk
2758 # $1 disk
2759 #
2760 function labelvtoc
2761 {
2762 typeset disk=$1
2763 if [[ -z $disk ]]; then
2764 log_fail "The disk name is unspecified."
2765 fi
2766 typeset label_file=/var/tmp/labelvtoc.$$
2767 typeset arch=$(uname -p)
2768
2769 if is_linux; then
2770 log_note "Currently unsupported by the test framework"
2771 return 1
2772 fi
2773
2774 if [[ $arch == "i386" ]]; then
2775 echo "label" > $label_file
2776 echo "0" >> $label_file
2777 echo "" >> $label_file
2778 echo "q" >> $label_file
2779 echo "q" >> $label_file
2780
2781 fdisk -B $disk >/dev/null 2>&1
2782 # wait a while for fdisk to finish
2783 sleep 60
2784 elif [[ $arch == "sparc" ]]; then
2785 echo "label" > $label_file
2786 echo "0" >> $label_file
2787 echo "" >> $label_file
2788 echo "" >> $label_file
2789 echo "" >> $label_file
2790 echo "q" >> $label_file
2791 else
2792 log_fail "unknown arch type"
2793 fi
2794
2795 format -e -s -d $disk -f $label_file
2796 typeset -i ret_val=$?
2797 rm -f $label_file
2798 #
2799 # wait for format to finish
2800 #
2801 sleep 60
2802 if ((ret_val != 0)); then
2803 log_fail "unable to label $disk as VTOC."
2804 fi
2805
2806 return 0
2807 }
2808
2809 #
2810 # check if the system was installed as zfsroot or not
2811 # return: 0 if true, otherwise false
2812 #
2813 function is_zfsroot
2814 {
2815 df -n / | grep zfs > /dev/null 2>&1
2816 return $?
2817 }
2818
2819 #
2820 # get the root filesystem name if it's a zfsroot system.
2821 #
2822 # return: root filesystem name
2823 function get_rootfs
2824 {
2825 typeset rootfs=""
2826
2827 if ! is_linux; then
2828 rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
2829 /etc/mnttab)
2830 fi
2831 if [[ -z "$rootfs" ]]; then
2832 log_fail "Can not get rootfs"
2833 fi
2834 zfs list $rootfs > /dev/null 2>&1
2835 if (($? == 0)); then
2836 echo $rootfs
2837 else
2838 log_fail "This is not a zfsroot system."
2839 fi
2840 }
2841
2842 #
2843 # get the rootfs's pool name
2844 # return:
2845 # rootpool name
2846 #
2847 function get_rootpool
2848 {
2849 typeset rootfs=""
2850 typeset rootpool=""
2851
2852 if ! is_linux; then
2853 rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
2854 /etc/mnttab)
2855 fi
2856 if [[ -z "$rootfs" ]]; then
2857 log_fail "Can not get rootpool"
2858 fi
2859 zfs list $rootfs > /dev/null 2>&1
2860 if (($? == 0)); then
2861 rootpool=$(echo $rootfs | awk -F/ '{print $1}')
2862 echo $rootpool
2863 else
2864 log_fail "This is not a zfsroot system."
2865 fi
2866 }
2867
2868 #
2869 # Check if the given device is a physical device
2870 #
2871 function is_physical_device #device
2872 {
2873 typeset device=${1#$DEV_DSKDIR}
2874 device=${device#$DEV_RDSKDIR}
2875
2876 if is_linux; then
2877 [[ -b "$DEV_DSKDIR/$device" ]] && \
2878 [[ -f /sys/module/loop/parameters/max_part ]]
2879 return $?
2880 else
2881 echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
2882 return $?
2883 fi
2884 }
2885
2886 #
2887 # Check if the given device is a real device (ie SCSI device)
2888 #
2889 function is_real_device #disk
2890 {
2891 typeset disk=$1
2892 [[ -z $disk ]] && log_fail "No argument for disk given."
2893
2894 if is_linux; then
2895 lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
2896 egrep disk >/dev/null
2897 return $?
2898 fi
2899 }
2900
2901 #
2902 # Check if the given device is a loop device
2903 #
2904 function is_loop_device #disk
2905 {
2906 typeset disk=$1
2907 [[ -z $disk ]] && log_fail "No argument for disk given."
2908
2909 if is_linux; then
2910 lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
2911 egrep loop >/dev/null
2912 return $?
2913 fi
2914 }
2915
2916 #
2917 # Check if the given device is a multipath device and if there is a symbolic
2918 # link to a device mapper and to a disk
2919 # Currently no support for dm devices alone without multipath
2920 #
2921 function is_mpath_device #disk
2922 {
2923 typeset disk=$1
2924 [[ -z $disk ]] && log_fail "No argument for disk given."
2925
2926 if is_linux; then
2927 lsblk $DEV_MPATHDIR/$disk -o TYPE 2>/dev/null | \
2928 egrep mpath >/dev/null
2929 if (($? == 0)); then
2930 readlink $DEV_MPATHDIR/$disk > /dev/null 2>&1
2931 return $?
2932 else
2933 return 1
2934 fi
2935 fi
2936 }
2937
2938 # Set the slice prefix for disk partitioning depending
2939 # on whether the device is a real, multipath, or loop device.
2940 # Currently all disks have to be of the same type, so only
2941 # checks first disk to determine slice prefix.
2942 #
2943 function set_slice_prefix
2944 {
2945 typeset disk
2946 typeset -i i=0
2947
2948 if is_linux; then
2949 while (( i < $DISK_ARRAY_NUM )); do
2950 disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
2951 if ( is_mpath_device $disk ) && [[ -z $(echo $disk | awk 'substr($1,18,1)\
2952 ~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
2953 export SLICE_PREFIX=""
2954 return 0
2955 elif ( is_mpath_device $disk || is_loop_device \
2956 $disk ); then
2957 export SLICE_PREFIX="p"
2958 return 0
2959 else
2960 log_fail "$disk not supported for partitioning."
2961 fi
2962 (( i = i + 1))
2963 done
2964 fi
2965 }
2966
2967 #
2968 # Set the directory path of the listed devices in $DISK_ARRAY_NUM
2969 # Currently all disks have to be of the same type, so only
2970 # checks first disk to determine device directory
2971 # default = /dev (linux)
2972 # real disk = /dev (linux)
2973 # multipath device = /dev/mapper (linux)
2974 #
2975 function set_device_dir
2976 {
2977 typeset disk
2978 typeset -i i=0
2979
2980 if is_linux; then
2981 while (( i < $DISK_ARRAY_NUM )); do
2982 disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
2983 if is_mpath_device $disk; then
2984 export DEV_DSKDIR=$DEV_MPATHDIR
2985 return 0
2986 else
2987 export DEV_DSKDIR=$DEV_RDSKDIR
2988 return 0
2989 fi
2990 (( i = i + 1))
2991 done
2992 else
2993 export DEV_DSKDIR=$DEV_RDSKDIR
2994 fi
2995 }
2996
2997 #
2998 # Get the directory path of given device
2999 #
3000 function get_device_dir #device
3001 {
3002 typeset device=$1
3003
3004 if ! is_physical_device $device ; then
3005 if [[ $device != "/" ]]; then
3006 device=${device%/*}
3007 fi
3008 if [[ -b "$DEV_DSKDIR/$device" ]]; then
3009 device="$DEV_DSKDIR"
3010 fi
3011 echo $device
3012 else
3013 echo "$DEV_DSKDIR"
3014 fi
3015 }
3016
3017 #
3018 # Get persistent name for given disk
3019 #
3020 function get_persistent_disk_name #device
3021 {
3022 typeset device=$1
3023 typeset dev_id
3024
3025 if is_linux; then
3026 if is_real_device $device; then
3027 dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
3028 | egrep disk/by-id | nawk '{print $2; exit}' \
3029 | nawk -F / '{print $3}')"
3030 echo $dev_id
3031 elif is_mpath_device $device; then
3032 dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
3033 | egrep disk/by-id/dm-uuid \
3034 | nawk '{print $2; exit}' \
3035 | nawk -F / '{print $3}')"
3036 echo $dev_id
3037 else
3038 echo $device
3039 fi
3040 else
3041 echo $device
3042 fi
3043 }
3044
3045 #
3046 # Load scsi_debug module with specified parameters
3047 #
3048 function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
3049 {
3050 typeset devsize=$1
3051 typeset hosts=$2
3052 typeset tgts=$3
3053 typeset luns=$4
3054
3055 [[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
3056 [[ -z $luns ]] && log_fail "Arguments invalid or missing"
3057
3058 if is_linux; then
3059 modprobe -n scsi_debug
3060 if (($? != 0)); then
3061 log_unsupported "Platform does not have scsi_debug"
3062 "module"
3063 fi
3064 lsmod | egrep scsi_debug > /dev/null
3065 if (($? == 0)); then
3066 log_fail "scsi_debug module already installed"
3067 else
3068 log_must modprobe scsi_debug dev_size_mb=$devsize \
3069 add_host=$hosts num_tgts=$tgts max_luns=$luns
3070 block_device_wait
3071 lsscsi | egrep scsi_debug > /dev/null
3072 if (($? == 1)); then
3073 log_fail "scsi_debug module install failed"
3074 fi
3075 fi
3076 fi
3077 }
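
#
# Illustrative usage; a sketch with hypothetical size/host/target/lun
# parameters, unloading the module again when done (Linux only):
#
#	load_scsi_debug 128 1 1 1
#	log_must modprobe -r scsi_debug
#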
3078
3079 #
3080 # Get the package name
3081 #
3082 function get_package_name
3083 {
3084 typeset dirpath=${1:-$STC_NAME}
3085
3086 echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
3087 }
3088
3089 #
3090 # Get the number of words in a whitespace-separated string
3091 #
3092 function get_word_count
3093 {
3094 echo $1 | wc -w
3095 }
3096
3097 #
3098 # To verify that the required number of disks is given
3099 #
3100 function verify_disk_count
3101 {
3102 typeset -i min=${2:-1}
3103
3104 typeset -i count=$(get_word_count "$1")
3105
3106 if ((count < min)); then
3107 log_untested "A minimum of $min disks is required to run." \
3108 " You specified $count disk(s)"
3109 fi
3110 }
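
#
# Illustrative usage; a sketch from a test setup script:
#
#	verify_disk_count "$DISKS" 2
#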
3111
3112 function ds_is_volume
3113 {
3114 typeset type=$(get_prop type $1)
3115 [[ $type = "volume" ]] && return 0
3116 return 1
3117 }
3118
3119 function ds_is_filesystem
3120 {
3121 typeset type=$(get_prop type $1)
3122 [[ $type = "filesystem" ]] && return 0
3123 return 1
3124 }
3125
3126 function ds_is_snapshot
3127 {
3128 typeset type=$(get_prop type $1)
3129 [[ $type = "snapshot" ]] && return 0
3130 return 1
3131 }
3132
3133 #
3134 # Check if Trusted Extensions are installed and enabled
3135 #
3136 function is_te_enabled
3137 {
3138 svcs -H -o state labeld 2>/dev/null | grep "enabled" > /dev/null
3139 if (($? != 0)); then
3140 return 1
3141 else
3142 return 0
3143 fi
3144 }
3145
3146 # Utility function to determine if a system has multiple cpus.
3147 function is_mp
3148 {
3149 if is_linux; then
3150 (($(nproc) > 1))
3151 else
3152 (($(psrinfo | wc -l) > 1))
3153 fi
3154
3155 return $?
3156 }
3157
3158 function get_cpu_freq
3159 {
3160 if is_linux; then
3161 lscpu | awk '/CPU MHz/ { print $3 }'
3162 else
3163 psrinfo -v 0 | awk '/processor operates at/ {print $6}'
3164 fi
3165 }
3166
3167 # Run the given command as the user provided.
3168 function user_run
3169 {
3170 typeset user=$1
3171 shift
3172
3173 log_note "user:$user $@"
3174 eval su - \$user -c \"$@\" > /tmp/out 2>/tmp/err
3175 return $?
3176 }
3177
3178 #
3179 # Check if the pool contains the specified vdevs
3180 #
3181 # $1 pool
3182 # $2..n <vdev> ...
3183 #
3184 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3185 # vdevs is not in the pool, and 2 if pool name is missing.
3186 #
3187 function vdevs_in_pool
3188 {
3189 typeset pool=$1
3190 typeset vdev
3191
3192 if [[ -z $pool ]]; then
3193 log_note "Missing pool name."
3194 return 2
3195 fi
3196
3197 shift
3198
3199 typeset tmpfile=$(mktemp)
3200 zpool list -Hv "$pool" >$tmpfile
3201 for vdev in $@; do
3202 grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
3203 [[ $? -ne 0 ]] && return 1
3204 done
3205
3206 rm -f $tmpfile
3207
3208 return 0;
3209 }
3210
3211 function get_max
3212 {
3213 typeset -l i max=$1
3214 shift
3215
3216 for i in "$@"; do
3217 max=$((max > i ? max : i))
3218 done
3219
3220 echo $max
3221 }
3222
3223 function get_min
3224 {
3225 typeset -l i min=$1
3226 shift
3227
3228 for i in "$@"; do
3229 min=$((min < i ? min : i))
3230 done
3231
3232 echo $min
3233 }
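
#
# Illustrative usage of get_max()/get_min(); a sketch:
#
#	typeset hi=$(get_max 17 4 21 9)		# echoes 21
#	typeset lo=$(get_min 17 4 21 9)		# echoes 4
#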
3234
3235 #
3236 # Generate a random number between 1 and the argument.
3237 #
3238 function random
3239 {
3240 typeset max=$1
3241 echo $(( ($RANDOM % $max) + 1 ))
3242 }
3243
3244 # Write data that can be compressed into a directory
3245 function write_compressible
3246 {
3247 typeset dir=$1
3248 typeset megs=$2
3249 typeset nfiles=${3:-1}
3250 typeset bs=${4:-1024k}
3251 typeset fname=${5:-file}
3252
3253 [[ -d $dir ]] || log_fail "No directory: $dir"
3254
3255 # Under Linux fio is not currently used since its behavior can
3256 # differ significantly across versions. This includes missing
3257 # command line options and cases where the --buffer_compress_*
3258 # options fail to behave as expected.
3259 if is_linux; then
3260 typeset file_bytes=$(to_bytes $megs)
3261 typeset bs_bytes=4096
3262 typeset blocks=$(($file_bytes / $bs_bytes))
3263
3264 for (( i = 0; i < $nfiles; i++ )); do
3265 truncate -s $file_bytes $dir/$fname.$i
3266
3267 # Write every third block to get 66% compression.
3268 for (( j = 0; j < $blocks; j += 3 )); do
3269 dd if=/dev/urandom of=$dir/$fname.$i \
3270 seek=$j bs=$bs_bytes count=1 \
3271 conv=notrunc >/dev/null 2>&1
3272 done
3273 done
3274 else
3275 log_must eval "fio \
3276 --name=job \
3277 --fallocate=0 \
3278 --minimal \
3279 --randrepeat=0 \
3280 --buffer_compress_percentage=66 \
3281 --buffer_compress_chunk=4096 \
3282 --directory=$dir \
3283 --numjobs=$nfiles \
3284 --nrfiles=$nfiles \
3285 --rw=write \
3286 --bs=$bs \
3287 --filesize=$megs \
3288 --filename_format='$fname.\$jobnum' >/dev/null"
3289 fi
3290 }
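
#
# Illustrative usage; a sketch writing two compressible 10m files into a
# hypothetical test directory $TESTDIR:
#
#	write_compressible $TESTDIR 10m 2
#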
3291
3292 function get_objnum
3293 {
3294 typeset pathname=$1
3295 typeset objnum
3296
3297 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3298 objnum=$(stat -c %i $pathname)
3299 echo $objnum
3300 }
3301
3302 #
3303 # Sync data to the pool
3304 #
3305 # $1 pool name
3306 # $2 boolean to force uberblock (and config including zpool cache file) update
3307 #
3308 function sync_pool #pool <force>
3309 {
3310 typeset pool=${1:-$TESTPOOL}
3311 typeset force=${2:-false}
3312
3313 if [[ $force == true ]]; then
3314 log_must zpool sync -f $pool
3315 else
3316 log_must zpool sync $pool
3317 fi
3318
3319 return 0
3320 }
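
#
# Illustrative usage; a sketch forcing an uberblock (and config) update
# on a hypothetical pool $TESTPOOL:
#
#	sync_pool $TESTPOOL true
#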
3321
3322 #
3323 # Wait for the zpool 'freeing' property to drop to zero.
3324 #
3325 # $1 pool name
3326 #
3327 function wait_freeing #pool
3328 {
3329 typeset pool=${1:-$TESTPOOL}
3330 while true; do
3331 [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3332 log_must sleep 1
3333 done
3334 }
3335
3336 #
3337 # Wait for every device replace operation to complete
3338 #
3339 # $1 pool name
3340 #
3341 function wait_replacing #pool
3342 {
3343 typeset pool=${1:-$TESTPOOL}
3344 while true; do
3345 [[ "" == "$(zpool status $pool |
3346 awk '/replacing-[0-9]+/ {print $1}')" ]] && break
3347 log_must sleep 1
3348 done
3349 }
3350
3351 #
3352 # Wait for a pool to be scrubbed
3353 #
3354 # $1 pool name
3355 # $2 number of seconds to wait (optional)
3356 #
3357 # Returns true when pool has been scrubbed, or false if there's a timeout or if
3358 # no scrub was done.
3359 #
3360 function wait_scrubbed
3361 {
3362 typeset pool=${1:-$TESTPOOL}
3363 typeset iter=${2:-10}
3364 for i in {1..$iter} ; do
3365 if is_pool_scrubbed $pool ; then
3366 return 0
3367 fi
3368 sleep 1
3369 done
3370 return 1
3371 }
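
#
# Illustrative usage; a sketch assuming a test pool named $TESTPOOL:
#
#	log_must zpool scrub $TESTPOOL
#	log_must wait_scrubbed $TESTPOOL 30
#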
3372
3373 #
3374 # Setup custom environment for the ZED.
3375 #
3376 # $@ Optional list of zedlets to run under zed.
3377 function zed_setup
3378 {
3379 if ! is_linux; then
3380 return
3381 fi
3382
3383 if [[ ! -d $ZEDLET_DIR ]]; then
3384 log_must mkdir $ZEDLET_DIR
3385 fi
3386
3387 if [[ ! -e $VDEVID_CONF ]]; then
3388 log_must touch $VDEVID_CONF
3389 fi
3390
3391 if [[ -e $VDEVID_CONF_ETC ]]; then
3392 log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3393 fi
3394 EXTRA_ZEDLETS=$@
3395
3396 # Create a symlink for /etc/zfs/vdev_id.conf file.
3397 log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3398
3399 # Setup minimal ZED configuration. Individual test cases should
3400 # add additional ZEDLETs as needed for their specific test.
3401 log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3402 log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
3403
3404 # Scripts must only be user writable.
3405 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3406 saved_umask=$(umask)
3407 log_must umask 0022
3408 for i in $EXTRA_ZEDLETS ; do
3409 log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3410 done
3411 log_must umask $saved_umask
3412 fi
3413
3414 # Customize the zed.rc file to enable the full debug log.
3415 log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
3416 echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3417
3418 }
3419
3420 #
3421 # Cleanup custom ZED environment.
3422 #
3423 # $@ Optional list of zedlets to remove from our test zed.d directory.
3424 function zed_cleanup
3425 {
3426 if ! is_linux; then
3427 return
3428 fi
3429 EXTRA_ZEDLETS=$@
3430
3431 log_must rm -f ${ZEDLET_DIR}/zed.rc
3432 log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
3433 log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
3434 log_must rm -f ${ZEDLET_DIR}/all-debug.sh
3435 log_must rm -f ${ZEDLET_DIR}/state
3436
3437 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3438 for i in $EXTRA_ZEDLETS ; do
3439 log_must rm -f ${ZEDLET_DIR}/$i
3440 done
3441 fi
3442 log_must rm -f $ZED_LOG
3443 log_must rm -f $ZED_DEBUG_LOG
3444 log_must rm -f $VDEVID_CONF_ETC
3445 log_must rm -f $VDEVID_CONF
3446 rmdir $ZEDLET_DIR
3447 }
3448
3449 #
3450 # Check if ZED is currently running, if not start ZED.
3451 #
3452 function zed_start
3453 {
3454 if ! is_linux; then
3455 return
3456 fi
3457
3458 # ZEDLET_DIR=/var/tmp/zed
3459 if [[ ! -d $ZEDLET_DIR ]]; then
3460 log_must mkdir $ZEDLET_DIR
3461 fi
3462
3463 # Verify the ZED is not already running.
3464 pgrep -x zed > /dev/null
3465 if (($? == 0)); then
3466 log_fail "ZED already running"
3467 fi
3468
3469 log_note "Starting ZED"
3470 # run ZED in the background and redirect foreground logging
3471 # output to $ZED_LOG.
3472 log_must truncate -s 0 $ZED_DEBUG_LOG
3473 log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
3474 "-s $ZEDLET_DIR/state 2>$ZED_LOG &"
3475
3476 return 0
3477 }
3478
3479 #
3480 # Kill ZED process
3481 #
3482 function zed_stop
3483 {
3484 if ! is_linux; then
3485 return
3486 fi
3487
3488 log_note "Stopping ZED"
3489 if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
3490 zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
3491 kill $zedpid
3492 while ps -p $zedpid > /dev/null; do
3493 sleep 1
3494 done
3495 rm -f ${ZEDLET_DIR}/zed.pid
3496 fi
3497 return 0
3498 }
3499
3500 #
3501 # Drain all zevents
3502 #
3503 function zed_events_drain
3504 {
3505 while [ $(zpool events -H | wc -l) -ne 0 ]; do
3506 sleep 1
3507 zpool events -c >/dev/null
3508 done
3509 }
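
#
# Illustrative ZED lifecycle; a sketch of how a Linux test case typically
# drives the helpers above:
#
#	zed_events_drain
#	zed_setup
#	zed_start
#	# exercise the pool here so the ZED observes events
#	zed_stop
#	zed_cleanup
#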
3510
3511 #
3512 # Check if the provided device is actively being used as a swap device.
3513 #
3514 function is_swap_inuse
3515 {
3516 typeset device=$1
3517
3518 if [[ -z $device ]] ; then
3519 log_note "No device specified."
3520 return 1
3521 fi
3522
3523 if is_linux; then
3524 swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
3525 else
3526 swap -l | grep -w $device > /dev/null 2>&1
3527 fi
3528
3529 return $?
3530 }
3531
3532 #
3533 # Setup a swap device using the provided device.
3534 #
3535 function swap_setup
3536 {
3537 typeset swapdev=$1
3538
3539 if is_linux; then
3540 log_must eval "mkswap $swapdev > /dev/null 2>&1"
3541 log_must swapon $swapdev
3542 else
3543 log_must swap -a $swapdev
3544 fi
3545
3546 return 0
3547 }
3548
3549 #
3550 # Cleanup a swap device on the provided device.
3551 #
3552 function swap_cleanup
3553 {
3554 typeset swapdev=$1
3555
3556 if is_swap_inuse $swapdev; then
3557 if is_linux; then
3558 log_must swapoff $swapdev
3559 else
3560 log_must swap -d $swapdev
3561 fi
3562 fi
3563
3564 return 0
3565 }
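
#
# Illustrative usage; a sketch assuming a hypothetical zvol $TESTPOOL/$TESTVOL:
#
#	swap_setup $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL
#	swap_cleanup $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL
#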
3566
3567 #
3568 # Set a global system tunable (64-bit value)
3569 #
3570 # $1 tunable name
3571 # $2 tunable value
3572 #
3573 function set_tunable64
3574 {
3575 set_tunable_impl "$1" "$2" Z
3576 }
3577
3578 #
3579 # Set a global system tunable (32-bit value)
3580 #
3581 # $1 tunable name
3582 # $2 tunable value
3583 #
3584 function set_tunable32
3585 {
3586 set_tunable_impl "$1" "$2" W
3587 }
3588
3589 function set_tunable_impl
3590 {
3591 typeset tunable="$1"
3592 typeset value="$2"
3593 typeset mdb_cmd="$3"
3594 typeset module="${4:-zfs}"
3595
3596 [[ -z "$tunable" ]] && return 1
3597 [[ -z "$value" ]] && return 1
3598 [[ -z "$mdb_cmd" ]] && return 1
3599
3600 case "$(uname)" in
3601 Linux)
3602 typeset zfs_tunables="/sys/module/$module/parameters"
3603 [[ -w "$zfs_tunables/$tunable" ]] || return 1
3604 echo -n "$value" > "$zfs_tunables/$tunable"
3605 return "$?"
3606 ;;
3607 SunOS)
3608 [[ "$module" -eq "zfs" ]] || return 1
3609 echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
3610 return "$?"
3611 ;;
3612 esac
3613 }
3614
3615 #
3616 # Get a global system tunable
3617 #
3618 # $1 tunable name
3619 #
3620 function get_tunable
3621 {
3622 get_tunable_impl "$1"
3623 }
3624
3625 function get_tunable_impl
3626 {
3627 typeset tunable="$1"
3628 typeset module="${2:-zfs}"
3629
3630 [[ -z "$tunable" ]] && return 1
3631
3632 case "$(uname)" in
3633 Linux)
3634 typeset zfs_tunables="/sys/module/$module/parameters"
3635 [[ -f "$zfs_tunables/$tunable" ]] || return 1
3636 cat $zfs_tunables/$tunable
3637 return "$?"
3638 ;;
3639 SunOS)
3640 [[ "$module" -eq "zfs" ]] || return 1
3641 ;;
3642 esac
3643
3644 return 1
3645 }
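
#
# Illustrative usage; a sketch saving, setting, and restoring a module
# tunable (hypothetical tunable name):
#
#	typeset saved=$(get_tunable zfs_scan_suspend_progress)
#	log_must set_tunable32 zfs_scan_suspend_progress 1
#	log_must set_tunable32 zfs_scan_suspend_progress $saved
#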
3646
3647 #
3648 # Get actual devices used by the pool (e.g. linux sdb1, not sdb).
3649 #
3650 function get_pool_devices #testpool #devdir
3651 {
3652 typeset testpool=$1
3653 typeset devdir=$2
3654 typeset out=""
3655
3656 if is_linux; then
3657 out=$(zpool status -P $testpool | grep ${devdir} | awk '{print $1}')
3658 out=$(echo $out | sed -e "s|${devdir}/||g" | tr '\n' ' ')
3659 fi
3660 echo $out
3661 }