]> git.proxmox.com Git - mirror_zfs-debian.git/blob - tests/zfs-tests/include/libtest.shlib
New upstream version 0.7.9
[mirror_zfs-debian.git] / tests / zfs-tests / include / libtest.shlib
1 #
2 # CDDL HEADER START
3 #
4 # The contents of this file are subject to the terms of the
5 # Common Development and Distribution License (the "License").
6 # You may not use this file except in compliance with the License.
7 #
8 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 # or http://www.opensolaris.org/os/licensing.
10 # See the License for the specific language governing permissions
11 # and limitations under the License.
12 #
13 # When distributing Covered Code, include this CDDL HEADER in each
14 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 # If applicable, add the following below this CDDL HEADER, with the
16 # fields enclosed by brackets "[]" replaced with your own identifying
17 # information: Portions Copyright [yyyy] [name of copyright owner]
18 #
19 # CDDL HEADER END
20 #
21
22 #
23 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 # Use is subject to license terms.
25 # Copyright (c) 2012, 2016 by Delphix. All rights reserved.
26 # Copyright 2016 Nexenta Systems, Inc.
27 # Copyright (c) 2017 Lawrence Livermore National Security, LLC.
28 # Copyright (c) 2017 Datto Inc.
29 #
30
# Pull in the test-framework logging API (log_must, log_fail, ...) and
# shared math helpers; both paths come from the STF environment.
. ${STF_TOOLS}/include/logapi.shlib
. ${STF_SUITE}/include/math.shlib

#
# Apply constrained path when available. This is required since the
# PATH may have been modified by sudo's secure_path behavior.
#
if [ -n "$STF_PATH" ]; then
	PATH="$STF_PATH"
fi
41
# Linux kernel version comparison function
#
# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
#
# Prints a single sortable integer (major*10000 + minor*100 + patch), e.g.
# used as: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
#
function linux_version
{
	typeset ver="$1"

	# No argument: fall back to the running kernel's x.y.z string.
	[[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")

	typeset maj min pat
	maj=$(echo $ver | cut -d '.' -f 1)
	min=$(echo $ver | cut -d '.' -f 2)
	pat=$(echo $ver | cut -d '.' -f 3)

	# Missing components default to zero ("4.10" -> patch level 0).
	echo $((${maj:-0} * 10000 + ${min:-0} * 100 + ${pat:-0}))
}
64
# Determine if this is a Linux test system
#
# Return 0 if platform Linux, 1 if otherwise

function is_linux
{
	# The compound test's own status is the function's result.
	[[ $(uname -o) == "GNU/Linux" ]]
}
77
# Determine if this is a 32-bit system
#
# Return 0 if platform is 32-bit, 1 if otherwise

function is_32bit
{
	# getconf LONG_BIT reports the native word size of userland.
	[[ $(getconf LONG_BIT) == "32" ]]
}
90
# Determine if kmemleak is enabled
#
# Return 0 if kmemleak is enabled, 1 if otherwise

function is_kmemleak
{
	# The debugfs node only exists when the running kernel has the
	# kmemleak facility compiled in and enabled.
	is_linux && [[ -e /sys/kernel/debug/kmemleak ]]
}
103
# Determine whether a dataset is mounted
#
# $1 dataset name (or an absolute mountpoint path)
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error

function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# A leading '/' means we were given a mountpoint;
			# otherwise match against the dataset name.
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			(($ret != 0)) && return $ret

			# df output is "<dir> (<name>) ..."; accept a match
			# on either the mountpoint or the device name.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
		ext*)
			out=$(df -t $fstype $1 2>/dev/null)
			return $?
		;;
		zvol)
			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
				link=$(readlink -f $ZVOL_DEVDIR/$1)
				# Anchor with a trailing space so a short
				# device name cannot match a longer one
				# (e.g. /dev/zd1 vs /dev/zd10).
				[[ -n "$link" ]] && \
				    mount | grep -q "^$link " && \
					return 0
			fi
		;;
	esac

	return 1
}
158
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs

function mounted
{
	# Collapse ismounted's tri-state result (0/1/2) to a boolean.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
170
# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs

function unmounted
{
	typeset st

	ismounted $1 $2
	st=$?
	# Only the explicit "unmounted" status (1) counts as success;
	# errors (2) are reported as failure, like the mounted case.
	if ((st == 1)); then
		return 0
	fi
	return 1
}
182
# split line on ","
#
# $1 - line to split
#
# Prints the input with every comma replaced by a space.

function splitline
{
	# Parameter expansion does the substitution without forking sed;
	# $1 is deliberately left unquoted to preserve the original
	# echo-based whitespace handling.
	echo ${1//,/ }
}
191
192 function default_setup
193 {
194 default_setup_noexit "$@"
195
196 log_pass
197 }
198
#
# Given a list of disks, setup storage pools and datasets.
#
# $1 disklist - whitespace-separated disks for the pool (left unquoted
#    below on purpose so the list word-splits into zpool arguments)
# $2 container - when non-empty, also create $TESTPOOL/$TESTCTR/$TESTFS1
# $3 volume - when non-empty, also create $TESTPOOL/$TESTVOL
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3
	log_note begin default_setup_noexit

	if is_global_zone; then
		# Start from a clean slate: remove any pre-existing
		# $TESTPOOL and its stale mountpoint directory first.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		# Non-global zones cannot create pools; re-import instead.
		reexport_pool
	fi

	rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1 || \
		    log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
		    log_unresolved Could not create $TESTDIR1

		# The container itself stays unmounted (canmount=off);
		# only its child filesystem is mounted, at $TESTDIR1.
		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
			# Wait for the zvol device node to appear.
			block_device_wait
		else
			# Zones cannot host block devices; fall back to a
			# plain filesystem of the same name.
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
247
248 #
249 # Given a list of disks, setup a storage pool, file system and
250 # a container.
251 #
252 function default_container_setup
253 {
254 typeset disklist=$1
255
256 default_setup "$disklist" "true"
257 }
258
259 #
260 # Given a list of disks, setup a storage pool,file system
261 # and a volume.
262 #
263 function default_volume_setup
264 {
265 typeset disklist=$1
266
267 default_setup "$disklist" "" "true"
268 }
269
270 #
271 # Given a list of disks, setup a storage pool,file system,
272 # a container and a volume.
273 #
274 function default_container_volume_setup
275 {
276 typeset disklist=$1
277
278 default_setup "$disklist" "true" "true"
279 }
280
#
# Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
# filesystem
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	# Guard against the defaults themselves being empty/unset.
	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	# Creating over an existing snapshot is a test-logic error.
	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
304
305 #
306 # Create a clone from a snapshot, default clone name is $TESTCLONE.
307 #
308 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
309 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
310 #
311 function create_clone # snapshot clone
312 {
313 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
314 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
315
316 [[ -z $snap ]] && \
317 log_fail "Snapshot name is undefined."
318 [[ -z $clone ]] && \
319 log_fail "Clone name is undefined."
320
321 log_must zfs clone $snap $clone
322 }
323
#
# Create a bookmark of the given snapshot.  Defaultly create a bookmark on
# filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	# Guard against the defaults themselves being empty/unset.
	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	# Both the source snapshot must exist and the target bookmark
	# must not, otherwise the test logic is broken.
	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
	    log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
352
353 function default_mirror_setup
354 {
355 default_mirror_setup_noexit $1 $2 $3
356
357 log_pass
358 }
359
360 #
361 # Given a pair of disks, set up a storage pool and dataset for the mirror
362 # @parameters: $1 the primary side of the mirror
363 # $2 the secondary side of the mirror
364 # @uses: ZPOOL ZFS TESTPOOL TESTFS
365 function default_mirror_setup_noexit
366 {
367 readonly func="default_mirror_setup_noexit"
368 typeset primary=$1
369 typeset secondary=$2
370
371 [[ -z $primary ]] && \
372 log_fail "$func: No parameters passed"
373 [[ -z $secondary ]] && \
374 log_fail "$func: No secondary partition passed"
375 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
376 log_must zpool create -f $TESTPOOL mirror $@
377 log_must zfs create $TESTPOOL/$TESTFS
378 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
379 }
380
#
# create a number of mirrors.
# We create a number($1) of 2 way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted
# @parameters: $1 the number of mirrors to create
#              $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	while ((nmirrors > 0)); do
		# Each pool is named $TESTPOOL<n> and consumes two disks;
		# fail loudly if the caller supplied too few devices.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}
401
#
# create a number of raidz pools.
# We create a number($1) of 2 raidz pools  using the pairs of disks named
# on the command line. These pools  are *not* mounted
# @parameters: $1 the number of pools to create
#              $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	while ((nraidzs > 0)); do
		# Each pool is named $TESTPOOL<n> and consumes two disks;
		# fail loudly if the caller supplied too few devices.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}
422
423 #
424 # Destroy the configured testpool mirrors.
425 # the mirrors are of the form ${TESTPOOL}{number}
426 # @uses: ZPOOL ZFS TESTPOOL
427 function destroy_mirrors
428 {
429 default_cleanup_noexit
430
431 log_pass
432 }
433
#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	# Remove any stale mountpoint directory, then build the pool from
	# the complete disk list.  (Previously only "$1 $2 $3" was passed,
	# silently ignoring any disks past the third.)
	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
454
455 #
456 # Common function used to cleanup storage pools and datasets.
457 #
458 # Invoked at the start of the test suite to ensure the system
459 # is in a known state, and also at the end of each set of
460 # sub-tests to ensure errors from one set of tests doesn't
461 # impact the execution of the next set.
462
463 function default_cleanup
464 {
465 default_cleanup_noexit
466
467 log_pass
468 }
469
# Destroy every destroyable pool (global zone) or reset the delegated
# datasets (local zone), then remove $TESTDIR and any partitions left
# on a multipath first disk.  Does not call log_pass.
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		# Build an egrep alternation of pool names that must
		# survive cleanup ($KEEP, e.g. the root pool).
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		# NOTE(review): ${ALL_POOLS} is unquoted in the test
		# below, so multiple pool names hand [ extra operands;
		# confirm intent before tightening the quoting.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Re-list after each destroy: removing one
				# pool may make another destroyable.
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		# In a local zone only datasets beneath the delegated
		# $ZONE_POOL/$ZONE_CTR[0-4] containers may be destroyed.
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			destroy_dataset "$fs" "-Rf"
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# Only reset checksum when encryption is
				# absent or off; encrypted datasets pin it.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
				    [[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
	    log_must rm -rf $TESTDIR

	# Multipath devices must end up unpartitioned for later tests.
	disk1=${DISKS%% *}
	if is_mpath_device $disk1; then
		delete_partitions
	fi
}
550
551
#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
	# Pools can only be manipulated from the global zone.
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	# Destroy the child filesystem, then the container recursively.
	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
574
#
# Common function used to cleanup snapshot of file system or volume. Default to
# delete the file system's snapshot
#
# $1 snapshot name
#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not existed."
	fi

	#
	# For the sake of the value which come from 'get_prop' is not equal
	# to the really mountpoint when the snapshot is unmounted. So, firstly
	# check and make sure this snapshot's been mounted in current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $snap failed."
	fi

	destroy_dataset "$snap"
	# Remove the now-stale mountpoint directory, if one was recorded.
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}
605
#
# Common function used to cleanup clone.
#
# $1 clone name
#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not existed."
	fi

	# With the same reason in destroy_snapshot
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $clone failed."
	fi

	destroy_dataset "$clone"
	# Remove the now-stale mountpoint directory, if one was recorded.
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}
631
#
# Common function used to cleanup bookmark of file system or volume.  Default
# to delete the file system's bookmark.
#
# $1 bookmark name, defaults to $TESTPOOL/$TESTFS#$TESTBKMARK
#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fixed: the message previously interpolated the undefined
		# variable $bkmarkp, so the bookmark name was blank.
		log_fail "'$bkmark' does not existed."
	fi

	destroy_dataset "$bkmark"
}
648
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name

function snapexists
{
	# The listing's own exit status is the function's result.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
658
#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
	# The listing's own exit status is the function's result.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
669
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
#	$1 dataset who's property is being set
#	$2 property to set
#	$3 value to set property to
# @return:
#	0 if the property could be set.
#	non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	# Capture stderr too so the failure can be logged verbatim.
	output=$(zfs set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}
702
#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#	0 if the dataset has been altered.
#	1 if no pool name was passed in.
#	2 if the dataset could not be found.
#	3 if the dataset could not have it's properties set.
#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# NOTE(review): $(zfs list) word-splits the entire listing
	# (including header words); exact-match comparison still finds
	# the dataset name among them.
	for confset in $(zfs list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	# Only apply the properties the suite configuration defines.
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
		    return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
		    return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
742
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
	# Evaluate the arguments as an arithmetic expression; a false
	# (zero) result aborts the test with the expression as message.
	if ! (($@)); then
		log_fail "$@"
	fi
}
753
#
# Function to format partition size of a disk
# Given a disk cxtxdx reduces all partitions
# to 0 size
#
# $1 whole disk name
#
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset i

	if is_linux; then
		# Writing a fresh GPT label implicitly discards every
		# existing partition; collapse any double slash first.
		DSK=$DEV_DSKDIR/$diskname
		DSK=$(echo $DSK | sed -e "s|//|/|g")
		log_must parted $DSK -s -- mklabel gpt
		blockdev --rereadpt $DSK 2>/dev/null
		block_device_wait
	else
		# Slice 2 (the traditional whole-disk slice) is skipped.
		for i in 0 1 3 4 5 6 7
		do
			log_must set_partition $i "" 0mb $diskname
		done
	fi

	return 0
}
779
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements eg. 100mb 3gb
#
# NOTE: This entire interface is problematic for the Linux parted utilty
# which requires the end of the partition to be specified.  It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4

	if is_linux; then
		if [[ -z $size || -z $disk ]]; then
			log_fail "The size or disk name is unspecified."
		fi

		# Reduce the size to whole megabytes.  Check the unit
		# suffix to detect gigabytes; the old test of character
		# position 1 broke for multi-digit sizes such as "10gb".
		typeset size_mb=${size%%[mMgG]}
		size_mb=${size_mb%%[mMgG][bB]}
		case $size in
		*[gG]|*[gG][bB])
			((size_mb = size_mb * 1024))
			;;
		esac

		# Create GPT partition table when setting slice 0 or
		# when the device doesn't already contain a GPT label.
		parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
		typeset ret_val=$?
		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
			parted $DEV_DSKDIR/$disk -s -- mklabel gpt
			if [[ $? -ne 0 ]]; then
				log_note "Failed to create GPT partition table on $disk"
				return 1
			fi
		fi

		# When no start is given align on the first cylinder.
		if [[ -z "$start" ]]; then
			start=1
		fi

		# Determine the cylinder size for the device and using
		# that calculate the end offset in cylinders.
		typeset -i cly_size_kb=0
		cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
		    unit cyl print | head -3 | tail -1 | \
		    awk -F '[:k.]' '{print $4}')
		((end = (size_mb * 1024 / cly_size_kb) + start))

		parted $DEV_DSKDIR/$disk -s -- \
		    mkpart part$slicenum ${start}cyl ${end}cyl
		if [[ $? -ne 0 ]]; then
			log_note "Failed to create partition $slicenum on $disk"
			return 1
		fi

		blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
		block_device_wait
	else
		if [[ -z $slicenum || -z $size || -z $disk ]]; then
			log_fail "The slice, size or disk name is unspecified."
		fi

		typeset format_file=/var/tmp/format_in.$$

		# Drive the interactive format(1M) utility with a canned
		# command script.
		echo "partition" >$format_file
		echo "$slicenum" >> $format_file
		echo "" >> $format_file
		echo "" >> $format_file
		echo "$start" >> $format_file
		echo "$size" >> $format_file
		echo "label" >> $format_file
		echo "" >> $format_file
		echo "q" >> $format_file
		echo "q" >> $format_file

		format -e -s -d $disk -f $format_file
	fi

	# Status of the last step of whichever branch ran above.
	typeset ret_val=$?
	# $format_file is only set on the non-Linux path.
	[[ -n $format_file ]] && rm -f $format_file
	if [[ $ret_val -ne 0 ]]; then
		log_note "Unable to format $disk slice $slicenum to $size"
		return 1
	fi
	return 0
}
872
#
# Delete all partitions on all disks - this is specifically for the use of multipath
# devices which currently can only be used in the test suite as raw/un-partitioned
# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
#
# Globals used: DISKS, DISK, DISKSARRAY, DISK_ARRAY_NUM, SLICE_PREFIX,
#               MAX_PARTITIONS, DEV_DSKDIR
#
function delete_partitions
{
	typeset -i j=1

	# Derive the disk count/list from $DISKS when not already set.
	if [[ -z $DISK_ARRAY_NUM ]]; then
		DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
	fi
	if [[ -z $DISKSARRAY ]]; then
		DISKSARRAY=$DISKS
	fi

	if is_linux; then
		if (( $DISK_ARRAY_NUM == 1 )); then
			while ((j < MAX_PARTITIONS)); do
				# "parted rm" failing (status 1) means
				# partition j does not exist; verify the
				# kernel shows nothing left, then stop.
				parted $DEV_DSKDIR/$DISK -s rm $j \
				    > /dev/null 2>&1
				if (( $? == 1 )); then
					lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 1 )); then
						log_note "Partitions for $DISK should be deleted"
					else
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
					return 0
				else
					# Removal claimed success; make sure
					# the partition really disappeared.
					lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 0 )); then
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
				fi
				((j = j+1))
			done
		else
			for disk in `echo $DISKSARRAY`; do
				while ((j < MAX_PARTITIONS)); do
					parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
					if (( $? == 1 )); then
						lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 1 )); then
							log_note "Partitions for $disk should be deleted"
						else
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
						# j=7 ends this disk's loop
						# early (presumably
						# MAX_PARTITIONS <= 8 —
						# TODO confirm).
						j=7
					else
						lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 0 )); then
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
					fi
					((j = j+1))
				done
				j=1
			done
		fi
	fi
	return 0
}
936
#
# Get the end cyl of the given slice
#
# $1 disk name
# $2 slice/partition number
# Prints the first cylinder available after that slice.
#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	if is_linux; then
		# Pull the "end" column of partition "part<slice>" from
		# parted's cylinder-unit listing.
		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
		    grep "part${slice}" | \
		    awk '{print $3}' | \
		    sed 's,cyl,,')
		((endcyl = (endcyl + 1)))
	else
		# Normalize to the bare cXtXdX device name.
		disk=${disk#/dev/dsk/}
		disk=${disk#/dev/rdsk/}
		disk=${disk%s*}

		# Sectors per cylinder, used to convert prtvtoc's sector
		# offsets to cylinders.
		typeset -i ratio=0
		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		    grep "sectors\/cylinder" | \
		    awk '{print $2}')

		if ((ratio == 0)); then
			return
		fi

		# Column 6 of the slice's prtvtoc row is its last sector.
		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		    nawk -v token="$slice" '{if ($1==token) print $6}')

		((endcyl = (endcyl + 1) / ratio))
	fi

	echo $endcyl
}
976
977
#
# Given a size,disk and total slice number,  this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	while ((i < $total_slices)); do
		if ! is_linux; then
			# Slice 2 is the traditional whole-disk slice on
			# illumos; never format it.
			if ((i == 2)); then
				((i = i + 1))
				continue
			fi
		fi
		# Each new slice starts where the previous one ended
		# ($cyl is empty for the first slice).
		log_must set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}
1004
#
# This function continues to write to a filenum number of files into dirnum
# number of directories until either file_write returns an error or the
# maximum number of files per directory have been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
#               non 0 on error
#
# Where :
#	destdir:    is the directory where everything is to be created under
#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
#	filenum:    the maximum number of files per subdirectory
#	bytes:	    number of bytes to write
#	num_writes: numer of types to write out bytes
#	data:	    the data that will be written
#
#	E.g.
#	file_fs /testdir 20 25 1024 256 0
#
# Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1	# loop sentinel; cleared to end the fill
	typeset -i idirnum=0	# index of directory currently being filled
	typeset -i fn=0		# file index within that directory
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# Stop once the directory quota is reached (dirnum < 0
		# means unlimited).
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# Any write failure (e.g. ENOSPC) ends the fill; the
		# failing status is returned to the caller.
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# NOTE(review): fn is compared after the write, so each
		# directory actually receives filenum + 1 files — confirm
		# whether callers rely on this before changing it.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
1066
#
# Simple function to get the specified property. If unable to
# get the property then exits.
#
# Note property is in 'parsable' format (-p)
#
# $1 property name
# $2 dataset name
# Prints the property value on stdout; returns 1 on lookup failure.
#
function get_prop # property dataset
{
	typeset prop_val
	typeset prop=$1
	typeset dataset=$2

	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for dataset " \
		    "$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
1089
#
# Simple function to get the specified property of pool. If unable to
# get the property then exits.
#
# Note property is in 'parsable' format (-p)
#
# $1 property name
# $2 pool name
# Prints the property value on stdout; returns 1 on failure.
#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
		    awk '{print $3}')
		# NOTE(review): $? here reflects only the final awk of the
		# pipeline, so a zpool-get failure is not detected by
		# this check.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			    "$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo "$prop_val"
	return 0
}
1118
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name

function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# The query's own exit status is the function's result.
	zpool get name "$pool" > /dev/null 2>&1
}
1135
# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	# Fail fast with the query's status on the first missing dataset.
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || \
		    return $?
	done

	return 0
}
1154
# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	# A successful listing means the dataset exists - report failure.
	for ds in "$@"; do
		zfs list -H -t filesystem,snapshot,volume $ds > /dev/null 2>&1 \
		    && return 1
	done

	return 0
}
1173
#
# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name is first translated to its mountpoint, since
	# shares are matched by path.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	if is_linux; then
		# Linux share wrapper: exported path is the first column.
		for mtpt in `share | awk '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	fi

	# illumos share(1M): exported path is the second column.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared; surface the NFS server state to aid debugging.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1220
#
# Given a dataset name determine if it is shared via SMB.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared_smb
{
	typeset fs=$1
	typeset mtpt

	if datasetnonexists "$fs" ; then
		return 1
	else
		# Samba usershare names replace '/' with '_'.
		fs=$(echo $fs | sed 's@/@_@g')
	fi

	if is_linux; then
		for mtpt in `net usershare list | awk '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	else
		# No SMB-share query implemented for non-Linux platforms.
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi
}
1249
#
# Given a mountpoint, determine if it is not shared via NFS.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	# Simply invert is_shared's result.
	if is_shared $1; then
		return 1
	fi

	return 0
}
1266
1267 #
1268 # Given a dataset determine if it is not shared via SMB.
1269 #
1270 # Returns 0 if not shared, 1 otherwise.
1271 #
#
# Given a dataset determine if it is not shared via SMB.
# Returns 0 if not shared, 1 otherwise.
#
function not_shared_smb
{
	typeset fs=$1

	# Simply invert the is_shared_smb result.
	if is_shared_smb $fs; then
		return 1
	fi
	return 0
}
1283
1284 #
1285 # Helper function to unshare a mountpoint.
1286 #
#
# Helper function to unshare a mountpoint.
# Only issues 'zfs unshare' when the fs is currently shared (NFS or SMB).
#
function unshare_fs #fs
{
	typeset fs=$1

	if is_shared $fs || is_shared_smb $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1298
1299 #
1300 # Helper function to share a NFS mountpoint.
1301 #
#
# Helper function to share a NFS mountpoint.
# No-op when the fs is already shared.
#
function share_nfs #fs
{
	typeset fs=$1

	# Already shared: nothing to do on either platform.
	if is_shared $fs; then
		return 0
	fi

	if is_linux; then
		log_must share "*:$fs"
	else
		log_must share -F nfs $fs
	fi

	return 0
}
1320
1321 #
1322 # Helper function to unshare a NFS mountpoint.
1323 #
#
# Helper function to unshare a NFS mountpoint.
# No-op when the fs is not currently shared.
#
function unshare_nfs #fs
{
	typeset fs=$1

	if is_shared $fs; then
		if is_linux; then
			log_must unshare -u "*:$fs"
		else
			log_must unshare -F nfs $fs
		fi
	fi

	return 0
}
1342
1343 #
1344 # Helper function to show NFS shares.
1345 #
#
# Helper function to show NFS shares.
#
function showshares_nfs
{
	# Pick the platform-appropriate listing command, then run it.
	typeset cmd="share -F nfs"
	is_linux && cmd="share -v"

	$cmd

	return 0
}
1356
1357 #
1358 # Helper function to show SMB shares.
1359 #
#
# Helper function to show SMB shares.
#
function showshares_smb
{
	# Pick the platform-appropriate listing command, then run it.
	typeset cmd="share -F smb"
	is_linux && cmd="net usershare list"

	$cmd

	return 0
}
1370
1371 #
1372 # Check NFS server status and trigger it online.
1373 #
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	# On Linux the framework expects the NFS server to be running
	# already; nothing is started here.
	if is_linux; then
		log_note "NFS server must started prior to running test framework."
		return
	fi

	# illumos/Solaris: sharing a real directory is what transitions
	# the SMF nfs/server instance to online permanently.
	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		timeout=10
		# NOTE(review): 'timeout' without '$' works because the
		# operands of [[ ... -ne ... ]] are evaluated arithmetically;
		# the loop polls until the '*' transition marker disappears
		# or 10 seconds elapse.
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1425
1426 #
1427 # To verify whether calling process is in global zone
1428 #
1429 # Return 0 if in global zone, 1 in non-global zone
1430 #
#
# To verify whether calling process is in global zone.
# Return 0 if in global zone, 1 in non-global zone.
#
function is_global_zone
{
	# Linux has no zones; every process counts as global.
	is_linux && return 0

	typeset cur_zone=$(zonename 2>/dev/null)
	# The test itself yields the desired 0/1 status.
	[[ $cur_zone == "global" ]]
}
1443
1444 #
1445 # Verify whether test is permitted to run from
1446 # global zone, local zone, or both
1447 #
1448 # $1 zone limit, could be "global", "local", or "both"(no limit)
1449 #
1450 # Return 0 if permitted, otherwise exit with log_unsupported
1451 #
#
# Verify whether test is permitted to run from global zone, local zone,
# or both.
#
# $1 zone limit, could be "global", "local", or "both" (no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported.
#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No restriction given: always runnable.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
		global|both)
			;;
		local)
			log_unsupported "Test is unable to run from " \
			    "global zone."
			;;
		*)
			log_note "Warning: unknown limit $limit - " \
			    "use both."
			;;
		esac
	else
		case $limit in
		local|both)
			;;
		global)
			log_unsupported "Test is unable to run from " \
			    "local zone."
			;;
		*)
			log_note "Warning: unknown limit $limit - " \
			    "use both."
			;;
		esac

		# Local-zone datasets may need remounting before use.
		reexport_pool
	fi

	return 0
}
1486
# Return 0 if the pool is created successfully or already exists; $? otherwise
1488 # Note: In local zones, this function should return 0 silently.
1489 #
1490 # $1 - pool name
1491 # $2-n - [keyword] devs_list
1492
function create_pool #pool devs_list
{
	# Only the pool component of a dataset name is used.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Recreate from scratch if a pool of this name already exists.
	poolexists $pool && destroy_pool $pool

	if is_global_zone ; then
		# Remove any stale mountpoint directory first.
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1515
# Return 0 if the pool is destroyed successfully; 1 if it does not exist
1517 # Note: In local zones, this function should return 0 silently.
1518 #
1519 # $1 - pool name
1520 # Destroy pool with the given parameters.
1521
function destroy_pool #pool
{
	# Only the pool component of a dataset name is used.
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# In a local zone there is nothing to destroy; succeed silently.
	is_global_zone || return 0

	if ! poolexists "$pool" ; then
		log_note "Pool does not exist. ($pool)"
		return 1
	fi

	mtpt=$(get_prop mountpoint "$pool")

	# At times, syseventd/udev activity can cause attempts
	# to destroy a pool to fail with EBUSY. We retry a few
	# times allowing failures before requiring the destroy
	# to succeed.
	log_must_busy zpool destroy -f $pool

	# Clean up the now-orphaned mountpoint directory.
	[[ -d $mtpt ]] && log_must rm -rf $mtpt

	return 0
}
1552
# Return 0 if the dataset is destroyed successfully; 1 if it does not exist
1554 # Note: In local zones, this function should return 0 silently.
1555 #
1556 # $1 - dataset name
1557 # $2 - custom arguments for zfs destroy
1558 # Destroy dataset with the given parameters.
1559
function destroy_dataset #dataset #args
{
	typeset dataset=$1
	typeset mtpt
	typeset args=${2:-""}

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	# In a local zone there is nothing to destroy; succeed silently.
	is_global_zone || return 0

	if ! datasetexists "$dataset" ; then
		log_note "Dataset does not exist. ($dataset)"
		return 1
	fi

	mtpt=$(get_prop mountpoint "$dataset")
	# Retry on EBUSY caused by transient udev/syseventd activity.
	log_must_busy zfs destroy $args $dataset

	# Clean up the now-orphaned mountpoint directory.
	[[ -d $mtpt ]] && log_must rm -rf $mtpt

	return 0
}
1586
1587 #
1588 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1589 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1590 # and a zvol device to the zone.
1591 #
1592 # $1 zone name
1593 # $2 zone root directory prefix
1594 # $3 zone ip
1595 #
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol
	block_device_wait

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	# Remove any stale zone directory, then create a fresh one with
	# the 0700 mode that zoneadm requires for a zonepath.
	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the 5 container datasets to the zone.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file so the zone boots without interactive
	# system-identification prompts.
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo "terminal=dtterm" >> $sysidcfg
	echo "network_interface=primary {" >> $sysidcfg
	echo "hostname=$zone_name" >> $sysidcfg
	echo "}" >> $sysidcfg
	echo "name_service=NONE" >> $sysidcfg
	echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo "security_policy=NONE" >> $sysidcfg
	echo "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1700
1701 #
1702 # Reexport TESTPOOL & TESTPOOL(1-4)
1703 #
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Point TESTPOOL, TESTPOOL1..TESTPOOL4 at the per-zone container
	# datasets and mount any that are not already mounted. The eval
	# forms are needed to build/read the numbered variable names
	# (TESTPOOL1, TESTPOOL2, ...) dynamically.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1724
1725 #
1726 # Verify a given disk or pool state
1727 #
1728 # Return 0 is pool/disk matches expected state, 1 otherwise
1729 #
#
# Verify a given disk or pool state.
# Return 0 if pool/disk matches expected state, 1 otherwise.
#
function check_state # pool disk state{online,offline,degraded}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset status

	[[ -z $pool ]] || [[ -z $state ]] \
	    && log_fail "Arguments invalid or missing"

	# With no disk argument, check only the overall pool health;
	# otherwise match the per-device line from 'zpool status'.
	if [[ -z $disk ]]; then
		status=$(zpool get -H -o value health $pool)
	else
		status=$(zpool status -v $pool | grep "$disk")
	fi

	echo "$status" | grep -i "$state" > /dev/null 2>&1

	return $?
}
1750
1751 #
1752 # Cause a scan of all scsi host adapters by default
1753 #
1754 # $1 optional host number
1755 #
function scan_scsi_hosts
{
	typeset hostnum=${1}

	if is_linux; then
		if [[ -z $hostnum ]]; then
			# No host given: rescan every SCSI host adapter by
			# writing the wildcard '- - -' to its sysfs scan node.
			for host in /sys/class/scsi_host/host*; do
				log_must eval "echo '- - -' > $host/scan"
			done
		else
			# NOTE(review): this first command merely echoes the
			# sysfs path to /dev/null; it does not touch the scan
			# file and looks like a leftover/no-op -- confirm.
			log_must eval \
			    "echo /sys/class/scsi_host/host$hostnum/scan" \
			    > /dev/null
			# Rescan only the requested host adapter.
			log_must eval \
			    "echo '- - -' > /sys/class/scsi_host/host$hostnum/scan"
		fi
	fi
}
1774 #
1775 # Wait for newly created block devices to have their minors created.
1776 #
#
# Wait for newly created block devices to have their minors created.
#
function block_device_wait
{
	# Only Linux needs to wait for udev to settle.
	is_linux || return 0

	udevadm trigger
	udevadm settle
}
1784
1785 #
1786 # Online or offline a disk on the system
1787 #
1788 # First checks state of disk. Test will fail if disk is not properly onlined
1789 # or offlined. Online is a full rescan of SCSI disks by echoing to every
1790 # host entry.
1791 #
function on_off_disk # disk state{online,offline} host
{
	typeset disk=$1
	typeset state=$2
	typeset host=$3

	[[ -z $disk ]] || [[ -z $state ]] && \
	    log_fail "Arguments invalid or missing"

	if is_linux; then
		if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
			# Multipath device: resolve the dm name, then walk
			# and remove each underlying slave device via sysfs.
			dm_name="$(readlink $DEV_DSKDIR/$disk \
			    | nawk -F / '{print $2}')"
			slave="$(ls /sys/block/${dm_name}/slaves \
			    | nawk '{print $1}')"
			while [[ -n $slave ]]; do
				#check if disk is online
				lsscsi | egrep $slave > /dev/null
				if (($? == 0)); then
					slave_dir="/sys/block/${dm_name}"
					slave_dir+="/slaves/${slave}/device"
					ss="${slave_dir}/state"
					sd="${slave_dir}/delete"
					# Mark offline, then delete the SCSI
					# device entirely.
					log_must eval "echo 'offline' > ${ss}"
					log_must eval "echo '1' > ${sd}"
					# It must no longer appear in lsscsi.
					lsscsi | egrep $slave > /dev/null
					if (($? == 0)); then
						log_fail "Offlining" \
						    "$disk failed"
					fi
				fi
				# Pick up the next remaining slave, if any.
				slave="$(ls /sys/block/$dm_name/slaves \
				    2>/dev/null | nawk '{print $1}')"
			done
		elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
			#check if disk is online
			lsscsi | egrep $disk > /dev/null
			if (($? == 0)); then
				dev_state="/sys/block/$disk/device/state"
				dev_delete="/sys/block/$disk/device/delete"
				# Mark offline, then delete the SCSI device.
				log_must eval "echo 'offline' > ${dev_state}"
				log_must eval "echo '1' > ${dev_delete}"
				lsscsi | egrep $disk > /dev/null
				if (($? == 0)); then
					log_fail "Offlining $disk" \
					    "failed"
				fi
			else
				log_note "$disk is already offline"
			fi
		elif [[ $state == "online" ]]; then
			#force a full rescan
			scan_scsi_hosts $host
			block_device_wait
			if is_mpath_device $disk; then
				dm_name="$(readlink $DEV_DSKDIR/$disk \
				    | nawk -F / '{print $2}')"
				slave="$(ls /sys/block/$dm_name/slaves \
				    | nawk '{print $1}')"
				# The slave must reappear after the rescan.
				lsscsi | egrep $slave > /dev/null
				if (($? != 0)); then
					log_fail "Onlining $disk failed"
				fi
			elif is_real_device $disk; then
				lsscsi | egrep $disk > /dev/null
				if (($? != 0)); then
					log_fail "Onlining $disk failed"
				fi
			else
				log_fail "$disk is not a real dev"
			fi
		else
			log_fail "$disk failed to $state"
		fi
	fi
}
1868
1869 #
1870 # Get the mountpoint of snapshot
1871 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1872 # as its mountpoint
1873 #
#
# Get the mountpoint of snapshot.
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint.
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# A valid snapshot name must contain '@'.
	[[ $dataset == *@* ]] || \
	    log_fail "Error name of snapshot '$dataset'."

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# Both halves around the '@' must be non-empty.
	[[ -n $fs && -n $snap ]] || \
	    log_fail "Error name of snapshot '$dataset'."

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1891
1892 #
1893 # Given a device and 'ashift' value verify it's correctly set on every label
1894 #
1895 function verify_ashift # device ashift
1896 {
1897 typeset device="$1"
1898 typeset ashift="$2"
1899
1900 zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
1901 if (ashift != $2)
1902 exit 1;
1903 else
1904 count++;
1905 } END {
1906 if (count != 4)
1907 exit 1;
1908 else
1909 exit 0;
1910 }'
1911
1912 return $?
1913 }
1914
1915 #
1916 # Given a pool and file system, this function will verify the file system
1917 # using the zdb internal tool. Note that the pool is exported and imported
1918 # to ensure it has consistent state.
1919 #
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	# Build the '-d <dir>' search-path options for the import.
	for dir in $dirs ; do
		search_path="$search_path -d $dir"
	done

	log_must zpool import $search_path $pool

	# Run the zdb verification, capturing its output for diagnosis.
	if ! zdb -cudi $filesys > $zdbout 2>&1; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1953
1954 #
# Given a pool, list all disks in the pool
1956 #
1957 function get_disklist # pool
1958 {
1959 typeset disklist=""
1960
1961 disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1962 grep -v "\-\-\-\-\-" | \
1963 egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1964
1965 echo $disklist
1966 }
1967
1968 #
# Given a pool, list all disks in the pool with their full
1970 # path (like "/dev/sda" instead of "sda").
1971 #
#
# Given a pool, list all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
#
function get_disklist_fullpath # pool
{
	# -P makes 'zpool iostat' print full device paths.
	# Fix: declare 'args' with typeset so it no longer leaks into the
	# caller's scope as a global variable.
	typeset args="-P $1"
	get_disklist $args
}
1977
1978
1979
1980 # /**
1981 # This function kills a given list of processes after a time period. We use
1982 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1983 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1984 # would be listed as FAIL, which we don't want : we're happy with stress tests
1985 # running for a certain amount of time, then finishing.
1986 #
1987 # @param $1 the time in seconds after which we should terminate these processes
1988 # @param $2..$n the processes we wish to terminate.
1989 # */
# /**
# Kill a given list of processes after a fixed time period, so stress
# tests can run for a bounded duration and still PASS (instead of
# tripping STF_TIMEOUT and being reported as FAIL).
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"
	typeset pid

	log_note "Waiting for child processes($cpids). " \
	    "It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	for pid in $cpids; do
		# Only signal processes that are still alive.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
2009
2010 #
2011 # Verify a given hotspare disk is inuse or avail
2012 #
2013 # Return 0 is pool/disk matches expected state, 1 otherwise
2014 #
#
# Verify a given hotspare disk is inuse or avail.
# Return 0 if pool/disk matches expected state, 1 otherwise.
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	# Note: cur_state is intentionally left non-local, matching the
	# other check_*_state helpers in this file.
	cur_state=$(get_device_state $pool $disk "spares")

	# The comparison itself yields the 0/1 return status.
	[[ $state == ${cur_state} ]]
}
2028
2029 #
2030 # Wait until a hotspare transitions to a given state or times out.
2031 #
2032 # Return 0 when pool/disk matches expected state, 1 on timeout.
2033 #
#
# Wait until a hotspare transitions to a given state or times out.
#
# $1 pool name
# $2 disk (a leading $DEV_DSKDIR/ prefix is stripped)
# $3 expected state
# $4 timeout in seconds (default 60)
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	# Fix: was ${2#$/DEV_DSKDIR/} — the misplaced '$' meant the
	# $DEV_DSKDIR/ prefix was never actually stripped from the disk.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the state matches or we time out.
	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
2053
2054 #
2055 # Verify a given slog disk is inuse or avail
2056 #
2057 # Return 0 is pool/disk matches expected state, 1 otherwise
2058 #
#
# Verify a given slog disk is in the expected state.
# Return 0 if pool/disk matches expected state, 1 otherwise.
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	# Note: cur_state is intentionally left non-local, matching the
	# other check_*_state helpers in this file.
	cur_state=$(get_device_state $pool $disk "logs")

	# The comparison itself yields the 0/1 return status.
	[[ $state == ${cur_state} ]]
}
2072
2073 #
2074 # Verify a given vdev disk is inuse or avail
2075 #
2076 # Return 0 is pool/disk matches expected state, 1 otherwise
2077 #
#
# Verify a given vdev disk is in the expected state.
#
# $1 pool name
# $2 disk (a leading $DEV_DSKDIR/ prefix is stripped)
# $3 expected state
#
# Return 0 if pool/disk matches expected state, 1 otherwise.
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	# Fix: was ${2#$/DEV_DSKDIR/} — the misplaced '$' meant the
	# $DEV_DSKDIR/ prefix was never actually stripped from the disk.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
2091
2092 #
2093 # Wait until a vdev transitions to a given state or times out.
2094 #
2095 # Return 0 when pool/disk matches expected state, 1 on timeout.
2096 #
#
# Wait until a vdev transitions to a given state or times out.
#
# $1 pool name
# $2 disk (a leading $DEV_DSKDIR/ prefix is stripped)
# $3 expected state
# $4 timeout in seconds (default 60)
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	# Fix: was ${2#$/DEV_DSKDIR/} — the misplaced '$' meant the
	# $DEV_DSKDIR/ prefix was never actually stripped from the disk.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the state matches or we time out.
	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
2116
2117 #
2118 # Check the output of 'zpool status -v <pool>',
2119 # and to see if the content of <token> contain the <keyword> specified.
2120 #
2121 # Return 0 is contain, 1 otherwise
2122 #
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Extract the "<token>: ..." line from 'zpool status -v' output.
	# Note: 'scan' is intentionally non-local, matching the style of
	# the other status helpers in this file.
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	# Case-insensitive keyword match determines the return status.
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
2139
2140 #
2141 # These 6 following functions are instance of check_pool_status()
2142 # is_pool_resilvering - to check if the pool is resilver in progress
2143 # is_pool_resilvered - to check if the pool is resilver completed
2144 # is_pool_scrubbing - to check if the pool is scrub in progress
2145 # is_pool_scrubbed - to check if the pool is scrub completed
2146 # is_pool_scrub_stopped - to check if the pool is scrub stopped
2147 # is_pool_scrub_paused - to check if the pool has scrub paused
2148 #
function is_pool_resilvering #pool <verbose>
{
	# Return status of check_pool_status propagates implicitly.
	check_pool_status "$1" "scan" "resilver in progress since " $2
}

function is_pool_resilvered #pool <verbose>
{
	check_pool_status "$1" "scan" "resilvered " $2
}

function is_pool_scrubbing #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub in progress since " $2
}

function is_pool_scrubbed #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub repaired" $2
}

function is_pool_scrub_stopped #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub canceled" $2
}

function is_pool_scrub_paused #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub paused since " $2
}
2184
2185 #
2186 # Use create_pool()/destroy_pool() to clean up the information in
# the given disk to avoid slice overlapping.
2188 #
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Remove any leftover pool of the same name from a prior run.
	poolexists $pool && destroy_pool $pool

	# Creating and immediately destroying a pool rewrites the labels
	# on the given devices, clearing stale on-disk information.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
2202
2203 #/**
2204 # A function to find and locate free disks on a system or from given
2205 # disks as the parameter. It works by locating disks that are in use
2206 # as swap devices and dump devices, and also disks listed in /etc/vfstab
2207 #
2208 # $@ given disks to find which are free, default is all disks in
2209 # the test system
2210 #
2211 # @return a string containing the list of available disks
2212 #*/
function find_disks
{
	# Trust provided list, no attempt is made to locate unused devices.
	if is_linux; then
		echo "$@"
		return
	fi


	# illumos/Solaris path: discover disks via format(1M) and filter
	# out devices already in use for swap, dump, or mounts.
	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	swap -l > $sfi
	dumpadm > $dmpi 2>/dev/null

	# write an awk script that can process the output of format
	# to produce a list of disks we know about. Note that we have
	# to escape "$2" so that the shell doesn't interpret it while
	# we're creating the awk script.
	# -------------------
	cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
	BEGIN { FS="."; }

	/^Specify disk/{
		searchdisks=0;
	}

	{
		if (searchdisks && \$2 !~ "^$"){
			split(\$2,arr," ");
			print arr[1];
		}
	}

	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
EOF
	#---------------------

	chmod 755 /tmp/find_disks.awk
	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
	rm /tmp/find_disks.awk

	unused=""
	for disk in $disks; do
		# Check for mounted
		grep "${disk}[sp]" /etc/mnttab >/dev/null
		(($? == 0)) && continue
		# Check for swap
		grep "${disk}[sp]" $sfi >/dev/null
		(($? == 0)) && continue
		# check for dump device
		grep "${disk}[sp]" $dmpi >/dev/null
		(($? == 0)) && continue
		# check to see if this disk hasn't been explicitly excluded
		# by a user-set environment variable
		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
		(($? == 0)) && continue
		unused_candidates="$unused_candidates $disk"
	done
	rm $sfi
	rm $dmpi

	# now just check to see if those disks do actually exist
	# by looking for a device pointing to the first slice in
	# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if [ -b $DEV_DSKDIR/${disk}s0 ]; then
			if [ $count -lt $max_finddisksnum ]; then
				unused="$unused $disk"
				# do not impose limit if $@ is provided
				[[ -z $@ ]] && ((count = count + 1))
			fi
		fi
	done

	# finally, return our disk list
	echo $unused
}
2296
2297 #
2298 # Add specified user to specified group
2299 #
2300 # $1 group name
2301 # $2 user name
2302 # $3 base of the homedir (optional)
2303 #
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}
	typeset profile

	if ((${#gname} == 0 || ${#uname} == 0)); then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname

	# Make the constrained PATH available in every common login file.
	for profile in .profile .bash_profile .login; do
		echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/$profile
	done

	# Add new users to the same group and the command line utils.
	# This allows them to be run out of the original users home
	# directory as long as it permissioned to be group readable.
	if is_linux; then
		cmd_group=$(stat --format="%G" $(which zfs))
		log_must usermod -a -G $cmd_group $uname
	fi

	return 0
}
2329
2330 #
2331 # Delete the specified user.
2332 #
2333 # $1 login name
2334 # $2 base of the homedir (optional)
2335 #
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	((${#user} == 0)) && log_fail "login name is necessary."

	# Retry the removal while the account is reported as in use.
	if id $user > /dev/null 2>&1; then
		log_must_retry "currently used" 5 userdel $user
	fi

	# Remove the home directory as well.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
2353
2354 #
2355 # Select valid gid and create specified group.
2356 #
2357 # $1 group name
2358 #
function add_group #<group_name>
{
	typeset group=$1

	((${#group} == 0)) && log_fail "group name is necessary."

	# Assign 100 as the base gid on illumos; on Linux the system
	# picks the gid since low values are commonly reserved.
	if is_linux; then
		if groupadd $group > /dev/null 2>&1; then
			return 0
		fi
		return 1
	fi

	typeset -i gid=100
	while true; do
		groupadd -g $gid $group > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
		0) return 0 ;;
		# The gid is not unique
		4) ((gid += 1)) ;;
		*) return 1 ;;
		esac
	done
}
2392
2393 #
2394 # Delete the specified group.
2395 #
2396 # $1 group name
2397 #
function del_group #<group_name>
{
	typeset grp=$1
	typeset -i rc

	((${#grp} == 0)) && log_fail "group name is necessary."

	if is_linux; then
		# getent exit codes: 2 => no such group, 0 => group exists.
		getent group $grp > /dev/null 2>&1
		rc=$?
		((rc == 2)) && return 0
		((rc == 0)) || return 1
		log_must groupdel $grp
	else
		# groupmod -n exit codes: 6 => no such group,
		# 9 => name already exists as a group name.
		groupmod -n $grp $grp > /dev/null 2>&1
		rc=$?
		((rc == 6)) && return 0
		((rc == 9)) || return 1
		log_must groupdel $grp
	fi

	return 0
}
2429
2430 #
2431 # This function will return true if it's safe to destroy the pool passed
2432 # as argument 1. It checks for pools based on zvols and files, and also
2433 # files contained in a pool that may have a different mountpoint.
2434 #
2435 function safe_to_destroy_pool { # $1 the pool name
2436
2437 typeset pool=""
2438 typeset DONT_DESTROY=""
2439
2440 # We check that by deleting the $1 pool, we're not
2441 # going to pull the rug out from other pools. Do this
2442 # by looking at all other pools, ensuring that they
2443 # aren't built from files or zvols contained in this pool.
2444
2445 for pool in $(zpool list -H -o name)
2446 do
2447 ALTMOUNTPOOL=""
2448
2449 # this is a list of the top-level directories in each of the
2450 # files that make up the path to the files the pool is based on
2451 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2452 awk '{print $1}')
2453
2454 # this is a list of the zvols that make up the pool
2455 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2456 | awk '{print $1}')
2457
2458 # also want to determine if it's a file-based pool using an
2459 # alternate mountpoint...
2460 POOL_FILE_DIRS=$(zpool status -v $pool | \
2461 grep / | awk '{print $1}' | \
2462 awk -F/ '{print $2}' | grep -v "dev")
2463
2464 for pooldir in $POOL_FILE_DIRS
2465 do
2466 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2467 grep "${pooldir}$" | awk '{print $1}')
2468
2469 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2470 done
2471
2472
2473 if [ ! -z "$ZVOLPOOL" ]
2474 then
2475 DONT_DESTROY="true"
2476 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2477 fi
2478
2479 if [ ! -z "$FILEPOOL" ]
2480 then
2481 DONT_DESTROY="true"
2482 log_note "Pool $pool is built from $FILEPOOL on $1"
2483 fi
2484
2485 if [ ! -z "$ALTMOUNTPOOL" ]
2486 then
2487 DONT_DESTROY="true"
2488 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2489 fi
2490 done
2491
2492 if [ -z "${DONT_DESTROY}" ]
2493 then
2494 return 0
2495 else
2496 log_note "Warning: it is not safe to destroy $1!"
2497 return 1
2498 fi
2499 }
2500
2501 #
2502 # Get the available ZFS compression options
2503 # $1 option type zfs_set|zfs_compress
2504 #
#
# Get the available ZFS compression options.
# $1 option type zfs_set|zfs_compress
#
function get_compress_opts
{
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
	    gzip-6 gzip-7 gzip-8 gzip-9"
	typeset valid_opts=""

	case $1 in
	zfs_compress)
		valid_opts="on lzjb"
		;;
	zfs_set)
		valid_opts="on off lzjb"
		;;
	esac

	# Append the gzip variants only when this build supports gzip.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2523
2524 #
2525 # Verify zfs operation with -p option work as expected
2526 # $1 operation, value could be create, clone or rename
2527 # $2 dataset type, value could be fs or vol
2528 # $3 dataset name
2529 # $4 new dataset name
2530 #
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	# Only filesystems ("fs") and volumes ("vol") are supported.
	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' has no source dataset; shift the name into
			# $newdataset and add -V for volumes.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# Clone source must be an existing snapshot.
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# Rename source must exist and must not be a snapshot.
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	destroy_dataset "${newdataset%/*}" "-rRf"

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	block_device_wait

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2594
2595 #
2596 # Get configuration of pool
2597 # $1 pool name
2598 # $2 config name
2599 #
#
# Print the value of a named config entry from zdb's pool configuration
# output.  Returns 1 if the pool does not exist, 0 otherwise (even when
# the config key is not found, in which case the output is empty).
#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# Last field of 'zpool list -H' is the altroot ('-' when unset);
	# an exported/altroot pool needs 'zdb -e' instead of 'zdb -C'.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	# Strip the single quotes zdb prints around string values.  The
	# original used ${value#'} / ${value%'} with an unescaped quote,
	# which does not parse as the intended pattern.
	if [[ -n $value ]] ; then
		value=${value#\'}
		value=${value%\'}
	fi
	echo $value

	return 0
}
2625
2626 #
# Private function. Randomly select one of the items from the arguments.
2628 #
2629 # $1 count
2630 # $2-n string
2631 #
function _random_get
{
	typeset count=$1
	shift

	typeset items="$@"
	typeset -i pick

	# Choose a 1-based field index at random; cut yields an empty
	# string when the index is past the last field.
	((pick = RANDOM % count + 1))

	echo $(echo "$items" | cut -f $pick -d ' ')
}
2644
2645 #
# Randomly select one item from the arguments, where the selection may also be the NONE string
2647 #
#
# Randomly select one item from the arguments, or nothing ("NONE"): the
# index range is one larger than the argument count, and an out-of-range
# index makes _random_get print an empty string.
#
function random_get_with_non
{
	typeset -i cnt=$#

	# The original '((cnt =+ 1))' assigned +1 to cnt instead of
	# incrementing it, so the selection was always the first argument
	# and the NONE slot was unreachable.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2655
2656 #
# Randomly select one item from the arguments, never selecting the NONE string
2658 #
function random_get
{
	# Delegate straight to the helper: index range equals the number
	# of arguments, so the result is always one of them.
	_random_get $# "$@"
}
2663
2664 #
2665 # Detect if the current system support slog
2666 #
function verify_slog_support
{
	typeset tmpdir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$tmpdir/a
	typeset sdev=$tmpdir/b
	typeset -i rc=0

	mkdir -p $tmpdir
	mkfile $MINVDEVSIZE $vdev $sdev

	# A dry-run (-n) pool creation with a log vdev is enough to tell
	# whether slog devices are supported on this system.
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || rc=1

	rm -r $tmpdir
	return $rc
}
2685
2686 #
2687 # The function will generate a dataset name with specific length
2688 # $1, the length of the name
2689 # $2, the base string to construct the name
2690 #
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i reps
	typeset name=""

	# Repetitions needed to reach at least the requested length
	# (ceiling division, same as len/baselen rounded up).
	((reps = (len + baselen - 1) / baselen))

	while ((reps > 0)); do
		name="${name}${basestr}"
		((reps -= 1))
	done

	echo $name
}
2712
2713 #
2714 # Get cksum tuple of dataset
2715 # $1 dataset name
2716 #
2717 # sample zdb output:
2718 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2719 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2720 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2721 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
	typeset result
	# Flush dirty data first so zdb reports the current on-disk rootbp.
	sync
	# Pull the final '=' field (the cksum tuple) from the dataset's
	# rootbp line in zdb's verbose output.
	result=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
	    | awk -F= '{print $7}')
	echo $result
}
2730
2731 #
2732 # Get cksum of file
# $1 file path
2734 #
function checksum
{
	# First field of cksum(1) output is the CRC of the file.
	echo $(cksum $1 | awk '{print $1}')
}
2741
2742 #
2743 # Get the given disk/slice state from the specific field of the pool
2744 #
2745 function get_device_state #pool disk field("", "spares","logs")
2746 {
2747 typeset pool=$1
2748 typeset disk=${2#$DEV_DSKDIR/}
2749 typeset field=${3:-$pool}
2750
2751 state=$(zpool status -v "$pool" 2>/dev/null | \
2752 nawk -v device=$disk -v pool=$pool -v field=$field \
2753 'BEGIN {startconfig=0; startfield=0; }
2754 /config:/ {startconfig=1}
2755 (startconfig==1) && ($1==field) {startfield=1; next;}
2756 (startfield==1) && ($1==device) {print $2; exit;}
2757 (startfield==1) &&
2758 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2759 echo $state
2760 }
2761
2762
2763 #
2764 # print the given directory filesystem type
2765 #
2766 # $1 directory name
2767 #
function get_fstype
{
	typeset dir=$1

	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"

	#
	# The third field of 'df -n' output is the filesystem type, e.g.:
	# $ df -n /
	# /  : ufs
	#
	df -n $dir | awk '{print $3}'
}
2782
2783 #
2784 # Given a disk, label it to VTOC regardless what label was on the disk
2785 # $1 disk
2786 #
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# VTOC labeling uses illumos/Solaris format(1M); not applicable
	# on Linux.
	if is_linux; then
		log_note "Currently unsupported by the test framework"
		return 1
	fi

	if [[ $arch == "i386" ]]; then
		# x86 needs an fdisk partition before the VTOC label can
		# be applied; the canned answers below drive format(1M).
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		# SPARC: no fdisk step; format prompts differ, hence the
		# different canned-answer sequence.
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# Run format(1M) non-interactively, feeding it the canned answers.
	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2835
2836 #
2837 # check if the system was installed as zfsroot or not
# return: 0 if true, otherwise false
2839 #
function is_zfsroot
{
	# The root filesystem type appears in 'df -n /' output; success
	# of the grep is the function's return value.
	df -n / | grep zfs > /dev/null 2>&1
}
2845
2846 #
2847 # get the root filesystem name if it's zfsroot system.
2848 #
2849 # return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# /etc/mnttab: field 1 = dataset, 2 = mountpoint, 3 = fstype.
	if ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		    /etc/mnttab)
	fi
	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	# Sanity-check that the dataset is actually known to zfs.
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2868
2869 #
2870 # get the rootfs's pool name
2871 # return:
2872 # rootpool name
2873 #
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	# /etc/mnttab: field 1 = dataset, 2 = mountpoint, 3 = fstype.
	if ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		    /etc/mnttab)
	fi
	[[ -n "$rootfs" ]] || log_fail "Can not get rootpool"

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool name is the first '/'-separated component.
		rootpool=$(echo $rootfs | awk -F\/ '{print $1}')
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2894
2895 #
2896 # Check if the given device is physical device
2897 #
function is_physical_device #device
{
	typeset device=${1#$DEV_DSKDIR}
	device=${device#$DEV_RDSKDIR}

	if is_linux; then
		# Must be a block device, with loop partition support
		# enabled (max_part parameter present).
		[[ -b "$DEV_DSKDIR/$device" ]] && \
		    [[ -f /sys/module/loop/parameters/max_part ]]
	else
		# Solaris cXtYdZ / cXdY device naming.
		echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
	fi
}
2912
2913 #
2914 # Check if the given device is a real device (ie SCSI device)
2915 #
function is_real_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	# Linux only: lsblk reports TYPE "disk" for real devices.
	# (On other platforms the empty if-statement yields status 0,
	# matching the original fall-through behavior.)
	if is_linux; then
		lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
		    egrep disk >/dev/null
	fi
}
2927
2928 #
2929 # Check if the given device is a loop device
2930 #
function is_loop_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	# Linux only: lsblk reports TYPE "loop" for loopback devices.
	if is_linux; then
		lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
		    egrep loop >/dev/null
	fi
}
2942
2943 #
# Check if the given device is a multipath device and if there is a symbolic
# link to a device mapper and to a disk
2946 # Currently no support for dm devices alone without multipath
2947 #
function is_mpath_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		if lsblk $DEV_MPATHDIR/$disk -o TYPE 2>/dev/null | \
		    egrep mpath >/dev/null; then
			# Usable only when the device-mapper symlink
			# resolves as well.
			readlink $DEV_MPATHDIR/$disk > /dev/null 2>&1
		else
			return 1
		fi
	fi
}
2964
2965 # Set the slice prefix for disk partitioning depending
2966 # on whether the device is a real, multipath, or loop device.
2967 # Currently all disks have to be of the same type, so only
2968 # checks first disk to determine slice prefix.
2969 #
function set_slice_prefix
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			# NOTE(review): the shell variable 'i' is not passed
			# into nawk, so awk's own (unset) 'i' evaluates to 0
			# and this always selects the first disk in $DISKS —
			# consistent with the header comment that only the
			# first disk is checked, but the loop below then
			# never sees a different disk.  Confirm before
			# relying on per-disk iteration here.
			disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
			# Multipath devices whose 18th character is a digit
			# (partitioned dm names) and real devices use no
			# slice prefix; other mpath/loop devices use "p".
			if ( is_mpath_device $disk ) && [[ -z $(echo $disk | awk 'substr($1,18,1)\
			     ~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
				export SLICE_PREFIX=""
				return 0
			elif ( is_mpath_device $disk || is_loop_device \
			    $disk ); then
				export SLICE_PREFIX="p"
				return 0
			else
				log_fail "$disk not supported for partitioning."
			fi
			(( i = i + 1))
		done
	fi
}
2993
2994 #
2995 # Set the directory path of the listed devices in $DISK_ARRAY_NUM
2996 # Currently all disks have to be of the same type, so only
2997 # checks first disk to determine device directory
2998 # default = /dev (linux)
2999 # real disk = /dev (linux)
3000 # multipath device = /dev/mapper (linux)
3001 #
function set_device_dir
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			# NOTE(review): awk's 'i' is not the shell's 'i'
			# (it is unset, i.e. 0), so this always picks the
			# first disk; both branches below return on the
			# first iteration anyway, matching the header
			# comment that only the first disk is checked.
			disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
			if is_mpath_device $disk; then
				export DEV_DSKDIR=$DEV_MPATHDIR
				return 0
			else
				export DEV_DSKDIR=$DEV_RDSKDIR
				return 0
			fi
			(( i = i + 1))
		done
	else
		export DEV_DSKDIR=$DEV_RDSKDIR
	fi
}
3023
3024 #
3025 # Get the directory path of given device
3026 #
#
# Print the directory path of the given device.
#
function get_device_dir #device
{
	typeset device=$1

	# Call the predicate directly.  The original wrapped it in a
	# command substitution ('if ! $(is_physical_device ...)'), which
	# executes the function's (empty) output as a command and only
	# worked via the empty-command exit-status rule.
	if ! is_physical_device $device; then
		# Strip the last path component to get the directory part.
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		if [[ -b "$DEV_DSKDIR/$device" ]]; then
			device="$DEV_DSKDIR"
		fi
		echo $device
	else
		echo "$DEV_DSKDIR"
	fi
}
3043
3044 #
3045 # Get persistent name for given disk
3046 #
function get_persistent_disk_name #device
{
	typeset device=$1
	typeset dev_id

	if is_linux; then
		if is_real_device $device; then
			# Take the first disk/by-id symlink udev reports
			# and keep only its final path component.
			dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
			    | egrep disk/by-id | nawk '{print $2; exit}' \
			    | nawk -F / '{print $3}')"
			echo $dev_id
		elif is_mpath_device $device; then
			# Multipath devices are identified by their
			# dm-uuid based by-id symlink instead.
			dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
			    | egrep disk/by-id/dm-uuid \
			    | nawk '{print $2; exit}' \
			    | nawk -F / '{print $3}')"
			echo $dev_id
		else
			echo $device
		fi
	else
		# Non-Linux platforms already use persistent names.
		echo $device
	fi
}
3071
3072 #
3073 # Load scsi_debug module with specified parameters
3074 #
#
# Load the scsi_debug module with the specified parameters, failing the
# test if it is already loaded or cannot be installed.
#
function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
{
	typeset devsize=$1
	typeset hosts=$2
	typeset tgts=$3
	typeset luns=$4

	[[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
	    [[ -z $luns ]] && log_fail "Arguments invalid or missing"

	if is_linux; then
		# Dry-run probe: does this kernel provide scsi_debug?
		modprobe -n scsi_debug
		if (($? != 0)); then
			# The original was missing the line continuation
			# here, so "module" ran as a stray command instead
			# of being part of the message.
			log_unsupported "Platform does not have scsi_debug" \
			    "module"
		fi
		lsmod | egrep scsi_debug > /dev/null
		if (($? == 0)); then
			log_fail "scsi_debug module already installed"
		else
			log_must modprobe scsi_debug dev_size_mb=$devsize \
			    add_host=$hosts num_tgts=$tgts max_luns=$luns
			block_device_wait
			# Verify the emulated devices actually appeared.
			lsscsi | egrep scsi_debug > /dev/null
			if (($? == 1)); then
				log_fail "scsi_debug module install failed"
			fi
		fi
	fi
}
3105
3106 #
3107 # Get the package name
3108 #
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	# Convert path separators to dashes, e.g. a/b -> SUNWstc-a-b.
	echo "SUNWstc-${dirpath//\//-}"
}
3115
3116 #
3117 # Get the word numbers from a string separated by white space
3118 #
function get_word_count
{
	# Deliberately unquoted so the shell splits the argument into
	# words; wc -w then counts them.
	printf '%s\n' $1 | wc -w
}
3123
3124 #
# To verify that the required number of disks is given
3126 #
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	# Abort the test (untested, not failed) when too few disks given.
	((count >= min)) || log_untested \
	    "A minimum of $min disks is required to run." \
	    " You specified $count disk(s)"
}
3138
function ds_is_volume
{
	# Succeeds iff the dataset's 'type' property is "volume".
	[[ $(get_prop type $1) == "volume" ]]
}
3145
function ds_is_filesystem
{
	# Succeeds iff the dataset's 'type' property is "filesystem".
	[[ $(get_prop type $1) == "filesystem" ]]
}
3152
function ds_is_snapshot
{
	# Succeeds iff the dataset's 'type' property is "snapshot".
	[[ $(get_prop type $1) == "snapshot" ]]
}
3159
3160 #
3161 # Check if Trusted Extensions are installed and enabled
3162 #
function is_te_enabled
{
	# grep's exit status is the function's return value; its matched
	# line (if any) goes to stdout, as in the original.
	svcs -H -o state labeld 2>/dev/null | grep "enabled"
}
3172
3173 # Utility function to determine if a system has multiple cpus.
# Utility function to determine if a system has multiple cpus.
function is_mp
{
	# Succeeds when the online CPU count exceeds one.
	if is_linux; then
		(($(nproc) > 1))
	else
		(($(psrinfo | wc -l) > 1))
	fi
}
3184
# Print the CPU frequency in MHz, as reported by the platform tool.
function get_cpu_freq
{
	if is_linux; then
		# lscpu's "CPU MHz:" line, third field is the value.
		lscpu | awk '/CPU MHz/ { print $3 }'
	else
		# psrinfo -v prints "... operates at <N> MHz"; field 6 = N.
		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
	fi
}
3193
3194 # Run the given command as the user provided.
3195 function user_run
3196 {
3197 typeset user=$1
3198 shift
3199
3200 log_note "user:$user $@"
3201 eval su - \$user -c \"$@\" > /tmp/out 2>/tmp/err
3202 return $?
3203 }
3204
3205 #
3206 # Check if the pool contains the specified vdevs
3207 #
3208 # $1 pool
3209 # $2..n <vdev> ...
3210 #
3211 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3212 # vdevs is not in the pool, and 2 if pool name is missing.
3213 #
#
# Check if the pool contains the specified vdevs.
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the
# specified vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	typeset tmpfile

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		if (($? != 0)); then
			# Remove the temp file before the early return;
			# the original leaked it on this path.
			rm -f $tmpfile
			return 1
		fi
	done

	rm -f $tmpfile

	return 0
}
3237
# Print the maximum of the numeric arguments.
function get_max
{
	typeset -l i max=$1
	shift

	for i in "$@"; do
		# Plain arithmetic expansion; the original forked an
		# 'echo' subshell per iteration for no benefit.
		max=$((max > i ? max : i))
	done

	echo $max
}
3249
# Print the minimum of the numeric arguments.
function get_min
{
	typeset -l i min=$1
	shift

	for i in "$@"; do
		# Plain arithmetic expansion; the original forked an
		# 'echo' subshell per iteration for no benefit.
		min=$((min < i ? min : i))
	done

	echo $min
}
3261
3262 #
3263 # Generate a random number between 1 and the argument.
3264 #
function random
{
	typeset -i ceil=$1
	# Uniform-ish value in [1, ceil].
	echo $((RANDOM % ceil + 1))
}
3270
3271 # Write data that can be compressed into a directory
# Write data that can be compressed into a directory
# $1 directory, $2 total megabytes per file, $3 file count (default 1),
# $4 fio block size (default 1024k), $5 base file name (default "file").
function write_compressible
{
	typeset dir=$1
	typeset megs=$2
	typeset nfiles=${3:-1}
	typeset bs=${4:-1024k}
	typeset fname=${5:-file}

	[[ -d $dir ]] || log_fail "No directory: $dir"

	# Under Linux fio is not currently used since its behavior can
	# differ significantly across versions. This includes missing
	# command line options and cases where the --buffer_compress_*
	# options fail to behave as expected.
	if is_linux; then
		typeset file_bytes=$(to_bytes $megs)
		typeset bs_bytes=4096
		typeset blocks=$(($file_bytes / $bs_bytes))

		for (( i = 0; i < $nfiles; i++ )); do
			# Sparse file of the full size; unwritten blocks
			# read back as zeros (fully compressible).
			truncate -s $file_bytes $dir/$fname.$i

			# Write every third block to get 66% compression.
			for (( j = 0; j < $blocks; j += 3 )); do
				dd if=/dev/urandom of=$dir/$fname.$i \
				    seek=$j bs=$bs_bytes count=1 \
				    conv=notrunc >/dev/null 2>&1
			done
		done
	else
		log_must eval "fio \
		    --name=job \
		    --fallocate=0 \
		    --minimal \
		    --randrepeat=0 \
		    --buffer_compress_percentage=66 \
		    --buffer_compress_chunk=4096 \
		    --directory=$dir \
		    --numjobs=$nfiles \
		    --nrfiles=$nfiles \
		    --rw=write \
		    --bs=$bs \
		    --filesize=$megs \
		    --filename_format='$fname.\$jobnum' >/dev/null"
	fi
}
3318
function get_objnum
{
	typeset pathname=$1

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
	# stat(1) %i is the inode number, which callers treat as the ZFS
	# object number.
	echo $(stat -c %i $pathname)
}
3328
3329 #
3330 # Sync data to the pool
3331 #
3332 # $1 pool name
3333 # $2 boolean to force uberblock (and config including zpool cache file) update
3334 #
function sync_pool #pool <force>
{
	typeset pool=${1:-$TESTPOOL}
	typeset force=${2:-false}
	typeset opt=""

	# -f also forces an uberblock (and cached config) update.
	[[ $force == true ]] && opt="-f"
	log_must zpool sync $opt $pool

	return 0
}
3348
3349 #
3350 # Wait for zpool 'freeing' property drops to zero.
3351 #
3352 # $1 pool name
3353 #
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll once a second until the 'freeing' property reaches zero.
	until [[ "$(zpool list -Ho freeing $pool)" == "0" ]]; do
		log_must sleep 1
	done
}
3362
3363 #
3364 # Wait for every device replace operation to complete
3365 #
3366 # $1 pool name
3367 #
function wait_replacing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll until no 'replacing-N' vdevs remain in the status output.
	until [[ -z "$(zpool status $pool |
	    awk '/replacing-[0-9]+/ {print $1}')" ]]; do
		log_must sleep 1
	done
}
3377
3378 #
3379 # Wait for a pool to be scrubbed
3380 #
3381 # $1 pool name
3382 # $2 number of seconds to wait (optional)
3383 #
3384 # Returns true when pool has been scrubbed, or false if there's a timeout or if
3385 # no scrub was done.
3386 #
#
# Wait for a pool to be scrubbed
#
# $1 pool name
# $2 number of seconds to wait (optional, default 10)
#
# Returns 0 when the pool has been scrubbed, 1 on timeout.
#
function wait_scrubbed
{
	typeset pool=${1:-$TESTPOOL}
	typeset -i iter=${2:-10}
	typeset -i i=0

	# Use an arithmetic loop: '{1..$iter}' brace expansion only
	# expands the variable under ksh93; under bash it is a single
	# literal word and the loop would run exactly once.
	while ((i < iter)); do
		if is_pool_scrubbed $pool ; then
			return 0
		fi
		sleep 1
		((i += 1))
	done
	return 1
}
3399
3400 # Backup the zed.rc in our test directory so that we can edit it for our test.
3401 #
3402 # Returns: Backup file name. You will need to pass this to zed_rc_restore().
3403 function zed_rc_backup
3404 {
3405 zedrc_backup="$(mktemp)"
3406 cp $ZEDLET_DIR/zed.rc $zedrc_backup
3407 echo $zedrc_backup
3408 }
3409
3410 function zed_rc_restore
3411 {
3412 mv $1 $ZEDLET_DIR/zed.rc
3413 }
3414
3415 #
3416 # Setup custom environment for the ZED.
3417 #
3418 # $@ Optional list of zedlets to run under zed.
function zed_setup
{
	if ! is_linux; then
		return
	fi

	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	if [[ ! -e $VDEVID_CONF ]]; then
		log_must touch $VDEVID_CONF
	fi

	# Refuse to clobber a real system vdev_id.conf.
	if [[ -e $VDEVID_CONF_ETC ]]; then
		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
	fi
	# NB: EXTRA_ZEDLETS is intentionally global (also read elsewhere).
	EXTRA_ZEDLETS=$@

	# Create a symlink for /etc/zfs/vdev_id.conf file.
	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC

	# Setup minimal ZED configuration. Individual test cases should
	# add additional ZEDLETs as needed for their specific test.
	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR

	# Scripts must only be user writable.
	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
		saved_umask=$(umask)
		log_must umask 0022
		for i in $EXTRA_ZEDLETS ; do
			log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
		done
		log_must umask $saved_umask
	fi

	# Customize the zed.rc file to enable the full debug log.
	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc

}
3461
3462 #
3463 # Cleanup custom ZED environment.
3464 #
3465 # $@ Optional list of zedlets to remove from our test zed.d directory.
function zed_cleanup
{
	typeset zedlet

	if ! is_linux; then
		return
	fi
	EXTRA_ZEDLETS=$@

	# Remove the stock files installed by zed_setup(), then any extras.
	for zedlet in zed.rc zed-functions.sh all-syslog.sh all-debug.sh \
	    state; do
		log_must rm -f ${ZEDLET_DIR}/$zedlet
	done

	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
		for i in $EXTRA_ZEDLETS ; do
			log_must rm -f ${ZEDLET_DIR}/$i
		done
	fi
	log_must rm -f $ZED_LOG
	log_must rm -f $ZED_DEBUG_LOG
	log_must rm -f $VDEVID_CONF_ETC
	log_must rm -f $VDEVID_CONF
	rmdir $ZEDLET_DIR
}
3490
3491 #
3492 # Check if ZED is currently running, if not start ZED.
3493 #
function zed_start
{
	if ! is_linux; then
		return
	fi

	# ZEDLET_DIR=/var/tmp/zed
	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	# Verify the ZED is not already running.
	pgrep -x zed > /dev/null
	if (($? == 0)); then
		log_fail "ZED already running"
	fi

	log_note "Starting ZED"
	# run ZED in the background and redirect foreground logging
	# output to $ZED_LOG.
	log_must truncate -s 0 $ZED_DEBUG_LOG
	# -vF: verbose, foreground; the trailing '&' inside the eval'd
	# string puts the daemon in the background of this shell.
	log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
	    "-s $ZEDLET_DIR/state 2>$ZED_LOG &"

	return 0
}
3520
3521 #
3522 # Kill ZED process
3523 #
function zed_stop
{
	if ! is_linux; then
		return
	fi

	log_note "Stopping ZED"
	if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
		# NB: zedpid is intentionally global (no typeset).
		zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
		kill $zedpid
		# Block until the daemon has fully exited.
		while ps -p $zedpid > /dev/null; do
			sleep 1
		done
		rm -f ${ZEDLET_DIR}/zed.pid
	fi
	return 0
}
3541
3542 #
3543 # Drain all zevents
3544 #
function zed_events_drain
{
	# Keep clearing until the event queue reports empty.
	until [ $(zpool events -H | wc -l) -eq 0 ]; do
		sleep 1
		zpool events -c >/dev/null
	done
}
3552
3553 # Set a variable in zed.rc to something, un-commenting it in the process.
3554 #
3555 # $1 variable
3556 # $2 value
3557 function zed_rc_set
3558 {
3559 var="$1"
3560 val="$2"
3561 # Remove the line
3562 cmd="'/$var/d'"
3563 eval sed -i $cmd $ZEDLET_DIR/zed.rc
3564
3565 # Add it at the end
3566 echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3567 }
3568
3569
3570 #
# Check if the provided device is actively being used as a swap device.
3572 #
function is_swap_inuse
{
	typeset device=$1

	[[ -n $device ]] || { log_note "No device specified."; return 1; }

	# grep's exit status is the function's return value.
	if is_linux; then
		# Resolve symlinks first; swapon -s lists canonical paths.
		swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
	else
		swap -l | grep -w $device > /dev/null 2>&1
	fi
}
3590
3591 #
3592 # Setup a swap device using the provided device.
3593 #
function swap_setup
{
	typeset dev=$1

	if is_linux; then
		# Linux needs the device formatted before enabling it.
		log_must eval "mkswap $dev > /dev/null 2>&1"
		log_must swapon $dev
	else
		log_must swap -a $dev
	fi

	return 0
}
3607
3608 #
3609 # Cleanup a swap device on the provided device.
3610 #
function swap_cleanup
{
	typeset dev=$1

	# Only disable the device if it is actually in use as swap.
	if is_swap_inuse $dev; then
		if is_linux; then
			log_must swapoff $dev
		else
			log_must swap -d $dev
		fi
	fi

	return 0
}
3625
3626 #
3627 # Set a global system tunable (64-bit value)
3628 #
3629 # $1 tunable name
3630 # $2 tunable values
3631 #
3632 function set_tunable64
3633 {
3634 set_tunable_impl "$1" "$2" Z
3635 }
3636
3637 #
3638 # Set a global system tunable (32-bit value)
3639 #
3640 # $1 tunable name
3641 # $2 tunable values
3642 #
3643 function set_tunable32
3644 {
3645 set_tunable_impl "$1" "$2" W
3646 }
3647
# Set a kernel module tunable.
# $1 tunable name, $2 value, $3 mdb write format (Z/W), $4 module (default zfs)
# Returns 0 on success, 1 on bad arguments or write failure.
function set_tunable_impl
{
	typeset tunable="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"
	typeset module="${4:-zfs}"

	[[ -z "$tunable" ]] && return 1
	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -w "$zfs_tunables/$tunable" ]] || return 1
		echo -n "$value" > "$zfs_tunables/$tunable"
		return "$?"
		;;
	SunOS)
		# mdb can only poke the zfs module.  The original used
		# '-eq', an arithmetic comparison that coerces both
		# strings to 0 and therefore always succeeded.
		[[ "$module" == "zfs" ]] || return 1
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		return "$?"
		;;
	esac
}
3673
3674 #
3675 # Get a global system tunable
3676 #
3677 # $1 tunable name
3678 #
3679 function get_tunable
3680 {
3681 get_tunable_impl "$1"
3682 }
3683
# Print the value of a kernel module tunable.
# $1 tunable name, $2 module (default zfs)
# Returns 0 on success, 1 when missing/unsupported.
function get_tunable_impl
{
	typeset tunable="$1"
	typeset module="${2:-zfs}"

	[[ -z "$tunable" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -f "$zfs_tunables/$tunable" ]] || return 1
		cat $zfs_tunables/$tunable
		return "$?"
		;;
	SunOS)
		# The original '-eq' compared the strings arithmetically
		# (0 == 0) and always succeeded; use string comparison.
		[[ "$module" == "zfs" ]] || return 1
		;;
	esac

	return 1
}
3705
3706 #
3707 # Get actual devices used by the pool (i.e. linux sdb1 not sdb).
3708 #
function get_pool_devices #testpool #devdir
{
	typeset testpool=$1
	typeset devdir=$2
	typeset out=""

	if is_linux; then
		# Full device paths from 'zpool status -P', with the
		# $devdir/ prefix stripped and newlines joined by spaces.
		out=$(zpool status -P $testpool | grep ${devdir} | \
		    awk '{print $1}')
		out=$(echo $out | sed -e "s|${devdir}/||g" | tr '\n' ' ')
	fi
	echo $out
}