]> git.proxmox.com Git - mirror_zfs.git/blob - tests/zfs-tests/include/libtest.shlib
ddfe550bff23d9c448bcb1bf5bf0b933d3b20045
[mirror_zfs.git] / tests / zfs-tests / include / libtest.shlib
1 #!/bin/ksh -p
2 #
3 # CDDL HEADER START
4 #
5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
8 #
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
13 #
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
19 #
20 # CDDL HEADER END
21 #
22
23 #
24 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25 # Use is subject to license terms.
26 # Copyright (c) 2012, 2016 by Delphix. All rights reserved.
27 # Copyright 2016 Nexenta Systems, Inc.
28 # Copyright (c) 2017 Lawrence Livermore National Security, LLC.
29 # Copyright (c) 2017 Datto Inc.
30 #
31
32 . ${STF_TOOLS}/include/logapi.shlib
33 . ${STF_SUITE}/include/math.shlib
34
35 #
36 # Apply constrained path when available. This is required since the
37 # PATH may have been modified by sudo's secure_path behavior.
38 #
# Replace PATH entirely with the framework-provided constrained path.
if [ -n "$STF_PATH" ]; then
	PATH="$STF_PATH"
fi
42
43 # Determine if this is a Linux test system
44 #
45 # Return 0 if platform Linux, 1 if otherwise
46
# Report whether the test system is Linux (based on 'uname -o').
function is_linux
{
	[[ $(uname -o) == "GNU/Linux" ]]
}
55
56 # Determine if this is a 32-bit system
57 #
58 # Return 0 if platform is 32-bit, 1 if otherwise
59
# Report whether the platform word size is 32 bits.
function is_32bit
{
	[[ $(getconf LONG_BIT) == "32" ]]
}
68
69 # Determine if kmemleak is enabled
70 #
71 # Return 0 if kmemleak is enabled, 1 if otherwise
72
# Report whether the kernel kmemleak detector is enabled (Linux only,
# detected by the presence of the debugfs node).
function is_kmemleak
{
	is_linux && [[ -e /sys/kernel/debug/kmemleak ]]
}
81
82 # Determine whether a dataset is mounted
83 #
84 # $1 dataset name
85 # $2 filesystem type; optional - defaulted to zfs
86 #
87 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
88
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
	zfs)
		# A leading '/' means $1 is a mountpoint; otherwise it is
		# a dataset name. Match against the corresponding column
		# of 'zfs mount' output.
		if [[ "$1" == "/"* ]] ; then
			for out in $(zfs mount | awk '{print $2}'); do
				[[ $1 == $out ]] && return 0
			done
		else
			for out in $(zfs mount | awk '{print $1}'); do
				[[ $1 == $out ]] && return 0
			done
		fi
		;;
	ufs|nfs)
		out=$(df -F $fstype $1 2>/dev/null)
		ret=$?
		(($ret != 0)) && return $ret

		# Parse "mountpoint (name): ..." output from df to
		# recover both the mountpoint and the device/share name.
		dir=${out%%\(*}
		dir=${dir%% *}
		name=${out##*\(}
		name=${name%%\)*}
		name=${name%% *}

		[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	ext2)
		# df exits 0 only when $1 is a mounted ext2 target.
		out=$(df -t $fstype $1 2>/dev/null)
		return $?
		;;
	zvol)
		# Resolve the zvol symlink and look for the backing
		# device at the start of a 'mount' output line.
		if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
			link=$(readlink -f $ZVOL_DEVDIR/$1)
			[[ -n "$link" ]] && \
			    mount | grep -q "^$link" && \
				return 0
		fi
		;;
	esac

	return 1
}
136
137 # Return 0 if a dataset is mounted; 1 otherwise
138 #
139 # $1 dataset name
140 # $2 filesystem type; optional - defaulted to zfs
141
# Return 0 if the dataset ($1, optional fstype $2) is mounted; 1 otherwise.
function mounted
{
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
148
149 # Return 0 if a dataset is unmounted; 1 otherwise
150 #
151 # $1 dataset name
152 # $2 filesystem type; optional - defaulted to zfs
153
# Return 0 if the dataset ($1, optional fstype $2) is unmounted; 1 otherwise.
# Note: only ismounted's "unmounted" status (1) counts; error statuses do not.
function unmounted
{
	typeset rc
	ismounted $1 $2
	rc=$?
	if ((rc == 1)); then
		return 0
	fi
	return 1
}
160
161 # split line on ","
162 #
163 # $1 - line to split
164
# Split $1 on "," and emit the fields separated by spaces.
# Uses shell parameter expansion instead of forking sed.
function splitline
{
	echo ${1//,/ }
}
169
170 function default_setup
171 {
172 default_setup_noexit "$@"
173
174 log_pass
175 }
176
177 #
178 # Given a list of disks, setup storage pools and datasets.
179 #
#
# $1 disk list the pool is built from
# $2 non-empty => also create a container dataset with $TESTFS1
# $3 non-empty => also create a volume
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3
	log_note begin default_setup_noexit

	# In the global zone we own the pool and recreate it from
	# scratch; in a local zone the pool is managed for us and is
	# simply re-exported.
	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	# Optional container: an unmountable parent dataset holding a
	# second filesystem mounted at $TESTDIR1.
	if [[ -n $container ]]; then
		rm -rf $TESTDIR1 || \
		    log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
		    log_unresolved Could not create $TESTDIR1

		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	# Optional volume: a real block volume in the global zone;
	# local zones get a plain filesystem of the same name.
	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
			block_device_wait
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
225
226 #
227 # Given a list of disks, setup a storage pool, file system and
228 # a container.
229 #
# Set up a pool, filesystem and container from the given disk list.
function default_container_setup
{
	default_setup "$1" "true"
}
236
237 #
238 # Given a list of disks, setup a storage pool,file system
239 # and a volume.
240 #
# Set up a pool, filesystem and volume from the given disk list.
function default_volume_setup
{
	default_setup "$1" "" "true"
}
247
248 #
249 # Given a list of disks, setup a storage pool,file system,
250 # a container and a volume.
251 #
# Set up a pool, filesystem, container and volume from the given disk list.
function default_container_volume_setup
{
	default_setup "$1" "true" "true"
}
258
259 #
260 # Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
261 # filesystem
262 #
263 # $1 Existing filesystem or volume name. Default, $TESTFS
264 # $2 snapshot name. Default, $TESTSNAP
265 #
function create_snapshot
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	# Refuse to clobber an existing snapshot, and require that the
	# dataset being snapshotted actually exists.
	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
282
283 #
284 # Create a clone from a snapshot, default clone name is $TESTCLONE.
285 #
286 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
287 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
288 #
# Create a clone ($2, default $TESTPOOL/$TESTCLONE) from a snapshot
# ($1, default $TESTPOOL/$TESTFS@$TESTSNAP).
function create_clone # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	[[ -n $snap ]] || log_fail "Snapshot name is undefined."
	[[ -n $clone ]] || log_fail "Clone name is undefined."

	log_must zfs clone $snap $clone
}
301
302 #
303 # Create a bookmark of the given snapshot. Defaultly create a bookmark on
304 # filesystem.
305 #
306 # $1 Existing filesystem or volume name. Default, $TESTFS
307 # $2 Existing snapshot name. Default, $TESTSNAP
308 # $3 bookmark name. Default, $TESTBKMARK
309 #
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	# The bookmark must not pre-exist; the dataset and the source
	# snapshot must both exist.
	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
	    log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
330
331 function default_mirror_setup
332 {
333 default_mirror_setup_noexit $1 $2 $3
334
335 log_pass
336 }
337
338 #
339 # Given a pair of disks, set up a storage pool and dataset for the mirror
340 # @parameters: $1 the primary side of the mirror
341 # $2 the secondary side of the mirror
342 # @uses: ZPOOL ZFS TESTPOOL TESTFS
343 function default_mirror_setup_noexit
344 {
345 readonly func="default_mirror_setup_noexit"
346 typeset primary=$1
347 typeset secondary=$2
348
349 [[ -z $primary ]] && \
350 log_fail "$func: No parameters passed"
351 [[ -z $secondary ]] && \
352 log_fail "$func: No secondary partition passed"
353 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
354 log_must zpool create -f $TESTPOOL mirror $@
355 log_must zfs create $TESTPOOL/$TESTFS
356 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
357 }
358
359 #
360 # create a number of mirrors.
361 # We create a number($1) of 2 way mirrors using the pairs of disks named
362 # on the command line. These mirrors are *not* mounted
363 # @parameters: $1 the number of mirrors to create
364 # $... the devices to use to create the mirrors on
365 # @uses: ZPOOL ZFS TESTPOOL
# Create $1 two-way mirror pools named $TESTPOOL<n> from successive
# disk pairs in the remaining arguments. The pools are not mounted.
function setup_mirrors
{
	typeset -i remaining=$1
	shift

	while ((remaining > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$remaining ]] && rm -rf /$TESTPOOL$remaining
		log_must zpool create -f $TESTPOOL$remaining mirror $1 $2
		shift 2
		((remaining = remaining - 1))
	done
}
379
380 #
381 # create a number of raidz pools.
382 # We create a number($1) of 2 raidz pools using the pairs of disks named
383 # on the command line. These pools are *not* mounted
384 # @parameters: $1 the number of pools to create
385 # $... the devices to use to create the pools on
386 # @uses: ZPOOL ZFS TESTPOOL
# Create $1 two-disk raidz pools named $TESTPOOL<n> from successive
# disk pairs in the remaining arguments. The pools are not mounted.
function setup_raidzs
{
	typeset -i remaining=$1
	shift

	while ((remaining > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$remaining ]] && rm -rf /$TESTPOOL$remaining
		log_must zpool create -f $TESTPOOL$remaining raidz $1 $2
		shift 2
		((remaining = remaining - 1))
	done
}
400
401 #
402 # Destroy the configured testpool mirrors.
403 # the mirrors are of the form ${TESTPOOL}{number}
404 # @uses: ZPOOL ZFS TESTPOOL
405 function destroy_mirrors
406 {
407 default_cleanup_noexit
408
409 log_pass
410 }
411
412 #
413 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
414 # $1 the list of disks
415 #
# Given a minimum of two disks, set up a storage pool and dataset for
# the raid-z. All supplied disks are used (previously only the first
# three positional parameters were passed to zpool create, silently
# ignoring any additional disks).
#
# $* the list of disks
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
432
433 #
434 # Common function used to cleanup storage pools and datasets.
435 #
436 # Invoked at the start of the test suite to ensure the system
437 # is in a known state, and also at the end of each set of
438 # sub-tests to ensure errors from one set of tests doesn't
439 # impact the execution of the next set.
440
441 function default_cleanup
442 {
443 default_cleanup_noexit
444
445 log_pass
446 }
447
# Destroy every destroyable pool (global zone) or reset the zone's
# datasets to their defaults (local zone), then remove $TESTDIR.
# Pools matching $KEEP or $NO_POOLS are preserved.
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so. Use [[ -n ... ]]: the previous unquoted
		# '[ ! -z ${ALL_POOLS} ]' expanded to multiple words
		# when more than one pool existed, making the test
		# error out and skip the destroy loop entirely.
		while [[ -n $ALL_POOLS ]]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Re-list so newly destroyable pools
				# (e.g. after dependents go away) are
				# picked up on the next pass.
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
			    log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# checksum cannot be reset on encrypted
				# datasets, so only touch it when
				# encryption is absent or off.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
				    [[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
	    log_must rm -rf $TESTDIR

	# Multipath devices must be left un-partitioned for the suite.
	disk1=${DISKS%% *}
	if is_mpath_device $disk1; then
		delete_partitions
	fi
}
529
530
531 #
532 # Common function used to cleanup storage pools, file systems
533 # and containers.
534 #
function default_container_cleanup
{
	# In a local zone the pool is managed externally; re-export it
	# before tearing down the datasets.
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
556
557 #
558 # Common function used to cleanup snapshot of file system or volume. Default to
559 # delete the file system's snapshot
560 #
561 # $1 snapshot name
562 #
# Destroy a snapshot (default $TESTPOOL/$TESTFS@$TESTSNAP) and remove
# its mountpoint directory if it was mounted.
#
# $1 snapshot name
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		# Message fixed: was "does not existed."
		log_fail "'$snap' does not exist."
	fi

	#
	# get_prop only reports the real mountpoint while the snapshot
	# is mounted, so capture it before the destroy and only when the
	# snapshot is actually mounted on the current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}
587
588 #
589 # Common function used to cleanup clone.
590 #
591 # $1 clone name
592 #
# Destroy a clone (default $TESTPOOL/$TESTCLONE) and remove its
# mountpoint directory if it was mounted.
#
# $1 clone name
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# Message fixed: was "does not existed."
		log_fail "'$clone' does not exist."
	fi

	# Same reasoning as destroy_snapshot: capture the mountpoint
	# before destroying, and only when the clone is mounted.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}
613
614 #
615 # Common function used to cleanup bookmark of file system or volume. Default
616 # to delete the file system's bookmark.
617 #
618 # $1 bookmark name
619 #
# Destroy a bookmark (default $TESTPOOL/$TESTFS#$TESTBKMARK).
#
# $1 bookmark name
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fixed: previously interpolated the undefined variable
		# $bkmarkp, producing an empty name in the message.
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
630
631 # Return 0 if a snapshot exists; $? otherwise
632 #
633 # $1 - snapshot name
634
# Return 0 if the snapshot named in $1 exists; non-zero otherwise.
function snapexists
{
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
640
641 #
642 # Return 0 if a bookmark exists; $? otherwise
643 #
644 # $1 - bookmark name
645 #
# Return 0 if the bookmark named in $1 exists; non-zero otherwise.
function bkmarkexists
{
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
651
652 #
653 # Set a property to a certain value on a dataset.
654 # Sets a property of the dataset to the value as passed in.
655 # @param:
656 # $1 dataset who's property is being set
657 # $2 property to set
658 # $3 value to set property to
659 # @return:
660 # 0 if the property could be set.
661 # non-zero otherwise.
662 # @use: ZFS
663 #
# Set property $2 to value $3 on dataset $1.
# Returns 0 on success; zfs's exit status (with diagnostics logged)
# on failure; 1 when fewer than three arguments are supplied.
function dataset_setprop
{
	typeset fn=dataset_setprop

	if [[ $# -lt 3 ]]; then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	typeset output
	output=$(zfs set $2=$3 $1 2>&1)
	typeset -i rv=$?
	if [[ $rv -ne 0 ]]; then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}
684
685 #
686 # Assign suite defined dataset properties.
687 # This function is used to apply the suite's defined default set of
688 # properties to a dataset.
689 # @parameters: $1 dataset to use
690 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
691 # @returns:
692 # 0 if the dataset has been altered.
693 # 1 if no pool name was passed in.
694 # 2 if the dataset could not be found.
695 # 3 if the dataset could not have it's properties set.
696 #
# Apply the suite's default properties ($COMPRESSION_PROP,
# $CHECKSUM_PROP when set) to dataset $1.
# Returns 0 on success, 1 if no name given, 2 if the dataset is not
# found, 3 if a property could not be set.
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# Use 'zfs list -H -o name' (as elsewhere in this library) so we
	# compare only against dataset names, not every whitespace token
	# of the default listing (headers, mountpoints, sizes).
	for confset in $(zfs list -H -o name); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
		    return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
		    return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
724
725 #
726 # Check a numeric assertion
727 # @parameter: $@ the assertion to check
728 # @output: big loud notice if assertion failed
729 # @use: log_fail
730 #
# Evaluate the arguments as an arithmetic assertion; on failure, fail
# the test loudly via log_fail.
function assert
{
	if ! (($@)); then
		log_fail "$@"
	fi
}
735
736 #
737 # Function to format partition size of a disk
738 # Given a disk cxtxdx reduces all partitions
739 # to 0 size
740 #
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset i

	if is_linux; then
		# Writing a fresh GPT label removes every partition in
		# one step.
		log_must parted $DEV_DSKDIR/$diskname -s -- mklabel gpt
	else
		# Slice 2 is the traditional whole-disk slice; skip it.
		for i in 0 1 3 4 5 6 7
		do
			log_must set_partition $i "" 0mb $diskname
		done
	fi

	return 0
}
757
758 #
759 # Given a slice, size and disk, this function
760 # formats the slice to the specified size.
761 # Size should be specified with units as per
762 # the `format` command requirements eg. 100mb 3gb
763 #
764 # NOTE: This entire interface is problematic for the Linux parted utilty
765 # which requires the end of the partition to be specified. It would be
766 # best to retire this interface and replace it with something more flexible.
767 # At the moment a best effort is made.
768 #
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."

	if is_linux; then
		# Strip the unit suffix (m/mb/g/gb, any case) to get a
		# bare number, then normalize gigabytes to megabytes.
		typeset size_mb=${size%%[mMgG]}

		size_mb=${size_mb%%[mMgG][bB]}
		# Fixed: match the unit letter anywhere in the size.
		# The previous ${size:1:1} check looked only at the
		# second character and so failed for sizes >= 10g.
		if [[ $size == *[gG]* ]]; then
			((size_mb = size_mb * 1024))
		fi

		# Create GPT partition table when setting slice 0 or
		# when the device doesn't already contain a GPT label.
		parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
		typeset ret_val=$?
		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
			parted $DEV_DSKDIR/$disk -s -- mklabel gpt
			if [[ $? -ne 0 ]]; then
				log_note "Failed to create GPT partition table on $disk"
				return 1
			fi
		fi

		# When no start is given align on the first cylinder.
		if [[ -z "$start" ]]; then
			start=1
		fi

		# Determine the cylinder size for the device and using
		# that calculate the end offset in cylinders.
		typeset -i cly_size_kb=0
		cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
		    unit cyl print | head -3 | tail -1 | \
		    awk -F '[:k.]' '{print $4}')
		((end = (size_mb * 1024 / cly_size_kb) + start))

		parted $DEV_DSKDIR/$disk -s -- \
		    mkpart part$slicenum ${start}cyl ${end}cyl
		if [[ $? -ne 0 ]]; then
			log_note "Failed to create partition $slicenum on $disk"
			return 1
		fi

		blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
		block_device_wait
	else
		# Drive the Solaris 'format' utility via a scripted
		# answer file.
		typeset format_file=/var/tmp/format_in.$$

		echo "partition" >$format_file
		echo "$slicenum" >> $format_file
		echo "" >> $format_file
		echo "" >> $format_file
		echo "$start" >> $format_file
		echo "$size" >> $format_file
		echo "label" >> $format_file
		echo "" >> $format_file
		echo "q" >> $format_file
		echo "q" >> $format_file

		format -e -s -d $disk -f $format_file
	fi

	# NOTE(review): on the Linux path this captures the status of
	# block_device_wait (parted failures already returned above),
	# and $format_file is set only on the non-Linux path.
	typeset ret_val=$?
	[[ -n $format_file ]] && rm -f $format_file
	if [[ $ret_val -ne 0 ]]; then
		log_note "Unable to format $disk slice $slicenum to $size"
		return 1
	fi
	return 0
}
845
846 #
847 # Delete all partitions on all disks - this is specifically for the use of multipath
848 # devices which currently can only be used in the test suite as raw/un-partitioned
849 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
850 #
function delete_partitions
{
	typeset -i j=1

	# Derive defaults from $DISKS when the caller hasn't set them.
	if [[ -z $DISK_ARRAY_NUM ]]; then
		DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
	fi
	if [[ -z $DISKSARRAY ]]; then
		DISKSARRAY=$DISKS
	fi

	if is_linux; then
		if (( $DISK_ARRAY_NUM == 1 )); then
			while ((j < MAX_PARTITIONS)); do
				# parted exits 1 once partition j no
				# longer exists; confirm via lsblk that
				# no device node remains, then stop.
				parted $DEV_DSKDIR/$DISK -s rm $j \
				    > /dev/null 2>&1
				if (( $? == 1 )); then
					lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 1 )); then
						log_note "Partitions for $DISK should be deleted"
					else
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
					return 0
				else
					# Deletion claimed success; make
					# sure the node really went away.
					lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 0 )); then
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
				fi
				((j = j+1))
			done
		else
			# Multiple disks: same procedure for each one.
			for disk in `echo $DISKSARRAY`; do
				while ((j < MAX_PARTITIONS)); do
					parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
					if (( $? == 1 )); then
						lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 1 )); then
							log_note "Partitions for $disk should be deleted"
						else
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
						# j=7 ends the scan for this
						# disk (assumes MAX_PARTITIONS
						# is 8 — TODO confirm).
						j=7
					else
						lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 0 )); then
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
					fi
					((j = j+1))
				done
				j=1
			done
		fi
	fi
	return 0
}
909
910 #
911 # Get the end cyl of the given slice
912 #
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	if is_linux; then
		# Read the partition end from parted's cylinder-unit
		# listing; +1 so the result can serve as the next
		# partition's start.
		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
		    grep "part${slice}" | \
		    awk '{print $3}' | \
		    sed 's,cyl,,')
		((endcyl = (endcyl + 1)))
	else
		# Normalize /dev/(r)dsk paths to the bare disk name.
		disk=${disk#/dev/dsk/}
		disk=${disk#/dev/rdsk/}
		disk=${disk%s*}

		# sectors-per-cylinder converts prtvtoc's sector offsets
		# into cylinders.
		typeset -i ratio=0
		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		    grep "sectors\/cylinder" | \
		    awk '{print $2}')

		if ((ratio == 0)); then
			return
		fi

		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		    nawk -v token="$slice" '{if ($1==token) print $6}')

		((endcyl = (endcyl + 1) / ratio))
	fi

	echo $endcyl
}
949
950
951 #
952 # Given a size,disk and total slice number, this function formats the
953 # disk slices from 0 to the total slice number with the same specified
954 # size.
955 #
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	while ((i < $total_slices)); do
		if ! is_linux; then
			# Skip slice 2, the whole-disk slice on Solaris.
			if ((i == 2)); then
				((i = i + 1))
				continue
			fi
		fi
		# Each slice starts where the previous one ended.
		log_must set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}
977
978 #
979 # This function continues to write to a filenum number of files into dirnum
980 # number of directories until either file_write returns an error or the
981 # maximum number of files per directory have been written.
982 #
983 # Usage:
984 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
985 #
986 # Return value: 0 on success
987 # non 0 on error
988 #
989 # Where :
990 # destdir: is the directory where everything is to be created under
991 # dirnum: the maximum number of subdirectories to use, -1 no limit
992 # filenum: the maximum number of files per subdirectory
993 # bytes: number of bytes to write
994 # num_writes: numer of types to write out bytes
995 # data: the data that will be written
996 #
997 # E.g.
998 # file_fs /testdir 20 25 1024 256 0
999 #
1000 # Note: bytes * num_writes equals the size of the testfile
1001 #
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1	# loop flag: 1 = keep writing
	typeset -i idirnum=0	# index of current subdirectory
	typeset -i fn=0		# index of current file within it
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# Stop once the directory limit is reached
		# (dirnum == -1 means no limit).
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# Any write failure (e.g. ENOSPC) ends the fill, and its
		# status is propagated to the caller.
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# Move on to a new subdirectory once this one is full.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
1039
1040 #
1041 # Simple function to get the specified property. If unable to
1042 # get the property then exits.
1043 #
1044 # Note property is in 'parsable' format (-p)
1045 #
# Print the parsable (-p) value of property $1 on dataset $2.
# Returns 0 and echoes the value on success; logs and returns 1 when
# the property cannot be read.
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	if ! prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null); then
		log_note "Unable to get $prop property for dataset " \
		    "$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
1062
1063 #
1064 # Simple function to get the specified property of pool. If unable to
1065 # get the property then exits.
1066 #
1067 # Note property is in 'parsable' format (-p)
1068 #
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
		    awk '{print $3}')
		# NOTE(review): $? here reflects the final awk stage of
		# the pipeline, not zpool itself.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			    "$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo "$prop_val"
	return 0
}
1091
1092 # Return 0 if a pool exists; $? otherwise
1093 #
1094 # $1 - pool name
1095
# Return 0 if the pool named in $1 exists; non-zero otherwise
# (1 when no name is supplied).
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	zpool get name "$pool" > /dev/null 2>&1
}
1108
1109 # Return 0 if all the specified datasets exist; $? otherwise
1110 #
1111 # $1-n dataset name
# Return 0 if every named dataset ($1..$n) exists; otherwise the
# failing zfs status (1 when no names are supplied).
function datasetexists
{
	if [[ $# -eq 0 ]]; then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || \
		    return $?
	done

	return 0
}
1127
1128 # return 0 if none of the specified datasets exists, otherwise return 1.
1129 #
1130 # $1-n dataset name
# Return 0 if none of the named datasets ($1..$n) exists; 1 otherwise
# (also 1 when no names are supplied).
function datasetnonexists
{
	if [[ $# -eq 0 ]]; then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		if zfs list -H -t filesystem,snapshot,volume $ds \
		    > /dev/null 2>&1; then
			return 1
		fi
	done

	return 0
}
1146
1147 #
1148 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1149 #
1150 # Returns 0 if shared, 1 otherwise.
1151 #
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name (no leading '/') is translated to its
	# mountpoint first; unmountable values mean "not shared".
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
			none|legacy|-) return 1
				;;
			*) fs=$mtpt
				;;
			esac
		fi
	fi

	# Compare the mountpoint against the share table; the column
	# holding the path differs between Linux and Solaris 'share'.
	if is_linux; then
		for mtpt in `share | awk '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	fi

	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Solaris only: log the NFS server service state to help
	# diagnose why nothing is shared.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1193
1194 #
1195 # Given a dataset name determine if it is shared via SMB.
1196 #
1197 # Returns 0 if shared, 1 otherwise.
1198 #
function is_shared_smb
{
	typeset fs=$1
	typeset mtpt

	if datasetnonexists "$fs" ; then
		return 1
	else
		# SMB share names use '_' where the dataset name has '/'.
		fs=$(echo $fs | sed 's@/@_@g')
	fi

	if is_linux; then
		for mtpt in `net usershare list | awk '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	else
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi
}
1222
1223 #
1224 # Given a mountpoint, determine if it is not shared via NFS.
1225 #
1226 # Returns 0 if not shared, 1 otherwise.
1227 #
# Return 0 if the given mountpoint/dataset is NOT shared via NFS;
# 1 otherwise.
function not_shared
{
	if is_shared $1; then
		return 1
	fi

	return 0
}
1239
1240 #
1241 # Given a dataset determine if it is not shared via SMB.
1242 #
1243 # Returns 0 if not shared, 1 otherwise.
1244 #
# Return 0 if the given dataset is NOT shared via SMB; 1 otherwise.
function not_shared_smb
{
	if is_shared_smb $1; then
		return 1
	fi

	return 0
}
1256
1257 #
1258 # Helper function to unshare a mountpoint.
1259 #
# Unshare $1 when it is currently shared via NFS or SMB; always
# returns 0.
function unshare_fs #fs
{
	typeset fs=$1

	if is_shared $fs || is_shared_smb $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1271
1272 #
1273 # Helper function to share a NFS mountpoint.
1274 #
# Share $1 over NFS if it is not already shared; always returns 0.
# The share invocation differs between Linux and Solaris.
function share_nfs #fs
{
	typeset fs=$1

	if is_linux; then
		if ! is_shared $fs; then
			log_must share "*:$fs"
		fi
	else
		if ! is_shared $fs; then
			log_must share -F nfs $fs
		fi
	fi

	return 0
}
1293
1294 #
1295 # Helper function to unshare a NFS mountpoint.
1296 #
function unshare_nfs #fs
{
	typeset fs=$1

	# Only act when the filesystem is currently exported.
	if is_shared $fs; then
		# The unshare(1) syntax differs between Linux and illumos.
		if is_linux; then
			log_must unshare -u "*:$fs"
		else
			log_must unshare -F nfs $fs
		fi
	fi

	return 0
}
1315
1316 #
1317 # Helper function to show NFS shares.
1318 #
function showshares_nfs
{
	# Pick the platform-specific listing command, then run it.
	typeset sharecmd="share -F nfs"

	is_linux && sharecmd="share -v"
	$sharecmd

	return 0
}
1329
1330 #
1331 # Helper function to show SMB shares.
1332 #
function showshares_smb
{
	# Pick the platform-specific listing command, then run it.
	typeset sharecmd="share -F smb"

	is_linux && sharecmd="net usershare list"
	$sharecmd

	return 0
}
1343
1344 #
1345 # Check NFS server status and trigger it online.
1346 #
function setup_nfs_server
{
	#
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	if is_linux; then
		# Grammar fix: was "NFS server must started prior to ..."
		log_note "NFS server must be started prior to running test framework."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only a real sharing operation can bring the NFS server
		# online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Wait for the fmri to reach its final status.  While in
		# transition an asterisk (*) is appended to the instance
		# status, and an unshare would flip it back to 'DIS'.
		#
		# Wait at least 1 second.
		#
		log_must sleep 1
		# 'typeset' keeps the counter local; the loop condition now
		# uses an explicit arithmetic test instead of relying on
		# '[[ timeout -ne 0 ]]' implicitly expanding the bare name.
		typeset -i timeout=10
		while ((timeout > 0)) && \
		    [[ $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1398
1399 #
1400 # To verify whether calling process is in global zone
1401 #
1402 # Return 0 if in global zone, 1 in non-global zone
1403 #
function is_global_zone
{
	# Linux has no zones, so every process is effectively "global".
	is_linux && return 0

	# On illumos, compare the current zone name against "global";
	# the [[ ]] status becomes the function's return value.
	typeset this_zone=$(zonename 2>/dev/null)
	[[ $this_zone == "global" ]]
}
1416
1417 #
1418 # Verify whether test is permitted to run from
1419 # global zone, local zone, or both
1420 #
1421 # $1 zone limit, could be "global", "local", or "both"(no limit)
1422 #
1423 # Return 0 if permitted, otherwise exit with log_unsupported
1424 #
function verify_runnable # zone limit
{
	typeset limit=$1

	# No restriction given: the test may run anywhere.
	[[ -z $limit ]] && return 0

	# log_unsupported aborts the test; the fall-through arms permit it.
	if is_global_zone ; then
		case $limit in
		global|both)
			;;
		local)	log_unsupported "Test is unable to run from "\
			"global zone."
			;;
		*)	log_note "Warning: unknown limit $limit - " \
			"use both."
			;;
		esac
	else
		case $limit in
		local|both)
			;;
		global)	log_unsupported "Test is unable to run from "\
			"local zone."
			;;
		*)	log_note "Warning: unknown limit $limit - " \
			"use both."
			;;
		esac

		# In a local zone the TESTPOOL variables must be re-pointed
		# at the zone's container datasets before the test runs.
		reexport_pool
	fi

	return 0
}
1459
1460 # Return 0 if create successfully or the pool exists; $? otherwise
1461 # Note: In local zones, this function should return 0 silently.
1462 #
1463 # $1 - pool name
1464 # $2-n - [keyword] devs_list
1465
function create_pool #pool devs_list
{
	# Accept "pool/dataset" style names; only the pool part is used.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Start from a clean slate if the pool already exists.
	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		# Quote "$@" so vdev specifications containing whitespace
		# survive intact (the original unquoted $@ re-split them).
		log_must zpool create -f $pool "$@"
	fi

	return 0
}
1488
1489 # Return 0 if destroy successfully or the pool exists; $? otherwise
1490 # Note: In local zones, this function should return 0 silently.
1491 #
1492 # $1 - pool name
1493 # Destroy pool with the given parameters.
1494
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# Pool destruction is only possible from the global zone.
	if ! is_global_zone ; then
		return 0
	fi

	if ! poolexists "$pool" ; then
		log_note "Pool does not exist. ($pool)"
		return 1
	fi

	mtpt=$(get_prop mountpoint "$pool")

	# At times, syseventd/udev activity can cause attempts to destroy
	# a pool to fail with EBUSY. We retry a few times allowing failures
	# before requiring the destroy to succeed.
	log_must_busy zpool destroy -f $pool

	# Clean up the stale mountpoint directory left behind.
	[[ -d $mtpt ]] && log_must rm -rf $mtpt

	return 0
}
1525
1526 #
1527 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1528 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1529 # and a zvol device to the zone.
1530 #
1531 # $1 zone name
1532 # $2 zone root directory prefix
1533 # $3 zone ip
1534 #
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 containers within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol and wait for its device node to appear
	log_must zfs create -V 1g $pool_name/zone_zvol
	block_device_wait

	#
	# If the current system supports slog, add slog devices for the pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	# Remove any previous zone directory, then create it mode 0700
	# as zoneadm requires.
	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configuration file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the 5 container datasets to the zone.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
		    $zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file so the zone boots unattended
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo "terminal=dtterm" >> $sysidcfg
	echo "network_interface=primary {" >> $sysidcfg
	echo "hostname=$zone_name" >> $sysidcfg
	echo "}" >> $sysidcfg
	echo "name_service=NONE" >> $sysidcfg
	echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo "security_policy=NONE" >> $sysidcfg
	echo "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1639
1640 #
1641 # Reexport TESTPOOL & TESTPOOL(1-4)
1642 #
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Repoint TESTPOOL (and TESTPOOL1..TESTPOOL4) at the zone's
	# delegated container datasets and make sure each is mounted.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# eval is required because the variable name
			# (TESTPOOL$i) is constructed dynamically.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1663
1664 #
1665 # Verify a given disk or pool state
1666 #
1667 # Return 0 is pool/disk matches expected state, 1 otherwise
1668 #
function check_state # pool disk state{online,offline,degraded}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	if [[ -z $pool || -z $state ]]; then
		log_fail "Arguments invalid or missing"
	fi

	if [[ -z $disk ]]; then
		# No disk given - check the overall pool health only.
		zpool get -H -o value health $pool \
		    | grep -i "$state" > /dev/null 2>&1
	else
		# Match the state on the disk's own row of the status output.
		zpool status -v $pool | grep "$disk" \
		    | grep -i "$state" > /dev/null 2>&1
	fi

	# The status of the final pipeline is the result.
	return $?
}
1689
1690 #
1691 # Cause a scan of all scsi host adapters by default
1692 #
1693 # $1 optional host number
1694 #
#
# Cause a scan of all scsi host adapters by default
#
# $1 optional host number
#
function scan_scsi_hosts
{
	typeset hostnum=${1}

	if is_linux; then
		if [[ -z $hostnum ]]; then
			# No host given: rescan every SCSI host adapter.
			for host in /sys/class/scsi_host/host*; do
				log_must eval "echo '- - -' > $host/scan"
			done
		else
			# Rescan only the requested host adapter.  (The
			# previous version first echoed the sysfs path itself
			# into /dev/null, which was a no-op and has been
			# removed.)
			log_must eval \
			    "echo '- - -' > /sys/class/scsi_host/host$hostnum/scan"
		fi
	fi
}
1713 #
1714 # Wait for newly created block devices to have their minors created.
1715 #
function block_device_wait
{
	# On Linux, prod udev and wait for it to settle so newly created
	# device nodes exist before the caller proceeds.  Elsewhere this
	# is a no-op.
	if ! is_linux; then
		return
	fi

	udevadm trigger
	udevadm settle
}
1723
1724 #
1725 # Online or offline a disk on the system
1726 #
1727 # First checks state of disk. Test will fail if disk is not properly onlined
1728 # or offlined. Online is a full rescan of SCSI disks by echoing to every
1729 # host entry.
1730 #
function on_off_disk # disk state{online,offline} host
{
	typeset disk=$1
	typeset state=$2
	typeset host=$3

	[[ -z $disk ]] || [[ -z $state ]] && \
	    log_fail "Arguments invalid or missing"

	# Linux only: manipulate disks through sysfs.  Offline removes the
	# device node(s); online rescans the SCSI hosts to rediscover them.
	if is_linux; then
		if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
			# Multipath device: resolve the dm name and offline
			# each slave device underneath it in turn.
			dm_name="$(readlink $DEV_DSKDIR/$disk \
			    | nawk -F / '{print $2}')"
			slave="$(ls /sys/block/${dm_name}/slaves \
			    | nawk '{print $1}')"
			while [[ -n $slave ]]; do
				#check if disk is online
				lsscsi | egrep $slave > /dev/null
				if (($? == 0)); then
					slave_dir="/sys/block/${dm_name}"
					slave_dir+="/slaves/${slave}/device"
					ss="${slave_dir}/state"
					sd="${slave_dir}/delete"
					log_must eval "echo 'offline' > ${ss}"
					log_must eval "echo '1' > ${sd}"
					# Re-check: the device must be gone
					# from lsscsi once deleted.
					lsscsi | egrep $slave > /dev/null
					if (($? == 0)); then
						log_fail "Offlining" \
						    "$disk failed"
					fi
				fi
				# Pick up the next remaining slave, if any.
				slave="$(ls /sys/block/$dm_name/slaves \
				    2>/dev/null | nawk '{print $1}')"
			done
		elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
			#check if disk is online
			lsscsi | egrep $disk > /dev/null
			if (($? == 0)); then
				dev_state="/sys/block/$disk/device/state"
				dev_delete="/sys/block/$disk/device/delete"
				log_must eval "echo 'offline' > ${dev_state}"
				log_must eval "echo '1' > ${dev_delete}"
				lsscsi | egrep $disk > /dev/null
				if (($? == 0)); then
					log_fail "Offlining $disk" \
					    "failed"
				fi
			else
				log_note "$disk is already offline"
			fi
		elif [[ $state == "online" ]]; then
			#force a full rescan
			scan_scsi_hosts $host
			block_device_wait
			if is_mpath_device $disk; then
				# After the rescan the dm device's slaves
				# should be visible to lsscsi again.
				dm_name="$(readlink $DEV_DSKDIR/$disk \
				    | nawk -F / '{print $2}')"
				slave="$(ls /sys/block/$dm_name/slaves \
				    | nawk '{print $1}')"
				lsscsi | egrep $slave > /dev/null
				if (($? != 0)); then
					log_fail "Onlining $disk failed"
				fi
			elif is_real_device $disk; then
				lsscsi | egrep $disk > /dev/null
				if (($? != 0)); then
					log_fail "Onlining $disk failed"
				fi
			else
				log_fail "$disk is not a real dev"
			fi
		else
			log_fail "$disk failed to $state"
		fi
	fi
}
1807
1808 #
1809 # Get the mountpoint of snapshot
1810 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1811 # as its mountpoint
1812 #
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# The name must contain '@' and yield non-empty fs and snap parts.
	if [[ $dataset != *@* || -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	# Snapshots are exposed under <fs mountpoint>/.zfs/snapshot/<snap>.
	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1830
1831 #
1832 # Given a device and 'ashift' value verify it's correctly set on every label
1833 #
1834 function verify_ashift # device ashift
1835 {
1836 typeset device="$1"
1837 typeset ashift="$2"
1838
1839 zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
1840 if (ashift != $2)
1841 exit 1;
1842 else
1843 count++;
1844 } END {
1845 if (count != 4)
1846 exit 1;
1847 else
1848 exit 0;
1849 }'
1850
1851 return $?
1852 }
1853
1854 #
1855 # Given a pool and file system, this function will verify the file system
1856 # using the zdb internal tool. Note that the pool is exported and imported
1857 # to ensure it has consistent state.
1858 #
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	# Any remaining arguments are extra device search directories.
	shift 2
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"

	# Export/import cycle guarantees zdb sees a consistent pool state.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	# Build "-d <dir>" search-path arguments for the import.
	for dir in $dirs ; do
		search_path="$search_path -d $dir"
	done

	log_must zpool import $search_path $pool

	if ! zdb -cudi $filesys > $zdbout 2>&1; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1892
1893 #
1894 # Given a pool, and this function list all disks in the pool
1895 #
function get_disklist # pool
{
	typeset disklist=""

	# Forward every argument to zpool iostat so callers may prepend
	# flags before the pool name (get_disklist_fullpath passes -P).
	# Previously only $1 was used, silently dropping the pool argument
	# whenever a flag preceded it.  The filter strips the table
	# decoration rows and the vdev grouping keywords, leaving only
	# leaf device names.
	disklist=$(zpool iostat -v "$@" | nawk '(NR >4) {print $1}' | \
	    grep -v "\-\-\-\-\-" | \
	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")

	echo $disklist
}
1906
1907 #
1908 # Given a pool, and this function list all disks in the pool with their full
1909 # path (like "/dev/sda" instead of "sda").
1910 #
function get_disklist_fullpath # pool
{
	# -P makes zpool iostat print whole device paths (e.g. /dev/sda
	# instead of sda); pass it ahead of the pool name.
	get_disklist -P "$1"
}
1916
1917
1918
1919 # /**
1920 # This function kills a given list of processes after a time period. We use
1921 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1922 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1923 # would be listed as FAIL, which we don't want : we're happy with stress tests
1924 # running for a certain amount of time, then finishing.
1925 #
1926 # @param $1 the time in seconds after which we should terminate these processes
1927 # @param $2..$n the processes we wish to terminate.
1928 # */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"
	typeset pid

	log_note "Waiting for child processes($cpids). " \
	    "It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	for pid in $cpids; do
		# Only signal processes that are still alive.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1948
1949 #
1950 # Verify a given hotspare disk is inuse or avail
1951 #
1952 # Return 0 is pool/disk matches expected state, 1 otherwise
1953 #
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	# Compare the expected state against what the pool reports for
	# this spare; the [[ ]] status is the function's return value.
	cur_state=$(get_device_state $pool $disk "spares")

	[[ $state == ${cur_state} ]]
}
1967
1968 #
1969 # Verify a given slog disk is inuse or avail
1970 #
1971 # Return 0 is pool/disk matches expected state, 1 otherwise
1972 #
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	# Compare the expected state against what the pool reports for
	# this log device; the [[ ]] status is the function's return value.
	cur_state=$(get_device_state $pool $disk "logs")

	[[ $state == ${cur_state} ]]
}
1986
1987 #
1988 # Verify a given vdev disk is inuse or avail
1989 #
1990 # Return 0 is pool/disk matches expected state, 1 otherwise
1991 #
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	# Strip the device-directory prefix as check_hotspare_state and
	# check_slog_state do.  The previous "${2#$/DEV_DSKDIR/}" had a
	# stray '$' before the variable name, so the prefix never matched
	# and full device paths were passed through unstripped.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
2005
2006 #
2007 # Check the output of 'zpool status -v <pool>',
2008 # and to see if the content of <token> contain the <keyword> specified.
2009 #
2010 # Return 0 is contain, 1 otherwise
2011 #
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Extract the "<token>: ..." line from the status output.  The
	# token is passed in via nawk -v, so no shell interpolation occurs
	# inside the program text.
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	# Case-insensitive keyword match decides the return status.
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
2028
2029 #
2030 # These 6 following functions are instance of check_pool_status()
2031 # is_pool_resilvering - to check if the pool is resilver in progress
2032 # is_pool_resilvered - to check if the pool is resilver completed
2033 # is_pool_scrubbing - to check if the pool is scrub in progress
2034 # is_pool_scrubbed - to check if the pool is scrub completed
2035 # is_pool_scrub_stopped - to check if the pool is scrub stopped
2036 # is_pool_scrub_paused - to check if the pool has scrub paused
2037 #
function is_pool_resilvering #pool <verbose>
{
	# A resilver is running while "scan:" reports it in progress;
	# the helper's status is returned directly.
	check_pool_status "$1" "scan" "resilver in progress since " $2
}
2043
function is_pool_resilvered #pool <verbose>
{
	# "scan:" reports "resilvered ..." once a resilver has completed;
	# the helper's status is returned directly.
	check_pool_status "$1" "scan" "resilvered " $2
}
2049
function is_pool_scrubbing #pool <verbose>
{
	# A scrub is running while "scan:" reports it in progress;
	# the helper's status is returned directly.
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
2055
function is_pool_scrubbed #pool <verbose>
{
	# "scan:" reports "scrub repaired" once a scrub has completed;
	# the helper's status is returned directly.
	check_pool_status "$1" "scan" "scrub repaired" $2
}
2061
function is_pool_scrub_stopped #pool <verbose>
{
	# "scan:" reports "scrub canceled" after a scrub is stopped;
	# the helper's status is returned directly.
	check_pool_status "$1" "scan" "scrub canceled" $2
}
2067
function is_pool_scrub_paused #pool <verbose>
{
	# "scan:" reports "scrub paused since ..." while paused;
	# the helper's status is returned directly.
	check_pool_status "$1" "scan" "scrub paused since " $2
}
2073
2074 #
2075 # Use create_pool()/destroy_pool() to clean up the information in
2076 # in the given disk to avoid slice overlapping.
2077 #
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Creating and then destroying a scratch pool on the devices wipes
	# any stale labels / overlapping slice information from them.
	poolexists $pool && destroy_pool $pool

	create_pool $pool $@
	destroy_pool $pool

	return 0
}
2091
2092 #/**
2093 # A function to find and locate free disks on a system or from given
2094 # disks as the parameter. It works by locating disks that are in use
2095 # as swap devices and dump devices, and also disks listed in /etc/vfstab
2096 #
2097 # $@ given disks to find which are free, default is all disks in
2098 # the test system
2099 #
2100 # @return a string containing the list of available disks
2101 #*/
function find_disks
{
	# Trust provided list, no attempt is made to locate unused devices.
	if is_linux; then
		echo "$@"
		return
	fi


	# illumos path: discover disks via format(1M) and filter out any
	# that are mounted, used for swap or dump, or explicitly ignored.
	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	swap -l > $sfi
	dumpadm > $dmpi 2>/dev/null

	# write an awk script that can process the output of format
	# to produce a list of disks we know about. Note that we have
	# to escape "$2" so that the shell doesn't interpret it while
	# we're creating the awk script.
	# -------------------
	cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
	BEGIN { FS="."; }

	/^Specify disk/{
		searchdisks=0;
	}

	{
		if (searchdisks && \$2 !~ "^$"){
			split(\$2,arr," ");
			print arr[1];
		}
	}

	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
EOF
	#---------------------

	chmod 755 /tmp/find_disks.awk
	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
	rm /tmp/find_disks.awk

	unused=""
	for disk in $disks; do
	# Check for mounted
		grep "${disk}[sp]" /etc/mnttab >/dev/null
		(($? == 0)) && continue
	# Check for swap
		grep "${disk}[sp]" $sfi >/dev/null
		(($? == 0)) && continue
	# check for dump device
		grep "${disk}[sp]" $dmpi >/dev/null
		(($? == 0)) && continue
	# check to see if this disk hasn't been explicitly excluded
	# by a user-set environment variable
		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
		(($? == 0)) && continue
		unused_candidates="$unused_candidates $disk"
	done
	rm $sfi
	rm $dmpi

	# now just check to see if those disks do actually exist
	# by looking for a device pointing to the first slice in
	# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if [ -b $DEV_DSKDIR/${disk}s0 ]; then
		if [ $count -lt $max_finddisksnum ]; then
			unused="$unused $disk"
			# do not impose limit if $@ is provided
			[[ -z $@ ]] && ((count = count + 1))
		fi
		fi
	done

	# finally, return our disk list
	echo $unused
}
2185
2186 #
2187 # Add specified user to specified group
2188 #
2189 # $1 group name
2190 # $2 user name
2191 # $3 base of the homedir (optional)
2192 #
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}
	typeset profile

	if ((${#gname} == 0 || ${#uname} == 0)); then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname

	# Make sure the constrained PATH is picked up by every login shell.
	for profile in .profile .bash_profile .login; do
		echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/$profile
	done

	# Add new users to the same group and the command line utils.
	# This allows them to be run out of the original users home
	# directory as long as it permissioned to be group readable.
	if is_linux; then
		cmd_group=$(stat --format="%G" $(which zfs))
		log_must usermod -a -G $cmd_group $uname
	fi

	return 0
}
2218
2219 #
2220 # Delete the specified user.
2221 #
2222 # $1 login name
2223 # $2 base of the homedir (optional)
2224 #
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	if ((${#user} == 0)); then
		log_fail "login name is necessary."
	fi

	# Only remove accounts that actually exist.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	# Clean up the home directory created by add_user.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
2242
2243 #
2244 # Select valid gid and create specified group.
2245 #
2246 # $1 group name
2247 #
function add_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	# Assign 100 as the base gid, a larger value is selected for
	# Linux because for many distributions 1000 and under are reserved.
	if is_linux; then
		# The previous "while true" wrapper here was dead code:
		# every case arm returned on the first iteration, so a
		# single groupadd call is equivalent.
		if groupadd $group > /dev/null 2>&1; then
			return 0
		else
			return 1
		fi
	else
		typeset -i gid=100
		while true; do
			groupadd -g $gid $group > /dev/null 2>&1
			typeset -i ret=$?
			case $ret in
			0) return 0 ;;
			# The gid is not unique
			4) ((gid += 1)) ;;
			*) return 1 ;;
			esac
		done
	fi
}
2281
2282 #
2283 # Delete the specified group.
2284 #
2285 # $1 group name
2286 #
function del_group #<group_name>
{
	typeset grp=$1

	if ((${#grp} == 0)); then
		log_fail "group name is necessary."
	fi

	# Probe for the group first; each platform's probe command reports
	# "no such group" with a different exit code.
	if is_linux; then
		getent group $grp > /dev/null 2>&1
		case $? in
		# Group does not exist.
		2) return 0 ;;
		# Name already exists as a group name
		0) log_must groupdel $grp ;;
		*) return 1 ;;
		esac
	else
		groupmod -n $grp $grp > /dev/null 2>&1
		case $? in
		# Group does not exist.
		6) return 0 ;;
		# Name already exists as a group name
		9) log_must groupdel $grp ;;
		*) return 1 ;;
		esac
	fi

	return 0
}
2318
2319 #
2320 # This function will return true if it's safe to destroy the pool passed
2321 # as argument 1. It checks for pools based on zvols and files, and also
2322 # files contained in a pool that may have a different mountpoint.
2323 #
2324 function safe_to_destroy_pool { # $1 the pool name
2325
2326 typeset pool=""
2327 typeset DONT_DESTROY=""
2328
2329 # We check that by deleting the $1 pool, we're not
2330 # going to pull the rug out from other pools. Do this
2331 # by looking at all other pools, ensuring that they
2332 # aren't built from files or zvols contained in this pool.
2333
2334 for pool in $(zpool list -H -o name)
2335 do
2336 ALTMOUNTPOOL=""
2337
2338 # this is a list of the top-level directories in each of the
2339 # files that make up the path to the files the pool is based on
2340 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2341 awk '{print $1}')
2342
2343 # this is a list of the zvols that make up the pool
2344 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2345 | awk '{print $1}')
2346
2347 # also want to determine if it's a file-based pool using an
2348 # alternate mountpoint...
2349 POOL_FILE_DIRS=$(zpool status -v $pool | \
2350 grep / | awk '{print $1}' | \
2351 awk -F/ '{print $2}' | grep -v "dev")
2352
2353 for pooldir in $POOL_FILE_DIRS
2354 do
2355 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2356 grep "${pooldir}$" | awk '{print $1}')
2357
2358 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2359 done
2360
2361
2362 if [ ! -z "$ZVOLPOOL" ]
2363 then
2364 DONT_DESTROY="true"
2365 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2366 fi
2367
2368 if [ ! -z "$FILEPOOL" ]
2369 then
2370 DONT_DESTROY="true"
2371 log_note "Pool $pool is built from $FILEPOOL on $1"
2372 fi
2373
2374 if [ ! -z "$ALTMOUNTPOOL" ]
2375 then
2376 DONT_DESTROY="true"
2377 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2378 fi
2379 done
2380
2381 if [ -z "${DONT_DESTROY}" ]
2382 then
2383 return 0
2384 else
2385 log_note "Warning: it is not safe to destroy $1!"
2386 return 1
2387 fi
2388 }
2389
2390 #
2391 # Get the available ZFS compression options
2392 # $1 option type zfs_set|zfs_compress
2393 #
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
	    gzip-6 gzip-7 gzip-8 gzip-9"

	# "zfs_set" additionally allows turning compression off.
	case $1 in
	zfs_compress)
		COMPRESS_OPTS="on lzjb"
		;;
	zfs_set)
		COMPRESS_OPTS="on off lzjb"
		;;
	esac

	typeset valid_opts="$COMPRESS_OPTS"
	# Append the gzip levels only when the installed zfs supports gzip.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2412
2413 #
2414 # Verify zfs operation with -p option work as expected
2415 # $1 operation, value could be create, clone or rename
2416 # $2 dataset type, value could be fs or vol
2417 # $3 dataset name
2418 # $4 new dataset name
2419 #
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# For create there is no source dataset; the name
			# under test becomes the target.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# clone requires an existing snapshot source.
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# rename requires an existing non-snapshot source.
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	block_device_wait

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2485
2486 #
2487 # Get configuration of pool
2488 # $1 pool name
2489 # $2 config name
2490 #
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# An altroot of "-" means the pool is imported normally; otherwise
	# read the (possibly exported) config with zdb -e.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	# Strip the single quotes zdb wraps around string values.  The
	# quote in the pattern must be escaped (\'); the previous bare
	# ${value#'} let the shell treat it as the start of a quoted
	# string rather than a literal character.
	if [[ -n $value ]] ; then
		value=${value#\'}
		value=${value%\'}
	fi
	echo $value

	return 0
}
2516
2517 #
2518 # Privated function. Random select one of items from arguments.
2519 #
2520 # $1 count
2521 # $2-n string
2522 #
function _random_get
{
	typeset cnt=$1
	shift

	typeset str="$@"
	typeset -i ind

	# Pick a random field index in [1, cnt] and emit that field.
	((ind = RANDOM % cnt + 1))

	echo "$str" | cut -f $ind -d ' '
}
2535
#
# Random select one of item from arguments which include NONE string
#
# With $# items, an index in [1, $# + 1] is drawn; the out-of-range
# value makes _random_get print the empty string, which callers treat
# as "NONE".
#
function random_get_with_non
{
	typeset -i cnt=$#
	# Bug fix: was '((cnt =+ 1))', which *assigned* +1 to cnt and so
	# always selected the first argument, never NONE.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2546
#
# Randomly select one item from the arguments (no NONE possibility).
#
function random_get
{
	typeset -i n=$#

	_random_get "$n" "$@"
}
2554
#
# Detect if the current system supports a separate log (slog) device.
# Returns 0 when supported, non-zero otherwise.
#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b
	typeset -i rc=0

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# A dry-run (-n) pool creation with a log vdev is enough to
	# probe for slog support; no pool is actually created.
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || rc=1

	rm -r $dir

	return $rc
}
2576
#
# Generate a dataset name of (at least) the requested length by
# repeating a base string.
# $1, the length of the name
# $2, the base string to construct the name
#
function gen_dataset_name
{
	typeset -i target=$1
	typeset base="$2"
	typeset -i chunk=${#base}
	typeset -i reps=0
	typeset result=""

	# Round the repetition count up so the result is no shorter
	# than 'target' characters.
	((reps = target / chunk))
	((target % chunk != 0)) && ((reps += 1))

	while ((reps > 0)); do
		result="${result}${base}"
		((reps -= 1))
	done

	echo $result
}
2603
2604 #
2605 # Get cksum tuple of dataset
2606 # $1 dataset name
2607 #
2608 # sample zdb output:
2609 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2610 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2611 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2612 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2613 function datasetcksum
2614 {
2615 typeset cksum
2616 sync
2617 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2618 | awk -F= '{print $7}')
2619 echo $cksum
2620 }
2621
#
# Get cksum of file
# $1 file path
#
function checksum
{
	typeset file=$1

	# Only the first field (the CRC) of cksum's output is wanted.
	echo $(cksum $file | awk '{print $1}')
}
2632
2633 #
2634 # Get the given disk/slice state from the specific field of the pool
2635 #
2636 function get_device_state #pool disk field("", "spares","logs")
2637 {
2638 typeset pool=$1
2639 typeset disk=${2#$DEV_DSKDIR/}
2640 typeset field=${3:-$pool}
2641
2642 state=$(zpool status -v "$pool" 2>/dev/null | \
2643 nawk -v device=$disk -v pool=$pool -v field=$field \
2644 'BEGIN {startconfig=0; startfield=0; }
2645 /config:/ {startconfig=1}
2646 (startconfig==1) && ($1==field) {startfield=1; next;}
2647 (startfield==1) && ($1==device) {print $2; exit;}
2648 (startfield==1) &&
2649 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2650 echo $state
2651 }
2652
2653
#
# print the given directory filesystem type
#
# $1 directory name
#
function get_fstype
{
	typeset dir=$1

	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"

	#
	# $ df -n /
	# / : ufs
	#
	df -n $dir | awk '{print $3}'
}
2673
#
# Given a disk, label it to VTOC regardless what label was on the disk
# $1 disk
#
# Returns 0 on success, 1 on Linux (unsupported there); log_fail
# aborts on bad arguments or labeling errors.
#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	if is_linux; then
		log_note "Currently unsupported by the test framework"
		return 1
	fi

	# Build a format(1M) command script appropriate for the platform.
	if [[ $arch == "i386" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		# x86 disks need an fdisk partition table before labeling.
		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# Drive format(1M) non-interactively with the script built above.
	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2726
#
# check if the system was installed as zfsroot or not
# return: 0 if true, otherwise non-zero
#
function is_zfsroot
{
	# The grep status doubles as the function's return value.
	df -n / | grep zfs > /dev/null 2>&1
}
2736
#
# get the root filesystem name if it's zfsroot system.
#
# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	if ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		    /etc/mnttab)
	fi

	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	# Sanity-check that the mnttab entry is a real ZFS dataset.
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2759
#
# get the rootfs's pool name
# return:
#       rootpool name
#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	if ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		    /etc/mnttab)
	fi

	[[ -n "$rootfs" ]] || log_fail "Can not get rootpool"

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool is everything before the first '/'.
		rootpool=$(echo $rootfs | awk -F\/ '{print $1}')
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2785
#
# Check if the given device is physical device
#
function is_physical_device #device
{
	typeset device=${1#$DEV_DSKDIR}
	device=${device#$DEV_RDSKDIR}

	if is_linux; then
		# A physical device is a block node, and partitions are
		# only usable when the loop driver exposes max_part.
		[[ -b "$DEV_DSKDIR/$device" ]] && \
		    [[ -f /sys/module/loop/parameters/max_part ]]
		return $?
	fi

	# Solaris-style cXtYdZ names.
	echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
	return $?
}
2803
#
# Check if the given device is a real device (ie SCSI device)
#
# NOTE(review): on non-Linux platforms the 'if' body never runs and
# the function falls through with status 0 — confirm callers only use
# this on Linux.
#
function is_real_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		# lsblk reports TYPE "disk" for whole physical disks.
		lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
		    egrep disk >/dev/null
		return $?
	fi
}
2818
#
# Check if the given device is a loop device
#
# NOTE(review): on non-Linux platforms the 'if' body never runs and
# the function falls through with status 0 — confirm callers only use
# this on Linux.
#
function is_loop_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		# lsblk reports TYPE "loop" for loopback devices.
		lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
		    egrep loop >/dev/null
		return $?
	fi
}
2833
#
# Check if the given device is a multipath device and if there is a symbolic
# link to a device mapper and to a disk
# Currently no support for dm devices alone without multipath
#
function is_mpath_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		lsblk $DEV_MPATHDIR/$disk -o TYPE 2>/dev/null | \
		    egrep mpath >/dev/null
		if (($? == 0)); then
			# Also require the mapper node to be a symlink.
			readlink $DEV_MPATHDIR/$disk > /dev/null 2>&1
			return $?
		else
			# Here $? is the (failed) status of the
			# (($? == 0)) test above, i.e. returns 1.
			return $?
		fi
	fi
}
2855
# Set the slice prefix for disk partitioning depending
# on whether the device is a real, multipath, or loop device.
# Currently all disks have to be of the same type, so only
# checks first disk to determine slice prefix.
#
function set_slice_prefix
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			# Bug fix: pass the shell index into nawk with -v;
			# previously 'i' was unset inside nawk and always
			# selected field 1 (harmless today since every
			# branch below exits on the first iteration).
			disk="$(echo $DISKS | nawk -v i=$i '{print $(i + 1)}')"
			if ( is_mpath_device $disk ) && [[ -z $(echo $disk | awk 'substr($1,18,1)\
			     ~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
				export SLICE_PREFIX=""
				return 0
			elif ( is_mpath_device $disk || is_loop_device \
			    $disk ); then
				export SLICE_PREFIX="p"
				return 0
			else
				log_fail "$disk not supported for partitioning."
			fi
			(( i = i + 1))
		done
	fi
}
2884
#
# Set the directory path of the listed devices in $DISK_ARRAY_NUM
# Currently all disks have to be of the same type, so only
# checks first disk to determine device directory
# default = /dev (linux)
# real disk = /dev (linux)
# multipath device = /dev/mapper (linux)
#
function set_device_dir
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			# Bug fix: pass the shell index into nawk with -v;
			# previously 'i' was unset inside nawk and always
			# selected field 1 (harmless today since both
			# branches below return on the first iteration).
			disk="$(echo $DISKS | nawk -v i=$i '{print $(i + 1)}')"
			if is_mpath_device $disk; then
				export DEV_DSKDIR=$DEV_MPATHDIR
				return 0
			else
				export DEV_DSKDIR=$DEV_RDSKDIR
				return 0
			fi
			(( i = i + 1))
		done
	else
		export DEV_DSKDIR=$DEV_RDSKDIR
	fi
}
2914
#
# Get the directory path of given device
#
function get_device_dir #device
{
	typeset device=$1

	# Idiom fix: call the predicate directly.  The old form
	# 'if ! $(is_physical_device ...)' only worked because a command
	# line that is empty after expansion takes the exit status of
	# its last command substitution.
	if ! is_physical_device $device ; then
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		if [[ -b "$DEV_DSKDIR/$device" ]]; then
			device="$DEV_DSKDIR"
		fi
		echo $device
	else
		echo "$DEV_DSKDIR"
	fi
}
2934
#
# Get persistent name for given disk
#
# $1 device name (without directory)
#
# On Linux, prints the /dev/disk/by-id name udev assigned to the
# device (dm-uuid based for multipath devices); elsewhere, or when
# the device type is not recognized, echoes the name unchanged.
#
function get_persistent_disk_name #device
{
	typeset device=$1
	typeset dev_id

	if is_linux; then
		if is_real_device $device; then
			# Take the first by-id symlink udevadm reports
			# and keep only its basename (3rd '/' field).
			dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
			    | egrep disk/by-id | nawk '{print $2; exit}' \
			    | nawk -F / '{print $3}')"
			echo $dev_id
		elif is_mpath_device $device; then
			# Multipath devices are identified by dm-uuid links.
			dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
			    | egrep disk/by-id/dm-uuid \
			    | nawk '{print $2; exit}' \
			    | nawk -F / '{print $3}')"
			echo $dev_id
		else
			echo $device
		fi
	else
		echo $device
	fi
}
2962
#
# Load scsi_debug module with specified parameters
#
# $1 device size (MB), $2 add_host, $3 num_tgts, $4 max_luns
#
# Aborts via log_fail/log_unsupported when arguments are missing, the
# module is unavailable, already loaded, or fails to load.
#
function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
{
	typeset devsize=$1
	typeset hosts=$2
	typeset tgts=$3
	typeset luns=$4

	[[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
	    [[ -z $luns ]] && log_fail "Arguments invalid or missing"

	if is_linux; then
		# Dry-run probe: confirm the module exists before loading.
		modprobe -n scsi_debug
		if (($? != 0)); then
			# Bug fix: the message used to be split across two
			# lines with no continuation, so a bare "module"
			# command was executed instead.
			log_unsupported "Platform does not have scsi_debug module"
		fi
		lsmod | egrep scsi_debug > /dev/null
		if (($? == 0)); then
			log_fail "scsi_debug module already installed"
		else
			log_must modprobe scsi_debug dev_size_mb=$devsize \
			    add_host=$hosts num_tgts=$tgts max_luns=$luns
			block_device_wait
			# Verify the emulated SCSI devices showed up.
			lsscsi | egrep scsi_debug > /dev/null
			if (($? == 1)); then
				log_fail "scsi_debug module install failed"
			fi
		fi
	fi
}
2996
#
# Get the package name
#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	# Turn every '/' in the path into '-'.
	echo "SUNWstc-${dirpath//\//-}"
}
3006
#
# Get the word numbers from a string separated by white space
#
function get_word_count
{
	typeset text="$1"

	echo $text | wc -w
}
3014
#
# To verify if the require numbers of disks is given
#
# $1 whitespace-separated disk list, $2 minimum count (default 1)
#
function verify_disk_count
{
	typeset -i needed=${2:-1}
	typeset -i have=$(get_word_count "$1")

	((have >= needed)) && return

	log_untested "A minimum of $needed disks is required to run." \
	    " You specified $have disk(s)"
}
3029
# Return 0 when the dataset's type property is "volume".
function ds_is_volume
{
	# The [[ ]] status doubles as the return value.
	[[ $(get_prop type $1) = "volume" ]]
}
3036
# Return 0 when the dataset's type property is "filesystem".
function ds_is_filesystem
{
	# The [[ ]] status doubles as the return value.
	[[ $(get_prop type $1) = "filesystem" ]]
}
3043
# Return 0 when the dataset's type property is "snapshot".
function ds_is_snapshot
{
	# The [[ ]] status doubles as the return value.
	[[ $(get_prop type $1) = "snapshot" ]]
}
3050
#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
	if svcs -H -o state labeld 2>/dev/null | grep "enabled"; then
		return 0
	fi
	return 1
}
3063
# Utility function to determine if a system has multiple cpus.
# Returns 0 when more than one CPU is online.
function is_mp
{
	# The arithmetic test's status is the implicit return value.
	if is_linux; then
		(($(nproc) > 1))
	else
		(($(psrinfo | wc -l) > 1))
	fi
}
3075
# Print the CPU frequency in MHz.
function get_cpu_freq
{
	if ! is_linux; then
		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
		return
	fi
	lscpu | awk '/CPU MHz/ { print $3 }'
}
3084
3085 # Run the given command as the user provided.
3086 function user_run
3087 {
3088 typeset user=$1
3089 shift
3090
3091 log_note "user:$user $@"
3092 eval su - \$user -c \"$@\" > /tmp/out 2>/tmp/err
3093 return $?
3094 }
3095
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	typeset -i ret=0

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	typeset tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in "$@"; do
		# Match on the vdev basename only.
		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		if (($? != 0)); then
			# Bug fix: remember the failure and fall through so
			# the temp file is always removed (the old code
			# returned here and leaked it).
			ret=1
			break
		fi
	done

	rm -f $tmpfile

	return $ret
}
3128
# Print the largest of the integer arguments.
function get_max
{
	# Fix: use the integer attribute (-i); -l is the lowercase
	# attribute in ksh93 and did not declare integers.
	typeset -i i max=$1
	shift

	for i in "$@"; do
		# Direct arithmetic comparison; the old
		# 'max=$(echo $((...)))' spawned a useless subshell.
		((i > max)) && max=$i
	done

	echo $max
}
3140
# Print the smallest of the integer arguments.
function get_min
{
	# Fix: use the integer attribute (-i); -l is the lowercase
	# attribute in ksh93 and did not declare integers.
	typeset -i i min=$1
	shift

	for i in "$@"; do
		# Direct arithmetic comparison; the old
		# 'min=$(echo $((...)))' spawned a useless subshell.
		((i < min)) && min=$i
	done

	echo $min
}
3152
#
# Generate a random number between 1 and the argument.
#
function random
{
	typeset -i pick

	((pick = RANDOM % $1 + 1))
	echo $pick
}
3161
# Write data that can be compressed into a directory
#
# $1 directory to write into (must exist)
# $2 megabytes to write per file
# $3 number of files (default 1)
# $4 fio block size (default 1024k; unused on the Linux path)
# $5 file name prefix (default "file")
#
function write_compressible
{
	typeset dir=$1
	typeset megs=$2
	typeset nfiles=${3:-1}
	typeset bs=${4:-1024k}
	typeset fname=${5:-file}

	[[ -d $dir ]] || log_fail "No directory: $dir"

	# Under Linux fio is not currently used since its behavior can
	# differ significantly across versions. This includes missing
	# command line options and cases where the --buffer_compress_*
	# options fail to behave as expected.
	if is_linux; then
		typeset file_bytes=$(to_bytes $megs)
		typeset bs_bytes=4096
		typeset blocks=$(($file_bytes / $bs_bytes))

		for (( i = 0; i < $nfiles; i++ )); do
			# Sparse file of the final size; the holes are
			# what makes the data compressible.
			truncate -s $file_bytes $dir/$fname.$i

			# Write every third block to get 66% compression.
			for (( j = 0; j < $blocks; j += 3 )); do
				dd if=/dev/urandom of=$dir/$fname.$i \
				    seek=$j bs=$bs_bytes count=1 \
				    conv=notrunc >/dev/null 2>&1
			done
		done
	else
		# NB: $fname is expanded by this shell; \$jobnum is left
		# for fio to expand per job.
		log_must eval "fio \
		    --name=job \
		    --fallocate=0 \
		    --minimal \
		    --randrepeat=0 \
		    --buffer_compress_percentage=66 \
		    --buffer_compress_chunk=4096 \
		    --directory=$dir \
		    --numjobs=$nfiles \
		    --nrfiles=$nfiles \
		    --rw=write \
		    --bs=$bs \
		    --filesize=$megs \
		    --filename_format='$fname.\$jobnum' >/dev/null"
	fi
}
3209
# Print the object (inode) number of the given path.
function get_objnum
{
	typeset pathname=$1

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
	# stat -c %i prints the inode number.
	echo $(stat -c %i $pathname)
}
3219
#
# Sync data to the pool
#
# $1 pool name
# $2 boolean to force uberblock (and config including zpool cache file) update
#
function sync_pool #pool <force>
{
	typeset pool=${1:-$TESTPOOL}
	typeset force=${2:-false}
	typeset flag=""

	# Forced sync also rewrites the uberblock/config.
	[[ $force == true ]] && flag="-f"

	log_must zpool sync $flag $pool

	return 0
}
3239
#
# Wait for zpool 'freeing' property drops to zero.
#
# $1 pool name
#
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll once per second until all deferred frees complete.
	until [[ "0" == "$(zpool list -Ho freeing $pool)" ]]; do
		log_must sleep 1
	done
}
3253
#
# Wait for every device replace operation to complete
#
# $1 pool name
#
function wait_replacing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll until no replacing-N vdev remains in the status output.
	until [[ "" == "$(zpool status $pool |
	    awk '/replacing-[0-9]+/ {print $1}')" ]]; do
		log_must sleep 1
	done
}
3268
#
# Setup custom environment for the ZED.
#
# Creates $ZEDLET_DIR, stages a minimal zed.rc plus the syslog/debug
# ZEDLETs, links the test vdev_id.conf into place, and enables the
# full debug log.  No-op on non-Linux platforms.
#
function zed_setup
{
	if ! is_linux; then
		return
	fi

	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	if [[ ! -e $VDEVID_CONF ]]; then
		log_must touch $VDEVID_CONF
	fi

	# Refuse to clobber a pre-existing system config.
	if [[ -e $VDEVID_CONF_ETC ]]; then
		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
	fi

	# Create a symlink for /etc/zfs/vdev_id.conf file.
	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC

	# Setup minimal ZED configuration. Individual test cases should
	# add additional ZEDLETs as needed for their specific test.
	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR

	# Customize the zed.rc file to enable the full debug log.
	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
	echo "ZED_DEBUG_LOG=$ZEDLET_DIR/zed.debug.log" >>$ZEDLET_DIR/zed.rc

	log_must cp ${ZEDLET_LIBEXEC_DIR}/all-syslog.sh $ZEDLET_DIR
	log_must cp ${ZEDLET_LIBEXEC_DIR}/all-debug.sh $ZEDLET_DIR
	log_must touch $ZEDLET_DIR/zed.debug.log
}
3306
#
# Cleanup custom ZED environment.
#
function zed_cleanup
{
	typeset f

	if ! is_linux; then
		return
	fi

	# Remove every zedlet and state file staged by zed_setup().
	for f in zed.rc zed-functions.sh all-syslog.sh all-debug.sh \
	    zed.pid zedlog zed.debug.log state; do
		log_must rm -f ${ZEDLET_DIR}/$f
	done
	log_must rm -f $VDEVID_CONF_ETC
	log_must rm -f $VDEVID_CONF
	rmdir $ZEDLET_DIR
}
3328
#
# Check if ZED is currently running, if not start ZED.
#
# Fails the test if a ZED instance is already up.  The daemon runs in
# the background with pid/state files under $ZEDLET_DIR and its
# foreground output captured in ${ZEDLET_DIR}/zedlog.
#
function zed_start
{
	if ! is_linux; then
		return
	fi

	# ZEDLET_DIR=/var/tmp/zed
	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	# Verify the ZED is not already running.
	pgrep -x zed > /dev/null
	if (($? == 0)); then
		log_fail "ZED already running"
	fi

	log_note "Starting ZED"
	# run ZED in the background and redirect foreground logging
	# output to zedlog
	log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid" \
	    "-s $ZEDLET_DIR/state 2>${ZEDLET_DIR}/zedlog &"

	return 0
}
3357
#
# Kill ZED process
#
function zed_stop
{
	if ! is_linux; then
		return
	fi

	log_note "Stopping ZED"
	if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
		# NB: zedpid is deliberately global, as before.
		zedpid=$(<${ZEDLET_DIR}/zed.pid)
		log_must kill $zedpid
	fi

	return 0
}
3375
#
# Check if the provided device is actively in use as a swap device.
#
function is_swap_inuse
{
	typeset device=$1

	if [[ -z $device ]] ; then
		log_note "No device specified."
		return 1
	fi

	# The grep status doubles as the return value.
	if is_linux; then
		swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
	else
		swap -l | grep -w $device > /dev/null 2>&1
	fi
}
3396
#
# Setup a swap device using the provided device.
#
function swap_setup
{
	typeset swapdev=$1

	if ! is_linux; then
		log_must swap -a $swapdev
		return 0
	fi

	log_must mkswap $swapdev > /dev/null 2>&1
	log_must swapon $swapdev

	return 0
}
3413
#
# Cleanup a swap device on the provided device.
#
function swap_cleanup
{
	typeset swapdev=$1

	# Nothing to do when the device is not swapping.
	if ! is_swap_inuse $swapdev; then
		return 0
	fi

	if is_linux; then
		log_must swapoff $swapdev
	else
		log_must swap -d $swapdev
	fi

	return 0
}