]> git.proxmox.com Git - mirror_zfs.git/blame - tests/zfs-tests/include/libtest.shlib
Ensure that perf regression tests cleanup properly
[mirror_zfs.git] / tests / zfs-tests / include / libtest.shlib
CommitLineData
6bb24f4d
BB
1#!/bin/ksh -p
2#
3# CDDL HEADER START
4#
5# The contents of this file are subject to the terms of the
6# Common Development and Distribution License (the "License").
7# You may not use this file except in compliance with the License.
8#
9# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10# or http://www.opensolaris.org/os/licensing.
11# See the License for the specific language governing permissions
12# and limitations under the License.
13#
14# When distributing Covered Code, include this CDDL HEADER in each
15# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16# If applicable, add the following below this CDDL HEADER, with the
17# fields enclosed by brackets "[]" replaced with your own identifying
18# information: Portions Copyright [yyyy] [name of copyright owner]
19#
20# CDDL HEADER END
21#
22
23#
24# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25# Use is subject to license terms.
26#
27
28#
29# Copyright (c) 2012, 2015 by Delphix. All rights reserved.
30#
31
32. ${STF_TOOLS}/include/logapi.shlib
33
6bb24f4d
BB
34# Determine if this is a Linux test system
35#
36# Return 0 if platform Linux, 1 if otherwise
37
# Check whether the test system is running Linux.
#
# Return 0 if $UNAME reports "GNU/Linux", 1 otherwise.
function is_linux
{
	[[ $($UNAME -o) == "GNU/Linux" ]]
}
46
e676a196
BB
47# Determine if this is a 32-bit system
48#
49# Return 0 if platform is 32-bit, 1 if otherwise
50
# Check whether the test system is 32-bit.
#
# Return 0 if getconf LONG_BIT reports 32, 1 otherwise.
function is_32bit
{
	[[ $(getconf LONG_BIT) == "32" ]]
}
59
6bb24f4d
BB
60# Determine whether a dataset is mounted
61#
62# $1 dataset name
63# $2 filesystem type; optional - defaulted to zfs
64#
65# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
66
# Determine whether a dataset (or mountpoint path) is currently mounted.
#
# $1 dataset name or mountpoint (a leading "/" means mountpoint)
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if mounted; 1 if not mounted; for ufs/nfs, the non-zero df
# exit status is propagated on error.
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
	zfs)
		# 'zfs mount' prints "<dataset> <mountpoint>"; compare
		# against column 2 for paths, column 1 for dataset names.
		if [[ "$1" == "/"* ]] ; then
			for out in $($ZFS mount | $AWK '{print $2}'); do
				[[ $1 == $out ]] && return 0
			done
		else
			for out in $($ZFS mount | $AWK '{print $1}'); do
				[[ $1 == $out ]] && return 0
			done
		fi
		;;
	ufs|nfs)
		out=$($DF -F $fstype $1 2>/dev/null)
		ret=$?
		(($ret != 0)) && return $ret

		# Parse "dir (name): ..." df output into dir and name,
		# and match the argument against either.
		dir=${out%%\(*}
		dir=${dir%% *}
		name=${out##*\(}
		name=${name%%\)*}
		name=${name%% *}

		[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	ext2)
		# df succeeds only if $1 is a mounted ext2 filesystem.
		out=$($DF -t $fstype $1 2>/dev/null)
		return $?
		;;
	zvol)
		# A zvol is "mounted" when its resolved device node
		# appears as a mount source in the mount table.
		if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
			link=$(readlink -f $ZVOL_DEVDIR/$1)
			[[ -n "$link" ]] && \
			    $MOUNT | $GREP -q "^$link" && \
			    return 0
		fi
		;;
	esac

	return 1
}
114
115# Return 0 if a dataset is mounted; 1 otherwise
116#
117# $1 dataset name
118# $2 filesystem type; optional - defaulted to zfs
119
# Return 0 if a dataset is mounted; 1 otherwise.
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function mounted
{
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
126
127# Return 0 if a dataset is unmounted; 1 otherwise
128#
129# $1 dataset name
130# $2 filesystem type; optional - defaulted to zfs
131
# Return 0 if a dataset is unmounted; 1 otherwise.
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Note: only an ismounted status of exactly 1 counts as "unmounted";
# an error status (e.g. 2) yields 1 here, matching the original logic.
function unmounted
{
	ismounted $1 $2
	typeset rc=$?
	if ((rc == 1)); then
		return 0
	fi
	return 1
}
138
139# split line on ","
140#
141# $1 - line to split
142
# Emit $1 with every "," replaced by a space, i.e. split a
# comma-separated list into a whitespace-separated one.
#
# $1 - line to split
function splitline
{
	typeset csv=$1

	$ECHO $csv | $SED 's/,/ /g'
}
147
148function default_setup
149{
150 default_setup_noexit "$@"
151
152 log_pass
153}
154
155#
156# Given a list of disks, setup storage pools and datasets.
157#
#
# Given a list of disks, setup storage pools and datasets.
#
# $1 disk list used to create $TESTPOOL
# $2 non-empty: also create container $TESTPOOL/$TESTCTR with $TESTFS1
# $3 non-empty: also create volume $TESTPOOL/$TESTVOL
#
# Does not exit; callers wanting log_pass use default_setup.
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3
	log_note begin default_setup_noexit

	if is_global_zone; then
		# Start from a clean slate: destroy any leftover pool and
		# its stale mountpoint directory.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
		log_note creating pool $TESTPOOL $disklist
		log_must $ZPOOL create -f $TESTPOOL $disklist
	else
		# Pools cannot be created in a non-global zone; re-import
		# the delegated one instead.
		reexport_pool
	fi

	$RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	$MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		$RM -rf $TESTDIR1 || \
		    log_unresolved Could not remove $TESTDIR1
		$MKDIR -p $TESTDIR1 || \
		    log_unresolved Could not create $TESTDIR1

		log_must $ZFS create $TESTPOOL/$TESTCTR
		log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
		log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must $ZFS set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
			block_device_wait
		else
			# Zvols are not usable in a local zone; create a
			# plain filesystem of the same name instead.
			log_must $ZFS create $TESTPOOL/$TESTVOL
		fi
	fi
}
204
205#
206# Given a list of disks, setup a storage pool, file system and
207# a container.
208#
209function default_container_setup
210{
211 typeset disklist=$1
212
213 default_setup "$disklist" "true"
214}
215
216#
217# Given a list of disks, setup a storage pool,file system
218# and a volume.
219#
220function default_volume_setup
221{
222 typeset disklist=$1
223
224 default_setup "$disklist" "" "true"
225}
226
227#
228# Given a list of disks, setup a storage pool,file system,
229# a container and a volume.
230#
231function default_container_volume_setup
232{
233 typeset disklist=$1
234
235 default_setup "$disklist" "true" "true"
236}
237
238#
239# Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
240# filesystem
241#
242# $1 Existing filesystem or volume name. Default, $TESTFS
243# $2 snapshot name. Default, $TESTSNAP
244#
#
# Create a snapshot of a filesystem or volume. By default snapshots
# $TESTFS as $TESTSNAP.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
# Fails the test if the snapshot already exists or the dataset is missing.
function create_snapshot
{
	typeset target=${1:-$TESTFS}
	typeset snapname=${2:-$TESTSNAP}

	[[ -z $target ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snapname ]] && log_fail "Snapshot's name is undefined."

	snapexists $target@$snapname && \
	    log_fail "$target@$snapname already exists."
	datasetexists $target || \
	    log_fail "$target must exist."

	log_must $ZFS snapshot $target@$snapname
}
261
262#
263# Create a clone from a snapshot, default clone name is $TESTCLONE.
264#
265# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
266# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
267#
#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
	typeset srcsnap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset tgtclone=${2:-$TESTPOOL/$TESTCLONE}

	if [[ -z $srcsnap ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $tgtclone ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must $ZFS clone $srcsnap $tgtclone
}
280
281function default_mirror_setup
282{
283 default_mirror_setup_noexit $1 $2 $3
284
285 log_pass
286}
287
288#
289# Given a pair of disks, set up a storage pool and dataset for the mirror
290# @parameters: $1 the primary side of the mirror
291# $2 the secondary side of the mirror
292# @uses: ZPOOL ZFS TESTPOOL TESTFS
#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#              $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
# Note: all positional args ($@) become members of the mirror vdev.
function default_mirror_setup_noexit
{
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	if [[ -z $primary ]]; then
		log_fail "$func: No parameters passed"
	fi
	if [[ -z $secondary ]]; then
		log_fail "$func: No secondary partition passed"
	fi

	# Clear any stale mountpoint directory before creating the pool.
	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	log_must $ZPOOL create -f $TESTPOOL mirror $@
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
308
309#
310# create a number of mirrors.
311# We create a number($1) of 2 way mirrors using the pairs of disks named
312# on the command line. These mirrors are *not* mounted
313# @parameters: $1 the number of mirrors to create
314# $... the devices to use to create the mirrors on
315# @uses: ZPOOL ZFS TESTPOOL
#
# create a number of mirrors.
# We create a number($1) of 2 way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted
# @parameters: $1 the number of mirrors to create
#              $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i count=$1
	shift

	while ((count > 0)); do
		# Each iteration consumes two devices.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$count ]] && $RM -rf /$TESTPOOL$count
		log_must $ZPOOL create -f $TESTPOOL$count mirror $1 $2
		shift 2
		((count = count - 1))
	done
}
329
330#
331# create a number of raidz pools.
332# We create a number($1) of 2 raidz pools using the pairs of disks named
333# on the command line. These pools are *not* mounted
334# @parameters: $1 the number of pools to create
335# $... the devices to use to create the pools on
336# @uses: ZPOOL ZFS TESTPOOL
#
# create a number of raidz pools.
# We create a number($1) of 2 raidz pools using the pairs of disks named
# on the command line. These pools are *not* mounted
# @parameters: $1 the number of pools to create
#              $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i count=$1
	shift

	while ((count > 0)); do
		# Each iteration consumes two devices.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$count ]] && $RM -rf /$TESTPOOL$count
		log_must $ZPOOL create -f $TESTPOOL$count raidz $1 $2
		shift 2
		((count = count - 1))
	done
}
350
351#
352# Destroy the configured testpool mirrors.
353# the mirrors are of the form ${TESTPOOL}{number}
354# @uses: ZPOOL ZFS TESTPOOL
355function destroy_mirrors
356{
357 default_cleanup_noexit
358
359 log_pass
360}
361
362#
363# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
364# $1 the list of disks
365#
#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	# NOTE(review): only the first three positional disks are used to
	# build the raidz vdev; any additional disks in $disklist are
	# silently ignored — confirm this is intended.
	log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	# log_pass exits, so this function does not return on success.
	log_pass
}
382
383#
384# Common function used to cleanup storage pools and datasets.
385#
386# Invoked at the start of the test suite to ensure the system
387# is in a known state, and also at the end of each set of
388# sub-tests to ensure errors from one set of tests doesn't
389# impact the execution of the next set.
390
391function default_cleanup
392{
393 default_cleanup_noexit
394
395 log_pass
396}
397
#
# Destroy every pool not protected by $KEEP/$NO_POOLS and clean up the
# test directories. In a non-global zone, instead destroy the delegated
# datasets and reset the $ZONE_POOL/$ZONE_CTR[0-4] filesystems to their
# default property values. Does not exit; see default_cleanup.
#
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		$ZFS unmount -a > /dev/null 2>&1
		# Always protect the root pool by default.
		[[ -z "$KEEP" ]] && KEEP="rpool"
		exclude=`eval $ECHO \"'(${KEEP})'\"`
		ALL_POOLS=$($ZPOOL list -H -o name \
		    | $GREP -v "$NO_POOLS" | $EGREP -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		#
		# Fix: use [[ -n ... ]] rather than the old unquoted
		# '[ ! -z ${ALL_POOLS} ]', which failed with "too many
		# arguments" (and so ended the loop prematurely) whenever
		# more than one pool name remained in $ALL_POOLS.
		while [[ -n $ALL_POOLS ]]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Refresh the candidate list; destroying one
				# pool may have removed dependents.
				ALL_POOLS=$($ZPOOL list -H -o name \
				    | $GREP -v "$NO_POOLS" \
				    | $EGREP -v "$exclude")
			done
		done

		$ZFS mount -a
	else
		typeset fs=""
		# Destroy all datasets delegated beneath the zone containers.
		for fs in $($ZFS list -H -o name \
		    | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
			    log_must $ZFS destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $($ZFS list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must $RM -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $($ZFS list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must $ZFS set reservation=none $fs
				log_must $ZFS set recordsize=128K $fs
				log_must $ZFS set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
				    [[ "$enc" == "off" ]]; then
					log_must $ZFS set checksum=on $fs
				fi
				log_must $ZFS set compression=off $fs
				log_must $ZFS set atime=on $fs
				log_must $ZFS set devices=off $fs
				log_must $ZFS set exec=on $fs
				log_must $ZFS set setuid=on $fs
				log_must $ZFS set readonly=off $fs
				log_must $ZFS set snapdir=hidden $fs
				log_must $ZFS set aclmode=groupmask $fs
				log_must $ZFS set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
	    log_must $RM -rf $TESTDIR

	# Multipath devices must be left un-partitioned for reuse.
	disk1=${DISKS%% *}
	if is_mpath_device $disk1; then
		delete_partitions
	fi
}
480
481
482#
483# Common function used to cleanup storage pools, file systems
484# and containers.
485#
#
# Common function used to cleanup storage pools, file systems
# and containers. Ends in default_cleanup, which exits via log_pass.
#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount the container filesystem if it is currently mounted.
	if ismounted $TESTPOOL/$TESTCTR/$TESTFS1; then
		log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must $RM -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
507
508#
509# Common function used to cleanup snapshot of file system or volume. Default to
510# delete the file system's snapshot
511#
512# $1 snapshot name
513#
#
# Common function used to cleanup snapshot of file system or volume. Default to
# delete the file system's snapshot
#
# $1 snapshot name; default $TESTPOOL/$TESTFS@$TESTSNAP
#
# Fails the test if the snapshot does not exist.
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		# Fix: grammatically broken message "does not existed."
		log_fail "'$snap' does not exist."
	fi

	#
	# For the sake of the value which come from 'get_prop' is not equal
	# to the really mountpoint when the snapshot is unmounted. So, firstly
	# check and make sure this snapshot's been mounted in current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $snap failed."
	fi

	log_must $ZFS destroy $snap
	# Remove the now-stale mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must $RM -rf $mtpt
}
538
539#
540# Common function used to cleanup clone.
541#
542# $1 clone name
543#
#
# Common function used to cleanup clone.
#
# $1 clone name; default $TESTPOOL/$TESTCLONE
#
# Fails the test if the clone does not exist.
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# Fix: grammatically broken message "does not existed."
		log_fail "'$clone' does not exist."
	fi

	# With the same reason in destroy_snapshot
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $clone failed."
	fi

	log_must $ZFS destroy $clone
	# Remove the now-stale mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must $RM -rf $mtpt
}
564
565# Return 0 if a snapshot exists; $? otherwise
566#
567# $1 - snapshot name
568
# Return 0 if a snapshot exists; non-zero otherwise.
#
# $1 - snapshot name
function snapexists
{
	typeset snap=$1

	$ZFS list -H -t snapshot "$snap" > /dev/null 2>&1
}
574
575#
576# Set a property to a certain value on a dataset.
577# Sets a property of the dataset to the value as passed in.
578# @param:
579# $1 dataset who's property is being set
580# $2 property to set
581# $3 value to set property to
582# @return:
583# 0 if the property could be set.
584# non-zero otherwise.
585# @use: ZFS
586#
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
#	$1 dataset who's property is being set
#	$2 property to set
#	$3 value to set property to
# @return:
#	0 if the property could be set.
#	non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	# Capture both stdout and stderr so failures can be reported.
	typeset output=
	output=$($ZFS set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}
607
608#
609# Assign suite defined dataset properties.
610# This function is used to apply the suite's defined default set of
611# properties to a dataset.
612# @parameters: $1 dataset to use
613# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
614# @returns:
615# 0 if the dataset has been altered.
616# 1 if no pool name was passed in.
617# 2 if the dataset could not be found.
618# 3 if the dataset could not have it's properties set.
619#
#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#	0 if the dataset has been altered.
#	1 if no pool name was passed in.
#	2 if the dataset could not be found.
#	3 if the dataset could not have it's properties set.
#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	# Verify the dataset shows up in 'zfs list' output.
	typeset entry=
	typeset -i matched=0
	for entry in $($ZFS list); do
		if [[ $dataset = $entry ]]; then
			matched=1
			break
		fi
	done
	((matched == 0)) && return 2

	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
		    return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
		    return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
647
648#
649# Check a numeric assertion
650# @parameter: $@ the assertion to check
651# @output: big loud notice if assertion failed
652# @use: log_fail
653#
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
	if ! (($@)); then
		log_fail "$@"
	fi
}
658
659#
660# Function to format partition size of a disk
661# Given a disk cxtxdx reduces all partitions
662# to 0 size
663#
#
# Function to format partition size of a disk
# Given a disk cxtxdx reduces all partitions
# to 0 size
#
function zero_partitions #<whole_disk_name>
{
	typeset dev=$1
	typeset slice

	if is_linux; then
		# On Linux a fresh GPT label wipes all partitions at once.
		log_must $FORMAT $DEV_DSKDIR/$dev -s -- mklabel gpt
	else
		# Slice 2 (the whole disk) is deliberately skipped.
		for slice in 0 1 3 4 5 6 7; do
			set_partition $slice "" 0mb $dev
		done
	fi
}
678
679#
680# Given a slice, size and disk, this function
681# formats the slice to the specified size.
682# Size should be specified with units as per
683# the `format` command requirements eg. 100mb 3gb
684#
685# NOTE: This entire interface is problematic for the Linux parted utilty
686# which requires the end of the partition to be specified. It would be
687# best to retire this interface and replace it with something more flexible.
688# At the moment a best effort is made.
689#
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements eg. 100mb 3gb
#
# NOTE: This entire interface is problematic for the Linux parted utilty
# which requires the end of the partition to be specified. It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."

	if is_linux; then
		typeset size_mb=${size%%[mMgG]}

		size_mb=${size_mb%%[mMgG][bB]}
		# Fix: detect gigabyte sizes by matching 'g'/'G' anywhere
		# in the string; the old check of ${size:1:1} only worked
		# for single-digit sizes such as "1g" and missed "10g".
		if [[ $size == *[gG]* ]]; then
			((size_mb = size_mb * 1024))
		fi

		# Create GPT partition table when setting slice 0 or
		# when the device doesn't already contain a GPT label.
		$FORMAT $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
		typeset ret_val=$?
		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
			log_must $FORMAT $DEV_DSKDIR/$disk -s -- mklabel gpt
		fi

		# When no start is given align on the first cylinder.
		if [[ -z "$start" ]]; then
			start=1
		fi

		# Determine the cylinder size for the device and using
		# that calculate the end offset in cylinders.
		typeset -i cly_size_kb=0
		cly_size_kb=$($FORMAT -m $DEV_DSKDIR/$disk -s -- \
		    unit cyl print | $HEAD -3 | $TAIL -1 | \
		    $AWK -F '[:k.]' '{print $4}')
		((end = (size_mb * 1024 / cly_size_kb) + start))

		log_must $FORMAT $DEV_DSKDIR/$disk -s -- \
		    mkpart part$slicenum ${start}cyl ${end}cyl

		$BLOCKDEV --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
		block_device_wait
	else
		typeset format_file=/var/tmp/format_in.$$

		$ECHO "partition" >$format_file
		$ECHO "$slicenum" >> $format_file
		$ECHO "" >> $format_file
		$ECHO "" >> $format_file
		$ECHO "$start" >> $format_file
		$ECHO "$size" >> $format_file
		$ECHO "label" >> $format_file
		$ECHO "" >> $format_file
		$ECHO "q" >> $format_file
		$ECHO "q" >> $format_file

		$FORMAT -e -s -d $disk -f $format_file
		# Fix: capture format's status and remove the temp file
		# here, inside the branch that created them. Previously
		# this ran after the if/else, so on Linux ret_val caught
		# block_device_wait's status (spuriously failing) and
		# $RM ran with an unset $format_file.
		typeset ret_val=$?
		$RM -f $format_file
		[[ $ret_val -ne 0 ]] && \
		    log_fail "Unable to format $disk slice $slicenum to $size"
	fi

	return 0
}
755
7050a65d
SV
756#
757# Delete all partitions on all disks - this is specifically for the use of multipath
758# devices which currently can only be used in the test suite as raw/un-partitioned
759# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
760#
#
# Delete all partitions on all disks - this is specifically for the use of multipath
# devices which currently can only be used in the test suite as raw/un-partitioned
# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
#
# Globals read: DISKS, DISK, DISKSARRAY, DISK_ARRAY_NUM, MAX_PARTITIONS,
#               SLICE_PREFIX, DEV_DSKDIR. Only implemented for Linux.
function delete_partitions
{
	typeset -i j=1

	if [[ -z $DISK_ARRAY_NUM ]]; then
		DISK_ARRAY_NUM=$($ECHO ${DISKS} | $NAWK '{print NF}')
	fi
	if [[ -z $DISKSARRAY ]]; then
		DISKSARRAY=$DISKS
	fi

	if is_linux; then
		if (( $DISK_ARRAY_NUM == 1 )); then
			# Single disk: remove partitions 1..MAX_PARTITIONS-1.
			# 'parted rm' failing means no partition j existed;
			# lsblk is then consulted to verify removal.
			while ((j < MAX_PARTITIONS)); do
				$FORMAT $DEV_DSKDIR/$DISK -s rm $j > /dev/null 2>&1
				if (( $? == 1 )); then
					$LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 1 )); then
						log_note "Partitions for $DISK should be deleted"
					else
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
					return 0
				else
					$LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 0 )); then
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
				fi
				((j = j+1))
			done
		else
			# Multiple disks: same procedure per disk.
			for disk in `$ECHO $DISKSARRAY`; do
				while ((j < MAX_PARTITIONS)); do
					$FORMAT $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
					if (( $? == 1 )); then
						$LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 1 )); then
							log_note "Partitions for $disk should be deleted"
						else
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
						# NOTE(review): j=7 appears intended to
						# terminate the inner loop early; it assumes
						# MAX_PARTITIONS <= 8 — confirm.
						j=7
					else
						$LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 0 )); then
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
					fi
					((j = j+1))
				done
				j=1
			done
		fi
	fi
	return 0
}
818
6bb24f4d
BB
819#
820# Get the end cyl of the given slice
821#
#
# Get the end cyl of the given slice
#
# $1 disk name; $2 slice/partition number
# Prints the end cylinder on stdout (empty on Solaris if the
# sectors/cylinder ratio cannot be determined).
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	if is_linux; then
		# Parse "partN ... <end>cyl" out of parted's output.
		endcyl=$($FORMAT -s $DEV_DSKDIR/$disk -- unit cyl print | \
		    $GREP "part${slice}" | \
		    $AWK '{print $3}' | \
		    $SED 's,cyl,,')
		((endcyl = (endcyl + 1)))
	else
		# Normalize the device name to the bare cXtXdX form.
		disk=${disk#/dev/dsk/}
		disk=${disk#/dev/rdsk/}
		disk=${disk%s*}

		# sectors per cylinder, from the VTOC of slice 2.
		typeset -i ratio=0
		ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
		    $GREP "sectors\/cylinder" | \
		    $AWK '{print $2}')

		if ((ratio == 0)); then
			return
		fi

		# Last sector of the slice, converted to cylinders.
		typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
		    $NAWK -v token="$slice" '{if ($1==token) print $6}')

		((endcyl = (endcyl + 1) / ratio))
	fi

	echo $endcyl
}
858
859
860#
861# Given a size,disk and total slice number, this function formats the
862# disk slices from 0 to the total slice number with the same specified
863# size.
864#
#
# Given a size,disk and total slice number, this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
	typeset -i idx=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	while ((idx < $total_slices)); do
		# On Solaris slice 2 is the whole disk and must be skipped.
		if ! is_linux && ((idx == 2)); then
			((idx = idx + 1))
			continue
		fi
		set_partition $idx "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $idx)
		((idx = idx + 1))
	done
}
886
887#
888# This function continues to write to a filenum number of files into dirnum
889# number of directories until either $FILE_WRITE returns an error or the
890# maximum number of files per directory have been written.
891#
892# Usage:
893# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
894#
895# Return value: 0 on success
896# non 0 on error
897#
898# Where :
899# destdir: is the directory where everything is to be created under
900# dirnum: the maximum number of subdirectories to use, -1 no limit
901# filenum: the maximum number of files per subdirectory
902# bytes: number of bytes to write
903# num_writes: numer of types to write out bytes
904# data: the data that will be writen
905#
906# E.g.
907# file_fs /testdir 20 25 1024 256 0
908#
909# Note: bytes * num_writes equals the size of the testfile
910#
#
# This function continues to write to a filenum number of files into dirnum
# number of directories until either $FILE_WRITE returns an error or the
# maximum number of files per directory have been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
#               non 0 on error
#
# Where :
#	destdir:    is the directory where everything is to be created under
#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
#	filenum:    the maximum number of files per subdirectory
#	bytes:	    number of bytes to write
#	num_writes: numer of types to write out bytes
#	data:	    the data that will be writen
#
#	E.g.
#	file_fs /testdir 20 25 1024 256 0
#
#	Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i curdir=0
	typeset -i curfile=0
	typeset -i retval=0

	log_must $MKDIR -p $destdir/$curdir
	while true; do
		# Stop once the directory quota is reached (-1 = no limit).
		((dirnum >= 0 && curdir >= dirnum)) && break
		$FILE_WRITE -o create -f $destdir/$curdir/$TESTFILE.$curfile \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# Stop on the first write error (e.g. filesystem full).
		((retval != 0)) && break
		if ((curfile >= filenum)); then
			# Directory is full; move on to the next one.
			curfile=0
			((curdir = curdir + 1))
			log_must $MKDIR -p $destdir/$curdir
		else
			((curfile = curfile + 1))
		fi
	done
	return $retval
}
948
949#
950# Simple function to get the specified property. If unable to
951# get the property then exits.
952#
953# Note property is in 'parsable' format (-p)
954#
#
# Simple function to get the specified property. If unable to
# get the property then exits.
#
# Note property is in 'parsable' format (-p)
#
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for dataset " \
		    "$dataset"
		return 1
	fi

	$ECHO $prop_val
	return 0
}
971
972#
973# Simple function to get the specified property of pool. If unable to
974# get the property then exits.
975#
#
# Simple function to get the specified property of pool. If unable to
# get the property then exits.
#
function get_pool_prop # property pool
{
	typeset prop=$1
	typeset pool=$2
	typeset prop_val

	if poolexists $pool ; then
		# zpool get output: NAME PROPERTY VALUE SOURCE; take the
		# VALUE column of the last line.
		prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
		    $AWK '{print $3}')
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			    "$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	$ECHO $prop_val
	return 0
}
998
999# Return 0 if a pool exists; $? otherwise
1000#
1001# $1 - pool name
1002
# Return 0 if a pool exists; non-zero otherwise.
#
# $1 - pool name
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	$ZPOOL get name "$pool" > /dev/null 2>&1
}
1015
1016# Return 0 if all the specified datasets exist; $? otherwise
1017#
1018# $1-n dataset name
# Return 0 if all the specified datasets exist; non-zero otherwise.
#
# $1-n dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		# Propagate the failing status of the first missing dataset.
		$ZFS get name $ds > /dev/null 2>&1 || \
		    return $?
	done

	return 0
}
1034
1035# return 0 if none of the specified datasets exists, otherwise return 1.
1036#
1037# $1-n dataset name
# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		# Any dataset that lists successfully means failure here.
		$ZFS list -H -t filesystem,snapshot,volume $ds > /dev/null 2>&1 \
		    && return 1
	done

	return 0
}
1053
1054#
1055# Given a mountpoint, or a dataset name, determine if it is shared.
1056#
1057# Returns 0 if shared, 1 otherwise.
1058#
#
# Given a mountpoint, or a dataset name, determine if it is shared.
#
# Returns 0 if shared, 1 otherwise. Unsupported (always logs and
# returns 1) on Linux.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	if is_linux; then
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi

	# A dataset name (no leading '/') is translated to its mountpoint
	# before consulting the share table.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	# 'share' output column 2 is the shared path.
	for mtpt in `$SHARE | $AWK '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Diagnostic only: report the NFS server state when not shared.
	typeset stat=$($SVCS -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1096
1097#
1098# Given a mountpoint, determine if it is not shared.
1099#
1100# Returns 0 if not shared, 1 otherwise.
1101#
#
# Given a mountpoint, determine if it is not shared.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	typeset fs=$1

	if is_linux; then
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi

	if is_shared $fs; then
		return 1
	fi
	return 0
}
1118
1119#
1120# Helper function to unshare a mountpoint.
1121#
#
# Helper function to unshare a mountpoint.
#
# $1 mountpoint or dataset; only unshared when currently shared.
function unshare_fs #fs
{
	typeset fs=$1

	if is_linux; then
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi

	if is_shared $fs; then
		log_must $ZFS unshare $fs
	fi

	return 0
}
1138
1139#
1140# Check NFS server status and trigger it online.
1141#
#
# Check NFS server status and trigger it online.
# Solaris-only: performs a dummy share/unshare so the SMF nfs/server
# service transitions to the online state permanently. No-op in a
# non-global zone and unsupported on Linux.
#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	if is_linux; then
		log_unsupported "Currently unsupported by the test framework"
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must $RM -rf $dummy
		fi

		log_must $MKDIR $dummy
		log_must $SHARE $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must $SLEEP 1
		timeout=10
		# NOTE(review): bare 'timeout' inside [[ ... -ne ... ]]
		# relies on arithmetic coercion of the variable name.
		while [[ timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must $SLEEP 1

			((timeout -= 1))
		done

		log_must $UNSHARE $dummy
		log_must $RM -rf $dummy
	fi

	log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
}
1193
1194#
1195# To verify whether calling process is in global zone
1196#
1197# Return 0 if in global zone, 1 in non-global zone
1198#
#
# To verify whether calling process is in global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
	typeset zone=$($ZONENAME 2>/dev/null)

	[[ $zone == "global" ]]
}
1207
1208#
1209# Verify whether test is permitted to run from
1210# global zone, local zone, or both
1211#
1212# $1 zone limit, could be "global", "local", or "both"(no limit)
1213#
1214# Return 0 if permitted, otherwise exit with log_unsupported
1215#
#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both"(no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit specified: always runnable.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from " \
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from " \
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# In a local zone the delegated pool must be re-imported
		# before the test can use it.
		reexport_pool
	fi

	return 0
}
1250
1251# Return 0 if create successfully or the pool exists; $? otherwise
1252# Note: In local zones, this function should return 0 silently.
1253#
1254# $1 - pool name
1255# $2-n - [keyword] devs_list
1256
# Return 0 if create successfully or the pool exists; non-zero otherwise.
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
function create_pool #pool devs_list
{
	# Strip any dataset component so only the pool name remains.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Recreate from scratch if the pool already exists.
	poolexists $pool && destroy_pool $pool

	if is_global_zone ; then
		[[ -d /$pool ]] && $RM -rf /$pool
		log_must $ZPOOL create -f $pool $@
	fi

	return 0
}
1279
1280# Return 0 if destroy successfully or the pool exists; $? otherwise
1281# Note: In local zones, this function should return 0 silently.
1282#
1283# $1 - pool name
1284# Destroy pool with the given parameters.
1285
# Return 0 if destroyed successfully; non-zero otherwise.
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name (any dataset component after '/' is stripped)
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			# NOTE(review): 'must' is intentionally not typeset
			# here (global); after 7 failed attempts it becomes
			# log_must so the next failure aborts the test.
			must=""
			while [[ $ret -ne 0 ]]; do
				$must $ZPOOL destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				[[ count++ -ge 7 ]] && must=log_must
				$SLEEP $wait_time
			done

			# Remove the pool's stale mountpoint directory.
			[[ -d $mtpt ]] && \
			    log_must $RM -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1325
1326#
1327# Firstly, create a pool with 5 datasets. Then, create a single zone and
1328# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1329# and a zvol device to the zone.
1330#
1331# $1 zone name
1332# $2 zone root directory prefix
1333# $3 zone ip
1334#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && $RM -rf /$pool_name
	log_must $ZPOOL create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must $ZFS create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must $ZFS create -V 1g $pool_name/zone_zvol
	block_device_wait

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must $MKFILE 100M $sdevs
		log_must $ZPOOL add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must $ZFS create $pool_name/zfs_filesystem
	# log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem

	# Remove any leftover zone directory, then create a fresh one with
	# the restrictive permissions zoneadm requires.
	[[ -d $zone_root ]] && \
		log_must $RM -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must $MKDIR -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	$ECHO "create" > $zone_conf
	$ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
	$ECHO "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the 5 container datasets to the zone.
	while ((i < cntctr)); do
		$ECHO "add dataset" >> $zone_conf
		$ECHO "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		$ECHO "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=$ZVOL_DEVDIR/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# $ECHO "add fs" >> $zone_conf
	# $ECHO "set type=zfs" >> $zone_conf
	# $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
	# $ECHO "end" >> $zone_conf

	$ECHO "verify" >> $zone_conf
	$ECHO "commit" >> $zone_conf
	log_must $ZONECFG -z $zone_name -f $zone_conf
	log_must $RM -f $zone_conf

	# Install the zone
	$ZONEADM -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: $ZONEADM -z $zone_name install"
	else
		log_fail "FAIL: $ZONEADM -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	$ECHO "system_locale=C" > $sysidcfg
	$ECHO  "terminal=dtterm" >> $sysidcfg
	$ECHO  "network_interface=primary {" >> $sysidcfg
	$ECHO  "hostname=$zone_name" >> $sysidcfg
	$ECHO  "}" >> $sysidcfg
	$ECHO  "name_service=NONE" >> $sysidcfg
	$ECHO  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	$ECHO  "security_policy=NONE" >> $sysidcfg
	$ECHO  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must $ZONEADM -z $zone_name boot
}
1439
1440#
1441# Reexport TESTPOOL & TESTPOOL(1-4)
1442#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Rebind TESTPOOL (i == 0) and TESTPOOL1..TESTPOOL4 to the per-zone
	# container datasets, and make sure each one is mounted.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must $ZFS mount $TESTPOOL
			fi
		else
			# eval is required because the variable name itself
			# (TESTPOOL$i) is computed.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval $ZFS mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1463
1464#
1465# Verify a given disk is online or offline
1466#
1467# Return 0 is pool/disk matches expected state, 1 otherwise
1468#
function check_state # pool disk state{online,offline}
{
	typeset target=$1
	# Strip the /dev directory prefix, if the caller passed a full path.
	typeset dev=${2#$DEV_DSKDIR/}
	typeset expected=$3

	# Find the device's line in the verbose status output and match the
	# expected state case-insensitively.
	if $ZPOOL status -v $target | grep "$dev" | \
	    grep -i "$expected" > /dev/null 2>&1; then
		return 0
	fi

	return 1
}
1480
1481#
1482# Get the mountpoint of snapshot
1483# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1484# as its mountpoint
1485#
function snapshot_mountpoint
{
	typeset snapname=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# A snapshot name must contain the '@' separator.
	if [[ $snapname != *@* ]]; then
		log_fail "Error name of snapshot '$snapname'."
	fi

	typeset fsname=${snapname%@*}
	typeset snappart=${snapname#*@}

	# Both the filesystem and the snapshot component must be non-empty.
	[[ -n $fsname && -n $snappart ]] || \
	    log_fail "Error name of snapshot '$snapname'."

	# Snapshots are exposed under <fs mountpoint>/.zfs/snapshot/<snap>.
	$ECHO $(get_prop mountpoint $fsname)/.zfs/snapshot/$snappart
}
1503
1504#
1505# Given a pool and file system, this function will verify the file system
1506# using the zdb internal tool. Note that the pool is exported and imported
1507# to ensure it has consistent state.
1508#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	# Drop pool and filesystem; any remaining args are directories to
	# search when re-importing the pool (zpool import -d).
	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling $ZDB to verify filesystem '$filesys'"
	# Export/import cycle ensures the on-disk state is consistent
	# before zdb examines it.
	$ZFS unmount -a > /dev/null 2>&1
	log_must $ZPOOL export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must $ZPOOL import $search_path $pool

	$ZDB -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: $ZDB -cudi $filesys"
		$CAT $zdbout
		log_fail "$ZDB detected errors with: '$filesys'"
	fi

	log_must $ZFS mount -a
	log_must $RM -rf $zdbout
}
1542
1543#
1544# Given a pool, and this function list all disks in the pool
1545#
1546function get_disklist # pool
1547{
1548 typeset disklist=""
1549
1550 disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \
1551 $GREP -v "\-\-\-\-\-" | \
1552 $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1553
1554 $ECHO $disklist
1555}
1556
3c67d83a
TH
1557#
1558# Given a pool, and this function list all disks in the pool with their full
1559# path (like "/dev/sda" instead of "sda").
1560#
function get_disklist_fullpath # pool
{
	# 'typeset' keeps args local; previously it leaked into the
	# caller's scope.  The -P flag makes 'zpool iostat' print full
	# device paths (e.g. "/dev/sda" instead of "sda").
	typeset args="-P $1"

	get_disklist $args
}
1566
1567
1568
6bb24f4d
BB
1569# /**
1570# This function kills a given list of processes after a time period. We use
1571# this in the stress tests instead of STF_TIMEOUT so that we can have processes
1572# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1573# would be listed as FAIL, which we don't want : we're happy with stress tests
1574# running for a certain amount of time, then finishing.
1575#
1576# @param $1 the time in seconds after which we should terminate these processes
1577# @param $2..$n the processes we wish to terminate.
1578# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must $SLEEP $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		# Only signal processes that are still alive.
		$PS -p $pid > /dev/null 2>&1
		if (($? == 0)); then
			# SIGUSR1 rather than SIGKILL, so the child can
			# shut down gracefully.
			log_must $KILL -USR1 $pid
		fi
	done
}
1598
1599#
1600# Verify a given hotspare disk is inuse or avail
1601#
1602# Return 0 is pool/disk matches expected state, 1 otherwise
1603#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset tgtpool=$1
	typeset tgtdisk=${2#$DEV_DSKDIR/}
	typeset expect=$3

	# Query the "spares" section of the pool config for this device.
	cur_state=$(get_device_state $tgtpool $tgtdisk "spares")

	[[ $expect == ${cur_state} ]]
}
1617
1618#
1619# Verify a given slog disk is inuse or avail
1620#
1621# Return 0 is pool/disk matches expected state, 1 otherwise
1622#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset tgtpool=$1
	typeset tgtdisk=${2#$DEV_DSKDIR/}
	typeset expect=$3

	# Query the "logs" section of the pool config for this device.
	cur_state=$(get_device_state $tgtpool $tgtdisk "logs")

	[[ $expect == ${cur_state} ]]
}
1636
1637#
1638# Verify a given vdev disk is inuse or avail
1639#
1640# Return 0 is pool/disk matches expected state, 1 otherwise
1641#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	# Strip the /dev directory prefix, if present.  This previously
	# used the malformed expansion ${2#$/DEV_DSKDIR/} ('$/' is
	# literal), which never matched, so full device paths were
	# compared unstripped and always mismatched.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
1655
1656#
1657# Check the output of 'zpool status -v <pool>',
1658# and to see if the content of <token> contain the <keyword> specified.
1659#
1660# Return 0 is contain, 1 otherwise
1661#
1662function check_pool_status # pool token keyword
1663{
1664 typeset pool=$1
1665 typeset token=$2
1666 typeset keyword=$3
1667
1668 $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
1669 ($1==token) {print $0}' \
1670 | $GREP -i "$keyword" > /dev/null 2>&1
1671
1672 return $?
1673}
1674
1675#
1676# These 5 following functions are instance of check_pool_status()
1677# is_pool_resilvering - to check if the pool is resilver in progress
1678# is_pool_resilvered - to check if the pool is resilver completed
1679# is_pool_scrubbing - to check if the pool is scrub in progress
1680# is_pool_scrubbed - to check if the pool is scrub completed
1681# is_pool_scrub_stopped - to check if the pool is scrub stopped
1682#
function is_pool_resilvering #pool
{
	# The "scan:" line reports an in-progress resilver.
	check_pool_status "$1" "scan" "resilver in progress since "
}
1688
function is_pool_resilvered #pool
{
	# The "scan:" line reports a completed resilver.
	check_pool_status "$1" "scan" "resilvered "
}
1694
function is_pool_scrubbing #pool
{
	# The "scan:" line reports an in-progress scrub.
	check_pool_status "$1" "scan" "scrub in progress since "
}
1700
function is_pool_scrubbed #pool
{
	# The "scan:" line reports a completed scrub.
	check_pool_status "$1" "scan" "scrub repaired"
}
1706
function is_pool_scrub_stopped #pool
{
	# The "scan:" line reports a cancelled scrub.
	check_pool_status "$1" "scan" "scrub canceled"
}
1712
1713#
1714# Use create_pool()/destroy_pool() to clean up the infomation in
1715# in the given disk to avoid slice overlapping.
1716#
function cleanup_devices #vdevs
{
	# Throwaway pool name, unique per process.
	typeset pool="foopool$$"

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	# Creating and then destroying a pool over the given devices
	# rewrites their labels, clearing any stale configuration.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1730
1731#
1732# Verify the rsh connectivity to each remote host in RHOSTS.
1733#
1734# Return 0 if remote host is accessible; otherwise 1.
1735# $1 remote host name
1736# $2 username
1737#
function verify_rsh_connect #rhost, username
{
	typeset rhost=$1
	typeset username=$2
	typeset rsh_cmd="$RSH -n"
	typeset cur_user=

	# The host must be resolvable ...
	$GETENT hosts $rhost >/dev/null 2>&1
	if (($? != 0)); then
		log_note "$rhost cannot be found from" \
			"administrative database."
		return 1
	fi

	# ... and reachable on the network.
	$PING $rhost 3 >/dev/null 2>&1
	if (($? != 0)); then
		log_note "$rhost is not reachable."
		return 1
	fi

	if ((${#username} != 0)); then
		rsh_cmd="$rsh_cmd -l $username"
		cur_user="given user \"$username\""
	else
		# Backticks execute the logname(1) utility ($LOGNAME is the
		# path to the binary, not the environment variable).
		cur_user="current user \"`$LOGNAME`\""
	fi

	# Finally, verify a trivial remote command succeeds.
	if ! $rsh_cmd $rhost $TRUE; then
		log_note "$RSH to $rhost is not accessible" \
			"with $cur_user."
		return 1
	fi

	return 0
}
1773
1774#
1775# Verify the remote host connection via rsh after rebooting
1776# $1 remote host
1777#
function verify_remote
{
	# NOTE(review): rhost is not typeset, so it leaks into the caller's
	# scope - presumably harmless; verify.
	rhost=$1

	#
	# The following loop waits for the remote system rebooting.
	# Each iteration will wait for 150 seconds. there are
	# total 5 iterations, so the total timeout value will
	# be 12.5 minutes for the system rebooting. This number
	# is an approximate number.
	#
	typeset -i count=0
	while ! verify_rsh_connect $rhost; do
		sleep 150
		((count = count + 1))
		if ((count > 5)); then
			return 1
		fi
	done
	return 0
}
1799
1800#
1801# Replacement function for /usr/bin/rsh. This function will include
1802# the /usr/bin/rsh and meanwhile return the execution status of the
1803# last command.
1804#
1805# $1 usrname passing down to -l option of /usr/bin/rsh
1806# $2 remote machine hostname
1807# $3... command string
1808#
1809
function rsh_status
{
	typeset ruser=$1
	typeset rhost=$2
	typeset -i ret=0
	typeset cmd_str=""
	typeset rsh_str=""

	shift; shift
	cmd_str="$@"

	err_file=/tmp/${rhost}.$$.err
	if ((${#ruser} == 0)); then
		rsh_str="$RSH -n"
	else
		rsh_str="$RSH -n -l $ruser"
	fi

	# Run the command remotely and have the remote ksh print the
	# command's exit status ("status=N") on its stderr, which is
	# captured in $err_file.
	$rsh_str $rhost /bin/ksh -c "'$cmd_str; \
		print -u 2 \"status=\$?\"'" \
		>/dev/null 2>$err_file
	ret=$?
	if (($ret != 0)); then
		$CAT $err_file
		# NOTE(review): $std_file is never defined in this function -
		# presumably a leftover from an earlier version; verify.
		$RM -f $std_file $err_file
		log_fail "$RSH itself failed with exit code $ret..."
	fi

	# Extract the remote command's status from the captured stderr.
	ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
		$CUT -d= -f2)
	(($ret != 0)) && $CAT $err_file >&2

	$RM -f $err_file >/dev/null 2>&1
	return $ret
}
1845
1846#
1847# Get the SUNWstc-fs-zfs package installation path in a remote host
1848# $1 remote host name
1849#
1850function get_remote_pkgpath
1851{
1852 typeset rhost=$1
1853 typeset pkgpath=""
1854
1855 pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
1856 $CUT -d: -f2")
1857
1858 $ECHO $pkgpath
1859}
1860
1861#/**
1862# A function to find and locate free disks on a system or from given
1863# disks as the parameter. It works by locating disks that are in use
1864# as swap devices and dump devices, and also disks listed in /etc/vfstab
1865#
1866# $@ given disks to find which are free, default is all disks in
1867# the test system
1868#
1869# @return a string containing the list of available disks
1870#*/
function find_disks
{
	# Trust provided list, no attempt is made to locate unused devices.
	if is_linux; then
		$ECHO "$@"
		return
	fi


	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	# Snapshot the current swap and dump device configuration so disks
	# in use for either can be excluded below.
	$SWAP -l > $sfi
	$DUMPADM > $dmpi 2>/dev/null

# write an awk script that can process the output of format
# to produce a list of disks we know about. Note that we have
# to escape "$2" so that the shell doesn't interpret it while
# we're creating the awk script.
# -------------------
	$CAT > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
	BEGIN { FS="."; }

	/^Specify disk/{
		searchdisks=0;
	}

	{
		if (searchdisks && \$2 !~ "^$"){
			split(\$2,arr," ");
			print arr[1];
		}
	}

	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
EOF
#---------------------

	$CHMOD 755 /tmp/find_disks.awk
	# Default to every disk format(1M) reports, unless the caller
	# supplied an explicit candidate list.
	disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
	$RM /tmp/find_disks.awk

	unused=""
	for disk in $disks; do
	# Check for mounted
		$GREP "${disk}[sp]" /etc/mnttab >/dev/null
		(($? == 0)) && continue
	# Check for swap
		$GREP "${disk}[sp]" $sfi >/dev/null
		(($? == 0)) && continue
	# check for dump device
		$GREP "${disk}[sp]" $dmpi >/dev/null
		(($? == 0)) && continue
	# check to see if this disk hasn't been explicitly excluded
	# by a user-set environment variable
		$ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
		(($? == 0)) && continue
		unused_candidates="$unused_candidates $disk"
	done
	$RM $sfi
	$RM $dmpi

# now just check to see if those disks do actually exist
# by looking for a device pointing to the first slice in
# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if [ -b $DEV_DSKDIR/${disk}s0 ]; then
		if [ $count -lt $max_finddisksnum ]; then
			unused="$unused $disk"
			# do not impose limit if $@ is provided
			[[ -z $@ ]] && ((count = count + 1))
		fi
		fi
	done

# finally, return our disk list
	$ECHO $unused
}
1954
1955#
1956# Add specified user to specified group
1957#
1958# $1 group name
1959# $2 user name
1960# $3 base of the homedir (optional)
1961#
function add_user #<group_name> <user_name> <basedir>
{
	typeset group=$1
	typeset user=$2
	typeset homebase=${3:-"/var/tmp"}

	# Both a group name and a user name are mandatory.
	if [[ -z $group || -z $user ]]; then
		log_fail "group name or user name are not defined."
	fi

	log_must $USERADD -g $group -d $homebase/$user -m $user

	# Add new users to the same group and the command line utils.
	# This allows them to be run out of the original users home
	# directory as long as it permissioned to be group readable.
	if is_linux; then
		cmd_group=$(stat --format="%G" $ZFS)
		log_must $USERMOD -a -G $cmd_group $user
	fi

	return 0
}
1984
1985#
1986# Delete the specified user.
1987#
1988# $1 login name
1989# $2 base of the homedir (optional)
1990#
function del_user #<logname> <basedir>
{
	typeset login=$1
	typeset homebase=${2:-"/var/tmp"}

	[[ -n $login ]] || log_fail "login name is necessary."

	# Only remove the account if it currently exists.
	if $ID $login > /dev/null 2>&1; then
		log_must $USERDEL $login
	fi

	# Clean up the home directory, if one was left behind.
	[[ -d $homebase/$login ]] && $RM -fr $homebase/$login

	return 0
}
2008
2009#
2010# Select valid gid and create specified group.
2011#
2012# $1 group name
2013#
function add_group #<group_name>
{
	typeset group=$1

	[[ -n $group ]] || log_fail "group name is necessary."

	# Assign 100 as the base gid, a larger value is selected for
	# Linux because for many distributions 1000 and under are reserved.
	if is_linux; then
		# On Linux let groupadd pick the gid itself.
		$GROUPADD $group > /dev/null 2>&1
		if (($? == 0)); then
			return 0
		fi
		return 1
	else
		typeset -i gid=100
		typeset -i rc

		# Probe successive gids until a free one is found.
		while :; do
			$GROUPADD -g $gid $group > /dev/null 2>&1
			rc=$?
			((rc == 0)) && return 0
			# Exit code 4 means the gid is not unique; try the
			# next one.  Any other failure is fatal.
			((rc == 4)) || return 1
			((gid += 1))
		done
	fi
}
2048
2049#
2050# Delete the specified group.
2051#
2052# $1 group name
2053#
function del_group #<group_name>
{
	typeset grp=$1
	if ((${#grp} == 0)); then
		log_fail "group name is necessary."
	fi

	if is_linux; then
		# getent(1) exit codes: 2 = key not found, 0 = found.
		$GETENT group $grp > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			# Group does not exist.
			2) return 0 ;;
			# Group exists; remove it.
			0) log_must $GROUPDEL $grp ;;
			*) return 1 ;;
		esac
	else
		# Solaris groupmod exit codes: 6 = group not found,
		# 9 = name already exists as a group name.
		$GROUPMOD -n $grp $grp > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			# Group does not exist.
			6) return 0 ;;
			# Name already exists as a group name
			9) log_must $GROUPDEL $grp ;;
			*) return 1 ;;
		esac
	fi

	return 0
}
2085
2086#
2087# This function will return true if it's safe to destroy the pool passed
2088# as argument 1. It checks for pools based on zvols and files, and also
2089# files contained in a pool that may have a different mountpoint.
2090#
2091function safe_to_destroy_pool { # $1 the pool name
2092
2093 typeset pool=""
2094 typeset DONT_DESTROY=""
2095
2096 # We check that by deleting the $1 pool, we're not
2097 # going to pull the rug out from other pools. Do this
2098 # by looking at all other pools, ensuring that they
2099 # aren't built from files or zvols contained in this pool.
2100
2101 for pool in $($ZPOOL list -H -o name)
2102 do
2103 ALTMOUNTPOOL=""
2104
2105 # this is a list of the top-level directories in each of the
2106 # files that make up the path to the files the pool is based on
2107 FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
2108 $AWK '{print $1}')
2109
2110 # this is a list of the zvols that make up the pool
2111 ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "$ZVOL_DEVDIR/$1$" \
2112 | $AWK '{print $1}')
2113
2114 # also want to determine if it's a file-based pool using an
2115 # alternate mountpoint...
2116 POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
2117 $GREP / | $AWK '{print $1}' | \
2118 $AWK -F/ '{print $2}' | $GREP -v "dev")
2119
2120 for pooldir in $POOL_FILE_DIRS
2121 do
2122 OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
2123 $GREP "${pooldir}$" | $AWK '{print $1}')
2124
2125 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2126 done
2127
2128
2129 if [ ! -z "$ZVOLPOOL" ]
2130 then
2131 DONT_DESTROY="true"
2132 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2133 fi
2134
2135 if [ ! -z "$FILEPOOL" ]
2136 then
2137 DONT_DESTROY="true"
2138 log_note "Pool $pool is built from $FILEPOOL on $1"
2139 fi
2140
2141 if [ ! -z "$ALTMOUNTPOOL" ]
2142 then
2143 DONT_DESTROY="true"
2144 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2145 fi
2146 done
2147
2148 if [ -z "${DONT_DESTROY}" ]
2149 then
2150 return 0
2151 else
2152 log_note "Warning: it is not safe to destroy $1!"
2153 return 1
2154 fi
2155}
2156
2157#
2158# Get the available ZFS compression options
2159# $1 option type zfs_set|zfs_compress
2160#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
	    gzip-6 gzip-7 gzip-8 gzip-9"

	# Base set depends on how the caller intends to use the list.
	case $1 in
	zfs_compress)
		COMPRESS_OPTS="on lzjb"
		;;
	zfs_set)
		COMPRESS_OPTS="on off lzjb"
		;;
	esac

	typeset valid_opts="$COMPRESS_OPTS"

	# Only advertise the gzip variants when this zfs build knows
	# about them.
	if $ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi

	$ECHO "$valid_opts"
}
2179
2180#
2181# Verify zfs operation with -p option work as expected
2182# $1 operation, value could be create, clone or rename
2183# $2 dataset type, value could be fs or vol
2184# $3 dataset name
2185# $4 new dataset name
2186#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes only a target name; reuse $dataset
			# as the new dataset being created.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# clone requires an existing snapshot source.
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# rename source must exist and must not be a snapshot.
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must $ZFS destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot $ZFS $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must $ZFS $ops -p $dataset $newdataset
	block_device_wait

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must $ZFS $ops -p $dataset $newdataset
	fi

	return 0
}
2252
2253#
2254# Get configuration of pool
2255# $1 pool name
2256# $2 config name
2257#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# Last column of 'zpool list -H' is the altroot; '-' means none.
	alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
		    '{print $2}')
	else
		value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
		    '{print $2}')
	fi
	# Strip the single quotes zdb puts around string values.
	# NOTE(review): these expansions rely on ksh tolerating the
	# unmatched quote inside ${...#...}; not portable to other shells.
	# Also, 'value' is not typeset and leaks to the caller - verify.
	if [[ -n $value ]] ; then
		value=${value#'}
		value=${value%'}
	fi
	echo $value

	return 0
}
2283
2284#
2285# Privated function. Random select one of items from arguments.
2286#
2287# $1 count
2288# $2-n string
2289#
function _random_get
{
	typeset count=$1
	shift

	typeset joined="$@"
	typeset -i pick

	# Choose a 1-based field index at random.
	((pick = RANDOM % count + 1))

	# The items were joined with single spaces; cut out the chosen one.
	typeset choice=$($ECHO "$joined" | $CUT -f $pick -d ' ')
	$ECHO $choice
}
2302
2303#
2304# Random select one of item from arguments which include NONE string
2305#
function random_get_with_non
{
	typeset -i cnt=$#

	# Widen the range by one so the slot past the last argument can be
	# selected; cut(1) then yields an empty string, which stands for
	# "NONE".  This previously used ((cnt =+ 1)), which *assigned* +1
	# to cnt instead of incrementing it, so _random_get was always
	# called with a count of 1 and returned the first argument.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2313
2314#
2315# Random select one of item from arguments which doesn't include NONE string
2316#
function random_get
{
	# Pick uniformly among exactly the given arguments (no NONE slot).
	typeset -i nargs=$#

	_random_get "$nargs" "$@"
}
2321
2322#
2323# Detect if the current system support slog
2324#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b

	$MKDIR -p $dir
	$MKFILE 64M $vdev $sdev

	# 'zpool create -n' is a dry run: nothing is actually created, we
	# only learn whether the "log" vdev type is accepted.
	typeset -i ret=0
	if ! $ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1; then
		ret=1
	fi
	$RM -r $dir

	return $ret
}
2343
2344#
2345# The function will generate a dataset name with specific length
2346# $1, the length of the name
2347# $2, the base string to construct the name
2348#
function gen_dataset_name
{
	typeset -i want=$1
	typeset chunk="$2"
	typeset -i chunklen=${#chunk}
	typeset result=""

	# Repetitions needed to reach at least 'want' characters
	# (ceiling division).
	typeset -i reps=$(((want + chunklen - 1) / chunklen))

	while ((reps > 0)); do
		result="${result}$chunk"
		((reps -= 1))
	done

	$ECHO $result
}
2370
2371#
2372# Get cksum tuple of dataset
2373# $1 dataset name
2374#
2375# sample zdb output:
2376# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2377# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2378# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2379# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2380function datasetcksum
2381{
2382 typeset cksum
2383 $SYNC
2384 cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
2385 | $AWK -F= '{print $7}')
2386 $ECHO $cksum
2387}
2388
2389#
2390# Get cksum of file
2391# #1 file path
2392#
function checksum
{
	# First field of cksum(1) output is the CRC of the file.
	$ECHO $($CKSUM $1 | $AWK '{print $1}')
}
2399
2400#
2401# Get the given disk/slice state from the specific field of the pool
2402#
2403function get_device_state #pool disk field("", "spares","logs")
2404{
2405 typeset pool=$1
2406 typeset disk=${2#$DEV_DSKDIR/}
2407 typeset field=${3:-$pool}
2408
2409 state=$($ZPOOL status -v "$pool" 2>/dev/null | \
2410 $NAWK -v device=$disk -v pool=$pool -v field=$field \
2411 'BEGIN {startconfig=0; startfield=0; }
2412 /config:/ {startconfig=1}
2413 (startconfig==1) && ($1==field) {startfield=1; next;}
2414 (startfield==1) && ($1==device) {print $2; exit;}
2415 (startfield==1) &&
2416 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2417 echo $state
2418}
2419
2420
2421#
2422# print the given directory filesystem type
2423#
2424# $1 directory name
2425#
function get_fstype
{
	typeset dir=$1

	if [[ -z $dir ]]; then
		log_fail "Usage: get_fstype <directory>"
	fi

	#
	# $ df -n /
	# / : ufs
	#
	# The filesystem type is the third whitespace-separated field.
	$DF -n $dir | $AWK '{print $3}'
}
2440
2441#
2442# Given a disk, label it to VTOC regardless what label was on the disk
2443# $1 disk
2444#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$($UNAME -p)

	if is_linux; then
		log_note "Currently unsupported by the test framework"
		return 1
	fi

	# Build a format(1M) command script appropriate for the
	# architecture, then feed it to format non-interactively.
	if [[ $arch == "i386" ]]; then
		$ECHO "label" > $label_file
		$ECHO "0" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "q" >> $label_file
		$ECHO "q" >> $label_file

		# x86 additionally needs an fdisk partition table.
		$FDISK -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		$SLEEP 60
	elif [[ $arch == "sparc" ]]; then
		$ECHO "label" > $label_file
		$ECHO "0" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	$FORMAT -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	$RM -f $label_file
	#
	# wait the format to finish
	#
	$SLEEP 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2493
2494#
2495# check if the system was installed as zfsroot or not
2496# return: 0 ture, otherwise false
2497#
function is_zfsroot
{
	# Root is on ZFS when "df -n /" reports a zfs filesystem type.
	if $DF -n / | $GREP zfs > /dev/null 2>&1; then
		return 0
	fi

	return 1
}
2503
2504#
2505# get the root filesystem name if it's zfsroot system.
2506#
2507# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""
	# Find the dataset mounted at "/" with fstype "zfs" in mnttab.
	rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootfs"
	fi
	# Sanity-check that the name is a real ZFS dataset.
	$ZFS list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		$ECHO $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2523
2524#
2525# get the rootfs's pool name
2526# return:
2527# rootpool name
2528#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""
	# Find the dataset mounted at "/" with fstype "zfs" in mnttab.
	rootfs=$($AWK '{if ($2 == "/" && $3 =="zfs") print $1}' \
		/etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootpool"
	fi
	$ZFS list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		# The pool name is everything before the first '/'.
		# NOTE(review): uses bare 'awk' and backticks, unlike the
		# $AWK/$() convention used elsewhere in this library.
		rootpool=`$ECHO $rootfs | awk -F\/ '{print $1}'`
		$ECHO $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2546
2547#
2548# Get the sub string from specified source string
2549#
2550# $1 source string
2551# $2 start position. Count from 1
2552# $3 offset
2553#
function get_substr #src_str pos offset
{
	# Delegate to awk's substr(); positions are 1-based.
	$ECHO $1 | \
	    $NAWK -v start=$2 -v len=$3 '{print substr($0, start, len)}'
}
2561
2562#
2563# Check if the given device is physical device
2564#
function is_physical_device #device
{
	# Strip any /dev (or raw-device) directory prefix first.
	typeset device=${1#$DEV_DSKDIR}
	device=${device#$DEV_RDSKDIR}

	if is_linux; then
		# Physical means: a block device node exists AND the loop
		# driver exposes max_part (partition support enabled).
		[[ -b "$DEV_DSKDIR/$device" && \
		    -f /sys/module/loop/parameters/max_part ]]
	else
		# illumos-style ctd names, e.g. c0t0d0.
		$ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
	fi
}
2579
7050a65d
SV
2580#
2581# Check if the given device is a real device (ie SCSI device)
2582#
function is_real_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		# lsblk reports TYPE "disk" for whole physical disks (as
		# opposed to partitions, loop, or device-mapper entries).
		$LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP disk > /dev/null 2>&1
		return $?
	fi
	# NOTE(review): no non-Linux branch — falling off the end returns
	# 0 (success) there; confirm callers only use this on Linux.
}
2593
2594#
2595# Check if the given device is a loop device
2596#
function is_loop_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		# lsblk reports TYPE "loop" for loopback devices.
		$LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP loop > /dev/null 2>&1
		return $?
	fi
	# NOTE(review): like is_real_device, this implicitly returns 0 on
	# non-Linux platforms — confirm it is only called on Linux.
}
2607
2608#
2609# Check if the given device is a multipath device and if there is a sybolic
2610# link to a device mapper and to a disk
2611# Currently no support for dm devices alone without multipath
2612#
function is_mpath_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		# A multipath device must both be typed "mpath" by lsblk
		# and resolve as a symlink under $DEV_MPATHDIR.
		if $LSBLK $DEV_MPATHDIR/$disk -o TYPE | $EGREP mpath > /dev/null 2>&1; then
			$READLINK $DEV_MPATHDIR/$disk > /dev/null 2>&1
			return $?
		fi
		return 1
	fi
}
2628
2629# Set the slice prefix for disk partitioning depending
2630# on whether the device is a real, multipath, or loop device.
2631# Currently all disks have to be of the same type, so only
2632# checks first disk to determine slice prefix.
2633#
function set_slice_prefix
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			# NOTE(review): the shell's 'i' is not passed into
			# nawk (no -v), so awk evaluates $(i + 1) with i=0,
			# i.e. always field 1 — only the first disk in
			# $DISKS is ever examined, and every branch returns
			# on the first pass.
			disk="$($ECHO $DISKS | $NAWK '{print $(i + 1)}')"
			# Multipath devices whose 18th character is not a
			# digit (presumably an unpartitioned dm name — TODO
			# confirm), and real disks, need no slice prefix.
			if ( is_mpath_device $disk ) && [[ -z $($ECHO $disk | awk 'substr($1,18,1)\
			    ~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
				export SLICE_PREFIX=""
				return 0
			# Other multipath and loop devices take a "p"
			# partition prefix.
			elif ( is_mpath_device $disk || is_loop_device $disk ); then
				export SLICE_PREFIX="p"
				return 0
			else
				log_fail "$disk not supported for partitioning."
			fi
			(( i = i + 1))
		done
	fi
}
2656
2657#
2658# Set the directory path of the listed devices in $DISK_ARRAY_NUM
2659# Currently all disks have to be of the same type, so only
2660# checks first disk to determine device directory
2661# default = /dev (linux)
2662# real disk = /dev (linux)
2663# multipath device = /dev/mapper (linux)
2664#
function set_device_dir
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			# NOTE(review): as in set_slice_prefix, 'i' is never
			# handed to nawk, so $(i + 1) is always $1 — only
			# the first disk is inspected and both branches
			# return on the first iteration.
			disk="$($ECHO $DISKS | $NAWK '{print $(i + 1)}')"
			if is_mpath_device $disk; then
				# Multipath devices live under /dev/mapper.
				export DEV_DSKDIR=$DEV_MPATHDIR
				return 0
			else
				export DEV_DSKDIR=$DEV_RDSKDIR
				return 0
			fi
			(( i = i + 1))
		done
	else
		export DEV_DSKDIR=$DEV_RDSKDIR
	fi
}
2686
6bb24f4d
BB
2687#
2688# Get the directory path of given device
2689#
function get_device_dir #device
{
	typeset device=$1

	# Call the predicate directly.  The previous form,
	# "if ! $(is_physical_device $device)", command-substituted the
	# function's (empty) stdout and only worked by accident of the
	# substitution's exit status propagating to the empty command;
	# any stray output would have broken it.
	if ! is_physical_device $device; then
		# Strip the trailing path component to get the directory.
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		# If what remains names a block device, report $DEV_DSKDIR.
		if [[ -b "$DEV_DSKDIR/$device" ]]; then
			device="$DEV_DSKDIR"
		fi
		$ECHO $device
	else
		$ECHO "$DEV_DSKDIR"
	fi
}
2706
2707#
2708# Get the package name
2709#
function get_package_name
{
	# $1: optional path; defaults to $STC_NAME.
	typeset dirpath=${1:-$STC_NAME}

	# Convert '/' separators to '-' with shell parameter expansion
	# instead of forking /usr/bin/sed for a trivial substitution.
	echo "SUNWstc-${dirpath//\//-}"
}
2716
2717#
2718# Get the word numbers from a string separated by white space
2719#
function get_word_count
{
	# Count whitespace-separated words in $1 via wc -w.
	typeset text=$1
	$ECHO $text | $WC -w
}
2724
2725#
2726# To verify if the require numbers of disks is given
2727#
function verify_disk_count
{
	# $1: whitespace-separated disk list; $2: minimum required (default 1).
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	# Enough disks supplied — nothing to report.
	((count >= min)) && return

	log_untested "A minimum of $min disks is required to run." \
	    " You specified $count disk(s)"
}
2739
function ds_is_volume
{
	# Succeed when the dataset's "type" property is "volume".
	[[ $(get_prop type $1) == "volume" ]]
}
2746
function ds_is_filesystem
{
	# Succeed when the dataset's "type" property is "filesystem".
	[[ $(get_prop type $1) == "filesystem" ]]
}
2753
function ds_is_snapshot
{
	# Succeed when the dataset's "type" property is "snapshot".
	[[ $(get_prop type $1) == "snapshot" ]]
}
2760
2761#
2762# Check if Trusted Extensions are installed and enabled
2763#
function is_te_enabled
{
	# Trusted Extensions is on iff the labeld service state is
	# "enabled" (grep's match, if any, is left on stdout as before).
	if $SVCS -H -o state labeld 2>/dev/null | $GREP "enabled"; then
		return 0
	else
		return 1
	fi
}
2773
2774# Utility function to determine if a system has multiple cpus.
# Utility function to determine if a system has multiple cpus.
function is_mp
{
	typeset -i cpus

	# nproc on Linux; count psrinfo lines elsewhere.
	if is_linux; then
		cpus=$($NPROC)
	else
		cpus=$($PSRINFO | $WC -l)
	fi

	((cpus > 1))
}
2785
function get_cpu_freq
{
	if is_linux; then
		# NOTE(review): bare 'lscpu' rather than a $LSCPU wrapper
		# variable like the other commands — confirm intentional.
		lscpu | $AWK '/CPU MHz/ { print $3 }'
	else
		# Field 6 of the matching psrinfo -v line — presumably the
		# MHz value in "processor operates at N MHz"; verify format.
		$PSRINFO -v 0 | $AWK '/processor operates at/ {print $6}'
	fi
}
2794
2795# Run the given command as the user provided.
function user_run
{
	typeset usr=$1
	shift

	log_note "user:$usr $@"
	# Run the remaining arguments as a command via $SU.  NOTE(review):
	# stdout/stderr go to fixed /tmp paths shared by every invocation;
	# callers presumably inspect them afterwards — confirm before
	# changing.
	eval \$SU \$usr -c \"$@\" > /tmp/out 2>/tmp/err
	return $?
}
2805
2806#
2807# Check if the pool contains the specified vdevs
2808#
2809# $1 pool
2810# $2..n <vdev> ...
2811#
2812# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2813# vdevs is not in the pool, and 2 if pool name is missing.
2814#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	typeset tmpfile=$($MKTEMP)
	$ZPOOL list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		$GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		if (($? != 0)); then
			# Fix: the old early "return 1" leaked $tmpfile;
			# clean it up on every exit path.
			$RM -f $tmpfile
			return 1
		fi
	done

	$RM -f $tmpfile

	return 0
}
2838
679d73e9
JWK
function get_max
{
	# Print the numerically largest of all arguments.
	typeset -l i max=$1
	shift

	# Fold each argument into the running maximum with direct shell
	# arithmetic; the old "max=$(echo $((...)))" forked a subshell
	# per element for no benefit.
	for i in "$@"; do
		max=$((max > i ? max : i))
	done

	echo $max
}
2850
function get_min
{
	# Print the numerically smallest of all arguments.
	typeset -l i min=$1
	shift

	# Direct shell arithmetic instead of the old per-element
	# "min=$(echo $((...)))" subshell fork.
	for i in "$@"; do
		min=$((min < i ? min : i))
	done

	echo $min
}
2862
6bb24f4d
BB
2863#
2864# Wait for newly created block devices to have their minors created.
2865#
function block_device_wait
{
	if is_linux; then
		# Replay kernel uevents, then block until udev's event
		# queue drains so the device nodes exist before use.
		$UDEVADM trigger
		$UDEVADM settle
	fi
}
1de321e6
JX
2873
2874#
2875# Synchronize all the data in pool
2876#
2877# $1 pool name
2878#
function sync_pool #pool
{
	# $1: pool name, defaulting to $TESTPOOL.
	typeset pool=${1:-$TESTPOOL}

	log_must $SYNC
	log_must $SLEEP 2
	# Flush all the pool data.
	# NOTE(review): a scrub is used here as a completion barrier for
	# outstanding pool activity — confirm this is the intended
	# synchronization mechanism rather than 'zpool sync'.
	typeset -i ret
	$ZPOOL scrub $pool >/dev/null 2>&1
	ret=$?
	(( $ret != 0 )) && \
	    log_fail "$ZPOOL scrub $pool failed."

	# Poll until the scrub completes; seeing a resilver finish
	# instead indicates an unexpected pool state and fails the test.
	while ! is_pool_scrubbed $pool; do
		if is_pool_resilvered $pool ; then
			log_fail "$pool should not be resilver completed."
		fi
		log_must $SLEEP 2
	done
}