1#!/bin/ksh -p
2#
3# CDDL HEADER START
4#
5# The contents of this file are subject to the terms of the
6# Common Development and Distribution License (the "License").
7# You may not use this file except in compliance with the License.
8#
9# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10# or http://www.opensolaris.org/os/licensing.
11# See the License for the specific language governing permissions
12# and limitations under the License.
13#
14# When distributing Covered Code, include this CDDL HEADER in each
15# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16# If applicable, add the following below this CDDL HEADER, with the
17# fields enclosed by brackets "[]" replaced with your own identifying
18# information: Portions Copyright [yyyy] [name of copyright owner]
19#
20# CDDL HEADER END
21#
22
23#
24# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25# Use is subject to license terms.
26# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
27# Copyright 2016 Nexenta Systems, Inc.
28# Copyright (c) 2017 Lawrence Livermore National Security, LLC.
29# Copyright (c) 2017 Datto Inc.
30#
31
32. ${STF_TOOLS}/include/logapi.shlib
33. ${STF_SUITE}/include/math.shlib
34
35#
36# Apply constrained path when available. This is required since the
37# PATH may have been modified by sudo's secure_path behavior.
38#
39if [ -n "$STF_PATH" ]; then
40 PATH="$STF_PATH"
41fi
42
43# Determine if this is a Linux test system
44#
45# Return 0 if platform Linux, 1 if otherwise
46
47function is_linux
48{
49 if [[ $(uname -o) == "GNU/Linux" ]]; then
50 return 0
51 else
52 return 1
53 fi
54}
55
56# Determine if this is a 32-bit system
57#
58# Return 0 if platform is 32-bit, 1 if otherwise
59
60function is_32bit
61{
62 if [[ $(getconf LONG_BIT) == "32" ]]; then
63 return 0
64 else
65 return 1
66 fi
67}
68
69# Determine if kmemleak is enabled
70#
71# Return 0 if kmemleak is enabled, 1 if otherwise
72
73function is_kmemleak
74{
75 if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
76 return 0
77 else
78 return 1
79 fi
80}
81
82# Determine whether a dataset is mounted
83#
84# $1 dataset name
85# $2 filesystem type; optional - defaulted to zfs
86#
87# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
88
89function ismounted
90{
91 typeset fstype=$2
92 [[ -z $fstype ]] && fstype=zfs
93 typeset out dir name ret
94
95 case $fstype in
96 zfs)
97 if [[ "$1" == "/"* ]] ; then
98 for out in $(zfs mount | awk '{print $2}'); do
99 [[ $1 == $out ]] && return 0
100 done
101 else
102 for out in $(zfs mount | awk '{print $1}'); do
103 [[ $1 == $out ]] && return 0
104 done
105 fi
106 ;;
107 ufs|nfs)
108 out=$(df -F $fstype $1 2>/dev/null)
109 ret=$?
110 (($ret != 0)) && return $ret
111
112 dir=${out%%\(*}
113 dir=${dir%% *}
114 name=${out##*\(}
115 name=${name%%\)*}
116 name=${name%% *}
117
118 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
119 ;;
120 ext2)
121 out=$(df -t $fstype $1 2>/dev/null)
122 return $?
123 ;;
124 zvol)
125 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
126 link=$(readlink -f $ZVOL_DEVDIR/$1)
127 [[ -n "$link" ]] && \
128 mount | grep -q "^$link" && \
129 return 0
130 fi
131 ;;
132 esac
133
134 return 1
135}
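# Example usage (illustrative sketch; the dataset is whatever the caller is
# testing):
#
#	if ismounted $TESTPOOL/$TESTFS; then
#		log_must zfs unmount $TESTPOOL/$TESTFS
#	fi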
136
137# Return 0 if a dataset is mounted; 1 otherwise
138#
139# $1 dataset name
140# $2 filesystem type; optional - defaulted to zfs
141
142function mounted
143{
144 ismounted $1 $2
145 (($? == 0)) && return 0
146 return 1
147}
148
149# Return 0 if a dataset is unmounted; 1 otherwise
150#
151# $1 dataset name
152# $2 filesystem type; optional - defaulted to zfs
153
154function unmounted
155{
156 ismounted $1 $2
157 (($? == 1)) && return 0
158 return 1
159}
160
161# split line on ","
162#
163# $1 - line to split
164
165function splitline
166{
167 echo $1 | sed "s/,/ /g"
168}
169
170function default_setup
171{
172 default_setup_noexit "$@"
173
174 log_pass
175}
176
177#
178# Given a list of disks, set up storage pools and datasets.
179#
180function default_setup_noexit
181{
182 typeset disklist=$1
183 typeset container=$2
184 typeset volume=$3
185 log_note begin default_setup_noexit
186
187 if is_global_zone; then
188 if poolexists $TESTPOOL ; then
189 destroy_pool $TESTPOOL
190 fi
191 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
192 log_must zpool create -f $TESTPOOL $disklist
193 else
194 reexport_pool
195 fi
196
197 rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
198 mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
199
200 log_must zfs create $TESTPOOL/$TESTFS
201 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
202
203 if [[ -n $container ]]; then
204 rm -rf $TESTDIR1 || \
205 log_unresolved Could not remove $TESTDIR1
206 mkdir -p $TESTDIR1 || \
207 log_unresolved Could not create $TESTDIR1
208
209 log_must zfs create $TESTPOOL/$TESTCTR
210 log_must zfs set canmount=off $TESTPOOL/$TESTCTR
211 log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
212 log_must zfs set mountpoint=$TESTDIR1 \
213 $TESTPOOL/$TESTCTR/$TESTFS1
214 fi
215
216 if [[ -n $volume ]]; then
217 if is_global_zone ; then
218 log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
219 block_device_wait
220 else
221 log_must zfs create $TESTPOOL/$TESTVOL
222 fi
223 fi
224}
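# Typical usage from a test setup script (sketch; assumes $DISKS is provided
# by the test framework):
#
#	default_setup "$DISKS"			# pool and file system, then log_pass
#	default_setup_noexit "$DISKS" "true"	# same plus a container, no exit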
225
226#
227# Given a list of disks, set up a storage pool, file system and
228# a container.
229#
230function default_container_setup
231{
232 typeset disklist=$1
233
234 default_setup "$disklist" "true"
235}
236
237#
238# Given a list of disks, set up a storage pool, file system
239# and a volume.
240#
241function default_volume_setup
242{
243 typeset disklist=$1
244
245 default_setup "$disklist" "" "true"
246}
247
248#
249# Given a list of disks, set up a storage pool, file system,
250# a container and a volume.
251#
252function default_container_volume_setup
253{
254 typeset disklist=$1
255
256 default_setup "$disklist" "true" "true"
257}
258
259#
260# Create a snapshot on a filesystem or volume. By default create a snapshot on
261# the filesystem.
262#
263# $1 Existing filesystem or volume name. Default, $TESTFS
264# $2 snapshot name. Default, $TESTSNAP
265#
266function create_snapshot
267{
268 typeset fs_vol=${1:-$TESTFS}
269 typeset snap=${2:-$TESTSNAP}
270
271 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
272 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
273
274 if snapexists $fs_vol@$snap; then
275 log_fail "$fs_vol@$snap already exists."
276 fi
277 datasetexists $fs_vol || \
278 log_fail "$fs_vol must exist."
279
280 log_must zfs snapshot $fs_vol@$snap
281}
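# Example (sketch; dataset and snapshot names are the suite defaults):
#
#	create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
#	snapexists $TESTPOOL/$TESTFS@$TESTSNAP || log_fail "snapshot missing"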
282
283#
284# Create a clone from a snapshot, default clone name is $TESTCLONE.
285#
286# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
287# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
288#
289function create_clone # snapshot clone
290{
291 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
292 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
293
294 [[ -z $snap ]] && \
295 log_fail "Snapshot name is undefined."
296 [[ -z $clone ]] && \
297 log_fail "Clone name is undefined."
298
299 log_must zfs clone $snap $clone
300}
301
302#
303# Create a bookmark of the given snapshot. By default create a bookmark on
304# the filesystem.
305#
306# $1 Existing filesystem or volume name. Default, $TESTFS
307# $2 Existing snapshot name. Default, $TESTSNAP
308# $3 bookmark name. Default, $TESTBKMARK
309#
310function create_bookmark
311{
312 typeset fs_vol=${1:-$TESTFS}
313 typeset snap=${2:-$TESTSNAP}
314 typeset bkmark=${3:-$TESTBKMARK}
315
316 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
317 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
318 [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
319
320 if bkmarkexists $fs_vol#$bkmark; then
321 log_fail "$fs_vol#$bkmark already exists."
322 fi
323 datasetexists $fs_vol || \
324 log_fail "$fs_vol must exist."
325 snapexists $fs_vol@$snap || \
326 log_fail "$fs_vol@$snap must exist."
327
328 log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
329}
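# Example (sketch; the snapshot must already exist):
#
#	create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
#	create_bookmark $TESTPOOL/$TESTFS $TESTSNAP $TESTBKMARK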
330
331function default_mirror_setup
332{
333 default_mirror_setup_noexit $1 $2 $3
334
335 log_pass
336}
337
338#
339# Given a pair of disks, set up a storage pool and dataset for the mirror
340# @parameters: $1 the primary side of the mirror
341# $2 the secondary side of the mirror
342# @uses: ZPOOL ZFS TESTPOOL TESTFS
343function default_mirror_setup_noexit
344{
345 readonly func="default_mirror_setup_noexit"
346 typeset primary=$1
347 typeset secondary=$2
348
349 [[ -z $primary ]] && \
350 log_fail "$func: No parameters passed"
351 [[ -z $secondary ]] && \
352 log_fail "$func: No secondary partition passed"
353 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
354 log_must zpool create -f $TESTPOOL mirror $@
355 log_must zfs create $TESTPOOL/$TESTFS
356 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
357}
358
359#
360# create a number of mirrors.
361# We create a number ($1) of 2-way mirrors using the pairs of disks named
362# on the command line. These mirrors are *not* mounted
363# @parameters: $1 the number of mirrors to create
364# $... the devices to use to create the mirrors on
365# @uses: ZPOOL ZFS TESTPOOL
366function setup_mirrors
367{
368 typeset -i nmirrors=$1
369
370 shift
371 while ((nmirrors > 0)); do
372 log_must test -n "$1" -a -n "$2"
373 [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
374 log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
375 shift 2
376 ((nmirrors = nmirrors - 1))
377 done
378}
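# Example (sketch; disk names are illustrative): create ${TESTPOOL}2 from the
# first pair of disks and ${TESTPOOL}1 from the second pair.
#
#	setup_mirrors 2 sdb sdc sdd sde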
379
380#
381# create a number of raidz pools.
382# We create a number ($1) of 2-disk raidz pools using the pairs of disks named
383# on the command line. These pools are *not* mounted
384# @parameters: $1 the number of pools to create
385# $... the devices to use to create the pools on
386# @uses: ZPOOL ZFS TESTPOOL
387function setup_raidzs
388{
389 typeset -i nraidzs=$1
390
391 shift
392 while ((nraidzs > 0)); do
393 log_must test -n "$1" -a -n "$2"
394 [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
395 log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
396 shift 2
397 ((nraidzs = nraidzs - 1))
398 done
399}
400
401#
402# Destroy the configured testpool mirrors.
403# the mirrors are of the form ${TESTPOOL}{number}
404# @uses: ZPOOL ZFS TESTPOOL
405function destroy_mirrors
406{
407 default_cleanup_noexit
408
409 log_pass
410}
411
412#
413# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
414# $1 the list of disks
415#
416function default_raidz_setup
417{
418 typeset disklist="$*"
419 disks=(${disklist[*]})
420
421 if [[ ${#disks[*]} -lt 2 ]]; then
422 log_fail "A raid-z requires a minimum of two disks."
423 fi
424
425 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
426 log_must zpool create -f $TESTPOOL raidz $1 $2 $3
427 log_must zfs create $TESTPOOL/$TESTFS
428 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
429
430 log_pass
431}
432
433#
434# Common function used to cleanup storage pools and datasets.
435#
436# Invoked at the start of the test suite to ensure the system
437# is in a known state, and also at the end of each set of
438# sub-tests to ensure errors from one set of tests don't
439# impact the execution of the next set.
440
441function default_cleanup
442{
443 default_cleanup_noexit
444
445 log_pass
446}
447
448function default_cleanup_noexit
449{
450 typeset exclude=""
451 typeset pool=""
452 #
453 # Destroying the pool will also destroy any
454 # filesystems it contains.
455 #
456 if is_global_zone; then
457 zfs unmount -a > /dev/null 2>&1
458 exclude=`eval echo \"'(${KEEP})'\"`
459 ALL_POOLS=$(zpool list -H -o name \
460 | grep -v "$NO_POOLS" | egrep -v "$exclude")
461 # Here, we loop through the pools we're allowed to
462 # destroy, only destroying them if it's safe to do
463 # so.
464 while [ ! -z ${ALL_POOLS} ]
465 do
466 for pool in ${ALL_POOLS}
467 do
468 if safe_to_destroy_pool $pool ;
469 then
470 destroy_pool $pool
471 fi
472 ALL_POOLS=$(zpool list -H -o name \
473 | grep -v "$NO_POOLS" \
474 | egrep -v "$exclude")
475 done
476 done
477
478 zfs mount -a
479 else
480 typeset fs=""
481 for fs in $(zfs list -H -o name \
482 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
483 datasetexists $fs && \
484 log_must zfs destroy -Rf $fs
485 done
486
487 # Need cleanup here to avoid garbage dir left.
488 for fs in $(zfs list -H -o name); do
489 [[ $fs == /$ZONE_POOL ]] && continue
490 [[ -d $fs ]] && log_must rm -rf $fs/*
491 done
492
493 #
494 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
495 # the default value
496 #
497 for fs in $(zfs list -H -o name); do
498 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
499 log_must zfs set reservation=none $fs
500 log_must zfs set recordsize=128K $fs
501 log_must zfs set mountpoint=/$fs $fs
502 typeset enc=""
503 enc=$(get_prop encryption $fs)
504 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
505 [[ "$enc" == "off" ]]; then
506 log_must zfs set checksum=on $fs
507 fi
508 log_must zfs set compression=off $fs
509 log_must zfs set atime=on $fs
510 log_must zfs set devices=off $fs
511 log_must zfs set exec=on $fs
512 log_must zfs set setuid=on $fs
513 log_must zfs set readonly=off $fs
514 log_must zfs set snapdir=hidden $fs
515 log_must zfs set aclmode=groupmask $fs
516 log_must zfs set aclinherit=secure $fs
517 fi
518 done
519 fi
520
521 [[ -d $TESTDIR ]] && \
522 log_must rm -rf $TESTDIR
523
524 disk1=${DISKS%% *}
525 if is_mpath_device $disk1; then
526 delete_partitions
527 fi
528}
529
530
531#
532# Common function used to cleanup storage pools, file systems
533# and containers.
534#
535function default_container_cleanup
536{
537 if ! is_global_zone; then
538 reexport_pool
539 fi
540
541 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
542 [[ $? -eq 0 ]] && \
543 log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
544
545 datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
546 log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
547
548 datasetexists $TESTPOOL/$TESTCTR && \
549 log_must zfs destroy -Rf $TESTPOOL/$TESTCTR
550
551 [[ -e $TESTDIR1 ]] && \
552 log_must rm -rf $TESTDIR1 > /dev/null 2>&1
553
554 default_cleanup
555}
556
557#
558# Common function used to clean up a snapshot of a file system or volume.
559# Defaults to deleting the file system's snapshot.
560#
561# $1 snapshot name
562#
563function destroy_snapshot
564{
565 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
566
567 if ! snapexists $snap; then
568 log_fail "'$snap' does not exist."
569 fi
570
571 #
572 # The value reported by 'get_prop' may not match the real mountpoint
573 # when the snapshot is unmounted. So, first check and make sure this
574 # snapshot is mounted on the current system.
575 #
576 typeset mtpt=""
577 if ismounted $snap; then
578 mtpt=$(get_prop mountpoint $snap)
579 (($? != 0)) && \
580 log_fail "get_prop mountpoint $snap failed."
581 fi
582
583 log_must zfs destroy $snap
584 [[ $mtpt != "" && -d $mtpt ]] && \
585 log_must rm -rf $mtpt
586}
587
588#
589# Common function used to clean up a clone.
590#
591# $1 clone name
592#
593function destroy_clone
594{
595 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
596
597 if ! datasetexists $clone; then
598 log_fail "'$clone' does not exist."
599 fi
600
601 # Same reasoning as in destroy_snapshot.
602 typeset mtpt=""
603 if ismounted $clone; then
604 mtpt=$(get_prop mountpoint $clone)
605 (($? != 0)) && \
606 log_fail "get_prop mountpoint $clone failed."
607 fi
608
609 log_must zfs destroy $clone
610 [[ $mtpt != "" && -d $mtpt ]] && \
611 log_must rm -rf $mtpt
612}
613
614#
615# Common function used to clean up a bookmark of a file system or volume.
616# Defaults to deleting the file system's bookmark.
617#
618# $1 bookmark name
619#
620function destroy_bookmark
621{
622 typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
623
624 if ! bkmarkexists $bkmark; then
625 log_fail "'$bkmark' does not exist."
626 fi
627
628 log_must zfs destroy $bkmark
629}
630
631# Return 0 if a snapshot exists; $? otherwise
632#
633# $1 - snapshot name
634
635function snapexists
636{
637 zfs list -H -t snapshot "$1" > /dev/null 2>&1
638 return $?
639}
640
641#
642# Return 0 if a bookmark exists; $? otherwise
643#
644# $1 - bookmark name
645#
646function bkmarkexists
647{
648 zfs list -H -t bookmark "$1" > /dev/null 2>&1
649 return $?
650}
651
652#
653# Set a property to a certain value on a dataset.
654# Sets a property of the dataset to the value as passed in.
655# @param:
656# $1 dataset whose property is being set
657# $2 property to set
658# $3 value to set property to
659# @return:
660# 0 if the property could be set.
661# non-zero otherwise.
662# @use: ZFS
663#
664function dataset_setprop
665{
666 typeset fn=dataset_setprop
667
668 if (($# < 3)); then
669 log_note "$fn: Insufficient parameters (need 3, had $#)"
670 return 1
671 fi
672 typeset output=
673 output=$(zfs set $2=$3 $1 2>&1)
674 typeset rv=$?
675 if ((rv != 0)); then
676 log_note "Setting property on $1 failed."
677 log_note "property $2=$3"
678 log_note "Return Code: $rv"
679 log_note "Output: $output"
680 return $rv
681 fi
682 return 0
683}
684
685#
686# Assign suite defined dataset properties.
687# This function is used to apply the suite's defined default set of
688# properties to a dataset.
689# @parameters: $1 dataset to use
690# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
691# @returns:
692# 0 if the dataset has been altered.
693# 1 if no pool name was passed in.
694# 2 if the dataset could not be found.
695# 3 if the dataset could not have its properties set.
696#
697function dataset_set_defaultproperties
698{
699 typeset dataset="$1"
700
701 [[ -z $dataset ]] && return 1
702
703 typeset confset=
704 typeset -i found=0
705 for confset in $(zfs list); do
706 if [[ $dataset = $confset ]]; then
707 found=1
708 break
709 fi
710 done
711 [[ $found -eq 0 ]] && return 2
712 if [[ -n $COMPRESSION_PROP ]]; then
713 dataset_setprop $dataset compression $COMPRESSION_PROP || \
714 return 3
715 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
716 fi
717 if [[ -n $CHECKSUM_PROP ]]; then
718 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
719 return 3
720 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
721 fi
722 return 0
723}
724
725#
726# Check a numeric assertion
727# @parameter: $@ the assertion to check
728# @output: big loud notice if assertion failed
729# @use: log_fail
730#
731function assert
732{
733 (($@)) || log_fail "$@"
734}
735
736#
737# Function to format partition size of a disk
738# Given a disk cxtxdx reduces all partitions
739# to 0 size
740#
741function zero_partitions #<whole_disk_name>
742{
743 typeset diskname=$1
744 typeset i
745
746 if is_linux; then
747 log_must parted $DEV_DSKDIR/$diskname -s -- mklabel gpt
748 else
749 for i in 0 1 3 4 5 6 7
750 do
751 set_partition $i "" 0mb $diskname
752 done
753 fi
754}
755
756#
757# Given a slice, size and disk, this function
758# formats the slice to the specified size.
759# Size should be specified with units as per
760# the `format` command requirements, e.g. 100mb 3gb
761#
762# NOTE: This entire interface is problematic for the Linux parted utility
763# which requires the end of the partition to be specified. It would be
764# best to retire this interface and replace it with something more flexible.
765# At the moment a best effort is made.
766#
767function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
768{
769 typeset -i slicenum=$1
770 typeset start=$2
771 typeset size=$3
772 typeset disk=$4
773 [[ -z $slicenum || -z $size || -z $disk ]] && \
774 log_fail "The slice, size or disk name is unspecified."
775
776 if is_linux; then
777 typeset size_mb=${size%%[mMgG]}
778
779 size_mb=${size_mb%%[mMgG][bB]}
780 if [[ ${size:1:1} == 'g' ]]; then
781 ((size_mb = size_mb * 1024))
782 fi
783
784 # Create GPT partition table when setting slice 0 or
785 # when the device doesn't already contain a GPT label.
786 parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
787 typeset ret_val=$?
788 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
789 log_must parted $DEV_DSKDIR/$disk -s -- mklabel gpt
790 fi
791
792 # When no start is given align on the first cylinder.
793 if [[ -z "$start" ]]; then
794 start=1
795 fi
796
797 # Determine the cylinder size for the device and using
798 # that calculate the end offset in cylinders.
799 typeset -i cly_size_kb=0
800 cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
801 unit cyl print | head -3 | tail -1 | \
802 awk -F '[:k.]' '{print $4}')
803 ((end = (size_mb * 1024 / cly_size_kb) + start))
804
805 log_must parted $DEV_DSKDIR/$disk -s -- \
806 mkpart part$slicenum ${start}cyl ${end}cyl
807
808 blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
809 block_device_wait
810 else
811 typeset format_file=/var/tmp/format_in.$$
812
813 echo "partition" >$format_file
814 echo "$slicenum" >> $format_file
815 echo "" >> $format_file
816 echo "" >> $format_file
817 echo "$start" >> $format_file
818 echo "$size" >> $format_file
819 echo "label" >> $format_file
820 echo "" >> $format_file
821 echo "q" >> $format_file
822 echo "q" >> $format_file
823
824 format -e -s -d $disk -f $format_file
825 fi
826
827 typeset ret_val=$?
828 rm -f $format_file
829 [[ $ret_val -ne 0 ]] && \
830 log_fail "Unable to format $disk slice $slicenum to $size"
831 return 0
832}
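# Example (sketch; disk name and sizes are illustrative): create two 100mb
# slices, the second starting where the first ends.
#
#	set_partition 0 "" 100mb sdb
#	typeset cyl=$(get_endslice sdb 0)
#	set_partition 1 "$cyl" 100mb sdb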
833
834#
835# Delete all partitions on all disks - this is specifically for the use of multipath
836# devices which currently can only be used in the test suite as raw/un-partitioned
837# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
838#
839function delete_partitions
840{
841 typeset -i j=1
842
843 if [[ -z $DISK_ARRAY_NUM ]]; then
844 DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
845 fi
846 if [[ -z $DISKSARRAY ]]; then
847 DISKSARRAY=$DISKS
848 fi
849
850 if is_linux; then
851 if (( $DISK_ARRAY_NUM == 1 )); then
852 while ((j < MAX_PARTITIONS)); do
853 parted $DEV_DSKDIR/$DISK -s rm $j \
854 > /dev/null 2>&1
855 if (( $? == 1 )); then
856 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
857 if (( $? == 1 )); then
858 log_note "Partitions for $DISK should be deleted"
859 else
860 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
861 fi
862 return 0
863 else
864 lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
865 if (( $? == 0 )); then
866 log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
867 fi
868 fi
869 ((j = j+1))
870 done
871 else
872 for disk in `echo $DISKSARRAY`; do
873 while ((j < MAX_PARTITIONS)); do
874 parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
875 if (( $? == 1 )); then
876 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
877 if (( $? == 1 )); then
878 log_note "Partitions for $disk should be deleted"
879 else
880 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
881 fi
882 j=7
883 else
884 lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
885 if (( $? == 0 )); then
886 log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
887 fi
888 fi
889 ((j = j+1))
890 done
891 j=1
892 done
893 fi
894 fi
895 return 0
896}
897
898#
899# Get the end cyl of the given slice
900#
901function get_endslice #<disk> <slice>
902{
903 typeset disk=$1
904 typeset slice=$2
905 if [[ -z $disk || -z $slice ]] ; then
906 log_fail "The disk name or slice number is unspecified."
907 fi
908
909 if is_linux; then
910 endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
911 grep "part${slice}" | \
912 awk '{print $3}' | \
913 sed 's,cyl,,')
914 ((endcyl = (endcyl + 1)))
915 else
916 disk=${disk#/dev/dsk/}
917 disk=${disk#/dev/rdsk/}
918 disk=${disk%s*}
919
920 typeset -i ratio=0
921 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
922 grep "sectors\/cylinder" | \
923 awk '{print $2}')
924
925 if ((ratio == 0)); then
926 return
927 fi
928
929 typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
930 nawk -v token="$slice" '{if ($1==token) print $6}')
931
932 ((endcyl = (endcyl + 1) / ratio))
933 fi
934
935 echo $endcyl
936}
937
938
939#
940# Given a size,disk and total slice number, this function formats the
941# disk slices from 0 to the total slice number with the same specified
942# size.
943#
944function partition_disk #<slice_size> <whole_disk_name> <total_slices>
945{
946 typeset -i i=0
947 typeset slice_size=$1
948 typeset disk_name=$2
949 typeset total_slices=$3
950 typeset cyl
951
952 zero_partitions $disk_name
953 while ((i < $total_slices)); do
954 if ! is_linux; then
955 if ((i == 2)); then
956 ((i = i + 1))
957 continue
958 fi
959 fi
960 set_partition $i "$cyl" $slice_size $disk_name
961 cyl=$(get_endslice $disk_name $i)
962 ((i = i+1))
963 done
964}
965
966#
967# This function continues to write to a filenum number of files into dirnum
968# number of directories until either file_write returns an error or the
969# maximum number of files per directory have been written.
970#
971# Usage:
972# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
973#
974# Return value: 0 on success
975# non 0 on error
976#
977# Where :
978# destdir: is the directory where everything is to be created under
979# dirnum: the maximum number of subdirectories to use, -1 no limit
980# filenum: the maximum number of files per subdirectory
981# bytes: number of bytes to write
982# num_writes: number of times to write out bytes
983# data: the data that will be written
984#
985# E.g.
986# fill_fs /testdir 20 25 1024 256 0
987#
988# Note: bytes * num_writes equals the size of the testfile
989#
990function fill_fs # destdir dirnum filenum bytes num_writes data
991{
992 typeset destdir=${1:-$TESTDIR}
993 typeset -i dirnum=${2:-50}
994 typeset -i filenum=${3:-50}
995 typeset -i bytes=${4:-8192}
996 typeset -i num_writes=${5:-10240}
997 typeset -i data=${6:-0}
998
999 typeset -i odirnum=1
1000 typeset -i idirnum=0
1001 typeset -i fn=0
1002 typeset -i retval=0
1003
1004 log_must mkdir -p $destdir/$idirnum
1005 while (($odirnum > 0)); do
1006 if ((dirnum >= 0 && idirnum >= dirnum)); then
1007 odirnum=0
1008 break
1009 fi
1010 file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
1011 -b $bytes -c $num_writes -d $data
1012 retval=$?
1013 if (($retval != 0)); then
1014 odirnum=0
1015 break
1016 fi
1017 if (($fn >= $filenum)); then
1018 fn=0
1019 ((idirnum = idirnum + 1))
1020 log_must mkdir -p $destdir/$idirnum
1021 else
1022 ((fn = fn + 1))
1023 fi
1024 done
1025 return $retval
1026}
1027
1028#
1029# Simple function to get the specified property. If unable to
1030# get the property then exits.
1031#
1032# Note property is in 'parsable' format (-p)
1033#
1034function get_prop # property dataset
1035{
1036 typeset prop_val
1037 typeset prop=$1
1038 typeset dataset=$2
1039
1040 prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
1041 if [[ $? -ne 0 ]]; then
1042 log_note "Unable to get $prop property for dataset " \
1043 "$dataset"
1044 return 1
1045 fi
1046
1047 echo "$prop_val"
1048 return 0
1049}
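# Example (sketch): read the parsable 'used' value of the default test dataset.
#
#	typeset used=$(get_prop used $TESTPOOL/$TESTFS) ||
#	    log_fail "get_prop used failed"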
1050
1051#
1052# Simple function to get the specified property of pool. If unable to
1053# get the property then exits.
1054#
1055# Note property is in 'parsable' format (-p)
1056#
1057function get_pool_prop # property pool
1058{
1059 typeset prop_val
1060 typeset prop=$1
1061 typeset pool=$2
1062
1063 if poolexists $pool ; then
1064 prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
1065 awk '{print $3}')
1066 if [[ $? -ne 0 ]]; then
1067 log_note "Unable to get $prop property for pool " \
1068 "$pool"
1069 return 1
1070 fi
1071 else
1072 log_note "Pool $pool does not exist."
1073 return 1
1074 fi
1075
1076 echo "$prop_val"
1077 return 0
1078}
1079
1080# Return 0 if a pool exists; $? otherwise
1081#
1082# $1 - pool name
1083
1084function poolexists
1085{
1086 typeset pool=$1
1087
1088 if [[ -z $pool ]]; then
1089 log_note "No pool name given."
1090 return 1
1091 fi
1092
1093 zpool get name "$pool" > /dev/null 2>&1
1094 return $?
1095}
1096
1097# Return 0 if all the specified datasets exist; $? otherwise
1098#
1099# $1-n dataset name
1100function datasetexists
1101{
1102 if (($# == 0)); then
1103 log_note "No dataset name given."
1104 return 1
1105 fi
1106
1107 while (($# > 0)); do
1108 zfs get name $1 > /dev/null 2>&1 || \
1109 return $?
1110 shift
1111 done
1112
1113 return 0
1114}
1115
1116# return 0 if none of the specified datasets exists, otherwise return 1.
1117#
1118# $1-n dataset name
1119function datasetnonexists
1120{
1121 if (($# == 0)); then
1122 log_note "No dataset name given."
1123 return 1
1124 fi
1125
1126 while (($# > 0)); do
1127 zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1128 && return 1
1129 shift
1130 done
1131
1132 return 0
1133}
1134
1135#
1136# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1137#
1138# Returns 0 if shared, 1 otherwise.
1139#
1140function is_shared
1141{
1142 typeset fs=$1
1143 typeset mtpt
1144
1145 if [[ $fs != "/"* ]] ; then
1146 if datasetnonexists "$fs" ; then
1147 return 1
1148 else
1149 mtpt=$(get_prop mountpoint "$fs")
1150 case $mtpt in
1151 none|legacy|-) return 1
1152 ;;
1153 *) fs=$mtpt
1154 ;;
1155 esac
1156 fi
1157 fi
1158
1159 if is_linux; then
1160 for mtpt in `share | awk '{print $1}'` ; do
1161 if [[ $mtpt == $fs ]] ; then
1162 return 0
1163 fi
1164 done
1165 return 1
1166 fi
1167
1168 for mtpt in `share | awk '{print $2}'` ; do
1169 if [[ $mtpt == $fs ]] ; then
1170 return 0
1171 fi
1172 done
1173
1174 typeset stat=$(svcs -H -o STA nfs/server:default)
1175 if [[ $stat != "ON" ]]; then
1176 log_note "Current nfs/server status: $stat"
1177 fi
1178
1179 return 1
1180}
1181
1182#
1183# Given a dataset name determine if it is shared via SMB.
1184#
1185# Returns 0 if shared, 1 otherwise.
1186#
1187function is_shared_smb
1188{
1189 typeset fs=$1
1190 typeset mtpt
1191
1192 if datasetnonexists "$fs" ; then
1193 return 1
1194 else
1195 fs=$(echo $fs | sed 's@/@_@g')
1196 fi
1197
1198 if is_linux; then
1199 for mtpt in `net usershare list | awk '{print $1}'` ; do
1200 if [[ $mtpt == $fs ]] ; then
1201 return 0
1202 fi
1203 done
1204 return 1
1205 else
1206 log_unsupported "Currently unsupported by the test framework"
1207 return 1
1208 fi
1209}
1210
1211#
1212# Given a mountpoint, determine if it is not shared via NFS.
1213#
1214# Returns 0 if not shared, 1 otherwise.
1215#
1216function not_shared
1217{
1218 typeset fs=$1
1219
1220 is_shared $fs
1221 if (($? == 0)); then
1222 return 1
1223 fi
1224
1225 return 0
1226}
1227
1228#
1229# Given a dataset determine if it is not shared via SMB.
1230#
1231# Returns 0 if not shared, 1 otherwise.
1232#
1233function not_shared_smb
1234{
1235 typeset fs=$1
1236
1237 is_shared_smb $fs
1238 if (($? == 0)); then
1239 return 1
1240 fi
1241
1242 return 0
1243}
1244
1245#
1246# Helper function to unshare a mountpoint.
1247#
1248function unshare_fs #fs
1249{
1250 typeset fs=$1
1251
1252 is_shared $fs || is_shared_smb $fs
1253 if (($? == 0)); then
1254 log_must zfs unshare $fs
1255 fi
1256
1257 return 0
1258}
1259
1260#
1261# Helper function to share a NFS mountpoint.
1262#
1263function share_nfs #fs
1264{
1265 typeset fs=$1
1266
1267 if is_linux; then
1268 is_shared $fs
1269 if (($? != 0)); then
1270 log_must share "*:$fs"
1271 fi
1272 else
1273 is_shared $fs
1274 if (($? != 0)); then
1275 log_must share -F nfs $fs
1276 fi
1277 fi
1278
1279 return 0
1280}
1281
1282#
1283# Helper function to unshare a NFS mountpoint.
1284#
1285function unshare_nfs #fs
1286{
1287 typeset fs=$1
1288
1289 if is_linux; then
1290 is_shared $fs
1291 if (($? == 0)); then
1292 log_must unshare -u "*:$fs"
1293 fi
1294 else
1295 is_shared $fs
1296 if (($? == 0)); then
1297 log_must unshare -F nfs $fs
1298 fi
1299 fi
1300
1301 return 0
1302}
1303
1304#
1305# Helper function to show NFS shares.
1306#
1307function showshares_nfs
1308{
1309 if is_linux; then
1310 share -v
1311 else
1312 share -F nfs
1313 fi
1314
1315 return 0
1316}
1317
1318#
1319# Helper function to show SMB shares.
1320#
1321function showshares_smb
1322{
1323 if is_linux; then
1324 net usershare list
1325 else
1326 share -F smb
1327 fi
1328
1329 return 0
1330}
1331
1332#
1333# Check NFS server status and trigger it online.
1334#
1335function setup_nfs_server
1336{
1337 # Cannot share directory in non-global zone.
1338 #
1339 if ! is_global_zone; then
1340 log_note "Cannot trigger NFS server by sharing in LZ."
1341 return
1342 fi
1343
1344 if is_linux; then
1345 log_note "NFS server must be started prior to running the test framework."
1346 return
1347 fi
1348
1349 typeset nfs_fmri="svc:/network/nfs/server:default"
1350 if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1351 #
1352 # Only really sharing operation can enable NFS server
1353 # to online permanently.
1354 #
1355 typeset dummy=/tmp/dummy
1356
1357 if [[ -d $dummy ]]; then
1358 log_must rm -rf $dummy
1359 fi
1360
1361 log_must mkdir $dummy
1362 log_must share $dummy
1363
1364 #
1365 # Wait for the fmri's status to reach its final state. While in
1366 # transition an asterisk (*) is appended to the instance status,
1367 # and unsharing would flip the status back to 'DIS' again.
1368 #
1369 # Wait for at least 1 second.
1370 #
1371 log_must sleep 1
1372 timeout=10
1373 while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1374 do
1375 log_must sleep 1
1376
1377 ((timeout -= 1))
1378 done
1379
1380 log_must unshare $dummy
1381 log_must rm -rf $dummy
1382 fi
1383
1384 log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1385}
1386
1387#
1388# To verify whether calling process is in global zone
1389#
1390# Return 0 if in global zone, 1 in non-global zone
1391#
1392function is_global_zone
1393{
1394 if is_linux; then
1395 return 0
1396 else
1397 typeset cur_zone=$(zonename 2>/dev/null)
1398 if [[ $cur_zone != "global" ]]; then
1399 return 1
1400 fi
1401 return 0
1402 fi
1403}
1404
1405#
1406# Verify whether test is permitted to run from
1407# global zone, local zone, or both
1408#
1409# $1 zone limit, could be "global", "local", or "both"(no limit)
1410#
1411# Return 0 if permitted, otherwise exit with log_unsupported
1412#
1413function verify_runnable # zone limit
1414{
1415 typeset limit=$1
1416
1417 [[ -z $limit ]] && return 0
1418
1419 if is_global_zone ; then
1420 case $limit in
1421 global|both)
1422 ;;
1423 local) log_unsupported "Test is unable to run from "\
1424 "global zone."
1425 ;;
1426 *) log_note "Warning: unknown limit $limit - " \
1427 "use both."
1428 ;;
1429 esac
1430 else
1431 case $limit in
1432 local|both)
1433 ;;
1434 global) log_unsupported "Test is unable to run from "\
1435 "local zone."
1436 ;;
1437 *) log_note "Warning: unknown limit $limit - " \
1438 "use both."
1439 ;;
1440 esac
1441
1442 reexport_pool
1443 fi
1444
1445 return 0
1446}
1447
1448# Return 0 if created successfully or the pool exists; $? otherwise
1449# Note: In local zones, this function should return 0 silently.
1450#
1451# $1 - pool name
1452# $2-n - [keyword] devs_list
1453
1454function create_pool #pool devs_list
1455{
1456 typeset pool=${1%%/*}
1457
1458 shift
1459
1460 if [[ -z $pool ]]; then
1461 log_note "Missing pool name."
1462 return 1
1463 fi
1464
1465 if poolexists $pool ; then
1466 destroy_pool $pool
1467 fi
1468
1469 if is_global_zone ; then
1470 [[ -d /$pool ]] && rm -rf /$pool
1471 log_must zpool create -f $pool $@
1472 fi
1473
1474 return 0
1475}
1476
1477# Return 0 if destroyed successfully or the pool exists; $? otherwise
1478# Note: In local zones, this function should return 0 silently.
1479#
1480# $1 - pool name
1481# Destroy pool with the given parameters.
1482
1483function destroy_pool #pool
1484{
1485 typeset pool=${1%%/*}
1486 typeset mtpt
1487
1488 if [[ -z $pool ]]; then
1489 log_note "No pool name given."
1490 return 1
1491 fi
1492
1493 if is_global_zone ; then
1494 if poolexists "$pool" ; then
1495 mtpt=$(get_prop mountpoint "$pool")
1496
1497 # At times, syseventd activity can cause attempts to
1498 # destroy a pool to fail with EBUSY. We retry a few
1499 # times allowing failures before requiring the destroy
1500 # to succeed.
1501 typeset -i wait_time=10 ret=1 count=0
1502 must=""
1503 while [[ $ret -ne 0 ]]; do
1504 $must zpool destroy -f $pool
1505 ret=$?
1506 [[ $ret -eq 0 ]] && break
1507 log_note "zpool destroy failed with $ret"
1508 [[ count++ -ge 7 ]] && must=log_must
1509 sleep $wait_time
1510 done
1511
1512 [[ -d $mtpt ]] && \
1513 log_must rm -rf $mtpt
1514 else
1515 log_note "Pool does not exist. ($pool)"
1516 return 1
1517 fi
1518 fi
1519
1520 return 0
1521}
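# Example (sketch): recreate a scratch pool from the suite's disk list.
#
#	poolexists $TESTPOOL && destroy_pool $TESTPOOL
#	create_pool $TESTPOOL $DISKS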
1522
1523#
1524# First, create a pool with 5 datasets. Then, create a single zone and
1525# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1526# and a zvol device to the zone.
1527#
1528# $1 zone name
1529# $2 zone root directory prefix
1530# $3 zone ip
1531#
1532function zfs_zones_setup #zone_name zone_root zone_ip
1533{
1534 typeset zone_name=${1:-$(hostname)-z}
1535 typeset zone_root=${2:-"/zone_root"}
1536 typeset zone_ip=${3:-"10.1.1.10"}
1537 typeset prefix_ctr=$ZONE_CTR
1538 typeset pool_name=$ZONE_POOL
1539 typeset -i cntctr=5
1540 typeset -i i=0
1541
1542 # Create pool and 5 containers within it
1543 #
1544 [[ -d /$pool_name ]] && rm -rf /$pool_name
1545 log_must zpool create -f $pool_name $DISKS
1546 while ((i < cntctr)); do
1547 log_must zfs create $pool_name/$prefix_ctr$i
1548 ((i += 1))
1549 done
1550
1551 # create a zvol
1552 log_must zfs create -V 1g $pool_name/zone_zvol
1553 block_device_wait
1554
1555 #
1556 # If the current system supports slog, add a slog device for the pool
1557 #
1558 if verify_slog_support ; then
1559 typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
1560 log_must mkfile $MINVDEVSIZE $sdevs
1561 log_must zpool add $pool_name log mirror $sdevs
1562 fi
1563
1564 # this isn't supported just yet.
1565 # Create a filesystem. In order to add this to
1566 # the zone, it must have its mountpoint set to 'legacy'
1567 # log_must zfs create $pool_name/zfs_filesystem
1568 # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
1569
1570 [[ -d $zone_root ]] && \
1571 log_must rm -rf $zone_root/$zone_name
1572 [[ ! -d $zone_root ]] && \
1573 log_must mkdir -p -m 0700 $zone_root/$zone_name
1574
1575 # Create zone configure file and configure the zone
1576 #
1577 typeset zone_conf=/tmp/zone_conf.$$
1578 echo "create" > $zone_conf
1579 echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1580 echo "set autoboot=true" >> $zone_conf
1581 i=0
1582 while ((i < cntctr)); do
1583 echo "add dataset" >> $zone_conf
1584 echo "set name=$pool_name/$prefix_ctr$i" >> \
1585 $zone_conf
1586 echo "end" >> $zone_conf
1587 ((i += 1))
1588 done
1589
1590 # add our zvol to the zone
1591 echo "add device" >> $zone_conf
1592 echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1593 echo "end" >> $zone_conf
1594
1595 # add a corresponding zvol rdsk to the zone
1596 echo "add device" >> $zone_conf
1597 echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1598 echo "end" >> $zone_conf
1599
1600 # once it's supported, we'll add our filesystem to the zone
1601 # echo "add fs" >> $zone_conf
1602 # echo "set type=zfs" >> $zone_conf
1603 # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1604 # echo "set dir=/export/zfs_filesystem" >> $zone_conf
1605 # echo "end" >> $zone_conf
1606
1607 echo "verify" >> $zone_conf
1608 echo "commit" >> $zone_conf
1609 log_must zonecfg -z $zone_name -f $zone_conf
1610 log_must rm -f $zone_conf
1611
1612 # Install the zone
1613 zoneadm -z $zone_name install
1614 if (($? == 0)); then
1615 log_note "SUCCESS: zoneadm -z $zone_name install"
1616 else
1617 log_fail "FAIL: zoneadm -z $zone_name install"
1618 fi
1619
1620 # Install sysidcfg file
1621 #
1622 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1623 echo "system_locale=C" > $sysidcfg
1624 echo "terminal=dtterm" >> $sysidcfg
1625 echo "network_interface=primary {" >> $sysidcfg
1626 echo "hostname=$zone_name" >> $sysidcfg
1627 echo "}" >> $sysidcfg
1628 echo "name_service=NONE" >> $sysidcfg
1629 echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
1630 echo "security_policy=NONE" >> $sysidcfg
1631 echo "timezone=US/Eastern" >> $sysidcfg
1632
1633 # Boot this zone
1634 log_must zoneadm -z $zone_name boot
1635}
1636
1637#
1638# Reexport TESTPOOL & TESTPOOL(1-4)
1639#
1640function reexport_pool
1641{
1642 typeset -i cntctr=5
1643 typeset -i i=0
1644
1645 while ((i < cntctr)); do
1646 if ((i == 0)); then
1647 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1648 if ! ismounted $TESTPOOL; then
1649 log_must zfs mount $TESTPOOL
1650 fi
1651 else
1652 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1653 if eval ! ismounted \$TESTPOOL$i; then
1654 log_must eval zfs mount \$TESTPOOL$i
1655 fi
1656 fi
1657 ((i += 1))
1658 done
1659}
1660
1661#
1662# Verify a given disk or pool state
1663#
1664# Return 0 if pool/disk matches expected state, 1 otherwise
1665#
1666function check_state # pool disk state{online,offline,degraded}
1667{
1668 typeset pool=$1
1669 typeset disk=${2#$DEV_DSKDIR/}
1670 typeset state=$3
1671
1672 [[ -z $pool ]] || [[ -z $state ]] \
1673 && log_fail "Arguments invalid or missing"
1674
1675 if [[ -z $disk ]]; then
1676 #check pool state only
1677 zpool get -H -o value health $pool \
1678 | grep -i "$state" > /dev/null 2>&1
1679 else
1680 zpool status -v $pool | grep "$disk" \
1681 | grep -i "$state" > /dev/null 2>&1
1682 fi
1683
1684 return $?
1685}
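# Example (sketch; device name is illustrative): verify a vdev reports ONLINE
# after bringing it back online.
#
#	log_must zpool online $TESTPOOL sdb
#	log_must check_state $TESTPOOL sdb "online"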
1686
1687#
1688# Cause a scan of all scsi host adapters by default
1689#
1690# $1 optional host number
1691#
1692function scan_scsi_hosts
1693{
1694 typeset hostnum=${1}
1695
1696 if is_linux; then
1697 if [[ -z $hostnum ]]; then
1698 for host in /sys/class/scsi_host/host*; do
1699 log_must eval "echo '- - -' > $host/scan"
1700 done
1701 else
1702 log_must eval \
1703 "echo /sys/class/scsi_host/host$hostnum/scan" \
1704 > /dev/null
1705 log_must eval \
1706 "echo '- - -' > /sys/class/scsi_host/host$hostnum/scan"
1707 fi
1708 fi
1709}
1710#
1711# Wait for newly created block devices to have their minors created.
1712#
1713function block_device_wait
1714{
1715 if is_linux; then
1716 udevadm trigger
1717 udevadm settle
1718 fi
1719}
1720
1721#
1722# Online or offline a disk on the system
1723#
1724# First checks state of disk. Test will fail if disk is not properly onlined
1725# or offlined. Online is a full rescan of SCSI disks by echoing to every
1726# host entry.
1727#
1728function on_off_disk # disk state{online,offline} host
1729{
1730 typeset disk=$1
1731 typeset state=$2
1732 typeset host=$3
1733
1734 [[ -z $disk ]] || [[ -z $state ]] && \
1735 log_fail "Arguments invalid or missing"
1736
1737 if is_linux; then
1738 if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
1739 dm_name="$(readlink $DEV_DSKDIR/$disk \
1740 | nawk -F / '{print $2}')"
1741 slave="$(ls /sys/block/${dm_name}/slaves \
1742 | nawk '{print $1}')"
1743 while [[ -n $slave ]]; do
1744 #check if disk is online
1745 lsscsi | egrep $slave > /dev/null
1746 if (($? == 0)); then
1747 slave_dir="/sys/block/${dm_name}"
1748 slave_dir+="/slaves/${slave}/device"
1749 ss="${slave_dir}/state"
1750 sd="${slave_dir}/delete"
1751 log_must eval "echo 'offline' > ${ss}"
1752 log_must eval "echo '1' > ${sd}"
1753 lsscsi | egrep $slave > /dev/null
1754 if (($? == 0)); then
1755 log_fail "Offlining" \
1756 "$disk failed"
1757 fi
1758 fi
1759 slave="$(ls /sys/block/$dm_name/slaves \
1760 2>/dev/null | nawk '{print $1}')"
1761 done
1762 elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
1763 #check if disk is online
1764 lsscsi | egrep $disk > /dev/null
1765 if (($? == 0)); then
1766 dev_state="/sys/block/$disk/device/state"
1767 dev_delete="/sys/block/$disk/device/delete"
1768 log_must eval "echo 'offline' > ${dev_state}"
1769 log_must eval "echo '1' > ${dev_delete}"
1770 lsscsi | egrep $disk > /dev/null
1771 if (($? == 0)); then
1772 log_fail "Offlining $disk" \
1773 "failed"
1774 fi
1775 else
1776 log_note "$disk is already offline"
1777 fi
1778 elif [[ $state == "online" ]]; then
1779 #force a full rescan
1780 scan_scsi_hosts $host
1781 block_device_wait
1782 if is_mpath_device $disk; then
1783 dm_name="$(readlink $DEV_DSKDIR/$disk \
1784 | nawk -F / '{print $2}')"
1785 slave="$(ls /sys/block/$dm_name/slaves \
1786 | nawk '{print $1}')"
1787 lsscsi | egrep $slave > /dev/null
1788 if (($? != 0)); then
1789 log_fail "Onlining $disk failed"
1790 fi
1791 elif is_real_device $disk; then
1792 lsscsi | egrep $disk > /dev/null
1793 if (($? != 0)); then
1794 log_fail "Onlining $disk failed"
1795 fi
1796 else
1797 log_fail "$disk is not a real dev"
1798 fi
1799 else
1800 log_fail "$disk failed to $state"
1801 fi
1802 fi
1803}
1804
1805#
1806# Get the mountpoint of snapshot
1807# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1808# as its mountpoint
1809#
1810function snapshot_mountpoint
1811{
1812 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1813
1814 if [[ $dataset != *@* ]]; then
1815 log_fail "Error name of snapshot '$dataset'."
1816 fi
1817
1818 typeset fs=${dataset%@*}
1819 typeset snap=${dataset#*@}
1820
1821 if [[ -z $fs || -z $snap ]]; then
1822 log_fail "Error name of snapshot '$dataset'."
1823 fi
1824
1825 echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1826}
1827
1828#
1829# Given a device and 'ashift' value verify it's correctly set on every label
1830#
1831function verify_ashift # device ashift
1832{
1833 typeset device="$1"
1834 typeset ashift="$2"
1835
1836 zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
1837 if (ashift != $2)
1838 exit 1;
1839 else
1840 count++;
1841 } END {
1842 if (count != 4)
1843 exit 1;
1844 else
1845 exit 0;
1846 }'
1847
1848 return $?
1849}
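# Example (sketch; device path and ashift value are illustrative):
#
#	log_must zpool create -o ashift=12 $TESTPOOL $DEV_DSKDIR/sdb
#	log_must verify_ashift $DEV_DSKDIR/sdb 12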
1850
1851#
1852# Given a pool and file system, this function will verify the file system
1853# using the zdb internal tool. Note that the pool is exported and imported
1854# to ensure it has consistent state.
1855#
1856function verify_filesys # pool filesystem dir
1857{
1858 typeset pool="$1"
1859 typeset filesys="$2"
1860 typeset zdbout="/tmp/zdbout.$$"
1861
1862 shift
1863 shift
1864 typeset dirs=$@
1865 typeset search_path=""
1866
1867 log_note "Calling zdb to verify filesystem '$filesys'"
1868 zfs unmount -a > /dev/null 2>&1
1869 log_must zpool export $pool
1870
1871 if [[ -n $dirs ]] ; then
1872 for dir in $dirs ; do
1873 search_path="$search_path -d $dir"
1874 done
1875 fi
1876
1877 log_must zpool import $search_path $pool
1878
1879 zdb -cudi $filesys > $zdbout 2>&1
1880 if [[ $? != 0 ]]; then
1881 log_note "Output: zdb -cudi $filesys"
1882 cat $zdbout
1883 log_fail "zdb detected errors with: '$filesys'"
1884 fi
1885
1886 log_must zfs mount -a
1887 log_must rm -rf $zdbout
1888}
1889
1890#
1891# Given a pool, this function lists all disks in the pool
1892#
1893function get_disklist # pool
1894{
1895 typeset disklist=""
1896
1897 disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1898 grep -v "\-\-\-\-\-" | \
1899 egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1900
1901 echo $disklist
1902}
1903
1904#
1905# Given a pool, this function lists all disks in the pool with their full
1906# path (like "/dev/sda" instead of "sda").
1907#
1908function get_disklist_fullpath # pool
1909{
1910 args="-P $1"
1911 get_disklist $args
1912}
1913
1914
1915
1916# /**
1917# This function kills a given list of processes after a time period. We use
1918# this in the stress tests instead of STF_TIMEOUT so that we can have processes
1919# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1920# would be listed as FAIL, which we don't want : we're happy with stress tests
1921# running for a certain amount of time, then finishing.
1922#
1923# @param $1 the time in seconds after which we should terminate these processes
1924# @param $2..$n the processes we wish to terminate.
1925# */
1926function stress_timeout
1927{
1928 typeset -i TIMEOUT=$1
1929 shift
1930 typeset cpids="$@"
1931
1932 log_note "Waiting for child processes($cpids). " \
1933 "It could last dozens of minutes, please be patient ..."
1934 log_must sleep $TIMEOUT
1935
1936 log_note "Killing child processes after ${TIMEOUT} stress timeout."
1937 typeset pid
1938 for pid in $cpids; do
1939 ps -p $pid > /dev/null 2>&1
1940 if (($? == 0)); then
1941 log_must kill -USR1 $pid
1942 fi
1943 done
1944}
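# Example (sketch): let a background writer run for 10 minutes, then stop it.
#
#	file_write -o create -f $TESTDIR/$TESTFILE.0 -b 8192 -c 100000 -d 0 &
#	stress_timeout 600 $!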
1945
1946#
1947# Verify a given hotspare disk is inuse or avail
1948#
1950# Return 0 if pool/disk matches expected state, 1 otherwise
1950#
1951function check_hotspare_state # pool disk state{inuse,avail}
1952{
1953 typeset pool=$1
1954 typeset disk=${2#$DEV_DSKDIR/}
1955 typeset state=$3
1956
1957 cur_state=$(get_device_state $pool $disk "spares")
1958
1959 if [[ $state != ${cur_state} ]]; then
1960 return 1
1961 fi
1962 return 0
1963}
1964
1965#
1966# Verify a given slog disk is inuse or avail
1967#
1969# Return 0 if pool/disk matches expected state, 1 otherwise
1969#
1970function check_slog_state # pool disk state{online,offline,unavail}
1971{
1972 typeset pool=$1
1973 typeset disk=${2#$DEV_DSKDIR/}
1974 typeset state=$3
1975
1976 cur_state=$(get_device_state $pool $disk "logs")
1977
1978 if [[ $state != ${cur_state} ]]; then
1979 return 1
1980 fi
1981 return 0
1982}
1983
1984#
1985# Verify a given vdev disk is inuse or avail
1986#
1988# Return 0 if pool/disk matches expected state, 1 otherwise
1988#
1989function check_vdev_state # pool disk state{online,offline,unavail}
1990{
1991 typeset pool=$1
1992 typeset disk=${2#$DEV_DSKDIR/}
1993 typeset state=$3
1994
1995 cur_state=$(get_device_state $pool $disk)
1996
1997 if [[ $state != ${cur_state} ]]; then
1998 return 1
1999 fi
2000 return 0
2001}
2002
2003#
2004# Check the output of 'zpool status -v <pool>',
2005# and see if the content of <token> contains the <keyword> specified.
2006#
2007# Return 0 if it contains the keyword, 1 otherwise
2008#
2009function check_pool_status # pool token keyword
2010{
2011 typeset pool=$1
2012 typeset token=$2
2013 typeset keyword=$3
2014
2015 zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2016 ($1==token) {print $0}' \
2017 | grep -i "$keyword" > /dev/null 2>&1
2018
2019 return $?
2020}
2021
2022#
2023# The following 5 functions are instances of check_pool_status():
2024# is_pool_resilvering - to check if the pool is resilver in progress
2025# is_pool_resilvered - to check if the pool is resilver completed
2026# is_pool_scrubbing - to check if the pool is scrub in progress
2027# is_pool_scrubbed - to check if the pool is scrub completed
2028# is_pool_scrub_stopped - to check if the pool is scrub stopped
2029#
2030function is_pool_resilvering #pool
2031{
2032 check_pool_status "$1" "scan" "resilver in progress since "
2033 return $?
2034}
2035
2036function is_pool_resilvered #pool
2037{
2038 check_pool_status "$1" "scan" "resilvered "
2039 return $?
2040}
2041
2042function is_pool_scrubbing #pool
2043{
2044 check_pool_status "$1" "scan" "scrub in progress since "
2045 return $?
2046}
2047
2048function is_pool_scrubbed #pool
2049{
2050 check_pool_status "$1" "scan" "scrub repaired"
2051 return $?
2052}
2053
2054function is_pool_scrub_stopped #pool
2055{
2056 check_pool_status "$1" "scan" "scrub canceled"
2057 return $?
2058}
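# Example (sketch): start a scrub and wait for it to finish.
#
#	log_must zpool scrub $TESTPOOL
#	while is_pool_scrubbing $TESTPOOL; do
#		sleep 1
#	done
#	is_pool_scrubbed $TESTPOOL || log_fail "scrub did not complete"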
2059
2060#
2061# Use create_pool()/destroy_pool() to clean up the information in
2062# the given disk to avoid slice overlapping.
2063#
2064function cleanup_devices #vdevs
2065{
2066 typeset pool="foopool$$"
2067
2068 if poolexists $pool ; then
2069 destroy_pool $pool
2070 fi
2071
2072 create_pool $pool $@
2073 destroy_pool $pool
2074
2075 return 0
2076}
2077
2078#/**
2079# A function to find and locate free disks on a system or from given
2080# disks as the parameter. It works by locating disks that are in use
2081# as swap devices and dump devices, and also disks listed in /etc/vfstab
2082#
2083# $@ given disks to find which are free, default is all disks in
2084# the test system
2085#
2086# @return a string containing the list of available disks
2087#*/
2088function find_disks
2089{
2090 # Trust provided list, no attempt is made to locate unused devices.
2091 if is_linux; then
2092 echo "$@"
2093 return
2094 fi
2095
2096
2097 sfi=/tmp/swaplist.$$
2098 dmpi=/tmp/dumpdev.$$
2099 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2100
2101 swap -l > $sfi
2102 dumpadm > $dmpi 2>/dev/null
2103
2104# write an awk script that can process the output of format
2105# to produce a list of disks we know about. Note that we have
2106# to escape "$2" so that the shell doesn't interpret it while
2107# we're creating the awk script.
2108# -------------------
2109 cat > /tmp/find_disks.awk <<EOF
2110#!/bin/nawk -f
2111 BEGIN { FS="."; }
2112
2113 /^Specify disk/{
2114 searchdisks=0;
2115 }
2116
2117 {
2118 if (searchdisks && \$2 !~ "^$"){
2119 split(\$2,arr," ");
2120 print arr[1];
2121 }
2122 }
2123
2124 /^AVAILABLE DISK SELECTIONS:/{
2125 searchdisks=1;
2126 }
2127EOF
2128#---------------------
2129
c1d9abf9
JWK
2130 chmod 755 /tmp/find_disks.awk
2131 disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
2132 rm /tmp/find_disks.awk
6bb24f4d
BB
2133
2134 unused=""
2135 for disk in $disks; do
2136 # Check for mounted
c1d9abf9 2137 grep "${disk}[sp]" /etc/mnttab >/dev/null
6bb24f4d
BB
2138 (($? == 0)) && continue
2139 # Check for swap
c1d9abf9 2140 grep "${disk}[sp]" $sfi >/dev/null
6bb24f4d
BB
2141 (($? == 0)) && continue
2142 # check for dump device
c1d9abf9 2143 grep "${disk}[sp]" $dmpi >/dev/null
6bb24f4d
BB
2144 (($? == 0)) && continue
2145 # check to see if this disk hasn't been explicitly excluded
2146 # by a user-set environment variable
c1d9abf9 2147 echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
6bb24f4d
BB
2148 (($? == 0)) && continue
2149 unused_candidates="$unused_candidates $disk"
2150 done
c1d9abf9
JWK
2151 rm $sfi
2152 rm $dmpi
6bb24f4d
BB
2153
2154# now just check to see if those disks do actually exist
2155# by looking for a device pointing to the first slice in
2156# each case. limit the number to max_finddisksnum
2157 count=0
2158 for disk in $unused_candidates; do
2159 if [ -b $DEV_DSKDIR/${disk}s0 ]; then
2160 if [ $count -lt $max_finddisksnum ]; then
2161 unused="$unused $disk"
2162 # do not impose limit if $@ is provided
2163 [[ -z $@ ]] && ((count = count + 1))
2164 fi
2165 fi
2166 done
2167
2168# finally, return our disk list
c1d9abf9 2169 echo $unused
6bb24f4d
BB
2170}
2171
2172#
2173# Add specified user to specified group
2174#
2175# $1 group name
2176# $2 user name
2177# $3 base of the homedir (optional)
2178#
2179function add_user #<group_name> <user_name> <basedir>
2180{
2181 typeset gname=$1
2182 typeset uname=$2
2183 typeset basedir=${3:-"/var/tmp"}
2184
2185 if ((${#gname} == 0 || ${#uname} == 0)); then
2186 		log_fail "group name or user name is not defined."
2187 fi
2188
c1d9abf9
JWK
2189 log_must useradd -g $gname -d $basedir/$uname -m $uname
2190 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.profile
2191 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.bash_profile
2192 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.login
6bb24f4d 2193
f74b821a
BB
2194 	# Add new users to the same group as the command line utilities.
2195 	# This allows them to be run out of the original user's home
2196 	# directory as long as it is permissioned to be group readable.
2197 if is_linux; then
c1d9abf9
JWK
2198 cmd_group=$(stat --format="%G" $(which zfs))
2199 log_must usermod -a -G $cmd_group $uname
f74b821a
BB
2200 fi
2201
6bb24f4d
BB
2202 return 0
2203}
2204
2205#
2206# Delete the specified user.
2207#
2208# $1 login name
2209# $2 base of the homedir (optional)
2210#
2211function del_user #<logname> <basedir>
2212{
2213 typeset user=$1
2214 typeset basedir=${2:-"/var/tmp"}
2215
2216 if ((${#user} == 0)); then
2217 log_fail "login name is necessary."
2218 fi
2219
c1d9abf9
JWK
2220 if id $user > /dev/null 2>&1; then
2221 log_must userdel $user
6bb24f4d
BB
2222 fi
2223
c1d9abf9 2224 [[ -d $basedir/$user ]] && rm -fr $basedir/$user
6bb24f4d
BB
2225
2226 return 0
2227}
2228
2229#
2230# Select valid gid and create specified group.
2231#
2232# $1 group name
2233#
2234function add_group #<group_name>
2235{
2236 typeset group=$1
2237
2238 if ((${#group} == 0)); then
2239 log_fail "group name is necessary."
2240 fi
2241
2242 	# Assign 100 as the base gid on illumos. On Linux let groupadd pick the
2243 	# gid, because for many distributions gids of 1000 and under are reserved.
2244 if is_linux; then
6bb24f4d 2245 while true; do
c1d9abf9 2246 groupadd $group > /dev/null 2>&1
6bb24f4d
BB
2247 typeset -i ret=$?
2248 case $ret in
2249 0) return 0 ;;
6bb24f4d
BB
2250 *) return 1 ;;
2251 esac
2252 done
2253 else
2254 typeset -i gid=100
6bb24f4d 2255 while true; do
c1d9abf9 2256 groupadd -g $gid $group > /dev/null 2>&1
6bb24f4d
BB
2257 typeset -i ret=$?
2258 case $ret in
2259 0) return 0 ;;
2260 # The gid is not unique
2261 4) ((gid += 1)) ;;
2262 *) return 1 ;;
2263 esac
2264 done
2265 fi
2266}
2267
2268#
2269# Delete the specified group.
2270#
2271# $1 group name
2272#
2273function del_group #<group_name>
2274{
2275 typeset grp=$1
2276 if ((${#grp} == 0)); then
2277 log_fail "group name is necessary."
2278 fi
2279
2280 if is_linux; then
c1d9abf9 2281 getent group $grp > /dev/null 2>&1
6bb24f4d
BB
2282 typeset -i ret=$?
2283 case $ret in
2284 # Group does not exist.
2285 2) return 0 ;;
2286 # Name already exists as a group name
c1d9abf9 2287 0) log_must groupdel $grp ;;
6bb24f4d
BB
2288 *) return 1 ;;
2289 esac
2290 else
c1d9abf9 2291 groupmod -n $grp $grp > /dev/null 2>&1
6bb24f4d
BB
2292 typeset -i ret=$?
2293 case $ret in
2294 # Group does not exist.
2295 6) return 0 ;;
2296 # Name already exists as a group name
c1d9abf9 2297 9) log_must groupdel $grp ;;
6bb24f4d
BB
2298 *) return 1 ;;
2299 esac
2300 fi
2301
2302 return 0
2303}
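#
# A sketch of the typical user/group lifecycle in a test; the group and
# user names below are placeholders:
#
#	log_must add_group zfsgrp
#	log_must add_user zfsgrp zfsuser
#	# ... run commands as zfsuser (e.g. via user_run, defined below) ...
#	log_must del_user zfsuser
#	log_must del_group zfsgrp
#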
2304
2305#
2306# This function will return true if it's safe to destroy the pool passed
2307# as argument 1. It checks for pools based on zvols and files, and also
2308# files contained in a pool that may have a different mountpoint.
2309#
2310function safe_to_destroy_pool { # $1 the pool name
2311
2312 typeset pool=""
2313 typeset DONT_DESTROY=""
2314
2315 # We check that by deleting the $1 pool, we're not
2316 # going to pull the rug out from other pools. Do this
2317 # by looking at all other pools, ensuring that they
2318 # aren't built from files or zvols contained in this pool.
2319
c1d9abf9 2320 for pool in $(zpool list -H -o name)
6bb24f4d
BB
2321 do
2322 ALTMOUNTPOOL=""
2323
2324 # this is a list of the top-level directories in each of the
2325 # files that make up the path to the files the pool is based on
c1d9abf9
JWK
2326 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2327 awk '{print $1}')
6bb24f4d
BB
2328
2329 # this is a list of the zvols that make up the pool
c1d9abf9
JWK
2330 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2331 | awk '{print $1}')
6bb24f4d
BB
2332
2333 # also want to determine if it's a file-based pool using an
2334 # alternate mountpoint...
c1d9abf9
JWK
2335 POOL_FILE_DIRS=$(zpool status -v $pool | \
2336 grep / | awk '{print $1}' | \
2337 awk -F/ '{print $2}' | grep -v "dev")
6bb24f4d
BB
2338
2339 for pooldir in $POOL_FILE_DIRS
2340 do
c1d9abf9
JWK
2341 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2342 grep "${pooldir}$" | awk '{print $1}')
6bb24f4d
BB
2343
2344 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2345 done
2346
2347
2348 if [ ! -z "$ZVOLPOOL" ]
2349 then
2350 DONT_DESTROY="true"
2351 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2352 fi
2353
2354 if [ ! -z "$FILEPOOL" ]
2355 then
2356 DONT_DESTROY="true"
2357 log_note "Pool $pool is built from $FILEPOOL on $1"
2358 fi
2359
2360 if [ ! -z "$ALTMOUNTPOOL" ]
2361 then
2362 DONT_DESTROY="true"
2363 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2364 fi
2365 done
2366
2367 if [ -z "${DONT_DESTROY}" ]
2368 then
2369 return 0
2370 else
2371 log_note "Warning: it is not safe to destroy $1!"
2372 return 1
2373 fi
2374}
2375
2376#
2377# Get the available ZFS compression options
2378# $1 option type zfs_set|zfs_compress
2379#
2380function get_compress_opts
2381{
2382 typeset COMPRESS_OPTS
2383 typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2384 gzip-6 gzip-7 gzip-8 gzip-9"
2385
2386 if [[ $1 == "zfs_compress" ]] ; then
2387 COMPRESS_OPTS="on lzjb"
2388 elif [[ $1 == "zfs_set" ]] ; then
2389 COMPRESS_OPTS="on off lzjb"
2390 fi
2391 typeset valid_opts="$COMPRESS_OPTS"
c1d9abf9 2392 zfs get 2>&1 | grep gzip >/dev/null 2>&1
6bb24f4d
BB
2393 if [[ $? -eq 0 ]]; then
2394 valid_opts="$valid_opts $GZIP_OPTS"
2395 fi
c1d9abf9 2396 echo "$valid_opts"
6bb24f4d
BB
2397}
2398
2399#
2400 # Verify that the zfs operation with the -p option works as expected
2401# $1 operation, value could be create, clone or rename
2402# $2 dataset type, value could be fs or vol
2403# $3 dataset name
2404# $4 new dataset name
2405#
2406function verify_opt_p_ops
2407{
2408 typeset ops=$1
2409 typeset datatype=$2
2410 typeset dataset=$3
2411 typeset newdataset=$4
2412
2413 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2414 log_fail "$datatype is not supported."
2415 fi
2416
2417 # check parameters accordingly
2418 case $ops in
2419 create)
2420 newdataset=$dataset
2421 dataset=""
2422 if [[ $datatype == "vol" ]]; then
2423 ops="create -V $VOLSIZE"
2424 fi
2425 ;;
2426 clone)
2427 if [[ -z $newdataset ]]; then
2428 log_fail "newdataset should not be empty" \
2429 "when ops is $ops."
2430 fi
2431 log_must datasetexists $dataset
2432 log_must snapexists $dataset
2433 ;;
2434 rename)
2435 if [[ -z $newdataset ]]; then
2436 log_fail "newdataset should not be empty" \
2437 "when ops is $ops."
2438 fi
2439 log_must datasetexists $dataset
2440 log_mustnot snapexists $dataset
2441 ;;
2442 *)
2443 log_fail "$ops is not supported."
2444 ;;
2445 esac
2446
2447 # make sure the upper level filesystem does not exist
2448 if datasetexists ${newdataset%/*} ; then
c1d9abf9 2449 log_must zfs destroy -rRf ${newdataset%/*}
6bb24f4d
BB
2450 fi
2451
2452 # without -p option, operation will fail
c1d9abf9 2453 log_mustnot zfs $ops $dataset $newdataset
6bb24f4d
BB
2454 log_mustnot datasetexists $newdataset ${newdataset%/*}
2455
2456 # with -p option, operation should succeed
c1d9abf9 2457 log_must zfs $ops -p $dataset $newdataset
6bb24f4d
BB
2458 block_device_wait
2459
2460 if ! datasetexists $newdataset ; then
2461 log_fail "-p option does not work for $ops"
2462 fi
2463
2464 	# when $ops is create or clone, redoing the operation should still succeed
2465 if [[ $ops != "rename" ]]; then
c1d9abf9 2466 log_must zfs $ops -p $dataset $newdataset
6bb24f4d
BB
2467 fi
2468
2469 return 0
2470}
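#
# Example (a sketch; the nested dataset name is a placeholder under
# $TESTPOOL and must not already exist):
#
#	verify_opt_p_ops "create" "fs" $TESTPOOL/dir1/dir2/fs
#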
2471
2472#
2473# Get configuration of pool
2474# $1 pool name
2475# $2 config name
2476#
2477function get_config
2478{
2479 typeset pool=$1
2480 typeset config=$2
2481 typeset alt_root
2482
2483 if ! poolexists "$pool" ; then
2484 return 1
2485 fi
c1d9abf9 2486 alt_root=$(zpool list -H $pool | awk '{print $NF}')
6bb24f4d 2487 if [[ $alt_root == "-" ]]; then
c1d9abf9 2488 value=$(zdb -C $pool | grep "$config:" | awk -F: \
6bb24f4d
BB
2489 '{print $2}')
2490 else
c1d9abf9 2491 value=$(zdb -e $pool | grep "$config:" | awk -F: \
6bb24f4d
BB
2492 '{print $2}')
2493 fi
2494 if [[ -n $value ]] ; then
2495 value=${value#'}
2496 value=${value%'}
2497 fi
2498 echo $value
2499
2500 return 0
2501}
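#
# Example (a sketch; assumes $TESTPOOL exists and that 'version' appears in
# the zdb configuration output):
#
#	typeset version=$(get_config $TESTPOOL version)
#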
2502
2503#
2504 # Private function. Randomly select one of the items from the arguments.
2505#
2506# $1 count
2507# $2-n string
2508#
2509function _random_get
2510{
2511 typeset cnt=$1
2512 shift
2513
2514 typeset str="$@"
2515 typeset -i ind
2516 ((ind = RANDOM % cnt + 1))
2517
c1d9abf9
JWK
2518 typeset ret=$(echo "$str" | cut -f $ind -d ' ')
2519 echo $ret
6bb24f4d
BB
2520}
2521
2522#
2523 # Randomly select one item from the arguments, including the possibility of NONE
2524#
2525function random_get_with_non
2526{
2527 typeset -i cnt=$#
2528 	((cnt += 1))
2529
2530 _random_get "$cnt" "$@"
2531}
2532
2533#
2534 # Randomly select one item from the arguments, excluding the NONE case
2535#
2536function random_get
2537{
2538 _random_get "$#" "$@"
2539}
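#
# Example (a sketch; picks one checksum algorithm, or possibly an empty
# string when the _with_non variant is used):
#
#	typeset alg=$(random_get fletcher2 fletcher4 sha256)
#	typeset maybe_alg=$(random_get_with_non fletcher2 fletcher4 sha256)
#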
2540
2541#
2542 # Detect whether the current system supports a separate log (slog) device
2543#
2544function verify_slog_support
2545{
2546 typeset dir=/tmp/disk.$$
2547 typeset pool=foo.$$
2548 typeset vdev=$dir/a
2549 typeset sdev=$dir/b
2550
c1d9abf9
JWK
2551 mkdir -p $dir
2552 mkfile $MINVDEVSIZE $vdev $sdev
6bb24f4d
BB
2553
2554 typeset -i ret=0
c1d9abf9 2555 if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
6bb24f4d
BB
2556 ret=1
2557 fi
c1d9abf9 2558 rm -r $dir
6bb24f4d
BB
2559
2560 return $ret
2561}
2562
2563#
2564 # Generate a dataset name of (at least) the specified length
2565# $1, the length of the name
2566# $2, the base string to construct the name
2567#
2568function gen_dataset_name
2569{
2570 typeset -i len=$1
2571 typeset basestr="$2"
2572 typeset -i baselen=${#basestr}
2573 typeset -i iter=0
2574 typeset l_name=""
2575
2576 if ((len % baselen == 0)); then
2577 ((iter = len / baselen))
2578 else
2579 ((iter = len / baselen + 1))
2580 fi
2581 while ((iter > 0)); do
2582 l_name="${l_name}$basestr"
2583
2584 ((iter -= 1))
2585 done
2586
c1d9abf9 2587 echo $l_name
6bb24f4d
BB
2588}
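#
# Example (a sketch): build a filesystem name component of at least 200
# characters and use it under $TESTPOOL.
#
#	typeset longname=$(gen_dataset_name 200 "abcdefg")
#	log_must zfs create $TESTPOOL/$longname
#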
2589
2590#
2591# Get cksum tuple of dataset
2592# $1 dataset name
2593#
2594# sample zdb output:
2595# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2596# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2597# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2598# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2599function datasetcksum
2600{
2601 typeset cksum
c1d9abf9
JWK
2602 sync
2603 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2604 | awk -F= '{print $7}')
2605 echo $cksum
6bb24f4d
BB
2606}
2607
2608#
2609# Get cksum of file
2610# #1 file path
2611#
2612function checksum
2613{
2614 typeset cksum
c1d9abf9
JWK
2615 cksum=$(cksum $1 | awk '{print $1}')
2616 echo $cksum
6bb24f4d
BB
2617}
2618
2619#
2620# Get the given disk/slice state from the specific field of the pool
2621#
2622function get_device_state #pool disk field("", "spares","logs")
2623{
2624 typeset pool=$1
2625 typeset disk=${2#$DEV_DSKDIR/}
2626 typeset field=${3:-$pool}
2627
c1d9abf9
JWK
2628 state=$(zpool status -v "$pool" 2>/dev/null | \
2629 nawk -v device=$disk -v pool=$pool -v field=$field \
6bb24f4d
BB
2630 'BEGIN {startconfig=0; startfield=0; }
2631 /config:/ {startconfig=1}
2632 (startconfig==1) && ($1==field) {startfield=1; next;}
2633 (startfield==1) && ($1==device) {print $2; exit;}
2634 (startfield==1) &&
2635 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2636 echo $state
2637}
2638
2639
2640#
2641# print the given directory filesystem type
2642#
2643# $1 directory name
2644#
2645function get_fstype
2646{
2647 typeset dir=$1
2648
2649 if [[ -z $dir ]]; then
2650 log_fail "Usage: get_fstype <directory>"
2651 fi
2652
2653 #
2654 # $ df -n /
2655 # / : ufs
2656 #
c1d9abf9 2657 df -n $dir | awk '{print $3}'
6bb24f4d
BB
2658}
2659
2660#
2661 # Given a disk, label it with VTOC regardless of what label was on the disk
2662# $1 disk
2663#
2664function labelvtoc
2665{
2666 typeset disk=$1
2667 if [[ -z $disk ]]; then
2668 log_fail "The disk name is unspecified."
2669 fi
2670 typeset label_file=/var/tmp/labelvtoc.$$
c1d9abf9 2671 typeset arch=$(uname -p)
6bb24f4d
BB
2672
2673 if is_linux; then
2674 log_note "Currently unsupported by the test framework"
2675 return 1
2676 fi
2677
2678 if [[ $arch == "i386" ]]; then
c1d9abf9
JWK
2679 echo "label" > $label_file
2680 echo "0" >> $label_file
2681 echo "" >> $label_file
2682 echo "q" >> $label_file
2683 echo "q" >> $label_file
6bb24f4d 2684
c1d9abf9 2685 fdisk -B $disk >/dev/null 2>&1
6bb24f4d 2686 		# wait a while for fdisk to finish
c1d9abf9 2687 sleep 60
6bb24f4d 2688 elif [[ $arch == "sparc" ]]; then
c1d9abf9
JWK
2689 echo "label" > $label_file
2690 echo "0" >> $label_file
2691 echo "" >> $label_file
2692 echo "" >> $label_file
2693 echo "" >> $label_file
2694 echo "q" >> $label_file
6bb24f4d
BB
2695 else
2696 log_fail "unknown arch type"
2697 fi
2698
c1d9abf9 2699 format -e -s -d $disk -f $label_file
6bb24f4d 2700 typeset -i ret_val=$?
c1d9abf9 2701 rm -f $label_file
6bb24f4d
BB
2702 #
2703 	# wait for format to finish
2704 #
c1d9abf9 2705 sleep 60
6bb24f4d
BB
2706 if ((ret_val != 0)); then
2707 log_fail "unable to label $disk as VTOC."
2708 fi
2709
2710 return 0
2711}
2712
2713#
2714 # check if the system was installed with a ZFS root or not
2715 # return: 0 if true, otherwise 1
2716#
2717function is_zfsroot
2718{
c1d9abf9 2719 df -n / | grep zfs > /dev/null 2>&1
6bb24f4d
BB
2720 return $?
2721}
2722
2723#
2724 # get the root filesystem name if it's a zfsroot system.
2725#
2726# return: root filesystem name
2727function get_rootfs
2728{
2729 typeset rootfs=""
8aab1218
TS
2730
2731 if ! is_linux; then
2732 rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
2733 /etc/mnttab)
2734 fi
6bb24f4d
BB
2735 if [[ -z "$rootfs" ]]; then
2736 		log_fail "Cannot get rootfs"
2737 fi
c1d9abf9 2738 zfs list $rootfs > /dev/null 2>&1
6bb24f4d 2739 if (($? == 0)); then
c1d9abf9 2740 echo $rootfs
6bb24f4d
BB
2741 else
2742 log_fail "This is not a zfsroot system."
2743 fi
2744}
2745
2746#
2747# get the rootfs's pool name
2748# return:
2749# rootpool name
2750#
2751function get_rootpool
2752{
2753 typeset rootfs=""
2754 typeset rootpool=""
8aab1218
TS
2755
2756 if ! is_linux; then
2757 rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
2758 /etc/mnttab)
2759 fi
6bb24f4d
BB
2760 if [[ -z "$rootfs" ]]; then
2761 		log_fail "Cannot get rootpool"
2762 fi
c1d9abf9 2763 zfs list $rootfs > /dev/null 2>&1
6bb24f4d 2764 if (($? == 0)); then
c1d9abf9
JWK
2765 rootpool=`echo $rootfs | awk -F\/ '{print $1}'`
2766 echo $rootpool
6bb24f4d
BB
2767 else
2768 log_fail "This is not a zfsroot system."
2769 fi
2770}
2771
6bb24f4d
BB
2772#
2773 # Check if the given device is a physical device
2774#
2775function is_physical_device #device
2776{
2777 typeset device=${1#$DEV_DSKDIR}
2778 device=${device#$DEV_RDSKDIR}
2779
2780 if is_linux; then
2781 [[ -b "$DEV_DSKDIR/$device" ]] && \
2782 [[ -f /sys/module/loop/parameters/max_part ]]
2783 return $?
2784 else
c1d9abf9 2785 echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
6bb24f4d
BB
2786 return $?
2787 fi
2788}
2789
7050a65d
SV
2790#
2791# Check if the given device is a real device (ie SCSI device)
2792#
2793function is_real_device #disk
2794{
2795 typeset disk=$1
2796 [[ -z $disk ]] && log_fail "No argument for disk given."
2797
2798 if is_linux; then
c1d9abf9
JWK
2799 lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
2800 egrep disk >/dev/null
7050a65d
SV
2801 return $?
2802 fi
2803}
2804
2805#
2806# Check if the given device is a loop device
2807#
2808function is_loop_device #disk
2809{
2810 typeset disk=$1
2811 [[ -z $disk ]] && log_fail "No argument for disk given."
2812
2813 if is_linux; then
c1d9abf9
JWK
2814 lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
2815 egrep loop >/dev/null
7050a65d
SV
2816 return $?
2817 fi
2818}
2819
2820#
2821 # Check if the given device is a multipath device and if there is a symbolic
2822# link to a device mapper and to a disk
2823# Currently no support for dm devices alone without multipath
2824#
2825function is_mpath_device #disk
2826{
2827 typeset disk=$1
2828 [[ -z $disk ]] && log_fail "No argument for disk given."
2829
2830 if is_linux; then
c1d9abf9
JWK
2831 lsblk $DEV_MPATHDIR/$disk -o TYPE 2>/dev/null | \
2832 egrep mpath >/dev/null
7050a65d 2833 if (($? == 0)); then
c1d9abf9 2834 readlink $DEV_MPATHDIR/$disk > /dev/null 2>&1
7050a65d
SV
2835 return $?
2836 else
2837 return $?
2838 fi
2839 fi
2840}
2841
2842# Set the slice prefix for disk partitioning depending
2843# on whether the device is a real, multipath, or loop device.
2844# Currently all disks have to be of the same type, so only
2845# checks first disk to determine slice prefix.
2846#
2847function set_slice_prefix
2848{
2849 typeset disk
2850 typeset -i i=0
2851
2852 if is_linux; then
2853 while (( i < $DISK_ARRAY_NUM )); do
c1d9abf9
JWK
2854 			disk="$(echo $DISKS | nawk -v n=$((i + 1)) '{print $n}')"
2855 if ( is_mpath_device $disk ) && [[ -z $(echo $disk | awk 'substr($1,18,1)\
2856 ~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
7050a65d
SV
2857 export SLICE_PREFIX=""
2858 return 0
ec0e24c2
SV
2859 elif ( is_mpath_device $disk || is_loop_device \
2860 $disk ); then
7050a65d
SV
2861 export SLICE_PREFIX="p"
2862 return 0
2863 else
2864 log_fail "$disk not supported for partitioning."
2865 fi
2866 (( i = i + 1))
2867 done
2868 fi
2869}
2870
2871#
2872# Set the directory path of the listed devices in $DISK_ARRAY_NUM
2873# Currently all disks have to be of the same type, so only
2874# checks first disk to determine device directory
2875# default = /dev (linux)
2876# real disk = /dev (linux)
2877# multipath device = /dev/mapper (linux)
2878#
2879function set_device_dir
2880{
2881 typeset disk
2882 typeset -i i=0
2883
2884 if is_linux; then
2885 while (( i < $DISK_ARRAY_NUM )); do
c1d9abf9 2886 			disk="$(echo $DISKS | nawk -v n=$((i + 1)) '{print $n}')"
7050a65d
SV
2887 if is_mpath_device $disk; then
2888 export DEV_DSKDIR=$DEV_MPATHDIR
2889 return 0
2890 else
2891 export DEV_DSKDIR=$DEV_RDSKDIR
2892 return 0
2893 fi
2894 (( i = i + 1))
2895 done
2896 else
2897 export DEV_DSKDIR=$DEV_RDSKDIR
2898 fi
2899}
2900
6bb24f4d
BB
2901#
2902# Get the directory path of given device
2903#
2904function get_device_dir #device
2905{
2906 typeset device=$1
2907
2908 if ! $(is_physical_device $device) ; then
2909 if [[ $device != "/" ]]; then
2910 device=${device%/*}
2911 fi
2912 if [[ -b "$DEV_DSKDIR/$device" ]]; then
2913 device="$DEV_DSKDIR"
2914 fi
c1d9abf9 2915 echo $device
6bb24f4d 2916 else
c1d9abf9 2917 echo "$DEV_DSKDIR"
6bb24f4d
BB
2918 fi
2919}
2920
ec0e24c2
SV
2921#
2922# Get persistent name for given disk
2923#
2924function get_persistent_disk_name #device
2925{
2926 typeset device=$1
2927 typeset dev_id
2928
2929 if is_linux; then
2930 if is_real_device $device; then
c1d9abf9
JWK
2931 dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
2932 | egrep disk/by-id | nawk '{print $2; exit}' \
2933 | nawk -F / '{print $3}')"
2934 echo $dev_id
ec0e24c2 2935 elif is_mpath_device $device; then
c1d9abf9
JWK
2936 dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
2937 | egrep disk/by-id/dm-uuid \
2938 | nawk '{print $2; exit}' \
2939 | nawk -F / '{print $3}')"
2940 echo $dev_id
ec0e24c2 2941 else
c1d9abf9 2942 echo $device
ec0e24c2
SV
2943 fi
2944 else
c1d9abf9 2945 echo $device
ec0e24c2
SV
2946 fi
2947}
2948
7a4500a1
SV
2949#
2950# Load scsi_debug module with specified parameters
2951#
2952function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
2953{
2954 typeset devsize=$1
2955 typeset hosts=$2
2956 typeset tgts=$3
2957 typeset luns=$4
2958
2959 [[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
2960 [[ -z $luns ]] && log_fail "Arguments invalid or missing"
2961
2962 if is_linux; then
c1d9abf9 2963 modprobe -n scsi_debug
7a4500a1
SV
2964 if (($? != 0)); then
2965 			log_unsupported "Platform does not have scsi_debug" \
2966 			    "module"
2967 fi
c1d9abf9 2968 lsmod | egrep scsi_debug > /dev/null
7a4500a1
SV
2969 if (($? == 0)); then
2970 log_fail "scsi_debug module already installed"
2971 else
c1d9abf9 2972 log_must modprobe scsi_debug dev_size_mb=$devsize \
7a4500a1
SV
2973 add_host=$hosts num_tgts=$tgts max_luns=$luns
2974 block_device_wait
c1d9abf9 2975 lsscsi | egrep scsi_debug > /dev/null
7a4500a1
SV
2976 if (($? == 1)); then
2977 log_fail "scsi_debug module install failed"
2978 fi
2979 fi
2980 fi
2981}
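#
# Example (a sketch; creates a single 256 MiB scsi_debug device, which can
# later be removed with 'modprobe -r scsi_debug'):
#
#	load_scsi_debug 256 1 1 1
#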
2982
6bb24f4d
BB
2983#
2984# Get the package name
2985#
2986function get_package_name
2987{
2988 typeset dirpath=${1:-$STC_NAME}
2989
2990 echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2991}
2992
2993#
2994 # Get the number of words in a white-space separated string
2995#
2996function get_word_count
2997{
c1d9abf9 2998 echo $1 | wc -w
6bb24f4d
BB
2999}
3000
3001#
3002 # Verify that the required number of disks is given
3003#
3004function verify_disk_count
3005{
3006 typeset -i min=${2:-1}
3007
3008 typeset -i count=$(get_word_count "$1")
3009
3010 if ((count < min)); then
3011 log_untested "A minimum of $min disks is required to run." \
3012 " You specified $count disk(s)"
3013 fi
3014}
3015
3016function ds_is_volume
3017{
3018 typeset type=$(get_prop type $1)
3019 [[ $type = "volume" ]] && return 0
3020 return 1
3021}
3022
3023function ds_is_filesystem
3024{
3025 typeset type=$(get_prop type $1)
3026 [[ $type = "filesystem" ]] && return 0
3027 return 1
3028}
3029
3030function ds_is_snapshot
3031{
3032 typeset type=$(get_prop type $1)
3033 [[ $type = "snapshot" ]] && return 0
3034 return 1
3035}
3036
3037#
3038# Check if Trusted Extensions are installed and enabled
3039#
3040function is_te_enabled
3041{
c1d9abf9 3042 svcs -H -o state labeld 2>/dev/null | grep "enabled"
6bb24f4d
BB
3043 if (($? != 0)); then
3044 return 1
3045 else
3046 return 0
3047 fi
3048}
3049
3050# Utility function to determine if a system has multiple cpus.
3051function is_mp
3052{
3053 if is_linux; then
c1d9abf9 3054 (($(nproc) > 1))
6bb24f4d 3055 else
c1d9abf9 3056 (($(psrinfo | wc -l) > 1))
6bb24f4d
BB
3057 fi
3058
3059 return $?
3060}
3061
3062function get_cpu_freq
3063{
3064 if is_linux; then
c1d9abf9 3065 lscpu | awk '/CPU MHz/ { print $3 }'
6bb24f4d 3066 else
c1d9abf9 3067 psrinfo -v 0 | awk '/processor operates at/ {print $6}'
6bb24f4d
BB
3068 fi
3069}
3070
3071# Run the given command as the user provided.
3072function user_run
3073{
3074 typeset user=$1
3075 shift
3076
f74b821a 3077 log_note "user:$user $@"
c1d9abf9 3078 eval su - \$user -c \"$@\" > /tmp/out 2>/tmp/err
6bb24f4d
BB
3079 return $?
3080}
3081
3082#
3083# Check if the pool contains the specified vdevs
3084#
3085# $1 pool
3086# $2..n <vdev> ...
3087#
3088# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3089# vdevs is not in the pool, and 2 if pool name is missing.
3090#
3091function vdevs_in_pool
3092{
3093 typeset pool=$1
3094 typeset vdev
3095
3096 if [[ -z $pool ]]; then
3097 log_note "Missing pool name."
3098 return 2
3099 fi
3100
3101 shift
3102
c1d9abf9
JWK
3103 typeset tmpfile=$(mktemp)
3104 zpool list -Hv "$pool" >$tmpfile
6bb24f4d 3105 for vdev in $@; do
c1d9abf9 3106 grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
6bb24f4d
BB
3107 [[ $? -ne 0 ]] && return 1
3108 done
3109
c1d9abf9 3110 rm -f $tmpfile
6bb24f4d
BB
3111
3112 return 0;
3113}
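#
# Example (a sketch; $DISK1 and $DISK2 are placeholders for vdevs that
# should already belong to $TESTPOOL):
#
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2
#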
3114
679d73e9
JWK
3115function get_max
3116{
3117 typeset -l i max=$1
3118 shift
3119
3120 for i in "$@"; do
3121 max=$(echo $((max > i ? max : i)))
3122 done
3123
3124 echo $max
3125}
3126
3127function get_min
3128{
3129 typeset -l i min=$1
3130 shift
3131
3132 for i in "$@"; do
3133 min=$(echo $((min < i ? min : i)))
3134 done
3135
3136 echo $min
3137}
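#
# Example (a sketch):
#
#	typeset -i largest=$(get_max 3 17 5)	# 17
#	typeset -i smallest=$(get_min 3 17 5)	# 3
#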
3138
a7004725
DK
3139#
3140# Generate a random number between 1 and the argument.
3141#
3142function random
3143{
3144 typeset max=$1
3145 echo $(( ($RANDOM % $max) + 1 ))
3146}
3147
3148# Write data that can be compressed into a directory
3149function write_compressible
3150{
3151 typeset dir=$1
3152 typeset megs=$2
3153 typeset nfiles=${3:-1}
3154 typeset bs=${4:-1024k}
3155 typeset fname=${5:-file}
3156
3157 [[ -d $dir ]] || log_fail "No directory: $dir"
3158
3159 # Under Linux fio is not currently used since its behavior can
3160 # differ significantly across versions. This includes missing
3161 # command line options and cases where the --buffer_compress_*
3162 # options fail to behave as expected.
3163 if is_linux; then
3164 typeset file_bytes=$(to_bytes $megs)
3165 typeset bs_bytes=4096
3166 typeset blocks=$(($file_bytes / $bs_bytes))
3167
3168 for (( i = 0; i < $nfiles; i++ )); do
3169 truncate -s $file_bytes $dir/$fname.$i
3170
3171 # Write every third block to get 66% compression.
3172 for (( j = 0; j < $blocks; j += 3 )); do
3173 dd if=/dev/urandom of=$dir/$fname.$i \
3174 seek=$j bs=$bs_bytes count=1 \
3175 conv=notrunc >/dev/null 2>&1
3176 done
3177 done
3178 else
3179 log_must eval "fio \
3180 --name=job \
3181 --fallocate=0 \
3182 --minimal \
3183 --randrepeat=0 \
3184 --buffer_compress_percentage=66 \
3185 --buffer_compress_chunk=4096 \
3186 --directory=$dir \
3187 --numjobs=$nfiles \
3188 --nrfiles=$nfiles \
3189 --rw=write \
3190 --bs=$bs \
3191 --filesize=$megs \
3192 --filename_format='$fname.\$jobnum' >/dev/null"
3193 fi
3194}
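#
# Example (a sketch; $TESTDIR is a placeholder for a mounted dataset
# directory, and the size/count arguments follow the forms used elsewhere
# in this suite):
#
#	write_compressible $TESTDIR 32m 2
#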
3195
3196function get_objnum
3197{
3198 typeset pathname=$1
3199 typeset objnum
3200
3201 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3202 objnum=$(stat -c %i $pathname)
3203 echo $objnum
3204}
3205
1de321e6 3206#
bec1067d 3207# Sync data to the pool
1de321e6
JX
3208#
3209# $1 pool name
bec1067d 3210# $2 boolean to force uberblock (and config including zpool cache file) update
1de321e6 3211#
bec1067d 3212function sync_pool #pool <force>
1de321e6
JX
3213{
3214 typeset pool=${1:-$TESTPOOL}
bec1067d 3215 typeset force=${2:-false}
1de321e6 3216
bec1067d
AP
3217 if [[ $force == true ]]; then
3218 log_must zpool sync -f $pool
3219 else
3220 log_must zpool sync $pool
3221 fi
3222
3223 return 0
1de321e6 3224}
d834b9ce
GM
3225
3226#
3227 # Wait for the zpool 'freeing' property to drop to zero.
3228#
3229# $1 pool name
3230#
3231function wait_freeing #pool
3232{
3233 typeset pool=${1:-$TESTPOOL}
3234 while true; do
c1d9abf9
JWK
3235 [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3236 log_must sleep 1
d834b9ce
GM
3237 done
3238}
7a4500a1 3239
dddef7d6 3240#
3241# Wait for every device replace operation to complete
3242#
3243# $1 pool name
3244#
3245function wait_replacing #pool
3246{
3247 typeset pool=${1:-$TESTPOOL}
3248 while true; do
3249 [[ "" == "$(zpool status $pool |
3250 awk '/replacing-[0-9]+/ {print $1}')" ]] && break
3251 log_must sleep 1
3252 done
3253}
3254
7a4500a1
SV
3255#
3256# Check if ZED is currently running, if not start ZED.
3257#
3258function zed_start
3259{
3260 if is_linux; then
3261 # ZEDLET_DIR=/var/tmp/zed
3262 if [[ ! -d $ZEDLET_DIR ]]; then
c1d9abf9 3263 log_must mkdir $ZEDLET_DIR
7a4500a1
SV
3264 fi
3265
3266 # Verify the ZED is not already running.
c1d9abf9 3267 pgrep -x zed > /dev/null
7a4500a1
SV
3268 if (($? == 0)); then
3269 log_fail "ZED already running"
3270 fi
3271
c1d9abf9
JWK
3272 # ZEDLETDIR=</etc/zfs/zed.d | ${SRCDIR}/cmd/zed/zed.d>
3273 log_must cp ${ZEDLETDIR}/all-syslog.sh $ZEDLET_DIR
7a4500a1
SV
3274
3275 log_note "Starting ZED"
3276 # run ZED in the background and redirect foreground logging
3277 # output to zedlog
c1d9abf9 3278 log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid" \
7a4500a1
SV
3279 "-s $ZEDLET_DIR/state 2>${ZEDLET_DIR}/zedlog &"
3280 fi
3281}
3282
3283#
3284# Kill ZED process
3285#
3286function zed_stop
3287{
3288 if is_linux; then
3289 if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
c1d9abf9
JWK
3290 zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
3291 log_must kill $zedpid
7a4500a1 3292 fi
c1d9abf9
JWK
3293 log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
3294 log_must rm -f ${ZEDLET_DIR}/zed.pid
3295 log_must rm -f ${ZEDLET_DIR}/zedlog
3296 log_must rm -f ${ZEDLET_DIR}/state
3297 log_must rmdir $ZEDLET_DIR
7a4500a1
SV
3298 fi
3299}
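#
# A sketch of the usual ZED bracketing in a test (Linux only; ZEDLET_DIR
# and ZEDLETDIR are assumed to be provided by the test environment):
#
#	zed_start
#	# ... trigger and verify zevents ...
#	zed_stop
#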
8c54ddd3
BB
3300
3301#
3302 # Check if the provided device is actively in use as a swap device.
3303#
3304function is_swap_inuse
3305{
3306 typeset device=$1
3307
3308 if [[ -z $device ]] ; then
3309 log_note "No device specified."
3310 return 1
3311 fi
3312
3313 if is_linux; then
3314 swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
3315 else
3316 swap -l | grep -w $device > /dev/null 2>&1
3317 fi
3318
3319 return $?
3320}
3321
3322#
3323# Setup a swap device using the provided device.
3324#
3325function swap_setup
3326{
3327 typeset swapdev=$1
3328
3329 if is_linux; then
3330 log_must mkswap $swapdev > /dev/null 2>&1
3331 log_must swapon $swapdev
3332 else
3333 log_must swap -a $swapdev
3334 fi
3335
3336 return 0
3337}
3338
3339#
3340# Cleanup a swap device on the provided device.
3341#
3342function swap_cleanup
3343{
3344 typeset swapdev=$1
3345
3346 if is_swap_inuse $swapdev; then
3347 if is_linux; then
3348 log_must swapoff $swapdev
3349 else
3350 log_must swap -d $swapdev
3351 fi
3352 fi
3353
3354 return 0
3355}