#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
# Copyright 2016 Nexenta Systems, Inc.
# Copyright (c) 2017 Lawrence Livermore National Security, LLC.
# Copyright (c) 2017 Datto Inc.
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#

. ${STF_TOOLS}/include/logapi.shlib
. ${STF_SUITE}/include/math.shlib
. ${STF_SUITE}/include/blkdev.shlib

#
# Apply constrained path when available. This is required since the
# PATH may have been modified by sudo's secure_path behavior.
#
if [ -n "$STF_PATH" ]; then
	PATH="$STF_PATH"
fi

# Linux kernel version comparison function
#
# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
#
# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
#
function linux_version
{
	typeset ver="$1"

	[[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")

	typeset version=$(echo $ver | cut -d '.' -f 1)
	typeset major=$(echo $ver | cut -d '.' -f 2)
	typeset minor=$(echo $ver | cut -d '.' -f 3)

	[[ -z "$version" ]] && version=0
	[[ -z "$major" ]] && major=0
	[[ -z "$minor" ]] && minor=0

	echo $((version * 10000 + major * 100 + minor))
}

# Determine if this is a Linux test system
#
# Return 0 if platform Linux, 1 if otherwise

function is_linux
{
	if [[ $(uname -o) == "GNU/Linux" ]]; then
		return 0
	else
		return 1
	fi
}

# Determine if this is a 32-bit system
#
# Return 0 if platform is 32-bit, 1 if otherwise

function is_32bit
{
	if [[ $(getconf LONG_BIT) == "32" ]]; then
		return 0
	else
		return 1
	fi
}

# Determine if kmemleak is enabled
#
# Return 0 if kmemleak is enabled, 1 if otherwise

function is_kmemleak
{
	if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
		return 0
	else
		return 1
	fi
}

# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaults to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error

function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
	zfs)
		if [[ "$1" == "/"* ]] ; then
			for out in $(zfs mount | awk '{print $2}'); do
				[[ $1 == $out ]] && return 0
			done
		else
			for out in $(zfs mount | awk '{print $1}'); do
				[[ $1 == $out ]] && return 0
			done
		fi
		;;
	ufs|nfs)
		out=$(df -F $fstype $1 2>/dev/null)
		ret=$?
		(($ret != 0)) && return $ret

		dir=${out%%\(*}
		dir=${dir%% *}
		name=${out##*\(}
		name=${name%%\)*}
		name=${name%% *}

		[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	ext*)
		out=$(df -t $fstype $1 2>/dev/null)
		return $?
		;;
	zvol)
		if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
			link=$(readlink -f $ZVOL_DEVDIR/$1)
			[[ -n "$link" ]] && \
			    mount | grep -q "^$link" && \
				return 0
		fi
		;;
	esac

	return 1
}
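
# Example usage (sketch only; the dataset and NFS mountpoint below are
# illustrative, not defined by this library):
#
#	if ismounted $TESTPOOL/$TESTFS; then
#		log_note "$TESTPOOL/$TESTFS is mounted"
#	fi
#	ismounted /var/tmp/nfsdir nfs && log_note "NFS mount is active"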

# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaults to zfs

function mounted
{
	ismounted $1 $2
	(($? == 0)) && return 0
	return 1
}

# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaults to zfs

function unmounted
{
	ismounted $1 $2
	(($? == 1)) && return 0
	return 1
}

# split line on ","
#
# $1 - line to split

function splitline
{
	echo $1 | sed "s/,/ /g"
}

function default_setup
{
	default_setup_noexit "$@"

	log_pass
}

#
# Given a list of disks, setup storage pools and datasets.
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3
	log_note begin default_setup_noexit

	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1 || \
		    log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
		    log_unresolved Could not create $TESTDIR1

		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
			block_device_wait
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
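
# Example: a typical setup.ksh can build the default pool, filesystem,
# container and volume in one call (sketch only; $DISKS is assumed to be
# supplied by the test runner environment):
#
#	DISK=${DISKS%% *}
#	default_setup_noexit "$DISK" "true" "true"
#	log_pass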

#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true"
}

#
# Given a list of disks, setup a storage pool, file system
# and a volume.
#
function default_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "" "true"
}

#
# Given a list of disks, setup a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true" "true"
}

#
# Create a snapshot on a filesystem or volume. By default, create a snapshot
# on a filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}

#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	[[ -z $snap ]] && \
	    log_fail "Snapshot name is undefined."
	[[ -z $clone ]] && \
	    log_fail "Clone name is undefined."

	log_must zfs clone $snap $clone
}

#
# Create a bookmark of the given snapshot. By default, create a bookmark on
# a filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
	    log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}

#
# Create a temporary clone result of an interrupted resumable 'zfs receive'
# $1 Destination filesystem name. Must not exist, will be created as the result
#    of this function along with its %recv temporary clone
# $2 Source filesystem name. Must not exist, will be created and destroyed
#
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	log_must zfs create -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 > $sendfile"
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	destroy_dataset "$sendfs" "-r"
	log_must rm -f "$sendfile"

	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
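
# Example (sketch): leave a partially received stream behind so a test can
# exercise aborting it with 'zfs recv -A'. The dataset name is illustrative.
#
#	create_recv_clone $TESTPOOL/recvfs
#	log_must zfs recv -A $TESTPOOL/recvfs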

function default_mirror_setup
{
	default_mirror_setup_noexit $1 $2 $3

	log_pass
}

#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#   $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	[[ -z $primary ]] && \
	    log_fail "$func: No parameters passed"
	[[ -z $secondary ]] && \
	    log_fail "$func: No secondary partition passed"
	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL mirror $@
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}

#
# create a number of mirrors.
# We create a number ($1) of 2-way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted
# @parameters: $1 the number of mirrors to create
#   $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	while ((nmirrors > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}

#
# create a number of raidz pools.
# We create a number ($1) of 2-disk raidz pools using the pairs of disks named
# on the command line. These pools are *not* mounted
# @parameters: $1 the number of pools to create
#   $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	while ((nraidzs > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}

#
# Destroy the configured testpool mirrors.
# the mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	default_cleanup_noexit

	log_pass
}

#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}

#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.

function default_cleanup
{
	default_cleanup_noexit

	log_pass
}

#
# Utility function used to list all available pool names.
#
# NOTE: $KEEP is a variable containing pool names, separated by a newline
# character, that must be excluded from the returned list.
#
function get_all_pools
{
	zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
}

function default_cleanup_noexit
{
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		ALL_POOLS=$(get_all_pools)
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				ALL_POOLS=$(get_all_pools)
			done
		done

		zfs mount -a
	else
		typeset fs=""
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			destroy_dataset "$fs" "-Rf"
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
				    [[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
	    log_must rm -rf $TESTDIR

	disk1=${DISKS%% *}
	if is_mpath_device $disk1; then
		delete_partitions
	fi
}


#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}

#
# Common function used to cleanup snapshot of file system or volume. Defaults
# to deleting the file system's snapshot.
#
# $1 snapshot name
#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint reported by 'get_prop' is not the real mountpoint
	# when the snapshot is unmounted, so first check that this snapshot
	# is actually mounted on the current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $snap failed."
	fi

	destroy_dataset "$snap"
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}

#
# Common function used to cleanup clone.
#
# $1 clone name
#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
	fi

	# For the same reason as in destroy_snapshot
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $clone failed."
	fi

	destroy_dataset "$clone"
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}

#
# Common function used to cleanup bookmark of file system or volume. Defaults
# to deleting the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		log_fail "'$bkmark' does not exist."
	fi

	destroy_dataset "$bkmark"
}

# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name

function snapexists
{
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
	return $?
}

#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
	return $?
}

#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
#	$1 dataset whose property is being set
#	$2 property to set
#	$3 value to set property to
# @return:
#	0 if the property could be set.
#	non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	output=$(zfs set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}

#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#	0 if the dataset has been altered.
#	1 if no pool name was passed in.
#	2 if the dataset could not be found.
#	3 if the dataset could not have its properties set.
#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	for confset in $(zfs list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
		    return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
		    return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}

#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
	(($@)) || log_fail "$@"
}

#
# Function to format partition size of a disk
# Given a disk cxtxdx, reduces all partitions
# to 0 size
#
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset i

	if is_linux; then
		DSK=$DEV_DSKDIR/$diskname
		DSK=$(echo $DSK | sed -e "s|//|/|g")
		log_must parted $DSK -s -- mklabel gpt
		blockdev --rereadpt $DSK 2>/dev/null
		block_device_wait
	else
		for i in 0 1 3 4 5 6 7
		do
			log_must set_partition $i "" 0mb $diskname
		done
	fi

	return 0
}

#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements eg. 100mb 3gb
#
# NOTE: This entire interface is problematic for the Linux parted utility
# which requires the end of the partition to be specified. It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4

	if is_linux; then
		if [[ -z $size || -z $disk ]]; then
			log_fail "The size or disk name is unspecified."
		fi
		typeset size_mb=${size%%[mMgG]}

		size_mb=${size_mb%%[mMgG][bB]}
		if [[ ${size:1:1} == 'g' ]]; then
			((size_mb = size_mb * 1024))
		fi

		# Create GPT partition table when setting slice 0 or
		# when the device doesn't already contain a GPT label.
		parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
		typeset ret_val=$?
		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
			parted $DEV_DSKDIR/$disk -s -- mklabel gpt
			if [[ $? -ne 0 ]]; then
				log_note "Failed to create GPT partition table on $disk"
				return 1
			fi
		fi

		# When no start is given align on the first cylinder.
		if [[ -z "$start" ]]; then
			start=1
		fi

		# Determine the cylinder size for the device and using
		# that calculate the end offset in cylinders.
		typeset -i cly_size_kb=0
		cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
		    unit cyl print | head -3 | tail -1 | \
		    awk -F '[:k.]' '{print $4}')
		((end = (size_mb * 1024 / cly_size_kb) + start))

		parted $DEV_DSKDIR/$disk -s -- \
		    mkpart part$slicenum ${start}cyl ${end}cyl
		if [[ $? -ne 0 ]]; then
			log_note "Failed to create partition $slicenum on $disk"
			return 1
		fi

		blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
		block_device_wait
	else
		if [[ -z $slicenum || -z $size || -z $disk ]]; then
			log_fail "The slice, size or disk name is unspecified."
		fi

		typeset format_file=/var/tmp/format_in.$$

		echo "partition" >$format_file
		echo "$slicenum" >> $format_file
		echo "" >> $format_file
		echo "" >> $format_file
		echo "$start" >> $format_file
		echo "$size" >> $format_file
		echo "label" >> $format_file
		echo "" >> $format_file
		echo "q" >> $format_file
		echo "q" >> $format_file

		format -e -s -d $disk -f $format_file
	fi

	typeset ret_val=$?
	rm -f $format_file
	if [[ $ret_val -ne 0 ]]; then
		log_note "Unable to format $disk slice $slicenum to $size"
		return 1
	fi
	return 0
}
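
# Example (sketch; the disk variable is illustrative): create two 100mb
# slices on a disk, the second starting where the first one ends.
#
#	log_must set_partition 0 "" 100mb $DISK
#	typeset cyl=$(get_endslice $DISK 0)
#	log_must set_partition 1 "$cyl" 100mb $DISK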

#
# Delete all partitions on all disks - this is specifically for the use of multipath
# devices which currently can only be used in the test suite as raw/un-partitioned
# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
#
function delete_partitions
{
	typeset -i j=1

	if [[ -z $DISK_ARRAY_NUM ]]; then
		DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
	fi
	if [[ -z $DISKSARRAY ]]; then
		DISKSARRAY=$DISKS
	fi

	if is_linux; then
		if (( $DISK_ARRAY_NUM == 1 )); then
			while ((j < MAX_PARTITIONS)); do
				parted $DEV_DSKDIR/$DISK -s rm $j \
				    > /dev/null 2>&1
				if (( $? == 1 )); then
					lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 1 )); then
						log_note "Partitions for $DISK should be deleted"
					else
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
					return 0
				else
					lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 0 )); then
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
				fi
				((j = j+1))
			done
		else
			for disk in `echo $DISKSARRAY`; do
				while ((j < MAX_PARTITIONS)); do
					parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
					if (( $? == 1 )); then
						lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 1 )); then
							log_note "Partitions for $disk should be deleted"
						else
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
						j=7
					else
						lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 0 )); then
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
					fi
					((j = j+1))
				done
				j=1
			done
		fi
	fi
	return 0
}

#
# Get the end cyl of the given slice
#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	if is_linux; then
		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
		    grep "part${slice}" | \
		    awk '{print $3}' | \
		    sed 's,cyl,,')
		((endcyl = (endcyl + 1)))
	else
		disk=${disk#/dev/dsk/}
		disk=${disk#/dev/rdsk/}
		disk=${disk%s*}

		typeset -i ratio=0
		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		    grep "sectors\/cylinder" | \
		    awk '{print $2}')

		if ((ratio == 0)); then
			return
		fi

		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		    nawk -v token="$slice" '{if ($1==token) print $6}')

		((endcyl = (endcyl + 1) / ratio))
	fi

	echo $endcyl
}


#
# Given a size,disk and total slice number,  this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	while ((i < $total_slices)); do
		if ! is_linux; then
			if ((i == 2)); then
				((i = i + 1))
				continue
			fi
		fi
		log_must set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}

#
# This function continues to write to a filenum number of files into dirnum
# number of directories until either file_write returns an error or the
# maximum number of files per directory have been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
#		non 0 on error
#
# Where :
#	destdir:    is the directory where everything is to be created under
#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
#	filenum:    the maximum number of files per subdirectory
#	bytes:	    number of bytes to write
#	num_writes: number of times to write out bytes
#	data:	    the data that will be written
#
#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
#
# Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1
	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}

#
# Simple function to get the specified property. If unable to
# get the property then exits.
#
# Note property is in 'parsable' format (-p)
#
function get_prop # property dataset
{
	typeset prop_val
	typeset prop=$1
	typeset dataset=$2

	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for dataset " \
		    "$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
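
# Example: properties are returned in parsable form, so sizes are in bytes
# (sketch only; the dataset name is illustrative).
#
#	used=$(get_prop used $TESTPOOL/$TESTFS) || log_fail "get_prop failed"
#	log_note "$TESTPOOL/$TESTFS uses $used bytes"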

#
# Simple function to get the specified property of pool. If unable to
# get the property then exits.
#
# Note property is in 'parsable' format (-p)
#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
		    awk '{print $3}')
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			    "$pool"
			return 1
		fi
	else
		log_note "Pool $pool does not exist."
		return 1
	fi

	echo "$prop_val"
	return 0
}

# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name

function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	zpool get name "$pool" > /dev/null 2>&1
	return $?
}

# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	while (($# > 0)); do
		zfs get name $1 > /dev/null 2>&1 || \
		    return $?
		shift
	done

	return 0
}

# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	while (($# > 0)); do
		zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
		    && return 1
		shift
	done

	return 0
}

#
# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	if is_linux; then
		for mtpt in `share | awk '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	fi

	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}

#
# Given a dataset name determine if it is shared via SMB.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared_smb
{
	typeset fs=$1
	typeset mtpt

	if datasetnonexists "$fs" ; then
		return 1
	else
		fs=$(echo $fs | sed 's@/@_@g')
	fi

	if is_linux; then
		for mtpt in `net usershare list | awk '{print $1}'` ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	else
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi
}

#
# Given a mountpoint, determine if it is not shared via NFS.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	typeset fs=$1

	is_shared $fs
	if (($? == 0)); then
		return 1
	fi

	return 0
}

#
# Given a dataset determine if it is not shared via SMB.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared_smb
{
	typeset fs=$1

	is_shared_smb $fs
	if (($? == 0)); then
		return 1
	fi

	return 0
}

#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
	typeset fs=$1

	is_shared $fs || is_shared_smb $fs
	if (($? == 0)); then
		log_must zfs unshare $fs
	fi

	return 0
}

#
# Helper function to share a NFS mountpoint.
#
function share_nfs #fs
{
	typeset fs=$1

	if is_linux; then
		is_shared $fs
		if (($? != 0)); then
			log_must share "*:$fs"
		fi
	else
		is_shared $fs
		if (($? != 0)); then
			log_must share -F nfs $fs
		fi
	fi

	return 0
}

#
# Helper function to unshare a NFS mountpoint.
#
function unshare_nfs #fs
{
	typeset fs=$1

	if is_linux; then
		is_shared $fs
		if (($? == 0)); then
			log_must unshare -u "*:$fs"
		fi
	else
		is_shared $fs
		if (($? == 0)); then
			log_must unshare -F nfs $fs
		fi
	fi

	return 0
}

#
# Helper function to show NFS shares.
#
function showshares_nfs
{
	if is_linux; then
		share -v
	else
		share -F nfs
	fi

	return 0
}

#
# Helper function to show SMB shares.
#
function showshares_smb
{
	if is_linux; then
		net usershare list
	else
		share -F smb
	fi

	return 0
}

#
# Check NFS server status and trigger it online.
#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	if is_linux; then
		#
		# Re-synchronize /var/lib/nfs/etab with /etc/exports and
		# /etc/exports.d./* to provide a clean test environment.
		#
		log_must share -r

		log_note "NFS server must be started prior to running ZTS."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only an actual share operation can bring the NFS server
		# online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Wait for the fmri's status to reach its final state.
		# Otherwise, while it is in transition an asterisk (*) is
		# appended for instances, and unshare will reverse the status
		# to 'DIS' again.
		#
		# Wait for at least 1 second.
		#
		log_must sleep 1
		timeout=10
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}

#
# To verify whether calling process is in global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
	if is_linux; then
		return 0
	else
		typeset cur_zone=$(zonename 2>/dev/null)
		if [[ $cur_zone != "global" ]]; then
			return 1
		fi
		return 0
	fi
}

#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both"(no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
	typeset limit=$1

	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		reexport_pool
	fi

	return 0
}

# Return 0 if created successfully or the pool already exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list

function create_pool #pool devs_list
{
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}

# Return 0 if the pool is destroyed successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.

function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd/udev activity can cause attempts
			# to destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			log_must_busy zpool destroy -f $pool

			[[ -d $mtpt ]] && \
			    log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}

# Return 0 if the dataset is destroyed successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
# Destroy dataset with the given parameters.

function destroy_dataset #dataset #args
{
	typeset dataset=$1
	typeset mtpt
	typeset args=${2:-""}

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	if is_global_zone ; then
		if datasetexists "$dataset" ; then
			mtpt=$(get_prop mountpoint "$dataset")
			log_must_busy zfs destroy $args $dataset

			[[ -d $mtpt ]] && \
			    log_must rm -rf $mtpt
		else
			log_note "Dataset does not exist. ($dataset)"
			return 1
		fi
	fi

	return 0
}
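
# Example (sketch): destroy a filesystem together with its snapshots and
# clones; the optional second argument is passed straight to 'zfs destroy'.
#
#	destroy_dataset "$TESTPOOL/$TESTFS" "-R"
#	destroy_dataset "$TESTPOOL/$TESTFS@$TESTSNAP"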

#
# Firstly, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name
# $2 zone root directory prefix
# $3 zone ip
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create a pool and 5 containers within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol
	block_device_wait

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
	    log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
	    log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
		    $zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo "terminal=dtterm" >> $sysidcfg
	echo "network_interface=primary {" >> $sysidcfg
	echo "hostname=$zone_name" >> $sysidcfg
	echo "}" >> $sysidcfg
	echo "name_service=NONE" >> $sysidcfg
	echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo "security_policy=NONE" >> $sysidcfg
	echo "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}

#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}

#
# Verify a given disk or pool state
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline,degraded}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	[[ -z $pool ]] || [[ -z $state ]] \
	    && log_fail "Arguments invalid or missing"

	if [[ -z $disk ]]; then
		#check pool state only
		zpool get -H -o value health $pool \
		    | grep -i "$state" > /dev/null 2>&1
	else
		zpool status -v $pool | grep "$disk"  \
		    | grep -i "$state" > /dev/null 2>&1
	fi

	return $?
}
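
# Example (sketch; the disk variable is illustrative):
#
#	log_must zpool offline $TESTPOOL $DISK
#	log_must check_state $TESTPOOL $DISK "offline"
#	check_state $TESTPOOL "" "degraded" && log_note "pool is degraded"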

#
# Get the mountpoint of snapshot
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if [[ $dataset != *@* ]]; then
		log_fail "Error: invalid snapshot name '$dataset'."
	fi

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	if [[ -z $fs || -z $snap ]]; then
		log_fail "Error: invalid snapshot name '$dataset'."
	fi

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}

#
# Given a device and 'ashift' value verify it's correctly set on every label
#
function verify_ashift # device ashift
{
	typeset device="$1"
	typeset ashift="$2"

	zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
	    if (ashift != $2)
	        exit 1;
	    else
	        count++;
	    } END {
	    if (count != 4)
	        exit 1;
	    else
	        exit 0;
	    }'

	return $?
}

#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}

#
# Given a pool, this function lists all disks in the pool
#
function get_disklist # pool
{
	typeset disklist=""

	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
	    grep -v "\-\-\-\-\-" | \
	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")

	echo $disklist
}

#
# Given a pool, this function lists all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
#
function get_disklist_fullpath # pool
{
	args="-P $1"
	get_disklist $args
}



# /**
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want : we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
	    "It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		ps -p $pid > /dev/null 2>&1
		if (($? == 0)); then
			log_must kill -USR1 $pid
		fi
	done
}

#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "spares")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Wait until a hotspare transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}

#
# Verify a given slog disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "logs")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Verify a given vdev disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Wait until a vdev transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
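
# Example (sketch; $DISK is illustrative): after reattaching a device, allow
# up to 60 seconds for the vdev to come back online before failing the test.
#
#	log_must zpool online $TESTPOOL $DISK
#	log_must wait_vdev_state $TESTPOOL $DISK "ONLINE" 60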
2047
6bb24f4d
BB
2048#
2049# Check the output of 'zpool status -v <pool>',
2050# and to see if the content of <token> contain the <keyword> specified.
2051#
2052# Return 0 is contain, 1 otherwise
2053#
0ea05c64 2054function check_pool_status # pool token keyword <verbose>
6bb24f4d
BB
2055{
2056 typeset pool=$1
2057 typeset token=$2
2058 typeset keyword=$3
0ea05c64 2059 typeset verbose=${4:-false}
6bb24f4d 2060
0ea05c64
AP
2061 scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2062 ($1==token) {print $0}')
2063 if [[ $verbose == true ]]; then
2064 log_note $scan
2065 fi
2066 echo $scan | grep -i "$keyword" > /dev/null 2>&1
6bb24f4d
BB
2067
2068 return $?
2069}
2070
2071#
0ea05c64 2072# These 6 following functions are instance of check_pool_status()
6bb24f4d
BB
2073# is_pool_resilvering - to check if the pool is resilver in progress
2074# is_pool_resilvered - to check if the pool is resilver completed
2075# is_pool_scrubbing - to check if the pool is scrub in progress
2076# is_pool_scrubbed - to check if the pool is scrub completed
2077# is_pool_scrub_stopped - to check if the pool is scrub stopped
0ea05c64 2078# is_pool_scrub_paused - to check if the pool has scrub paused
a1d477c2
MA
2079# is_pool_removing - to check if the pool is removing a vdev
2080# is_pool_removed - to check if the pool is remove completed
6bb24f4d 2081#
0ea05c64
AP
2082function is_pool_resilvering #pool <verbose>
2083{
2084 check_pool_status "$1" "scan" "resilver in progress since " $2
2085 return $?
2086}
2087
2088function is_pool_resilvered #pool <verbose>
6bb24f4d 2089{
0ea05c64 2090 check_pool_status "$1" "scan" "resilvered " $2
6bb24f4d
BB
2091 return $?
2092}
2093
0ea05c64 2094function is_pool_scrubbing #pool <verbose>
6bb24f4d 2095{
0ea05c64 2096 check_pool_status "$1" "scan" "scrub in progress since " $2
6bb24f4d
BB
2097 return $?
2098}
2099
0ea05c64 2100function is_pool_scrubbed #pool <verbose>
6bb24f4d 2101{
0ea05c64 2102 check_pool_status "$1" "scan" "scrub repaired" $2
6bb24f4d
BB
2103 return $?
2104}
2105
0ea05c64 2106function is_pool_scrub_stopped #pool <verbose>
6bb24f4d 2107{
0ea05c64 2108 check_pool_status "$1" "scan" "scrub canceled" $2
6bb24f4d
BB
2109 return $?
2110}
2111
0ea05c64 2112function is_pool_scrub_paused #pool <verbose>
6bb24f4d 2113{
0ea05c64 2114 check_pool_status "$1" "scan" "scrub paused since " $2
6bb24f4d
BB
2115 return $?
2116}
2117
a1d477c2
MA
2118function is_pool_removing #pool
2119{
2120 check_pool_status "$1" "remove" "in progress since "
2121 return $?
2122}
2123
2124function is_pool_removed #pool
2125{
2126 check_pool_status "$1" "remove" "completed on"
2127 return $?
2128}
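
#
# Illustrative sketch (hypothetical, not used by any test case): start and
# then pause a scrub, verifying the paused state with the wrapper above.
# Assumes 'zpool scrub -p' (pause) is supported; on a very small pool the
# scrub may finish before it can be paused.
#
function example_pause_scrub # pool
{
	typeset pool=$1

	log_must zpool scrub $pool
	log_must zpool scrub -p $pool
	log_must is_pool_scrub_paused $pool
}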
2129
6bb24f4d 2130#
4e33ba4c 2131# Use create_pool()/destroy_pool() to clean up the information in
6bb24f4d
BB
2132# the given disk to avoid slice overlapping.
2133#
2134function cleanup_devices #vdevs
2135{
2136 typeset pool="foopool$$"
2137
2138 if poolexists $pool ; then
2139 destroy_pool $pool
2140 fi
2141
2142 create_pool $pool $@
2143 destroy_pool $pool
2144
2145 return 0
2146}
2147
6bb24f4d
BB
2148#/**
2149# A function to find and locate free disks on a system or from given
2150# disks as the parameter. It works by excluding disks that are in use
2151# as mounted filesystems, swap devices, or dump devices.
2152#
2153# $@ given disks to find which are free, default is all disks in
2154# the test system
2155#
2156# @return a string containing the list of available disks
2157#*/
2158function find_disks
2159{
2160 # Trust provided list, no attempt is made to locate unused devices.
2161 if is_linux; then
c1d9abf9 2162 echo "$@"
6bb24f4d
BB
2163 return
2164 fi
2165
2166
2167 sfi=/tmp/swaplist.$$
2168 dmpi=/tmp/dumpdev.$$
2169 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2170
c1d9abf9
JWK
2171 swap -l > $sfi
2172 dumpadm > $dmpi 2>/dev/null
6bb24f4d
BB
2173
2174# write an awk script that can process the output of format
2175# to produce a list of disks we know about. Note that we have
2176# to escape "$2" so that the shell doesn't interpret it while
2177# we're creating the awk script.
2178# -------------------
c1d9abf9 2179 cat > /tmp/find_disks.awk <<EOF
6bb24f4d
BB
2180#!/bin/nawk -f
2181 BEGIN { FS="."; }
2182
2183 /^Specify disk/{
2184 searchdisks=0;
2185 }
2186
2187 {
2188 if (searchdisks && \$2 !~ "^$"){
2189 split(\$2,arr," ");
2190 print arr[1];
2191 }
2192 }
2193
2194 /^AVAILABLE DISK SELECTIONS:/{
2195 searchdisks=1;
2196 }
2197EOF
2198#---------------------
2199
c1d9abf9
JWK
2200 chmod 755 /tmp/find_disks.awk
2201 disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
2202 rm /tmp/find_disks.awk
6bb24f4d
BB
2203
2204 unused=""
2205 for disk in $disks; do
2206 # Check for mounted
c1d9abf9 2207 grep "${disk}[sp]" /etc/mnttab >/dev/null
6bb24f4d
BB
2208 (($? == 0)) && continue
2209 # Check for swap
c1d9abf9 2210 grep "${disk}[sp]" $sfi >/dev/null
6bb24f4d
BB
2211 (($? == 0)) && continue
2212 # check for dump device
c1d9abf9 2213 grep "${disk}[sp]" $dmpi >/dev/null
6bb24f4d
BB
2214 (($? == 0)) && continue
2215 # check to see if this disk hasn't been explicitly excluded
2216 # by a user-set environment variable
c1d9abf9 2217 echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
6bb24f4d
BB
2218 (($? == 0)) && continue
2219 unused_candidates="$unused_candidates $disk"
2220 done
c1d9abf9
JWK
2221 rm $sfi
2222 rm $dmpi
6bb24f4d
BB
2223
2224# now just check to see if those disks do actually exist
2225# by looking for a device pointing to the first slice in
2226# each case. limit the number to max_finddisksnum
2227 count=0
2228 for disk in $unused_candidates; do
2229 if [ -b $DEV_DSKDIR/${disk}s0 ]; then
2230 if [ $count -lt $max_finddisksnum ]; then
2231 unused="$unused $disk"
2232 # do not impose limit if $@ is provided
2233 [[ -z $@ ]] && ((count = count + 1))
2234 fi
2235 fi
2236 done
2237
2238# finally, return our disk list
c1d9abf9 2239 echo $unused
6bb24f4d
BB
2240}
2241
2242#
2243# Add specified user to specified group
2244#
2245# $1 group name
2246# $2 user name
2247# $3 base of the homedir (optional)
2248#
2249function add_user #<group_name> <user_name> <basedir>
2250{
2251 typeset gname=$1
2252 typeset uname=$2
2253 typeset basedir=${3:-"/var/tmp"}
2254
2255 if ((${#gname} == 0 || ${#uname} == 0)); then
2256 log_fail "group name or user name is not defined."
2257 fi
2258
c1d9abf9
JWK
2259 log_must useradd -g $gname -d $basedir/$uname -m $uname
2260 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.profile
2261 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.bash_profile
2262 echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.login
6bb24f4d 2263
f74b821a
BB
2264 # Add new users to the same group as the command line utilities.
2265 # This allows them to be run out of the original user's home
2266 # directory as long as it is permissioned to be group readable.
2267 if is_linux; then
c1d9abf9
JWK
2268 cmd_group=$(stat --format="%G" $(which zfs))
2269 log_must usermod -a -G $cmd_group $uname
f74b821a
BB
2270 fi
2271
6bb24f4d
BB
2272 return 0
2273}
2274
2275#
2276# Delete the specified user.
2277#
2278# $1 login name
2279# $2 base of the homedir (optional)
2280#
2281function del_user #<logname> <basedir>
2282{
2283 typeset user=$1
2284 typeset basedir=${2:-"/var/tmp"}
2285
2286 if ((${#user} == 0)); then
2287 log_fail "login name is necessary."
2288 fi
2289
c1d9abf9 2290 if id $user > /dev/null 2>&1; then
29e07af5 2291 log_must_retry "currently used" 5 userdel $user
6bb24f4d
BB
2292 fi
2293
c1d9abf9 2294 [[ -d $basedir/$user ]] && rm -fr $basedir/$user
6bb24f4d
BB
2295
2296 return 0
2297}
2298
2299#
2300# Select valid gid and create specified group.
2301#
2302# $1 group name
2303#
2304function add_group #<group_name>
2305{
2306 typeset group=$1
2307
2308 if ((${#group} == 0)); then
2309 log_fail "group name is necessary."
2310 fi
2311
2312 # Assign 100 as the base gid, a larger value is selected for
2313 # Linux because for many distributions 1000 and under are reserved.
2314 if is_linux; then
6bb24f4d 2315 while true; do
c1d9abf9 2316 groupadd $group > /dev/null 2>&1
6bb24f4d
BB
2317 typeset -i ret=$?
2318 case $ret in
2319 0) return 0 ;;
6bb24f4d
BB
2320 *) return 1 ;;
2321 esac
2322 done
2323 else
2324 typeset -i gid=100
6bb24f4d 2325 while true; do
c1d9abf9 2326 groupadd -g $gid $group > /dev/null 2>&1
6bb24f4d
BB
2327 typeset -i ret=$?
2328 case $ret in
2329 0) return 0 ;;
2330 # The gid is not unique
2331 4) ((gid += 1)) ;;
2332 *) return 1 ;;
2333 esac
2334 done
2335 fi
2336}
2337
2338#
2339# Delete the specified group.
2340#
2341# $1 group name
2342#
2343function del_group #<group_name>
2344{
2345 typeset grp=$1
2346 if ((${#grp} == 0)); then
2347 log_fail "group name is necessary."
2348 fi
2349
2350 if is_linux; then
c1d9abf9 2351 getent group $grp > /dev/null 2>&1
6bb24f4d
BB
2352 typeset -i ret=$?
2353 case $ret in
2354 # Group does not exist.
2355 2) return 0 ;;
2356 # Name already exists as a group name
c1d9abf9 2357 0) log_must groupdel $grp ;;
6bb24f4d
BB
2358 *) return 1 ;;
2359 esac
2360 else
c1d9abf9 2361 groupmod -n $grp $grp > /dev/null 2>&1
6bb24f4d
BB
2362 typeset -i ret=$?
2363 case $ret in
2364 # Group does not exist.
2365 6) return 0 ;;
2366 # Name already exists as a group name
c1d9abf9 2367 9) log_must groupdel $grp ;;
6bb24f4d
BB
2368 *) return 1 ;;
2369 esac
2370 fi
2371
2372 return 0
2373}
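
#
# Illustrative sketch (not used by any test case): typical lifecycle of a
# throw-away unprivileged user and group for delegation-style tests.  The
# names below are placeholders, not framework variables.
#
function example_user_lifecycle
{
	typeset group=zfsgrp$$
	typeset user=zfsusr$$

	log_must add_group $group
	log_must add_user $group $user
	# ... run the operations under test as $user here ...
	log_must del_user $user
	log_must del_group $group
}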
2374
2375#
2376# This function will return true if it's safe to destroy the pool passed
2377# as argument 1. It checks for pools based on zvols and files, and also
2378# files contained in a pool that may have a different mountpoint.
2379#
2380function safe_to_destroy_pool { # $1 the pool name
2381
2382 typeset pool=""
2383 typeset DONT_DESTROY=""
2384
2385 # We check that by deleting the $1 pool, we're not
2386 # going to pull the rug out from other pools. Do this
2387 # by looking at all other pools, ensuring that they
2388 # aren't built from files or zvols contained in this pool.
2389
c1d9abf9 2390 for pool in $(zpool list -H -o name)
6bb24f4d
BB
2391 do
2392 ALTMOUNTPOOL=""
2393
2394 # this is a list of file-backed vdevs in $pool whose paths lie
2395 # within the pool we have been asked to destroy
c1d9abf9
JWK
2396 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2397 awk '{print $1}')
6bb24f4d
BB
2398
2399 # this is a list of the zvols that make up the pool
c1d9abf9
JWK
2400 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2401 | awk '{print $1}')
6bb24f4d
BB
2402
2403 # also want to determine if it's a file-based pool using an
2404 # alternate mountpoint...
c1d9abf9
JWK
2405 POOL_FILE_DIRS=$(zpool status -v $pool | \
2406 grep / | awk '{print $1}' | \
2407 awk -F/ '{print $2}' | grep -v "dev")
6bb24f4d
BB
2408
2409 for pooldir in $POOL_FILE_DIRS
2410 do
c1d9abf9
JWK
2411 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2412 grep "${pooldir}$" | awk '{print $1}')
6bb24f4d
BB
2413
2414 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2415 done
2416
2417
2418 if [ ! -z "$ZVOLPOOL" ]
2419 then
2420 DONT_DESTROY="true"
2421 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2422 fi
2423
2424 if [ ! -z "$FILEPOOL" ]
2425 then
2426 DONT_DESTROY="true"
2427 log_note "Pool $pool is built from $FILEPOOL on $1"
2428 fi
2429
2430 if [ ! -z "$ALTMOUNTPOOL" ]
2431 then
2432 DONT_DESTROY="true"
2433 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2434 fi
2435 done
2436
2437 if [ -z "${DONT_DESTROY}" ]
2438 then
2439 return 0
2440 else
2441 log_note "Warning: it is not safe to destroy $1!"
2442 return 1
2443 fi
2444}
2445
2446#
2447# Get the available ZFS compression options
2448# $1 option type zfs_set|zfs_compress
2449#
2450function get_compress_opts
2451{
2452 typeset COMPRESS_OPTS
2453 typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2454 gzip-6 gzip-7 gzip-8 gzip-9"
2455
2456 if [[ $1 == "zfs_compress" ]] ; then
2457 COMPRESS_OPTS="on lzjb"
2458 elif [[ $1 == "zfs_set" ]] ; then
2459 COMPRESS_OPTS="on off lzjb"
2460 fi
2461 typeset valid_opts="$COMPRESS_OPTS"
c1d9abf9 2462 zfs get 2>&1 | grep gzip >/dev/null 2>&1
6bb24f4d
BB
2463 if [[ $? -eq 0 ]]; then
2464 valid_opts="$valid_opts $GZIP_OPTS"
2465 fi
c1d9abf9 2466 echo "$valid_opts"
6bb24f4d
BB
2467}
2468
2469#
2470# Verify zfs operation with -p option work as expected
2471# $1 operation, value could be create, clone or rename
2472# $2 dataset type, value could be fs or vol
2473# $3 dataset name
2474# $4 new dataset name
2475#
2476function verify_opt_p_ops
2477{
2478 typeset ops=$1
2479 typeset datatype=$2
2480 typeset dataset=$3
2481 typeset newdataset=$4
2482
2483 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2484 log_fail "$datatype is not supported."
2485 fi
2486
2487 # check parameters accordingly
2488 case $ops in
2489 create)
2490 newdataset=$dataset
2491 dataset=""
2492 if [[ $datatype == "vol" ]]; then
2493 ops="create -V $VOLSIZE"
2494 fi
2495 ;;
2496 clone)
2497 if [[ -z $newdataset ]]; then
2498 log_fail "newdataset should not be empty" \
2499 "when ops is $ops."
2500 fi
2501 log_must datasetexists $dataset
2502 log_must snapexists $dataset
2503 ;;
2504 rename)
2505 if [[ -z $newdataset ]]; then
2506 log_fail "newdataset should not be empty" \
2507 "when ops is $ops."
2508 fi
2509 log_must datasetexists $dataset
2510 log_mustnot snapexists $dataset
2511 ;;
2512 *)
2513 log_fail "$ops is not supported."
2514 ;;
2515 esac
2516
2517 # make sure the upper level filesystem does not exist
c7b55e71 2518 destroy_dataset "${newdataset%/*}" "-rRf"
6bb24f4d
BB
2519
2520 # without -p option, operation will fail
c1d9abf9 2521 log_mustnot zfs $ops $dataset $newdataset
6bb24f4d
BB
2522 log_mustnot datasetexists $newdataset ${newdataset%/*}
2523
2524 # with -p option, operation should succeed
c1d9abf9 2525 log_must zfs $ops -p $dataset $newdataset
6bb24f4d
BB
2526 block_device_wait
2527
2528 if ! datasetexists $newdataset ; then
2529 log_fail "-p option does not work for $ops"
2530 fi
2531
2532 # when $ops is create or clone, redo the operation still return zero
2533 if [[ $ops != "rename" ]]; then
c1d9abf9 2534 log_must zfs $ops -p $dataset $newdataset
6bb24f4d
BB
2535 fi
2536
2537 return 0
2538}
2539
2540#
2541# Get configuration of pool
2542# $1 pool name
2543# $2 config name
2544#
2545function get_config
2546{
2547 typeset pool=$1
2548 typeset config=$2
2549 typeset alt_root
2550
2551 if ! poolexists "$pool" ; then
2552 return 1
2553 fi
c1d9abf9 2554 alt_root=$(zpool list -H $pool | awk '{print $NF}')
6bb24f4d 2555 if [[ $alt_root == "-" ]]; then
c1d9abf9 2556 value=$(zdb -C $pool | grep "$config:" | awk -F: \
6bb24f4d
BB
2557 '{print $2}')
2558 else
c1d9abf9 2559 value=$(zdb -e $pool | grep "$config:" | awk -F: \
6bb24f4d
BB
2560 '{print $2}')
2561 fi
2562 if [[ -n $value ]] ; then
2563 value=${value#\'}
2564 value=${value%\'}
2565 fi
2566 echo $value
2567
2568 return 0
2569}
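
#
# Example usage (illustrative; $TESTPOOL is provided by the framework).
# 'pool_guid' and 'version' are standard keys in the zdb config output.
#
#	typeset guid=$(get_config $TESTPOOL pool_guid)
#	typeset vers=$(get_config $TESTPOOL version)
#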
2570
2571#
2572# Private function. Randomly select one of the items from the arguments.
2573#
2574# $1 count
2575# $2-n string
2576#
2577function _random_get
2578{
2579 typeset cnt=$1
2580 shift
2581
2582 typeset str="$@"
2583 typeset -i ind
2584 ((ind = RANDOM % cnt + 1))
2585
c1d9abf9
JWK
2586 typeset ret=$(echo "$str" | cut -f $ind -d ' ')
2587 echo $ret
6bb24f4d
BB
2588}
2589
2590#
2591# Randomly select one item from the arguments, allowing an empty ("none") result
2592#
2593function random_get_with_non
2594{
2595 typeset -i cnt=$#
2596 ((cnt += 1))
2597
2598 _random_get "$cnt" "$@"
2599}
2600
2601#
2602# Randomly select one item from the arguments, never returning an empty result
2603#
2604function random_get
2605{
2606 _random_get "$#" "$@"
2607}
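
#
# Example usage (illustrative): pick a random compression property value from
# the options returned by get_compress_opts() above.
#
#	typeset value=$(random_get $(get_compress_opts zfs_set))
#	typeset maybe_empty=$(random_get_with_non $(get_compress_opts zfs_set))
#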
2608
2609#
2610# Detect whether the current system supports slog devices
2611#
2612function verify_slog_support
2613{
3fd3e56c 2614 typeset dir=$TEST_BASE_DIR/disk.$$
6bb24f4d
BB
2615 typeset pool=foo.$$
2616 typeset vdev=$dir/a
2617 typeset sdev=$dir/b
2618
c1d9abf9
JWK
2619 mkdir -p $dir
2620 mkfile $MINVDEVSIZE $vdev $sdev
6bb24f4d
BB
2621
2622 typeset -i ret=0
c1d9abf9 2623 if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
6bb24f4d
BB
2624 ret=1
2625 fi
c1d9abf9 2626 rm -r $dir
6bb24f4d
BB
2627
2628 return $ret
2629}
2630
2631#
2632# Generate a dataset name of the specified length
2633# $1, the length of the name
2634# $2, the base string to construct the name
2635#
2636function gen_dataset_name
2637{
2638 typeset -i len=$1
2639 typeset basestr="$2"
2640 typeset -i baselen=${#basestr}
2641 typeset -i iter=0
2642 typeset l_name=""
2643
2644 if ((len % baselen == 0)); then
2645 ((iter = len / baselen))
2646 else
2647 ((iter = len / baselen + 1))
2648 fi
2649 while ((iter > 0)); do
2650 l_name="${l_name}$basestr"
2651
2652 ((iter -= 1))
2653 done
2654
c1d9abf9 2655 echo $l_name
6bb24f4d
BB
2656}
2657
2658#
2659# Get cksum tuple of dataset
2660# $1 dataset name
2661#
2662# sample zdb output:
2663# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2664# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2665# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2666# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2667function datasetcksum
2668{
2669 typeset cksum
c1d9abf9
JWK
2670 sync
2671 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2672 | awk -F= '{print $7}')
2673 echo $cksum
6bb24f4d
BB
2674}
2675
2676#
2677# Get cksum of file
2678# $1 file path
2679#
2680function checksum
2681{
2682 typeset cksum
c1d9abf9
JWK
2683 cksum=$(cksum $1 | awk '{print $1}')
2684 echo $cksum
6bb24f4d
BB
2685}
2686
2687#
2688# Get the given disk/slice state from the specific field of the pool
2689#
2690function get_device_state #pool disk field("", "spares","logs")
2691{
2692 typeset pool=$1
2693 typeset disk=${2#$DEV_DSKDIR/}
2694 typeset field=${3:-$pool}
2695
c1d9abf9
JWK
2696 state=$(zpool status -v "$pool" 2>/dev/null | \
2697 nawk -v device=$disk -v pool=$pool -v field=$field \
6bb24f4d
BB
2698 'BEGIN {startconfig=0; startfield=0; }
2699 /config:/ {startconfig=1}
2700 (startconfig==1) && ($1==field) {startfield=1; next;}
2701 (startfield==1) && ($1==device) {print $2; exit;}
2702 (startfield==1) &&
2703 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2704 echo $state
2705}
2706
2707
2708#
2709# print the given directory filesystem type
2710#
2711# $1 directory name
2712#
2713function get_fstype
2714{
2715 typeset dir=$1
2716
2717 if [[ -z $dir ]]; then
2718 log_fail "Usage: get_fstype <directory>"
2719 fi
2720
2721 #
2722 # $ df -n /
2723 # / : ufs
2724 #
c1d9abf9 2725 df -n $dir | awk '{print $3}'
6bb24f4d
BB
2726}
2727
2728#
2729# Given a disk, label it to VTOC regardless of what label was on the disk
2730# $1 disk
2731#
2732function labelvtoc
2733{
2734 typeset disk=$1
2735 if [[ -z $disk ]]; then
2736 log_fail "The disk name is unspecified."
2737 fi
2738 typeset label_file=/var/tmp/labelvtoc.$$
c1d9abf9 2739 typeset arch=$(uname -p)
6bb24f4d
BB
2740
2741 if is_linux; then
2742 log_note "Currently unsupported by the test framework"
2743 return 1
2744 fi
2745
2746 if [[ $arch == "i386" ]]; then
c1d9abf9
JWK
2747 echo "label" > $label_file
2748 echo "0" >> $label_file
2749 echo "" >> $label_file
2750 echo "q" >> $label_file
2751 echo "q" >> $label_file
6bb24f4d 2752
c1d9abf9 2753 fdisk -B $disk >/dev/null 2>&1
6bb24f4d 2754 # wait a while for fdisk to finish
c1d9abf9 2755 sleep 60
6bb24f4d 2756 elif [[ $arch == "sparc" ]]; then
c1d9abf9
JWK
2757 echo "label" > $label_file
2758 echo "0" >> $label_file
2759 echo "" >> $label_file
2760 echo "" >> $label_file
2761 echo "" >> $label_file
2762 echo "q" >> $label_file
6bb24f4d
BB
2763 else
2764 log_fail "unknown arch type"
2765 fi
2766
c1d9abf9 2767 format -e -s -d $disk -f $label_file
6bb24f4d 2768 typeset -i ret_val=$?
c1d9abf9 2769 rm -f $label_file
6bb24f4d
BB
2770 #
2771 # wait for format to finish
2772 #
c1d9abf9 2773 sleep 60
6bb24f4d
BB
2774 if ((ret_val != 0)); then
2775 log_fail "unable to label $disk as VTOC."
2776 fi
2777
2778 return 0
2779}
2780
2781#
2782# check if the system was installed as zfsroot or not
2783# return: 0 if true, 1 otherwise
2784#
2785function is_zfsroot
2786{
c1d9abf9 2787 df -n / | grep zfs > /dev/null 2>&1
6bb24f4d
BB
2788 return $?
2789}
2790
2791#
2792# get the root filesystem name if it's a zfsroot system.
2793#
2794# return: root filesystem name
2795function get_rootfs
2796{
2797 typeset rootfs=""
8aab1218
TS
2798
2799 if ! is_linux; then
2800 rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
2801 /etc/mnttab)
2802 fi
6bb24f4d
BB
2803 if [[ -z "$rootfs" ]]; then
2804 log_fail "Can not get rootfs"
2805 fi
c1d9abf9 2806 zfs list $rootfs > /dev/null 2>&1
6bb24f4d 2807 if (($? == 0)); then
c1d9abf9 2808 echo $rootfs
6bb24f4d
BB
2809 else
2810 log_fail "This is not a zfsroot system."
2811 fi
2812}
2813
2814#
2815# get the rootfs's pool name
2816# return:
2817# rootpool name
2818#
2819function get_rootpool
2820{
2821 typeset rootfs=""
2822 typeset rootpool=""
8aab1218
TS
2823
2824 if ! is_linux; then
2825 rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
2826 /etc/mnttab)
2827 fi
6bb24f4d
BB
2828 if [[ -z "$rootfs" ]]; then
2829 log_fail "Can not get rootpool"
2830 fi
c1d9abf9 2831 zfs list $rootfs > /dev/null 2>&1
6bb24f4d 2832 if (($? == 0)); then
c1d9abf9
JWK
2833 rootpool=`echo $rootfs | awk -F\/ '{print $1}'`
2834 echo $rootpool
6bb24f4d
BB
2835 else
2836 log_fail "This is not a zfsroot system."
2837 fi
2838}
2839
6bb24f4d
BB
2840#
2841# Get the package name
2842#
2843function get_package_name
2844{
2845 typeset dirpath=${1:-$STC_NAME}
2846
2847 echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2848}
2849
2850#
2851# Get the number of words in a whitespace-separated string
2852#
2853function get_word_count
2854{
c1d9abf9 2855 echo $1 | wc -w
6bb24f4d
BB
2856}
2857
2858#
2859# Verify that the required number of disks is given
2860#
2861function verify_disk_count
2862{
2863 typeset -i min=${2:-1}
2864
2865 typeset -i count=$(get_word_count "$1")
2866
2867 if ((count < min)); then
2868 log_untested "A minimum of $min disks is required to run." \
2869 " You specified $count disk(s)"
2870 fi
2871}
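
#
# Example usage (illustrative; $DISKS is supplied by the test runner):
#
#	verify_disk_count "$DISKS" 2
#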
2872
2873function ds_is_volume
2874{
2875 typeset type=$(get_prop type $1)
2876 [[ $type = "volume" ]] && return 0
2877 return 1
2878}
2879
2880function ds_is_filesystem
2881{
2882 typeset type=$(get_prop type $1)
2883 [[ $type = "filesystem" ]] && return 0
2884 return 1
2885}
2886
2887function ds_is_snapshot
2888{
2889 typeset type=$(get_prop type $1)
2890 [[ $type = "snapshot" ]] && return 0
2891 return 1
2892}
2893
2894#
2895# Check if Trusted Extensions are installed and enabled
2896#
2897function is_te_enabled
2898{
c1d9abf9 2899 svcs -H -o state labeld 2>/dev/null | grep "enabled"
6bb24f4d
BB
2900 if (($? != 0)); then
2901 return 1
2902 else
2903 return 0
2904 fi
2905}
2906
2907# Utility function to determine if a system has multiple cpus.
2908function is_mp
2909{
2910 if is_linux; then
c1d9abf9 2911 (($(nproc) > 1))
6bb24f4d 2912 else
c1d9abf9 2913 (($(psrinfo | wc -l) > 1))
6bb24f4d
BB
2914 fi
2915
2916 return $?
2917}
2918
2919function get_cpu_freq
2920{
2921 if is_linux; then
c1d9abf9 2922 lscpu | awk '/CPU MHz/ { print $3 }'
6bb24f4d 2923 else
c1d9abf9 2924 psrinfo -v 0 | awk '/processor operates at/ {print $6}'
6bb24f4d
BB
2925 fi
2926}
2927
2928# Run the given command as the user provided.
2929function user_run
2930{
2931 typeset user=$1
2932 shift
2933
f74b821a 2934 log_note "user:$user $@"
c1d9abf9 2935 eval su - \$user -c \"$@\" > /tmp/out 2>/tmp/err
6bb24f4d
BB
2936 return $?
2937}
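
#
# Illustrative sketch (not used by any test case): run read-only commands as
# an unprivileged user previously created with add_user().
#
function example_user_run # user
{
	typeset user=$1

	log_must user_run $user "zfs list"
	log_must user_run $user "zpool status"
}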
2938
2939#
2940# Check if the pool contains the specified vdevs
2941#
2942# $1 pool
2943# $2..n <vdev> ...
2944#
2945# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2946# vdevs is not in the pool, and 2 if pool name is missing.
2947#
2948function vdevs_in_pool
2949{
2950 typeset pool=$1
2951 typeset vdev
2952
2953 if [[ -z $pool ]]; then
2954 log_note "Missing pool name."
2955 return 2
2956 fi
2957
2958 shift
2959
c1d9abf9
JWK
2960 typeset tmpfile=$(mktemp)
2961 zpool list -Hv "$pool" >$tmpfile
6bb24f4d 2962 for vdev in $@; do
c1d9abf9 2963 grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
6bb24f4d
BB
2964 [[ $? -ne 0 ]] && return 1
2965 done
2966
c1d9abf9 2967 rm -f $tmpfile
6bb24f4d
BB
2968
2969 return 0;
2970}
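
#
# Example usage (illustrative; $DISK1 and $DISK2 are assumed to be set by the
# calling test's setup):
#
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2
#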
2971
679d73e9
JWK
2972function get_max
2973{
2974 typeset -l i max=$1
2975 shift
2976
2977 for i in "$@"; do
2978 max=$(echo $((max > i ? max : i)))
2979 done
2980
2981 echo $max
2982}
2983
2984function get_min
2985{
2986 typeset -l i min=$1
2987 shift
2988
2989 for i in "$@"; do
2990 min=$(echo $((min < i ? min : i)))
2991 done
2992
2993 echo $min
2994}
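
#
# Example usage (illustrative):
#
#	typeset largest=$(get_max $size1 $size2 $size3)
#	typeset smallest=$(get_min $size1 $size2 $size3)
#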
2995
a7004725
DK
2996#
2997# Generate a random number between 1 and the argument.
2998#
2999function random
3000{
3001 typeset max=$1
3002 echo $(( ($RANDOM % $max) + 1 ))
3003}
3004
3005# Write data that can be compressed into a directory
3006function write_compressible
3007{
3008 typeset dir=$1
3009 typeset megs=$2
3010 typeset nfiles=${3:-1}
3011 typeset bs=${4:-1024k}
3012 typeset fname=${5:-file}
3013
3014 [[ -d $dir ]] || log_fail "No directory: $dir"
3015
3016 # Under Linux fio is not currently used since its behavior can
3017 # differ significantly across versions. This includes missing
3018 # command line options and cases where the --buffer_compress_*
3019 # options fail to behave as expected.
3020 if is_linux; then
3021 typeset file_bytes=$(to_bytes $megs)
3022 typeset bs_bytes=4096
3023 typeset blocks=$(($file_bytes / $bs_bytes))
3024
3025 for (( i = 0; i < $nfiles; i++ )); do
3026 truncate -s $file_bytes $dir/$fname.$i
3027
3028 # Write every third block to get 66% compression.
3029 for (( j = 0; j < $blocks; j += 3 )); do
3030 dd if=/dev/urandom of=$dir/$fname.$i \
3031 seek=$j bs=$bs_bytes count=1 \
3032 conv=notrunc >/dev/null 2>&1
3033 done
3034 done
3035 else
3036 log_must eval "fio \
3037 --name=job \
3038 --fallocate=0 \
3039 --minimal \
3040 --randrepeat=0 \
3041 --buffer_compress_percentage=66 \
3042 --buffer_compress_chunk=4096 \
3043 --directory=$dir \
3044 --numjobs=$nfiles \
3045 --nrfiles=$nfiles \
3046 --rw=write \
3047 --bs=$bs \
3048 --filesize=$megs \
3049 --filename_format='$fname.\$jobnum' >/dev/null"
3050 fi
3051}
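
#
# Illustrative sketch (not used by any test case): fill a compression-enabled
# filesystem with ~66% compressible data and flush it to disk.  $TESTPOOL and
# $TESTFS come from the framework; the mount point is passed by the caller.
#
function example_fill_compressible # mntpnt
{
	typeset mntpnt=$1

	log_must zfs set compression=on $TESTPOOL/$TESTFS
	write_compressible $mntpnt 64m 4
	sync_pool $TESTPOOL
}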
3052
3053function get_objnum
3054{
3055 typeset pathname=$1
3056 typeset objnum
3057
3058 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3059 objnum=$(stat -c %i $pathname)
3060 echo $objnum
3061}
3062
1de321e6 3063#
bec1067d 3064# Sync data to the pool
1de321e6
JX
3065#
3066# $1 pool name
bec1067d 3067# $2 boolean to force uberblock (and config including zpool cache file) update
1de321e6 3068#
bec1067d 3069function sync_pool #pool <force>
1de321e6
JX
3070{
3071 typeset pool=${1:-$TESTPOOL}
bec1067d 3072 typeset force=${2:-false}
1de321e6 3073
bec1067d
AP
3074 if [[ $force == true ]]; then
3075 log_must zpool sync -f $pool
3076 else
3077 log_must zpool sync $pool
3078 fi
3079
3080 return 0
1de321e6 3081}
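
#
# Example usage (illustrative, assuming the default mountpoint): force
# recently written data out to disk (and update the uberblock) before
# inspecting the pool with zdb.
#
#	log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/file bs=1024k count=4
#	sync_pool $TESTPOOL true
#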
d834b9ce
GM
3082
3083#
3084# Wait for zpool 'freeing' property drops to zero.
3085#
3086# $1 pool name
3087#
3088function wait_freeing #pool
3089{
3090 typeset pool=${1:-$TESTPOOL}
3091 while true; do
c1d9abf9
JWK
3092 [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3093 log_must sleep 1
d834b9ce
GM
3094 done
3095}
7a4500a1 3096
dddef7d6 3097#
3098# Wait for every device replace operation to complete
3099#
3100# $1 pool name
3101#
3102function wait_replacing #pool
3103{
3104 typeset pool=${1:-$TESTPOOL}
3105 while true; do
3106 [[ "" == "$(zpool status $pool |
3107 awk '/replacing-[0-9]+/ {print $1}')" ]] && break
3108 log_must sleep 1
3109 done
3110}
3111
bf95a000
TH
3112#
3113# Wait for a pool to be scrubbed
3114#
3115# $1 pool name
3116# $2 number of seconds to wait (optional)
3117#
3118# Returns true when pool has been scrubbed, or false if there's a timeout or if
3119# no scrub was done.
3120#
3121function wait_scrubbed
3122{
3123 typeset pool=${1:-$TESTPOOL}
3124 typeset iter=${2:-10}
3125 for i in {1..$iter} ; do
3126 if is_pool_scrubbed $pool ; then
3127 return 0
3128 fi
3129 sleep 1
3130 done
3131 return 1
3132}
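
#
# Illustrative sketch (not used by any test case): kick off a scrub and wait
# up to 60 seconds for it to complete.
#
function example_scrub_and_wait # pool
{
	typeset pool=$1

	log_must zpool scrub $pool
	log_must wait_scrubbed $pool 60
}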
3133
639b1894
TH
3134# Back up the zed.rc in our test directory so that we can edit it for our test.
3135#
3136# Returns: Backup file name. You will need to pass this to zed_rc_restore().
3137function zed_rc_backup
3138{
3139 zedrc_backup="$(mktemp)"
3140 cp $ZEDLET_DIR/zed.rc $zedrc_backup
3141 echo $zedrc_backup
3142}
3143
3144function zed_rc_restore
3145{
3146 mv $1 $ZEDLET_DIR/zed.rc
3147}
3148
95401cb6
BB
3149#
3150# Setup custom environment for the ZED.
3151#
bf95a000 3152# $@ Optional list of zedlets to run under zed.
95401cb6
BB
3153function zed_setup
3154{
3155 if ! is_linux; then
3156 return
3157 fi
3158
3159 if [[ ! -d $ZEDLET_DIR ]]; then
3160 log_must mkdir $ZEDLET_DIR
3161 fi
3162
3163 if [[ ! -e $VDEVID_CONF ]]; then
3164 log_must touch $VDEVID_CONF
3165 fi
3166
3167 if [[ -e $VDEVID_CONF_ETC ]]; then
3168 log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3169 fi
bf95a000 3170 EXTRA_ZEDLETS=$@
95401cb6
BB
3171
3172 # Create a symlink for /etc/zfs/vdev_id.conf file.
3173 log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3174
3175 # Setup minimal ZED configuration. Individual test cases should
3176 # add additional ZEDLETs as needed for their specific test.
3f03fc8d
BB
3177 log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3178 log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
95401cb6 3179
bf95a000
TH
3180 # Scripts must only be user writable.
3181 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3182 saved_umask=$(umask)
3183 log_must umask 0022
3184 for i in $EXTRA_ZEDLETS ; do
3185 log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3186 done
3187 log_must umask $saved_umask
3188 fi
3189
3f03fc8d
BB
3190 # Customize the zed.rc file to enable the full debug log.
3191 log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
d5e024cb 3192 echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3f03fc8d 3193
95401cb6
BB
3194}
3195
3196#
3197# Cleanup custom ZED environment.
3198#
bf95a000 3199# $@ Optional list of zedlets to remove from our test zed.d directory.
95401cb6
BB
3200function zed_cleanup
3201{
3202 if ! is_linux; then
3203 return
3204 fi
bf95a000 3205 EXTRA_ZEDLETS=$@
95401cb6
BB
3206
3207 log_must rm -f ${ZEDLET_DIR}/zed.rc
3208 log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
3209 log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
3f03fc8d 3210 log_must rm -f ${ZEDLET_DIR}/all-debug.sh
95401cb6 3211 log_must rm -f ${ZEDLET_DIR}/state
bf95a000
TH
3212
3213 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3214 for i in $EXTRA_ZEDLETS ; do
3215 log_must rm -f ${ZEDLET_DIR}/$i
3216 done
3217 fi
d5e024cb
BB
3218 log_must rm -f $ZED_LOG
3219 log_must rm -f $ZED_DEBUG_LOG
95401cb6
BB
3220 log_must rm -f $VDEVID_CONF_ETC
3221 log_must rm -f $VDEVID_CONF
3222 rmdir $ZEDLET_DIR
3223}
3224
7a4500a1
SV
3225#
3226# Check if ZED is currently running, if not start ZED.
3227#
3228function zed_start
3229{
95401cb6
BB
3230 if ! is_linux; then
3231 return
3232 fi
7a4500a1 3233
95401cb6
BB
3234 # ZEDLET_DIR=/var/tmp/zed
3235 if [[ ! -d $ZEDLET_DIR ]]; then
3236 log_must mkdir $ZEDLET_DIR
3237 fi
7a4500a1 3238
95401cb6
BB
3239 # Verify the ZED is not already running.
3240 pgrep -x zed > /dev/null
3241 if (($? == 0)); then
3242 log_fail "ZED already running"
7a4500a1 3243 fi
95401cb6
BB
3244
3245 log_note "Starting ZED"
3246 # run ZED in the background and redirect foreground logging
d5e024cb
BB
3247 # output to $ZED_LOG.
3248 log_must truncate -s 0 $ZED_DEBUG_LOG
bf95a000 3249 log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
d5e024cb 3250 "-s $ZEDLET_DIR/state 2>$ZED_LOG &"
3f03fc8d
BB
3251
3252 return 0
7a4500a1
SV
3253}
3254
3255#
3256# Kill ZED process
3257#
3258function zed_stop
3259{
95401cb6
BB
3260 if ! is_linux; then
3261 return
3262 fi
3263
3f03fc8d 3264 log_note "Stopping ZED"
95401cb6
BB
3265 if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
3266 zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
d5e024cb 3267 kill $zedpid
4e9b1569 3268 while ps -p $zedpid > /dev/null; do
3269 sleep 1
3270 done
d5e024cb 3271 rm -f ${ZEDLET_DIR}/zed.pid
7a4500a1 3272 fi
3f03fc8d 3273 return 0
7a4500a1 3274}
8c54ddd3 3275
4e9b1569 3276#
3277# Drain all zevents
3278#
3279function zed_events_drain
3280{
3281 while [ $(zpool events -H | wc -l) -ne 0 ]; do
3282 sleep 1
3283 zpool events -c >/dev/null
3284 done
3285}
3286
639b1894
TH
3287# Set a variable in zed.rc to something, un-commenting it in the process.
3288#
3289# $1 variable
3290# $2 value
3291function zed_rc_set
3292{
3293 var="$1"
3294 val="$2"
3295 # Remove the line
3296 cmd="'/$var/d'"
3297 eval sed -i $cmd $ZEDLET_DIR/zed.rc
3298
3299 # Add it at the end
3300 echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3301}
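
#
# Illustrative sketch (not used by any test case): temporarily override a
# zed.rc setting for the duration of a test.  ZED_NOTIFY_INTERVAL_SECS is a
# standard zed.rc variable; any zedlet-specific variable works the same way.
#
function example_zed_rc_override
{
	typeset backup=$(zed_rc_backup)

	zed_rc_set ZED_NOTIFY_INTERVAL_SECS 0
	# ... generate and check the zevents under test here ...
	zed_rc_restore $backup
}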
3302
3303
8c54ddd3
BB
3304#
3305# Check if the provided device is actively being used as a swap device.
3306#
3307function is_swap_inuse
3308{
3309 typeset device=$1
3310
3311 if [[ -z $device ]] ; then
3312 log_note "No device specified."
3313 return 1
3314 fi
3315
3316 if is_linux; then
3317 swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
3318 else
3319 swap -l | grep -w $device > /dev/null 2>&1
3320 fi
3321
3322 return $?
3323}
3324
3325#
3326# Setup a swap device using the provided device.
3327#
3328function swap_setup
3329{
3330 typeset swapdev=$1
3331
3332 if is_linux; then
c7a7601c 3333 log_must eval "mkswap $swapdev > /dev/null 2>&1"
8c54ddd3
BB
3334 log_must swapon $swapdev
3335 else
3336 log_must swap -a $swapdev
3337 fi
3338
3339 return 0
3340}
3341
3342#
3343# Cleanup a swap device on the provided device.
3344#
3345function swap_cleanup
3346{
3347 typeset swapdev=$1
3348
3349 if is_swap_inuse $swapdev; then
3350 if is_linux; then
3351 log_must swapoff $swapdev
3352 else
3353 log_must swap -d $swapdev
3354 fi
3355 fi
3356
3357 return 0
3358}
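
#
# Illustrative sketch (not used by any test case): use a zvol as a temporary
# swap device and tear it down again.  $TESTPOOL, $TESTVOL, $VOLSIZE and
# $ZVOL_DEVDIR are provided by the framework configuration.
#
function example_swap_on_zvol
{
	typeset swapdev=$ZVOL_DEVDIR/$TESTPOOL/$TESTVOL

	log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
	block_device_wait
	log_must swap_setup $swapdev
	log_must swap_cleanup $swapdev
	log_must zfs destroy $TESTPOOL/$TESTVOL
}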
379ca9cf
OF
3359
3360#
3361# Set a global system tunable (64-bit value)
3362#
3363# $1 tunable name
3364# $2 tunable values
3365#
3366function set_tunable64
3367{
3368 set_tunable_impl "$1" "$2" Z
3369}
3370
3371#
3372# Set a global system tunable (32-bit value)
3373#
3374# $1 tunable name
3375# $2 tunable values
3376#
3377function set_tunable32
3378{
3379 set_tunable_impl "$1" "$2" W
3380}
3381
3382function set_tunable_impl
3383{
3384 typeset tunable="$1"
3385 typeset value="$2"
3386 typeset mdb_cmd="$3"
3387 typeset module="${4:-zfs}"
3388
3389 [[ -z "$tunable" ]] && return 1
3390 [[ -z "$value" ]] && return 1
3391 [[ -z "$mdb_cmd" ]] && return 1
3392
3393 case "$(uname)" in
3394 Linux)
3395 typeset zfs_tunables="/sys/module/$module/parameters"
3396 [[ -w "$zfs_tunables/$tunable" ]] || return 1
3397 echo -n "$value" > "$zfs_tunables/$tunable"
3398 return "$?"
3399 ;;
3400 SunOS)
3401 [[ "$module" == "zfs" ]] || return 1
3402 echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
3403 return "$?"
3404 ;;
3405 esac
3406}
3407
3408#
3409# Get a global system tunable
3410#
3411# $1 tunable name
3412#
3413function get_tunable
3414{
3415 get_tunable_impl "$1"
3416}
3417
3418function get_tunable_impl
3419{
3420 typeset tunable="$1"
3421 typeset module="${2:-zfs}"
3422
3423 [[ -z "$tunable" ]] && return 1
3424
3425 case "$(uname)" in
3426 Linux)
3427 typeset zfs_tunables="/sys/module/$module/parameters"
3428 [[ -f "$zfs_tunables/$tunable" ]] || return 1
3429 cat $zfs_tunables/$tunable
3430 return "$?"
3431 ;;
3432 SunOS)
3433 [[ "$module" == "zfs" ]] || return 1
3434 ;;
3435 esac
3436
3437 return 1
3438}
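
#
# Example usage (illustrative): save, change, and restore a module parameter.
# zfs_txg_timeout is a standard zfs kernel module parameter on Linux.
#
#	typeset saved=$(get_tunable zfs_txg_timeout)
#	log_must set_tunable64 zfs_txg_timeout 1
#	# ... exercise the code path under test ...
#	log_must set_tunable64 zfs_txg_timeout $saved
#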
a1d477c2
MA
3439
3440#
3441# Prints the current time in seconds since UNIX Epoch.
3442#
3443function current_epoch
3444{
3445 printf '%(%s)T'
3446}
3447
3448#
3449# Get decimal value of global uint32_t variable using mdb.
3450#
3451function mdb_get_uint32
3452{
3453 typeset variable=$1
3454 typeset value
3455
3456 value=$(mdb -k -e "$variable/X | ::eval .=U")
3457 if [[ $? -ne 0 ]]; then
3458 log_fail "Failed to get value of '$variable' from mdb."
3459 return 1
3460 fi
3461
3462 echo $value
3463 return 0
3464}
3465
3466#
3467# Set global uint32_t variable to a decimal value using mdb.
3468#
3469function mdb_set_uint32
3470{
3471 typeset variable=$1
3472 typeset value=$2
3473
3474 mdb -kw -e "$variable/W 0t$value" > /dev/null
3475 if [[ $? -ne 0 ]]; then
3476 echo "Failed to set '$variable' to '$value' in mdb."
3477 return 1
3478 fi
3479
3480 return 0
3481}