1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
618b6adf 23# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
c15d36c6 24# Copyright (c) 2012, 2020, Delphix. All rights reserved.
618b6adf
KS
25# Copyright (c) 2017, Tim Chase. All rights reserved.
26# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
27# Copyright (c) 2017, Lawrence Livermore National Security LLC.
28# Copyright (c) 2017, Datto Inc. All rights reserved.
29# Copyright (c) 2017, Open-E Inc. All rights reserved.
c3cb57ae 30# Copyright (c) 2021, The FreeBSD Foundation.
1b939560 31# Use is subject to license terms.
6bb24f4d
BB
32#
33
5c9f744b
AZ
34. ${STF_SUITE}/include/tunables.cfg
35
6bb24f4d 36. ${STF_TOOLS}/include/logapi.shlib
a7004725 37. ${STF_SUITE}/include/math.shlib
d3f2cd7e 38. ${STF_SUITE}/include/blkdev.shlib
6bb24f4d 39
c1d9abf9
JWK
40#
41# Apply constrained path when available. This is required since the
42# PATH may have been modified by sudo's secure_path behavior.
43#
44if [ -n "$STF_PATH" ]; then
e0b53a5d 45 export PATH="$STF_PATH"
c1d9abf9
JWK
46fi
47
a584ef26
BB
48#
49# Generic dot version comparison function
50#
51# Returns success when version $1 is greater than or equal to $2.
52#
53function compare_version_gte
54{
5b0e75ca 55 [ "$(printf "$1\n$2" | sort -V | tail -n1)" = "$1" ]
a584ef26
BB
56}
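#
# Illustrative usage (kept commented out so sourcing this library has no
# side effects); the version strings below are arbitrary examples:
#
#	compare_version_gte "2.1.11" "2.1.5" && echo "2.1.11 >= 2.1.5"
#	compare_version_gte "0.8.6" "2.0.0" || echo "0.8.6 < 2.0.0"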
57
0c656a96
GDN
58# Linux kernel version comparison function
59#
60# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
61#
62# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
63#
64function linux_version
65{
66 typeset ver="$1"
67
23914a3b 68 [ -z "$ver" ] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
0c656a96 69
23914a3b
AZ
70 typeset version major minor _
71 IFS='.' read -r version major minor _ <<<"$ver"
0c656a96 72
23914a3b
AZ
73 [ -z "$version" ] && version=0
74 [ -z "$major" ] && major=0
75 [ -z "$minor" ] && minor=0
0c656a96 76
23914a3b 77 echo $((version * 100000 + major * 1000 + minor))
0c656a96
GDN
78}
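#
# Illustrative usage (commented out); the "4.18" kernel version is only an
# example threshold:
#
#	if [ $(linux_version) -lt $(linux_version "4.18") ]; then
#		log_unsupported "Requires a 4.18 or newer kernel"
#	fi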
79
6bb24f4d
BB
80# Determine if this is a Linux test system
81#
82# Return 0 if platform Linux, 1 if otherwise
83
84function is_linux
85{
5c9f744b 86 [ "$UNAME" = "Linux" ]
6bb24f4d
BB
87}
88
d7164b27
RM
89# Determine if this is an illumos test system
90#
91# Return 0 if platform illumos, 1 if otherwise
92function is_illumos
93{
5c9f744b 94 [ "$UNAME" = "illumos" ]
d7164b27
RM
95}
96
7839c4b5
MM
97# Determine if this is a FreeBSD test system
98#
99# Return 0 if platform FreeBSD, 1 if otherwise
100
101function is_freebsd
102{
5c9f744b 103 [ "$UNAME" = "FreeBSD" ]
7839c4b5
MM
104}
105
818d4a87
I
106# Determine if this is a DilOS test system
107#
108# Return 0 if platform DilOS, 1 if otherwise
109
110function is_dilos
111{
112 typeset ID=""
113 [[ -f /etc/os-release ]] && . /etc/os-release
bf228f3d 114 [ "$ID" = "dilos" ]
818d4a87
I
115}
116
e676a196
BB
117# Determine if this is a 32-bit system
118#
119# Return 0 if platform is 32-bit, 1 if otherwise
120
121function is_32bit
122{
bf228f3d 123 [ $(getconf LONG_BIT) = "32" ]
e676a196
BB
124}
125
c6ced726
BB
126# Determine if kmemleak is enabled
127#
128# Return 0 if kmemleak is enabled, 1 if otherwise
129
130function is_kmemleak
131{
bf228f3d 132 is_linux && [ -e /sys/kernel/debug/kmemleak ]
c6ced726
BB
133}
134
6bb24f4d
BB
135# Determine whether a dataset is mounted
136#
137# $1 dataset name
138# $2 filesystem type; optional - defaulted to zfs
139#
140# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
141
142function ismounted
143{
144 typeset fstype=$2
145 [[ -z $fstype ]] && fstype=zfs
23914a3b 146 typeset out dir name
6bb24f4d
BB
147
148 case $fstype in
149 zfs)
150 if [[ "$1" == "/"* ]] ; then
75746e9a 151 ! zfs mount | awk -v fs="$1" '$2 == fs {exit 1}'
6bb24f4d 152 else
75746e9a 153 ! zfs mount | awk -v ds="$1" '$1 == ds {exit 1}'
6bb24f4d
BB
154 fi
155 ;;
156 ufs|nfs)
7839c4b5
MM
157 if is_freebsd; then
158 mount -pt $fstype | while read dev dir _t _flags; do
159 [[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
160 done
161 else
23914a3b 162 out=$(df -F $fstype $1 2>/dev/null) || return
6bb24f4d 163
7839c4b5
MM
164 dir=${out%%\(*}
165 dir=${dir%% *}
166 name=${out##*\(}
167 name=${name%%\)*}
168 name=${name%% *}
6bb24f4d 169
7839c4b5
MM
170 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
171 fi
6bb24f4d 172 ;;
5c214ae3 173 ext*)
75746e9a 174 df -t $fstype $1 > /dev/null 2>&1
6bb24f4d
BB
175 ;;
176 zvol)
177 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
178 link=$(readlink -f $ZVOL_DEVDIR/$1)
179 [[ -n "$link" ]] && \
c1d9abf9 180 mount | grep -q "^$link" && \
6bb24f4d
BB
181 return 0
182 fi
183 ;;
23914a3b
AZ
184 *)
185 false
186 ;;
6bb24f4d 187 esac
6bb24f4d
BB
188}
189
190# Return 0 if a dataset is mounted; 1 otherwise
191#
192# $1 dataset name
193# $2 filesystem type; optional - defaulted to zfs
194
195function mounted
196{
197 ismounted $1 $2
6bb24f4d
BB
198}
199
200# Return 0 if a dataset is unmounted; 1 otherwise
201#
202# $1 dataset name
203# $2 filesystem type; optional - defaulted to zfs
204
205function unmounted
206{
23914a3b 207 ! ismounted $1 $2
6bb24f4d
BB
208}
209
6bb24f4d
BB
210function default_setup
211{
212 default_setup_noexit "$@"
213
214 log_pass
215}
216
9c53e516
TK
217function default_setup_no_mountpoint
218{
219 default_setup_noexit "$1" "$2" "$3" "yes"
220
221 log_pass
222}
223
6bb24f4d
BB
224#
225# Given a list of disks, setup storage pools and datasets.
226#
227function default_setup_noexit
228{
229 typeset disklist=$1
230 typeset container=$2
231 typeset volume=$3
9c53e516 232 typeset no_mountpoint=$4
3c67d83a 233 log_note begin default_setup_noexit
6bb24f4d
BB
234
235 if is_global_zone; then
236 if poolexists $TESTPOOL ; then
237 destroy_pool $TESTPOOL
238 fi
c1d9abf9
JWK
239 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
240 log_must zpool create -f $TESTPOOL $disklist
6bb24f4d
BB
241 else
242 reexport_pool
243 fi
244
c1d9abf9
JWK
245 rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
246 mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
6bb24f4d 247
c1d9abf9 248 log_must zfs create $TESTPOOL/$TESTFS
9c53e516
TK
249 if [[ -z $no_mountpoint ]]; then
250 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
251 fi
6bb24f4d
BB
252
253 if [[ -n $container ]]; then
c1d9abf9 254 rm -rf $TESTDIR1 || \
6bb24f4d 255 log_unresolved Could not remove $TESTDIR1
c1d9abf9 256 mkdir -p $TESTDIR1 || \
6bb24f4d
BB
257 log_unresolved Could not create $TESTDIR1
258
c1d9abf9
JWK
259 log_must zfs create $TESTPOOL/$TESTCTR
260 log_must zfs set canmount=off $TESTPOOL/$TESTCTR
261 log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
9c53e516
TK
262 if [[ -z $no_mountpoint ]]; then
263 log_must zfs set mountpoint=$TESTDIR1 \
264 $TESTPOOL/$TESTCTR/$TESTFS1
265 fi
6bb24f4d
BB
266 fi
267
268 if [[ -n $volume ]]; then
269 if is_global_zone ; then
c1d9abf9 270 log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
6bb24f4d
BB
271 block_device_wait
272 else
c1d9abf9 273 log_must zfs create $TESTPOOL/$TESTVOL
6bb24f4d
BB
274 fi
275 fi
276}
277
278#
279# Given a list of disks, setup a storage pool, file system and
280# a container.
281#
282function default_container_setup
283{
284 typeset disklist=$1
285
286 default_setup "$disklist" "true"
287}
288
289#
290# Given a list of disks, setup a storage pool,file system
291# and a volume.
292#
293function default_volume_setup
294{
295 typeset disklist=$1
296
297 default_setup "$disklist" "" "true"
298}
299
300#
301# Given a list of disks, setup a storage pool,file system,
302# a container and a volume.
303#
304function default_container_volume_setup
305{
306 typeset disklist=$1
307
308 default_setup "$disklist" "true" "true"
309}
310
311#
312# Create a snapshot on a filesystem or volume. By default, create a snapshot
313# on the filesystem.
314#
d99a0153 315# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
6bb24f4d
BB
316# $2 snapshot name. Default, $TESTSNAP
317#
318function create_snapshot
319{
d99a0153 320 typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
6bb24f4d
BB
321 typeset snap=${2:-$TESTSNAP}
322
323 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
324 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
325
326 if snapexists $fs_vol@$snap; then
327 log_fail "$fs_vol@$snap already exists."
328 fi
329 datasetexists $fs_vol || \
330 log_fail "$fs_vol must exist."
331
c1d9abf9 332 log_must zfs snapshot $fs_vol@$snap
6bb24f4d
BB
333}
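#
# Illustrative usage (commented out); both arguments are optional and the
# names shown are just the suite defaults:
#
#	create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
#	create_snapshot		# equivalent, using the defaults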
334
335#
336# Create a clone from a snapshot, default clone name is $TESTCLONE.
337#
338# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
339# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
340#
341function create_clone # snapshot clone
342{
343 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
344 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
345
346 [[ -z $snap ]] && \
347 log_fail "Snapshot name is undefined."
348 [[ -z $clone ]] && \
349 log_fail "Clone name is undefined."
350
c1d9abf9 351 log_must zfs clone $snap $clone
6bb24f4d
BB
352}
353
aeacdefe
GM
354#
355# Create a bookmark of the given snapshot. By default, create a bookmark on
356# the filesystem.
357#
358# $1 Existing filesystem or volume name. Default, $TESTFS
359# $2 Existing snapshot name. Default, $TESTSNAP
360# $3 bookmark name. Default, $TESTBKMARK
361#
362function create_bookmark
363{
364 typeset fs_vol=${1:-$TESTFS}
365 typeset snap=${2:-$TESTSNAP}
366 typeset bkmark=${3:-$TESTBKMARK}
367
368 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
369 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
370 [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
371
372 if bkmarkexists $fs_vol#$bkmark; then
373 log_fail "$fs_vol#$bkmark already exists."
374 fi
375 datasetexists $fs_vol || \
376 log_fail "$fs_vol must exist."
377 snapexists $fs_vol@$snap || \
378 log_fail "$fs_vol@$snap must exist."
379
c1d9abf9 380 log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
aeacdefe
GM
381}
382
650258d7 383#
384# Create a temporary clone result of an interrupted resumable 'zfs receive'
385# $1 Destination filesystem name. Must not exist, will be created as the result
386# of this function along with its %recv temporary clone
387# $2 Source filesystem name. Must not exist, will be created and destroyed
388#
389function create_recv_clone
390{
391 typeset recvfs="$1"
392 typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
393 typeset snap="$sendfs@snap1"
394 typeset incr="$sendfs@snap2"
395 typeset mountpoint="$TESTDIR/create_recv_clone"
396 typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
397
398 [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
399
400 datasetexists $recvfs && log_fail "Recv filesystem must not exist."
401 datasetexists $sendfs && log_fail "Send filesystem must not exist."
402
56fa4aa9 403 log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
650258d7 404 log_must zfs snapshot $snap
405 log_must eval "zfs send $snap | zfs recv -u $recvfs"
406 log_must mkfile 1m "$mountpoint/data"
407 log_must zfs snapshot $incr
30af21b0
PD
408 log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
409 iflag=fullblock > $sendfile"
650258d7 410 log_mustnot eval "zfs recv -su $recvfs < $sendfile"
c7b55e71 411 destroy_dataset "$sendfs" "-r"
650258d7 412 log_must rm -f "$sendfile"
413
414 if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
415 log_fail "Error creating temporary $recvfs/%recv clone"
416 fi
417}
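#
# Illustrative usage (commented out); "$TESTPOOL/recvfs" is a hypothetical
# destination that must not exist before the call:
#
#	create_recv_clone $TESTPOOL/recvfs
#	datasetexists $TESTPOOL/recvfs/%recv && log_note "partial receive present"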
418
6bb24f4d
BB
419function default_mirror_setup
420{
421 default_mirror_setup_noexit $1 $2 $3
422
423 log_pass
424}
425
426#
427# Given a pair of disks, set up a storage pool and dataset for the mirror
428# @parameters: $1 the primary side of the mirror
429# $2 the secondary side of the mirror
430# @uses: ZPOOL ZFS TESTPOOL TESTFS
431function default_mirror_setup_noexit
432{
433 readonly func="default_mirror_setup_noexit"
434 typeset primary=$1
435 typeset secondary=$2
436
437 [[ -z $primary ]] && \
438 log_fail "$func: No parameters passed"
439 [[ -z $secondary ]] && \
440 log_fail "$func: No secondary partition passed"
c1d9abf9
JWK
441 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
442 log_must zpool create -f $TESTPOOL mirror $@
443 log_must zfs create $TESTPOOL/$TESTFS
444 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
6bb24f4d
BB
445}
446
447#
448# create a number of mirrors.
449# We create a number ($1) of 2-way mirrors using the pairs of disks named
450# on the command line. These mirrors are *not* mounted
451# @parameters: $1 the number of mirrors to create
452# $... the devices to use to create the mirrors on
453# @uses: ZPOOL ZFS TESTPOOL
454function setup_mirrors
455{
456 typeset -i nmirrors=$1
457
458 shift
459 while ((nmirrors > 0)); do
460 log_must test -n "$1" -a -n "$2"
c1d9abf9
JWK
461 [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
462 log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
6bb24f4d
BB
463 shift 2
464 ((nmirrors = nmirrors - 1))
465 done
466}
467
468#
469# create a number of raidz pools.
470# We create a number ($1) of 2-disk raidz pools using the pairs of disks named
471# on the command line. These pools are *not* mounted
472# @parameters: $1 the number of pools to create
473# $... the devices to use to create the pools on
474# @uses: ZPOOL ZFS TESTPOOL
475function setup_raidzs
476{
477 typeset -i nraidzs=$1
478
479 shift
480 while ((nraidzs > 0)); do
481 log_must test -n "$1" -a -n "$2"
c1d9abf9
JWK
482 [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
483 log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
6bb24f4d
BB
484 shift 2
485 ((nraidzs = nraidzs - 1))
486 done
487}
488
489#
490# Destroy the configured testpool mirrors.
491# the mirrors are of the form ${TESTPOOL}{number}
492# @uses: ZPOOL ZFS TESTPOOL
493function destroy_mirrors
494{
495 default_cleanup_noexit
496
497 log_pass
498}
499
1b609d4b
BA
500function default_raidz_setup
501{
502 default_raidz_setup_noexit "$*"
503
504 log_pass
505}
506
6bb24f4d
BB
507#
508# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
509# $1 the list of disks
510#
1b609d4b 511function default_raidz_setup_noexit
6bb24f4d
BB
512{
513 typeset disklist="$*"
514 disks=(${disklist[*]})
515
516 if [[ ${#disks[*]} -lt 2 ]]; then
517 log_fail "A raid-z requires a minimum of two disks."
518 fi
519
c1d9abf9 520 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
3fd3e56c 521 log_must zpool create -f $TESTPOOL raidz $disklist
c1d9abf9
JWK
522 log_must zfs create $TESTPOOL/$TESTFS
523 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
6bb24f4d
BB
524}
525
526#
527# Common function used to cleanup storage pools and datasets.
528#
529# Invoked at the start of the test suite to ensure the system
530# is in a known state, and also at the end of each set of
531# sub-tests to ensure errors from one set of tests don't
532# impact the execution of the next set.
533
534function default_cleanup
535{
536 default_cleanup_noexit
537
538 log_pass
539}
540
3fd3e56c 541#
542# Utility function used to list all available pool names.
543#
544# NOTE: $KEEP is a variable containing pool names, separated by a newline
545# character, that must be excluded from the returned list.
546#
547function get_all_pools
548{
549 zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
550}
551
6bb24f4d
BB
552function default_cleanup_noexit
553{
6bb24f4d
BB
554 typeset pool=""
555 #
556 # Destroying the pool will also destroy any
557 # filesystems it contains.
558 #
559 if is_global_zone; then
c1d9abf9 560 zfs unmount -a > /dev/null 2>&1
3fd3e56c 561 ALL_POOLS=$(get_all_pools)
6bb24f4d
BB
562 # Here, we loop through the pools we're allowed to
563 # destroy, only destroying them if it's safe to do
564 # so.
565 while [ ! -z ${ALL_POOLS} ]
566 do
567 for pool in ${ALL_POOLS}
568 do
569 if safe_to_destroy_pool $pool ;
570 then
571 destroy_pool $pool
572 fi
6bb24f4d 573 done
2b95e911 574 ALL_POOLS=$(get_all_pools)
6bb24f4d
BB
575 done
576
c1d9abf9 577 zfs mount -a
6bb24f4d
BB
578 else
579 typeset fs=""
c1d9abf9
JWK
580 for fs in $(zfs list -H -o name \
581 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
c7b55e71 582 destroy_dataset "$fs" "-Rf"
6bb24f4d
BB
583 done
584
585 # Need cleanup here to avoid garbage dir left.
c1d9abf9 586 for fs in $(zfs list -H -o name); do
6bb24f4d 587 [[ $fs == /$ZONE_POOL ]] && continue
c1d9abf9 588 [[ -d $fs ]] && log_must rm -rf $fs/*
6bb24f4d
BB
589 done
590
591 #
592 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
593 # the default value
594 #
c1d9abf9 595 for fs in $(zfs list -H -o name); do
6bb24f4d 596 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
c1d9abf9
JWK
597 log_must zfs set reservation=none $fs
598 log_must zfs set recordsize=128K $fs
599 log_must zfs set mountpoint=/$fs $fs
75746e9a
AZ
600 typeset enc=$(get_prop encryption $fs)
601 if [ -z "$enc" ] || [ "$enc" = "off" ]; then
c1d9abf9 602 log_must zfs set checksum=on $fs
6bb24f4d 603 fi
c1d9abf9
JWK
604 log_must zfs set compression=off $fs
605 log_must zfs set atime=on $fs
606 log_must zfs set devices=off $fs
607 log_must zfs set exec=on $fs
608 log_must zfs set setuid=on $fs
609 log_must zfs set readonly=off $fs
610 log_must zfs set snapdir=hidden $fs
611 log_must zfs set aclmode=groupmask $fs
612 log_must zfs set aclinherit=secure $fs
6bb24f4d
BB
613 fi
614 done
615 fi
616
617 [[ -d $TESTDIR ]] && \
c1d9abf9 618 log_must rm -rf $TESTDIR
7050a65d
SV
619
620 disk1=${DISKS%% *}
621 if is_mpath_device $disk1; then
622 delete_partitions
623 fi
52775712 624
625 rm -f $TEST_BASE_DIR/{err,out}
6bb24f4d
BB
626}
627
628
629#
630# Common function used to cleanup storage pools, file systems
631# and containers.
632#
633function default_container_cleanup
634{
635 if ! is_global_zone; then
636 reexport_pool
637 fi
638
23914a3b 639 ismounted $TESTPOOL/$TESTCTR/$TESTFS1 &&
c1d9abf9 640 log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
6bb24f4d 641
c7b55e71
GDN
642 destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
643 destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
6bb24f4d
BB
644
645 [[ -e $TESTDIR1 ]] && \
62c5ccdf 646 log_must rm -rf $TESTDIR1
6bb24f4d
BB
647
648 default_cleanup
649}
650
651#
652# Common function used to clean up a snapshot of a file system or volume.
653# Defaults to deleting the file system's snapshot.
654#
655# $1 snapshot name
656#
657function destroy_snapshot
658{
659 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
660
661 if ! snapexists $snap; then
838bd5ff 662 log_fail "'$snap' does not exist."
6bb24f4d
BB
663 fi
664
665 #
666 # The value returned by 'get_prop' may not match the real mountpoint
667 # when the snapshot is unmounted, so first check and make sure this
668 # snapshot is mounted on the current system.
669 #
670 typeset mtpt=""
671 if ismounted $snap; then
672 mtpt=$(get_prop mountpoint $snap)
6bb24f4d
BB
673 fi
674
c7b55e71 675 destroy_dataset "$snap"
6bb24f4d 676 [[ $mtpt != "" && -d $mtpt ]] && \
c1d9abf9 677 log_must rm -rf $mtpt
6bb24f4d
BB
678}
679
680#
681# Common function used to cleanup clone.
682#
683# $1 clone name
684#
685function destroy_clone
686{
687 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
688
689 if ! datasetexists $clone; then
690 log_fail "'$clone' does not exist."
691 fi
692
693 # With the same reason in destroy_snapshot
694 typeset mtpt=""
695 if ismounted $clone; then
696 mtpt=$(get_prop mountpoint $clone)
6bb24f4d
BB
697 fi
698
c7b55e71 699 destroy_dataset "$clone"
6bb24f4d 700 [[ $mtpt != "" && -d $mtpt ]] && \
c1d9abf9 701 log_must rm -rf $mtpt
6bb24f4d
BB
702}
703
aeacdefe
GM
704#
705# Common function used to cleanup bookmark of file system or volume. Default
706# to delete the file system's bookmark.
707#
708# $1 bookmark name
709#
710function destroy_bookmark
711{
712 typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
713
714 if ! bkmarkexists $bkmark; then
715 log_fail "'$bkmark' does not exist."
716 fi
717
c7b55e71 718 destroy_dataset "$bkmark"
aeacdefe
GM
719}
720
6bb24f4d
BB
721# Return 0 if a snapshot exists; $? otherwise
722#
723# $1 - snapshot name
724
725function snapexists
726{
c1d9abf9 727 zfs list -H -t snapshot "$1" > /dev/null 2>&1
6bb24f4d
BB
728}
729
aeacdefe
GM
730#
731# Return 0 if a bookmark exists; $? otherwise
732#
733# $1 - bookmark name
734#
735function bkmarkexists
736{
c1d9abf9 737 zfs list -H -t bookmark "$1" > /dev/null 2>&1
aeacdefe
GM
738}
739
3b9edd7b
SD
740#
741# Return 0 if a hold exists; $? otherwise
742#
743# $1 - hold tag
744# $2 - snapshot name
745#
746function holdexists
747{
75746e9a 748 ! zfs holds "$2" | awk -v t="$1" '$2 ~ t { exit 1 }'
3b9edd7b
SD
749}
750
6bb24f4d
BB
751#
752# Set a property to a certain value on a dataset.
753# Sets a property of the dataset to the value as passed in.
754# @param:
755# $1 dataset whose property is being set
756# $2 property to set
757# $3 value to set property to
758# @return:
759# 0 if the property could be set.
760# non-zero otherwise.
761# @use: ZFS
762#
763function dataset_setprop
764{
765 typeset fn=dataset_setprop
766
767 if (($# < 3)); then
768 log_note "$fn: Insufficient parameters (need 3, had $#)"
769 return 1
770 fi
771 typeset output=
c1d9abf9 772 output=$(zfs set $2=$3 $1 2>&1)
6bb24f4d
BB
773 typeset rv=$?
774 if ((rv != 0)); then
775 log_note "Setting property on $1 failed."
776 log_note "property $2=$3"
777 log_note "Return Code: $rv"
778 log_note "Output: $output"
779 return $rv
780 fi
781 return 0
782}
783
784#
785# Assign suite defined dataset properties.
786# This function is used to apply the suite's defined default set of
787# properties to a dataset.
788# @parameters: $1 dataset to use
789# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
790# @returns:
791# 0 if the dataset has been altered.
792# 1 if no pool name was passed in.
793# 2 if the dataset could not be found.
794# 3 if the dataset could not have its properties set.
795#
796function dataset_set_defaultproperties
797{
798 typeset dataset="$1"
799
800 [[ -z $dataset ]] && return 1
801
802 typeset confset=
803 typeset -i found=0
c1d9abf9 804 for confset in $(zfs list); do
6bb24f4d
BB
805 if [[ $dataset = $confset ]]; then
806 found=1
807 break
808 fi
809 done
810 [[ $found -eq 0 ]] && return 2
811 if [[ -n $COMPRESSION_PROP ]]; then
812 dataset_setprop $dataset compression $COMPRESSION_PROP || \
813 return 3
814 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
815 fi
816 if [[ -n $CHECKSUM_PROP ]]; then
817 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
818 return 3
819 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
820 fi
821 return 0
822}
823
824#
825# Check a numeric assertion
826# @parameter: $@ the assertion to check
827# @output: big loud notice if assertion failed
828# @use: log_fail
829#
830function assert
831{
832 (($@)) || log_fail "$@"
833}
834
835#
836# Function to format partition size of a disk
837# Given a disk cxtxdx reduces all partitions
838# to 0 size
839#
840function zero_partitions #<whole_disk_name>
841{
842 typeset diskname=$1
843 typeset i
844
395a6b19
RM
845 if is_freebsd; then
846 gpart destroy -F $diskname
847 elif is_linux; then
8e5d1484
PZ
848 DSK=$DEV_DSKDIR/$diskname
849 DSK=$(echo $DSK | sed -e "s|//|/|g")
850 log_must parted $DSK -s -- mklabel gpt
851 blockdev --rereadpt $DSK 2>/dev/null
852 block_device_wait
6bb24f4d
BB
853 else
854 for i in 0 1 3 4 5 6 7
855 do
cf8738d8 856 log_must set_partition $i "" 0mb $diskname
6bb24f4d
BB
857 done
858 fi
95401cb6
BB
859
860 return 0
6bb24f4d
BB
861}
862
863#
864# Given a slice, size and disk, this function
865# formats the slice to the specified size.
866# Size should be specified with units as per
867# the `format` command requirements eg. 100mb 3gb
868#
c6e457df 869# NOTE: This entire interface is problematic for the Linux parted utility
6bb24f4d
BB
870# which requires the end of the partition to be specified. It would be
871# best to retire this interface and replace it with something more flexible.
872# At the moment a best effort is made.
873#
2ff615b2
RE
874# arguments: <slice_num> <slice_start> <size_plus_units> <whole_disk_name>
875function set_partition
6bb24f4d
BB
876{
877 typeset -i slicenum=$1
878 typeset start=$2
879 typeset size=$3
a3bddd49
RM
880 typeset disk=${4#$DEV_DSKDIR/}
881 disk=${disk#$DEV_RDSKDIR/}
6bb24f4d 882
5c9f744b 883 case "$UNAME" in
7839c4b5 884 Linux)
8e5d1484
PZ
885 if [[ -z $size || -z $disk ]]; then
886 log_fail "The size or disk name is unspecified."
887 fi
a3bddd49 888 disk=$DEV_DSKDIR/$disk
6bb24f4d
BB
889 typeset size_mb=${size%%[mMgG]}
890
891 size_mb=${size_mb%%[mMgG][bB]}
892 if [[ ${size:1:1} == 'g' ]]; then
893 ((size_mb = size_mb * 1024))
894 fi
895
896 # Create GPT partition table when setting slice 0 or
897 # when the device doesn't already contain a GPT label.
2ff615b2 898 parted $disk -s -- print 1 >/dev/null
6bb24f4d
BB
899 typeset ret_val=$?
900 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
23914a3b 901 if ! parted $disk -s -- mklabel gpt; then
cf8738d8 902 log_note "Failed to create GPT partition table on $disk"
903 return 1
904 fi
6bb24f4d
BB
905 fi
906
907 # When no start is given align on the first cylinder.
908 if [[ -z "$start" ]]; then
909 start=1
910 fi
911
912 # Determine the cylinder size for the device and using
913 # that calculate the end offset in cylinders.
914 typeset -i cly_size_kb=0
75746e9a
AZ
915 cly_size_kb=$(parted -m $disk -s -- unit cyl print |
916 awk -F '[:k.]' 'NR == 3 {print $4}')
6bb24f4d
BB
917 ((end = (size_mb * 1024 / cly_size_kb) + start))
918
2ff615b2 919 parted $disk -s -- \
6bb24f4d 920 mkpart part$slicenum ${start}cyl ${end}cyl
2ff615b2
RE
921 typeset ret_val=$?
922 if [[ $ret_val -ne 0 ]]; then
cf8738d8 923 log_note "Failed to create partition $slicenum on $disk"
924 return 1
925 fi
6bb24f4d 926
2ff615b2
RE
927 blockdev --rereadpt $disk 2>/dev/null
928 block_device_wait $disk
7839c4b5
MM
929 ;;
930 FreeBSD)
931 if [[ -z $size || -z $disk ]]; then
932 log_fail "The size or disk name is unspecified."
933 fi
a3bddd49 934 disk=$DEV_DSKDIR/$disk
7839c4b5
MM
935
936 if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
937 gpart destroy -F $disk >/dev/null 2>&1
23914a3b 938 if ! gpart create -s GPT $disk; then
7839c4b5
MM
939 log_note "Failed to create GPT partition table on $disk"
940 return 1
941 fi
942 fi
943
944 typeset index=$((slicenum + 1))
945
946 if [[ -n $start ]]; then
947 start="-b $start"
948 fi
949 gpart add -t freebsd-zfs $start -s $size -i $index $disk
typeset ret_val=$?
950 if [[ $ret_val -ne 0 ]]; then
951 log_note "Failed to create partition $slicenum on $disk"
952 return 1
953 fi
954
955 block_device_wait $disk
956 ;;
957 *)
8e5d1484
PZ
958 if [[ -z $slicenum || -z $size || -z $disk ]]; then
959 log_fail "The slice, size or disk name is unspecified."
960 fi
961
6bb24f4d
BB
962 typeset format_file=/var/tmp/format_in.$$
963
c1d9abf9
JWK
964 echo "partition" >$format_file
965 echo "$slicenum" >> $format_file
966 echo "" >> $format_file
967 echo "" >> $format_file
968 echo "$start" >> $format_file
969 echo "$size" >> $format_file
970 echo "label" >> $format_file
971 echo "" >> $format_file
972 echo "q" >> $format_file
973 echo "q" >> $format_file
6bb24f4d 974
c1d9abf9 975 format -e -s -d $disk -f $format_file
2ff615b2
RE
976 typeset ret_val=$?
977 rm -f $format_file
7839c4b5
MM
978 ;;
979 esac
c1d9abf9 980
cf8738d8 981 if [[ $ret_val -ne 0 ]]; then
982 log_note "Unable to format $disk slice $slicenum to $size"
983 return 1
984 fi
6bb24f4d
BB
985 return 0
986}
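#
# Illustrative usage (commented out); the disk below is a placeholder taken
# from $DISKS and the 100mb size is an arbitrary example:
#
#	typeset disk=${DISKS%% *}
#	log_must set_partition 0 "" 100mb $disk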
987
7050a65d
SV
988#
989# Delete all partitions on all disks - this is specifically for the use of multipath
990# devices which currently can only be used in the test suite as raw/un-partitioned
991# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
992#
993function delete_partitions
994{
815a6456 995 typeset disk
7050a65d 996
7050a65d
SV
997 if [[ -z $DISKSARRAY ]]; then
998 DISKSARRAY=$DISKS
999 fi
1000
1001 if is_linux; then
815a6456
RM
1002 typeset -i part
1003 for disk in $DISKSARRAY; do
1004 for (( part = 1; part < MAX_PARTITIONS; part++ )); do
1005 typeset partition=${disk}${SLICE_PREFIX}${part}
1006 parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
1007 if lsblk | grep -qF ${partition}; then
1008 log_fail "Partition ${partition} not deleted"
7050a65d 1009 else
815a6456 1010 log_note "Partition ${partition} deleted"
7050a65d 1011 fi
7050a65d 1012 done
815a6456 1013 done
7839c4b5
MM
1014 elif is_freebsd; then
1015 for disk in $DISKSARRAY; do
1016 if gpart destroy -F $disk; then
1017 log_note "Partitions for ${disk} deleted"
1018 else
1019 log_fail "Partitions for ${disk} not deleted"
1020 fi
1021 done
7050a65d 1022 fi
7050a65d
SV
1023}
1024
6bb24f4d
BB
1025#
1026# Get the end cyl of the given slice
1027#
1028function get_endslice #<disk> <slice>
1029{
1030 typeset disk=$1
1031 typeset slice=$2
1032 if [[ -z $disk || -z $slice ]] ; then
1033 log_fail "The disk name or slice number is unspecified."
1034 fi
1035
5c9f744b 1036 case "$UNAME" in
7839c4b5 1037 Linux)
c1d9abf9 1038 endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
420b4448 1039 awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
6bb24f4d 1040 ((endcyl = (endcyl + 1)))
7839c4b5
MM
1041 ;;
1042 FreeBSD)
1043 disk=${disk#/dev/zvol/}
1044 disk=${disk%p*}
1045 slice=$((slice + 1))
1046 endcyl=$(gpart show $disk | \
1047 awk -v slice=$slice '$3 == slice { print $1 + $2 }')
1048 ;;
1049 *)
6bb24f4d
BB
1050 disk=${disk#/dev/dsk/}
1051 disk=${disk#/dev/rdsk/}
1052 disk=${disk%s*}
1053
1054 typeset -i ratio=0
c1d9abf9 1055 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
75746e9a 1056 awk '/sectors\/cylinder/ {print $2}')
6bb24f4d
BB
1057
1058 if ((ratio == 0)); then
1059 return
1060 fi
1061
c1d9abf9 1062 typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
75746e9a 1063 awk -v token="$slice" '$1 == token {print $6}')
6bb24f4d
BB
1064
1065 ((endcyl = (endcyl + 1) / ratio))
7839c4b5
MM
1066 ;;
1067 esac
7c468940 1068
6bb24f4d
BB
1069 echo $endcyl
1070}
1071
1072
1073#
1074# Given a size,disk and total slice number, this function formats the
1075# disk slices from 0 to the total slice number with the same specified
1076# size.
1077#
1078function partition_disk #<slice_size> <whole_disk_name> <total_slices>
1079{
1080 typeset -i i=0
1081 typeset slice_size=$1
1082 typeset disk_name=$2
1083 typeset total_slices=$3
1084 typeset cyl
1085
1086 zero_partitions $disk_name
1087 while ((i < $total_slices)); do
1088 if ! is_linux; then
1089 if ((i == 2)); then
1090 ((i = i + 1))
1091 continue
1092 fi
1093 fi
cf8738d8 1094 log_must set_partition $i "$cyl" $slice_size $disk_name
6bb24f4d
BB
1095 cyl=$(get_endslice $disk_name $i)
1096 ((i = i+1))
1097 done
1098}
1099
1100#
1101# This function continues to write to a filenum number of files into dirnum
c1d9abf9 1102# number of directories until either file_write returns an error or the
6bb24f4d
BB
1103# maximum number of files per directory have been written.
1104#
1105# Usage:
1106# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1107#
1108# Return value: 0 on success
1109# non 0 on error
1110#
1111# Where :
1112# destdir: the directory under which everything is to be created
1113# dirnum: the maximum number of subdirectories to use, -1 no limit
1114# filenum: the maximum number of files per subdirectory
1115# bytes: number of bytes to write
c6e457df 1116# num_writes: number of times to write out bytes
4e33ba4c 1117# data: the data that will be written
6bb24f4d
BB
1118#
1119# E.g.
9be70c37 1120# fill_fs /testdir 20 25 1024 256 0
6bb24f4d
BB
1121#
1122# Note: bytes * num_writes equals the size of the testfile
1123#
1124function fill_fs # destdir dirnum filenum bytes num_writes data
1125{
1126 typeset destdir=${1:-$TESTDIR}
1127 typeset -i dirnum=${2:-50}
1128 typeset -i filenum=${3:-50}
1129 typeset -i bytes=${4:-8192}
1130 typeset -i num_writes=${5:-10240}
1b939560 1131 typeset data=${6:-0}
6bb24f4d 1132
9be70c37
RM
1133 mkdir -p $destdir/{1..$dirnum}
1134 for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
1135 file_write -o create -f $f -b $bytes -c $num_writes -d $data \
23914a3b 1136 || return
6bb24f4d 1137 done
6bb24f4d
BB
1138}
1139
75746e9a 1140# Get the specified dataset property in parsable format or fail
6bb24f4d
BB
1141function get_prop # property dataset
1142{
6bb24f4d
BB
1143 typeset prop=$1
1144 typeset dataset=$2
1145
75746e9a 1146 zfs get -Hpo value "$prop" "$dataset" || log_fail "zfs get $prop $dataset"
6bb24f4d
BB
1147}
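#
# Illustrative usage (commented out); the property and dataset names here
# are examples only:
#
#	typeset recsize=$(get_prop recordsize $TESTPOOL/$TESTFS)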
1148
75746e9a 1149# Get the specified pool property in parsable format or fail
6bb24f4d
BB
1150function get_pool_prop # property pool
1151{
6bb24f4d
BB
1152 typeset prop=$1
1153 typeset pool=$2
1154
75746e9a 1155 zpool get -Hpo value "$prop" "$pool" || log_fail "zpool get $prop $pool"
6bb24f4d
BB
1156}
1157
1158# Return 0 if a pool exists; $? otherwise
1159#
1160# $1 - pool name
1161
1162function poolexists
1163{
1164 typeset pool=$1
1165
1166 if [[ -z $pool ]]; then
1167 log_note "No pool name given."
1168 return 1
1169 fi
1170
c1d9abf9 1171 zpool get name "$pool" > /dev/null 2>&1
6bb24f4d
BB
1172}
1173
1174# Return 0 if all the specified datasets exist; $? otherwise
1175#
1176# $1-n dataset name
1177function datasetexists
1178{
1179 if (($# == 0)); then
1180 log_note "No dataset name given."
1181 return 1
1182 fi
1183
41ebf403 1184 zfs get name "$@" > /dev/null 2>&1
6bb24f4d
BB
1185}
1186
1187# return 0 if none of the specified datasets exists, otherwise return 1.
1188#
1189# $1-n dataset name
1190function datasetnonexists
1191{
1192 if (($# == 0)); then
1193 log_note "No dataset name given."
1194 return 1
1195 fi
1196
1197 while (($# > 0)); do
c1d9abf9 1198 zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
6bb24f4d
BB
1199 && return 1
1200 shift
1201 done
1202
1203 return 0
1204}
1205
b7dbbf6a 1206function is_shared_freebsd
6bb24f4d
BB
1207{
1208 typeset fs=$1
6bb24f4d 1209
5b0e75ca 1210 pgrep -q mountd && showmount -E | grep -qx "$fs"
b7dbbf6a
RM
1211}
1212
1213function is_shared_illumos
1214{
1215 typeset fs=$1
1216 typeset mtpt
2f71caf2 1217
c1d9abf9 1218 for mtpt in `share | awk '{print $2}'` ; do
6bb24f4d
BB
1219 if [[ $mtpt == $fs ]] ; then
1220 return 0
1221 fi
1222 done
1223
c1d9abf9 1224 typeset stat=$(svcs -H -o STA nfs/server:default)
6bb24f4d
BB
1225 if [[ $stat != "ON" ]]; then
1226 log_note "Current nfs/server status: $stat"
1227 fi
1228
1229 return 1
1230}
1231
b7dbbf6a
RM
1232function is_shared_linux
1233{
1234 typeset fs=$1
5b0e75ca 1235 ! exportfs -s | awk -v fs="${fs//\\/\\\\}" '/^\// && $1 == fs {exit 1}'
b7dbbf6a
RM
1236}
1237
54eb2c41
PS
1238#
1239# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1240#
1241# Returns 0 if shared, 1 otherwise.
1242#
1243function is_shared
1244{
1245 typeset fs=$1
1246 typeset mtpt
1247
1248 if [[ $fs != "/"* ]] ; then
1249 if datasetnonexists "$fs" ; then
1250 return 1
1251 else
1252 mtpt=$(get_prop mountpoint "$fs")
5b0e75ca 1253 case "$mtpt" in
54eb2c41
PS
1254 none|legacy|-) return 1
1255 ;;
1256 *) fs=$mtpt
1257 ;;
1258 esac
1259 fi
1260 fi
1261
5c9f744b 1262 case "$UNAME" in
b7dbbf6a
RM
1263 FreeBSD) is_shared_freebsd "$fs" ;;
1264 Linux) is_shared_linux "$fs" ;;
1265 *) is_shared_illumos "$fs" ;;
1266 esac
54eb2c41
PS
1267}
1268
c15d36c6
GW
1269function is_exported_illumos
1270{
1271 typeset fs=$1
5b0e75ca 1272 typeset mtpt _
c15d36c6 1273
5b0e75ca
AZ
1274 while read -r mtpt _; do
1275 [ "$mtpt" = "$fs" ] && return
1276 done < /etc/dfs/sharetab
c15d36c6
GW
1277
1278 return 1
1279}
1280
1281function is_exported_freebsd
1282{
1283 typeset fs=$1
5b0e75ca 1284 typeset mtpt _
c15d36c6 1285
5b0e75ca
AZ
1286 while read -r mtpt _; do
1287 [ "$mtpt" = "$fs" ] && return
1288 done < /etc/zfs/exports
c15d36c6
GW
1289
1290 return 1
1291}
1292
1293function is_exported_linux
1294{
1295 typeset fs=$1
5b0e75ca 1296 typeset mtpt _
c15d36c6 1297
5b0e75ca
AZ
1298 while read -r mtpt _; do
1299 [ "$(printf "$mtpt")" = "$fs" ] && return
1300 done < /etc/exports.d/zfs.exports
c15d36c6
GW
1301
1302 return 1
1303}
1304
1305#
1306# Given a mountpoint, or a dataset name, determine if it is exported via
1307# the os-specific NFS exports file.
1308#
1309# Returns 0 if exported, 1 otherwise.
1310#
1311function is_exported
1312{
1313 typeset fs=$1
1314 typeset mtpt
1315
1316 if [[ $fs != "/"* ]] ; then
1317 if datasetnonexists "$fs" ; then
1318 return 1
1319 else
1320 mtpt=$(get_prop mountpoint "$fs")
1321 case $mtpt in
1322 none|legacy|-) return 1
1323 ;;
1324 *) fs=$mtpt
1325 ;;
1326 esac
1327 fi
1328 fi
1329
5c9f744b 1330 case "$UNAME" in
c15d36c6
GW
1331 FreeBSD) is_exported_freebsd "$fs" ;;
1332 Linux) is_exported_linux "$fs" ;;
1333 *) is_exported_illumos "$fs" ;;
1334 esac
1335}
1336
6bb24f4d 1337#
2f71caf2 1338# Given a dataset name determine if it is shared via SMB.
6bb24f4d 1339#
2f71caf2 1340# Returns 0 if shared, 1 otherwise.
6bb24f4d 1341#
2f71caf2 1342function is_shared_smb
6bb24f4d
BB
1343{
1344 typeset fs=$1
2f71caf2 1345
5b0e75ca 1346 datasetexists "$fs" || return
6bb24f4d
BB
1347
1348 if is_linux; then
5b0e75ca 1349 net usershare list | grep -xFq "${fs//\//_}"
2f71caf2 1350 else
5c9f744b 1351 log_note "SMB on $UNAME currently unsupported by the test framework"
6bb24f4d
BB
1352 return 1
1353 fi
2f71caf2 1354}
1355
1356#
1357# Given a mountpoint, determine if it is not shared via NFS.
1358#
1359# Returns 0 if not shared, 1 otherwise.
1360#
1361function not_shared
1362{
bf228f3d 1363 ! is_shared $1
6bb24f4d
BB
1364}
1365
1366#
2f71caf2 1367# Given a dataset determine if it is not shared via SMB.
6bb24f4d 1368#
2f71caf2 1369# Returns 0 if not shared, 1 otherwise.
1370#
1371function not_shared_smb
6bb24f4d 1372{
bf228f3d 1373 ! is_shared_smb $1
2f71caf2 1374}
1375
1376#
1377# Helper function to unshare a mountpoint.
1378#
1379function unshare_fs #fs
1380{
1381 typeset fs=$1
1382
bf228f3d 1383 if is_shared $fs || is_shared_smb $fs; then
bd328a58 1384 log_must zfs unshare $fs
6bb24f4d 1385 fi
6bb24f4d
BB
1386}
1387
2f71caf2 1388#
1389# Helper function to share a NFS mountpoint.
1390#
1391function share_nfs #fs
1392{
1393 typeset fs=$1
1394
5b0e75ca
AZ
1395 is_shared "$fs" && return
1396
5c9f744b 1397 case "$UNAME" in
5b0e75ca
AZ
1398 Linux)
1399 log_must exportfs "*:$fs"
1400 ;;
1401 FreeBSD)
1402 typeset mountd
1403 read -r mountd < /var/run/mountd.pid
1404 log_must eval "printf '%s\t\n' \"$fs\" >> /etc/zfs/exports"
1405 log_must kill -s HUP "$mountd"
1406 ;;
1407 *)
1408 log_must share -F nfs "$fs"
1409 ;;
1410 esac
2f71caf2 1411
1412 return 0
1413}
1414
1415#
1416# Helper function to unshare a NFS mountpoint.
1417#
1418function unshare_nfs #fs
1419{
1420 typeset fs=$1
1421
5b0e75ca
AZ
1422 ! is_shared "$fs" && return
1423
5c9f744b 1424 case "$UNAME" in
5b0e75ca
AZ
1425 Linux)
1426 log_must exportfs -u "*:$fs"
1427 ;;
1428 FreeBSD)
1429 typeset mountd
1430 read -r mountd < /var/run/mountd.pid
1431 awk -v fs="${fs//\\/\\\\}" '$1 != fs' /etc/zfs/exports > /etc/zfs/exports.$$
1432 log_must mv /etc/zfs/exports.$$ /etc/zfs/exports
1433 log_must kill -s HUP "$mountd"
1434 ;;
1435 *)
1436 log_must unshare -F nfs $fs
1437 ;;
1438 esac
2f71caf2 1439
1440 return 0
1441}
1442
1443#
1444# Helper function to show NFS shares.
1445#
1446function showshares_nfs
1447{
5c9f744b 1448 case "$UNAME" in
5b0e75ca
AZ
1449 Linux)
1450 exportfs -v
1451 ;;
1452 FreeBSD)
1453 showmount
1454 ;;
1455 *)
c1d9abf9 1456 share -F nfs
5b0e75ca
AZ
1457 ;;
1458 esac
2f71caf2 1459}
1460
1461#
1462# Helper function to show SMB shares.
1463#
1464function showshares_smb
1465{
1466 if is_linux; then
c1d9abf9 1467 net usershare list
2f71caf2 1468 else
c1d9abf9 1469 share -F smb
2f71caf2 1470 fi
1471
1472 return 0
1473}
1474
c15d36c6
GW
1475function check_nfs
1476{
5c9f744b 1477 case "$UNAME" in
5b0e75ca
AZ
1478 Linux)
1479 exportfs -s
1480 ;;
1481 FreeBSD)
c15d36c6 1482 showmount -e
5b0e75ca
AZ
1483 ;;
1484 *)
c15d36c6 1485 log_unsupported "Unknown platform"
5b0e75ca
AZ
1486 ;;
1487 esac || log_unsupported "The NFS utilities are not installed"
c15d36c6
GW
1488}
1489
6bb24f4d
BB
1490#
1491# Check NFS server status and trigger it online.
1492#
1493function setup_nfs_server
1494{
1495 # Cannot share directory in non-global zone.
1496 #
1497 if ! is_global_zone; then
1498 log_note "Cannot trigger NFS server by sharing in LZ."
1499 return
1500 fi
1501
c15d36c6 1502 if is_linux; then
2a0428f1
BB
1503 #
1504 # Re-synchronize /var/lib/nfs/etab with /etc/exports and
1505 # /etc/exports.d/* to provide a clean test environment.
1506 #
5b0e75ca 1507 log_must exportfs -r
2a0428f1 1508
c15d36c6
GW
1509 log_note "NFS server must be started prior to running ZTS."
1510 return
1511 elif is_freebsd; then
5b0e75ca 1512 log_must kill -s HUP $(</var/run/mountd.pid)
c15d36c6 1513
2a0428f1 1514 log_note "NFS server must be started prior to running ZTS."
6bb24f4d
BB
1515 return
1516 fi
1517
1518 typeset nfs_fmri="svc:/network/nfs/server:default"
c1d9abf9 1519 if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
6bb24f4d
BB
1520 #
1521 # Only a real sharing operation can bring the NFS server
1522 # online permanently.
1523 #
1524 typeset dummy=/tmp/dummy
1525
1526 if [[ -d $dummy ]]; then
c1d9abf9 1527 log_must rm -rf $dummy
6bb24f4d
BB
1528 fi
1529
c1d9abf9
JWK
1530 log_must mkdir $dummy
1531 log_must share $dummy
6bb24f4d
BB
1532
1533 #
1534 # Wait for the fmri's status to reach its final state.
1535 # While in transition, an asterisk (*) is appended to the instance
1536 # state, and an unshare would revert the status to 'DIS' again.
1537 #
1538 # Wait for at least 1 second.
1539 #
c1d9abf9 1540 log_must sleep 1
6bb24f4d 1541 timeout=10
c1d9abf9 1542 while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
6bb24f4d 1543 do
c1d9abf9 1544 log_must sleep 1
6bb24f4d
BB
1545
1546 ((timeout -= 1))
1547 done
1548
c1d9abf9
JWK
1549 log_must unshare $dummy
1550 log_must rm -rf $dummy
6bb24f4d
BB
1551 fi
1552
c1d9abf9 1553 log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
6bb24f4d
BB
1554}
1555
1556#
1557# To verify whether calling process is in global zone
1558#
1559# Return 0 if in global zone, 1 in non-global zone
1560#
1561function is_global_zone
1562{
7839c4b5 1563 if is_linux || is_freebsd; then
c1d9abf9
JWK
1564 return 0
1565 else
1566 typeset cur_zone=$(zonename 2>/dev/null)
bf228f3d 1567 [ $cur_zone = "global" ]
6bb24f4d 1568 fi
6bb24f4d
BB
1569}
1570
1571#
1572# Verify whether test is permitted to run from
1573# global zone, local zone, or both
1574#
1575# $1 zone limit, could be "global", "local", or "both"(no limit)
1576#
1577# Return 0 if permitted, otherwise exit with log_unsupported
1578#
1579function verify_runnable # zone limit
1580{
1581 typeset limit=$1
1582
1583 [[ -z $limit ]] && return 0
1584
1585 if is_global_zone ; then
1586 case $limit in
1587 global|both)
1588 ;;
1589 local) log_unsupported "Test is unable to run from "\
1590 "global zone."
1591 ;;
1592 *) log_note "Warning: unknown limit $limit - " \
1593 "use both."
1594 ;;
1595 esac
1596 else
1597 case $limit in
1598 local|both)
1599 ;;
1600 global) log_unsupported "Test is unable to run from "\
1601 "local zone."
1602 ;;
1603 *) log_note "Warning: unknown limit $limit - " \
1604 "use both."
1605 ;;
1606 esac
1607
1608 reexport_pool
1609 fi
1610
1611 return 0
1612}
1613
1614# Return 0 if the pool is created successfully; $? otherwise
1615# Note: In local zones, this function should return 0 silently.
1616#
1617# $1 - pool name
1618# $2-n - [keyword] devs_list
1619
1620function create_pool #pool devs_list
1621{
1622 typeset pool=${1%%/*}
1623
1624 shift
1625
1626 if [[ -z $pool ]]; then
1627 log_note "Missing pool name."
1628 return 1
1629 fi
1630
1631 if poolexists $pool ; then
1632 destroy_pool $pool
1633 fi
1634
1635 if is_global_zone ; then
c1d9abf9
JWK
1636 [[ -d /$pool ]] && rm -rf /$pool
1637 log_must zpool create -f $pool $@
6bb24f4d
BB
1638 fi
1639
1640 return 0
1641}
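#
# Illustrative usage (commented out); any existing $TESTPOOL is destroyed
# first and the member devices come from $DISKS:
#
#	create_pool $TESTPOOL $DISKS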
1642
1643# Return 0 if the pool is destroyed successfully; $? otherwise
1644# Note: In local zones, this function should return 0 silently.
1645#
1646# $1 - pool name
1647# Destroy pool with the given parameters.
1648
1649function destroy_pool #pool
1650{
1651 typeset pool=${1%%/*}
1652 typeset mtpt
1653
1654 if [[ -z $pool ]]; then
1655 log_note "No pool name given."
1656 return 1
1657 fi
1658
1659 if is_global_zone ; then
1660 if poolexists "$pool" ; then
1661 mtpt=$(get_prop mountpoint "$pool")
1662
851aa99c
BB
1663 # At times, syseventd/udev activity can cause attempts
1664 # to destroy a pool to fail with EBUSY. We retry a few
6bb24f4d
BB
1665 # times allowing failures before requiring the destroy
1666 # to succeed.
851aa99c 1667 log_must_busy zpool destroy -f $pool
6bb24f4d
BB
1668
1669 [[ -d $mtpt ]] && \
c1d9abf9 1670 log_must rm -rf $mtpt
6bb24f4d
BB
1671 else
1672 log_note "Pool does not exist. ($pool)"
1673 return 1
1674 fi
1675 fi
1676
1677 return 0
1678}
1679
93491c4b
JWK
1680# Return 0 if created successfully; $? otherwise
1681#
1682# $1 - dataset name
1683# $2-n - dataset options
1684
1685function create_dataset #dataset dataset_options
1686{
1687 typeset dataset=$1
1688
1689 shift
1690
1691 if [[ -z $dataset ]]; then
1692 log_note "Missing dataset name."
1693 return 1
1694 fi
1695
1696 if datasetexists $dataset ; then
1697 destroy_dataset $dataset
1698 fi
1699
1700 log_must zfs create $@ $dataset
1701
1702 return 0
1703}
1704
c7b55e71
GDN
1705# Return 0 if the dataset is destroyed successfully; $? otherwise
1706# Note: In local zones, this function should return 0 silently.
1707#
1708# $1 - dataset name
1709# $2 - custom arguments for zfs destroy
1710# Destroy dataset with the given parameters.
1711
41ebf403 1712function destroy_dataset # dataset [args]
c7b55e71
GDN
1713{
1714 typeset dataset=$1
1715 typeset mtpt
1716 typeset args=${2:-""}
1717
1718 if [[ -z $dataset ]]; then
1719 log_note "No dataset name given."
1720 return 1
1721 fi
1722
1723 if is_global_zone ; then
1724 if datasetexists "$dataset" ; then
1725 mtpt=$(get_prop mountpoint "$dataset")
1726 log_must_busy zfs destroy $args $dataset
1727
41ebf403 1728 [ -d $mtpt ] && log_must rm -rf $mtpt
c7b55e71
GDN
1729 else
1730 log_note "Dataset does not exist. ($dataset)"
1731 return 1
1732 fi
1733 fi
1734
1735 return 0
1736}
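#
# Illustrative usage (commented out); the optional second argument is passed
# straight through to 'zfs destroy':
#
#	destroy_dataset "$TESTPOOL/$TESTFS" "-r"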
1737
6bb24f4d
BB
1738#
1739# Firstly, create a pool with 5 datasets. Then, create a single zone and
1740# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1741# and a zvol device to the zone.
1742#
1743# $1 zone name
1744# $2 zone root directory prefix
1745# $3 zone ip
1746#
1747function zfs_zones_setup #zone_name zone_root zone_ip
1748{
1749 typeset zone_name=${1:-$(hostname)-z}
1750 typeset zone_root=${2:-"/zone_root"}
1751 typeset zone_ip=${3:-"10.1.1.10"}
1752 typeset prefix_ctr=$ZONE_CTR
1753 typeset pool_name=$ZONE_POOL
1754 typeset -i cntctr=5
1755 typeset -i i=0
1756
1757 # Create pool and 5 container within it
1758 #
c1d9abf9
JWK
1759 [[ -d /$pool_name ]] && rm -rf /$pool_name
1760 log_must zpool create -f $pool_name $DISKS
6bb24f4d 1761 while ((i < cntctr)); do
c1d9abf9 1762 log_must zfs create $pool_name/$prefix_ctr$i
6bb24f4d
BB
1763 ((i += 1))
1764 done
1765
1766 # create a zvol
c1d9abf9 1767 log_must zfs create -V 1g $pool_name/zone_zvol
6bb24f4d
BB
1768 block_device_wait
1769
1770 #
0989d798 1771 # Add slog device for pool
6bb24f4d 1772 #
0989d798
CS
1773 typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
1774 log_must mkfile $MINVDEVSIZE $sdevs
1775 log_must zpool add $pool_name log mirror $sdevs
6bb24f4d
BB
1776
1777 # this isn't supported just yet.
1778 # Create a filesystem. In order to add this to
1779 # the zone, it must have its mountpoint set to 'legacy'
c1d9abf9
JWK
1780 # log_must zfs create $pool_name/zfs_filesystem
1781 # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
6bb24f4d
BB
1782
1783 [[ -d $zone_root ]] && \
c1d9abf9 1784 log_must rm -rf $zone_root/$zone_name
6bb24f4d 1785 [[ ! -d $zone_root ]] && \
c1d9abf9 1786 log_must mkdir -p -m 0700 $zone_root/$zone_name
6bb24f4d
BB
1787
1788 # Create zone configure file and configure the zone
1789 #
1790 typeset zone_conf=/tmp/zone_conf.$$
c1d9abf9
JWK
1791 echo "create" > $zone_conf
1792 echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1793 echo "set autoboot=true" >> $zone_conf
6bb24f4d
BB
1794 i=0
1795 while ((i < cntctr)); do
c1d9abf9
JWK
1796 echo "add dataset" >> $zone_conf
1797 echo "set name=$pool_name/$prefix_ctr$i" >> \
6bb24f4d 1798 $zone_conf
c1d9abf9 1799 echo "end" >> $zone_conf
6bb24f4d
BB
1800 ((i += 1))
1801 done
1802
1803 # add our zvol to the zone
c1d9abf9
JWK
1804 echo "add device" >> $zone_conf
1805 echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1806 echo "end" >> $zone_conf
6bb24f4d
BB
1807
1808 # add a corresponding zvol rdsk to the zone
c1d9abf9
JWK
1809 echo "add device" >> $zone_conf
1810 echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1811 echo "end" >> $zone_conf
6bb24f4d
BB
1812
1813 # once it's supported, we'll add our filesystem to the zone
c1d9abf9
JWK
1814 # echo "add fs" >> $zone_conf
1815 # echo "set type=zfs" >> $zone_conf
1816 # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1817 # echo "set dir=/export/zfs_filesystem" >> $zone_conf
1818 # echo "end" >> $zone_conf
6bb24f4d 1819
c1d9abf9
JWK
1820 echo "verify" >> $zone_conf
1821 echo "commit" >> $zone_conf
1822 log_must zonecfg -z $zone_name -f $zone_conf
1823 log_must rm -f $zone_conf
6bb24f4d
BB
1824
1825 # Install the zone
23914a3b 1826 if zoneadm -z $zone_name install; then
c1d9abf9 1827 log_note "SUCCESS: zoneadm -z $zone_name install"
6bb24f4d 1828 else
c1d9abf9 1829 log_fail "FAIL: zoneadm -z $zone_name install"
6bb24f4d
BB
1830 fi
1831
1832 # Install sysidcfg file
1833 #
1834 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
c1d9abf9
JWK
1835 echo "system_locale=C" > $sysidcfg
1836 echo "terminal=dtterm" >> $sysidcfg
1837 echo "network_interface=primary {" >> $sysidcfg
1838 echo "hostname=$zone_name" >> $sysidcfg
1839 echo "}" >> $sysidcfg
1840 echo "name_service=NONE" >> $sysidcfg
1841 echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
1842 echo "security_policy=NONE" >> $sysidcfg
1843 echo "timezone=US/Eastern" >> $sysidcfg
6bb24f4d
BB
1844
1845 # Boot this zone
c1d9abf9 1846 log_must zoneadm -z $zone_name boot
6bb24f4d
BB
1847}
1848
1849#
1850# Reexport TESTPOOL & TESTPOOL(1-4)
1851#
1852function reexport_pool
1853{
1854 typeset -i cntctr=5
1855 typeset -i i=0
1856
1857 while ((i < cntctr)); do
1858 if ((i == 0)); then
1859 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1860 if ! ismounted $TESTPOOL; then
c1d9abf9 1861 log_must zfs mount $TESTPOOL
6bb24f4d
BB
1862 fi
1863 else
1864 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1865 if eval ! ismounted \$TESTPOOL$i; then
c1d9abf9 1866 log_must eval zfs mount \$TESTPOOL$i
6bb24f4d
BB
1867 fi
1868 fi
1869 ((i += 1))
1870 done
1871}
1872
1873#
ec0e24c2 1874# Verify a given disk or pool state
6bb24f4d
BB
1875#
1876# Return 0 if pool/disk matches expected state, 1 otherwise
1877#
ec0e24c2 1878function check_state # pool disk state{online,offline,degraded}
6bb24f4d
BB
1879{
1880 typeset pool=$1
1881 typeset disk=${2#$DEV_DSKDIR/}
1882 typeset state=$3
1883
ec0e24c2
SV
1884 [[ -z $pool ]] || [[ -z $state ]] \
1885 && log_fail "Arguments invalid or missing"
1886
1887 if [[ -z $disk ]]; then
1888 #check pool state only
23914a3b 1889 zpool get -H -o value health $pool | grep -qi "$state"
ec0e24c2 1890 else
23914a3b 1891 zpool status -v $pool | grep "$disk" | grep -qi "$state"
ec0e24c2 1892 fi
6bb24f4d
BB
1893}
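#
# Illustrative usage (commented out); pass an empty disk argument to check
# only the overall pool health:
#
#	check_state $TESTPOOL "" "ONLINE" || log_fail "$TESTPOOL is not ONLINE"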
1894
1895#
1896# Get the mountpoint of snapshot
1897# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1898# as its mountpoint
1899#
1900function snapshot_mountpoint
1901{
1902 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1903
1904 if [[ $dataset != *@* ]]; then
1905 log_fail "Invalid snapshot name '$dataset'."
1906 fi
1907
1908 typeset fs=${dataset%@*}
1909 typeset snap=${dataset#*@}
1910
1911 if [[ -z $fs || -z $snap ]]; then
1912 log_fail "Invalid snapshot name '$dataset'."
1913 fi
1914
c1d9abf9 1915 echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
6bb24f4d
BB
1916}
1917
dddef7d6 1918#
1919# Given a device and 'ashift' value verify it's correctly set on every label
1920#
1921function verify_ashift # device ashift
1922{
1923 typeset device="$1"
1924 typeset ashift="$2"
1925
75746e9a
AZ
1926 zdb -e -lll $device | awk -v ashift=$ashift '
1927 /ashift: / {
1928 if (ashift != $2)
1929 exit 1;
1930 else
1931 count++;
1932 }
1933 END {
1934 exit (count != 4);
dddef7d6 1935 }'
dddef7d6 1936}
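#
# Illustrative usage (commented out); the device path is a placeholder and
# 12 is just an example ashift value:
#
#	log_must verify_ashift $DEV_RDSKDIR/disk1 12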
1937
6bb24f4d
BB
1938#
1939# Given a pool and file system, this function will verify the file system
1940# using the zdb internal tool. Note that the pool is exported and imported
1941# to ensure it has consistent state.
1942#
1943function verify_filesys # pool filesystem dir
1944{
1945 typeset pool="$1"
1946 typeset filesys="$2"
1947 typeset zdbout="/tmp/zdbout.$$"
1948
1949 shift
1950 shift
1951 typeset dirs=$@
1952 typeset search_path=""
1953
c1d9abf9
JWK
1954 log_note "Calling zdb to verify filesystem '$filesys'"
1955 zfs unmount -a > /dev/null 2>&1
1956 log_must zpool export $pool
6bb24f4d
BB
1957
1958 if [[ -n $dirs ]] ; then
1959 for dir in $dirs ; do
1960 search_path="$search_path -d $dir"
1961 done
1962 fi
1963
c1d9abf9 1964 log_must zpool import $search_path $pool
6bb24f4d 1965
23914a3b 1966 if ! zdb -cudi $filesys > $zdbout 2>&1; then
c1d9abf9
JWK
1967 log_note "Output: zdb -cudi $filesys"
1968 cat $zdbout
23914a3b 1969 rm -f $zdbout
c1d9abf9 1970 log_fail "zdb detected errors with: '$filesys'"
6bb24f4d
BB
1971 fi
1972
c1d9abf9
JWK
1973 log_must zfs mount -a
1974 log_must rm -rf $zdbout
6bb24f4d
BB
1975}
1976
7c9a4292
BB
1977#
1978# Given a pool issue a scrub and verify that no checksum errors are reported.
1979#
1980function verify_pool
1981{
1982 typeset pool=${1:-$TESTPOOL}
1983
1984 log_must zpool scrub $pool
1985 log_must wait_scrubbed $pool
1986
6ed4391d
RM
1987 typeset -i cksum=$(zpool status $pool | awk '
1988 !NF { isvdev = 0 }
1989 isvdev { errors += $NF }
1990 /CKSUM$/ { isvdev = 1 }
1991 END { print errors }
1992 ')
7c9a4292
BB
1993 if [[ $cksum != 0 ]]; then
1994 log_must zpool status -v
1995 log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
1996 fi
1997}
1998
6bb24f4d
BB
1999#
2000# Given a pool, and this function list all disks in the pool
2001#
2002function get_disklist # pool
2003{
f63c9dc7
AZ
2004 echo $(zpool iostat -v $1 | awk '(NR > 4) {print $1}' | \
2005 grep -vEe '^-----' -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
6bb24f4d
BB
2006}
2007
3c67d83a
TH
2008#
2009# Given a pool, and this function list all disks in the pool with their full
2010# path (like "/dev/sda" instead of "sda").
2011#
2012function get_disklist_fullpath # pool
2013{
270d0a5a 2014 get_disklist "-P $1"
3c67d83a
TH
2015}
2016
2017
2018
6bb24f4d
BB
2019# /**
2020# This function kills a given list of processes after a time period. We use
2021# this in the stress tests instead of STF_TIMEOUT so that we can have processes
2022# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
2023# would be listed as FAIL, which we don't want: we're happy with stress tests
2024# running for a certain amount of time, then finishing.
2025#
2026# @param $1 the time in seconds after which we should terminate these processes
2027# @param $2..$n the processes we wish to terminate.
2028# */
2029function stress_timeout
2030{
2031 typeset -i TIMEOUT=$1
2032 shift
2033 typeset cpids="$@"
2034
2035 log_note "Waiting for child processes($cpids). " \
2036 "It could last dozens of minutes, please be patient ..."
c1d9abf9 2037 log_must sleep $TIMEOUT
6bb24f4d
BB
2038
2039 log_note "Killing child processes after ${TIMEOUT} stress timeout."
2040 typeset pid
2041 for pid in $cpids; do
23914a3b 2042 ps -p $pid > /dev/null 2>&1 &&
c1d9abf9 2043 log_must kill -USR1 $pid
6bb24f4d
BB
2044 done
2045}
2046
2047#
2048# Verify a given hotspare disk is inuse or avail
2049#
 2050# Return 0 if pool/disk matches expected state, 1 otherwise
2051#
2052function check_hotspare_state # pool disk state{inuse,avail}
2053{
2054 typeset pool=$1
2055 typeset disk=${2#$DEV_DSKDIR/}
2056 typeset state=$3
2057
2058 cur_state=$(get_device_state $pool $disk "spares")
2059
bd328a58 2060 [ $state = $cur_state ]
6bb24f4d
BB
2061}
2062
d9daa7ab
DQ
2063#
2064# Wait until a hotspare transitions to a given state or times out.
2065#
2066# Return 0 when pool/disk matches expected state, 1 on timeout.
2067#
2068function wait_hotspare_state # pool disk state timeout
2069{
2070 typeset pool=$1
6fa1e1e7 2071 typeset disk=${2#*$DEV_DSKDIR/}
d9daa7ab
DQ
2072 typeset state=$3
2073 typeset timeout=${4:-60}
2074 typeset -i i=0
2075
2076 while [[ $i -lt $timeout ]]; do
2077 if check_hotspare_state $pool $disk $state; then
2078 return 0
2079 fi
2080
2081 i=$((i+1))
2082 sleep 1
2083 done
2084
2085 return 1
2086}
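#
# Example usage of wait_hotspare_state() (hypothetical $SPARE_DISK): wait
# up to 90 seconds for a spare to be pulled in after a fault:
#
#	wait_hotspare_state $TESTPOOL $SPARE_DISK "INUSE" 90 ||
#	    log_fail "spare $SPARE_DISK never became INUSE"
#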
2087
6bb24f4d
BB
2088#
 2089# Verify a given slog disk is in the expected state (online, offline or unavail)
 2090#
 2091# Return 0 if pool/disk matches expected state, 1 otherwise
2092#
2093function check_slog_state # pool disk state{online,offline,unavail}
2094{
2095 typeset pool=$1
2096 typeset disk=${2#$DEV_DSKDIR/}
2097 typeset state=$3
2098
2099 cur_state=$(get_device_state $pool $disk "logs")
2100
2101 if [[ $state != ${cur_state} ]]; then
2102 return 1
2103 fi
2104 return 0
2105}
2106
2107#
 2108# Verify a given vdev disk is in the expected state (online, offline or unavail)
 2109#
 2110# Return 0 if pool/disk matches expected state, 1 otherwise
2111#
2112function check_vdev_state # pool disk state{online,offline,unavail}
2113{
2114 typeset pool=$1
6fa1e1e7 2115 typeset disk=${2#*$DEV_DSKDIR/}
6bb24f4d
BB
2116 typeset state=$3
2117
2118 cur_state=$(get_device_state $pool $disk)
2119
bd328a58 2120 [ $state = $cur_state ]
6bb24f4d
BB
2121}
2122
d9daa7ab
DQ
2123#
2124# Wait until a vdev transitions to a given state or times out.
2125#
2126# Return 0 when pool/disk matches expected state, 1 on timeout.
2127#
2128function wait_vdev_state # pool disk state timeout
2129{
2130 typeset pool=$1
6fa1e1e7 2131 typeset disk=${2#*$DEV_DSKDIR/}
d9daa7ab
DQ
2132 typeset state=$3
2133 typeset timeout=${4:-60}
2134 typeset -i i=0
2135
2136 while [[ $i -lt $timeout ]]; do
2137 if check_vdev_state $pool $disk $state; then
2138 return 0
2139 fi
2140
2141 i=$((i+1))
2142 sleep 1
2143 done
2144
2145 return 1
2146}
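#
# Example usage of wait_vdev_state() (hypothetical $DISK1): wait up to the
# default 60 seconds for a reattached disk to come back online:
#
#	wait_vdev_state $TESTPOOL $DISK1 "ONLINE" ||
#	    log_fail "$DISK1 did not return to ONLINE"
#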
2147
6bb24f4d
BB
2148#
2149# Check the output of 'zpool status -v <pool>',
 2150# to see if the content of <token> contains the <keyword> specified.
 2151#
 2152# Return 0 if it contains the keyword, 1 otherwise
2153#
0ea05c64 2154function check_pool_status # pool token keyword <verbose>
6bb24f4d
BB
2155{
2156 typeset pool=$1
2157 typeset token=$2
2158 typeset keyword=$3
0ea05c64 2159 typeset verbose=${4:-false}
6bb24f4d 2160
964d4180 2161 scan=$(zpool status -v "$pool" 2>/dev/null | awk -v token="$token:" '$1==token')
0ea05c64
AP
2162 if [[ $verbose == true ]]; then
2163 log_note $scan
2164 fi
f63c9dc7 2165 echo $scan | grep -qi "$keyword"
6bb24f4d
BB
2166}
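#
# Example usage of check_pool_status(): test whether the last scan was a
# completed scrub, logging the matched status line (the optional fourth
# argument enables the verbose log_note):
#
#	if check_pool_status $TESTPOOL "scan" "scrub repaired" true; then
#		log_note "scrub already completed on $TESTPOOL"
#	fi
#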
2167
2168#
e60e158e 2169# The following functions are instances of check_pool_status():
9a49d3f3
BB
2170# is_pool_resilvering - to check if the pool resilver is in progress
2171# is_pool_resilvered - to check if the pool resilver is completed
2172# is_pool_scrubbing - to check if the pool scrub is in progress
2173# is_pool_scrubbed - to check if the pool scrub is completed
2174# is_pool_scrub_stopped - to check if the pool scrub is stopped
2175# is_pool_scrub_paused - to check if the pool scrub has paused
 2176# is_pool_removing - to check if the pool is removing a vdev
2177# is_pool_removed - to check if the pool remove is completed
2178# is_pool_discarding - to check if the pool checkpoint is being discarded
6bb24f4d 2179#
0ea05c64
AP
2180function is_pool_resilvering #pool <verbose>
2181{
9a49d3f3 2182 check_pool_status "$1" "scan" \
b2255edc 2183 "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
0ea05c64
AP
2184}
2185
2186function is_pool_resilvered #pool <verbose>
6bb24f4d 2187{
0ea05c64 2188 check_pool_status "$1" "scan" "resilvered " $2
6bb24f4d
BB
2189}
2190
0ea05c64 2191function is_pool_scrubbing #pool <verbose>
6bb24f4d 2192{
0ea05c64 2193 check_pool_status "$1" "scan" "scrub in progress since " $2
6bb24f4d
BB
2194}
2195
0ea05c64 2196function is_pool_scrubbed #pool <verbose>
6bb24f4d 2197{
0ea05c64 2198 check_pool_status "$1" "scan" "scrub repaired" $2
6bb24f4d
BB
2199}
2200
0ea05c64 2201function is_pool_scrub_stopped #pool <verbose>
6bb24f4d 2202{
0ea05c64 2203 check_pool_status "$1" "scan" "scrub canceled" $2
6bb24f4d
BB
2204}
2205
0ea05c64 2206function is_pool_scrub_paused #pool <verbose>
6bb24f4d 2207{
0ea05c64 2208 check_pool_status "$1" "scan" "scrub paused since " $2
6bb24f4d
BB
2209}
2210
a1d477c2
MA
2211function is_pool_removing #pool
2212{
2213 check_pool_status "$1" "remove" "in progress since "
a1d477c2
MA
2214}
2215
2216function is_pool_removed #pool
2217{
2218 check_pool_status "$1" "remove" "completed on"
a1d477c2
MA
2219}
2220
e60e158e
JG
2221function is_pool_discarding #pool
2222{
2223 check_pool_status "$1" "checkpoint" "discarding"
e60e158e
JG
2224}
2225
ab44e511
JWK
2226function wait_for_degraded
2227{
2228 typeset pool=$1
2229 typeset timeout=${2:-30}
2230 typeset t0=$SECONDS
2231
2232 while :; do
2233 [[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
2234 log_note "$pool is not yet degraded."
2235 sleep 1
2236 if ((SECONDS - t0 > $timeout)); then
2237 log_note "$pool not degraded after $timeout seconds."
2238 return 1
2239 fi
2240 done
2241
2242 return 0
2243}
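#
# Example usage of wait_for_degraded(): after taking one side of a mirror
# offline (not shown), wait up to 60 seconds for the pool state to change:
#
#	wait_for_degraded $TESTPOOL 60 || log_fail "$TESTPOOL never degraded"
#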
2244
6bb24f4d 2245#
4e33ba4c 2246# Use create_pool()/destroy_pool() to clean up the information in
6bb24f4d
BB
2247# in the given disk to avoid slice overlapping.
2248#
2249function cleanup_devices #vdevs
2250{
2251 typeset pool="foopool$$"
2252
581ca281
BB
2253 for vdev in $@; do
2254 zero_partitions $vdev
2255 done
6bb24f4d 2256
581ca281 2257 poolexists $pool && destroy_pool $pool
6bb24f4d
BB
2258 create_pool $pool $@
2259 destroy_pool $pool
2260
2261 return 0
2262}
2263
6bb24f4d
BB
2264#/**
 2265# A function to find free disks on a system, or among the disks given
 2266# as parameters. It works by excluding disks that are in use as swap
 2267# devices or dump devices, and disks listed in /etc/vfstab
2268#
2269# $@ given disks to find which are free, default is all disks in
2270# the test system
2271#
2272# @return a string containing the list of available disks
2273#*/
2274function find_disks
2275{
2276 # Trust provided list, no attempt is made to locate unused devices.
7839c4b5 2277 if is_linux || is_freebsd; then
c1d9abf9 2278 echo "$@"
6bb24f4d
BB
2279 return
2280 fi
2281
2282
2283 sfi=/tmp/swaplist.$$
2284 dmpi=/tmp/dumpdev.$$
2285 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2286
c1d9abf9
JWK
2287 swap -l > $sfi
2288 dumpadm > $dmpi 2>/dev/null
6bb24f4d 2289
75746e9a
AZ
2290 disks=${@:-$(echo "" | format -e 2>/dev/null | awk '
2291BEGIN { FS="."; }
6bb24f4d 2292
75746e9a
AZ
2293/^Specify disk/{
2294 searchdisks=0;
2295}
6bb24f4d 2296
75746e9a
AZ
2297{
2298 if (searchdisks && $2 !~ "^$"){
2299 split($2,arr," ");
2300 print arr[1];
6bb24f4d 2301 }
75746e9a 2302}
6bb24f4d 2303
75746e9a
AZ
2304/^AVAILABLE DISK SELECTIONS:/{
2305 searchdisks=1;
2306}
2307')}
6bb24f4d
BB
2308
2309 unused=""
2310 for disk in $disks; do
2311 # Check for mounted
23914a3b 2312 grep -q "${disk}[sp]" /etc/mnttab && continue
6bb24f4d 2313 # Check for swap
23914a3b 2314 grep -q "${disk}[sp]" $sfi && continue
6bb24f4d 2315 # check for dump device
23914a3b 2316 grep -q "${disk}[sp]" $dmpi && continue
6bb24f4d
BB
2317 # check to see if this disk hasn't been explicitly excluded
2318 # by a user-set environment variable
23914a3b 2319 echo "${ZFS_HOST_DEVICES_IGNORE}" | grep -q "${disk}" && continue
6bb24f4d
BB
2320 unused_candidates="$unused_candidates $disk"
2321 done
23914a3b 2322 rm $sfi $dmpi
6bb24f4d
BB
2323
2324# now just check to see if those disks do actually exist
2325# by looking for a device pointing to the first slice in
2326# each case. limit the number to max_finddisksnum
2327 count=0
2328 for disk in $unused_candidates; do
665684d7
RM
2329 if is_disk_device $DEV_DSKDIR/${disk}s0 && \
2330 [ $count -lt $max_finddisksnum ]; then
6bb24f4d
BB
2331 unused="$unused $disk"
2332 # do not impose limit if $@ is provided
2333 [[ -z $@ ]] && ((count = count + 1))
2334 fi
6bb24f4d
BB
2335 done
2336
2337# finally, return our disk list
c1d9abf9 2338 echo $unused
6bb24f4d
BB
2339}
2340
7839c4b5
MM
2341function add_user_freebsd #<group_name> <user_name> <basedir>
2342{
2343 typeset group=$1
2344 typeset user=$2
2345 typeset basedir=$3
2346
2347 # Check to see if the user exists.
2348 if id $user > /dev/null 2>&1; then
2349 return 0
2350 fi
2351
2352 # Assign 1000 as the base uid
2353 typeset -i uid=1000
2354 while true; do
7839c4b5 2355 pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
23914a3b 2356 case $? in
7839c4b5
MM
2357 0) break ;;
2358 # The uid is not unique
2359 65) ((uid += 1)) ;;
2360 *) return 1 ;;
2361 esac
2362 if [[ $uid == 65000 ]]; then
2363 log_fail "No user id available under 65000 for $user"
2364 fi
2365 done
2366
2367 # Silence MOTD
2368 touch $basedir/$user/.hushlogin
2369
2370 return 0
2371}
2372
2373#
2374# Delete the specified user.
2375#
2376# $1 login name
2377#
2378function del_user_freebsd #<logname>
2379{
2380 typeset user=$1
2381
2382 if id $user > /dev/null 2>&1; then
2383 log_must pw userdel $user
2384 fi
2385
2386 return 0
2387}
2388
2389#
2390# Select valid gid and create specified group.
2391#
2392# $1 group name
2393#
2394function add_group_freebsd #<group_name>
2395{
2396 typeset group=$1
2397
2398 # See if the group already exists.
2399 if pw groupshow $group >/dev/null 2>&1; then
2400 return 0
2401 fi
2402
2403 # Assign 1000 as the base gid
2404 typeset -i gid=1000
2405 while true; do
2406 pw groupadd -g $gid -n $group > /dev/null 2>&1
23914a3b 2407 case $? in
7839c4b5
MM
2408 0) return 0 ;;
2409 # The gid is not unique
2410 65) ((gid += 1)) ;;
2411 *) return 1 ;;
2412 esac
2413 if [[ $gid == 65000 ]]; then
2414 log_fail "No user id available under 65000 for $group"
2415 fi
2416 done
2417}
2418
2419#
2420# Delete the specified group.
2421#
2422# $1 group name
2423#
2424function del_group_freebsd #<group_name>
2425{
2426 typeset group=$1
2427
2428 pw groupdel -n $group > /dev/null 2>&1
23914a3b 2429 case $? in
7839c4b5
MM
2430 # Group does not exist, or was deleted successfully.
2431 0|6|65) return 0 ;;
2432 # Name already exists as a group name
2433 9) log_must pw groupdel $group ;;
2434 *) return 1 ;;
2435 esac
2436
2437 return 0
2438}
2439
2440function add_user_illumos #<group_name> <user_name> <basedir>
2441{
2442 typeset group=$1
2443 typeset user=$2
2444 typeset basedir=$3
2445
2446 log_must useradd -g $group -d $basedir/$user -m $user
2447
2448 return 0
2449}
2450
2451function del_user_illumos #<user_name>
2452{
2453 typeset user=$1
2454
2455 if id $user > /dev/null 2>&1; then
2456 log_must_retry "currently used" 6 userdel $user
2457 fi
2458
2459 return 0
2460}
2461
2462function add_group_illumos #<group_name>
2463{
2464 typeset group=$1
2465
2466 typeset -i gid=100
2467 while true; do
2468 groupadd -g $gid $group > /dev/null 2>&1
23914a3b 2469 case $? in
7839c4b5
MM
2470 0) return 0 ;;
2471 # The gid is not unique
2472 4) ((gid += 1)) ;;
2473 *) return 1 ;;
2474 esac
2475 done
2476}
2477
2478function del_group_illumos #<group_name>
2479{
2480 typeset group=$1
2481
 2482 groupmod -n $group $group > /dev/null 2>&1
23914a3b 2483 case $? in
7839c4b5
MM
2484 # Group does not exist.
2485 6) return 0 ;;
2486 # Name already exists as a group name
 2487 9) log_must groupdel $group ;;
2488 *) return 1 ;;
2489 esac
2490}
2491
2492function add_user_linux #<group_name> <user_name> <basedir>
2493{
2494 typeset group=$1
2495 typeset user=$2
2496 typeset basedir=$3
2497
2498 log_must useradd -g $group -d $basedir/$user -m $user
2499
 2500 # Add new users to the same group as the command line utils.
 2501 # This allows them to be run out of the original user's home
 2502 # directory as long as it is permissioned to be group readable.
592cf7f1 2503 cmd_group=$(stat --format="%G" $(command -v zfs))
7839c4b5
MM
2504 log_must usermod -a -G $cmd_group $user
2505
2506 return 0
2507}
2508
2509function del_user_linux #<user_name>
2510{
2511 typeset user=$1
2512
2513 if id $user > /dev/null 2>&1; then
2514 log_must_retry "currently used" 6 userdel $user
2515 fi
7839c4b5
MM
2516}
2517
2518function add_group_linux #<group_name>
2519{
2520 typeset group=$1
2521
 2522 # No base gid is passed to groupadd; let it pick the next free gid,
 2523 # since many Linux distributions reserve gids of 1000 and under.
2524 while true; do
2525 groupadd $group > /dev/null 2>&1
23914a3b 2526 case $? in
7839c4b5
MM
2527 0) return 0 ;;
2528 *) return 1 ;;
2529 esac
2530 done
2531}
2532
2533function del_group_linux #<group_name>
2534{
2535 typeset group=$1
2536
2537 getent group $group > /dev/null 2>&1
23914a3b 2538 case $? in
7839c4b5
MM
2539 # Group does not exist.
2540 2) return 0 ;;
2541 # Name already exists as a group name
2542 0) log_must groupdel $group ;;
2543 *) return 1 ;;
2544 esac
2545
2546 return 0
2547}
2548
6bb24f4d
BB
2549#
2550# Add specified user to specified group
2551#
2552# $1 group name
2553# $2 user name
2554# $3 base of the homedir (optional)
2555#
2556function add_user #<group_name> <user_name> <basedir>
2557{
7839c4b5
MM
2558 typeset group=$1
2559 typeset user=$2
6bb24f4d
BB
2560 typeset basedir=${3:-"/var/tmp"}
2561
7839c4b5 2562 if ((${#group} == 0 || ${#user} == 0)); then
6bb24f4d
BB
2563 log_fail "group name or user name are not defined."
2564 fi
2565
5c9f744b 2566 case "$UNAME" in
7839c4b5
MM
2567 FreeBSD)
2568 add_user_freebsd "$group" "$user" "$basedir"
2569 ;;
2570 Linux)
2571 add_user_linux "$group" "$user" "$basedir"
2572 ;;
2573 *)
2574 add_user_illumos "$group" "$user" "$basedir"
2575 ;;
2576 esac
6bb24f4d
BB
2577
2578 return 0
2579}
2580
2581#
2582# Delete the specified user.
2583#
2584# $1 login name
2585# $2 base of the homedir (optional)
2586#
2587function del_user #<logname> <basedir>
2588{
2589 typeset user=$1
2590 typeset basedir=${2:-"/var/tmp"}
2591
2592 if ((${#user} == 0)); then
2593 log_fail "login name is necessary."
2594 fi
2595
5c9f744b 2596 case "$UNAME" in
7839c4b5
MM
2597 FreeBSD)
2598 del_user_freebsd "$user"
2599 ;;
2600 Linux)
2601 del_user_linux "$user"
2602 ;;
2603 *)
2604 del_user_illumos "$user"
2605 ;;
2606 esac
6bb24f4d 2607
c1d9abf9 2608 [[ -d $basedir/$user ]] && rm -fr $basedir/$user
6bb24f4d
BB
2609
2610 return 0
2611}
2612
2613#
2614# Select valid gid and create specified group.
2615#
2616# $1 group name
2617#
2618function add_group #<group_name>
2619{
2620 typeset group=$1
2621
2622 if ((${#group} == 0)); then
2623 log_fail "group name is necessary."
2624 fi
2625
5c9f744b 2626 case "$UNAME" in
7839c4b5
MM
2627 FreeBSD)
2628 add_group_freebsd "$group"
2629 ;;
2630 Linux)
2631 add_group_linux "$group"
2632 ;;
2633 *)
2634 add_group_illumos "$group"
2635 ;;
2636 esac
2637
2638 return 0
6bb24f4d
BB
2639}
2640
2641#
2642# Delete the specified group.
2643#
2644# $1 group name
2645#
2646function del_group #<group_name>
2647{
7839c4b5
MM
2648 typeset group=$1
2649
2650 if ((${#group} == 0)); then
6bb24f4d
BB
2651 log_fail "group name is necessary."
2652 fi
2653
5c9f744b 2654 case "$UNAME" in
7839c4b5
MM
2655 FreeBSD)
2656 del_group_freebsd "$group"
2657 ;;
2658 Linux)
2659 del_group_linux "$group"
2660 ;;
2661 *)
2662 del_group_illumos "$group"
2663 ;;
2664 esac
6bb24f4d
BB
2665
2666 return 0
2667}
2668
2669#
2670# This function will return true if it's safe to destroy the pool passed
2671# as argument 1. It checks for pools based on zvols and files, and also
2672# files contained in a pool that may have a different mountpoint.
2673#
2674function safe_to_destroy_pool { # $1 the pool name
2675
2676 typeset pool=""
2677 typeset DONT_DESTROY=""
2678
2679 # We check that by deleting the $1 pool, we're not
2680 # going to pull the rug out from other pools. Do this
2681 # by looking at all other pools, ensuring that they
2682 # aren't built from files or zvols contained in this pool.
2683
c1d9abf9 2684 for pool in $(zpool list -H -o name)
6bb24f4d
BB
2685 do
2686 ALTMOUNTPOOL=""
2687
 2688 # this is a list of the file vdevs in this pool whose paths
 2689 # lie under the mountpoint of the pool given as $1
75746e9a 2690 FILEPOOL=$(zpool status -v $pool | awk -v pool="/$1/" '$0 ~ pool {print $1}')
6bb24f4d
BB
2691
2692 # this is a list of the zvols that make up the pool
75746e9a 2693 ZVOLPOOL=$(zpool status -v $pool | awk -v zvols="$ZVOL_DEVDIR/$1$" '$0 ~ zvols {print $1}')
6bb24f4d
BB
2694
2695 # also want to determine if it's a file-based pool using an
2696 # alternate mountpoint...
c1d9abf9 2697 POOL_FILE_DIRS=$(zpool status -v $pool | \
75746e9a
AZ
2698 awk '/\// {print $1}' | \
2699 awk -F/ '!/dev/ {print $2}')
6bb24f4d
BB
2700
2701 for pooldir in $POOL_FILE_DIRS
2702 do
c1d9abf9 2703 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
75746e9a 2704 awk -v pd="${pooldir}$" '$0 ~ pd {print $1}')
6bb24f4d
BB
2705
2706 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2707 done
2708
2709
2710 if [ ! -z "$ZVOLPOOL" ]
2711 then
2712 DONT_DESTROY="true"
2713 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2714 fi
2715
2716 if [ ! -z "$FILEPOOL" ]
2717 then
2718 DONT_DESTROY="true"
2719 log_note "Pool $pool is built from $FILEPOOL on $1"
2720 fi
2721
2722 if [ ! -z "$ALTMOUNTPOOL" ]
2723 then
2724 DONT_DESTROY="true"
2725 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2726 fi
2727 done
2728
2729 if [ -z "${DONT_DESTROY}" ]
2730 then
2731 return 0
2732 else
2733 log_note "Warning: it is not safe to destroy $1!"
2734 return 1
2735 fi
2736}
2737
6bb24f4d
BB
2738#
2739# Verify zfs operation with -p option work as expected
2740# $1 operation, value could be create, clone or rename
2741# $2 dataset type, value could be fs or vol
2742# $3 dataset name
2743# $4 new dataset name
2744#
2745function verify_opt_p_ops
2746{
2747 typeset ops=$1
2748 typeset datatype=$2
2749 typeset dataset=$3
2750 typeset newdataset=$4
2751
2752 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2753 log_fail "$datatype is not supported."
2754 fi
2755
2756 # check parameters accordingly
2757 case $ops in
2758 create)
2759 newdataset=$dataset
2760 dataset=""
2761 if [[ $datatype == "vol" ]]; then
2762 ops="create -V $VOLSIZE"
2763 fi
2764 ;;
2765 clone)
2766 if [[ -z $newdataset ]]; then
2767 log_fail "newdataset should not be empty" \
2768 "when ops is $ops."
2769 fi
2770 log_must datasetexists $dataset
2771 log_must snapexists $dataset
2772 ;;
2773 rename)
2774 if [[ -z $newdataset ]]; then
2775 log_fail "newdataset should not be empty" \
2776 "when ops is $ops."
2777 fi
2778 log_must datasetexists $dataset
6bb24f4d
BB
2779 ;;
2780 *)
2781 log_fail "$ops is not supported."
2782 ;;
2783 esac
2784
2785 # make sure the upper level filesystem does not exist
c7b55e71 2786 destroy_dataset "${newdataset%/*}" "-rRf"
6bb24f4d
BB
2787
2788 # without -p option, operation will fail
c1d9abf9 2789 log_mustnot zfs $ops $dataset $newdataset
6bb24f4d
BB
2790 log_mustnot datasetexists $newdataset ${newdataset%/*}
2791
2792 # with -p option, operation should succeed
c1d9abf9 2793 log_must zfs $ops -p $dataset $newdataset
6bb24f4d
BB
2794 block_device_wait
2795
2796 if ! datasetexists $newdataset ; then
2797 log_fail "-p option does not work for $ops"
2798 fi
2799
2800 # when $ops is create or clone, redo the operation still return zero
2801 if [[ $ops != "rename" ]]; then
c1d9abf9 2802 log_must zfs $ops -p $dataset $newdataset
6bb24f4d
BB
2803 fi
2804
2805 return 0
2806}
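#
# Example usage of verify_opt_p_ops() (a sketch; the intermediate dataset
# components are hypothetical): confirm that 'zfs create -p' creates the
# missing parent filesystems:
#
#	verify_opt_p_ops "create" "fs" $TESTPOOL/$TESTFS/parent/child
#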
2807
2808#
2809# Get configuration of pool
2810# $1 pool name
2811# $2 config name
2812#
2813function get_config
2814{
2815 typeset pool=$1
2816 typeset config=$2
6bb24f4d
BB
2817
2818 if ! poolexists "$pool" ; then
2819 return 1
2820 fi
75746e9a
AZ
2821 if [ "$(get_pool_prop cachefile "$pool")" = "none" ]; then
2822 zdb -e $pool
6bb24f4d 2823 else
75746e9a
AZ
2824 zdb -C $pool
2825 fi | awk -F: -v cfg="$config:" '$0 ~ cfg {sub(/^'\''/, $2); sub(/'\''$/, $2); print $2}'
6bb24f4d
BB
2826}
2827
2828#
 2829# Private function. Randomly select one of the items from the arguments.
2830#
2831# $1 count
2832# $2-n string
2833#
2834function _random_get
2835{
2836 typeset cnt=$1
2837 shift
2838
2839 typeset str="$@"
2840 typeset -i ind
2841 ((ind = RANDOM % cnt + 1))
2842
75746e9a 2843 echo "$str" | cut -f $ind -d ' '
6bb24f4d
BB
2844}
2845
2846#
 2847# Randomly select one of the items from the arguments, including a NONE option
2848#
2849function random_get_with_non
2850{
2851 typeset -i cnt=$#
 2852 ((cnt += 1))
2853
2854 _random_get "$cnt" "$@"
2855}
2856
2857#
 2858# Randomly select one of the items from the arguments, without a NONE option
2859#
2860function random_get
2861{
2862 _random_get "$#" "$@"
2863}
2864
6bb24f4d
BB
2865#
 2866# The function will generate a dataset name with a specific length
2867# $1, the length of the name
2868# $2, the base string to construct the name
2869#
2870function gen_dataset_name
2871{
2872 typeset -i len=$1
2873 typeset basestr="$2"
2874 typeset -i baselen=${#basestr}
2875 typeset -i iter=0
2876 typeset l_name=""
2877
2878 if ((len % baselen == 0)); then
2879 ((iter = len / baselen))
2880 else
2881 ((iter = len / baselen + 1))
2882 fi
2883 while ((iter > 0)); do
2884 l_name="${l_name}$basestr"
2885
2886 ((iter -= 1))
2887 done
2888
c1d9abf9 2889 echo $l_name
6bb24f4d
BB
2890}
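#
# Example usage of gen_dataset_name() (a sketch): build a 200-character
# name component, which stays under the 255-character component limit:
#
#	typeset longname=$(gen_dataset_name 200 "abcdefgh")
#	log_must zfs create $TESTPOOL/$longname
#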
2891
2892#
2893# Get cksum tuple of dataset
2894# $1 dataset name
2895#
2896# sample zdb output:
2897# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2898# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2899# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2900# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2901function datasetcksum
2902{
2903 typeset cksum
c1d9abf9 2904 sync
7454275a 2905 sync_all_pools
75746e9a 2906 zdb -vvv $1 | awk -F= -v ds="^Dataset $1 "'\\[' '$0 ~ ds && /cksum/ {print $7}'
6bb24f4d
BB
2907}
2908
2909#
2910# Get the given disk/slice state from the specific field of the pool
2911#
2912function get_device_state #pool disk field("", "spares","logs")
2913{
2914 typeset pool=$1
2915 typeset disk=${2#$DEV_DSKDIR/}
2916 typeset field=${3:-$pool}
2917
270d0a5a
AZ
2918 zpool status -v "$pool" 2>/dev/null | \
2919 awk -v device=$disk -v pool=$pool -v field=$field \
6bb24f4d
BB
2920 'BEGIN {startconfig=0; startfield=0; }
2921 /config:/ {startconfig=1}
2922 (startconfig==1) && ($1==field) {startfield=1; next;}
2923 (startfield==1) && ($1==device) {print $2; exit;}
2924 (startfield==1) &&
270d0a5a 2925 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}'
6bb24f4d
BB
2926}
2927
6bb24f4d
BB
2928#
2929# get the root filesystem name if it's zfsroot system.
2930#
2931# return: root filesystem name
2932function get_rootfs
2933{
2934 typeset rootfs=""
8aab1218 2935
7839c4b5
MM
2936 if is_freebsd; then
2937 rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
2938 elif ! is_linux; then
75746e9a 2939 rootfs=$(awk '$2 == "/" && $3 == "zfs" {print $1}' \
8aab1218
TS
2940 /etc/mnttab)
2941 fi
6bb24f4d
BB
2942 if [[ -z "$rootfs" ]]; then
2943 log_fail "Can not get rootfs"
2944 fi
23914a3b 2945 if datasetexists $rootfs; then
c1d9abf9 2946 echo $rootfs
6bb24f4d
BB
2947 else
2948 log_fail "This is not a zfsroot system."
2949 fi
2950}
2951
2952#
2953# get the rootfs's pool name
2954# return:
2955# rootpool name
2956#
2957function get_rootpool
2958{
bd328a58
AZ
2959 typeset rootfs=$(get_rootfs)
2960 echo ${rootfs%%/*}
6bb24f4d
BB
2961}
2962
2963#
 2964# Verify that the required number of disks is given
2965#
2966function verify_disk_count
2967{
2968 typeset -i min=${2:-1}
2969
bd328a58 2970 typeset -i count=$(echo "$1" | wc -w)
6bb24f4d
BB
2971
2972 if ((count < min)); then
2973 log_untested "A minimum of $min disks is required to run." \
2974 " You specified $count disk(s)"
2975 fi
2976}
2977
2978function ds_is_volume
2979{
2980 typeset type=$(get_prop type $1)
caccfc87 2981 [ $type = "volume" ]
6bb24f4d
BB
2982}
2983
2984function ds_is_filesystem
2985{
2986 typeset type=$(get_prop type $1)
caccfc87 2987 [ $type = "filesystem" ]
6bb24f4d
BB
2988}
2989
2990function ds_is_snapshot
2991{
2992 typeset type=$(get_prop type $1)
caccfc87 2993 [ $type = "snapshot" ]
6bb24f4d
BB
2994}
2995
2996#
2997# Check if Trusted Extensions are installed and enabled
2998#
2999function is_te_enabled
3000{
bd328a58 3001 svcs -H -o state labeld 2>/dev/null | grep -q "enabled"
6bb24f4d
BB
3002}
3003
3004# Utility function to determine if a system has multiple cpus.
3005function is_mp
3006{
5c9f744b 3007 case "$UNAME" in
bd328a58 3008 Linux)
caccfc87 3009 (($(grep -c '^processor' /proc/cpuinfo) > 1))
bd328a58
AZ
3010 ;;
3011 FreeBSD)
 3012 (($(sysctl -n kern.smp.cpus) > 1))
3013 ;;
3014 *)
3015 (($(psrinfo | wc -l) > 1))
3016 ;;
3017 esac
6bb24f4d
BB
3018}
3019
3020function get_cpu_freq
3021{
3022 if is_linux; then
c1d9abf9 3023 lscpu | awk '/CPU MHz/ { print $3 }'
7839c4b5 3024 elif is_freebsd; then
9be70c37 3025 sysctl -n hw.clockrate
6bb24f4d 3026 else
c1d9abf9 3027 psrinfo -v 0 | awk '/processor operates at/ {print $6}'
6bb24f4d
BB
3028 fi
3029}
3030
3031# Run the given command as the user provided.
3032function user_run
3033{
3034 typeset user=$1
3035 shift
3036
e0b53a5d
RM
3037 log_note "user: $user"
3038 log_note "cmd: $*"
3039
3040 typeset out=$TEST_BASE_DIR/out
3041 typeset err=$TEST_BASE_DIR/err
3042
3043 sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
3044 typeset res=$?
3045 log_note "out: $(<$out)"
3046 log_note "err: $(<$err)"
3047 return $res
6bb24f4d
BB
3048}
3049
3050#
3051# Check if the pool contains the specified vdevs
3052#
3053# $1 pool
3054# $2..n <vdev> ...
3055#
3056# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3057# vdevs is not in the pool, and 2 if pool name is missing.
3058#
3059function vdevs_in_pool
3060{
3061 typeset pool=$1
3062 typeset vdev
3063
3064 if [[ -z $pool ]]; then
3065 log_note "Missing pool name."
3066 return 2
3067 fi
3068
3069 shift
3070
7c9a4292
BB
3071 # We could use 'zpool list' to only get the vdevs of the pool but we
3072 # can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
3073 # therefore we use the 'zpool status' output.
c1d9abf9 3074 typeset tmpfile=$(mktemp)
7c9a4292 3075 zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
23914a3b 3076 for vdev in "$@"; do
964d4180 3077 grep -wq ${vdev##*/} $tmpfile || return 1
6bb24f4d
BB
3078 done
3079
c1d9abf9 3080 rm -f $tmpfile
23914a3b 3081 return 0
6bb24f4d
BB
3082}
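#
# Example usage of vdevs_in_pool() (hypothetical $DISK1/$DISK2): make sure
# both disks ended up in the pool that was just created:
#
#	vdevs_in_pool $TESTPOOL $DISK1 $DISK2 ||
#	    log_fail "$DISK1 and/or $DISK2 missing from $TESTPOOL"
#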
3083
679d73e9
JWK
3084function get_max
3085{
3086 typeset -l i max=$1
3087 shift
3088
3089 for i in "$@"; do
9be70c37 3090 max=$((max > i ? max : i))
679d73e9
JWK
3091 done
3092
3093 echo $max
3094}
3095
3096function get_min
3097{
3098 typeset -l i min=$1
3099 shift
3100
3101 for i in "$@"; do
9be70c37 3102 min=$((min < i ? min : i))
679d73e9
JWK
3103 done
3104
3105 echo $min
3106}
3107
a7004725
DK
3108# Write data that can be compressed into a directory
3109function write_compressible
3110{
3111 typeset dir=$1
3112 typeset megs=$2
3113 typeset nfiles=${3:-1}
3114 typeset bs=${4:-1024k}
3115 typeset fname=${5:-file}
3116
3117 [[ -d $dir ]] || log_fail "No directory: $dir"
3118
3119 # Under Linux fio is not currently used since its behavior can
3120 # differ significantly across versions. This includes missing
3121 # command line options and cases where the --buffer_compress_*
3122 # options fail to behave as expected.
3123 if is_linux; then
3124 typeset file_bytes=$(to_bytes $megs)
3125 typeset bs_bytes=4096
3126 typeset blocks=$(($file_bytes / $bs_bytes))
3127
3128 for (( i = 0; i < $nfiles; i++ )); do
3129 truncate -s $file_bytes $dir/$fname.$i
3130
3131 # Write every third block to get 66% compression.
3132 for (( j = 0; j < $blocks; j += 3 )); do
3133 dd if=/dev/urandom of=$dir/$fname.$i \
3134 seek=$j bs=$bs_bytes count=1 \
3135 conv=notrunc >/dev/null 2>&1
3136 done
3137 done
3138 else
2d9da5e1 3139 command -v fio > /dev/null || log_unsupported "fio missing"
bd328a58 3140 log_must eval fio \
a7004725
DK
3141 --name=job \
3142 --fallocate=0 \
3143 --minimal \
3144 --randrepeat=0 \
3145 --buffer_compress_percentage=66 \
3146 --buffer_compress_chunk=4096 \
bd328a58
AZ
3147 --directory="$dir" \
3148 --numjobs="$nfiles" \
3149 --nrfiles="$nfiles" \
a7004725 3150 --rw=write \
bd328a58
AZ
3151 --bs="$bs" \
3152 --filesize="$megs" \
3153 "--filename_format='$fname.\$jobnum' >/dev/null"
a7004725
DK
3154 fi
3155}
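#
# Example usage of write_compressible() (a sketch): create two compressible
# files of roughly 64 MB each in the mounted test filesystem, then push
# them out to disk:
#
#	write_compressible $TESTDIR 64m 2
#	sync_pool $TESTPOOL
#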
3156
3157function get_objnum
3158{
3159 typeset pathname=$1
3160 typeset objnum
3161
3162 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
7839c4b5
MM
3163 if is_freebsd; then
3164 objnum=$(stat -f "%i" $pathname)
3165 else
3166 objnum=$(stat -c %i $pathname)
3167 fi
a7004725
DK
3168 echo $objnum
3169}
3170
1de321e6 3171#
bec1067d 3172# Sync data to the pool
1de321e6
JX
3173#
3174# $1 pool name
bec1067d 3175# $2 boolean to force uberblock (and config including zpool cache file) update
1de321e6 3176#
bec1067d 3177function sync_pool #pool <force>
1de321e6
JX
3178{
3179 typeset pool=${1:-$TESTPOOL}
bec1067d 3180 typeset force=${2:-false}
1de321e6 3181
bec1067d
AP
3182 if [[ $force == true ]]; then
3183 log_must zpool sync -f $pool
3184 else
3185 log_must zpool sync $pool
3186 fi
3187
3188 return 0
1de321e6 3189}
d834b9ce 3190
7454275a
AJ
3191#
3192# Sync all pools
3193#
3194# $1 boolean to force uberblock (and config including zpool cache file) update
3195#
3196function sync_all_pools #<force>
3197{
3198 typeset force=${1:-false}
3199
3200 if [[ $force == true ]]; then
3201 log_must zpool sync -f
3202 else
3203 log_must zpool sync
3204 fi
3205
3206 return 0
3207}
3208
d834b9ce
GM
3209#
3210# Wait for zpool 'freeing' property drops to zero.
3211#
3212# $1 pool name
3213#
3214function wait_freeing #pool
3215{
3216 typeset pool=${1:-$TESTPOOL}
3217 while true; do
c1d9abf9
JWK
3218 [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3219 log_must sleep 1
d834b9ce
GM
3220 done
3221}
7a4500a1 3222
dddef7d6 3223#
3224# Wait for every device replace operation to complete
3225#
3226# $1 pool name
3227#
3228function wait_replacing #pool
3229{
3230 typeset pool=${1:-$TESTPOOL}
75746e9a 3231 while zpool status $pool | grep -qE 'replacing-[0-9]+'; do
dddef7d6 3232 log_must sleep 1
3233 done
3234}
3235
bf95a000
TH
3236# Wait for a pool to be scrubbed
3237#
3238# $1 pool name
56fa4aa9 3239# $2 timeout
bf95a000 3240#
56fa4aa9 3241function wait_scrubbed #pool timeout
bf95a000 3242{
56fa4aa9
RE
3243 typeset timeout=${2:-300}
3244 typeset pool=${1:-$TESTPOOL}
3245 for (( timer = 0; timer < $timeout; timer++ )); do
3246 is_pool_scrubbed $pool && break;
3247 sleep 1;
3248 done
bf95a000
TH
3249}
3250
639b1894
TH
3251# Backup the zed.rc in our test directory so that we can edit it for our test.
3252#
3253# Returns: Backup file name. You will need to pass this to zed_rc_restore().
3254function zed_rc_backup
3255{
3256 zedrc_backup="$(mktemp)"
3257 cp $ZEDLET_DIR/zed.rc $zedrc_backup
3258 echo $zedrc_backup
3259}
3260
3261function zed_rc_restore
3262{
3263 mv $1 $ZEDLET_DIR/zed.rc
3264}
3265
95401cb6
BB
3266#
3267# Setup custom environment for the ZED.
3268#
bf95a000 3269# $@ Optional list of zedlets to run under zed.
95401cb6
BB
3270function zed_setup
3271{
3272 if ! is_linux; then
5c9f744b 3273 log_unsupported "No zed on $UNAME"
95401cb6
BB
3274 fi
3275
3276 if [[ ! -d $ZEDLET_DIR ]]; then
3277 log_must mkdir $ZEDLET_DIR
3278 fi
3279
3280 if [[ ! -e $VDEVID_CONF ]]; then
3281 log_must touch $VDEVID_CONF
3282 fi
3283
3284 if [[ -e $VDEVID_CONF_ETC ]]; then
3285 log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3286 fi
bf95a000 3287 EXTRA_ZEDLETS=$@
95401cb6
BB
3288
3289 # Create a symlink for /etc/zfs/vdev_id.conf file.
3290 log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3291
3292 # Setup minimal ZED configuration. Individual test cases should
3293 # add additional ZEDLETs as needed for their specific test.
3f03fc8d
BB
3294 log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3295 log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
95401cb6 3296
bf95a000
TH
3297 # Scripts must only be user writable.
3298 if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3299 saved_umask=$(umask)
3300 log_must umask 0022
3301 for i in $EXTRA_ZEDLETS ; do
3302 log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3303 done
3304 log_must umask $saved_umask
3305 fi
3306
3f03fc8d
BB
3307 # Customize the zed.rc file to enable the full debug log.
3308 log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
d5e024cb 3309 echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3f03fc8d 3310
95401cb6
BB
3311}
3312
3313#
3314# Cleanup custom ZED environment.
3315#
bf95a000 3316# $@ Optional list of zedlets to remove from our test zed.d directory.
95401cb6
BB
3317function zed_cleanup
3318{
3319 if ! is_linux; then
3320 return
3321 fi
3322
bd328a58
AZ
3323 for extra_zedlet; do
3324 log_must rm -f ${ZEDLET_DIR}/$extra_zedlet
3325 done
3326 log_must rm -fd ${ZEDLET_DIR}/zed.rc ${ZEDLET_DIR}/zed-functions.sh ${ZEDLET_DIR}/all-syslog.sh ${ZEDLET_DIR}/all-debug.sh ${ZEDLET_DIR}/state \
3327 $ZED_LOG $ZED_DEBUG_LOG $VDEVID_CONF_ETC $VDEVID_CONF \
3328 $ZEDLET_DIR
95401cb6
BB
3329}
3330
56fa4aa9
RE
3331#
3332# Check if ZED is currently running; if so, returns PIDs
3333#
3334function zed_check
3335{
3336 if ! is_linux; then
3337 return
3338 fi
3339 zedpids="$(pgrep -x zed)"
56fa4aa9 3340 zedpids2="$(pgrep -x lt-zed)"
56fa4aa9
RE
3341 echo ${zedpids} ${zedpids2}
3342}
3343
7a4500a1
SV
3344#
3345# Check if ZED is currently running, if not start ZED.
3346#
3347function zed_start
3348{
95401cb6
BB
3349 if ! is_linux; then
3350 return
3351 fi
7a4500a1 3352
95401cb6
BB
3353 # ZEDLET_DIR=/var/tmp/zed
3354 if [[ ! -d $ZEDLET_DIR ]]; then
3355 log_must mkdir $ZEDLET_DIR
3356 fi
7a4500a1 3357
95401cb6 3358 # Verify the ZED is not already running.
56fa4aa9
RE
3359 zedpids=$(zed_check)
3360 if [ -n "$zedpids" ]; then
3361 # We never, ever, really want it to just keep going if zed
3362 # is already running - usually this implies our test cases
3363 # will break very strangely because whatever we wanted to
3364 # configure zed for won't be listening to our changes in the
3365 # tmpdir
3366 log_fail "ZED already running - ${zedpids}"
03431390
OF
3367 else
3368 log_note "Starting ZED"
3369 # run ZED in the background and redirect foreground logging
3370 # output to $ZED_LOG.
3371 log_must truncate -s 0 $ZED_DEBUG_LOG
73218f41
AZ
3372 log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
3373 "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
7a4500a1 3374 fi
95401cb6 3375
3f03fc8d 3376 return 0
7a4500a1
SV
3377}
3378
3379#
3380# Kill ZED process
3381#
3382function zed_stop
3383{
95401cb6 3384 if ! is_linux; then
56fa4aa9 3385 return
95401cb6
BB
3386 fi
3387
3f03fc8d 3388 log_note "Stopping ZED"
73218f41 3389 while true; do
56fa4aa9
RE
3390 zedpids=$(zed_check)
3391 [ ! -n "$zedpids" ] && break
73218f41
AZ
3392
3393 log_must kill $zedpids
3394 sleep 1
3395 done
3f03fc8d 3396 return 0
7a4500a1 3397}
8c54ddd3 3398
4e9b1569 3399#
3400# Drain all zevents
3401#
3402function zed_events_drain
3403{
3404 while [ $(zpool events -H | wc -l) -ne 0 ]; do
3405 sleep 1
3406 zpool events -c >/dev/null
3407 done
3408}
3409
639b1894
TH
3410# Set a variable in zed.rc to something, un-commenting it in the process.
3411#
3412# $1 variable
3413# $2 value
3414function zed_rc_set
3415{
3416 var="$1"
3417 val="$2"
3418 # Remove the line
3419 cmd="'/$var/d'"
3420 eval sed -i $cmd $ZEDLET_DIR/zed.rc
3421
3422 # Add it at the end
3423 echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3424}
3425
3426
8c54ddd3
BB
3427#
 3428# Check if the provided device is actively being used as a swap device.
3429#
3430function is_swap_inuse
3431{
3432 typeset device=$1
3433
3434 if [[ -z $device ]] ; then
3435 log_note "No device specified."
3436 return 1
3437 fi
3438
5c9f744b 3439 case "$UNAME" in
62c5ccdf
AZ
3440 Linux)
3441 swapon -s | grep -wq $(readlink -f $device)
3442 ;;
3443 FreeBSD)
3444 swapctl -l | grep -wq $device
3445 ;;
3446 *)
3447 swap -l | grep -wq $device
3448 ;;
3449 esac
8c54ddd3
BB
3450}
3451
3452#
3453# Setup a swap device using the provided device.
3454#
3455function swap_setup
3456{
3457 typeset swapdev=$1
3458
5c9f744b 3459 case "$UNAME" in
62c5ccdf 3460 Linux)
c7a7601c 3461 log_must eval "mkswap $swapdev > /dev/null 2>&1"
8c54ddd3 3462 log_must swapon $swapdev
62c5ccdf
AZ
3463 ;;
3464 FreeBSD)
7839c4b5 3465 log_must swapctl -a $swapdev
62c5ccdf
AZ
3466 ;;
3467 *)
3468 log_must swap -a $swapdev
3469 ;;
3470 esac
8c54ddd3
BB
3471
3472 return 0
3473}
3474
3475#
3476# Cleanup a swap device on the provided device.
3477#
3478function swap_cleanup
3479{
3480 typeset swapdev=$1
3481
3482 if is_swap_inuse $swapdev; then
3483 if is_linux; then
3484 log_must swapoff $swapdev
7839c4b5
MM
3485 elif is_freebsd; then
3486 log_must swapoff $swapdev
8c54ddd3
BB
3487 else
3488 log_must swap -d $swapdev
3489 fi
3490 fi
3491
3492 return 0
3493}
379ca9cf
OF
3494
3495#
3496# Set a global system tunable (64-bit value)
3497#
2476f103 3498# $1 tunable name (use a NAME defined in tunables.cfg)
379ca9cf
OF
3499# $2 tunable values
3500#
3501function set_tunable64
3502{
3503 set_tunable_impl "$1" "$2" Z
3504}
3505
3506#
3507# Set a global system tunable (32-bit value)
3508#
2476f103 3509# $1 tunable name (use a NAME defined in tunables.cfg)
379ca9cf
OF
3510# $2 tunable values
3511#
3512function set_tunable32
3513{
3514 set_tunable_impl "$1" "$2" W
3515}
3516
3517function set_tunable_impl
3518{
2476f103 3519 typeset name="$1"
379ca9cf
OF
3520 typeset value="$2"
3521 typeset mdb_cmd="$3"
3522 typeset module="${4:-zfs}"
3523
2476f103
RM
3524 eval "typeset tunable=\$$name"
3525 case "$tunable" in
3526 UNSUPPORTED)
5c9f744b 3527 log_unsupported "Tunable '$name' is unsupported on $UNAME"
2476f103
RM
3528 ;;
3529 "")
3530 log_fail "Tunable '$name' must be added to tunables.cfg"
3531 ;;
3532 *)
3533 ;;
3534 esac
3535
379ca9cf
OF
3536 [[ -z "$value" ]] && return 1
3537 [[ -z "$mdb_cmd" ]] && return 1
3538
5c9f744b 3539 case "$UNAME" in
379ca9cf
OF
3540 Linux)
3541 typeset zfs_tunables="/sys/module/$module/parameters"
23914a3b 3542 echo "$value" >"$zfs_tunables/$tunable"
379ca9cf 3543 ;;
7839c4b5
MM
3544 FreeBSD)
3545 sysctl vfs.zfs.$tunable=$value
7839c4b5 3546 ;;
379ca9cf
OF
3547 SunOS)
3548 [[ "$module" -eq "zfs" ]] || return 1
3549 echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
379ca9cf
OF
3550 ;;
3551 esac
3552}
3553
3554#
3555# Get a global system tunable
3556#
2476f103 3557# $1 tunable name (use a NAME defined in tunables.cfg)
379ca9cf
OF
3558#
3559function get_tunable
3560{
3561 get_tunable_impl "$1"
3562}
3563
3564function get_tunable_impl
3565{
2476f103 3566 typeset name="$1"
379ca9cf
OF
3567 typeset module="${2:-zfs}"
3568
2476f103
RM
3569 eval "typeset tunable=\$$name"
3570 case "$tunable" in
3571 UNSUPPORTED)
5c9f744b 3572 log_unsupported "Tunable '$name' is unsupported on $UNAME"
2476f103
RM
3573 ;;
3574 "")
3575 log_fail "Tunable '$name' must be added to tunables.cfg"
3576 ;;
3577 *)
3578 ;;
3579 esac
379ca9cf 3580
5c9f744b 3581 case "$UNAME" in
379ca9cf
OF
3582 Linux)
3583 typeset zfs_tunables="/sys/module/$module/parameters"
379ca9cf 3584 cat $zfs_tunables/$tunable
379ca9cf 3585 ;;
7839c4b5
MM
3586 FreeBSD)
3587 sysctl -n vfs.zfs.$tunable
3588 ;;
379ca9cf
OF
3589 SunOS)
3590 [[ "$module" -eq "zfs" ]] || return 1
3591 ;;
3592 esac
d2734cce 3593}
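#
# Example usage (a sketch; TXG_TIMEOUT is assumed to be a NAME defined in
# tunables.cfg): save, lower, and later restore a tunable:
#
#	typeset saved_timeout=$(get_tunable TXG_TIMEOUT)
#	log_must set_tunable64 TXG_TIMEOUT 1
#	# ... exercise the shorter txg interval ...
#	log_must set_tunable64 TXG_TIMEOUT $saved_timeout
#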
240c015a
RM
3594
3595#
3596# Compute MD5 digest for given file or stdin if no file given.
3597# Note: file path must not contain spaces
3598#
3599function md5digest
3600{
3601 typeset file=$1
3602
5c9f744b 3603 case "$UNAME" in
7839c4b5
MM
3604 FreeBSD)
3605 md5 -q $file
3606 ;;
3607 *)
75746e9a
AZ
3608 typeset sum _
3609 read -r sum _ < <(md5sum -b $file)
3610 echo $sum
7839c4b5
MM
3611 ;;
3612 esac
240c015a
RM
3613}
3614
3615#
3616# Compute SHA256 digest for given file or stdin if no file given.
3617# Note: file path must not contain spaces
3618#
3619function sha256digest
3620{
3621 typeset file=$1
3622
5c9f744b 3623 case "$UNAME" in
7839c4b5
MM
3624 FreeBSD)
3625 sha256 -q $file
3626 ;;
3627 *)
75746e9a
AZ
3628 typeset sum _
3629 read -r sum _ < <(sha256sum -b $file)
3630 echo $sum
7839c4b5
MM
3631 ;;
3632 esac
3633}
3634
3635function new_fs #<args>
3636{
5c9f744b 3637 case "$UNAME" in
7839c4b5
MM
3638 FreeBSD)
3639 newfs "$@"
3640 ;;
3641 *)
3642 echo y | newfs -v "$@"
3643 ;;
3644 esac
3645}
3646
3647function stat_size #<path>
3648{
3649 typeset path=$1
3650
5c9f744b 3651 case "$UNAME" in
7839c4b5
MM
3652 FreeBSD)
3653 stat -f %z "$path"
3654 ;;
3655 *)
3656 stat -c %s "$path"
3657 ;;
3658 esac
240c015a 3659}
9fb2771a 3660
8ae86e2e
RM
3661function stat_ctime #<path>
3662{
3663 typeset path=$1
3664
5c9f744b 3665 case "$UNAME" in
8ae86e2e
RM
3666 FreeBSD)
3667 stat -f %c "$path"
3668 ;;
3669 *)
3670 stat -c %Z "$path"
3671 ;;
3672 esac
3673}
3674
3675function stat_crtime #<path>
3676{
3677 typeset path=$1
3678
5c9f744b 3679 case "$UNAME" in
8ae86e2e
RM
3680 FreeBSD)
3681 stat -f %B "$path"
3682 ;;
3683 *)
3684 stat -c %W "$path"
3685 ;;
3686 esac
3687}
3688
3fa5266d
RM
3689function stat_generation #<path>
3690{
3691 typeset path=$1
3692
5c9f744b 3693 case "$UNAME" in
3fa5266d
RM
3694 Linux)
3695 getversion "${path}"
3696 ;;
3697 *)
3698 stat -f %v "${path}"
3699 ;;
3700 esac
3701}
3702
9fb2771a
TH
3703# Run a command as if it was being run in a TTY.
3704#
3705# Usage:
3706#
3707# faketty command
3708#
3709function faketty
3710{
3711 if is_freebsd; then
834f274f 3712 script -q /dev/null env "$@"
9fb2771a
TH
3713 else
3714 script --return --quiet -c "$*" /dev/null
3715 fi
3716}
90ae4873
RM
3717
3718#
3719# Produce a random permutation of the integers in a given range (inclusive).
3720#
3721function range_shuffle # begin end
3722{
3723 typeset -i begin=$1
3724 typeset -i end=$2
3725
7a298ae9 3726 seq ${begin} ${end} | sort -R
90ae4873 3727}
6e1c594d
RM
3728
3729#
3730# Cross-platform xattr helpers
3731#
3732
3733function get_xattr # name path
3734{
3735 typeset name=$1
3736 typeset path=$2
3737
5c9f744b 3738 case "$UNAME" in
6e1c594d
RM
3739 FreeBSD)
3740 getextattr -qq user "${name}" "${path}"
3741 ;;
3742 *)
3743 attr -qg "${name}" "${path}"
3744 ;;
3745 esac
3746}
3747
3748function set_xattr # name value path
3749{
3750 typeset name=$1
3751 typeset value=$2
3752 typeset path=$3
3753
5c9f744b 3754 case "$UNAME" in
6e1c594d
RM
3755 FreeBSD)
3756 setextattr user "${name}" "${value}" "${path}"
3757 ;;
3758 *)
3759 attr -qs "${name}" -V "${value}" "${path}"
3760 ;;
3761 esac
3762}
3763
3764function set_xattr_stdin # name value
3765{
3766 typeset name=$1
3767 typeset path=$2
3768
5c9f744b 3769 case "$UNAME" in
6e1c594d
RM
3770 FreeBSD)
3771 setextattr -i user "${name}" "${path}"
3772 ;;
3773 *)
3774 attr -qs "${name}" "${path}"
3775 ;;
3776 esac
3777}
3778
3779function rm_xattr # name path
3780{
3781 typeset name=$1
3782 typeset path=$2
3783
5c9f744b 3784 case "$UNAME" in
6e1c594d
RM
3785 FreeBSD)
3786 rmextattr -q user "${name}" "${path}"
3787 ;;
3788 *)
3789 attr -qr "${name}" "${path}"
3790 ;;
3791 esac
3792}
3793
3794function ls_xattr # path
3795{
3796 typeset path=$1
3797
5c9f744b 3798 case "$UNAME" in
6e1c594d
RM
3799 FreeBSD)
3800 lsextattr -qq user "${path}"
3801 ;;
3802 *)
3803 attr -ql "${path}"
3804 ;;
3805 esac
3806}
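#
# Example usage of the xattr helpers (hypothetical test file): round-trip
# a user xattr in a platform-independent way:
#
#	log_must set_xattr testattr testvalue $TESTDIR/testfile
#	[ "$(get_xattr testattr $TESTDIR/testfile)" = "testvalue" ] ||
#	    log_fail "unexpected xattr value"
#	log_must rm_xattr testattr $TESTDIR/testfile
#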
37c22948 3807
73989f4b
RM
3808function kstat # stat flags?
3809{
3810 typeset stat=$1
3811 typeset flags=${2-"-n"}
3812
5c9f744b 3813 case "$UNAME" in
73989f4b
RM
3814 FreeBSD)
3815 sysctl $flags kstat.zfs.misc.$stat
3816 ;;
3817 Linux)
75746e9a 3818 cat "/proc/spl/kstat/zfs/$stat" 2>/dev/null
73989f4b
RM
3819 ;;
3820 *)
3821 false
3822 ;;
3823 esac
3824}
3825
37c22948
GA
3826function get_arcstat # stat
3827{
9f0a21e6
MM
3828 typeset stat=$1
3829
5c9f744b 3830 case "$UNAME" in
9f0a21e6 3831 FreeBSD)
73989f4b 3832 kstat arcstats.$stat
9f0a21e6
MM
3833 ;;
3834 Linux)
75746e9a 3835 kstat arcstats | awk "/$stat/"' { print $3 }'
9f0a21e6
MM
3836 ;;
3837 *)
3838 false
3839 ;;
3840 esac
37c22948 3841}
c15d36c6 3842
c3cb57ae
KHN
3843function punch_hole # offset length file
3844{
3845 typeset offset=$1
3846 typeset length=$2
3847 typeset file=$3
3848
5c9f744b 3849 case "$UNAME" in
c3cb57ae
KHN
3850 FreeBSD)
3851 truncate -d -o $offset -l $length "$file"
3852 ;;
3853 Linux)
3854 fallocate --punch-hole --offset $offset --length $length "$file"
3855 ;;
3856 *)
3857 false
3858 ;;
3859 esac
3860}
3861
a76e4e67
GA
3862#
3863# Wait for the specified arcstat to reach non-zero quiescence.
3864# If echo is 1 echo the value after reaching quiescence, otherwise
3865# if echo is 0 print the arcstat we are waiting on.
3866#
3867function arcstat_quiescence # stat echo
3868{
3869 typeset stat=$1
3870 typeset echo=$2
3871 typeset do_once=true
3872
3873 if [[ $echo -eq 0 ]]; then
3874 echo "Waiting for arcstat $1 quiescence."
3875 fi
3876
3877 while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
3878 typeset stat1=$(get_arcstat $stat)
3879 sleep 2
3880 typeset stat2=$(get_arcstat $stat)
3881 do_once=false
3882 done
3883
3884 if [[ $echo -eq 1 ]]; then
3885 echo $stat2
3886 fi
3887}
3888
3889function arcstat_quiescence_noecho # stat
3890{
3891 typeset stat=$1
3892 arcstat_quiescence $stat 0
3893}
3894
3895function arcstat_quiescence_echo # stat
3896{
3897 typeset stat=$1
3898 arcstat_quiescence $stat 1
3899}
3900
c15d36c6
GW
3901#
3902# Given an array of pids, wait until all processes
3903# have completed and check their return status.
3904#
3905function wait_for_children #children
3906{
3907 rv=0
3908 children=("$@")
3909 for child in "${children[@]}"
3910 do
3911 child_exit=0
3912 wait ${child} || child_exit=$?
3913 if [ $child_exit -ne 0 ]; then
3914 echo "child ${child} failed with ${child_exit}"
3915 rv=1
3916 fi
3917 done
3918 return $rv
3919}
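#
# Example usage of wait_for_children() (a sketch; worker_fn is a
# placeholder for a real background job):
#
#	typeset -a pids
#	for i in 1 2 3; do
#		worker_fn $i &
#		pids+=($!)
#	done
#	log_must wait_for_children "${pids[@]}"
#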
669683c4
AS
3920
3921#
3922# Compare two directory trees recursively in a manner similar to diff(1), but
 3923# using rsync. If there are any discrepancies, a summary of the differences is
3924# output and a non-zero error is returned.
3925#
3926# If you're comparing a directory after a ZIL replay, you should set
3927# LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause
3928# directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
3929# information).
3930#
3931function directory_diff # dir_a dir_b
3932{
3933 dir_a="$1"
3934 dir_b="$2"
3935 zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"
3936
3937 # If one of the directories doesn't exist, return 2. This is to match the
3938 # semantics of diff.
3939 if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
3940 return 2
3941 fi
3942
3943 # Run rsync with --dry-run --itemize-changes to get something akin to diff
3944 # output, but rsync is far more thorough in detecting differences (diff
3945 # doesn't compare file metadata, and cannot handle special files).
3946 #
3947 # Also make sure to filter out non-user.* xattrs when comparing. On
3948 # SELinux-enabled systems the copied tree will probably have different
3949 # SELinux labels.
3950 args=("-nicaAHX" '--filter=-x! user.*' "--delete")
3951
3952 # NOTE: Quite a few rsync builds do not support --crtimes which would be
3953 # necessary to verify that creation times are being maintained properly.
3954 # Unfortunately because of this we cannot use it unconditionally but we can
3955 # check if this rsync build supports it and use it then. This check is
3956 # based on the same check in the rsync test suite (testsuite/crtimes.test).
3957 #
3958 # We check ctimes even with zil_replay=1 because the ZIL does store
3959 # creation times and we should make sure they match (if the creation times
3960 # do not match there is a "c" entry in one of the columns).
964d4180 3961 if rsync --version | grep -q "[, ] crtimes"; then
669683c4
AS
3962 args+=("--crtimes")
3963 else
964d4180 3964 log_note "This rsync package does not support --crtimes (-N)."
669683c4
AS
3965 fi
3966
3967 # If we are testing a ZIL replay, we need to ignore timestamp changes.
3968 # Unfortunately --no-times doesn't do what we want -- it will still tell
3969 # you if the timestamps don't match but rsync will set the timestamps to
3970 # the current time (leading to an itemised change entry). It's simpler to
3971 # just filter out those lines.
3972 if [ "$zil_replay" -eq 0 ]; then
3973 filter=("cat")
3974 else
3975 # Different rsync versions have different numbers of columns. So just
3976 # require that aside from the first two, all other columns must be
3977 # blank (literal ".") or a timestamp field ("[tT]").
3978 filter=("grep" "-v" '^\..[.Tt]\+ ')
3979 fi
3980
3981 diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
3982 rv=0
3983 if [ -n "$diff" ]; then
3984 echo "$diff"
3985 rv=1
3986 fi
3987 return $rv
3988}
3989
3990#
3991# Compare two directory trees recursively, without checking whether the mtimes
3992# match (creation times will be checked if the available rsync binary supports
3993# it). This is necessary for ZIL replay checks (because the ZIL does not
3994# contain mtimes and thus after a ZIL replay, mtimes won't match).
3995#
3996# This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
3997#
3998function replay_directory_diff # dir_a dir_b
3999{
4000 LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"
669683c4 4001}
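#
# Example usage: after a ZIL replay (not shown), compare the replayed tree
# against a pre-crash copy while ignoring mtime-only differences:
#
#	log_must replay_directory_diff $TESTDIR/copy $TESTDIR/replayed
#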
598fed7e
AZ
4002
4003#
4004# Put coredumps into $1/core.{basename}
4005#
4006# Output must be saved and passed to pop_coredump_pattern on cleanup
4007#
4008function push_coredump_pattern # dir
4009{
4010 ulimit -c unlimited
5c9f744b 4011 case "$UNAME" in
598fed7e
AZ
4012 Linux)
4013 cat /proc/sys/kernel/core_pattern /proc/sys/kernel/core_uses_pid
4014 echo "$1/core.%e" >/proc/sys/kernel/core_pattern &&
4015 echo 0 >/proc/sys/kernel/core_uses_pid
4016 ;;
4017 FreeBSD)
4018 sysctl -n kern.corefile
4019 sysctl kern.corefile="$1/core.%N" >/dev/null
4020 ;;
4021 *)
4022 # Nothing to output – set only for this shell
4023 coreadm -p "$1/core.%f"
4024 ;;
4025 esac
4026}
4027
4028#
4029# Put coredumps back into the default location
4030#
4031function pop_coredump_pattern
4032{
4033 [ -s "$1" ] || return 0
5c9f744b 4034 case "$UNAME" in
598fed7e
AZ
4035 Linux)
4036 typeset pat pid
4037 { read -r pat; read -r pid; } < "$1"
4038 echo "$pat" >/proc/sys/kernel/core_pattern &&
4039 echo "$pid" >/proc/sys/kernel/core_uses_pid
4040 ;;
4041 FreeBSD)
4042 sysctl kern.corefile="$(<"$1")" >/dev/null
4043 ;;
4044 esac
4045}