]> git.proxmox.com Git - mirror_zfs.git/blame - tests/zfs-tests/include/libtest.shlib
Support integration with new QAT products
[mirror_zfs.git] / tests / zfs-tests / include / libtest.shlib
CommitLineData
6bb24f4d
BB
1#!/bin/ksh -p
2#
3# CDDL HEADER START
4#
5# The contents of this file are subject to the terms of the
6# Common Development and Distribution License (the "License").
7# You may not use this file except in compliance with the License.
8#
9# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10# or http://www.opensolaris.org/os/licensing.
11# See the License for the specific language governing permissions
12# and limitations under the License.
13#
14# When distributing Covered Code, include this CDDL HEADER in each
15# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16# If applicable, add the following below this CDDL HEADER, with the
17# fields enclosed by brackets "[]" replaced with your own identifying
18# information: Portions Copyright [yyyy] [name of copyright owner]
19#
20# CDDL HEADER END
21#
22
23#
24# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25# Use is subject to license terms.
c1d9abf9 26# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
8db2dc32 27# Copyright 2016 Nexenta Systems, Inc.
a454868b 28# Copyright (c) 2017 Lawrence Livermore National Security, LLC.
bec1067d 29# Copyright (c) 2017 Datto Inc.
6bb24f4d
BB
30#
31
32. ${STF_TOOLS}/include/logapi.shlib
a7004725 33. ${STF_SUITE}/include/math.shlib
6bb24f4d 34
#
# Honor the constrained PATH provided by the test framework, if set.
# Required because sudo's secure_path behavior may have replaced PATH.
#
[ -n "$STF_PATH" ] && PATH="$STF_PATH"
42
# Linux kernel version comparison function
#
# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
#
# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
#
function linux_version
{
	typeset ver="$1"

	# Default to the running kernel's leading x.y.z triple.
	[[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")

	typeset maj=$(echo $ver | cut -d '.' -f 1)
	typeset min=$(echo $ver | cut -d '.' -f 2)
	typeset rev=$(echo $ver | cut -d '.' -f 3)

	# Missing components compare as zero.
	[[ -z "$maj" ]] && maj=0
	[[ -z "$min" ]] && min=0
	[[ -z "$rev" ]] && rev=0

	# Encode x.y.z as a single integer so versions compare numerically.
	echo $((maj * 10000 + min * 100 + rev))
}
65
# Determine if this is a Linux test system
#
# Return 0 if platform Linux, 1 if otherwise

function is_linux
{
	case "$(uname -o)" in
	GNU/Linux)
		return 0
		;;
	*)
		return 1
		;;
	esac
}
78
# Determine if this is a 32-bit system
#
# Return 0 if platform is 32-bit, 1 if otherwise

function is_32bit
{
	# getconf LONG_BIT reports the native word size in bits.
	[[ $(getconf LONG_BIT) == "32" ]]
}
91
# Determine if kmemleak is enabled
#
# Return 0 if kmemleak is enabled, 1 if otherwise

function is_kmemleak
{
	# The debugfs node only exists on Linux kernels with the
	# kmemleak tracer built in and active.
	is_linux && [[ -e /sys/kernel/debug/kmemleak ]]
}
104
# Determine whether a dataset is mounted
#
# $1 dataset name (or mountpoint when it begins with "/")
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error

function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
	zfs)
		if [[ "$1" == "/"* ]] ; then
			# Argument is a mountpoint: match 'zfs mount' column 2.
			for out in $(zfs mount | awk '{print $2}'); do
				[[ $1 == $out ]] && return 0
			done
		else
			# Argument is a dataset name: match column 1.
			for out in $(zfs mount | awk '{print $1}'); do
				[[ $1 == $out ]] && return 0
			done
		fi
		;;
	ufs|nfs)
		out=$(df -F $fstype $1 2>/dev/null)
		ret=$?
		(($ret != 0)) && return $ret

		# Parse "dir (name): ..." style df output.
		dir=${out%%\(*}
		dir=${dir%% *}
		name=${out##*\(}
		name=${name%%\)*}
		name=${name%% *}

		[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	ext*)
		# df succeeds only when $1 is mounted with this fstype.
		out=$(df -t $fstype $1 2>/dev/null)
		return $?
		;;
	zvol)
		if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
			link=$(readlink -f $ZVOL_DEVDIR/$1)
			[[ -n "$link" ]] && \
			    mount | grep -q "^$link" && \
				return 0
		fi
		;;
	esac

	return 1
}
159
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs

function mounted
{
	# Collapse ismounted's error status (2) to 1 as well.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
171
# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs

function unmounted
{
	typeset -i rc
	ismounted $1 $2
	rc=$?
	# Only status 1 means "definitely unmounted"; 2 is an error.
	((rc == 1)) && return 0
	return 1
}
183
# split line on ","
#
# $1 - line to split

function splitline
{
	# Parameter expansion avoids forking sed for a simple substitution.
	echo ${1//,/ }
}
192
# Create the default pool/datasets from a disk list and log a pass result.
function default_setup
{
	default_setup_noexit "$@"

	log_pass
}
199
#
# Given a list of disks, setup storage pools and datasets.
#
# $1 disk list used to build $TESTPOOL
# $2 non-empty to also create the $TESTCTR container and $TESTFS1
# $3 non-empty to also create the $TESTVOL volume
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3
	log_note begin default_setup_noexit

	if is_global_zone; then
		# Start from a clean slate: recreate $TESTPOOL.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1 || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		# Container dataset is a non-mountable grouping node.
		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
			block_device_wait
		else
			# Volumes are not supported in non-global zones;
			# create a plain filesystem instead.
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
248
#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true"
}
259
#
# Given a list of disks, setup a storage pool, file system
# and a volume.
#
function default_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "" "true"
}
270
#
# Given a list of disks, setup a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true" "true"
}
281
#
# Create a snapshot on a filesystem or volume. By default the snapshot is
# taken on a filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	# Refuse to clobber an existing snapshot.
	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
305
#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	[[ -z $snap ]] && \
		log_fail "Snapshot name is undefined."
	[[ -z $clone ]] && \
		log_fail "Clone name is undefined."

	log_must zfs clone $snap $clone
}
324
#
# Create a bookmark of the given snapshot. By default the bookmark is
# created on a filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	# The bookmark must not exist; its source snapshot must exist.
	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
		log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
353
#
# Create a temporary clone result of an interrupted resumable 'zfs receive'
# $1 Destination filesystem name. Must not exist, will be created as the result
#    of this function along with its %recv temporary clone
# $2 Source filesystem name. Must not exist, will be created and destroyed
#
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	# Build a source with two snapshots, then feed a truncated
	# incremental stream to a resumable receive so it is interrupted,
	# leaving the %recv temporary clone behind on $recvfs.
	log_must zfs create -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 > $sendfile"
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	log_must zfs destroy -r $sendfs
	log_must rm -f "$sendfile"

	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
388
# Create the default mirrored pool and dataset, then log a pass result.
function default_mirror_setup
{
	default_mirror_setup_noexit $1 $2 $3

	log_pass
}
395
#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#              $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	[[ -z $primary ]] && \
		log_fail "$func: No parameters passed"
	[[ -z $secondary ]] && \
		log_fail "$func: No secondary partition passed"

	# All positional args (not just the first two) become mirror vdevs.
	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL mirror $@
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
416
#
# create a number of mirrors.
# We create a number($1) of 2 way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted
# @parameters: $1 the number of mirrors to create
#              $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	# Consume the disk list two devices at a time, one pool per pair.
	while ((nmirrors > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}
437
#
# create a number of raidz pools.
# We create a number($1) of 2 raidz pools using the pairs of disks named
# on the command line. These pools are *not* mounted
# @parameters: $1 the number of pools to create
#              $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	# Consume the disk list two devices at a time, one pool per pair.
	while ((nraidzs > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}
458
#
# Destroy the configured testpool mirrors.
# the mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	default_cleanup_noexit

	log_pass
}
469
#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
490
#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests doesn't
# impact the execution of the next set.

function default_cleanup
{
	default_cleanup_noexit

	log_pass
}
505
#
# Utility function used to list all available pool names.
#
# NOTE: $KEEP is a variable containing pool names, separated by a newline
# character, that must be excluded from the returned list.
#
function get_all_pools
{
	zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
}
516
# Destroy every pool we are allowed to destroy (global zone) or reset all
# zone-visible datasets to their default state (non-global zone), then
# remove $TESTDIR and any leftover multipath partitions.
function default_cleanup_noexit
{
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		ALL_POOLS=$(get_all_pools)
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Refresh: destroying one pool may make
				# another safe to destroy.
				ALL_POOLS=$(get_all_pools)
			done
		done

		zfs mount -a
	else
		typeset fs=""
		# Destroy the datasets belonging to this zone's containers.
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# checksum cannot be reset on encrypted
				# datasets; skip it there.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
				    [[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR

	# Multipath devices must be left un-partitioned.
	disk1=${DISKS%% *}
	if is_mpath_device $disk1; then
		delete_partitions
	fi
}
593
594
#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount, destroy, and remove the container's filesystem, the
	# container itself, and its mountpoint directory; then run the
	# common cleanup.
	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
		log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
		log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
		log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
		log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
620
#
# Common function used to cleanup snapshot of file system or volume. Default to
# delete the file system's snapshot
#
# $1 snapshot name
#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		# Fixed ungrammatical failure message ("does not existed.")
		log_fail "'$snap' does not exist."
	fi

	#
	# For the sake of the value which come from 'get_prop' is not equal
	# to the really mountpoint when the snapshot is unmounted. So, firstly
	# check and make sure this snapshot's been mounted in current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	# Remove the now-dangling mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
651
#
# Common function used to cleanup clone.
#
# $1 clone name
#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# Fixed ungrammatical failure message ("does not existed.")
		log_fail "'$clone' does not exist."
	fi

	# With the same reason in destroy_snapshot
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	# Remove the now-dangling mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
677
#
# Common function used to cleanup bookmark of file system or volume.  Default
# to delete the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# BUG FIX: the failure message referenced the undefined
		# variable $bkmarkp, so it always printed an empty name;
		# also fixed the grammar ("does not existed.").
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
694
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name

function snapexists
{
	# 'zfs list' exits non-zero when the snapshot is absent.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
	return $?
}
704
#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
	# 'zfs list' exits non-zero when the bookmark is absent.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
	return $?
}
715
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
#	$1 dataset who's property is being set
#	$2 property to set
#	$3 value to set property to
# @return:
#	0 if the property could be set.
#	non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	# Capture stderr so a failure can be reported with its output.
	typeset output=
	output=$(zfs set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}
748
#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#	0 if the dataset has been altered.
#	1 if no pool name was passed in.
#	2 if the dataset could not be found.
#	3 if the dataset could not have it's properties set.
#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	# BUG FIX: previously this iterated over every whitespace token of
	# the full 'zfs list' table (headers, sizes, mountpoints), so a
	# dataset name matching any stray token (e.g. "-") was falsely
	# "found".  Iterate over dataset names only.
	typeset confset=
	typeset -i found=0
	for confset in $(zfs list -H -o name); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
788
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
	# Evaluate the arguments arithmetically; fail loudly when false.
	(($@)) || log_fail "$@"
}
799
#
# Function to format partition size of a disk
# Given a disk cxtxdx reduces all partitions
# to 0 size
#
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset i

	if is_linux; then
		# Re-labeling wipes the whole partition table at once.
		log_must parted $DEV_DSKDIR/$diskname -s -- mklabel gpt
	else
		# Slice 2 (the whole disk) is intentionally skipped.
		for i in 0 1 3 4 5 6 7
		do
			log_must set_partition $i "" 0mb $diskname
		done
	fi

	return 0
}
821
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements eg. 100mb 3gb
#
# NOTE: This entire interface is problematic for the Linux parted utilty
# which requires the end of the partition to be specified.  It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."

	if is_linux; then
		# Strip the unit suffix ("m", "mb", "g", "gb") and
		# normalize the size to megabytes.
		typeset size_mb=${size%%[mMgG]}

		size_mb=${size_mb%%[mMgG][bB]}
		if [[ ${size:1:1} == 'g' ]]; then
			((size_mb = size_mb * 1024))
		fi

		# Create GPT partition table when setting slice 0 or
		# when the device doesn't already contain a GPT label.
		parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
		typeset ret_val=$?
		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
			parted $DEV_DSKDIR/$disk -s -- mklabel gpt
			if [[ $? -ne 0 ]]; then
				log_note "Failed to create GPT partition table on $disk"
				return 1
			fi
		fi

		# When no start is given align on the first cylinder.
		if [[ -z "$start" ]]; then
			start=1
		fi

		# Determine the cylinder size for the device and using
		# that calculate the end offset in cylinders.
		typeset -i cly_size_kb=0
		cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
		    unit cyl print | head -3 | tail -1 | \
		    awk -F '[:k.]' '{print $4}')
		((end = (size_mb * 1024 / cly_size_kb) + start))

		parted $DEV_DSKDIR/$disk -s -- \
		    mkpart part$slicenum ${start}cyl ${end}cyl
		if [[ $? -ne 0 ]]; then
			log_note "Failed to create partition $slicenum on $disk"
			return 1
		fi

		# Force the kernel to pick up the new partition table.
		blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
		block_device_wait
	else
		# illumos/Solaris: drive the interactive 'format' utility
		# with a scripted command file.
		typeset format_file=/var/tmp/format_in.$$

		echo "partition" >$format_file
		echo "$slicenum" >> $format_file
		echo "" >> $format_file
		echo "" >> $format_file
		echo "$start" >> $format_file
		echo "$size" >> $format_file
		echo "label" >> $format_file
		echo "" >> $format_file
		echo "q" >> $format_file
		echo "q" >> $format_file

		format -e -s -d $disk -f $format_file
	fi

	typeset ret_val=$?
	rm -f $format_file
	if [[ $ret_val -ne 0 ]]; then
		log_note "Unable to format $disk slice $slicenum to $size"
		return 1
	fi
	return 0
}
909
#
# Delete all partitions on all disks - this is specifically for the use of multipath
# devices which currently can only be used in the test suite as raw/un-partitioned
# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
#
function delete_partitions
{
	typeset -i j=1

	if [[ -z $DISK_ARRAY_NUM ]]; then
		DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
	fi
	if [[ -z $DISKSARRAY ]]; then
		DISKSARRAY=$DISKS
	fi

	if is_linux; then
		if (( $DISK_ARRAY_NUM == 1 )); then
			while ((j < MAX_PARTITIONS)); do
				# parted exits 1 once partition $j no longer
				# exists; verify removal via lsblk either way.
				parted $DEV_DSKDIR/$DISK -s rm $j \
				    > /dev/null 2>&1
				if (( $? == 1 )); then
					lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 1 )); then
						log_note "Partitions for $DISK should be deleted"
					else
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
					return 0
				else
					lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
					if (( $? == 0 )); then
						log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
					fi
				fi
				((j = j+1))
			done
		else
			# Multiple disks: repeat the same scan per disk.
			for disk in `echo $DISKSARRAY`; do
				while ((j < MAX_PARTITIONS)); do
					parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
					if (( $? == 1 )); then
						lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 1 )); then
							log_note "Partitions for $disk should be deleted"
						else
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
						j=7
					else
						lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
						if (( $? == 0 )); then
							log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
						fi
					fi
					((j = j+1))
				done
				j=1
			done
		fi
	fi
	return 0
}
973
#
# Get the end cyl of the given slice
#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	if is_linux; then
		# Read the partition's end cylinder from parted output.
		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
			grep "part${slice}" | \
			awk '{print $3}' | \
			sed 's,cyl,,')
		((endcyl = (endcyl + 1)))
	else
		# Normalize to the bare cXtXdX device name.
		disk=${disk#/dev/dsk/}
		disk=${disk#/dev/rdsk/}
		disk=${disk%s*}

		# Convert the slice's last sector to cylinders using the
		# sectors-per-cylinder ratio from the VTOC.
		typeset -i ratio=0
		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		    grep "sectors\/cylinder" | \
		    awk '{print $2}')

		if ((ratio == 0)); then
			return
		fi

		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		    nawk -v token="$slice" '{if ($1==token) print $6}')

		((endcyl = (endcyl + 1) / ratio))
	fi

	echo $endcyl
}
1013
1014
#
# Given a size,disk and total slice number,  this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	while ((i < $total_slices)); do
		if ! is_linux; then
			# Slice 2 is reserved for the whole disk on Solaris.
			if ((i == 2)); then
				((i = i + 1))
				continue
			fi
		fi
		log_must set_partition $i "$cyl" $slice_size $disk_name
		# Next slice starts where this one ended.
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}
1041
#
# This function continues to write to a filenum number of files into dirnum
# number of directories until either file_write returns an error or the
# maximum number of files per directory have been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
#		non 0 on error
#
# Where :
#	destdir:    is the directory where everything is to be created under
#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
#	filenum:    the maximum number of files per subdirectory
#	bytes:	    number of bytes to write
#	num_writes: numer of types to write out bytes
#	data:	    the data that will be written
#
#	E.g.
#	file_fs /testdir 20 25 1024 256 0
#
#	Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1
	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# Stop once the directory quota has been reached.
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# A write failure (e.g. ENOSPC) ends the fill loop.
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		if (($fn >= $filenum)); then
			# Directory is full; move on to the next one.
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
1103
#
# Simple function to get the specified property. If unable to
# get the property then exits.
#
# Note property is in 'parsable' format (-p)
#
function get_prop # property dataset
{
	typeset prop_val
	typeset prop=$1
	typeset dataset=$2

	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for dataset " \
		    "$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
1126
#
# Simple function to get the specified property of pool. If unable to
# get the property then exits.
#
# Note property is in 'parsable' format (-p)
#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		# NOTE(review): $? here reflects the final awk, not zpool;
		# a zpool failure is effectively caught by poolexists above.
		prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
		    awk '{print $3}')
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			    "$pool"
			return 1
		fi
	else
		# Fixed ungrammatical diagnostic ("Pool $pool not exists.")
		log_note "Pool $pool does not exist."
		return 1
	fi

	echo "$prop_val"
	return 0
}
1155
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name

function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# 'zpool get name' succeeds only for an imported pool of that name.
	zpool get name "$pool" > /dev/null 2>&1
	return $?
}
1172
# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Fail fast with the status of the first missing dataset.
	while (($# > 0)); do
		zfs get name $1 > /dev/null 2>&1 || \
		    return $?
		shift
	done

	return 0
}
1191
# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Return failure as soon as any of the datasets is found to exist.
	while (($# > 0)); do
		zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
		    && return 1
		shift
	done

	return 0
}
1210
#
# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A name that is not an absolute path is treated as a dataset and
	# resolved to its mountpoint first.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*) fs=$mtpt
					;;
			esac
		fi
	fi

	if is_linux; then
		for mtpt in $(share | awk '{print $1}') ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	fi

	# illumos 'share' lists the path in the second column.
	for mtpt in $(share | awk '{print $2}') ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1257
#
# Given a dataset name determine if it is shared via SMB.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared_smb
{
	typeset fs=$1
	typeset mtpt

	if datasetnonexists "$fs" ; then
		return 1
	else
		# usershare names cannot contain '/', so the dataset name is
		# flattened with underscores before the lookup.
		fs=$(echo $fs | sed 's@/@_@g')
	fi

	if is_linux; then
		for mtpt in $(net usershare list | awk '{print $1}') ; do
			if [[ $mtpt == $fs ]] ; then
				return 0
			fi
		done
		return 1
	else
		log_unsupported "Currently unsupported by the test framework"
		return 1
	fi
}
1286
#
# Given a mountpoint, determine if it is not shared via NFS.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	typeset fs=$1

	is_shared $fs
	if (($? == 0)); then
		return 1
	fi

	return 0
}
1303
#
# Given a dataset determine if it is not shared via SMB.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared_smb
{
	typeset fs=$1

	is_shared_smb $fs
	if (($? == 0)); then
		return 1
	fi

	return 0
}
1320
#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
	typeset fs=$1

	# Unshare only when the dataset is currently shared via NFS or SMB.
	is_shared $fs || is_shared_smb $fs
	if (($? == 0)); then
		log_must zfs unshare $fs
	fi

	return 0
}
1335
#
# Helper function to share a NFS mountpoint.
#
function share_nfs #fs
{
	typeset fs=$1

	if is_linux; then
		is_shared $fs
		if (($? != 0)); then
			log_must share "*:$fs"
		fi
	else
		is_shared $fs
		if (($? != 0)); then
			log_must share -F nfs $fs
		fi
	fi

	return 0
}
1357
#
# Helper function to unshare a NFS mountpoint.
#
function unshare_nfs #fs
{
	typeset fs=$1

	if is_linux; then
		is_shared $fs
		if (($? == 0)); then
			log_must unshare -u "*:$fs"
		fi
	else
		is_shared $fs
		if (($? == 0)); then
			log_must unshare -F nfs $fs
		fi
	fi

	return 0
}
1379
#
# Helper function to show NFS shares.
#
function showshares_nfs
{
	if is_linux; then
		share -v
	else
		share -F nfs
	fi

	return 0
}
1393
#
# Helper function to show SMB shares.
#
function showshares_smb
{
	if is_linux; then
		net usershare list
	else
		share -F smb
	fi

	return 0
}
1407
#
# Check NFS server status and trigger it online.
#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	if is_linux; then
		log_note "NFS server must started prior to running test framework."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		timeout=10
		while [[ $timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1462
#
# To verify whether calling process is in global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
	# Linux has no zones; treat it as always being the global zone.
	if is_linux; then
		return 0
	else
		typeset cur_zone=$(zonename 2>/dev/null)
		if [[ $cur_zone != "global" ]]; then
			return 1
		fi
		return 0
	fi
}
1480
#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both"(no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No restriction given - always runnable.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
		global|both)
			;;
		local)	log_unsupported "Test is unable to run from " \
			    "global zone."
			;;
		*)	log_note "Warning: unknown limit $limit - " \
			    "use both."
			;;
		esac
	else
		case $limit in
		local|both)
			;;
		global)	log_unsupported "Test is unable to run from " \
			    "local zone."
			;;
		*)	log_note "Warning: unknown limit $limit - " \
			    "use both."
			;;
		esac

		# A local zone needs its delegated pools remounted.
		reexport_pool
	fi

	return 0
}
1523
# Return 0 if create successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list

function create_pool #pool devs_list
{
	# Strip any dataset component so "$pool/fs" names the pool itself.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1552
# Return 0 if destroy successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.

function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd/udev activity can cause attempts
			# to destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			log_must_busy zpool destroy -f $pool

			[[ -d $mtpt ]] && \
			    log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1589
#
# Firstly, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name
# $2 zone root directory prefix
# $3 zone ip
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol
	block_device_wait

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
	    log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
	    log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
		    $zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo "terminal=dtterm" >> $sysidcfg
	echo "network_interface=primary {" >> $sysidcfg
	echo "hostname=$zone_name" >> $sysidcfg
	echo "}" >> $sysidcfg
	echo "name_service=NONE" >> $sysidcfg
	echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo "security_policy=NONE" >> $sysidcfg
	echo "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1703
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Point TESTPOOL and TESTPOOL1..4 at the zone containers and make
	# sure each of them is mounted.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1727
#
# Verify a given disk or pool state
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline,degraded}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	[[ -z $pool ]] || [[ -z $state ]] \
	    && log_fail "Arguments invalid or missing"

	if [[ -z $disk ]]; then
		#check pool state only
		zpool get -H -o value health $pool \
		    | grep -i "$state" > /dev/null 2>&1
	else
		zpool status -v $pool | grep "$disk" \
		    | grep -i "$state" > /dev/null 2>&1
	fi

	return $?
}
1753
#
# Cause a scan of all scsi host adapters by default
#
# $1 optional host number
#
function scan_scsi_hosts
{
	typeset hostnum=${1}

	if is_linux; then
		if [[ -z $hostnum ]]; then
			# Rescan every SCSI host adapter on the system.
			for host in /sys/class/scsi_host/host*; do
				log_must eval "echo '- - -' > $host/scan"
			done
		else
			log_must eval \
			    "echo /sys/class/scsi_host/host$hostnum/scan" \
			    > /dev/null
			log_must eval \
			    "echo '- - -' > /sys/class/scsi_host/host$hostnum/scan"
		fi
	fi
}
#
# Wait for newly created block devices to have their minors created.
#
function block_device_wait
{
	if is_linux; then
		udevadm trigger
		udevadm settle
	fi
}
1787
#
# Online or offline a disk on the system
#
# First checks state of disk. Test will fail if disk is not properly onlined
# or offlined. Online is a full rescan of SCSI disks by echoing to every
# host entry.
#
function on_off_disk # disk state{online,offline} host
{
	typeset disk=$1
	typeset state=$2
	typeset host=$3

	[[ -z $disk ]] || [[ -z $state ]] && \
	    log_fail "Arguments invalid or missing"

	if is_linux; then
		if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
			# Multipath device: offline and delete every slave
			# of the device-mapper node one by one.
			dm_name="$(readlink $DEV_DSKDIR/$disk \
			    | nawk -F / '{print $2}')"
			slave="$(ls /sys/block/${dm_name}/slaves \
			    | nawk '{print $1}')"
			while [[ -n $slave ]]; do
				#check if disk is online
				lsscsi | egrep $slave > /dev/null
				if (($? == 0)); then
					slave_dir="/sys/block/${dm_name}"
					slave_dir+="/slaves/${slave}/device"
					ss="${slave_dir}/state"
					sd="${slave_dir}/delete"
					log_must eval "echo 'offline' > ${ss}"
					log_must eval "echo '1' > ${sd}"
					lsscsi | egrep $slave > /dev/null
					if (($? == 0)); then
						log_fail "Offlining" \
						    "$disk failed"
					fi
				fi
				slave="$(ls /sys/block/$dm_name/slaves \
				    2>/dev/null | nawk '{print $1}')"
			done
		elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
			#check if disk is online
			lsscsi | egrep $disk > /dev/null
			if (($? == 0)); then
				dev_state="/sys/block/$disk/device/state"
				dev_delete="/sys/block/$disk/device/delete"
				log_must eval "echo 'offline' > ${dev_state}"
				log_must eval "echo '1' > ${dev_delete}"
				lsscsi | egrep $disk > /dev/null
				if (($? == 0)); then
					log_fail "Offlining $disk" \
					    "failed"
				fi
			else
				log_note "$disk is already offline"
			fi
		elif [[ $state == "online" ]]; then
			#force a full rescan
			scan_scsi_hosts $host
			block_device_wait
			if is_mpath_device $disk; then
				dm_name="$(readlink $DEV_DSKDIR/$disk \
				    | nawk -F / '{print $2}')"
				slave="$(ls /sys/block/$dm_name/slaves \
				    | nawk '{print $1}')"
				lsscsi | egrep $slave > /dev/null
				if (($? != 0)); then
					log_fail "Onlining $disk failed"
				fi
			elif is_real_device $disk; then
				lsscsi | egrep $disk > /dev/null
				if (($? != 0)); then
					log_fail "Onlining $disk failed"
				fi
			else
				log_fail "$disk is not a real dev"
			fi
		else
			log_fail "$disk failed to $state"
		fi
	fi
}
1871
#
# Get the mountpoint of snapshot
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if [[ $dataset != *@* ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	if [[ -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1894
#
# Given a device and 'ashift' value verify it's correctly set on every label
#
function verify_ashift # device ashift
{
	typeset device="$1"
	typeset ashift="$2"

	# zdb prints an "ashift: N" line for each of the 4 vdev labels; all
	# of them must carry the expected value.
	zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
	    if (ashift != $2)
	        exit 1;
	    else
	        count++;
	    } END {
	    if (count != 4)
	        exit 1;
	    else
	        exit 0;
	    }'

	return $?
}
1917
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	# Extra arguments are device directories passed to 'zpool import -d'.
	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1956
#
# Given a pool, and this function list all disks in the pool
#
function get_disklist # pool
{
	typeset disklist=""

	# Skip the iostat header, separator lines and vdev grouping rows so
	# only leaf device names remain.
	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
	    grep -v "\-\-\-\-\-" | \
	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")

	echo $disklist
}
1970
#
# Given a pool, and this function list all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
#
function get_disklist_fullpath # pool
{
	# 'args' was previously assigned without typeset and leaked into the
	# global namespace; keep it local to this function.
	typeset args="-P $1"
	get_disklist $args
}
1980
1981
1982
# /**
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want : we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
	    "It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		# Only signal processes that are still alive.
		ps -p $pid > /dev/null 2>&1
		if (($? == 0)); then
			log_must kill -USR1 $pid
		fi
	done
}
2012
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "spares")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
2031
#
# Verify a given slog disk is inuse or avail
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "logs")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
2050
#
# Verify a given vdev disk is inuse or avail
#
# Return 0 is pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	# Fixed: was "${2#$/DEV_DSKDIR/}" - the stray "$/" meant the
	# $DEV_DSKDIR/ prefix was never stripped from the disk name, so the
	# state lookup could never match a full device path.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
2069
#
# Check the output of 'zpool status -v <pool>',
# and to see if the content of <token> contain the <keyword> specified.
#
# Return 0 is contain, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}
	typeset scan

	# Pull the single status line whose first field is "<token>:".
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
2092
#
# These 6 following functions are instance of check_pool_status()
# is_pool_resilvering - to check if the pool is resilver in progress
# is_pool_resilvered - to check if the pool is resilver completed
# is_pool_scrubbing - to check if the pool is scrub in progress
# is_pool_scrubbed - to check if the pool is scrub completed
# is_pool_scrub_stopped - to check if the pool is scrub stopped
# is_pool_scrub_paused - to check if the pool has scrub paused
#
function is_pool_resilvering #pool <verbose>
{
	check_pool_status "$1" "scan" "resilver in progress since " $2
	return $?
}

function is_pool_resilvered #pool <verbose>
{
	check_pool_status "$1" "scan" "resilvered " $2
	return $?
}

function is_pool_scrubbing #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub in progress since " $2
	return $?
}

function is_pool_scrubbed #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub repaired" $2
	return $?
}

function is_pool_scrub_stopped #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub canceled" $2
	return $?
}

function is_pool_scrub_paused #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub paused since " $2
	return $?
}
2137
#
# Use create_pool()/destroy_pool() to clean up the information in
# in the given disk to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	# Creating and immediately destroying a scratch pool rewrites the
	# labels on the given devices.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
2155
#/**
# A function to find and locate free disks on a system or from given
# disks as the parameter. It works by locating disks that are in use
# as swap devices and dump devices, and also disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#*/
function find_disks
{
	# Trust provided list, no attempt is made to locate unused devices.
	if is_linux; then
		echo "$@"
		return
	fi


	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	swap -l > $sfi
	dumpadm > $dmpi 2>/dev/null

# write an awk script that can process the output of format
# to produce a list of disks we know about. Note that we have
# to escape "$2" so that the shell doesn't interpret it while
# we're creating the awk script.
# -------------------
	cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
	BEGIN { FS="."; }

	/^Specify disk/{
		searchdisks=0;
	}

	{
		if (searchdisks && \$2 !~ "^$"){
			split(\$2,arr," ");
			print arr[1];
		}
	}

	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
EOF
#---------------------

	chmod 755 /tmp/find_disks.awk
	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
	rm /tmp/find_disks.awk

	unused=""
	for disk in $disks; do
	# Check for mounted
		grep "${disk}[sp]" /etc/mnttab >/dev/null
		(($? == 0)) && continue
	# Check for swap
		grep "${disk}[sp]" $sfi >/dev/null
		(($? == 0)) && continue
	# check for dump device
		grep "${disk}[sp]" $dmpi >/dev/null
		(($? == 0)) && continue
	# check to see if this disk hasn't been explicitly excluded
	# by a user-set environment variable
		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
		(($? == 0)) && continue
		unused_candidates="$unused_candidates $disk"
	done
	rm $sfi
	rm $dmpi

# now just check to see if those disks do actually exist
# by looking for a device pointing to the first slice in
# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if [ -b $DEV_DSKDIR/${disk}s0 ]; then
		if [ $count -lt $max_finddisksnum ]; then
			unused="$unused $disk"
			# do not impose limit if $@ is provided
			[[ -z $@ ]] && ((count = count + 1))
		fi
		fi
	done

# finally, return our disk list
	echo $unused
}
2249
#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	if ((${#gname} == 0 || ${#uname} == 0)); then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname
	# Give the new user the constrained test PATH in every likely
	# login shell startup file.
	echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.profile
	echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.bash_profile
	echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.login

	# Add new users to the same group and the command line utils.
	# This allows them to be run out of the original users home
	# directory as long as it permissioned to be group readable.
	if is_linux; then
		cmd_group=$(stat --format="%G" $(which zfs))
		log_must usermod -a -G $cmd_group $uname
	fi

	return 0
}
2282
#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	if ((${#user} == 0)); then
		log_fail "login name is necessary."
	fi

	# userdel can transiently fail while the account is still in use,
	# so retry a few times on that specific error.
	if id $user > /dev/null 2>&1; then
		log_must_retry "currently used" 5 userdel $user
	fi

	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
2306
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	# Assign 100 as the base gid, a larger value is selected for
	# Linux because for many distributions 1000 and under are reserved.
	if is_linux; then
		while true; do
			groupadd $group > /dev/null 2>&1
			typeset -i ret=$?
			case $ret in
				0) return 0 ;;
				*) return 1 ;;
			esac
		done
	else
		typeset -i gid=100
		while true; do
			groupadd -g $gid $group > /dev/null 2>&1
			typeset -i ret=$?
			case $ret in
				0) return 0 ;;
				# The gid is not unique
				4) ((gid += 1)) ;;
				*) return 1 ;;
			esac
		done
	fi
}
2345
2346#
2347# Delete the specified group.
2348#
2349# $1 group name
2350#
#
# Delete the specified group, tolerating a group that does not exist.
#
# $1 group name
#
function del_group #<group_name>
{
	typeset grp=$1
	typeset -i ret

	[[ -z $grp ]] && log_fail "group name is necessary."

	if is_linux; then
		getent group $grp > /dev/null 2>&1
		ret=$?
		case $ret in
		# exit 2: group does not exist
		2) return 0 ;;
		# exit 0: name exists as a group, remove it
		0) log_must groupdel $grp ;;
		*) return 1 ;;
		esac
	else
		groupmod -n $grp $grp > /dev/null 2>&1
		ret=$?
		case $ret in
		# exit 6: group does not exist
		6) return 0 ;;
		# exit 9: name exists as a group, remove it
		9) log_must groupdel $grp ;;
		*) return 1 ;;
		esac
	fi

	return 0
}
2382
2383#
2384# This function will return true if it's safe to destroy the pool passed
2385# as argument 1. It checks for pools based on zvols and files, and also
2386# files contained in a pool that may have a different mountpoint.
2387#
function safe_to_destroy_pool { # $1 the pool name

	typeset pool=""
	typeset DONT_DESTROY=""

	# Destroying $1 must not pull the rug out from under any other
	# pool: scan every pool and verify none is built from files or
	# zvols that live inside $1.
	for pool in $(zpool list -H -o name)
	do
		ALTMOUNTPOOL=""

		# File vdevs whose path runs through /$1/
		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
		    awk '{print $1}')

		# Zvol vdevs provided by $1
		ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
		    | awk '{print $1}')

		# Top-level directories of any file vdevs, used to catch
		# file-based pools reached via an alternate mountpoint.
		POOL_FILE_DIRS=$(zpool status -v $pool | \
		    grep / | awk '{print $1}' | \
		    awk -F/ '{print $2}' | grep -v "dev")

		for pooldir in $POOL_FILE_DIRS
		do
			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
			    grep "${pooldir}$" | awk '{print $1}')

			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
		done

		if [[ -n "$ZVOLPOOL" ]]; then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ZVOLPOOL on $1"
		fi

		if [[ -n "$FILEPOOL" ]]; then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $FILEPOOL on $1"
		fi

		if [[ -n "$ALTMOUNTPOOL" ]]; then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
		fi
	done

	if [[ -z "${DONT_DESTROY}" ]]; then
		return 0
	fi

	log_note "Warning: it is not safe to destroy $1!"
	return 1
}
2453
2454#
2455# Get the available ZFS compression options
2456# $1 option type zfs_set|zfs_compress
2457#
#
# Print the available ZFS compression options.
# $1 option type: zfs_set | zfs_compress
#
function get_compress_opts
{
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
	    gzip-6 gzip-7 gzip-8 gzip-9"
	typeset valid_opts

	case $1 in
	zfs_compress)
		valid_opts="on lzjb"
		;;
	zfs_set)
		valid_opts="on off lzjb"
		;;
	esac

	# Append the gzip levels when this zfs build supports them.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2476
2477#
2478# Verify zfs operation with -p option work as expected
2479# $1 operation, value could be create, clone or rename
2480# $2 dataset type, value could be fs or vol
2481# $3 dataset name
2482# $4 new dataset name
2483#
#
# Verify that a zfs operation with the -p option works as expected.
# $1 operation: create, clone or rename
# $2 dataset type: fs or vol
# $3 dataset name
# $4 new dataset name
#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# Validate the arguments for the requested operation.
	case $ops in
	create)
		# 'create' has no source dataset; the target is $3.
		newdataset=$dataset
		dataset=""
		if [[ $datatype == "vol" ]]; then
			ops="create -V $VOLSIZE"
		fi
		;;
	clone)
		if [[ -z $newdataset ]]; then
			log_fail "newdataset should not be empty" \
			    "when ops is $ops."
		fi
		log_must datasetexists $dataset
		log_must snapexists $dataset
		;;
	rename)
		if [[ -z $newdataset ]]; then
			log_fail "newdataset should not be empty" \
			    "when ops is $ops."
		fi
		log_must datasetexists $dataset
		log_mustnot snapexists $dataset
		;;
	*)
		log_fail "$ops is not supported."
		;;
	esac

	# The parent filesystem must not pre-exist, otherwise -p is moot.
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# Without -p the operation must fail ...
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# ... and with -p it must succeed.
	log_must zfs $ops -p $dataset $newdataset
	block_device_wait

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# create/clone with -p is idempotent; rename is not re-runnable.
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2549
2550#
2551# Get configuration of pool
2552# $1 pool name
2553# $2 config name
2554#
#
# Get a configuration value of a pool from its zdb config dump.
# $1 pool name
# $2 config name
#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# An imported pool reports '-' in the altroot column; read the
	# cached config (-C) in that case, otherwise use -e.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		# Strip the single quotes zdb wraps string values in.
		# (Was "${value#'}" with an unbalanced quote in the
		# pattern, which is not portable across shells.)
		value=${value#\'}
		value=${value%\'}
	fi
	echo $value

	return 0
}
2580
2581#
2582# Privated function. Random select one of items from arguments.
2583#
2584# $1 count
2585# $2-n string
2586#
#
# Private helper: randomly select one item from the arguments.
#
# $1 count of items
# $2-n whitespace separated items
#
function _random_get
{
	typeset cnt=$1
	shift

	typeset str="$@"
	typeset -i ind
	# 1-based index into the space separated list.
	((ind = RANDOM % cnt + 1))

	echo "$str" | cut -f $ind -d ' '
}
2599
2600#
2601# Random select one of item from arguments which include NONE string
2602#
#
# Randomly select one item from the arguments, or possibly none of
# them: the candidate count is one larger than the argument count, so
# the selection may land past the list and yield an empty string.
#
function random_get_with_non
{
	typeset -i cnt=$#
	# Was '((cnt =+ 1))', which assigned +1 to cnt instead of
	# incrementing it, so only the first argument was ever chosen.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2610
2611#
2612# Random select one of item from arguments which doesn't include NONE string
2613#
# Randomly select one of the arguments (never the empty "NONE" slot).
function random_get
{
	_random_get "$#" "$@"
}
2618
2619#
2620# Detect if the current system support slog
2621#
#
# Detect whether the current system supports separate log (slog)
# devices by attempting a dry-run pool creation with a log vdev.
#
function verify_slog_support
{
	typeset dir=$TEST_BASE_DIR/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b
	typeset -i ret=0

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# -n: only check the configuration, never really create the pool.
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || ret=1
	rm -r $dir

	return $ret
}
2640
2641#
2642# The function will generate a dataset name with specific length
2643# $1, the length of the name
2644# $2, the base string to construct the name
2645#
#
# Generate a dataset name of (at least) the requested length by
# repeating a base string.
# $1 the target length of the name
# $2 the base string used to construct the name
#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i iter
	typeset l_name=""

	# Number of copies needed to reach at least $len characters
	# (ceiling division).
	((iter = (len + baselen - 1) / baselen))

	while ((iter > 0)); do
		l_name="${l_name}$basestr"
		((iter -= 1))
	done

	echo $l_name
}
2667
2668#
2669# Get cksum tuple of dataset
2670# $1 dataset name
2671#
2672# sample zdb output:
2673# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2674# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2675# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2676# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
	# Flush dirty data first so the rootbp checksum is current,
	# then pull the cksum= field out of the zdb dataset header.
	sync
	zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" | \
	    awk -F= '{print $7}'
}
2685
2686#
2687# Get cksum of file
2688# #1 file path
2689#
#
# Print the CRC checksum of a file (first field of cksum(1) output).
# $1 file path
#
function checksum
{
	cksum $1 | awk '{print $1}'
}
2696
2697#
2698# Get the given disk/slice state from the specific field of the pool
2699#
#
# Print the state of the given disk/slice from the specified section
# ("", "spares" or "logs") of 'zpool status' output.
#
function get_device_state #pool disk field("", "spares","logs")
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset field=${3:-$pool}

	# Scan the config: section until the requested field starts,
	# then report the state column of the matching device line.
	state=$(zpool status -v "$pool" 2>/dev/null | \
	    nawk -v device=$disk -v pool=$pool -v field=$field \
	    'BEGIN {startconfig=0; startfield=0; }
	    /config:/ {startconfig=1}
	    (startconfig==1) && ($1==field) {startfield=1; next;}
	    (startfield==1) && ($1==device) {print $2; exit;}
	    (startfield==1) &&
	    ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
	echo $state
}
2716
2717
2718#
2719# print the given directory filesystem type
2720#
2721# $1 directory name
2722#
#
# Print the filesystem type of the given directory.
#
# $1 directory name
#
function get_fstype
{
	typeset dir=$1

	[[ -z $dir ]] && log_fail "Usage: get_fstype <directory>"

	#
	# 'df -n' output has the form "<mountpoint> : <fstype>",
	# so the type is the third field.
	#
	df -n $dir | awk '{print $3}'
}
2737
2738#
2739# Given a disk, label it to VTOC regardless what label was on the disk
2740# $1 disk
2741#
#
# Given a disk, relabel it with a VTOC label regardless of its
# current label.  Solaris only.
# $1 disk
#
function labelvtoc
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "The disk name is unspecified."

	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	if is_linux; then
		log_note "Currently unsupported by the test framework"
		return 1
	fi

	# Build the scripted input for format(1M); x86 needs an fdisk
	# partition first and one extra prompt fewer than sparc.
	if [[ $arch == "i386" ]]; then
		printf '%s\n' "label" "0" "" "q" "q" > $label_file

		fdisk -B $disk >/dev/null 2>&1
		# give fdisk a chance to finish
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		printf '%s\n' "label" "0" "" "" "" "q" > $label_file
	else
		log_fail "unknown arch type"
	fi

	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	# give format a chance to finish
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2790
2791#
2792# check if the system was installed as zfsroot or not
2793# return: 0 ture, otherwise false
2794#
#
# Check whether the system was installed with a ZFS root.
# Returns 0 when / is on zfs, non-zero otherwise.
#
function is_zfsroot
{
	df -n / | grep zfs > /dev/null 2>&1
}
2800
2801#
2802# get the root filesystem name if it's zfsroot system.
2803#
2804# return: root filesystem name
#
# Print the root filesystem name on a zfsroot system; fail otherwise.
#
function get_rootfs
{
	typeset rootfs=""

	if ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		    /etc/mnttab)
	fi
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootfs"
	fi

	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2823
2824#
2825# get the rootfs's pool name
2826# return:
2827# rootpool name
2828#
#
# Print the pool backing the root filesystem on a zfsroot system.
#
function get_rootpool
{
	typeset rootfs=""

	if ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		    /etc/mnttab)
	fi
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootpool"
	fi

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool is the first component of the dataset name.
		echo ${rootfs%%/*}
	else
		log_fail "This is not a zfsroot system."
	fi
}
2849
6bb24f4d
BB
2850#
2851# Check if the given device is physical device
2852#
#
# Check if the given device is a physical device.
#
function is_physical_device #device
{
	typeset device=${1#$DEV_DSKDIR}
	device=${device#$DEV_RDSKDIR}

	if is_linux; then
		# Needs a block device node and loop partition support.
		[[ -b "$DEV_DSKDIR/$device" ]] && \
		    [[ -f /sys/module/loop/parameters/max_part ]]
	else
		# Solaris cXtYdZ / cXdY style names.
		echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
	fi
}
2867
7050a65d
SV
2868#
2869# Check if the given device is a real device (ie SCSI device)
2870#
#
# Check if the given device is a real device (ie SCSI disk).
#
function is_real_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
		    egrep disk >/dev/null
	fi
}
2882
2883#
2884# Check if the given device is a loop device
2885#
#
# Check if the given device is a loop device.
#
function is_loop_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
		    egrep loop >/dev/null
	fi
}
2897
2898#
2899# Check if the given device is a multipath device and if there is a sybolic
2900# link to a device mapper and to a disk
2901# Currently no support for dm devices alone without multipath
2902#
#
# Check if the given device is a multipath device with a symbolic
# link to a device mapper node and to a disk.
# No support for dm devices alone without multipath.
#
function is_mpath_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		lsblk $DEV_MPATHDIR/$disk -o TYPE 2>/dev/null | \
		    egrep mpath >/dev/null
		if (($? == 0)); then
			# Must also resolve through device-mapper.
			readlink $DEV_MPATHDIR/$disk > /dev/null 2>&1
			return $?
		else
			return 1
		fi
	fi
}
2919
2920# Set the slice prefix for disk partitioning depending
2921# on whether the device is a real, multipath, or loop device.
2922# Currently all disks have to be of the same type, so only
2923# checks first disk to determine slice prefix.
2924#
# Set the slice prefix for disk partitioning depending on whether the
# device is a real, multipath, or loop device.  All disks are assumed
# to be of the same type, so only the first disk matters.
#
function set_slice_prefix
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			# NOTE(review): awk never sees the shell's $i, so
			# 'i' is 0 inside the script and this always picks
			# the first disk; that matches the comment above.
			disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
			if ( is_mpath_device $disk ) && \
			    [[ -z $(echo $disk | \
			    awk 'substr($1,18,1) ~ /^[[:digit:]]+$/') ]] || \
			    ( is_real_device $disk ); then
				export SLICE_PREFIX=""
				return 0
			elif ( is_mpath_device $disk || is_loop_device \
			    $disk ); then
				export SLICE_PREFIX="p"
				return 0
			else
				log_fail "$disk not supported for partitioning."
			fi
			(( i = i + 1))
		done
	fi
}
2948
2949#
2950# Set the directory path of the listed devices in $DISK_ARRAY_NUM
2951# Currently all disks have to be of the same type, so only
2952# checks first disk to determine device directory
2953# default = /dev (linux)
2954# real disk = /dev (linux)
2955# multipath device = /dev/mapper (linux)
2956#
#
# Set the directory path of the listed devices in $DISK_ARRAY_NUM.
# All disks are assumed to be of the same type, so only the first
# disk determines the directory.
# default / real disk = /dev (linux), multipath = /dev/mapper (linux)
#
function set_device_dir
{
	typeset disk
	typeset -i i=0

	if ! is_linux; then
		export DEV_DSKDIR=$DEV_RDSKDIR
		return 0
	fi

	while (( i < $DISK_ARRAY_NUM )); do
		disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
		if is_mpath_device $disk; then
			export DEV_DSKDIR=$DEV_MPATHDIR
		else
			export DEV_DSKDIR=$DEV_RDSKDIR
		fi
		return 0
	done
}
2978
6bb24f4d
BB
2979#
2980# Get the directory path of given device
2981#
#
# Print the directory path of the given device.
#
function get_device_dir #device
{
	typeset device=$1

	if is_physical_device $device ; then
		echo "$DEV_DSKDIR"
		return
	fi

	# Strip the last path component; if what remains names a block
	# device under $DEV_DSKDIR, report that directory instead.
	if [[ $device != "/" ]]; then
		device=${device%/*}
	fi
	if [[ -b "$DEV_DSKDIR/$device" ]]; then
		device="$DEV_DSKDIR"
	fi
	echo $device
}
2998
ec0e24c2
SV
2999#
3000# Get persistent name for given disk
3001#
#
# Print a persistent (udev by-id) name for the given disk.
#
function get_persistent_disk_name #device
{
	typeset device=$1
	typeset dev_id

	if ! is_linux; then
		echo $device
		return
	fi

	if is_real_device $device; then
		# by-id symlink name of a whole disk
		dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
		    | egrep disk/by-id | nawk '{print $2; exit}' \
		    | nawk -F / '{print $3}')"
		echo $dev_id
	elif is_mpath_device $device; then
		# dm-uuid based name of a multipath device
		dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
		    | egrep disk/by-id/dm-uuid \
		    | nawk '{print $2; exit}' \
		    | nawk -F / '{print $3}')"
		echo $dev_id
	else
		echo $device
	fi
}
3026
7a4500a1
SV
3027#
3028# Load scsi_debug module with specified parameters
3029#
#
# Load the scsi_debug module with the specified parameters.
#
function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
{
	typeset devsize=$1
	typeset hosts=$2
	typeset tgts=$3
	typeset luns=$4

	[[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
	    [[ -z $luns ]] && log_fail "Arguments invalid or missing"

	if is_linux; then
		modprobe -n scsi_debug
		if (($? != 0)); then
			# The message was previously split over two lines
			# without a continuation, so "module" ran as a
			# command instead of being printed.
			log_unsupported \
			    "Platform does not have scsi_debug module"
		fi
		lsmod | egrep scsi_debug > /dev/null
		if (($? == 0)); then
			log_fail "scsi_debug module already installed"
		else
			log_must modprobe scsi_debug dev_size_mb=$devsize \
			    add_host=$hosts num_tgts=$tgts max_luns=$luns
			block_device_wait
			lsscsi | egrep scsi_debug > /dev/null
			if (($? == 1)); then
				log_fail "scsi_debug module install failed"
			fi
		fi
	fi
}
3060
6bb24f4d
BB
3061#
3062# Get the package name
3063#
#
# Print the STC package name: path components become '-' separated,
# prefixed with "SUNWstc-".
#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	echo "SUNWstc-${dirpath//\//-}"
}
3070
3071#
3072# Get the word numbers from a string separated by white space
3073#
#
# Count the whitespace separated words in a string.
#
function get_word_count
{
	wc -w <<<"$1"
}
3078
3079#
3080# To verify if the require numbers of disks is given
3081#
#
# Verify that at least $2 (default 1) disks are listed in $1.
#
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
		    " You specified $count disk(s)"
	fi
}
3093
# True (0) if the dataset is a volume.
function ds_is_volume
{
	[[ $(get_prop type $1) = "volume" ]]
}
3100
# True (0) if the dataset is a filesystem.
function ds_is_filesystem
{
	[[ $(get_prop type $1) = "filesystem" ]]
}
3107
# True (0) if the dataset is a snapshot.
function ds_is_snapshot
{
	[[ $(get_prop type $1) = "snapshot" ]]
}
3114
3115#
3116# Check if Trusted Extensions are installed and enabled
3117#
#
# Check if Trusted Extensions are installed and enabled.
#
function is_te_enabled
{
	if svcs -H -o state labeld 2>/dev/null | grep "enabled"; then
		return 0
	else
		return 1
	fi
}
3127
3128# Utility function to determine if a system has multiple cpus.
# Utility function to determine if a system has multiple cpus.
function is_mp
{
	if is_linux; then
		(($(nproc) > 1))
	else
		(($(psrinfo | wc -l) > 1))
	fi
}
3139
# Print the CPU frequency in MHz.
function get_cpu_freq
{
	if is_linux; then
		lscpu | awk '/CPU MHz/ { print $3 }'
	else
		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
	fi
}
3148
3149# Run the given command as the user provided.
# Run the given command as the user provided; stdout and stderr are
# captured in /tmp/out and /tmp/err.
function user_run
{
	typeset user=$1
	shift

	log_note "user:$user $@"
	eval su - \$user -c \"$@\" > /tmp/out 2>/tmp/err
}
3159
3160#
3161# Check if the pool contains the specified vdevs
3162#
3163# $1 pool
3164# $2..n <vdev> ...
3165#
3166# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3167# vdevs is not in the pool, and 2 if pool name is missing.
3168#
#
# Check if the pool contains the specified vdevs.
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the
# specified vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	typeset tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		if (($? != 0)); then
			# Don't leak the temp file on the failure path
			# (previously it was only removed on success).
			rm -f $tmpfile
			return 1
		fi
	done

	rm -f $tmpfile

	return 0;
}
3192
679d73e9
JWK
# Print the largest of the integer arguments.
function get_max
{
	typeset -l i max=$1
	shift

	for i in "$@"; do
		((i > max)) && max=$i
	done

	echo $max
}
3204
# Print the smallest of the integer arguments.
function get_min
{
	typeset -l i min=$1
	shift

	for i in "$@"; do
		((i < min)) && min=$i
	done

	echo $min
}
3216
a7004725
DK
3217#
3218# Generate a random number between 1 and the argument.
3219#
#
# Generate a random number between 1 and the argument.
#
function random
{
	echo $(( (RANDOM % $1) + 1 ))
}
3225
3226# Write data that can be compressed into a directory
# Write data that can be compressed into a directory.
function write_compressible
{
	typeset dir=$1
	typeset megs=$2
	typeset nfiles=${3:-1}
	typeset bs=${4:-1024k}
	typeset fname=${5:-file}

	[[ -d $dir ]] || log_fail "No directory: $dir"

	if is_linux; then
		# fio's --buffer_compress_* behavior varies too much
		# between versions on Linux; synthesize ~66% compressible
		# files with dd instead: every third 4k block is random,
		# the rest stay zero-filled from truncate.
		typeset file_bytes=$(to_bytes $megs)
		typeset bs_bytes=4096
		typeset blocks=$(($file_bytes / $bs_bytes))

		for (( i = 0; i < $nfiles; i++ )); do
			truncate -s $file_bytes $dir/$fname.$i

			for (( j = 0; j < $blocks; j += 3 )); do
				dd if=/dev/urandom of=$dir/$fname.$i \
				    seek=$j bs=$bs_bytes count=1 \
				    conv=notrunc >/dev/null 2>&1
			done
		done
	else
		log_must eval "fio \
		    --name=job \
		    --fallocate=0 \
		    --minimal \
		    --randrepeat=0 \
		    --buffer_compress_percentage=66 \
		    --buffer_compress_chunk=4096 \
		    --directory=$dir \
		    --numjobs=$nfiles \
		    --nrfiles=$nfiles \
		    --rw=write \
		    --bs=$bs \
		    --filesize=$megs \
		    --filename_format='$fname.\$jobnum' >/dev/null"
	fi
}
3273
# Print the ZFS object number (the inode number) of a path.
function get_objnum
{
	typeset pathname=$1

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
	stat -c %i $pathname
}
3283
1de321e6 3284#
bec1067d 3285# Sync data to the pool
1de321e6
JX
3286#
3287# $1 pool name
bec1067d 3288# $2 boolean to force uberblock (and config including zpool cache file) update
1de321e6 3289#
bec1067d 3290function sync_pool #pool <force>
1de321e6
JX
3291{
3292 typeset pool=${1:-$TESTPOOL}
bec1067d 3293 typeset force=${2:-false}
1de321e6 3294
bec1067d
AP
3295 if [[ $force == true ]]; then
3296 log_must zpool sync -f $pool
3297 else
3298 log_must zpool sync $pool
3299 fi
3300
3301 return 0
1de321e6 3302}
d834b9ce
GM
3303
3304#
3305# Wait for zpool 'freeing' property drops to zero.
3306#
3307# $1 pool name
3308#
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll once a second until the 'freeing' property reaches zero.
	while [[ "$(zpool list -Ho freeing $pool)" != "0" ]]; do
		log_must sleep 1
	done
}
7a4500a1 3317
dddef7d6 3318#
3319# Wait for every device replace operation to complete
3320#
3321# $1 pool name
3322#
function wait_replacing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll until no replacing-N vdev remains in the status output.
	while [[ -n "$(zpool status $pool |
	    awk '/replacing-[0-9]+/ {print $1}')" ]]; do
		log_must sleep 1
	done
}
3332
95401cb6
BB
3333#
3334# Setup custom environment for the ZED.
3335#
#
# Setup custom environment for the ZED.
#
function zed_setup
{
	if ! is_linux; then
		return
	fi

	[[ -d $ZEDLET_DIR ]] || log_must mkdir $ZEDLET_DIR
	[[ -e $VDEVID_CONF ]] || log_must touch $VDEVID_CONF

	if [[ -e $VDEVID_CONF_ETC ]]; then
		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
	fi

	# Create a symlink for /etc/zfs/vdev_id.conf file.
	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC

	# Setup minimal ZED configuration.  Individual test cases should
	# add additional ZEDLETs as needed for their specific test.
	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR

	# Customize the zed.rc file to enable the full debug log.
	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
	echo "ZED_DEBUG_LOG=$ZEDLET_DIR/zed.debug.log" >>$ZEDLET_DIR/zed.rc

	log_must cp ${ZEDLET_LIBEXEC_DIR}/all-syslog.sh $ZEDLET_DIR
	log_must cp ${ZEDLET_LIBEXEC_DIR}/all-debug.sh $ZEDLET_DIR
	log_must touch $ZEDLET_DIR/zed.debug.log
}
3370
3371#
3372# Cleanup custom ZED environment.
3373#
#
# Cleanup custom ZED environment.
#
function zed_cleanup
{
	if ! is_linux; then
		return
	fi

	typeset f
	for f in zed.rc zed-functions.sh all-syslog.sh all-debug.sh \
	    zed.pid zedlog zed.debug.log state; do
		log_must rm -f ${ZEDLET_DIR}/$f
	done
	log_must rm -f $VDEVID_CONF_ETC
	log_must rm -f $VDEVID_CONF
	rmdir $ZEDLET_DIR
}
3392
7a4500a1
SV
3393#
3394# Check if ZED is currently running, if not start ZED.
3395#
#
# Check if ZED is currently running; if not start ZED.
#
function zed_start
{
	if ! is_linux; then
		return
	fi

	# ZEDLET_DIR=/var/tmp/zed
	[[ -d $ZEDLET_DIR ]] || log_must mkdir $ZEDLET_DIR

	# Verify the ZED is not already running.
	pgrep -x zed > /dev/null
	if (($? == 0)); then
		log_fail "ZED already running"
	fi

	log_note "Starting ZED"
	# Run ZED in the background and redirect foreground logging
	# output to zedlog.
	log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid" \
	    "-s $ZEDLET_DIR/state 2>${ZEDLET_DIR}/zedlog &"

	return 0
}
3421
3422#
3423# Kill ZED process
3424#
#
# Kill the ZED process.
#
function zed_stop
{
	if ! is_linux; then
		return
	fi

	log_note "Stopping ZED"
	if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
		zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
		log_must kill $zedpid
	fi

	return 0
}
8c54ddd3
BB
3439
3440#
3441# Check is provided device is being active used as a swap device.
3442#
#
# Check if the provided device is actively used as a swap device.
#
function is_swap_inuse
{
	typeset device=$1

	if [[ -z $device ]] ; then
		log_note "No device specified."
		return 1
	fi

	if is_linux; then
		swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
	else
		swap -l | grep -w $device > /dev/null 2>&1
	fi
}
3460
3461#
3462# Setup a swap device using the provided device.
3463#
#
# Setup a swap device on the provided device.
#
function swap_setup
{
	typeset swapdev=$1

	if is_linux; then
		# Needs an explicit mkswap before swapon on Linux.
		log_must eval "mkswap $swapdev > /dev/null 2>&1"
		log_must swapon $swapdev
	else
		log_must swap -a $swapdev
	fi

	return 0
}
3477
3478#
3479# Cleanup a swap device on the provided device.
3480#
#
# Cleanup a swap device on the provided device.
#
function swap_cleanup
{
	typeset swapdev=$1

	# Nothing to do when the device is not in use as swap.
	if ! is_swap_inuse $swapdev; then
		return 0
	fi

	if is_linux; then
		log_must swapoff $swapdev
	else
		log_must swap -d $swapdev
	fi

	return 0
}
379ca9cf
OF
3495
3496#
3497# Set a global system tunable (64-bit value)
3498#
3499# $1 tunable name
3500# $2 tunable values
3501#
# Set a global system tunable (64-bit value): $1 name, $2 value.
function set_tunable64
{
	set_tunable_impl "$1" "$2" Z
}
3506
3507#
3508# Set a global system tunable (32-bit value)
3509#
3510# $1 tunable name
3511# $2 tunable values
3512#
# Set a global system tunable (32-bit value): $1 name, $2 value.
function set_tunable32
{
	set_tunable_impl "$1" "$2" W
}
3517
# Set a module tunable: $1 name, $2 value, $3 mdb write format
# character, $4 module (default zfs).  Returns non-zero on failure.
function set_tunable_impl
{
	typeset tunable="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"
	typeset module="${4:-zfs}"

	[[ -z "$tunable" ]] && return 1
	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -w "$zfs_tunables/$tunable" ]] || return 1
		echo -n "$value" > "$zfs_tunables/$tunable"
		return "$?"
		;;
	SunOS)
		# String comparison: '-eq' evaluated both sides
		# arithmetically (non-numeric strings become 0), so the
		# guard used to pass for ANY module name.
		[[ "$module" == "zfs" ]] || return 1
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		return "$?"
		;;
	esac
}
3543
3544#
3545# Get a global system tunable
3546#
3547# $1 tunable name
3548#
# Print the value of a global system tunable: $1 tunable name.
function get_tunable
{
	get_tunable_impl "$1"
}
3553
# Print a module tunable's value: $1 name, $2 module (default zfs).
# Returns non-zero when the tunable cannot be read.
function get_tunable_impl
{
	typeset tunable="$1"
	typeset module="${2:-zfs}"

	[[ -z "$tunable" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -f "$zfs_tunables/$tunable" ]] || return 1
		cat $zfs_tunables/$tunable
		return "$?"
		;;
	SunOS)
		# String comparison: '-eq' compared arithmetically and
		# accepted any module name.  Reading via mdb is still
		# unimplemented, so this falls through to the failure
		# return below.
		[[ "$module" == "zfs" ]] || return 1
		;;
	esac

	return 1
}