# Source: git.proxmox.com mirror_zfs-debian.git -- scripts/common.sh
# Imported Upstream version 0.6.5.9
1 #!/bin/bash
2 #
3 # Common support functions for testing scripts. If a script-config
4 # files is available it will be sourced so in-tree kernel modules and
5 # utilities will be used. If no script-config can be found then the
6 # installed kernel modules and utilities will be used.
7
# Locate the directory this script was run from; quote "$0" so paths
# containing whitespace do not word-split.
basedir="$(dirname "$0")"

# When an in-tree zfs-script-config.sh exists source it so the freshly
# built modules and utilities are tested; otherwise fall back to the
# default module lists for an installed system.
SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
. "${basedir}/../${SCRIPT_CONFIG}"
else
KERNEL_MODULES=(zlib_deflate zlib_inflate)
MODULES=(spl splat zavl znvpair zunicode zcommon zfs)
fi
17
# Runtime state and option flags, normally filled in by the calling
# script's command-line parsing.
PROG="<define PROG>"		# Name used in error messages; set by caller.
CLEANUP=
VERBOSE=			# Non-empty enables msg() output.
VERBOSE_FLAG=
FORCE=
FORCE_FLAG=
DUMP_LOG=			# Non-empty: dump SPL debug log before unload.
ERROR=				# Last error message for the caller to report.
RAID0S=()			# Populated by udev_raid0_setup().
RAID10S=()			# Populated by udev_raid10_setup().
RAIDZS=()			# Populated by udev_raidz_setup().
RAIDZ2S=()			# Populated by udev_raidz2_setup().
TESTS_RUN=${TESTS_RUN:-'*'}	# Test numbers to run; '*' means all.
TESTS_SKIP=${TESTS_SKIP:-}	# Test numbers to skip.
32
# Autoconf-style installation prefixes.  These defaults apply when no
# in-tree zfs-script-config.sh was sourced above.
prefix=/usr/local
exec_prefix=${prefix}
pkgdatadir=${prefix}/share/zfs
bindir=${exec_prefix}/bin
sbindir=${exec_prefix}/sbin
udevdir=	# NOTE(review): empty unless provided by script-config -- verify.
udevruledir=	# NOTE(review): empty unless provided by script-config -- verify.
sysconfdir=${prefix}/etc
localstatedir=${prefix}/var

# Configuration directories; every value is overridable from the
# environment.
ETCDIR=${ETCDIR:-/etc}
DEVDIR=${DEVDIR:-/dev/disk/by-vdev}
ZPOOLDIR=${ZPOOLDIR:-${pkgdatadir}/zpool-config}
ZPIOSDIR=${ZPIOSDIR:-${pkgdatadir}/zpios-test}
ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkgdatadir}/zpios-profile}

# ZFS userland utilities.
ZDB=${ZDB:-${sbindir}/zdb}
ZFS=${ZFS:-${sbindir}/zfs}
ZINJECT=${ZINJECT:-${sbindir}/zinject}
ZPOOL=${ZPOOL:-${sbindir}/zpool}
ZTEST=${ZTEST:-${sbindir}/ztest}
ZPIOS=${ZPIOS:-${sbindir}/zpios}

# Companion test scripts.
COMMON_SH=${COMMON_SH:-${pkgdatadir}/common.sh}
ZFS_SH=${ZFS_SH:-${pkgdatadir}/zfs.sh}
ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkgdatadir}/zpool-create.sh}
ZPIOS_SH=${ZPIOS_SH:-${pkgdatadir}/zpios.sh}
ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkgdatadir}/zpios-survey.sh}
61
# System utilities invoked by these helpers; all overridable from the
# environment (the test suites exploit this to substitute stubs).
LDMOD=${LDMOD:-/sbin/modprobe}
LSMOD=${LSMOD:-/sbin/lsmod}
RMMOD=${RMMOD:-/sbin/rmmod}
INFOMOD=${INFOMOD:-/sbin/modinfo}
LOSETUP=${LOSETUP:-/sbin/losetup}
MDADM=${MDADM:-/sbin/mdadm}
PARTED=${PARTED:-/sbin/parted}
BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
SYSCTL=${SYSCTL:-/sbin/sysctl}
UDEVADM=${UDEVADM:-/sbin/udevadm}
AWK=${AWK:-/usr/bin/awk}

# Pid file consulted by kill_zed().
ZED_PIDFILE=${ZED_PIDFILE:-${localstatedir}/run/zed.pid}

# ANSI escape sequences for the colorized Pass/Fail/Skip output; they
# are expanded by 'echo -e' in pass()/fail()/skip().
COLOR_BLACK="\033[0;30m"
COLOR_DK_GRAY="\033[1;30m"
COLOR_BLUE="\033[0;34m"
COLOR_LT_BLUE="\033[1;34m"
COLOR_GREEN="\033[0;32m"
COLOR_LT_GREEN="\033[1;32m"
COLOR_CYAN="\033[0;36m"
COLOR_LT_CYAN="\033[1;36m"
COLOR_RED="\033[0;31m"
COLOR_LT_RED="\033[1;31m"
COLOR_PURPLE="\033[0;35m"
COLOR_LT_PURPLE="\033[1;35m"
COLOR_BROWN="\033[0;33m"
COLOR_YELLOW="\033[1;33m"
COLOR_LT_GRAY="\033[0;37m"
COLOR_WHITE="\033[1;37m"
COLOR_RESET="\033[0m"
95
# Print "<prog>: <message>" on stderr (honoring backslash escapes, as
# 'echo -e' did) and terminate the script with status 1.
die() {
	printf '%b\n' "${PROG}: $1" >&2
	exit 1
}
100
# Echo the arguments only when verbose mode is enabled.
# The original tested '[ ${VERBOSE} ]' unquoted, which errors if
# VERBOSE ever contains whitespace; use an explicit quoted -n test.
msg() {
	if [ -n "${VERBOSE}" ]; then
		echo "$@"
	fi
}
106
# Report a passing test in green.
pass() {
	printf '%b\n' "${COLOR_GREEN}Pass${COLOR_RESET}"
}
110
# Report a failing test in red and exit with the supplied status code.
fail() {
	printf '%b\n' "${COLOR_RED}Fail${COLOR_RESET} ($1)"
	exit $1
}
115
# Report a skipped test in brown.
skip() {
	printf '%b\n' "${COLOR_BROWN}Skip${COLOR_RESET}"
}
119
# Build a small random tree of files under $1 for use as a copy source.
#   $1 - root directory (created if needed)
#   $2 - max number of files per directory (exclusive)
#   $3 - max file size in KiB (exclusive)
# Leaves the list of directories in the global DIRS (legacy behavior).
# Returns 0.
populate() {
	local ROOT=$1
	local MAX_DIR_SIZE=$2
	local MAX_FILE_SIZE=$3
	local COUNT SIZE FILE DIR

	mkdir -p "$ROOT"/{a,b,c,d,e,f,g}/{h,i}
	DIRS=$(find "$ROOT")

	for DIR in $DIRS; do
		# Random file count in [0, MAX_DIR_SIZE).
		COUNT=$((RANDOM % MAX_DIR_SIZE))

		for i in $(seq $COUNT); do
			FILE=$(mktemp -p "${DIR}")
			SIZE=$((RANDOM % MAX_FILE_SIZE))
			dd if=/dev/urandom of="$FILE" bs=1k \
			    count=$SIZE &>/dev/null
		done
	done

	return 0
}
140
# Prepare the test environment: disable the 90-zfs.rules udev rule (so
# module loading stays under the test scripts' control) and create a
# random source tree for copy tests.  Registers a single INT/TERM/EXIT
# trap that both restores the rule and removes the tree; the original
# installed two traps and the second silently replaced the first, so
# the udev rule was never restored on exit.
init() {
	local RULE=${udevruledir}/90-zfs.rules
	local RESTORE_RULE=:

	if test -e ${RULE}; then
		mv ${RULE} ${RULE}.disabled
		RESTORE_RULE="mv ${RULE}.disabled ${RULE}"
	fi

	# Create a random directory tree of files and sub-directories to
	# act as a copy source for the various regression tests.
	SRC_DIR=$(mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX)
	trap "${RESTORE_RULE}; rm -Rf $SRC_DIR" INT TERM EXIT
	populate $SRC_DIR 10 100
}
159
spl_dump_log() {
	# Ask the SPL to dump its in-kernel debug buffer; the sysctl's
	# own output is uninteresting and discarded.
	${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
	# The kernel logs the dump file name as the 5th space-separated
	# field of the most recent dmesg line.
	local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
	# Decode the binary dump into ${NAME}.log.
	# NOTE(review): SPLBUILD is not defined in this file; it is
	# presumably supplied by zfs-script-config.sh -- confirm.
	${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
	echo
	echo "Dumped debug log: ${NAME}.log"
	tail -n1 ${NAME}.log
	echo
	return 0
}
170
# Verify the ZFS module stack is ready to load: nothing from MODULES is
# currently loaded, and modinfo can locate every module.  On failure
# sets ERROR with a human-readable explanation and returns 1.
# The original tested '[ ${INFOMOD} ${MOD} 2>/dev/null ]', which passes
# the command words to 'test' instead of executing modinfo, so missing
# modules were never detected; run the command and test its status.
check_modules() {
	local LOADED_MODULES=()
	local MISSING_MODULES=()

	for MOD in ${MODULES[*]}; do
		local NAME=$(basename $MOD .ko)

		if ${LSMOD} | egrep -q "^${NAME}"; then
			LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
		fi

		if ! ${INFOMOD} ${MOD} &>/dev/null; then
			MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
		fi
	done

	if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
		ERROR="Unload these modules with '${PROG} -u':\n"
		ERROR="${ERROR}${LOADED_MODULES[*]}"
		return 1
	fi

	if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
		ERROR="The following modules can not be found,"
		ERROR="${ERROR} ensure your source trees are built:\n"
		ERROR="${ERROR}${MISSING_MODULES[*]}"
		return 1
	fi

	return 0
}
202
# Load a single kernel module (with optional module options appended).
#   $1  - module name or .ko path
#   $2+ - options passed through to modprobe
# Returns 0 on success, 1 on failure (with a message on stdout).
# Uses "$@" rather than the original unquoted $*, so arguments that
# contain whitespace are forwarded intact.
load_module() {
	local NAME=$(basename $1 .ko)

	if [ -n "${VERBOSE}" ]; then
		echo "Loading ${NAME} ($@)"
	fi

	if ! ${LDMOD} "$@" &>/dev/null; then
		echo "Failed to load ${NAME} ($@)"
		return 1
	fi

	return 0
}
218
# Load the full ZFS module stack.  Arguments of the form NAME=options
# supply module options for the matching module.  Returns 0 when every
# module in MODULES loads, 1 otherwise.
load_modules() {
	mkdir -p /etc/zfs

	# Kernel dependencies first; failures are ignored since these
	# may be built into the running kernel.
	for MOD in ${KERNEL_MODULES[*]}; do
		load_module ${MOD} >/dev/null
	done

	for MOD in ${MODULES[*]}; do
		local NAME=$(basename ${MOD} .ko)
		local VALUE=

		# Pick up any NAME=options argument for this module.
		for OPT in "$@"; do
			if [ "${NAME}" = "${OPT%%=*}" ]; then
				VALUE=${OPT#*=}
			fi
		done

		load_module ${MOD} ${VALUE} || return 1
	done

	if [ ${VERBOSE} ]; then
		echo "Successfully loaded ZFS module stack"
	fi

	return 0
}
247
# Unload a single kernel module.  On failure sets the global ERROR and
# returns 1.  The original wrote 'ERROR="..." return 1', a temporary
# assignment scoped to the 'return' builtin, so ERROR was never visible
# to the caller; assign it in a normal statement instead.
unload_module() {
	local NAME=$(basename $1 .ko)

	if [ -n "${VERBOSE}" ]; then
		echo "Unloading ${NAME} ($@)"
	fi

	if ! ${RMMOD} ${NAME}; then
		ERROR="Failed to unload ${NAME}"
		return 1
	fi

	return 0
}
259
# Unload the ZFS module stack in reverse dependency order.  Modules
# with a non-zero reference count are left alone.  When DUMP_LOG is
# set the SPL debug log is captured before spl itself is removed.
# Returns 0 on success, 1 if any unload fails.
unload_modules() {
	local I

	# Walk MODULES back-to-front so dependents go first.
	for (( I=${#MODULES[@]} - 1; I >= 0; I-- )); do
		local MOD=${MODULES[$I]}
		local NAME=$(basename ${MOD} .ko)
		local USE_COUNT=$(${LSMOD} |
		    egrep "^${NAME} " | ${AWK} '{print $3}')

		if [ "${USE_COUNT}" = 0 ]; then

			if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then
				spl_dump_log
			fi

			unload_module ${MOD} || return 1
		fi
	done

	if [ ${VERBOSE} ]; then
		echo "Successfully unloaded ZFS module stack"
	fi

	return 0
}
285
#
# Check that the loopback utilities are installed.
#
check_loop_utils() {
	[ -f ${LOSETUP} ] || die "${LOSETUP} utility must be installed"
}
292

#
# Find and return an unused loop device.  A new /dev/loopN node will be
# created if required.  The kernel loop driver will automatically register
# the minor as long as it's less than /sys/module/loop/parameters/max_loop.
#
unused_loop_device() {
	local DEVICE=$(${LOSETUP} -f)
	local MAX_LOOP_PATH="/sys/module/loop/parameters/max_loop"
	local MAX_LOOP

	# An existing /dev/loopN device was available.
	if [ -n "${DEVICE}" ]; then
		echo "${DEVICE}"
		return 0
	fi

	# Create a new /dev/loopN provided we are not at MAX_LOOP.
	if [ -f "${MAX_LOOP_PATH}" ]; then
		MAX_LOOP=$(cat "${MAX_LOOP_PATH}")
		# max_loop=0 lets the driver size the table itself; fall
		# back to the historic 255 minor limit in that case.
		if [ ${MAX_LOOP} -eq 0 ]; then
			MAX_LOOP=255
		fi

		for (( i=0; i<=${MAX_LOOP}; i++ )); do
			DEVICE="/dev/loop$i"

			if [ -b "${DEVICE}" ]; then
				continue
			else
				mknod -m660 "${DEVICE}" b 7 $i
				# 'owner:group' is the portable separator;
				# the historic 'root.disk' form is rejected
				# by current coreutils chown.
				chown root:disk "${DEVICE}"
				chmod 666 "${DEVICE}"

				echo "${DEVICE}"
				return 0
			fi
		done
	fi

	die "Error: Unable to create new loopback device"
}
335
#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours.  However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' or 'deleted' in the name.  So any
# damage we might do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
	local TMP_FILE=$(mktemp)

	${LOSETUP} -a | tr -d '()' >${TMP_FILE}
	# Detach matching devices using the configured losetup binary.
	# The original declared '-v losetup=...' but then invoked the
	# literal string "losetup" inside system(); use the variable.
	${AWK} -F":" -v losetup="$LOSETUP" \
	    '/zpool/ || /deleted/ { system(losetup " -d " $1) }' ${TMP_FILE}
	# Remove the backing files (3rd whitespace field of losetup -a).
	${AWK} -F" " '/zpool/ || /deleted/ { system("rm -f "$3) }' ${TMP_FILE}

	rm -f ${TMP_FILE}
}
353
#
# Destroy the passed loopback devices, this is used when you know
# the names of the loopback devices.
#
destroy_loop_devices() {
	local LODEVICES="$1"

	msg "Destroying ${LODEVICES}"
	# NOTE(review): ${FILE} and ${DEVICE} are not set in this
	# function, so this message relies on stale caller-scope
	# globals and may print empty values -- verify intent.
	${LOSETUP} -d ${LODEVICES} || \
		die "Error $? destroying ${FILE} -> ${DEVICE} loopback"

	# NOTE(review): ${FILES} is likewise a caller-scope global
	# (backing files), not the local ${LODEVICES} -- confirm the
	# callers that depend on this before changing it.
	rm -f ${FILES}
	return 0
}
368
#
# Write a fresh partition-table label to a block device.  Waits for the
# node to appear via udev first.  Returns 1 when the device never shows
# up and 2 when parted fails.
#
label() {
	local DEV=$1
	local TYPE=$2

	# Device nodes appear asynchronously; allow udev time to settle.
	wait_udev ${DEV} 30 || return 1

	${PARTED} ${DEV} --script -- mklabel ${TYPE} || return 2

	return 0
}
381
#
# Create a primary partition on a block device and re-trigger udev so
# the new partition node is created.  Returns 1 when parted fails.
#
partition() {
	local DEV=$1
	local PART_TYPE=$2
	local PART_START=$3
	local PART_END=$4

	${PARTED} --align optimal ${DEV} --script -- \
	    mkpart ${PART_TYPE} ${PART_START} ${PART_END} || return 1
	udev_trigger

	return 0
}
397
#
# Create a filesystem of the given type on the block device.
#
format() {
	local DEV=$1
	local FS=$2

	# Force a 4K blocksize; mkfs.ext2 may otherwise choose 8K,
	# which the kernel cannot mount.
	/sbin/mkfs.${FS} -b 4096 -F -q ${DEV} >/dev/null || return 1

	return 0
}
411
#
# Check that the mdadm utilities are installed.
#
check_md_utils() {
	[ -f ${MDADM} ] || die "${MDADM} utility must be installed"
	[ -f ${PARTED} ] || die "${PARTED} utility must be installed"
}
419
check_md_partitionable() {
	# Determine whether the kernel can re-read the partition table of
	# an md device by building a throw-away loopback-backed 'faulty'
	# array and invoking blockdev --rereadpt on it.  Returns 0 when
	# the re-read succeeds, non-zero otherwise.
	local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
	local LODEVICE=`unused_loop_device`
	local MDDEVICE=`unused_md_device`
	local RESULT=1

	check_md_utils

	# 16M sparse backing file (count=0 seek=16 only extends it).
	rm -f ${LOFILE}
	dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
		&>/dev/null || return ${RESULT}

	msg "Creating ${LODEVICE} using ${LOFILE}"
	${LOSETUP} ${LODEVICE} ${LOFILE}
	if [ $? -ne 0 ]; then
		rm -f ${LOFILE}
		return ${RESULT}
	fi

	msg "Creating ${MDDEVICE} using ${LODEVICE}"
	${MDADM} --build ${MDDEVICE} --level=faulty \
		--raid-devices=1 ${LODEVICE} &>/dev/null
	if [ $? -ne 0 ]; then
		destroy_loop_devices ${LODEVICE}
		rm -f ${LOFILE}
		return ${RESULT}
	fi
	wait_udev ${MDDEVICE} 30

	# Exit status 0 here means partitions on md devices are usable.
	${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null
	RESULT=$?

	destroy_md_devices ${MDDEVICE}
	destroy_loop_devices ${LODEVICE}
	rm -f ${LOFILE}

	return ${RESULT}
}
458
#
# Find and return an unused md device.
#
unused_md_device() {
	for (( i=0; i<32; i++ )); do
		MDDEVICE=md${i}

		# Skip active devices in /proc/mdstat.
		grep -q "${MDDEVICE} " /proc/mdstat && continue

		# Device node doesn't exist yet, use it.  (The original
		# tested the garbled path '$/dev/{MDDEVICE}', which never
		# exists, so the in-use check below was unreachable.)
		if [ ! -e /dev/${MDDEVICE} ]; then
			echo /dev/${MDDEVICE}
			return
		fi

		# Device exists but may not be in use.
		if [ -b /dev/${MDDEVICE} ]; then
			${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
			if [ $? -eq 1 ]; then
				echo /dev/${MDDEVICE}
				return
			fi
		fi
	done

	die "Error: Unable to find unused md device"
}
487
#
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours.  However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super block
# so you should be able to reconstruct them.
#
cleanup_md_devices() {
	# Whole devices only: partition nodes (md0p1, ...) contain 'p'
	# and are filtered out.
	destroy_md_devices "`ls /dev/md* 2>/dev/null | grep -v p`"
	udev_trigger
}
499
#
# Destroy the passed md devices, this is used when you know
# the names of the md devices.  Errors are intentionally ignored;
# always returns 0.
#
destroy_md_devices() {
	local MDDEVICES="$1"

	msg "Destroying ${MDDEVICES}"
	for MDDEVICE in ${MDDEVICES}; do
		# Stop, remove, then query once more so mdadm flushes
		# its state; each step is best-effort.
		${MDADM} --stop ${MDDEVICE} &>/dev/null
		${MDADM} --remove ${MDDEVICE} &>/dev/null
		${MDADM} --detail ${MDDEVICE} &>/dev/null
	done

	return 0
}
516
#
# Check that the scsi utilities are installed.
#
check_sd_utils() {
	${INFOMOD} scsi_debug &>/dev/null || die "scsi_debug module required"
	[ -f ${LSSCSI} ] || die "${LSSCSI} utility must be installed"
}
524
#
# Rescan the scsi bus for scsi_debug devices.  It is preferable to use the
# scsi-rescan tool if it is installed, but if it's not we can fall back to
# removing and readding the device manually.  This rescan will only effect
# the first scsi_debug device if scsi-rescan is missing.
#
scsi_rescan() {
	# Awk program: print the bracketed H:C:T:L id of the first
	# scsi_debug line and stop.
	local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"

	if [ -f ${SCSIRESCAN} ]; then
		${SCSIRESCAN} --forcerescan --remove &>/dev/null
	else
		# Manual fallback: delete the device through sysfs, then
		# ask its host adapter to rescan everything ("- - -" is
		# the wildcard channel/target/lun triple).
		local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
		local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
		echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
		udev_trigger
		echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
		udev_trigger
	fi
}
545
#
# Trigger udev and wait for it to settle.  Falls back to the legacy
# udevtrigger/udevsettle pair when udevadm is unavailable.
#
udev_trigger() {
	if [ ! -f ${UDEVADM} ]; then
		/sbin/udevtrigger
		/sbin/udevsettle
	else
		${UDEVADM} trigger --action=change --subsystem-match=block
		${UDEVADM} settle
	fi
}
558
#
# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/by-vdev/<CHANNEL><RANK>
# disk mapping.  In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n.  A CHANNEL should identify a group of RANKS which are all
# attached to a single controller, each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
# given a known hardware configuration.
#
udev_setup() {
	local SRC_PATH=$1

	# When running in tree manually construct symlinks in tree to
	# the proper devices.  Symlinks are installed for all entries
	# in the config file regardless of whether that device actually
	# exists.  When installed as a package udev can be relied on for
	# this and it will only create links for devices which exist.
	if [ ${INTREE} ]; then
		# Save the starting directory in a private variable; the
		# original stored it in PWD, which 'cd' overwrites, so it
		# never returned to where it started.
		local SAVED_PWD=$(pwd)
		mkdir -p ${DEVDIR}/
		cd ${DEVDIR}/
		${AWK} '!/^#/ && /./ { system( \
		    "ln -f -s /dev/disk/by-path/"$2" "$1";" \
		    "ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
		    "ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
		) }' $SRC_PATH
		cd ${SAVED_PWD}
	else
		DST_FILE=$(basename ${SRC_PATH} | cut -f1-2 -d'.')
		DST_PATH=/etc/zfs/${DST_FILE}

		if [ -e ${DST_PATH} ]; then
			die "Error: Config ${DST_PATH} already exists"
		fi

		cp ${SRC_PATH} ${DST_PATH}
		udev_trigger
	fi

	return 0
}
601
# Remove the in-tree symlinks created by udev_setup().  A no-op when
# not running in tree.  The original saved the starting directory in
# PWD, which 'cd' overwrites, so it finished inside ${DEVDIR}; save it
# in a private local instead.
udev_cleanup() {
	local SRC_PATH=$1

	if [ ${INTREE} ]; then
		local SAVED_PWD=$(pwd)
		cd ${DEVDIR}/
		${AWK} '!/^#/ && /./ { system( \
		    "rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
		cd ${SAVED_PWD}
	fi

	return 0
}
615
# Map a (channel, rank) pair to its by-vdev disk name: channel 1-26
# becomes the letter a-z, rank is appended verbatim (e.g. 1,2 -> "a2").
# Uses printf %x for the ASCII arithmetic instead of the original
# pipeline through bc, which is not installed everywhere.
udev_cr2d() {
	local CHANNEL=$(printf '%x' $(( $1 + 96 )))
	local RANK=$2

	printf "\x${CHANNEL}${RANK}"
}
622
# Fill the global RAID0S array with one by-vdev disk path per
# channel/rank combination, suitable for a striped pool.
udev_raid0_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAID0S=()
	for RANK in $(seq 1 ${RANKS}); do
		for CHANNEL in $(seq 1 ${CHANNELS}); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAID0S+=("${DEVDIR}/${DISK}")
		done
	done

	return 0
}
639
# Fill the global RAID10S array with "mirror <diskA> <diskB>" entries,
# pairing adjacent channels within each rank.
udev_raid10_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAID10S=()
	for RANK in $(seq 1 ${RANKS}); do
		for CHANNEL1 in $(seq 1 2 ${CHANNELS}); do
			CHANNEL2=$((CHANNEL1 + 1))
			DISK1=$(udev_cr2d ${CHANNEL1} ${RANK})
			DISK2=$(udev_cr2d ${CHANNEL2} ${RANK})
			GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
			RAID10S+=("mirror ${GROUP}")
		done
	done

	return 0
}
659
# Fill the global RAIDZS array (1-indexed by rank) with one raidz vdev
# specification per rank, striped across every channel.
udev_raidz_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZS=()
	for RANK in $(seq 1 ${RANKS}); do
		# Element 0 is the vdev type, elements 1..N the disks.
		RAIDZ=("raidz")

		for CHANNEL in $(seq 1 ${CHANNELS}); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZS[${RANK}]="${RAIDZ[*]}"
	done

	return 0
}
678
# Fill the global RAIDZ2S array (1-indexed by rank) with one raidz2
# vdev specification per rank, striped across every channel.
udev_raidz2_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZ2S=()
	for RANK in $(seq 1 ${RANKS}); do
		# Element 0 is the vdev type, elements 1..N the disks.
		RAIDZ2=("raidz2")

		for CHANNEL in $(seq 1 ${CHANNELS}); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
	done

	return 0
}
697
# Print the test header then dispatch to the numbered test function
# (test_<N>), which is expected to report Pass/Fail itself.
run_one_test() {
	local NUM=$1
	local LABEL=$2

	printf "%-4d %-34s " ${NUM} "${LABEL}"
	test_${NUM}
}
705
# Print the test header and mark the test as skipped.
skip_one_test() {
	local NUM=$1
	local LABEL=$2

	printf "%-4d %-34s " ${NUM} "${LABEL}"
	skip
}
713
# Run or skip a single test according to TESTS_SKIP and TESTS_RUN.
# A test on the skip list is always skipped; otherwise it runs when
# TESTS_RUN is '*' or contains its number.
run_test() {
	local NUM=$1
	local LABEL=$2
	local T

	# Explicitly skipped?
	for T in ${TESTS_SKIP[@]}; do
		if [[ $T == ${NUM} ]] ; then
			skip_one_test ${NUM} "${LABEL}"
			return 0
		fi
	done

	if [ "${TESTS_RUN[0]}" = "*" ]; then
		run_one_test ${NUM} "${LABEL}"
	else
		# Only run tests on the explicit run list.
		for T in ${TESTS_RUN[@]}; do
			if [[ $T == ${NUM} ]] ; then
				run_one_test ${NUM} "${LABEL}"
				return 0
			fi
		done

		skip_one_test ${NUM} "${LABEL}"
	fi
}
738
# Wait (up to roughly $2 seconds) for a device node to appear after
# triggering udev.  Returns 0 once $1 exists, 1 on timeout.
wait_udev() {
	local DEVICE=$1
	local DELAY=$2
	local ELAPSED=0

	udev_trigger
	until [ -e ${DEVICE} ]; do
		if [ ${ELAPSED} -gt ${DELAY} ]; then
			return 1
		fi

		ELAPSED=$((ELAPSED + 1))
		sleep 1
	done

	return 0
}
756
# Enable the kernel stack tracer and reset its recorded maximum.
# A no-op on kernels without the stack tracer compiled in.
stack_clear() {
	local MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
	local TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled

	[ -e $MAX_SIZE ] || return 0

	echo 1 >$TRACER_ENABLED
	echo 0 >$MAX_SIZE
}
766
# Warn (and dump the trace) when the deepest recorded kernel stack
# exceeds 7000 bytes.  A no-op when the stack tracer is unavailable.
stack_check() {
	local MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
	local TRACE=/sys/kernel/debug/tracing/stack_trace
	local LIMIT=7000

	if [ -e $MAX_SIZE ]; then
		# STACK_SIZE is intentionally global (legacy behavior).
		STACK_SIZE=$(cat $MAX_SIZE)

		if [ $STACK_SIZE -ge $LIMIT ]; then
			echo
			echo "Warning: max stack size $STACK_SIZE bytes"
			cat $TRACE
		fi
	fi
}
782
# Terminate the ZFS event daemon if its pid file exists; otherwise a
# silent no-op.
kill_zed() {
	[ -f $ZED_PIDFILE ] || return 0
	kill $(cat $ZED_PIDFILE)
}
787 }