#!/bin/bash
#
# Common support functions for testing scripts. If a script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used. If no script-config can be found then the
# installed kernel modules and utilities will be used.

basedir="$(dirname "$0")"

SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
        . "${basedir}/../${SCRIPT_CONFIG}"
else
        KERNEL_MODULES=(zlib_deflate zlib_inflate)
        MODULES=(spl splat zavl znvpair zunicode zcommon zfs)
fi

PROG="<define PROG>"
CLEANUP=
VERBOSE=
VERBOSE_FLAG=
FORCE=
FORCE_FLAG=
DUMP_LOG=
ERROR=
RAID0S=()
RAID10S=()
RAIDZS=()
RAIDZ2S=()
TESTS_RUN=${TESTS_RUN:-'*'}
TESTS_SKIP=${TESTS_SKIP:-}

prefix=@prefix@
exec_prefix=@exec_prefix@
pkgdatadir=@datarootdir@/@PACKAGE@
bindir=@bindir@
sbindir=@sbindir@
udevdir=@udevdir@
udevruledir=@udevruledir@
sysconfdir=@sysconfdir@
localstatedir=@localstatedir@

ETCDIR=${ETCDIR:-/etc}
DEVDIR=${DEVDIR:-/dev/disk/by-vdev}
ZPOOLDIR=${ZPOOLDIR:-${pkgdatadir}/zpool-config}
ZPIOSDIR=${ZPIOSDIR:-${pkgdatadir}/zpios-test}
ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkgdatadir}/zpios-profile}
TESTSDIR=${TESTSDIR:-${pkgdatadir}/zfs-tests}
RUNFILEDIR=${RUNFILEDIR:-${pkgdatadir}/runfiles}

ZDB=${ZDB:-${sbindir}/zdb}
ZFS=${ZFS:-${sbindir}/zfs}
ZINJECT=${ZINJECT:-${sbindir}/zinject}
ZHACK=${ZHACK:-${sbindir}/zhack}
ZPOOL=${ZPOOL:-${sbindir}/zpool}
ZTEST=${ZTEST:-${sbindir}/ztest}
ZPIOS=${ZPIOS:-${sbindir}/zpios}

COMMON_SH=${COMMON_SH:-${pkgdatadir}/common.sh}
ZFS_SH=${ZFS_SH:-${pkgdatadir}/zfs.sh}
ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkgdatadir}/zpool-create.sh}
ZPIOS_SH=${ZPIOS_SH:-${pkgdatadir}/zpios.sh}
ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkgdatadir}/zpios-survey.sh}
TEST_RUNNER=${TEST_RUNNER:-${pkgdatadir}/test-runner/bin/test-runner.py}
STF_TOOLS=${STF_TOOLS:-${pkgdatadir}/test-runner}
STF_SUITE=${STF_SUITE:-${pkgdatadir}/zfs-tests}

LDMOD=${LDMOD:-/sbin/modprobe}
LSMOD=${LSMOD:-/sbin/lsmod}
RMMOD=${RMMOD:-/sbin/rmmod}
INFOMOD=${INFOMOD:-/sbin/modinfo}
LOSETUP=${LOSETUP:-/sbin/losetup}
MDADM=${MDADM:-/sbin/mdadm}
DMSETUP=${DMSETUP:-/sbin/dmsetup}
PARTED=${PARTED:-/sbin/parted}
BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
SYSCTL=${SYSCTL:-/sbin/sysctl}
UDEVADM=${UDEVADM:-/sbin/udevadm}
AWK=${AWK:-/usr/bin/awk}

ZED_PIDFILE=${ZED_PIDFILE:-${localstatedir}/run/zed.pid}

COLOR_BLACK="\033[0;30m"
COLOR_DK_GRAY="\033[1;30m"
COLOR_BLUE="\033[0;34m"
COLOR_LT_BLUE="\033[1;34m"
COLOR_GREEN="\033[0;32m"
COLOR_LT_GREEN="\033[1;32m"
COLOR_CYAN="\033[0;36m"
COLOR_LT_CYAN="\033[1;36m"
COLOR_RED="\033[0;31m"
COLOR_LT_RED="\033[1;31m"
COLOR_PURPLE="\033[0;35m"
COLOR_LT_PURPLE="\033[1;35m"
COLOR_BROWN="\033[0;33m"
COLOR_YELLOW="\033[1;33m"
COLOR_LT_GRAY="\033[0;37m"
COLOR_WHITE="\033[1;37m"
COLOR_RESET="\033[0m"

die() {
        echo -e "${PROG}: $1" >&2
        exit 1
}

msg() {
        if [ ${VERBOSE} ]; then
                echo "$@"
        fi
}

pass() {
        echo -e "${COLOR_GREEN}Pass${COLOR_RESET}"
}

fail() {
        echo -e "${COLOR_RED}Fail${COLOR_RESET} ($1)"
        exit $1
}

skip() {
        echo -e "${COLOR_BROWN}Skip${COLOR_RESET}"
}

populate() {
        local ROOT=$1
        local MAX_DIR_SIZE=$2
        local MAX_FILE_SIZE=$3

        mkdir -p $ROOT/{a,b,c,d,e,f,g}/{h,i}
        DIRS=`find $ROOT`

        for DIR in $DIRS; do
                COUNT=$(($RANDOM % $MAX_DIR_SIZE))

                for i in `seq $COUNT`; do
                        FILE=`mktemp -p ${DIR}`
                        SIZE=$(($RANDOM % $MAX_FILE_SIZE))
                        dd if=/dev/urandom of=$FILE bs=1k count=$SIZE &>/dev/null
                done
        done

        return 0
}
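
# Illustrative usage (path hypothetical; nothing here runs when this
# file is sourced): create a tree with at most 10 files per directory
# and at most 100 KiB of random data per file, as init() does:
#
#   populate /var/tmp/zfs.src.example 10 100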

init() {
        # Disable the udev rule 90-zfs.rules to prevent the zfs module
        # stack from being loaded due to the detection of a zfs device.
        # This is important because the test scripts require full control
        # over when and how the modules are loaded/unloaded.
        local RULE=${udevruledir}/90-zfs.rules
        if test -e ${RULE}; then
                mv ${RULE} ${RULE}.disabled
        fi

        # Create a random directory tree of files and sub-directories
        # to act as a copy source for the various regression tests.
        SRC_DIR=`mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX`

        # A single trap handles both cleanup actions, since a later
        # trap on the same signals would replace an earlier one.
        trap "rm -Rf $SRC_DIR; test -e ${RULE}.disabled && \
            mv ${RULE}.disabled ${RULE}" INT TERM EXIT

        populate $SRC_DIR 10 100
}

spl_dump_log() {
        ${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
        local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
        ${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
        echo
        echo "Dumped debug log: ${NAME}.log"
        tail -n1 ${NAME}.log
        echo
        return 0
}

check_modules() {
        local LOADED_MODULES=()
        local MISSING_MODULES=()

        for MOD in ${MODULES[*]}; do
                local NAME=`basename $MOD .ko`

                if ${LSMOD} | egrep -q "^${NAME}"; then
                        LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
                fi

                if ! ${INFOMOD} ${MOD} &>/dev/null; then
                        MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
                fi
        done

        if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
                ERROR="Unload these modules with '${PROG} -u':\n"
                ERROR="${ERROR}${LOADED_MODULES[*]}"
                return 1
        fi

        if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
                ERROR="The following modules cannot be found,"
                ERROR="${ERROR} ensure your source trees are built:\n"
                ERROR="${ERROR}${MISSING_MODULES[*]}"
                return 1
        fi

        return 0
}

load_module() {
        local NAME=`basename $1 .ko`

        if [ ${VERBOSE} ]; then
                echo "Loading ${NAME} ($@)"
        fi

        ${LDMOD} $* &>/dev/null
        if [ $? -ne 0 ]; then
                echo "Failed to load ${NAME} ($@)"
                return 1
        fi

        return 0
}

load_modules() {
        mkdir -p /etc/zfs

        for MOD in ${KERNEL_MODULES[*]}; do
                load_module ${MOD} >/dev/null
        done

        for MOD in ${MODULES[*]}; do
                local NAME=`basename ${MOD} .ko`
                local VALUE=

                for OPT in "$@"; do
                        OPT_NAME=`echo ${OPT} | cut -f1 -d'='`

                        if [ ${NAME} = "${OPT_NAME}" ]; then
                                VALUE=`echo ${OPT} | cut -f2- -d'='`
                        fi
                done

                load_module ${MOD} ${VALUE} || return 1
        done

        if [ ${VERBOSE} ]; then
                echo "Successfully loaded ZFS module stack"
        fi

        return 0
}
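
# Illustrative usage (the module option shown is an assumption, not a
# value the test scripts require): arguments of the form
# <module>=<options> are matched by module name and passed to modprobe
# when that module is loaded:
#
#   load_modules "zfs=zfs_prefetch_disable=1"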

unload_module() {
        local NAME=`basename $1 .ko`

        if [ ${VERBOSE} ]; then
                echo "Unloading ${NAME} ($@)"
        fi

        if ! ${RMMOD} ${NAME}; then
                ERROR="Failed to unload ${NAME}"
                return 1
        fi

        return 0
}

unload_modules() {
        local MODULES_REVERSE=( $(echo ${MODULES[@]} |
                ${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )

        for MOD in ${MODULES_REVERSE[*]}; do
                local NAME=`basename ${MOD} .ko`
                local USE_COUNT=`${LSMOD} |
                        egrep "^${NAME} " | ${AWK} '{print $3}'`

                if [ "${USE_COUNT}" = "0" ]; then
                        if [ "${DUMP_LOG}" -a "${NAME}" = "spl" ]; then
                                spl_dump_log
                        fi

                        unload_module ${MOD} || return 1
                fi
        done

        if [ ${VERBOSE} ]; then
                echo "Successfully unloaded ZFS module stack"
        fi

        return 0
}

#
# Check that the losetup utility is installed.
#
check_loop_utils() {
        test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
}

#
# Find and return an unused loop device. A new /dev/loopN node will be
# created if required. The kernel loop driver will automatically register
# the minor as long as it's less than /sys/module/loop/parameters/max_loop.
#
unused_loop_device() {
        local DEVICE=$(${LOSETUP} -f)
        local MAX_LOOP_PATH="/sys/module/loop/parameters/max_loop"
        local MAX_LOOP

        # An existing /dev/loopN device was available.
        if [ -n "${DEVICE}" ]; then
                echo "${DEVICE}"
                return 0
        fi

        # Create a new /dev/loopN provided we are not at MAX_LOOP.
        if [ -f "${MAX_LOOP_PATH}" ]; then
                MAX_LOOP=`cat ${MAX_LOOP_PATH}`
                if [ ${MAX_LOOP} -eq 0 ]; then
                        MAX_LOOP=255
                fi

                for (( i=0; i<=${MAX_LOOP}; i++ )); do
                        DEVICE="/dev/loop$i"

                        if [ -b "${DEVICE}" ]; then
                                continue
                        else
                                mknod -m660 "${DEVICE}" b 7 $i
                                chown root:disk "${DEVICE}"
                                chmod 666 "${DEVICE}"

                                echo "${DEVICE}"
                                return 0
                        fi
                done
        fi

        die "Error: Unable to create new loopback device"
}
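
# Illustrative usage (backing file path hypothetical): attach the
# first unused loop device to a sparse file for use as a test vdev:
#
#   LODEVICE=$(unused_loop_device)
#   ${LOSETUP} ${LODEVICE} /var/tmp/zpool-vdev0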

#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours. However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' or 'deleted' in the name. So any
# damage we might do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
        local TMP_FILE=`mktemp`

        ${LOSETUP} -a | tr -d '()' >${TMP_FILE}
        ${AWK} -F":" -v losetup="$LOSETUP" \
            '/zpool/ || /deleted/ { system(losetup" -d "$1) }' ${TMP_FILE}
        ${AWK} -F" " '/zpool/ || /deleted/ { system("rm -f "$3) }' ${TMP_FILE}

        rm -f ${TMP_FILE}
}

#
# Destroy the passed loopback devices; this is used when you know
# the names of the loopback devices.
#
destroy_loop_devices() {
        local LODEVICES="$1"

        msg "Destroying ${LODEVICES}"
        ${LOSETUP} -d ${LODEVICES} || \
            die "Error $? destroying ${LODEVICES} loopback"

        return 0
}

#
# Create a device label taking care to briefly wait if udev needs to settle.
#
label() {
        local DEVICE=$1
        local LABEL=$2

        wait_udev ${DEVICE} 30 || return 1
        ${PARTED} ${DEVICE} --script -- mklabel ${LABEL} || return 2

        return 0
}

#
# Create a primary partition on a block device.
#
partition() {
        local DEVICE=$1
        local TYPE=$2
        local START=$3
        local END=$4

        ${PARTED} --align optimal ${DEVICE} --script -- \
            mkpart ${TYPE} ${START} ${END} || return 1
        udev_trigger

        return 0
}

#
# Create a filesystem on the block device.
#
format() {
        local DEVICE=$1
        local FSTYPE=$2

        # Force 4K blocksize, else mkfs.ext2 tries to use 8K, which
        # won't mount.
        /sbin/mkfs.${FSTYPE} -b 4096 -F -q ${DEVICE} >/dev/null || return 1

        return 0
}
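
# Illustrative sequence (device and sizes hypothetical): label,
# partition, and format a scratch device with the helpers above:
#
#   label /dev/disk/by-vdev/a1 gpt || die "label"
#   partition /dev/disk/by-vdev/a1 primary 1MiB 64MiB || die "partition"
#   format /dev/disk/by-vdev/a1p1 ext2 || die "format"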

#
# Check that the mdadm and parted utilities are installed.
#
check_md_utils() {
        test -f ${MDADM} || die "${MDADM} utility must be installed"
        test -f ${PARTED} || die "${PARTED} utility must be installed"
}

check_md_partitionable() {
        local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
        local LODEVICE=`unused_loop_device`
        local MDDEVICE=`unused_md_device`
        local RESULT=1

        check_md_utils

        rm -f ${LOFILE}
        dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
            &>/dev/null || return ${RESULT}

        msg "Creating ${LODEVICE} using ${LOFILE}"
        ${LOSETUP} ${LODEVICE} ${LOFILE}
        if [ $? -ne 0 ]; then
                rm -f ${LOFILE}
                return ${RESULT}
        fi

        msg "Creating ${MDDEVICE} using ${LODEVICE}"
        ${MDADM} --build ${MDDEVICE} --level=faulty \
            --raid-devices=1 ${LODEVICE} &>/dev/null
        if [ $? -ne 0 ]; then
                destroy_loop_devices ${LODEVICE}
                rm -f ${LOFILE}
                return ${RESULT}
        fi
        wait_udev ${MDDEVICE} 30

        ${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null
        RESULT=$?

        destroy_md_devices ${MDDEVICE}
        destroy_loop_devices ${LODEVICE}
        rm -f ${LOFILE}

        return ${RESULT}
}

#
# Find and return an unused md device.
#
unused_md_device() {
        for (( i=0; i<32; i++ )); do
                MDDEVICE=md${i}

                # Skip active devices in /proc/mdstat.
                grep -q "${MDDEVICE} " /proc/mdstat && continue

                # Device doesn't exist, use it.
                if [ ! -e /dev/${MDDEVICE} ]; then
                        echo /dev/${MDDEVICE}
                        return
                fi

                # Device exists but may not be in use.
                if [ -b /dev/${MDDEVICE} ]; then
                        ${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
                        if [ $? -eq 1 ]; then
                                echo /dev/${MDDEVICE}
                                return
                        fi
                fi
        done

        die "Error: Unable to find unused md device"
}
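
# Illustrative usage (values hypothetical): pair an unused md device
# with a loopback device to build a single-disk faulty-level array,
# as check_md_partitionable() does:
#
#   MDDEVICE=$(unused_md_device)
#   ${MDADM} --build ${MDDEVICE} --level=faulty --raid-devices=1 ${LODEVICE}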

#
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours. However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super block
# so you should be able to reconstruct them.
#
cleanup_md_devices() {
        destroy_md_devices "`ls /dev/md* 2>/dev/null | grep -v p`"
        udev_trigger
}

#
# Destroy the passed md devices; this is used when you know
# the names of the md devices.
#
destroy_md_devices() {
        local MDDEVICES="$1"

        msg "Destroying ${MDDEVICES}"
        for MDDEVICE in ${MDDEVICES}; do
                ${MDADM} --stop ${MDDEVICE} &>/dev/null
                ${MDADM} --remove ${MDDEVICE} &>/dev/null
                ${MDADM} --detail ${MDDEVICE} &>/dev/null
        done

        return 0
}

#
# Check that the scsi utilities are installed.
#
check_sd_utils() {
        ${INFOMOD} scsi_debug &>/dev/null || die "scsi_debug module required"
        test -f ${LSSCSI} || die "${LSSCSI} utility must be installed"
}

#
# Rescan the scsi bus for scsi_debug devices. It is preferable to use the
# scsi-rescan tool if it is installed, but if it's not we can fall back to
# removing and re-adding the device manually. This rescan will only affect
# the first scsi_debug device if scsi-rescan is missing.
#
scsi_rescan() {
        local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"

        if [ -f ${SCSIRESCAN} ]; then
                ${SCSIRESCAN} --forcerescan --remove &>/dev/null
        else
                local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
                local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
                echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
                udev_trigger
                echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
                udev_trigger
        fi
}

#
# Trigger udev and wait for it to settle.
#
udev_trigger() {
        if [ -f ${UDEVADM} ]; then
                ${UDEVADM} trigger --action=change --subsystem-match=block
                ${UDEVADM} settle
        else
                /sbin/udevtrigger
                /sbin/udevsettle
        fi
}

#
# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/by-vdev/<CHANNEL><RANK>
# disk mapping. In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n. A CHANNEL should identify a group of RANKS which are all
# attached to a single controller; each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
# given a known hardware configuration.
#
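# For example, with 2 channels and 2 ranks the rules create a1 and a2
# on the first controller and b1 and b2 on the second, so the second
# disk on controller two is /dev/disk/by-vdev/b2.
#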
udev_setup() {
        local SRC_PATH=$1

        # When running in-tree manually construct symlinks in-tree to
        # the proper devices. Symlinks are installed for all entries
        # in the config file regardless of whether that device actually
        # exists. When installed as a package udev can be relied on for
        # this and it will only create links for devices which exist.
        if [ ${INTREE} ]; then
                local ORIG_PWD=`pwd`
                mkdir -p ${DEVDIR}/
                cd ${DEVDIR}/
                ${AWK} '!/^#/ && /./ { system( \
                    "ln -f -s /dev/disk/by-path/"$2" "$1";" \
                    "ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
                    "ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
                    ) }' $SRC_PATH
                cd ${ORIG_PWD}
        else
                DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
                DST_PATH=/etc/zfs/${DST_FILE}

                if [ -e ${DST_PATH} ]; then
                        die "Error: Config ${DST_PATH} already exists"
                fi

                cp ${SRC_PATH} ${DST_PATH}
                udev_trigger
        fi

        return 0
}

udev_cleanup() {
        local SRC_PATH=$1

        if [ ${INTREE} ]; then
                local ORIG_PWD=`pwd`
                cd ${DEVDIR}/
                ${AWK} '!/^#/ && /./ { system( \
                    "rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
                cd ${ORIG_PWD}
        fi

        return 0
}

udev_cr2d() {
        local CHANNEL=`echo "obase=16; $1+96" | bc`
        local RANK=$2

        printf "\x${CHANNEL}${RANK}"
}
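
# Worked example: udev_cr2d maps a (channel, rank) pair to the
# <letter><number> names described above. For channel 1, bc emits
# 97 in hex (0x61), and printf "\x61" prints 'a', so:
#
#   DISK=`udev_cr2d 1 4`    # DISK="a4"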

udev_raid0_setup() {
        local RANKS=$1
        local CHANNELS=$2
        local IDX=0

        RAID0S=()
        for RANK in `seq 1 ${RANKS}`; do
                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAID0S[${IDX}]="${DEVDIR}/${DISK}"
                        let IDX=IDX+1
                done
        done

        return 0
}
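
# Illustrative usage: udev_raid0_setup 2 4 fills RAID0S with eight
# entries, ${DEVDIR}/a1 b1 c1 d1 for rank 1 followed by a2 b2 c2 d2
# for rank 2, ready to pass to 'zpool create' as a stripe.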

udev_raid10_setup() {
        local RANKS=$1
        local CHANNELS=$2
        local IDX=0

        RAID10S=()
        for RANK in `seq 1 ${RANKS}`; do
                for CHANNEL1 in `seq 1 2 ${CHANNELS}`; do
                        let CHANNEL2=CHANNEL1+1
                        DISK1=`udev_cr2d ${CHANNEL1} ${RANK}`
                        DISK2=`udev_cr2d ${CHANNEL2} ${RANK}`
                        GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
                        RAID10S[${IDX}]="mirror ${GROUP}"
                        let IDX=IDX+1
                done
        done

        return 0
}

udev_raidz_setup() {
        local RANKS=$1
        local CHANNELS=$2

        RAIDZS=()
        for RANK in `seq 1 ${RANKS}`; do
                RAIDZ=("raidz")

                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
                done

                RAIDZS[${RANK}]="${RAIDZ[*]}"
        done

        return 0
}

udev_raidz2_setup() {
        local RANKS=$1
        local CHANNELS=$2

        RAIDZ2S=()
        for RANK in `seq 1 ${RANKS}`; do
                RAIDZ2=("raidz2")

                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
                done

                RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
        done

        return 0
}

run_one_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
        test_${TEST_NUM}
}

skip_one_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
        skip
}

run_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        for i in ${TESTS_SKIP[@]}; do
                if [[ $i == ${TEST_NUM} ]]; then
                        skip_one_test ${TEST_NUM} "${TEST_NAME}"
                        return 0
                fi
        done

        if [ "${TESTS_RUN[0]}" = "*" ]; then
                run_one_test ${TEST_NUM} "${TEST_NAME}"
        else
                for i in ${TESTS_RUN[@]}; do
                        if [[ $i == ${TEST_NUM} ]]; then
                                run_one_test ${TEST_NUM} "${TEST_NAME}"
                                return 0
                        fi
                done

                skip_one_test ${TEST_NUM} "${TEST_NAME}"
        fi
}
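
# Illustrative usage (test numbers hypothetical): with TESTS_RUN='1 3'
# and TESTS_SKIP='2' in the environment, a caller defining test_1()
# through test_3() would run tests 1 and 3 and skip test 2:
#
#   run_test 1 "first"
#   run_test 2 "second"
#   run_test 3 "third"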

wait_udev() {
        local DEVICE=$1
        local DELAY=$2
        local COUNT=0

        udev_trigger
        while [ ! -e ${DEVICE} ]; do
                if [ ${COUNT} -gt ${DELAY} ]; then
                        return 1
                fi

                let COUNT=${COUNT}+1
                sleep 1
        done

        return 0
}
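
# Illustrative usage (device path hypothetical): wait up to 30 seconds
# for a device node to appear, as label() does before partitioning:
#
#   wait_udev /dev/disk/by-vdev/a1 30 || die "device did not appear"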

stack_clear() {
        local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
        local STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled

        if [ -e $STACK_MAX_SIZE ]; then
                echo 1 >$STACK_TRACER_ENABLED
                echo 0 >$STACK_MAX_SIZE
        fi
}

stack_check() {
        local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
        local STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
        local STACK_LIMIT=7000

        if [ -e $STACK_MAX_SIZE ]; then
                STACK_SIZE=`cat $STACK_MAX_SIZE`

                if [ $STACK_SIZE -ge $STACK_LIMIT ]; then
                        echo
                        echo "Warning: max stack size $STACK_SIZE bytes"
                        cat $STACK_TRACE
                fi
        fi
}

kill_zed() {
        if [ -f $ZED_PIDFILE ]; then
                kill $(cat $ZED_PIDFILE)
        fi
}