# scripts/common.sh.in -- mirror_zfs.git (branch merge 'zed-initial')
#!/bin/bash
#
# Common support functions for testing scripts.  If a script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used.  If no script-config can be found then the
# installed kernel modules and utilities will be used.

# Quote "$0" so a script path containing whitespace still resolves.
basedir="$(dirname "$0")"

SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
	. "${basedir}/../${SCRIPT_CONFIG}"
else
	KERNEL_MODULES=(zlib_deflate zlib_inflate)
	MODULES=(spl splat zavl znvpair zunicode zcommon zfs)
fi
17
# Global state shared by all test scripts; the flag variables are
# toggled by command line options in the individual scripts.
PROG="<define PROG>"		# Each consumer script overrides with its name
CLEANUP=			# Perform cleanup when set
VERBOSE=			# Enable msg() output when set
VERBOSE_FLAG=			# Flag form forwarded to child scripts
FORCE=				# Force flag; consumed by the individual scripts
FORCE_FLAG=			# Flag form forwarded to child scripts
DUMP_LOG=			# Dump SPL debug log before unloading spl (see unload_modules)
ERROR=				# Last error message, written by helper functions
RAID0S=()			# Populated by udev_raid0_setup()
RAID10S=()			# Populated by udev_raid10_setup()
RAIDZS=()			# Populated by udev_raidz_setup()
RAIDZ2S=()			# Populated by udev_raidz2_setup()
TESTS_RUN=${TESTS_RUN:-'*'}	# Test numbers to run; '*' means all
TESTS_SKIP=${TESTS_SKIP:-}	# Test numbers to skip
32
33 prefix=@prefix@
34 exec_prefix=@exec_prefix@
35 pkgdatadir=@datarootdir@/@PACKAGE@
36 bindir=@bindir@
37 sbindir=@sbindir@
38 udevdir=@udevdir@
39 udevruledir=@udevruledir@
40 sysconfdir=@sysconfdir@
41 localstatedir=@localstatedir@
42
43 ETCDIR=${ETCDIR:-/etc}
44 DEVDIR=${DEVDIR:-/dev/disk/by-vdev}
45 ZPOOLDIR=${ZPOOLDIR:-${pkgdatadir}/zpool-config}
46 ZPIOSDIR=${ZPIOSDIR:-${pkgdatadir}/zpios-test}
47 ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkgdatadir}/zpios-profile}
48
49 ZDB=${ZDB:-${sbindir}/zdb}
50 ZFS=${ZFS:-${sbindir}/zfs}
51 ZINJECT=${ZINJECT:-${sbindir}/zinject}
52 ZPOOL=${ZPOOL:-${sbindir}/zpool}
53 ZTEST=${ZTEST:-${sbindir}/ztest}
54 ZPIOS=${ZPIOS:-${sbindir}/zpios}
55
56 COMMON_SH=${COMMON_SH:-${pkgdatadir}/common.sh}
57 ZFS_SH=${ZFS_SH:-${pkgdatadir}/zfs.sh}
58 ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkgdatadir}/zpool-create.sh}
59 ZPIOS_SH=${ZPIOS_SH:-${pkgdatadir}/zpios.sh}
60 ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkgdatadir}/zpios-survey.sh}
61
62 LDMOD=${LDMOD:-/sbin/modprobe}
63 LSMOD=${LSMOD:-/sbin/lsmod}
64 RMMOD=${RMMOD:-/sbin/rmmod}
65 INFOMOD=${INFOMOD:-/sbin/modinfo}
66 LOSETUP=${LOSETUP:-/sbin/losetup}
67 MDADM=${MDADM:-/sbin/mdadm}
68 PARTED=${PARTED:-/sbin/parted}
69 BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
70 LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
71 SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
72 SYSCTL=${SYSCTL:-/sbin/sysctl}
73 UDEVADM=${UDEVADM:-/sbin/udevadm}
74 AWK=${AWK:-/usr/bin/awk}
75
76 ZED_PIDFILE=${ZED_PIDFILE:-${localstatedir}/run/zed.pid}
77
78 COLOR_BLACK="\033[0;30m"
79 COLOR_DK_GRAY="\033[1;30m"
80 COLOR_BLUE="\033[0;34m"
81 COLOR_LT_BLUE="\033[1;34m"
82 COLOR_GREEN="\033[0;32m"
83 COLOR_LT_GREEN="\033[1;32m"
84 COLOR_CYAN="\033[0;36m"
85 COLOR_LT_CYAN="\033[1;36m"
86 COLOR_RED="\033[0;31m"
87 COLOR_LT_RED="\033[1;31m"
88 COLOR_PURPLE="\033[0;35m"
89 COLOR_LT_PURPLE="\033[1;35m"
90 COLOR_BROWN="\033[0;33m"
91 COLOR_YELLOW="\033[1;33m"
92 COLOR_LT_GRAY="\033[0;37m"
93 COLOR_WHITE="\033[1;37m"
94 COLOR_RESET="\033[0m"
95
# Print an error message (escape sequences honored) to stderr,
# prefixed with the program name, and terminate with status 1.
die() {
	printf '%b\n' "${PROG}: $1" >&2
	exit 1
}
100
# Echo the given arguments, but only when VERBOSE mode is enabled.
msg() {
	[ -z "${VERBOSE}" ] || echo "$@"
}
106
# Print a green "Pass" status marker.
pass() {
	printf '%b\n' "${COLOR_GREEN}Pass${COLOR_RESET}"
}
110
# Print a red "Fail" marker with the supplied error code, then exit
# with that code.
fail() {
	printf '%b\n' "${COLOR_RED}Fail${COLOR_RESET} ($1)"
	exit $1
}
115
# Print a brown "Skip" status marker.
skip() {
	printf '%b\n' "${COLOR_BROWN}Skip${COLOR_RESET}"
}
119
# Populate ROOT with a small random tree of files to act as a copy
# source for the regression tests.
#
# $1 - root directory (created if missing)
# $2 - maximum number of files per directory (exclusive)
# $3 - maximum file size in KiB (exclusive)
#
# Always returns 0; individual dd failures are ignored.
populate() {
	local ROOT=$1
	local MAX_DIR_SIZE=$2
	local MAX_FILE_SIZE=$3
	local DIR COUNT FILE SIZE i

	mkdir -p "$ROOT"/{a,b,c,d,e,f,g}/{h,i}

	# Drop a random number of random-sized files in each directory.
	for DIR in $(find "$ROOT"); do
		COUNT=$((RANDOM % MAX_DIR_SIZE))

		for i in $(seq $COUNT); do
			FILE=$(mktemp -p "${DIR}")
			SIZE=$((RANDOM % MAX_FILE_SIZE))
			dd if=/dev/urandom of="$FILE" bs=1k \
			    count=$SIZE &>/dev/null
		done
	done

	return 0
}
140
# Prepare the test environment.
#
# Disable the udev rule 90-zfs.rules to prevent the zfs module stack
# from being loaded due to the detection of a zfs device.  This is
# important because the test scripts require full control over when
# and how the modules are loaded/unloaded.  A single INT/TERM/EXIT
# trap both restores the rule and removes the temporary source tree;
# the previous version installed two separate traps and the second
# silently replaced the first, so the udev rule was never restored.
init() {
	local RULE=${udevruledir}/90-zfs.rules
	if test -e ${RULE}; then
		# $SRC_DIR is escaped so it expands when the trap fires,
		# after it has been assigned below.
		trap "mv ${RULE}.disabled ${RULE}; rm -Rf \$SRC_DIR" \
		    INT TERM EXIT
		mv ${RULE} ${RULE}.disabled
	else
		trap 'rm -Rf $SRC_DIR' INT TERM EXIT
	fi

	# Create a random directory tree of files and sub-directories to
	# act as a copy source for the various regression tests.
	SRC_DIR=$(mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX)
	populate $SRC_DIR 10 100
}
159
# Dump the SPL debug buffer to a log file in the current directory and
# print its name and final line.  Requires the in-tree SPL build
# (${SPLBUILD}/cmd/spl) to decode the kernel dump.
spl_dump_log() {
	# Ask the SPL to dump its debug buffer via sysctl.
	${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
	# NOTE(review): assumes the dump file path is the 5th
	# space-separated field of the very last dmesg line; fragile if
	# another kernel message races in -- confirm against the SPL
	# log message format.
	local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
	${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
	echo
	echo "Dumped debug log: ${NAME}.log"
	tail -n1 ${NAME}.log
	echo
	return 0
}
170
# Verify that none of the ZFS stack modules are currently loaded and
# that all of them can be located by modinfo.  On failure a human
# readable description is left in the global ERROR and 1 is returned.
#
# Globals: MODULES, LSMOD, INFOMOD, PROG (read); ERROR (written)
check_modules() {
	local LOADED_MODULES=()
	local MISSING_MODULES=()

	for MOD in ${MODULES[*]}; do
		local NAME=`basename $MOD .ko`

		if ${LSMOD} | egrep -q "^${NAME}"; then
			LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
		fi

		# A module is missing when modinfo cannot resolve it.
		# The previous "[ ${INFOMOD} ${MOD} ]" form was a test
		# expression that never executed modinfo at all, so
		# missing modules were never detected.
		if ! ${INFOMOD} ${MOD} &>/dev/null; then
			MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
		fi
	done

	if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
		ERROR="Unload these modules with '${PROG} -u':\n"
		ERROR="${ERROR}${LOADED_MODULES[*]}"
		return 1
	fi

	if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
		ERROR="The following modules can not be found,"
		ERROR="${ERROR} ensure your source trees are built:\n"
		ERROR="${ERROR}${MISSING_MODULES[*]}"
		return 1
	fi

	return 0
}
202
# Load a single kernel module via ${LDMOD}, forwarding all arguments.
#
# $1 - module name or path (.ko suffix stripped for display only)
#
# On failure the global ERROR is set and 1 is returned.  The previous
# 'ERROR="..." return 1' one-liner never propagated ERROR: a variable
# assignment prefixed to the return builtin does not persist in the
# caller's environment in bash's default (non-POSIX) mode.
load_module() {
	local NAME=`basename $1 .ko`

	if [ ${VERBOSE} ]; then
		echo "Loading ${NAME} ($@)"
	fi

	${LDMOD} $* &>/dev/null || { ERROR="Failed to load $1"; return 1; }

	return 0
}
214
# Load the configured kernel module stack: first the prerequisite
# KERNEL_MODULES, then every module in MODULES.  Arguments of the form
# "name=options" are matched by module name and passed along to the
# corresponding load_module invocation.
load_modules() {
	mkdir -p /etc/zfs

	for MOD in ${KERNEL_MODULES[*]}; do
		load_module ${MOD}
	done

	for MOD in ${MODULES[*]}; do
		local NAME=$(basename ${MOD} .ko)
		local VALUE=

		# Pick out the option string destined for this module.
		for OPT in "$@"; do
			OPT_NAME=${OPT%%=*}

			if [ ${NAME} = "${OPT_NAME}" ]; then
				VALUE=${OPT#*=}
			fi
		done

		load_module ${MOD} ${VALUE} || return 1
	done

	if [ ${VERBOSE} ]; then
		echo "Successfully loaded ZFS module stack"
	fi

	return 0
}
243
# Unload a single kernel module by name via ${RMMOD}.
#
# $1 - module name or path (.ko suffix stripped)
#
# On failure the global ERROR is set and 1 is returned.  As in
# load_module, the former 'ERROR="..." return 1' form never actually
# propagated ERROR to the caller.
unload_module() {
	local NAME=`basename $1 .ko`

	if [ ${VERBOSE} ]; then
		echo "Unloading ${NAME} ($@)"
	fi

	${RMMOD} ${NAME} || { ERROR="Failed to unload ${NAME}"; return 1; }

	return 0
}
255
# Unload the ZFS module stack in reverse dependency order.  Modules
# whose lsmod use count is non-zero are silently left loaded.  When
# the global DUMP_LOG is set the SPL debug log is dumped just before
# the spl module itself is removed.
unload_modules() {
	# Reverse ${MODULES[@]} so dependent modules are removed first.
	local MODULES_REVERSE=( $(echo ${MODULES[@]} |
		${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )

	for MOD in ${MODULES_REVERSE[*]}; do
		local NAME=`basename ${MOD} .ko`
		# Third lsmod column is the reference count.
		local USE_COUNT=`${LSMOD} |
				egrep "^${NAME} "| ${AWK} '{print $3}'`

		if [ "${USE_COUNT}" = 0 ] ; then

			if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then
				spl_dump_log
			fi

			unload_module ${MOD} || return 1
		fi
	done

	if [ ${VERBOSE} ]; then
		echo "Successfully unloaded ZFS module stack"
	fi

	return 0
}
281
#
# Check that the losetup utility is installed.
#
check_loop_utils() {
	test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
}
288
289
#
# Find and return an unused loopback device.  Each /dev/loopN node is
# queried with losetup, which exits non-zero for unbound devices; the
# first unbound device found is echoed.  Dies when none is free.
#
unused_loop_device() {
	for DEVICE in `ls -1 /dev/loop[0-9]* 2>/dev/null`; do
		# losetup fails when the device has no backing file,
		# i.e. the device is free for our use.
		${LOSETUP} ${DEVICE} &>/dev/null
		if [ $? -ne 0 ]; then
			echo ${DEVICE}
			return
		fi
	done

	die "Error: Unable to find unused loopback device"
}
304
#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours.  However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' in the name.  So any damage we might
# do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
	local TMP_FILE=`mktemp`

	${LOSETUP} -a | tr -d '()' >${TMP_FILE}
	# Detach matching devices (field 1) and remove their backing
	# files (field 3).  Use the configured $LOSETUP binary; the
	# previous version hard-coded "losetup" inside system() despite
	# passing it in as an awk variable.
	${AWK} -F":" -v losetup="$LOSETUP" \
	    '/zpool/ { system(losetup" -d "$1) }' ${TMP_FILE}
	${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}

	rm -f ${TMP_FILE}
}
322
#
# Destroy the passed loopback devices, this is used when you know
# the names of the loopback devices.
#
# $1 - whitespace-separated list of loop device nodes
destroy_loop_devices() {
	local LODEVICES="$1"

	msg "Destroying ${LODEVICES}"
	# The previous error message interpolated ${FILE} and ${DEVICE},
	# neither of which is ever set in this function.
	${LOSETUP} -d ${LODEVICES} || \
	    die "Error $? destroying ${LODEVICES} loopback"

	# NOTE(review): FILES is a global expected to be set by the
	# caller to the backing files of these devices -- confirm each
	# caller defines it before relying on this cleanup.
	rm -f ${FILES}
	return 0
}
337
#
# Create a device label taking care to briefly wait if udev needs to settle.
#
# $1 - block device node
# $2 - parted label type (e.g. "gpt", "msdos")
#
# Returns 1 if the device never appeared, 2 if parted failed.
label() {
	local DEVICE=$1
	local LABEL=$2

	wait_udev ${DEVICE} 30 || return 1
	${PARTED} ${DEVICE} --script -- mklabel ${LABEL} || return 2

	return 0
}
350
#
# Create a primary partition on a block device.
#
# $1 - block device node
# $2 - partition type passed to parted mkpart (e.g. "primary")
# $3 - partition start (parted location spec)
# $4 - partition end (parted location spec)
partition() {
	local DEVICE=$1
	local TYPE=$2
	local START=$3
	local END=$4

	${PARTED} --align optimal ${DEVICE} --script -- \
	    mkpart ${TYPE} ${START} ${END} || return 1
	# Give udev a chance to create the new partition node.
	udev_trigger

	return 0
}
366
#
# Create a filesystem of the requested type on the block device.
#
# $1 - block device node
# $2 - filesystem type (resolved to /sbin/mkfs.<type>)
#
format() {
	local DEVICE=$1
	local FSTYPE=$2

	# Force a 4K blocksize; mkfs.ext2 otherwise tries to use 8K,
	# which won't mount.
	if ! /sbin/mkfs.${FSTYPE} -b 4096 -F -q ${DEVICE} >/dev/null; then
		return 1
	fi

	return 0
}
380
#
# Check that the mdadm and parted utilities are installed.
#
check_md_utils() {
	[ -f ${MDADM} ] || die "${MDADM} utility must be installed"
	[ -f ${PARTED} ] || die "${PARTED} utility must be installed"
}
388
# Verify that partitions on an md device are usable by building a
# scratch loopback-backed md device and asking the kernel to re-read
# its partition table (blockdev --rereadpt).  Returns 0 when the md
# device is partitionable; all scratch resources are torn down.
check_md_partitionable() {
	local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
	local LODEVICE=`unused_loop_device`
	local MDDEVICE=`unused_md_device`
	local RESULT=1

	check_md_utils

	# Create a sparse 16M backing file for the loop device.
	rm -f ${LOFILE}
	dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
		&>/dev/null || return ${RESULT}

	msg "Creating ${LODEVICE} using ${LOFILE}"
	${LOSETUP} ${LODEVICE} ${LOFILE}
	if [ $? -ne 0 ]; then
		rm -f ${LOFILE}
		return ${RESULT}
	fi

	msg "Creating ${MDDEVICE} using ${LODEVICE}"
	${MDADM} --build ${MDDEVICE} --level=faulty \
		--raid-devices=1 ${LODEVICE} &>/dev/null
	if [ $? -ne 0 ]; then
		destroy_loop_devices ${LODEVICE}
		rm -f ${LOFILE}
		return ${RESULT}
	fi
	wait_udev ${MDDEVICE} 30

	# The actual check: can the kernel re-read the partition table?
	${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null
	RESULT=$?

	destroy_md_devices ${MDDEVICE}
	destroy_loop_devices ${LODEVICE}
	rm -f ${LOFILE}

	return ${RESULT}
}
427
#
# Find and return an unused md device, checking md0 through md31.
# Dies when every candidate is in use.
#
unused_md_device() {
	local i

	for (( i=0; i<32; i++ )); do
		MDDEVICE=md${i}

		# Skip devices which are active in /proc/mdstat.
		grep -q "${MDDEVICE} " /proc/mdstat && continue

		# Device node doesn't exist yet, safe to use it.  (The
		# previous test used the garbled path "$/dev/{MDDEVICE}"
		# which never exists, so this branch always fired and
		# the mdadm check below was unreachable.)
		if [ ! -e /dev/${MDDEVICE} ]; then
			echo /dev/${MDDEVICE}
			return
		fi

		# Device exists but may not be in use.
		if [ -b /dev/${MDDEVICE} ]; then
			${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
			if [ $? -eq 1 ]; then
				echo /dev/${MDDEVICE}
				return
			fi
		fi
	done

	die "Error: Unable to find unused md device"
}
456
#
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours.  However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super block
# so you should be able to reconstruct them.
#
cleanup_md_devices() {
	# Exclude partition nodes (e.g. md0p1) by filtering out 'p'.
	destroy_md_devices "`ls /dev/md* 2>/dev/null | grep -v p`"
	udev_trigger
}
468
#
# Destroy the passed md devices, this is used when you know
# the names of the md devices.
#
# $1 - whitespace-separated list of md device nodes
destroy_md_devices() {
	local MDDEVICES="$1"

	msg "Destroying ${MDDEVICES}"
	for MDDEVICE in ${MDDEVICES}; do
		# Best effort teardown: stop, remove, then query.  Errors
		# are deliberately ignored since a device may already be
		# partially torn down.
		${MDADM} --stop ${MDDEVICE} &>/dev/null
		${MDADM} --remove ${MDDEVICE} &>/dev/null
		${MDADM} --detail ${MDDEVICE} &>/dev/null
	done

	return 0
}
485
#
# Check that the scsi_debug kernel module and the lsscsi utility
# are available.
#
check_sd_utils() {
	${INFOMOD} scsi_debug &>/dev/null || die "scsi_debug module required"
	test -f ${LSSCSI} || die "${LSSCSI} utility must be installed"
}
493
#
# Rescan the scsi bus for scsi_debug devices.  It is preferable to use the
# scsi-rescan tool if it is installed, but if it's not we can fall back to
# removing and readding the device manually.  This rescan will only affect
# the first scsi_debug device if scsi-rescan is missing.
#
scsi_rescan() {
	local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"

	if [ -f ${SCSIRESCAN} ]; then
		${SCSIRESCAN} --forcerescan --remove &>/dev/null
	else
		# Locate the first scsi_debug device id (e.g. "[2:0:0:0]"),
		# strip the brackets, then delete it and re-scan its host.
		local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
		local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
		echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
		udev_trigger
		echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
		udev_trigger
	fi
}
514
#
# Trigger udev block-device change events and wait for them to settle.
# Falls back to the legacy udevtrigger/udevsettle binaries when the
# udevadm utility is not present.
#
udev_trigger() {
	if [ -f ${UDEVADM} ]; then
		${UDEVADM} trigger --action=change --subsystem-match=block
		${UDEVADM} settle
	else
		/sbin/udevtrigger
		/sbin/udevsettle
	fi
}
527
528 #
529 # The following udev helper functions assume that the provided
530 # udev rules file will create a /dev/disk/by-vdev/<CHANNEL><RANK>
531 # disk mapping. In this mapping each CHANNEL is represented by
532 # the letters a-z, and the RANK is represented by the numbers
533 # 1-n. A CHANNEL should identify a group of RANKS which are all
534 # attached to a single controller, each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
536 # given a known hardware configuration.
537 #
# Install a vdev mapping config.  When running in tree the by-vdev
# symlinks are built directly; when installed the config is copied to
# /etc/zfs and udev creates the links.
#
# $1 - path to the vdev config file (columns: <name> <by-path id>)
udev_setup() {
	local SRC_PATH=$1

	# When running in tree manually construct symlinks in tree to
	# the proper devices.  Symlinks are installed for all entries
	# in the config file regardless of whether that device actually
	# exists.  When installed as a package udev can be relied on for
	# this and it will only create links for devices which exist.
	if [ ${INTREE} ]; then
		PWD=`pwd`
		mkdir -p ${DEVDIR}/
		cd ${DEVDIR}/
		${AWK} '!/^#/ && /./ { system( \
			"ln -f -s /dev/disk/by-path/"$2" "$1";" \
			"ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
			"ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
			) }' $SRC_PATH
		cd ${PWD}
	else
		DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
		DST_PATH=/etc/zfs/${DST_FILE}

		if [ -e ${DST_PATH} ]; then
			die "Error: Config ${DST_PATH} already exists"
		fi

		cp ${SRC_PATH} ${DST_PATH}
		udev_trigger
	fi

	return 0
}
570
# Remove the by-vdev symlinks created by udev_setup when running in
# tree; a no-op when installed as a package.
#
# $1 - path to the vdev config file used by udev_setup
udev_cleanup() {
	local SRC_PATH=$1

	if [ ${INTREE} ]; then
		PWD=$(pwd)
		cd ${DEVDIR}/
		# Delete the device link plus its -part1/-part9 aliases.
		${AWK} '!/^#/ && /./ { system( \
			"rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
		cd ${PWD}
	fi

	return 0
}
584
# Map a (channel, rank) pair to its by-vdev disk name: channel 1-26
# becomes a letter a-z and the rank number is appended, e.g.
# (3, 2) -> "c2".
#
# The hex conversion uses printf's %x instead of piping through bc,
# removing a needless external dependency; the resulting escape is
# identical since "\x61" and "\x61" differ only in hex-digit case.
udev_cr2d() {
	local CHANNEL=$(printf '%x' $(($1 + 96)))
	local RANK=$2

	printf "\x${CHANNEL}${RANK}"
}
591
# Fill the global RAID0S array with one ${DEVDIR} device path per
# (rank, channel) combination.
#
# $1 - number of ranks, $2 - number of channels
udev_raid0_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAID0S=()
	for RANK in $(seq 1 ${RANKS}); do
		for CHANNEL in $(seq 1 ${CHANNELS}); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAID0S+=("${DEVDIR}/${DISK}")
		done
	done

	return 0
}
608
# Fill the global RAID10S array with "mirror <diskA> <diskB>" entries,
# pairing adjacent channels within each rank.
#
# $1 - number of ranks, $2 - number of channels (expected even)
udev_raid10_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAID10S=()
	for RANK in $(seq 1 ${RANKS}); do
		for CHANNEL1 in $(seq 1 2 ${CHANNELS}); do
			CHANNEL2=$((CHANNEL1 + 1))
			DISK1=$(udev_cr2d ${CHANNEL1} ${RANK})
			DISK2=$(udev_cr2d ${CHANNEL2} ${RANK})
			GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
			RAID10S+=("mirror ${GROUP}")
		done
	done

	return 0
}
628
# Fill the global RAIDZS array with one "raidz <disks...>" group per
# rank.  Note: RAIDZS is indexed from 1, matching the rank number,
# leaving index 0 unset.
#
# $1 - number of ranks, $2 - number of channels
udev_raidz_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZS=()
	for RANK in $(seq 1 ${RANKS}); do
		RAIDZ=("raidz")

		for CHANNEL in $(seq 1 ${CHANNELS}); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZS[${RANK}]="${RAIDZ[*]}"
	done

	return 0
}
647
# Fill the global RAIDZ2S array with one "raidz2 <disks...>" group per
# rank.  Note: RAIDZ2S is indexed from 1, matching the rank number,
# leaving index 0 unset.
#
# $1 - number of ranks, $2 - number of channels
udev_raidz2_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZ2S=()
	for RANK in $(seq 1 ${RANKS}); do
		RAIDZ2=("raidz2")

		for CHANNEL in $(seq 1 ${CHANNELS}); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
	done

	return 0
}
666
# Print the test banner, then invoke the numbered test function
# (test_<num>); its exit status becomes ours.
run_one_test() {
	local TEST_NUM=$1
	local TEST_NAME=$2

	printf "%-4d %-34s " "${TEST_NUM}" "${TEST_NAME}"
	test_${TEST_NUM}
}
674
# Print the test banner and mark the test as skipped.
skip_one_test() {
	local TEST_NUM=$1
	local TEST_NAME=$2

	printf "%-4d %-34s " "${TEST_NUM}" "${TEST_NAME}"
	skip
}
682
# Dispatch one test: skip it when its number appears in TESTS_SKIP,
# run it when TESTS_RUN is '*' or contains its number, otherwise skip.
# Entries are compared with [[ == ]], so TESTS_RUN/TESTS_SKIP entries
# may be glob patterns matching the test number.
#
# $1 - test number
# $2 - human readable test name
run_test() {
	local TEST_NUM=$1
	local TEST_NAME=$2

	# The explicit skip list takes precedence over everything.
	for i in ${TESTS_SKIP[@]}; do
		if [[ $i == ${TEST_NUM} ]] ; then
			skip_one_test ${TEST_NUM} "${TEST_NAME}"
			return 0
		fi
	done

	if [ "${TESTS_RUN[0]}" = "*" ]; then
		run_one_test ${TEST_NUM} "${TEST_NAME}"
	else
		for i in ${TESTS_RUN[@]}; do
			if [[ $i == ${TEST_NUM} ]] ; then
				run_one_test ${TEST_NUM} "${TEST_NAME}"
				return 0
			fi
		done

		# Not in the explicit run list: skip.
		skip_one_test ${TEST_NUM} "${TEST_NAME}"
	fi
}
707
# Wait for a device node to appear, poking udev first.
#
# $1 - device node path to wait for
# $2 - maximum number of 1-second polls before giving up
#
# Returns 0 once the node exists, 1 on timeout.
wait_udev() {
	local DEVICE=$1
	local DELAY=$2
	local COUNT=0

	udev_trigger
	while [ ! -e ${DEVICE} ]; do
		if [ ${COUNT} -gt ${DELAY} ]; then
			return 1
		fi

		let COUNT=${COUNT}+1
		sleep 1
	done

	return 0
}
725
# Enable the ftrace stack tracer and reset its recorded maximum stack
# depth; a silent no-op when the tracer is not available.
stack_clear() {
	local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
	local STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled

	[ -e $STACK_MAX_SIZE ] || return 0

	echo 1 >$STACK_TRACER_ENABLED
	echo 0 >$STACK_MAX_SIZE
}
735
# Warn when the ftrace-observed maximum kernel stack depth exceeds
# STACK_LIMIT bytes and dump the offending stack trace; a silent
# no-op when the tracer is not available.
stack_check() {
	local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
	local STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
	local STACK_LIMIT=7000

	[ -e $STACK_MAX_SIZE ] || return 0

	STACK_SIZE=$(cat $STACK_MAX_SIZE)
	if [ $STACK_SIZE -ge $STACK_LIMIT ]; then
		echo
		echo "Warning: max stack size $STACK_SIZE bytes"
		cat $STACK_TRACE
	fi
}
751
# Terminate a running ZFS Event Daemon using its recorded pidfile;
# does nothing when no pidfile exists.
kill_zed() {
	if [ -f "$ZED_PIDFILE" ]; then
		kill $(cat "$ZED_PIDFILE")
	fi
}
756 }