]> git.proxmox.com Git - mirror_zfs-debian.git/blame - scripts/common.sh.in
Quiet mkfs.ext2 output
[mirror_zfs-debian.git] / scripts / common.sh.in
CommitLineData
c9c0d073
BB
#!/bin/bash
#
# Common support functions for testing scripts.  If a script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used.  If no script-config can be found then the
# installed kernel modules and utilities will be used.

basedir="$(dirname $0)"

SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
. "${basedir}/../${SCRIPT_CONFIG}"
else
KERNEL_MODULES=(zlib_deflate zlib_inflate)
MODULES=(spl splat zavl znvpair zunicode zcommon zfs)
fi

# Global state shared by the test scripts.  PROG is expected to be
# overridden by each sourcing script; the RAID*S arrays are filled in
# by the udev_raid*_setup helpers below.
PROG="<define PROG>"
CLEANUP=
VERBOSE=
VERBOSE_FLAG=
FORCE=
FORCE_FLAG=
DUMP_LOG=
ERROR=
RAID0S=()
RAID10S=()
RAIDZS=()
RAIDZ2S=()
# Which numbered tests to run/skip; '*' runs everything.
TESTS_RUN=${TESTS_RUN:-'*'}
TESTS_SKIP=${TESTS_SKIP:-}

# Installation paths substituted by configure (@...@ tokens).
prefix=@prefix@
exec_prefix=@exec_prefix@
libexecdir=@libexecdir@
pkglibexecdir=${libexecdir}/@PACKAGE@
bindir=@bindir@
sbindir=@sbindir@
udevdir=@udevdir@
udevruledir=@udevruledir@
sysconfdir=@sysconfdir@

# Default directories; each may be overridden from the environment.
ETCDIR=${ETCDIR:-/etc}
DEVDIR=${DEVDIR:-/dev/disk/zpool}
ZPOOLDIR=${ZPOOLDIR:-${pkglibexecdir}/zpool-config}
ZPIOSDIR=${ZPIOSDIR:-${pkglibexecdir}/zpios-test}
ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkglibexecdir}/zpios-profile}

# ZFS utilities; each may be overridden from the environment.
ZDB=${ZDB:-${sbindir}/zdb}
ZFS=${ZFS:-${sbindir}/zfs}
ZINJECT=${ZINJECT:-${sbindir}/zinject}
ZPOOL=${ZPOOL:-${sbindir}/zpool}
ZPOOL_ID=${ZPOOL_ID:-${bindir}/zpool_id}
ZTEST=${ZTEST:-${sbindir}/ztest}
ZPIOS=${ZPIOS:-${sbindir}/zpios}

# Companion helper scripts.
COMMON_SH=${COMMON_SH:-${pkglibexecdir}/common.sh}
ZFS_SH=${ZFS_SH:-${pkglibexecdir}/zfs.sh}
ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkglibexecdir}/zpool-create.sh}
ZPIOS_SH=${ZPIOS_SH:-${pkglibexecdir}/zpios.sh}
ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkglibexecdir}/zpios-survey.sh}

# System utilities used by the helper functions below.
LDMOD=${LDMOD:-/sbin/modprobe}
LSMOD=${LSMOD:-/sbin/lsmod}
RMMOD=${RMMOD:-/sbin/rmmod}
INFOMOD=${INFOMOD:-/sbin/modinfo}
LOSETUP=${LOSETUP:-/sbin/losetup}
MDADM=${MDADM:-/sbin/mdadm}
PARTED=${PARTED:-/sbin/parted}
BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
SYSCTL=${SYSCTL:-/sbin/sysctl}
UDEVADM=${UDEVADM:-/sbin/udevadm}
AWK=${AWK:-/usr/bin/awk}

# ANSI escape sequences used by the pass()/fail()/skip() output.
COLOR_BLACK="\033[0;30m"
COLOR_DK_GRAY="\033[1;30m"
COLOR_BLUE="\033[0;34m"
COLOR_LT_BLUE="\033[1;34m"
COLOR_GREEN="\033[0;32m"
COLOR_LT_GREEN="\033[1;32m"
COLOR_CYAN="\033[0;36m"
COLOR_LT_CYAN="\033[1;36m"
COLOR_RED="\033[0;31m"
COLOR_LT_RED="\033[1;31m"
COLOR_PURPLE="\033[0;35m"
COLOR_LT_PURPLE="\033[1;35m"
COLOR_BROWN="\033[0;33m"
COLOR_YELLOW="\033[1;33m"
COLOR_LT_GRAY="\033[0;37m"
COLOR_WHITE="\033[1;37m"
COLOR_RESET="\033[0m"

#
# Print an error message, prefixed with the script name, to stderr
# and terminate the script with a failure status.
#
die() {
	printf '%b\n' "${PROG}: $1" >&2
	exit 1
}

#
# Print a message to stdout, but only when verbose mode has been
# enabled via the global VERBOSE flag.
#
msg() {
	if [ -n "${VERBOSE}" ]; then
		echo "$@"
	fi
}

#
# Report a passing test result in green.
#
pass() {
	printf '%b\n' "${COLOR_GREEN}Pass${COLOR_RESET}"
}

#
# Report a failing test result in red along with its error code,
# then exit the script with that code.
#
fail() {
	printf '%b\n' "${COLOR_RED}Fail${COLOR_RESET} ($1)"
	exit $1
}

#
# Report a skipped test result in brown.
#
skip() {
	printf '%b\n' "${COLOR_BROWN}Skip${COLOR_RESET}"
}

#
# Fill ROOT with a small random tree of directories and files to act
# as a copy source for the regression tests.  Every directory in the
# tree receives up to MAX_DIR_SIZE random files of up to
# MAX_FILE_SIZE KiB each.  Always returns 0.
#
populate() {
	local ROOT=$1
	local MAX_DIR_SIZE=$2
	local MAX_FILE_SIZE=$3

	mkdir -p $ROOT/{a,b,c,d,e,f,g}/{h,i}

	for DIR in $(find $ROOT); do
		COUNT=$((RANDOM % MAX_DIR_SIZE))

		for (( i = 1; i <= COUNT; i++ )); do
			FILE=$(mktemp -p ${DIR})
			SIZE=$((RANDOM % MAX_FILE_SIZE))
			dd if=/dev/urandom of=$FILE bs=1k count=$SIZE &>/dev/null
		done
	done

	return 0
}

#
# One-time initialization for a test run.
#
# Disables the 90-zfs.rules udev rule (so device detection cannot
# auto-load the zfs module stack while the test scripts control module
# load/unload) and creates a random source tree under /var/tmp used as
# copy fodder by the regression tests.  A single INT/TERM/EXIT trap
# restores the udev rule and removes the source tree.
#
init() {
	local TRAP_CMDS=""

	# Disable the udev rule 90-zfs.rules to prevent the zfs module
	# stack from being loaded due to the detection of a zfs device.
	local RULE=${udevruledir}/90-zfs.rules
	if test -e ${RULE}; then
		mv ${RULE} ${RULE}.disabled
		TRAP_CMDS="mv ${RULE}.disabled ${RULE};"
	fi

	# Create a random directory tree of files and sub-directories to
	# act as a copy source for the various regression tests.
	SRC_DIR=`mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX`

	# FIX: install a single combined trap.  The original code set a
	# second trap for SRC_DIR which silently replaced the first one,
	# leaving the udev rule permanently disabled on exit.
	trap "${TRAP_CMDS} rm -Rf $SRC_DIR" INT TERM EXIT

	populate $SRC_DIR 10 100
}

#
# Ask the SPL to dump its internal debug log via sysctl, then decode
# it with the in-tree spl utility into a <name>.log file.
# NOTE(review): assumes the kernel logs the dump file name as field 5
# of the last dmesg line, and that SPLBUILD points at the SPL build
# tree -- confirm both against the SPL in use.
#
spl_dump_log() {
	${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
	# Recover the path of the freshly written dump file from dmesg.
	local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
	${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
	echo
	echo "Dumped debug log: ${NAME}.log"
	tail -n1 ${NAME}.log
	echo
	return 0
}

#
# Verify the expected module state before a test run: none of the ZFS
# stack modules may already be loaded, and every module must be
# resolvable by modinfo.  On failure ERROR is set to a descriptive
# message and 1 is returned.
#
check_modules() {
	local LOADED_MODULES=()
	local MISSING_MODULES=()

	for MOD in ${MODULES[*]}; do
		local NAME=`basename $MOD .ko`

		if ${LSMOD} | egrep -q "^${NAME}"; then
			LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
		fi

		# FIX: the previous '[ ${INFOMOD} ${MOD} 2>/dev/null ]'
		# was an invalid test(1) expression (always false), so
		# missing modules were never detected.  Run modinfo and
		# test its exit status instead.
		if ! ${INFOMOD} ${MOD} &>/dev/null; then
			MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
		fi
	done

	if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
		ERROR="Unload these modules with '${PROG} -u':\n"
		ERROR="${ERROR}${LOADED_MODULES[*]}"
		return 1
	fi

	if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
		ERROR="The following modules can not be found,"
		ERROR="${ERROR} ensure your source trees are built:\n"
		ERROR="${ERROR}${MISSING_MODULES[*]}"
		return 1
	fi

	return 0
}

#
# Load a single kernel module, passing any extra arguments through to
# modprobe.  Sets ERROR and returns 1 on failure.
#
load_module() {
	local NAME=`basename $1 .ko`

	if [ ${VERBOSE} ]; then
		echo "Loading ${NAME} ($@)"
	fi

	# FIX: 'ERROR=... return 1' placed the assignment in the
	# temporary environment of the 'return' builtin, which bash
	# discards; assign ERROR as a separate statement so callers can
	# actually report the failure.
	if ! ${LDMOD} $* &>/dev/null; then
		ERROR="Failed to load $1"
		return 1
	fi

	return 0
}

#
# Load the generic compression helpers followed by the full ZFS module
# stack.  Optional "name=options" arguments select per-module option
# strings that are passed to load_module for the matching module.
# Returns 1 if any ZFS stack module fails to load.
#
load_modules() {
	mkdir -p /etc/zfs

	# Generic kernel modules are loaded best-effort; failures are
	# ignored (e.g. they may be built in to the running kernel).
	for MOD in ${KERNEL_MODULES[*]}; do
		load_module ${MOD}
	done

	for MOD in ${MODULES[*]}; do
		local NAME=`basename ${MOD} .ko`
		local VALUE=

		# Match this module against any "name=options" argument.
		for OPT in "$@"; do
			OPT_NAME=`echo ${OPT} | cut -f1 -d'='`

			if [ ${NAME} = "${OPT_NAME}" ]; then
				VALUE=`echo ${OPT} | cut -f2- -d'='`
			fi
		done

		load_module ${MOD} ${VALUE} || return 1
	done

	if [ ${VERBOSE} ]; then
		echo "Successfully loaded ZFS module stack"
	fi

	return 0
}

#
# Unload a single kernel module by name.  Sets ERROR and returns 1
# on failure.
#
unload_module() {
	local NAME=`basename $1 .ko`

	if [ ${VERBOSE} ]; then
		echo "Unloading ${NAME} ($@)"
	fi

	# FIX: as in load_module, the ERROR assignment must be a
	# separate statement or bash discards it after 'return'.
	if ! ${RMMOD} ${NAME}; then
		ERROR="Failed to unload ${NAME}"
		return 1
	fi

	return 0
}

#
# Unload the ZFS module stack in the reverse of load order.  Modules
# with a non-zero reference count are silently left loaded.  When
# DUMP_LOG is set the SPL debug log is dumped just before the spl
# module itself is removed.
#
unload_modules() {
	# Reverse MODULES so dependent modules are removed before the
	# modules that provide their symbols.
	local MODULES_REVERSE=( $(echo ${MODULES[@]} |
		${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )

	for MOD in ${MODULES_REVERSE[*]}; do
		local NAME=`basename ${MOD} .ko`
		# Column 3 of lsmod output is the module use count.
		local USE_COUNT=`${LSMOD} |
			egrep "^${NAME} "| ${AWK} '{print $3}'`

		if [ "${USE_COUNT}" = 0 ] ; then

			if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then
				spl_dump_log
			fi

			unload_module ${MOD} || return 1
		fi
	done

	if [ ${VERBOSE} ]; then
		echo "Successfully unloaded ZFS module stack"
	fi

	return 0
}

#
# Check that the losetup utility is installed.
#
check_loop_utils() {
	test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
}

#
# Find and return an unused loopback device by probing /dev/loop*
# with losetup; a device losetup cannot report on is unbound.
# Dies if every loop device is busy.
#
unused_loop_device() {
	for DEVICE in `ls -1 /dev/loop[0-9]* 2>/dev/null`; do
		# losetup exits non-zero when the device is not in use.
		${LOSETUP} ${DEVICE} &>/dev/null
		if [ $? -ne 0 ]; then
			echo ${DEVICE}
			return
		fi
	done

	die "Error: Unable to find unused loopback device"
}

#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours.  However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' in the name.  So any damage we might
# do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
	local TMP_FILE=`mktemp`

	${LOSETUP} -a | tr -d '()' >${TMP_FILE}
	# FIX: use the losetup path handed to awk via -v rather than a
	# bare "losetup" which relied on it being in awk's PATH.
	${AWK} -F":" -v losetup="$LOSETUP" \
	    '/zpool/ { system(losetup" -d "$1) }' ${TMP_FILE}
	# Field 3 of the 'losetup -a' output is the backing file.
	${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}

	rm -f ${TMP_FILE}
}

#
# Destroy the passed loopback devices, this is used when you know
# the names of the loopback devices.
#
destroy_loop_devices() {
	local LODEVICES="$1"

	msg "Destroying ${LODEVICES}"
	${LOSETUP} -d ${LODEVICES} || \
		die "Error $? destroying ${FILE} -> ${DEVICE} loopback"

	# NOTE(review): FILES, FILE and DEVICE are globals expected to
	# be set by the caller, not parameters of this function --
	# confirm callers always define them before invoking this.
	rm -f ${FILES}
	return 0
}
336
93648f31
BB
337#
338# Create a device label.
339#
340label() {
341 local DEVICE=$1
342 local LABEL=$2
343
344 ${PARTED} ${DEVICE} --script -- mklabel ${LABEL} || return 1
345
346 return 0
347}

#
# Add a primary partition spanning START..END to DEVICE with parted,
# then poke udev so the new partition node appears.  Returns 1 when
# parted fails.
#
partition() {
	local DEVICE=$1
	local TYPE=$2
	local START=$3
	local END=$4

	if ! ${PARTED} --align optimal ${DEVICE} --script -- \
	    mkpart ${TYPE} ${START} ${END}; then
		return 1
	fi

	udev_trigger

	return 0
}

#
# Create a filesystem of type FSTYPE on the block device DEVICE.
# Returns 1 when mkfs fails.
#
format() {
	local DEVICE=$1
	local FSTYPE=$2

	# Force 4K blocksize, else mkfs.ext2 tries to use 8K, which
	# won't mount.
	/sbin/mkfs.${FSTYPE} -b 4096 -F -q ${DEVICE} >/dev/null && return 0

	return 1
}

#
# Check that the mdadm and parted utilities are installed.
#
check_md_utils() {
	local UTIL

	for UTIL in "${MDADM}" "${PARTED}"; do
		test -f ${UTIL} || die "${UTIL} utility must be installed"
	done
}

check_md_partitionable() {
	# Determine whether md devices on this system are partitionable
	# by building a throw-away faulty-mode md array on a loopback
	# file and asking blockdev to reread its partition table.
	# Returns 0 when the reread succeeds, non-zero otherwise.
	local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
	local LODEVICE=`unused_loop_device`
	local MDDEVICE=`unused_md_device`
	local RESULT=1

	check_md_utils

	# Sparse 16M backing file (count=0 seek=16 writes no data).
	rm -f ${LOFILE}
	dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
		&>/dev/null || return ${RESULT}

	msg "Creating ${LODEVICE} using ${LOFILE}"
	${LOSETUP} ${LODEVICE} ${LOFILE}
	if [ $? -ne 0 ]; then
		rm -f ${LOFILE}
		return ${RESULT}
	fi

	msg "Creating ${MDDEVICE} using ${LODEVICE}"
	${MDADM} --build ${MDDEVICE} --level=faulty \
		--raid-devices=1 ${LODEVICE} &>/dev/null
	if [ $? -ne 0 ]; then
		destroy_loop_devices ${LODEVICE}
		rm -f ${LOFILE}
		return ${RESULT}
	fi
	# Give udev up to 30 seconds to create the md device node.
	wait_udev ${MDDEVICE} 30

	${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null
	RESULT=$?

	# Tear down in reverse order of construction.
	destroy_md_devices ${MDDEVICE}
	destroy_loop_devices ${LODEVICE}
	rm -f ${LOFILE}

	return ${RESULT}
}

#
# Find and return an unused md device.  Dies when /dev/md0..md31 are
# all in use.
#
unused_md_device() {
	for (( i=0; i<32; i++ )); do
		MDDEVICE=md${i}

		# Skip active devices in /proc/mdstat.
		grep -q "${MDDEVICE} " /proc/mdstat && continue

		# FIX: the existence test used the malformed path
		# '$/dev/{MDDEVICE}'; test the real device node.
		# Device doesn't exist, use it.
		if [ ! -e /dev/${MDDEVICE} ]; then
			echo /dev/${MDDEVICE}
			return
		fi

		# Device exists but may not be in use.
		if [ -b /dev/${MDDEVICE} ]; then
			${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
			if [ $? -eq 1 ]; then
				echo /dev/${MDDEVICE}
				return
			fi
		fi
	done

	die "Error: Unable to find unused md device"
}

#
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours.  However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super block
# so you should be able to reconstruct them.
#
cleanup_md_devices() {
	# 'grep -v p' filters out partition nodes (e.g. /dev/md0p1).
	destroy_md_devices "`ls /dev/md* 2>/dev/null | grep -v p`"
	udev_trigger
}

#
# Destroy the passed md devices, this is used when you know
# the names of the md devices.  All mdadm errors are ignored so
# that every listed device is attempted.  Always returns 0.
#
destroy_md_devices() {
	local MDDEVICES="$1"

	msg "Destroying ${MDDEVICES}"

	for MDDEVICE in ${MDDEVICES}; do
		${MDADM} --stop ${MDDEVICE} &>/dev/null
		${MDADM} --remove ${MDDEVICE} &>/dev/null
		${MDADM} --detail ${MDDEVICE} &>/dev/null
	done

	return 0
}

#
# Check that the scsi_debug module and the lsscsi utility are
# available; die otherwise.
#
check_sd_utils() {
	if ! ${INFOMOD} scsi_debug &>/dev/null; then
		die "scsi_debug module required"
	fi

	if ! test -f ${LSSCSI}; then
		die "${LSSCSI} utility must be installed"
	fi
}

#
# Rescan the scsi bus for scsi_debug devices.  It is preferable to use
# the scsi-rescan tool if it is installed, but if it's not we can fall
# back to removing and re-adding the device manually.  This rescan will
# only affect the first scsi_debug device if scsi-rescan is missing.
#
scsi_rescan() {
	local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"

	if [ -f ${SCSIRESCAN} ]; then
		${SCSIRESCAN} --forcerescan --remove &>/dev/null
	else
		# Identify the first scsi_debug device id, e.g. 0:0:0:0.
		local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
		local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
		# Delete the device, then ask its host to rescan the bus.
		echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
		udev_trigger
		echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
		udev_trigger
	fi
}

#
# Trigger udev for block devices and wait for it to settle.  Falls
# back to the legacy udevtrigger/udevsettle tools when udevadm is
# not present.
#
udev_trigger() {
	if [ ! -f ${UDEVADM} ]; then
		/sbin/udevtrigger
		/sbin/udevsettle
		return
	fi

	${UDEVADM} trigger --action=change --subsystem-match=block
	${UDEVADM} settle
}
525
c9c0d073
BB
526#
527# The following udev helper functions assume that the provided
528# udev rules file will create a /dev/disk/zpool/<CHANNEL><RANK>
529# disk mapping. In this mapping each CHANNEL is represented by
530# the letters a-z, and the RANK is represented by the numbers
531# 1-n. A CHANNEL should identify a group of RANKS which are all
532# attached to a single controller, each RANK represents a disk.
533# This provides a simply mechanism to locate a specific drive
534# given a known hardware configuration.
535#
536udev_setup() {
537 local SRC_PATH=$1
538
539 # When running in tree manually contruct symlinks in tree to
540 # the proper devices. Symlinks are installed for all entires
541 # in the config file regardless of if that device actually
542 # exists. When installed as a package udev can be relied on for
543 # this and it will only create links for devices which exist.
544 if [ ${INTREE} ]; then
545 PWD=`pwd`
546 mkdir -p ${DEVDIR}/
547 cd ${DEVDIR}/
548 ${AWK} '!/^#/ && /./ { system( \
549 "ln -f -s /dev/disk/by-path/"$2" "$1";" \
550 "ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
551 "ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
552 ) }' $SRC_PATH
553 cd ${PWD}
554 else
555 DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
556 DST_PATH=/etc/zfs/${DST_FILE}
557
558 if [ -e ${DST_PATH} ]; then
559 die "Error: Config ${DST_PATH} already exists"
560 fi
561
562 cp ${SRC_PATH} ${DST_PATH}
0ee8118b 563 udev_trigger
c9c0d073
BB
564 fi
565
566 return 0
567}

#
# Remove the in-tree device symlinks created by udev_setup.  Only
# meaningful when INTREE is set; installed configurations are left
# for udev to manage.
#
udev_cleanup() {
	local SRC_PATH=$1

	if [ ${INTREE} ]; then
		PWD=`pwd`
		cd ${DEVDIR}/
		# Remove each link and its "p1"/"p9" partition links.
		${AWK} '!/^#/ && /./ { system( \
		    "rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
		cd ${PWD}
	fi

	return 0
}

#
# Convert a (channel, rank) pair into a device name such as "a1":
# channel 1..26 maps to the letters a..z and the rank is appended
# verbatim.  No trailing newline is printed.
#
udev_cr2d() {
	local CHANNEL=$1
	local RANK=$2
	local HEX

	# ASCII 'a' is 0x61 (97).  Use the printf builtin for the hex
	# conversion instead of forking the external bc utility.
	printf -v HEX '%x' $((CHANNEL + 96))
	printf "\x${HEX}${RANK}"
}

#
# Populate the global RAID0S array with one ${DEVDIR}/<disk> entry
# for every channel/rank combination.  Always returns 0.
#
udev_raid0_setup() {
	local RANKS=$1
	local CHANNELS=$2
	local IDX=0

	RAID0S=()
	for (( RANK=1; RANK<=RANKS; RANK++ )); do
		for (( CHANNEL=1; CHANNEL<=CHANNELS; CHANNEL++ )); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAID0S[${IDX}]="${DEVDIR}/${DISK}"
			IDX=$((IDX + 1))
		done
	done

	return 0
}

#
# Populate the global RAID10S array with "mirror <diskA> <diskB>"
# entries built from adjacent channel pairs for every rank.
# Always returns 0.
#
udev_raid10_setup() {
	local RANKS=$1
	local CHANNELS=$2
	local IDX=0

	RAID10S=()
	for (( RANK=1; RANK<=RANKS; RANK++ )); do
		for (( CHANNEL1=1; CHANNEL1<=CHANNELS; CHANNEL1+=2 )); do
			CHANNEL2=$((CHANNEL1 + 1))
			DISK1=$(udev_cr2d ${CHANNEL1} ${RANK})
			DISK2=$(udev_cr2d ${CHANNEL2} ${RANK})
			GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
			RAID10S[${IDX}]="mirror ${GROUP}"
			IDX=$((IDX + 1))
		done
	done

	return 0
}

#
# Populate the global RAIDZS array with one "raidz <disks...>" entry
# per rank, using one disk from every channel.  Always returns 0.
#
udev_raidz_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZS=()
	for (( RANK=1; RANK<=RANKS; RANK++ )); do
		RAIDZ=("raidz")

		for (( CHANNEL=1; CHANNEL<=CHANNELS; CHANNEL++ )); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZS[${RANK}]="${RAIDZ[*]}"
	done

	return 0
}

#
# Populate the global RAIDZ2S array with one "raidz2 <disks...>"
# entry per rank, using one disk from every channel.  Always
# returns 0.
#
udev_raidz2_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZ2S=()
	for (( RANK=1; RANK<=RANKS; RANK++ )); do
		RAIDZ2=("raidz2")

		for (( CHANNEL=1; CHANNEL<=CHANNELS; CHANNEL++ )); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
	done

	return 0
}

#
# Print a formatted test banner, then invoke the function
# test_<TEST_NUM>, which must be defined by the sourcing test script.
#
run_one_test() {
	local TEST_NUM=$1
	local TEST_NAME=$2

	printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
	test_${TEST_NUM}
}

#
# Print a formatted test banner and mark the test as skipped.
#
skip_one_test() {
	local TEST_NUM=$1
	local TEST_NAME=$2

	printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
	skip
}

#
# Run or skip a single numbered test.  TESTS_SKIP takes precedence
# over TESTS_RUN; a TESTS_RUN of '*' runs everything.  Entries are
# matched with [[ == ]], so list items may be glob patterns.
#
run_test() {
	local TEST_NUM=$1
	local TEST_NAME=$2

	for i in ${TESTS_SKIP[@]}; do
		if [[ $i == ${TEST_NUM} ]] ; then
			skip_one_test ${TEST_NUM} "${TEST_NAME}"
			return 0
		fi
	done

	if [ "${TESTS_RUN[0]}" = "*" ]; then
		run_one_test ${TEST_NUM} "${TEST_NAME}"
	else
		for i in ${TESTS_RUN[@]}; do
			if [[ $i == ${TEST_NUM} ]] ; then
				run_one_test ${TEST_NUM} "${TEST_NAME}"
				return 0
			fi
		done

		# Not in the explicit run list, so skip it.
		skip_one_test ${TEST_NUM} "${TEST_NAME}"
	fi
}

#
# Wait up to DELAY seconds for DEVICE to appear, triggering udev
# first so any pending events are processed.  Returns 1 on timeout.
#
wait_udev() {
	local DEVICE=$1
	local DELAY=$2
	local COUNT=0

	udev_trigger
	until [ -e ${DEVICE} ]; do
		if [ ${COUNT} -gt ${DELAY} ]; then
			return 1
		fi

		COUNT=$((COUNT + 1))
		sleep 1
	done

	return 0
}

#
# Reset the kernel's maximum-stack-usage tracking: enable the stack
# tracer, then zero the recorded maximum.  A no-op on kernels without
# the stack tracer debugfs interface.
#
stack_clear() {
	local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
	local STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled

	if [ -e $STACK_MAX_SIZE ]; then
		echo 1 >$STACK_TRACER_ENABLED
		echo 0 >$STACK_MAX_SIZE
	fi
}

#
# Warn when the kernel's recorded maximum stack usage exceeds
# STACK_LIMIT bytes and dump the offending stack trace.  A no-op on
# kernels without the stack tracer debugfs interface.
#
stack_check() {
	local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
	local STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
	# NOTE(review): 7000 presumably leaves headroom below an 8K
	# kernel stack -- confirm against the target kernels.
	local STACK_LIMIT=7000

	if [ -e $STACK_MAX_SIZE ]; then
		STACK_SIZE=`cat $STACK_MAX_SIZE`

		if [ $STACK_SIZE -ge $STACK_LIMIT ]; then
			echo
			echo "Warning: max stack size $STACK_SIZE bytes"
			cat $STACK_TRACE
		fi
	fi
}