#!/bin/bash
#
# Common support functions for testing scripts. If a script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used. If no script-config can be found then the
# installed kernel modules and utilities will be used.

basedir="$(dirname $0)"

SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
        . "${basedir}/../${SCRIPT_CONFIG}"
else
        KERNEL_MODULES=(zlib_deflate zlib_inflate)
        MODULES=(spl splat zavl znvpair zunicode zcommon zfs)
fi

PROG="<define PROG>"
CLEANUP=
VERBOSE=
VERBOSE_FLAG=
FORCE=
FORCE_FLAG=
DUMP_LOG=
ERROR=
RAID0S=()
RAID10S=()
RAIDZS=()
RAIDZ2S=()
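# run_test below compares each requested test number against TESTS_RUN and
# TESTS_SKIP; for example, TESTS_RUN="1 3" runs only tests 1 and 3, and
# TESTS_SKIP="2" skips test 2.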
TESTS_RUN=${TESTS_RUN:-'*'}
TESTS_SKIP=${TESTS_SKIP:-}

prefix=@prefix@
exec_prefix=@exec_prefix@
pkgdatadir=@datarootdir@/@PACKAGE@
bindir=@bindir@
sbindir=@sbindir@
udevdir=@udevdir@
udevruledir=@udevruledir@
sysconfdir=@sysconfdir@

ETCDIR=${ETCDIR:-/etc}
DEVDIR=${DEVDIR:-/dev/disk/by-vdev}
ZPOOLDIR=${ZPOOLDIR:-${pkgdatadir}/zpool-config}
ZPIOSDIR=${ZPIOSDIR:-${pkgdatadir}/zpios-test}
ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkgdatadir}/zpios-profile}

ZDB=${ZDB:-${sbindir}/zdb}
ZFS=${ZFS:-${sbindir}/zfs}
ZINJECT=${ZINJECT:-${sbindir}/zinject}
ZPOOL=${ZPOOL:-${sbindir}/zpool}
ZTEST=${ZTEST:-${sbindir}/ztest}
ZPIOS=${ZPIOS:-${sbindir}/zpios}

COMMON_SH=${COMMON_SH:-${pkgdatadir}/common.sh}
ZFS_SH=${ZFS_SH:-${pkgdatadir}/zfs.sh}
ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkgdatadir}/zpool-create.sh}
ZPIOS_SH=${ZPIOS_SH:-${pkgdatadir}/zpios.sh}
ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkgdatadir}/zpios-survey.sh}

LDMOD=${LDMOD:-/sbin/modprobe}
LSMOD=${LSMOD:-/sbin/lsmod}
RMMOD=${RMMOD:-/sbin/rmmod}
INFOMOD=${INFOMOD:-/sbin/modinfo}
LOSETUP=${LOSETUP:-/sbin/losetup}
MDADM=${MDADM:-/sbin/mdadm}
PARTED=${PARTED:-/sbin/parted}
BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
SYSCTL=${SYSCTL:-/sbin/sysctl}
UDEVADM=${UDEVADM:-/sbin/udevadm}
AWK=${AWK:-/usr/bin/awk}

COLOR_BLACK="\033[0;30m"
COLOR_DK_GRAY="\033[1;30m"
COLOR_BLUE="\033[0;34m"
COLOR_LT_BLUE="\033[1;34m"
COLOR_GREEN="\033[0;32m"
COLOR_LT_GREEN="\033[1;32m"
COLOR_CYAN="\033[0;36m"
COLOR_LT_CYAN="\033[1;36m"
COLOR_RED="\033[0;31m"
COLOR_LT_RED="\033[1;31m"
COLOR_PURPLE="\033[0;35m"
COLOR_LT_PURPLE="\033[1;35m"
COLOR_BROWN="\033[0;33m"
COLOR_YELLOW="\033[1;33m"
COLOR_LT_GRAY="\033[0;37m"
COLOR_WHITE="\033[1;37m"
COLOR_RESET="\033[0m"

die() {
        echo -e "${PROG}: $1" >&2
        exit 1
}

msg() {
        if [ ${VERBOSE} ]; then
                echo "$@"
        fi
}

pass() {
        echo -e "${COLOR_GREEN}Pass${COLOR_RESET}"
}

fail() {
        echo -e "${COLOR_RED}Fail${COLOR_RESET} ($1)"
        exit $1
}

skip() {
        echo -e "${COLOR_BROWN}Skip${COLOR_RESET}"
}

populate() {
        local ROOT=$1
        local MAX_DIR_SIZE=$2
        local MAX_FILE_SIZE=$3

        mkdir -p $ROOT/{a,b,c,d,e,f,g}/{h,i}
        DIRS=`find $ROOT`

        for DIR in $DIRS; do
                COUNT=$(($RANDOM % $MAX_DIR_SIZE))

                for i in `seq $COUNT`; do
                        FILE=`mktemp -p ${DIR}`
                        SIZE=$(($RANDOM % $MAX_FILE_SIZE))
                        dd if=/dev/urandom of=$FILE bs=1k count=$SIZE &>/dev/null
                done
        done

        return 0
}
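
# For example, "populate ${SRC_DIR} 10 100" (as called from init below)
# creates the fixed a-g/h-i tree under ${SRC_DIR} and fills each directory
# with up to 9 temporary files of up to 99 KiB of random data.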

init() {
        # Create a random directory tree of files and sub-directories
        # to act as a copy source for the various regression tests.
        SRC_DIR=`mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX`
        trap "rm -Rf $SRC_DIR" INT TERM EXIT
        populate $SRC_DIR 10 100

        # Disable the udev rule 90-zfs.rules to prevent the zfs module
        # stack from being loaded due to the detection of a zfs device.
        # This is important because the test scripts require full control
        # over when and how the modules are loaded/unloaded. Both cleanup
        # actions share a single trap, because bash keeps only one handler
        # per signal; this ensures the udev rule is correctly replaced on
        # exit.
        local RULE=${udevruledir}/90-zfs.rules
        if test -e ${RULE}; then
                trap "rm -Rf $SRC_DIR; mv ${RULE}.disabled ${RULE}" INT TERM EXIT
                mv ${RULE} ${RULE}.disabled
        fi
}

spl_dump_log() {
        ${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
        local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
        ${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
        echo
        echo "Dumped debug log: ${NAME}.log"
        tail -n1 ${NAME}.log
        echo
        return 0
}

check_modules() {
        local LOADED_MODULES=()
        local MISSING_MODULES=()

        for MOD in ${MODULES[*]}; do
                local NAME=`basename $MOD .ko`

                if ${LSMOD} | egrep -q "^${NAME}"; then
                        LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
                fi

                if ! ${INFOMOD} ${MOD} &>/dev/null; then
                        MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
                fi
        done

        if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
                ERROR="Unload these modules with '${PROG} -u':\n"
                ERROR="${ERROR}${LOADED_MODULES[*]}"
                return 1
        fi

        if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
                ERROR="The following modules can not be found,"
                ERROR="${ERROR} ensure your source trees are built:\n"
                ERROR="${ERROR}${MISSING_MODULES[*]}"
                return 1
        fi

        return 0
}

load_module() {
        local NAME=`basename $1 .ko`

        if [ ${VERBOSE} ]; then
                echo "Loading ${NAME} ($@)"
        fi

        if ! ${LDMOD} $* &>/dev/null; then
                ERROR="Failed to load $1"
                return 1
        fi

        return 0
}

load_modules() {
        mkdir -p /etc/zfs

        for MOD in ${KERNEL_MODULES[*]}; do
                load_module ${MOD}
        done

        for MOD in ${MODULES[*]}; do
                local NAME=`basename ${MOD} .ko`
                local VALUE=

                for OPT in "$@"; do
                        OPT_NAME=`echo ${OPT} | cut -f1 -d'='`

                        if [ ${NAME} = "${OPT_NAME}" ]; then
                                VALUE=`echo ${OPT} | cut -f2- -d'='`
                        fi
                done

                load_module ${MOD} ${VALUE} || return 1
        done

        if [ ${VERBOSE} ]; then
                echo "Successfully loaded ZFS module stack"
        fi

        return 0
}
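
# Module options may be passed as <module>=<options> arguments; for example,
# a caller could forward a module parameter to the zfs module with:
#   load_modules "zfs=zfs_prefetch_disable=1"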

unload_module() {
        local NAME=`basename $1 .ko`

        if [ ${VERBOSE} ]; then
                echo "Unloading ${NAME} ($@)"
        fi

        if ! ${RMMOD} ${NAME}; then
                ERROR="Failed to unload ${NAME}"
                return 1
        fi

        return 0
}

unload_modules() {
        local MODULES_REVERSE=( $(echo ${MODULES[@]} |
            ${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )

        for MOD in ${MODULES_REVERSE[*]}; do
                local NAME=`basename ${MOD} .ko`
                local USE_COUNT=`${LSMOD} |
                    egrep "^${NAME} "| ${AWK} '{print $3}'`

                if [ "${USE_COUNT}" = 0 ] ; then

                        if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then
                                spl_dump_log
                        fi

                        unload_module ${MOD} || return 1
                fi
        done

        if [ ${VERBOSE} ]; then
                echo "Successfully unloaded ZFS module stack"
        fi

        return 0
}

#
# Check that the loopback utilities are installed.
#
check_loop_utils() {
        test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
}

#
# Find and return an unused loopback device.
#
unused_loop_device() {
        for DEVICE in `ls -1 /dev/loop[0-9]* 2>/dev/null`; do
                ${LOSETUP} ${DEVICE} &>/dev/null
                if [ $? -ne 0 ]; then
                        echo ${DEVICE}
                        return
                fi
        done

        die "Error: Unable to find unused loopback device"
}

#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours. However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' in the name. So any damage we might
# do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
        local TMP_FILE=`mktemp`

        ${LOSETUP} -a | tr -d '()' >${TMP_FILE}
        ${AWK} -F":" -v losetup="$LOSETUP" \
            '/zpool/ { system(losetup" -d "$1) }' ${TMP_FILE}
        ${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}

        rm -f ${TMP_FILE}
}

#
# Destroy the passed loopback devices; this is used when you know
# the names of the loopback devices.
#
destroy_loop_devices() {
        local LODEVICES="$1"

        msg "Destroying ${LODEVICES}"
        ${LOSETUP} -d ${LODEVICES} || \
            die "Error $? destroying ${LODEVICES} loopback"

        rm -f ${FILES}
        return 0
}

#
# Create a device label.
#
label() {
        local DEVICE=$1
        local LABEL=$2

        ${PARTED} ${DEVICE} --script -- mklabel ${LABEL} || return 1

        return 0
}

#
# Create a primary partition on a block device.
#
partition() {
        local DEVICE=$1
        local TYPE=$2
        local START=$3
        local END=$4

        ${PARTED} --align optimal ${DEVICE} --script -- \
            mkpart ${TYPE} ${START} ${END} || return 1
        udev_trigger

        return 0
}

#
# Create a filesystem on the block device.
#
format() {
        local DEVICE=$1
        local FSTYPE=$2

        # Force 4K blocksize, else mkfs.ext2 tries to use 8K, which
        # won't mount.
        /sbin/mkfs.${FSTYPE} -b 4096 -F -q ${DEVICE} >/dev/null || return 1

        return 0
}

#
# Check that the mdadm utilities are installed.
#
check_md_utils() {
        test -f ${MDADM} || die "${MDADM} utility must be installed"
        test -f ${PARTED} || die "${PARTED} utility must be installed"
}

check_md_partitionable() {
        local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
        local LODEVICE=`unused_loop_device`
        local MDDEVICE=`unused_md_device`
        local RESULT=1

        check_md_utils

        rm -f ${LOFILE}
        dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
            &>/dev/null || return ${RESULT}

        msg "Creating ${LODEVICE} using ${LOFILE}"
        ${LOSETUP} ${LODEVICE} ${LOFILE}
        if [ $? -ne 0 ]; then
                rm -f ${LOFILE}
                return ${RESULT}
        fi

        msg "Creating ${MDDEVICE} using ${LODEVICE}"
        ${MDADM} --build ${MDDEVICE} --level=faulty \
            --raid-devices=1 ${LODEVICE} &>/dev/null
        if [ $? -ne 0 ]; then
                destroy_loop_devices ${LODEVICE}
                rm -f ${LOFILE}
                return ${RESULT}
        fi
        wait_udev ${MDDEVICE} 30

        ${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null
        RESULT=$?

        destroy_md_devices ${MDDEVICE}
        destroy_loop_devices ${LODEVICE}
        rm -f ${LOFILE}

        return ${RESULT}
}

#
# Find and return an unused md device.
#
unused_md_device() {
        for (( i=0; i<32; i++ )); do
                MDDEVICE=md${i}

                # Skip active devices in /proc/mdstat.
                grep -q "${MDDEVICE} " /proc/mdstat && continue

                # Device doesn't exist, use it.
                if [ ! -e /dev/${MDDEVICE} ]; then
                        echo /dev/${MDDEVICE}
                        return
                fi

                # Device exists but may not be in use.
                if [ -b /dev/${MDDEVICE} ]; then
                        ${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
                        if [ $? -eq 1 ]; then
                                echo /dev/${MDDEVICE}
                                return
                        fi
                fi
        done

        die "Error: Unable to find unused md device"
}

#
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours. However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super block
# so you should be able to reconstruct them.
#
cleanup_md_devices() {
        destroy_md_devices "`ls /dev/md* 2>/dev/null | grep -v p`"
        udev_trigger
}

#
# Destroy the passed md devices; this is used when you know
# the names of the md devices.
#
destroy_md_devices() {
        local MDDEVICES="$1"

        msg "Destroying ${MDDEVICES}"
        for MDDEVICE in ${MDDEVICES}; do
                ${MDADM} --stop ${MDDEVICE} &>/dev/null
                ${MDADM} --remove ${MDDEVICE} &>/dev/null
                ${MDADM} --detail ${MDDEVICE} &>/dev/null
        done

        return 0
}

#
# Check that the scsi utilities are installed.
#
check_sd_utils() {
        ${INFOMOD} scsi_debug &>/dev/null || die "scsi_debug module required"
        test -f ${LSSCSI} || die "${LSSCSI} utility must be installed"
}

#
# Rescan the scsi bus for scsi_debug devices. It is preferable to use the
# scsi-rescan tool if it is installed, but if it's not we can fall back to
# removing and re-adding the device manually. This rescan will only affect
# the first scsi_debug device if scsi-rescan is missing.
#
scsi_rescan() {
        local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"

        if [ -f ${SCSIRESCAN} ]; then
                ${SCSIRESCAN} --forcerescan --remove &>/dev/null
        else
                local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
                local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
                echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
                udev_trigger
                echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
                udev_trigger
        fi
}

#
# Trigger udev and wait for it to settle.
#
udev_trigger() {
        if [ -f ${UDEVADM} ]; then
                ${UDEVADM} trigger --action=change --subsystem-match=block
                ${UDEVADM} settle
        else
                /sbin/udevtrigger
                /sbin/udevsettle
        fi
}

#
# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/by-vdev/<CHANNEL><RANK>
# disk mapping. In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n. A CHANNEL should identify a group of RANKS which are all
# attached to a single controller; each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
# given a known hardware configuration.
#
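# For example, the second disk (RANK 2) attached to the first controller
# (CHANNEL 1) is expected to appear as ${DEVDIR}/a2, which is exactly the
# name "udev_cr2d 1 2" below prints.
#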
udev_setup() {
        local SRC_PATH=$1

        # When running in-tree manually construct symlinks in-tree to
        # the proper devices. Symlinks are installed for all entries
        # in the config file regardless of whether that device actually
        # exists. When installed as a package udev can be relied on for
        # this and it will only create links for devices which exist.
        if [ ${INTREE} ]; then
                local SAVED_PWD=`pwd`
                mkdir -p ${DEVDIR}/
                cd ${DEVDIR}/
                ${AWK} '!/^#/ && /./ { system( \
                    "ln -f -s /dev/disk/by-path/"$2" "$1";" \
                    "ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
                    "ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
                    ) }' $SRC_PATH
                cd ${SAVED_PWD}
        else
                DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
                DST_PATH=/etc/zfs/${DST_FILE}

                if [ -e ${DST_PATH} ]; then
                        die "Error: Config ${DST_PATH} already exists"
                fi

                cp ${SRC_PATH} ${DST_PATH}
                udev_trigger
        fi

        return 0
}
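
# udev_setup expects each non-comment line of SRC_PATH to provide the
# by-vdev name in field 1 and the /dev/disk/by-path name in field 2,
# e.g. a hypothetical entry:
#   a1  pci-0000:00:07.0-scsi-0:0:0:0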

udev_cleanup() {
        local SRC_PATH=$1

        if [ ${INTREE} ]; then
                local SAVED_PWD=`pwd`
                cd ${DEVDIR}/
                ${AWK} '!/^#/ && /./ { system( \
                    "rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
                cd ${SAVED_PWD}
        fi

        return 0
}

udev_cr2d() {
        local CHANNEL=`echo "obase=16; $1+96" | bc`
        local RANK=$2

        printf "\x${CHANNEL}${RANK}"
}

udev_raid0_setup() {
        local RANKS=$1
        local CHANNELS=$2
        local IDX=0

        RAID0S=()
        for RANK in `seq 1 ${RANKS}`; do
                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAID0S[${IDX}]="${DEVDIR}/${DISK}"
                        let IDX=IDX+1
                done
        done

        return 0
}
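
# For example, with the default DEVDIR "udev_raid0_setup 1 4" yields one
# disk per channel for rank 1:
#   RAID0S=(/dev/disk/by-vdev/a1 /dev/disk/by-vdev/b1
#           /dev/disk/by-vdev/c1 /dev/disk/by-vdev/d1)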

udev_raid10_setup() {
        local RANKS=$1
        local CHANNELS=$2
        local IDX=0

        RAID10S=()
        for RANK in `seq 1 ${RANKS}`; do
                for CHANNEL1 in `seq 1 2 ${CHANNELS}`; do
                        let CHANNEL2=CHANNEL1+1
                        DISK1=`udev_cr2d ${CHANNEL1} ${RANK}`
                        DISK2=`udev_cr2d ${CHANNEL2} ${RANK}`
                        GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
                        RAID10S[${IDX}]="mirror ${GROUP}"
                        let IDX=IDX+1
                done
        done

        return 0
}

udev_raidz_setup() {
        local RANKS=$1
        local CHANNELS=$2

        RAIDZS=()
        for RANK in `seq 1 ${RANKS}`; do
                RAIDZ=("raidz")

                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
                done

                RAIDZS[${RANK}]="${RAIDZ[*]}"
        done

        return 0
}

udev_raidz2_setup() {
        local RANKS=$1
        local CHANNELS=$2

        RAIDZ2S=()
        for RANK in `seq 1 ${RANKS}`; do
                RAIDZ2=("raidz2")

                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
                done

                RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
        done

        return 0
}

run_one_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
        test_${TEST_NUM}
}

skip_one_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
        skip
}

run_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        for i in ${TESTS_SKIP[@]}; do
                if [[ $i == ${TEST_NUM} ]] ; then
                        skip_one_test ${TEST_NUM} "${TEST_NAME}"
                        return 0
                fi
        done

        if [ "${TESTS_RUN[0]}" = "*" ]; then
                run_one_test ${TEST_NUM} "${TEST_NAME}"
        else
                for i in ${TESTS_RUN[@]}; do
                        if [[ $i == ${TEST_NUM} ]] ; then
                                run_one_test ${TEST_NUM} "${TEST_NAME}"
                                return 0
                        fi
                done

                skip_one_test ${TEST_NUM} "${TEST_NAME}"
        fi
}
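
# A test script sourcing this file defines numbered test_<N> functions which
# report their result via pass/fail, then drives them through run_test while
# honoring TESTS_RUN and TESTS_SKIP; a hypothetical minimal example:
#   test_1() {
#           ${ZFS} list >/dev/null 2>&1 && pass || fail 1
#   }
#   run_test 1 "zfs list"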

wait_udev() {
        local DEVICE=$1
        local DELAY=$2
        local COUNT=0

        udev_trigger
        while [ ! -e ${DEVICE} ]; do
                if [ ${COUNT} -gt ${DELAY} ]; then
                        return 1
                fi

                let COUNT=${COUNT}+1
                sleep 1
        done

        return 0
}
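
# For example, check_md_partitionable above gives its newly built md device
# roughly 30 seconds to appear with "wait_udev ${MDDEVICE} 30"; callers may
# also act on the return value, e.g.:
#   wait_udev /dev/md0 30 || die "/dev/md0 never appeared"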

# Enable the kernel stack tracer and reset the recorded maximum stack usage.
stack_clear() {
        local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
        local STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled

        if [ -e $STACK_MAX_SIZE ]; then
                echo 1 >$STACK_TRACER_ENABLED
                echo 0 >$STACK_MAX_SIZE
        fi
}

# Warn and dump the stack trace when the recorded maximum kernel stack
# usage exceeds STACK_LIMIT bytes.
stack_check() {
        local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
        local STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
        local STACK_LIMIT=7000

        if [ -e $STACK_MAX_SIZE ]; then
                STACK_SIZE=`cat $STACK_MAX_SIZE`

                if [ $STACK_SIZE -ge $STACK_LIMIT ]; then
                        echo
                        echo "Warning: max stack size $STACK_SIZE bytes"
                        cat $STACK_TRACE
                fi
        fi
}