]> git.proxmox.com Git - mirror_zfs-debian.git/blame - scripts/common.sh.in
Retire zpool_id infrastructure
[mirror_zfs-debian.git] / scripts / common.sh.in
CommitLineData
c9c0d073
BB
#!/bin/bash
#
# Common support functions for testing scripts.  If a script-config
# files is available it will be sourced so in-tree kernel modules and
# utilities will be used.  If no script-config can be found then the
# installed kernel modules and utilities will be used.

basedir="$(dirname $0)"

SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
. "${basedir}/../${SCRIPT_CONFIG}"
else
KERNEL_MODULES=(zlib_deflate zlib_inflate)
MODULES=(spl splat zavl znvpair zunicode zcommon zfs)
fi

# Global state shared with the sourcing test scripts.  PROG is
# expected to be redefined by each script for use in messages.
PROG="<define PROG>"
CLEANUP=
VERBOSE=
VERBOSE_FLAG=
FORCE=
FORCE_FLAG=
DUMP_LOG=
ERROR=
# Vdev lists populated by the udev_raid*_setup() helpers below.
RAID0S=()
RAID10S=()
RAIDZS=()
RAIDZ2S=()
# Test selection: TESTS_RUN defaults to '*' (run everything),
# TESTS_SKIP defaults to empty (skip nothing).
TESTS_RUN=${TESTS_RUN:-'*'}
TESTS_SKIP=${TESTS_SKIP:-}

# Installation paths substituted by autoconf at configure time
# (this file is a .in template).
prefix=@prefix@
exec_prefix=@exec_prefix@
libexecdir=@libexecdir@
pkglibexecdir=${libexecdir}/@PACKAGE@
bindir=@bindir@
sbindir=@sbindir@
udevdir=@udevdir@
udevruledir=@udevruledir@
sysconfdir=@sysconfdir@

ETCDIR=${ETCDIR:-/etc}
DEVDIR=${DEVDIR:-/dev/disk/by-vdev}
ZPOOLDIR=${ZPOOLDIR:-${pkglibexecdir}/zpool-config}
ZPIOSDIR=${ZPIOSDIR:-${pkglibexecdir}/zpios-test}
ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkglibexecdir}/zpios-profile}

# ZFS user-space utilities; every path may be overridden from the
# environment (the in-tree script-config does exactly that).
ZDB=${ZDB:-${sbindir}/zdb}
ZFS=${ZFS:-${sbindir}/zfs}
ZINJECT=${ZINJECT:-${sbindir}/zinject}
ZPOOL=${ZPOOL:-${sbindir}/zpool}
ZTEST=${ZTEST:-${sbindir}/ztest}
ZPIOS=${ZPIOS:-${sbindir}/zpios}

# Companion test scripts.
COMMON_SH=${COMMON_SH:-${pkglibexecdir}/common.sh}
ZFS_SH=${ZFS_SH:-${pkglibexecdir}/zfs.sh}
ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkglibexecdir}/zpool-create.sh}
ZPIOS_SH=${ZPIOS_SH:-${pkglibexecdir}/zpios.sh}
ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkglibexecdir}/zpios-survey.sh}

# System utilities; overridable from the environment.
LDMOD=${LDMOD:-/sbin/modprobe}
LSMOD=${LSMOD:-/sbin/lsmod}
RMMOD=${RMMOD:-/sbin/rmmod}
INFOMOD=${INFOMOD:-/sbin/modinfo}
LOSETUP=${LOSETUP:-/sbin/losetup}
MDADM=${MDADM:-/sbin/mdadm}
PARTED=${PARTED:-/sbin/parted}
BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
SYSCTL=${SYSCTL:-/sbin/sysctl}
UDEVADM=${UDEVADM:-/sbin/udevadm}
AWK=${AWK:-/usr/bin/awk}

# ANSI escape sequences used to colorize pass/fail/skip results.
COLOR_BLACK="\033[0;30m"
COLOR_DK_GRAY="\033[1;30m"
COLOR_BLUE="\033[0;34m"
COLOR_LT_BLUE="\033[1;34m"
COLOR_GREEN="\033[0;32m"
COLOR_LT_GREEN="\033[1;32m"
COLOR_CYAN="\033[0;36m"
COLOR_LT_CYAN="\033[1;36m"
COLOR_RED="\033[0;31m"
COLOR_LT_RED="\033[1;31m"
COLOR_PURPLE="\033[0;35m"
COLOR_LT_PURPLE="\033[1;35m"
COLOR_BROWN="\033[0;33m"
COLOR_YELLOW="\033[1;33m"
COLOR_LT_GRAY="\033[0;37m"
COLOR_WHITE="\033[1;37m"
COLOR_RESET="\033[0m"
c9c0d073
BB
# Print an error message prefixed with the script name to stderr
# and terminate with a failure status.
die() {
	printf '%b\n' "${PROG}: $1" >&2
	exit 1
}
98
# Echo the arguments, but only when verbose output was requested.
msg() {
	[ -n "${VERBOSE}" ] && echo "$@"
	return 0
}
104
# Report a colorized test success.
pass() {
	printf '%b\n' "${COLOR_GREEN}Pass${COLOR_RESET}"
}
108
# Report a colorized test failure and exit with the given status.
fail() {
	printf '%b\n' "${COLOR_RED}Fail${COLOR_RESET} ($1)"
	exit $1
}
113
325f0235
BB
# Report a colorized test skip (does not exit).
skip() {
	printf '%b\n' "${COLOR_BROWN}Skip${COLOR_RESET}"
}
117
930b6fec
BB
# Build a small random tree under ROOT for use as a copy source:
# a fixed set of sub-directories, each holding up to MAX_DIR_SIZE
# random files of up to MAX_FILE_SIZE KiB each.
populate() {
	local ROOT=$1
	local MAX_DIR_SIZE=$2
	local MAX_FILE_SIZE=$3

	mkdir -p $ROOT/{a,b,c,d,e,f,g}/{h,i}
	DIRS=$(find $ROOT)

	for DIR in $DIRS; do
		COUNT=$((RANDOM % MAX_DIR_SIZE))

		for i in $(seq $COUNT); do
			FILE=$(mktemp -p ${DIR})
			SIZE=$((RANDOM % MAX_FILE_SIZE))
			dd if=/dev/urandom of=$FILE bs=1k count=$SIZE &>/dev/null
		done
	done

	return 0
}
138
5cbf6db9
BB
init() {
	# Disable the udev rule 90-zfs.rules to prevent the zfs module
	# stack from being loaded due to the detection of a zfs device.
	# This is important because the test scripts require full control
	# over when and how the modules are loaded/unloaded.  A trap is
	# set to ensure the udev rule is correctly replaced on exit.
	local RULE=${udevruledir}/90-zfs.rules
	local RESTORE_CMD=""

	if test -e ${RULE}; then
		RESTORE_CMD="mv ${RULE}.disabled ${RULE};"
		trap "${RESTORE_CMD}" INT TERM EXIT
		mv ${RULE} ${RULE}.disabled
	fi

	# Create a random directory tree of files and sub-directories to
	# act as a copy source for the various regression tests.  Note
	# that a second bare 'trap' here would REPLACE the udev-restore
	# trap above, so both cleanup actions are combined in one trap.
	SRC_DIR=`mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX`
	trap "${RESTORE_CMD} rm -Rf $SRC_DIR" INT TERM EXIT
	populate $SRC_DIR 10 100
}
157
c9c0d073
BB
# Ask the SPL to dump its internal debug log, then extract it with
# the in-tree 'spl' utility into ./<name>.log and report the result.
spl_dump_log() {
	${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
	# The dump handler reports the dump file as the last dmesg
	# line; field 5 is the file name.
	local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
	# NOTE(review): SPLBUILD is expected to come from the sourced
	# zfs-script-config.sh (in-tree builds only) — confirm.
	${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
	echo
	echo "Dumped debug log: ${NAME}.log"
	tail -n1 ${NAME}.log
	echo
	return 0
}
168
# Verify that no ZFS stack module is already loaded and that every
# configured module can be found by modinfo.  On failure ERROR is
# set to a user-facing explanation and 1 is returned.
check_modules() {
	local LOADED_MODULES=()
	local MISSING_MODULES=()

	for MOD in ${MODULES[*]}; do
		local NAME=`basename $MOD .ko`

		if ${LSMOD} | egrep -q "^${NAME}"; then
			LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
		fi

		# Run modinfo and record modules it cannot find.  The
		# original '[ ${INFOMOD} ${MOD} 2>/dev/null ]' was a
		# malformed test that never executed modinfo at all.
		if ! ${INFOMOD} ${MOD} >/dev/null 2>&1; then
			MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
		fi
	done

	if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
		ERROR="Unload these modules with '${PROG} -u':\n"
		ERROR="${ERROR}${LOADED_MODULES[*]}"
		return 1
	fi

	if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
		ERROR="The following modules can not be found,"
		ERROR="${ERROR} ensure your source trees are built:\n"
		ERROR="${ERROR}${MISSING_MODULES[*]}"
		return 1
	fi

	return 0
}
200
# Load a single kernel module via ${LDMOD}; all arguments are passed
# through unchanged.  On failure ERROR is set and 1 is returned.
load_module() {
	local NAME=`basename $1 .ko`

	if [ ${VERBOSE} ]; then
		echo "Loading ${NAME} ($@)"
	fi

	# Group the assignment with the return; the original
	# 'ERROR=... return 1' placed the assignment only in the
	# return builtin's environment, so ERROR was never set.
	${LDMOD} $* &>/dev/null || { ERROR="Failed to load $1"; return 1; }

	return 0
}
212
# Load the full module stack: first the stock KERNEL_MODULES
# prerequisites, then the ZFS MODULES in order.  Each argument of
# the form <module>=<options> supplies options for that module.
load_modules() {
	mkdir -p /etc/zfs

	for MOD in ${KERNEL_MODULES[*]}; do
		load_module ${MOD}
	done

	for MOD in ${MODULES[*]}; do
		local NAME=`basename ${MOD} .ko`
		local VALUE=

		# Pick out the option string matching this module.
		for OPT in "$@"; do
			if [ "${NAME}" = "${OPT%%=*}" ]; then
				VALUE=${OPT#*=}
			fi
		done

		load_module ${MOD} ${VALUE} || return 1
	done

	[ -n "${VERBOSE}" ] && echo "Successfully loaded ZFS module stack"

	return 0
}
241
# Unload a single kernel module by name (a trailing .ko is
# stripped).  On failure ERROR is set and 1 is returned.
unload_module() {
	local NAME=`basename $1 .ko`

	if [ ${VERBOSE} ]; then
		echo "Unloading ${NAME} ($@)"
	fi

	# Group the assignment with the return; the original
	# 'ERROR=... return 1' never actually set ERROR.
	${RMMOD} ${NAME} || { ERROR="Failed to unload ${NAME}"; return 1; }

	return 0
}
253
# Unload the ZFS module stack in the reverse of MODULES order.
# Modules still held by a non-zero use count are silently left
# loaded.  When DUMP_LOG is set the SPL debug log is captured just
# before the spl module itself is removed.
unload_modules() {
	local MODULES_REVERSE=( $(echo ${MODULES[@]} |
		${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )

	for MOD in ${MODULES_REVERSE[*]}; do
		local NAME=`basename ${MOD} .ko`
		# Column 3 of lsmod output is the module use count.
		local USE_COUNT=`${LSMOD} |
				egrep "^${NAME} "| ${AWK} '{print $3}'`

		if [ "${USE_COUNT}" = 0 ] ; then

			if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then
				spl_dump_log
			fi

			unload_module ${MOD} || return 1
		fi
	done

	if [ ${VERBOSE} ]; then
		echo "Successfully unloaded ZFS module stack"
	fi

	return 0
}
279
0ee8118b
BB
#
# Check that the loopback utilities are installed.
# (The original comment incorrectly said "mdadm".)
#
check_loop_utils() {
	test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
}
286
287
#
# Find and return an unused loopback device.
#
unused_loop_device() {
	for DEVICE in `ls -1 /dev/loop[0-9]* 2>/dev/null`; do
		# 'losetup <dev>' exits non-zero when the device has no
		# backing file, i.e. when it is free for use.
		${LOSETUP} ${DEVICE} &>/dev/null
		if [ $? -ne 0 ]; then
			echo ${DEVICE}
			return
		fi
	done

	die "Error: Unable to find unused loopback device"
}
302
#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours.  However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' in the name.  So any damage we might
# do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
	local TMP_FILE=`mktemp`

	# Snapshot 'losetup -a' output; the parens are stripped so the
	# backing-file path in column 3 comes out clean.
	${LOSETUP} -a | tr -d '()' >${TMP_FILE}
	# Detach every zpool-related loop device ...
	${AWK} -F":" -v losetup="$LOSETUP" \
	    '/zpool/ { system("losetup -d "$1) }' ${TMP_FILE}
	# ... and remove its backing file.
	${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}

	rm -f ${TMP_FILE}
}
320
0ee8118b
BB
#
# Destroy the passed loopback devices, this is used when you know
# the names of the loopback devices.
#
destroy_loop_devices() {
	local LODEVICES="$1"

	msg "Destroying ${LODEVICES}"
	# NOTE(review): ${FILE} and ${DEVICE} in the error message are
	# not set by this function; they appear to rely on caller
	# globals — confirm against callers.
	${LOSETUP} -d ${LODEVICES} || \
		die "Error $? destroying ${FILE} -> ${DEVICE} loopback"

	# NOTE(review): ${FILES} is also a caller global presumably
	# naming the backing files to remove — verify.
	rm -f ${FILES}
	return 0
}
335
93648f31
BB
#
# Create a device label (partition table) of the given type.
#
label() {
	local DEVICE=$1
	local LABEL=$2

	${PARTED} ${DEVICE} --script -- mklabel ${LABEL} && return 0

	return 1
}
347
#
# Create a primary partition on a block device.
#
partition() {
	local DEVICE=$1
	local PART_TYPE=$2
	local PART_START=$3
	local PART_END=$4

	${PARTED} --align optimal ${DEVICE} --script -- \
	    mkpart ${PART_TYPE} ${PART_START} ${PART_END} || return 1

	# Let udev create the new partition device node.
	udev_trigger

	return 0
}
363
#
# Create a filesystem on the block device.
#
format() {
	local DEVICE=$1
	local FSTYPE=$2

	# Force 4K blocksize, else mkfs.ext2 tries to use 8K, which
	# won't mount.
	if ! /sbin/mkfs.${FSTYPE} -b 4096 -F -q ${DEVICE} >/dev/null; then
		return 1
	fi

	return 0
}
377
0ee8118b
BB
#
# Check that the mdadm utilities are installed.
#
check_md_utils() {
	[ -f ${MDADM} ] || die "${MDADM} utility must be installed"
	[ -f ${PARTED} ] || die "${PARTED} utility must be installed"
}
385
# Verify that partitions on an md device are visible to the kernel:
# build a faulty-level md array on a loopback device and check that
# 'blockdev --rereadpt' succeeds on it.  Returns 0 when md devices
# are partitionable, non-zero otherwise.  All resources created here
# are torn down before returning.
check_md_partitionable() {
	local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
	local LODEVICE=`unused_loop_device`
	local MDDEVICE=`unused_md_device`
	local RESULT=1

	check_md_utils

	# Sparse 16M backing file for the loopback device.
	rm -f ${LOFILE}
	dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
		&>/dev/null || return ${RESULT}

	msg "Creating ${LODEVICE} using ${LOFILE}"
	${LOSETUP} ${LODEVICE} ${LOFILE}
	if [ $? -ne 0 ]; then
		rm -f ${LOFILE}
		return ${RESULT}
	fi

	msg "Creating ${MDDEVICE} using ${LODEVICE}"
	${MDADM} --build ${MDDEVICE} --level=faulty \
		--raid-devices=1 ${LODEVICE} &>/dev/null
	if [ $? -ne 0 ]; then
		destroy_loop_devices ${LODEVICE}
		rm -f ${LOFILE}
		return ${RESULT}
	fi
	wait_udev ${MDDEVICE} 30

	# The re-read-partition-table ioctl is the actual check.
	${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null
	RESULT=$?

	destroy_md_devices ${MDDEVICE}
	destroy_loop_devices ${LODEVICE}
	rm -f ${LOFILE}

	return ${RESULT}
}
424
#
# Find and return an unused md device.
#
unused_md_device() {
	for (( i=0; i<32; i++ )); do
		MDDEVICE=md${i}

		# Skip active devices in /proc/mdstat.
		grep -q "${MDDEVICE} " /proc/mdstat && continue

		# Device doesn't exist, use it.  (The original test
		# checked the malformed path '$/dev/{MDDEVICE}', so
		# this branch was never taken.)
		if [ ! -e /dev/${MDDEVICE} ]; then
			echo /dev/${MDDEVICE}
			return
		fi

		# Device exists but may not be in use.
		if [ -b /dev/${MDDEVICE} ]; then
			${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
			if [ $? -eq 1 ]; then
				echo /dev/${MDDEVICE}
				return
			fi
		fi
	done

	die "Error: Unable to find unused md device"
}
453
#
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours.  However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super block
# so you should be able to reconstruct them.
#
cleanup_md_devices() {
	# 'grep -v p' filters out partition nodes (e.g. md0p1).
	destroy_md_devices "`ls /dev/md* 2>/dev/null | grep -v p`"
	udev_trigger
}
465
#
# Destroy the passed md devices, this is used when you know
# the names of the md devices.
#
destroy_md_devices() {
	local MDDEVICES="$1"

	msg "Destroying ${MDDEVICES}"
	# Best-effort teardown; each mdadm action's failure is ignored.
	for MDDEVICE in ${MDDEVICES}; do
		for ACTION in --stop --remove --detail; do
			${MDADM} ${ACTION} ${MDDEVICE} &>/dev/null
		done
	done

	return 0
}
482
#
# Check that the scsi utilities are installed.
#
check_sd_utils() {
	${INFOMOD} scsi_debug &>/dev/null || die "scsi_debug module required"
	[ -f ${LSSCSI} ] || die "${LSSCSI} utility must be installed"
}
490
#
# Rescan the scsi bus for scsi_debug devices.  It is preferable to use
# the scsi-rescan tool if it is installed, but if it's not we can fall
# back to removing and readding the device manually.  This rescan will
# only effect the first scsi_debug device if scsi-rescan is missing.
#
scsi_rescan() {
	local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"

	if [ -f ${SCSIRESCAN} ]; then
		${SCSIRESCAN} --forcerescan --remove &>/dev/null
	else
		# Manual fallback: delete the first scsi_debug device
		# via sysfs and ask its host adapter to rescan.
		local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
		local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
		echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
		udev_trigger
		echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
		udev_trigger
	fi
}
511
#
# Trigger udev and wait for it to settle.
#
udev_trigger() {
	if [ -f ${UDEVADM} ]; then
		# Modern udev: a single udevadm binary.
		${UDEVADM} trigger --action=change --subsystem-match=block
		${UDEVADM} settle
	else
		# Legacy udev: separate trigger/settle utilities.
		/sbin/udevtrigger
		/sbin/udevsettle
	fi
}
524
c9c0d073
BB
#
# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/by-vdev/<CHANNEL><RANK>
# disk mapping.  In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n.  A CHANNEL should identify a group of RANKS which are all
# attached to a single controller, each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
# given a known hardware configuration.
#
udev_setup() {
	local SRC_PATH=$1

	# When running in tree manually construct symlinks in tree to
	# the proper devices.  Symlinks are installed for all entries
	# in the config file regardless of if that device actually
	# exists.  When installed as a package udev can be relied on for
	# this and it will only create links for devices which exist.
	if [ ${INTREE} ]; then
		PWD=`pwd`
		mkdir -p ${DEVDIR}/
		cd ${DEVDIR}/
		# Column 1 is the by-vdev name, column 2 the by-path id;
		# link the whole disk plus partitions 1 and 9.
		${AWK} '!/^#/ && /./ { system( \
		    "ln -f -s /dev/disk/by-path/"$2" "$1";" \
		    "ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
		    "ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
		    ) }' $SRC_PATH
		cd ${PWD}
	else
		# Installed case: copy the config in place and let udev
		# create links for the devices which actually exist.
		DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
		DST_PATH=/etc/zfs/${DST_FILE}

		if [ -e ${DST_PATH} ]; then
			die "Error: Config ${DST_PATH} already exists"
		fi

		cp ${SRC_PATH} ${DST_PATH}
		udev_trigger
	fi

	return 0
}
567
# Remove the in-tree by-vdev symlinks created by udev_setup().
# The installed (udev-managed) case needs no cleanup here.
udev_cleanup() {
	local SRC_PATH=$1

	if [ ${INTREE} ]; then
		PWD=`pwd`
		cd ${DEVDIR}/
		${AWK} '!/^#/ && /./ { system( \
		    "rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
		cd ${PWD}
	fi

	return 0
}
581
# Convert a (channel, rank) pair to its <channel><rank> disk name,
# where channel 1-26 maps to the letters a-z (e.g. 1,2 -> "a2").
# Uses the printf builtin rather than the original external 'bc'
# call, removing an unnecessary runtime dependency; the emitted
# bytes are identical.
udev_cr2d() {
	local CHANNEL=`printf '%x' $(($1 + 96))`
	local RANK=$2

	printf "\x${CHANNEL}${RANK}"
}
588
# Populate the global RAID0S array with one by-vdev path per
# (rank, channel) disk, forming a simple stripe configuration.
udev_raid0_setup() {
	local RANKS=$1
	local CHANNELS=$2
	local IDX=0

	RAID0S=()
	for (( RANK=1; RANK<=RANKS; RANK++ )); do
		for (( CHANNEL=1; CHANNEL<=CHANNELS; CHANNEL++ )); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAID0S[${IDX}]="${DEVDIR}/${DISK}"
			IDX=$((IDX + 1))
		done
	done

	return 0
}
605
# Populate the global RAID10S array with "mirror <diskA> <diskB>"
# entries, pairing adjacent channels within each rank.
udev_raid10_setup() {
	local RANKS=$1
	local CHANNELS=$2
	local IDX=0

	RAID10S=()
	for (( RANK=1; RANK<=RANKS; RANK++ )); do
		for (( CHANNEL1=1; CHANNEL1<=CHANNELS; CHANNEL1+=2 )); do
			CHANNEL2=$((CHANNEL1 + 1))
			DISK1=$(udev_cr2d ${CHANNEL1} ${RANK})
			DISK2=$(udev_cr2d ${CHANNEL2} ${RANK})
			GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
			RAID10S[${IDX}]="mirror ${GROUP}"
			IDX=$((IDX + 1))
		done
	done

	return 0
}
625
# Populate the global RAIDZS array with one "raidz <disks...>" entry
# per rank, striping each rank across every channel.  Note that the
# array is indexed by rank starting at 1.
udev_raidz_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZS=()
	for (( RANK=1; RANK<=RANKS; RANK++ )); do
		RAIDZ=("raidz")

		for (( CHANNEL=1; CHANNEL<=CHANNELS; CHANNEL++ )); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZS[${RANK}]="${RAIDZ[*]}"
	done

	return 0
}
644
# Populate the global RAIDZ2S array with one "raidz2 <disks...>"
# entry per rank, striping each rank across every channel.  The
# array is indexed by rank starting at 1.
udev_raidz2_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZ2S=()
	for (( RANK=1; RANK<=RANKS; RANK++ )); do
		RAIDZ2=("raidz2")

		for (( CHANNEL=1; CHANNEL<=CHANNELS; CHANNEL++ )); do
			DISK=$(udev_cr2d ${CHANNEL} ${RANK})
			RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
	done

	return 0
}
325f0235
BB
663
# Print the test banner, then invoke the numbered test function
# test_<NUM>, which the calling script must have defined.
run_one_test() {
	local NUM=$1
	local DESC=$2

	printf "%-4d %-34s " ${NUM} "${DESC}"
	test_${NUM}
}
671
# Print the test banner and report the test as skipped.
skip_one_test() {
	local NUM=$1
	local DESC=$2

	printf "%-4d %-34s " ${NUM} "${DESC}"
	skip
}
679
# Run or skip the numbered test according to the TESTS_RUN and
# TESTS_SKIP selection patterns.  An explicit skip always wins;
# otherwise the test runs when TESTS_RUN is '*' or matches it.
run_test() {
	local NUM=$1
	local DESC=$2
	local PAT

	for PAT in ${TESTS_SKIP[@]}; do
		if [[ ${PAT} == ${NUM} ]]; then
			skip_one_test ${NUM} "${DESC}"
			return 0
		fi
	done

	if [ "${TESTS_RUN[0]}" = "*" ]; then
		run_one_test ${NUM} "${DESC}"
	else
		for PAT in ${TESTS_RUN[@]}; do
			if [[ ${PAT} == ${NUM} ]]; then
				run_one_test ${NUM} "${DESC}"
				return 0
			fi
		done

		skip_one_test ${NUM} "${DESC}"
	fi
}
2c4834f8
BB
704
# Trigger udev and wait up to roughly DELAY seconds for DEVICE to
# appear.  Returns 0 once the device node exists, 1 on timeout.
wait_udev() {
	local DEVICE=$1
	local DELAY=$2
	local COUNT=0

	udev_trigger
	until [ -e ${DEVICE} ]; do
		[ ${COUNT} -gt ${DELAY} ] && return 1
		COUNT=$((COUNT + 1))
		sleep 1
	done

	return 0
}
10715a01
BB
722
# Enable the kernel stack tracer and reset its high-water mark so a
# later stack_check() reports usage for this run only.  Silently a
# no-op when the debugfs stack tracer files are unavailable.
stack_clear() {
	local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
	local STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled

	if [ -e $STACK_MAX_SIZE ]; then
		echo 1 >$STACK_TRACER_ENABLED
		echo 0 >$STACK_MAX_SIZE
	fi
}
732
# Warn, and dump the offending stack trace, when the kernel stack
# high-water mark exceeds STACK_LIMIT bytes.  No-op when the debugfs
# stack tracer files are unavailable.
stack_check() {
	local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
	local STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
	local STACK_LIMIT=7000

	if [ -e $STACK_MAX_SIZE ]; then
		STACK_SIZE=`cat $STACK_MAX_SIZE`

		if [ $STACK_SIZE -ge $STACK_LIMIT ]; then
			echo
			echo "Warning: max stack size $STACK_SIZE bytes"
			cat $STACK_TRACE
		fi
	fi
}