# Common support functions for testing scripts.  If a script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used.  If no script-config can be found then the
# installed kernel modules and utilities will be used.
# Locate the directory containing this script; an in-tree
# zfs-script-config.sh is looked for one directory above it.
# Quote "$0" so paths containing whitespace do not word-split.
basedir="$(dirname "$0")"

# Name of the optional in-tree configuration file.
SCRIPT_CONFIG=zfs-script-config.sh
11 if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
12 .
"${basedir}/../${SCRIPT_CONFIG}"
14 KERNEL_MODULES
=(zlib_deflate zlib_inflate
)
15 MODULES
=(spl splat zavl znvpair zunicode zcommon zfs
)
# Which test numbers to run and which to skip; both may be
# overridden in the environment before sourcing this file.
# By default every test runs and none are skipped.
: "${TESTS_RUN:=*}"
: "${TESTS_SKIP:=}"
# Installation paths; the @...@ tokens are substituted by autoconf
# at build time (this file is a *.sh.in template).
exec_prefix=@exec_prefix@
libexecdir=@libexecdir@
pkglibexecdir=${libexecdir}/@PACKAGE@
udevruledir=@udevruledir@
sysconfdir=@sysconfdir@

# Test-suite working directories; each may be overridden from the
# environment before this file is sourced.
: "${ETCDIR:=/etc}"
: "${DEVDIR:=/dev/disk/by-vdev}"
: "${ZPOOLDIR:=${pkglibexecdir}/zpool-config}"
: "${ZPIOSDIR:=${pkglibexecdir}/zpios-test}"
: "${ZPIOSPROFILEDIR:=${pkglibexecdir}/zpios-profile}"
# ZFS user-space utilities.  Defaults point at the configured sbindir;
# the environment may redirect any of them at an alternate binary.
# NOTE(review): sbindir is presumably set by an autoconf assignment
# earlier in this file — confirm.
: "${ZDB:=${sbindir}/zdb}"
: "${ZFS:=${sbindir}/zfs}"
: "${ZINJECT:=${sbindir}/zinject}"
: "${ZPOOL:=${sbindir}/zpool}"
: "${ZTEST:=${sbindir}/ztest}"
: "${ZPIOS:=${sbindir}/zpios}"
# Companion test scripts shipped alongside this file in
# pkglibexecdir; each path may be overridden from the environment.
: "${COMMON_SH:=${pkglibexecdir}/common.sh}"
: "${ZFS_SH:=${pkglibexecdir}/zfs.sh}"
: "${ZPOOL_CREATE_SH:=${pkglibexecdir}/zpool-create.sh}"
: "${ZPIOS_SH:=${pkglibexecdir}/zpios.sh}"
: "${ZPIOS_SURVEY_SH:=${pkglibexecdir}/zpios-survey.sh}"
# System utilities used by the test scripts.  Absolute defaults are
# used (the scripts may run with a minimal PATH); each may be
# overridden from the environment.
: "${LDMOD:=/sbin/modprobe}"
: "${LSMOD:=/sbin/lsmod}"
: "${RMMOD:=/sbin/rmmod}"
: "${INFOMOD:=/sbin/modinfo}"
: "${LOSETUP:=/sbin/losetup}"
: "${MDADM:=/sbin/mdadm}"
: "${PARTED:=/sbin/parted}"
: "${BLOCKDEV:=/sbin/blockdev}"
: "${LSSCSI:=/usr/bin/lsscsi}"
: "${SCSIRESCAN:=/usr/bin/scsi-rescan}"
: "${SYSCTL:=/sbin/sysctl}"
: "${UDEVADM:=/sbin/udevadm}"
: "${AWK:=/usr/bin/awk}"
# ANSI terminal color escape sequences.  Each variable holds the
# literal characters '\033[...m'; they are translated to real escapes
# only when printed with 'echo -e'.  Normal and bright variants are
# paired per color.
COLOR_BLACK="\033[0;30m";   COLOR_DK_GRAY="\033[1;30m"
COLOR_RED="\033[0;31m";     COLOR_LT_RED="\033[1;31m"
COLOR_GREEN="\033[0;32m";   COLOR_LT_GREEN="\033[1;32m"
COLOR_BROWN="\033[0;33m";   COLOR_YELLOW="\033[1;33m"
COLOR_BLUE="\033[0;34m";    COLOR_LT_BLUE="\033[1;34m"
COLOR_PURPLE="\033[0;35m";  COLOR_LT_PURPLE="\033[1;35m"
COLOR_CYAN="\033[0;36m";    COLOR_LT_CYAN="\033[1;36m"
COLOR_LT_GRAY="\033[0;37m"; COLOR_WHITE="\033[1;37m"
95 echo -e "${PROG}: $1" >&2
100 if [ ${VERBOSE} ]; then
106 echo -e "${COLOR_GREEN}Pass${COLOR_RESET}"
110 echo -e "${COLOR_RED}Fail${COLOR_RESET} ($1)"
115 echo -e "${COLOR_BROWN}Skip${COLOR_RESET}"
120 local MAX_DIR_SIZE
=$2
121 local MAX_FILE_SIZE
=$3
123 mkdir
-p $ROOT/{a
,b
,c
,d
,e
,f
,g
}/{h
,i
}
127 COUNT
=$
(($RANDOM % $MAX_DIR_SIZE))
129 for i
in `seq $COUNT`; do
130 FILE
=`mktemp -p ${DIR}`
131 SIZE
=$
(($RANDOM % $MAX_FILE_SIZE))
132 dd if=/dev
/urandom of
=$FILE bs
=1k count
=$SIZE &>/dev
/null
140 # Disable the udev rule 90-zfs.rules to prevent the zfs module
141 # stack from being loaded due to the detection of a zfs device.
142 # This is important because the test scripts require full control
143 # over when and how the modules are loaded/unloaded. A trap is
144 # set to ensure the udev rule is correctly replaced on exit.
145 local RULE
=${udevruledir}/90-zfs.rules
146 if test -e ${RULE}; then
147 trap "mv ${RULE}.disabled ${RULE}" INT TERM EXIT
148 mv ${RULE} ${RULE}.disabled
# Create a random directory tree of files and sub-directories to
# act as a copy source for the various regression tests.
153 SRC_DIR
=`mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX`
154 trap "rm -Rf $SRC_DIR" INT TERM EXIT
155 populate
$SRC_DIR 10 100
159 ${SYSCTL} -w kernel.spl.debug.dump
=1 &>/dev
/null
160 local NAME
=`dmesg | tail -n 1 | cut -f5 -d' '`
161 ${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
163 echo "Dumped debug log: ${NAME}.log"
170 local LOADED_MODULES
=()
171 local MISSING_MODULES
=()
173 for MOD
in ${MODULES[*]}; do
174 local NAME
=`basename $MOD .ko`
176 if ${LSMOD} |
egrep -q "^${NAME}"; then
177 LOADED_MODULES
=(${NAME} ${LOADED_MODULES[*]})
180 if [ ${INFOMOD} ${MOD} 2>/dev
/null
]; then
181 MISSING_MODULES
=("\t${MOD}\n" ${MISSING_MODULES[*]})
185 if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
186 ERROR
="Unload these modules with '${PROG} -u':\n"
187 ERROR
="${ERROR}${LOADED_MODULES[*]}"
191 if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
192 ERROR
="The following modules can not be found,"
193 ERROR
="${ERROR} ensure your source trees are built:\n"
194 ERROR
="${ERROR}${MISSING_MODULES[*]}"
202 local NAME
=`basename $1 .ko`
204 if [ ${VERBOSE} ]; then
205 echo "Loading ${NAME} ($@)"
208 ${LDMOD} $
* &>/dev
/null || ERROR
="Failed to load $1" return 1
216 for MOD
in ${KERNEL_MODULES[*]}; do
220 for MOD
in ${MODULES[*]}; do
221 local NAME
=`basename ${MOD} .ko`
225 OPT_NAME
=`echo ${OPT} | cut -f1 -d'='`
227 if [ ${NAME} = "${OPT_NAME}" ]; then
228 VALUE
=`echo ${OPT} | cut -f2- -d'='`
232 load_module
${MOD} ${VALUE} ||
return 1
235 if [ ${VERBOSE} ]; then
236 echo "Successfully loaded ZFS module stack"
243 local NAME
=`basename $1 .ko`
245 if [ ${VERBOSE} ]; then
246 echo "Unloading ${NAME} ($@)"
249 ${RMMOD} ${NAME} || ERROR="Failed to unload ${NAME}" return 1
255 local MODULES_REVERSE=( $(echo ${MODULES[@]} |
256 ${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )
258 for MOD in ${MODULES_REVERSE[*]}; do
259 local NAME=`basename ${MOD} .ko`
260 local USE_COUNT=`${LSMOD} |
261 egrep "^
${NAME} "| ${AWK} '{print $3}'`
263 if [ "${USE_COUNT}" = 0 ] ; then
265 if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then
269 unload_module ${MOD} || return 1
273 if [ ${VERBOSE} ]; then
274 echo "Successfully unloaded ZFS module stack"
# Check that the losetup utility is installed.
284 test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
289 # Find and return an unused loopback device.
291 unused_loop_device() {
292 for DEVICE in `ls -1 /dev/loop[0-9]* 2>/dev/null`; do
293 ${LOSETUP} ${DEVICE} &>/dev/null
294 if [ $? -ne 0 ]; then
300 die "Error: Unable to find unused loopback device"
304 # This can be slightly dangerous because the loop devices we are
305 # cleaning up may not be ours. However, if the devices are currently
306 # in use we will not be able to remove them, and we only remove
307 # devices which include 'zpool
' in the name. So any damage we might
308 # do should be limited to other zfs related testing.
310 cleanup_loop_devices() {
311 local TMP_FILE=`mktemp`
313 ${LOSETUP} -a | tr -d '()' >${TMP_FILE}
314 ${AWK} -F":" -v losetup="$LOSETUP" \
315 '/zpool
/ { system
("losetup -d "$1) }' ${TMP_FILE}
316 ${AWK} -F" " '/zpool
/ { system
("rm -f "$3) }' ${TMP_FILE}
322 # Destroy the passed loopback devices, this is used when you know
323 # the names of the loopback devices.
325 destroy_loop_devices() {
328 msg "Destroying ${LODEVICES}"
329 ${LOSETUP} -d ${LODEVICES} || \
330 die "Error $? destroying ${FILE} -> ${DEVICE} loopback"
337 # Create a device label.
343 ${PARTED} ${DEVICE} --script -- mklabel ${LABEL} || return 1
349 # Create a primary partition on a block device.
357 ${PARTED} --align optimal ${DEVICE} --script -- \
358 mkpart ${TYPE} ${START} ${END} || return 1
365 # Create a filesystem on the block device
371 # Force 4K blocksize, else mkfs.ext2 tries to use 8K, which
373 /sbin
/mkfs.
${FSTYPE} -b 4096 -F -q ${DEVICE} >/dev
/null ||
return 1
379 # Check that the mdadm utilities are installed.
382 test -f ${MDADM} || die
"${MDADM} utility must be installed"
383 test -f ${PARTED} || die
"${PARTED} utility must be installed"
386 check_md_partitionable
() {
387 local LOFILE
=`mktemp -p /tmp zpool-lo.XXXXXXXX`
388 local LODEVICE
=`unused_loop_device`
389 local MDDEVICE
=`unused_md_device`
395 dd if=/dev
/zero of
=${LOFILE} bs
=1M count
=0 seek
=16 \
396 &>/dev
/null ||
return ${RESULT}
398 msg
"Creating ${LODEVICE} using ${LOFILE}"
399 ${LOSETUP} ${LODEVICE} ${LOFILE}
400 if [ $?
-ne 0 ]; then
405 msg
"Creating ${MDDEVICE} using ${LODEVICE}"
406 ${MDADM} --build ${MDDEVICE} --level=faulty \
407 --raid-devices=1 ${LODEVICE} &>/dev
/null
408 if [ $?
-ne 0 ]; then
409 destroy_loop_devices
${LODEVICE}
413 wait_udev
${MDDEVICE} 30
415 ${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev
/null
418 destroy_md_devices
${MDDEVICE}
419 destroy_loop_devices
${LODEVICE}
426 # Find and return an unused md device.
429 for (( i
=0; i
<32; i
++ )); do
# Skip active devices in /proc/mdstat.
433 grep -q "${MDDEVICE} " /proc
/mdstat
&& continue
435 # Device doesn't exist, use it.
436 if [ ! -e $
/dev
/{MDDEVICE
} ]; then
437 echo /dev
/${MDDEVICE}
441 # Device exists but may not be in use.
442 if [ -b /dev
/${MDDEVICE} ]; then
443 ${MDADM} --detail /dev
/${MDDEVICE} &>/dev
/null
444 if [ $?
-eq 1 ]; then
445 echo /dev
/${MDDEVICE}
451 die
"Error: Unable to find unused md device"
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours.  However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super block
# so you should be able to reconstruct them.
461 cleanup_md_devices
() {
462 destroy_md_devices
"`ls /dev/md* 2>/dev/null | grep -v p`"
467 # Destroy the passed md devices, this is used when you know
468 # the names of the md devices.
470 destroy_md_devices
() {
473 msg
"Destroying ${MDDEVICES}"
474 for MDDEVICE
in ${MDDEVICES}; do
475 ${MDADM} --stop ${MDDEVICE} &>/dev
/null
476 ${MDADM} --remove ${MDDEVICE} &>/dev
/null
477 ${MDADM} --detail ${MDDEVICE} &>/dev
/null
484 # Check that the scsi utilities are installed.
487 ${INFOMOD} scsi_debug
&>/dev
/null || die
"scsi_debug module required"
488 test -f ${LSSCSI} || die
"${LSSCSI} utility must be installed"
# Rescan the scsi bus for scsi_debug devices.  It is preferable to use
# the scsi-rescan tool if it is installed, but if it's not we can fall
# back to removing and re-adding the device manually.  This rescan will
# only affect the first scsi_debug device if scsi-rescan is missing.
498 local AWK_SCRIPT
="/scsi_debug/ { print \$1; exit }"
500 if [ -f ${SCSIRESCAN} ]; then
501 ${SCSIRESCAN} --forcerescan --remove &>/dev
/null
503 local SCSIID
=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
504 local SCSIHOST
=`echo ${SCSIID} | cut -f1 -d':'`
505 echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
507 echo "- - -" >/sys
/class
/scsi_host
/host${SCSIHOST}/scan
513 # Trigger udev and wait for it to settle.
516 if [ -f ${UDEVADM} ]; then
517 ${UDEVADM} trigger
--action=change
--subsystem-match=block
# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/by-vdev/<CHANNEL><RANK>
# disk mapping.  In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n.  A CHANNEL should identify a group of RANKS which are all
# attached to a single controller, each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
# given a known hardware configuration.
# When running in tree manually construct symlinks in tree to
# the proper devices.  Symlinks are installed for all entries
# in the config file regardless of whether that device actually
# exists.  When installed as a package udev can be relied on for
# this and it will only create links for devices which exist.
543 if [ ${INTREE} ]; then
547 ${AWK} '!/^#/ && /./ { system( \
548 "ln -f -s /dev/disk/by-path/"$2" "$1";" \
549 "ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
550 "ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
554 DST_FILE
=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
555 DST_PATH
=/etc
/zfs
/${DST_FILE}
557 if [ -e ${DST_PATH} ]; then
558 die
"Error: Config ${DST_PATH} already exists"
561 cp ${SRC_PATH} ${DST_PATH}
571 if [ ${INTREE} ]; then
574 ${AWK} '!/^#/ && /./ { system( \
575 "rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
583 local CHANNEL
=`echo "obase=16; $1+96" | bc`
586 printf "\x${CHANNEL}${RANK}"
595 for RANK
in `seq 1 ${RANKS}`; do
596 for CHANNEL
in `seq 1 ${CHANNELS}`; do
597 DISK
=`udev_cr2d ${CHANNEL} ${RANK}`
598 RAID0S
[${IDX}]="${DEVDIR}/${DISK}"
606 udev_raid10_setup() {
612 for RANK in `seq 1 ${RANKS}`; do
613 for CHANNEL1 in `seq 1 2 ${CHANNELS}`; do
614 let CHANNEL2=CHANNEL1+1
615 DISK1=`udev_cr2d ${CHANNEL1} ${RANK}`
616 DISK2=`udev_cr2d ${CHANNEL2} ${RANK}`
617 GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
618 RAID10S[${IDX}]="mirror
${GROUP}"
631 for RANK in `seq 1 ${RANKS}`; do
634 for CHANNEL in `seq 1 ${CHANNELS}`; do
635 DISK=`udev_cr2d ${CHANNEL} ${RANK}`
636 RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
639 RAIDZS
[${RANK}]="${RAIDZ[*]}"
645 udev_raidz2_setup
() {
650 for RANK
in `seq 1 ${RANKS}`; do
653 for CHANNEL
in `seq 1 ${CHANNELS}`; do
654 DISK
=`udev_cr2d ${CHANNEL} ${RANK}`
655 RAIDZ2
[${CHANNEL}]="${DEVDIR}/${DISK}"
658 RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
668 printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
676 printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
684 for i in ${TESTS_SKIP[@]}; do
685 if [[ $i == ${TEST_NUM} ]] ; then
686 skip_one_test ${TEST_NUM} "${TEST_NAME}"
691 if [ "${TESTS_RUN[0]}" = "*" ]; then
692 run_one_test ${TEST_NUM} "${TEST_NAME}"
694 for i in ${TESTS_RUN[@]}; do
695 if [[ $i == ${TEST_NUM} ]] ; then
696 run_one_test ${TEST_NUM} "${TEST_NAME}"
701 skip_one_test ${TEST_NUM} "${TEST_NAME}"
711 while [ ! -e ${DEVICE} ]; do
712 if [ ${COUNT} -gt ${DELAY} ]; then
724 local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
725 local STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled
727 if [ -e $STACK_MAX_SIZE ]; then
728 echo 1 >$STACK_TRACER_ENABLED
729 echo 0 >$STACK_MAX_SIZE
734 local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
735 local STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
736 local STACK_LIMIT=7000
738 if [ -e $STACK_MAX_SIZE ]; then
739 STACK_SIZE=`cat $STACK_MAX_SIZE`
741 if [ $STACK_SIZE -ge $STACK_LIMIT ]; then
743 echo "Warning
: max stack size
$STACK_SIZE bytes
"