#!/bin/bash
#
# Common support functions for testing scripts. If a script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used. If no script-config can be found then the
# installed kernel modules and utilities will be used.

basedir="$(dirname $0)"

SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
        . "${basedir}/../${SCRIPT_CONFIG}"
else
        KERNEL_MODULES=(zlib_deflate zlib_inflate)
        MODULES=(spl splat zavl znvpair zunicode zcommon zfs)
fi

PROG="<define PROG>"
CLEANUP=
VERBOSE=
VERBOSE_FLAG=
FORCE=
FORCE_FLAG=
DUMP_LOG=
ERROR=
RAID0S=()
RAID10S=()
RAIDZS=()
RAIDZ2S=()
TESTS_RUN=${TESTS_RUN:-'*'}
TESTS_SKIP=${TESTS_SKIP:-}

prefix=@prefix@
exec_prefix=@exec_prefix@
libexecdir=@libexecdir@
pkglibexecdir=${libexecdir}/@PACKAGE@
bindir=@bindir@
sbindir=@sbindir@
udevdir=@udevdir@
udevruledir=@udevruledir@
sysconfdir=@sysconfdir@

ETCDIR=${ETCDIR:-/etc}
DEVDIR=${DEVDIR:-/dev/disk/zpool}
ZPOOLDIR=${ZPOOLDIR:-${pkglibexecdir}/zpool-config}
ZPIOSDIR=${ZPIOSDIR:-${pkglibexecdir}/zpios-test}
ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkglibexecdir}/zpios-profile}

ZDB=${ZDB:-${sbindir}/zdb}
ZFS=${ZFS:-${sbindir}/zfs}
ZINJECT=${ZINJECT:-${sbindir}/zinject}
ZPOOL=${ZPOOL:-${sbindir}/zpool}
ZPOOL_ID=${ZPOOL_ID:-${bindir}/zpool_id}
ZTEST=${ZTEST:-${sbindir}/ztest}
ZPIOS=${ZPIOS:-${sbindir}/zpios}

COMMON_SH=${COMMON_SH:-${pkglibexecdir}/common.sh}
ZFS_SH=${ZFS_SH:-${pkglibexecdir}/zfs.sh}
ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkglibexecdir}/zpool-create.sh}
ZPIOS_SH=${ZPIOS_SH:-${pkglibexecdir}/zpios.sh}
ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkglibexecdir}/zpios-survey.sh}

LDMOD=${LDMOD:-/sbin/modprobe}
LSMOD=${LSMOD:-/sbin/lsmod}
RMMOD=${RMMOD:-/sbin/rmmod}
INFOMOD=${INFOMOD:-/sbin/modinfo}
LOSETUP=${LOSETUP:-/sbin/losetup}
MDADM=${MDADM:-/sbin/mdadm}
PARTED=${PARTED:-/sbin/parted}
BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
SYSCTL=${SYSCTL:-/sbin/sysctl}
UDEVADM=${UDEVADM:-/sbin/udevadm}
AWK=${AWK:-/usr/bin/awk}

COLOR_BLACK="\033[0;30m"
COLOR_DK_GRAY="\033[1;30m"
COLOR_BLUE="\033[0;34m"
COLOR_LT_BLUE="\033[1;34m"
COLOR_GREEN="\033[0;32m"
COLOR_LT_GREEN="\033[1;32m"
COLOR_CYAN="\033[0;36m"
COLOR_LT_CYAN="\033[1;36m"
COLOR_RED="\033[0;31m"
COLOR_LT_RED="\033[1;31m"
COLOR_PURPLE="\033[0;35m"
COLOR_LT_PURPLE="\033[1;35m"
COLOR_BROWN="\033[0;33m"
COLOR_YELLOW="\033[1;33m"
COLOR_LT_GRAY="\033[0;37m"
COLOR_WHITE="\033[1;37m"
COLOR_RESET="\033[0m"

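#
# Output helpers: die() prints an error message and exits, msg()
# prints its arguments only when VERBOSE is set, and pass(), fail(),
# and skip() print a color-coded test result.
#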
die() {
        echo -e "${PROG}: $1" >&2
        exit 1
}

msg() {
        if [ ${VERBOSE} ]; then
                echo "$@"
        fi
}

pass() {
        echo -e "${COLOR_GREEN}Pass${COLOR_RESET}"
}

fail() {
        echo -e "${COLOR_RED}Fail${COLOR_RESET} ($1)"
        exit $1
}

skip() {
        echo -e "${COLOR_BROWN}Skip${COLOR_RESET}"
}

init() {
        # Disable the udev rule 90-zfs.rules to prevent the zfs module
        # stack from being loaded due to the detection of a zfs device.
        # This is important because the test scripts require full control
        # over when and how the modules are loaded/unloaded. A trap is
        # set to ensure the udev rule is correctly replaced on exit.
        local RULE=${udevruledir}/90-zfs.rules
        if test -e ${RULE}; then
                trap "mv ${RULE}.disabled ${RULE}; exit \$?" INT TERM EXIT
                mv ${RULE} ${RULE}.disabled
        fi
}

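#
# Dump the SPL debug log to a file in the current directory and
# print its final line. SPLBUILD is expected to be set by the
# generated script-config file when building in-tree.
#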
spl_dump_log() {
        ${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
        local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
        ${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
        echo
        echo "Dumped debug log: ${NAME}.log"
        tail -n1 ${NAME}.log
        echo
        return 0
}

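#
# Verify that none of the ZFS modules are already loaded and that
# all of them can be found. On failure a message is left in ERROR
# and 1 is returned.
#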
check_modules() {
        local LOADED_MODULES=()
        local MISSING_MODULES=()

        for MOD in ${MODULES[*]}; do
                local NAME=`basename $MOD .ko`

                if ${LSMOD} | egrep -q "^${NAME}"; then
                        LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
                fi

                if ! ${INFOMOD} ${MOD} >/dev/null 2>&1; then
                        MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
                fi
        done

        if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
                ERROR="Unload these modules with '${PROG} -u':\n"
                ERROR="${ERROR}${LOADED_MODULES[*]}"
                return 1
        fi

        if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
                ERROR="The following modules cannot be found,"
                ERROR="${ERROR} ensure your source trees are built:\n"
                ERROR="${ERROR}${MISSING_MODULES[*]}"
                return 1
        fi

        return 0
}

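#
# Load a single kernel module, passing along any module options.
# On failure a message is left in ERROR and 1 is returned.
#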
load_module() {
        local NAME=`basename $1 .ko`

        if [ ${VERBOSE} ]; then
                echo "Loading ${NAME} ($@)"
        fi

        ${LDMOD} $* &>/dev/null || { ERROR="Failed to load $1"; return 1; }

        return 0
}

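#
# Load the full ZFS module stack. Per-module options may be passed
# as "module=options" arguments; for example, a hypothetical
# invocation might be: load_modules "zfs=zvol_major=240".
#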
load_modules() {
        mkdir -p /etc/zfs

        for MOD in ${KERNEL_MODULES[*]}; do
                load_module ${MOD}
        done

        for MOD in ${MODULES[*]}; do
                local NAME=`basename ${MOD} .ko`
                local VALUE=

                for OPT in "$@"; do
                        OPT_NAME=`echo ${OPT} | cut -f1 -d'='`

                        if [ ${NAME} = "${OPT_NAME}" ]; then
                                VALUE=`echo ${OPT} | cut -f2- -d'='`
                        fi
                done

                load_module ${MOD} ${VALUE} || return 1
        done

        if [ ${VERBOSE} ]; then
                echo "Successfully loaded ZFS module stack"
        fi

        return 0
}

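#
# Unload a single kernel module by name. On failure a message is
# left in ERROR and 1 is returned.
#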
unload_module() {
        local NAME=`basename $1 .ko`

        if [ ${VERBOSE} ]; then
                echo "Unloading ${NAME} ($@)"
        fi

        ${RMMOD} ${NAME} || { ERROR="Failed to unload ${NAME}"; return 1; }

        return 0
}

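#
# Unload the ZFS module stack in the reverse of the load order,
# skipping any module which is still in use. When DUMP_LOG is set
# the SPL debug log is dumped before the spl module is unloaded.
#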
unload_modules() {
        local MODULES_REVERSE=( $(echo ${MODULES[@]} |
                ${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )

        for MOD in ${MODULES_REVERSE[*]}; do
                local NAME=`basename ${MOD} .ko`
                local USE_COUNT=`${LSMOD} |
                        egrep "^${NAME} "| ${AWK} '{print $3}'`

                if [ "${USE_COUNT}" = 0 ] ; then

                        if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then
                                spl_dump_log
                        fi

                        unload_module ${MOD} || return 1
                fi
        done

        if [ ${VERBOSE} ]; then
                echo "Successfully unloaded ZFS module stack"
        fi

        return 0
}

#
# Check that the loopback utilities are installed.
#
check_loop_utils() {
        test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
}

#
# Find and return an unused loopback device.
#
unused_loop_device() {
        for DEVICE in `ls -1 /dev/loop* 2>/dev/null`; do
                ${LOSETUP} ${DEVICE} &>/dev/null
                if [ $? -ne 0 ]; then
                        echo ${DEVICE}
                        return
                fi
        done

        die "Error: Unable to find unused loopback device"
}

#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours. However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' in the name. So any damage we might
# do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
        local TMP_FILE=`mktemp`

        ${LOSETUP} -a | tr -d '()' >${TMP_FILE}
        ${AWK} -F":" -v losetup="$LOSETUP" \
                '/zpool/ { system(losetup" -d "$1) }' ${TMP_FILE}
        ${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}

        rm -f ${TMP_FILE}
}

#
# Destroy the passed loopback devices; use this when you know the
# names of the loopback devices. The FILES variable is expected to
# be set by the caller to the backing files to remove.
#
destroy_loop_devices() {
        local LODEVICES="$1"

        msg "Destroying ${LODEVICES}"
        ${LOSETUP} -d ${LODEVICES} || \
                die "Error $? destroying ${LODEVICES} loopback"

        rm -f ${FILES}
        return 0
}

#
# Check that the mdadm utilities are installed.
#
check_md_utils() {
        test -f ${MDADM} || die "${MDADM} utility must be installed"
        test -f ${PARTED} || die "${PARTED} utility must be installed"
}

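#
# Check whether md devices on this system are partitionable by
# building a single-disk faulty md array over a loopback device
# and attempting to reread its partition table. Returns 0 on
# success.
#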
check_md_partitionable() {
        local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
        local LODEVICE=`unused_loop_device`
        local MDDEVICE=`unused_md_device`
        local RESULT=1

        check_md_utils

        rm -f ${LOFILE}
        dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
                &>/dev/null || return ${RESULT}

        msg "Creating ${LODEVICE} using ${LOFILE}"
        ${LOSETUP} ${LODEVICE} ${LOFILE}
        if [ $? -ne 0 ]; then
                rm -f ${LOFILE}
                return ${RESULT}
        fi

        msg "Creating ${MDDEVICE} using ${LODEVICE}"
        ${MDADM} --build ${MDDEVICE} --level=faulty \
                --raid-devices=1 ${LODEVICE} &>/dev/null
        if [ $? -ne 0 ]; then
                destroy_loop_devices ${LODEVICE}
                rm -f ${LOFILE}
                return ${RESULT}
        fi
        wait_udev ${MDDEVICE} 30

        ${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null
        RESULT=$?

        destroy_md_devices ${MDDEVICE}
        destroy_loop_devices ${LODEVICE}
        rm -f ${LOFILE}

        return ${RESULT}
}

#
# Find and return an unused md device.
#
unused_md_device() {
        for (( i=0; i<32; i++ )); do
                MDDEVICE=md${i}

                # Skip active devices in /proc/mdstat.
                grep -q "${MDDEVICE} " /proc/mdstat && continue

                # Device doesn't exist, use it.
                if [ ! -e /dev/${MDDEVICE} ]; then
                        echo /dev/${MDDEVICE}
                        return
                fi

                # Device exists but may not be in use.
                if [ -b /dev/${MDDEVICE} ]; then
                        ${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
                        if [ $? -eq 1 ]; then
                                echo /dev/${MDDEVICE}
                                return
                        fi
                fi
        done

        die "Error: Unable to find unused md device"
}

#
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours. However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super
# block, so you should be able to reconstruct them.
#
cleanup_md_devices() {
        destroy_md_devices "`ls /dev/md* 2>/dev/null | grep -v p`"
        udev_trigger
}

#
# Destroy the passed md devices; use this when you know the names
# of the md devices.
#
destroy_md_devices() {
        local MDDEVICES="$1"

        msg "Destroying ${MDDEVICES}"
        for MDDEVICE in ${MDDEVICES}; do
                ${MDADM} --stop ${MDDEVICE} &>/dev/null
                ${MDADM} --remove ${MDDEVICE} &>/dev/null
                ${MDADM} --detail ${MDDEVICE} &>/dev/null
        done

        return 0
}

#
# Check that the scsi utilities are installed.
#
check_sd_utils() {
        ${INFOMOD} scsi_debug &>/dev/null || die "scsi_debug module required"
        test -f ${LSSCSI} || die "${LSSCSI} utility must be installed"
}

#
# Rescan the scsi bus for scsi_debug devices. It is preferable to use
# the scsi-rescan tool if it is installed, but if it's not we can fall
# back to removing and re-adding the device manually. This rescan will
# only affect the first scsi_debug device if scsi-rescan is missing.
#
scsi_rescan() {
        local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"

        if [ -f ${SCSIRESCAN} ]; then
                ${SCSIRESCAN} --forcerescan --remove &>/dev/null
        else
                local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
                local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
                echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
                udev_trigger
                echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
                udev_trigger
        fi
}

#
# Trigger udev and wait for it to settle.
#
udev_trigger() {
        if [ -f ${UDEVADM} ]; then
                ${UDEVADM} trigger --action=change --subsystem-match=block
                ${UDEVADM} settle
        else
                /sbin/udevtrigger
                /sbin/udevsettle
        fi
}

#
# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/zpool/<CHANNEL><RANK>
# disk mapping. In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n. A CHANNEL should identify a group of RANKS which are all
# attached to a single controller; each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
# given a known hardware configuration.
#
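# For example, with this mapping a hypothetical 2-channel by
# 2-rank configuration would resolve as follows:
#
#   udev_cr2d 1 1         # -> a1 (channel 1, rank 1)
#   udev_cr2d 2 1         # -> b1 (channel 2, rank 1)
#   udev_raid0_setup 2 2  # RAID0S=(${DEVDIR}/a1 ${DEVDIR}/b1
#                         #         ${DEVDIR}/a2 ${DEVDIR}/b2)
#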
udev_setup() {
        local SRC_PATH=$1

        # When running in-tree manually construct symlinks in-tree to
        # the proper devices. Symlinks are installed for all entries
        # in the config file regardless of whether that device actually
        # exists. When installed as a package udev can be relied on for
        # this and it will only create links for devices which exist.
        if [ ${INTREE} ]; then
                local SAVED_PWD=`pwd`
                mkdir -p ${DEVDIR}/
                cd ${DEVDIR}/
                ${AWK} '!/^#/ && /./ { system( \
                        "ln -f -s /dev/disk/by-path/"$2" "$1";" \
                        "ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
                        "ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
                        ) }' $SRC_PATH
                cd ${SAVED_PWD}
        else
                DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
                DST_PATH=/etc/zfs/${DST_FILE}

                if [ -e ${DST_PATH} ]; then
                        die "Error: Config ${DST_PATH} already exists"
                fi

                cp ${SRC_PATH} ${DST_PATH}
                udev_trigger
        fi

        return 0
}

udev_cleanup() {
        local SRC_PATH=$1

        if [ ${INTREE} ]; then
                local SAVED_PWD=`pwd`
                cd ${DEVDIR}/
                ${AWK} '!/^#/ && /./ { system( \
                        "rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
                cd ${SAVED_PWD}
        fi

        return 0
}

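#
# Map a numeric channel/rank pair to its <CHANNEL><RANK> device
# name, e.g. channel 1, rank 2 maps to "a2".
#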
udev_cr2d() {
        local CHANNEL=`echo "obase=16; $1+96" | bc`
        local RANK=$2

        printf "\x${CHANNEL}${RANK}"
}

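#
# Fill the global RAID0S array with one device per channel/rank,
# suitable for creating a striped pool.
#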
udev_raid0_setup() {
        local RANKS=$1
        local CHANNELS=$2
        local IDX=0

        RAID0S=()
        for RANK in `seq 1 ${RANKS}`; do
                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAID0S[${IDX}]="${DEVDIR}/${DISK}"
                        let IDX=IDX+1
                done
        done

        return 0
}

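#
# Fill the global RAID10S array with mirrored channel pairs, one
# "mirror <disk1> <disk2>" entry per pair of channels in each rank.
#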
udev_raid10_setup() {
        local RANKS=$1
        local CHANNELS=$2
        local IDX=0

        RAID10S=()
        for RANK in `seq 1 ${RANKS}`; do
                for CHANNEL1 in `seq 1 2 ${CHANNELS}`; do
                        let CHANNEL2=CHANNEL1+1
                        DISK1=`udev_cr2d ${CHANNEL1} ${RANK}`
                        DISK2=`udev_cr2d ${CHANNEL2} ${RANK}`
                        GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
                        RAID10S[${IDX}]="mirror ${GROUP}"
                        let IDX=IDX+1
                done
        done

        return 0
}

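#
# Fill the global RAIDZS array with one single-parity "raidz"
# group per rank, each spanning all channels.
#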
udev_raidz_setup() {
        local RANKS=$1
        local CHANNELS=$2

        RAIDZS=()
        for RANK in `seq 1 ${RANKS}`; do
                RAIDZ=("raidz")

                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
                done

                RAIDZS[${RANK}]="${RAIDZ[*]}"
        done

        return 0
}

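#
# Fill the global RAIDZ2S array with one double-parity "raidz2"
# group per rank, each spanning all channels.
#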
udev_raidz2_setup() {
        local RANKS=$1
        local CHANNELS=$2

        RAIDZ2S=()
        for RANK in `seq 1 ${RANKS}`; do
                RAIDZ2=("raidz2")

                for CHANNEL in `seq 1 ${CHANNELS}`; do
                        DISK=`udev_cr2d ${CHANNEL} ${RANK}`
                        RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
                done

                RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
        done

        return 0
}

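#
# Test runner helpers. run_test() consults the TESTS_RUN and
# TESTS_SKIP lists to decide whether test_<num> is executed or
# skipped; for example, a hypothetical "TESTS_RUN='1 3' ./my-test.sh"
# would run only tests 1 and 3.
#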
run_one_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
        test_${TEST_NUM}
}

skip_one_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"
        skip
}

run_test() {
        local TEST_NUM=$1
        local TEST_NAME=$2

        for i in ${TESTS_SKIP[@]}; do
                if [[ $i == ${TEST_NUM} ]] ; then
                        skip_one_test ${TEST_NUM} "${TEST_NAME}"
                        return 0
                fi
        done

        if [ "${TESTS_RUN[0]}" = "*" ]; then
                run_one_test ${TEST_NUM} "${TEST_NAME}"
        else
                for i in ${TESTS_RUN[@]}; do
                        if [[ $i == ${TEST_NUM} ]] ; then
                                run_one_test ${TEST_NUM} "${TEST_NAME}"
                                return 0
                        fi
                done

                skip_one_test ${TEST_NUM} "${TEST_NAME}"
        fi
}

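#
# Trigger udev and wait up to DELAY seconds for DEVICE to appear,
# returning 1 on timeout.
#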
wait_udev() {
        local DEVICE=$1
        local DELAY=$2
        local COUNT=0

        udev_trigger
        while [ ! -e ${DEVICE} ]; do
                if [ ${COUNT} -gt ${DELAY} ]; then
                        return 1
                fi

                let COUNT=${COUNT}+1
                sleep 1
        done

        return 0
}

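#
# Helpers for the kernel stack tracer: stack_clear() enables the
# tracer and resets the recorded high-water mark, stack_check()
# warns and prints the stack trace when the recorded maximum
# exceeds STACK_LIMIT bytes.
#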
stack_clear() {
        local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
        local STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled

        if [ -e $STACK_MAX_SIZE ]; then
                echo 1 >$STACK_TRACER_ENABLED
                echo 0 >$STACK_MAX_SIZE
        fi
}

stack_check() {
        local STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
        local STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
        local STACK_LIMIT=7000

        if [ -e $STACK_MAX_SIZE ]; then
                STACK_SIZE=`cat $STACK_MAX_SIZE`

                if [ $STACK_SIZE -ge $STACK_LIMIT ]; then
                        echo
                        echo "Warning: max stack size $STACK_SIZE bytes"
                        cat $STACK_TRACE
                fi
        fi
}