]> git.proxmox.com Git - mirror_zfs-debian.git/blame - scripts/common.sh.in
Unconditionally load core kernel modules
[mirror_zfs-debian.git] / scripts / common.sh.in
CommitLineData
c9c0d073
BB
#!/bin/bash
#
# Common support functions for testing scripts.  If a script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used.  If no script-config can be found then the
# installed kernel modules and utilities will be used.

basedir="$(dirname "$0")"

# Source the in-tree configuration when present; otherwise fall back
# to the default module lists for an installed system.
SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
. "${basedir}/../${SCRIPT_CONFIG}"
else
KERNEL_MODULES=(zlib_deflate zlib_inflate)
MODULES=(spl splat zavl znvpair zunicode zcommon zfs)
fi

# Global state shared by the helper functions below.
PROG="<define PROG>"
CLEANUP=
VERBOSE=
VERBOSE_FLAG=
FORCE=
FORCE_FLAG=
DUMP_LOG=
ERROR=
RAID0S=()
RAID10S=()
RAIDZS=()
RAIDZ2S=()
TESTS_RUN=${TESTS_RUN:-'*'}
TESTS_SKIP=${TESTS_SKIP:-}

# Autoconf-substituted installation paths (this file is a .sh.in template).
prefix=@prefix@
exec_prefix=@exec_prefix@
libexecdir=@libexecdir@
pkglibexecdir=${libexecdir}/@PACKAGE@
bindir=@bindir@
sbindir=@sbindir@

ETCDIR=${ETCDIR:-/etc}
DEVDIR=${DEVDIR:-/dev/disk/zpool}
ZPOOLDIR=${ZPOOLDIR:-${pkglibexecdir}/zpool-config}
ZPIOSDIR=${ZPIOSDIR:-${pkglibexecdir}/zpios-test}
ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkglibexecdir}/zpios-profile}

# ZFS userland utilities; overridable from the environment.
ZDB=${ZDB:-${sbindir}/zdb}
ZFS=${ZFS:-${sbindir}/zfs}
ZINJECT=${ZINJECT:-${sbindir}/zinject}
ZPOOL=${ZPOOL:-${sbindir}/zpool}
ZPOOL_ID=${ZPOOL_ID:-${bindir}/zpool_id}
ZTEST=${ZTEST:-${sbindir}/ztest}
ZPIOS=${ZPIOS:-${sbindir}/zpios}

COMMON_SH=${COMMON_SH:-${pkglibexecdir}/common.sh}
ZFS_SH=${ZFS_SH:-${pkglibexecdir}/zfs.sh}
ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkglibexecdir}/zpool-create.sh}
ZPIOS_SH=${ZPIOS_SH:-${pkglibexecdir}/zpios.sh}
ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkglibexecdir}/zpios-survey.sh}

# System utilities; overridable from the environment.
LDMOD=${LDMOD:-/sbin/modprobe}
LSMOD=${LSMOD:-/sbin/lsmod}
RMMOD=${RMMOD:-/sbin/rmmod}
INFOMOD=${INFOMOD:-/sbin/modinfo}
LOSETUP=${LOSETUP:-/sbin/losetup}
MDADM=${MDADM:-/sbin/mdadm}
PARTED=${PARTED:-/sbin/parted}
BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
SYSCTL=${SYSCTL:-/sbin/sysctl}
UDEVADM=${UDEVADM:-/sbin/udevadm}
AWK=${AWK:-/usr/bin/awk}

# ANSI color escapes used by the pass()/fail()/skip() banners.
COLOR_BLACK="\033[0;30m"
COLOR_DK_GRAY="\033[1;30m"
COLOR_BLUE="\033[0;34m"
COLOR_LT_BLUE="\033[1;34m"
COLOR_GREEN="\033[0;32m"
COLOR_LT_GREEN="\033[1;32m"
COLOR_CYAN="\033[0;36m"
COLOR_LT_CYAN="\033[1;36m"
COLOR_RED="\033[0;31m"
COLOR_LT_RED="\033[1;31m"
COLOR_PURPLE="\033[0;35m"
COLOR_LT_PURPLE="\033[1;35m"
COLOR_BROWN="\033[0;33m"
COLOR_YELLOW="\033[1;33m"
COLOR_LT_GRAY="\033[0;37m"
COLOR_WHITE="\033[1;37m"
COLOR_RESET="\033[0m"
c9c0d073
BB
# Print an error message prefixed with the program name to stderr and
# terminate with status 1.  Escape sequences in the message (such as
# the \n used when building ${ERROR}) are expanded.
die() {
	printf "%b\n" "${PROG}: $1" >&2
	exit 1
}
96
# Print the arguments only when verbose mode (${VERBOSE}) is enabled.
msg() {
	[ ${VERBOSE} ] || return 0
	echo "$@"
}
102
# Print a green "Pass" banner for the current test.
pass() {
	printf "%b\n" "${COLOR_GREEN}Pass${COLOR_RESET}"
}
106
# Print a red "Fail" banner with the error code, then exit with that
# same code ($1).
fail() {
	printf "%b\n" "${COLOR_RED}Fail${COLOR_RESET} ($1)"
	exit $1
}
111
325f0235
BB
# Print a brown "Skip" banner for the current test.
skip() {
	printf "%b\n" "${COLOR_BROWN}Skip${COLOR_RESET}"
}
115
c9c0d073
BB
# Force the SPL debug log to be dumped and post-process it into a file.
#
# Writes kernel.spl.debug.dump via sysctl to trigger the dump, scrapes
# the last dmesg line for the dump file name (5th space-separated
# field), then converts it to <name>.log with the in-tree spl utility.
# NOTE(review): assumes ${SPLBUILD} is provided by the sourced
# script-config and that the dmesg tail is the dump notice -- confirm
# before using outside the in-tree harness.  Always returns 0.
spl_dump_log() {
	${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
	local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
	${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
	echo
	echo "Dumped debug log: ${NAME}.log"
	tail -n1 ${NAME}.log
	echo
	return 0
}
126
# Verify the ZFS module stack is ready to be loaded: no module in
# ${MODULES} may already be loaded, and every one must be visible to
# modinfo.  On failure sets ${ERROR} with a description and returns 1.
check_modules() {
	local LOADED_MODULES=()
	local MISSING_MODULES=()

	for MOD in ${MODULES[*]}; do
		local NAME=`basename $MOD .ko`

		if ${LSMOD} | egrep -q "^${NAME}"; then
			LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
		fi

		# Previously '[ ${INFOMOD} ${MOD} 2>/dev/null ]', a malformed
		# test that always failed silently so missing modules were
		# never reported.  Run modinfo and treat failure as missing.
		if ! ${INFOMOD} ${MOD} &>/dev/null; then
			MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
		fi
	done

	if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
		ERROR="Unload these modules with '${PROG} -u':\n"
		ERROR="${ERROR}${LOADED_MODULES[*]}"
		return 1
	fi

	if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
		ERROR="The following modules can not be found,"
		ERROR="${ERROR} ensure your source trees are built:\n"
		ERROR="${ERROR}${MISSING_MODULES[*]}"
		return 1
	fi

	return 0
}
158
# Load a single kernel module; $1 is the module name or .ko path and
# any remaining arguments are passed through as module options.
# Sets ${ERROR} and returns 1 on failure.
load_module() {
	local NAME=`basename $1 .ko`

	if [ ${VERBOSE} ]; then
		echo "Loading ${NAME} ($@)"
	fi

	# The old 'cmd || ERROR=... return 1' form made the assignment a
	# temporary environment prefix of 'return', so ${ERROR} was not
	# reliably visible to the caller; group the commands instead.
	${LDMOD} $* &>/dev/null || { ERROR="Failed to load $1"; return 1; }

	return 0
}
170
# Load the core kernel prerequisites and then the full ZFS module
# stack in ${MODULES} order.
#
# Optional arguments of the form "<module>=<options>" supply load-time
# options for the matching module.  Returns 1 if any ZFS module fails
# to load; prerequisite (zlib) failures are ignored since those
# modules may be built in or already loaded.
load_modules() {
	mkdir -p /etc/zfs

	# Unconditionally load the core kernel modules first.
	for MOD in ${KERNEL_MODULES[*]}; do
		load_module ${MOD}
	done

	for MOD in ${MODULES[*]}; do
		local NAME=`basename ${MOD} .ko`
		local VALUE=

		# Pick out the caller-supplied "<name>=<options>" entry, if
		# any, matching this module.
		for OPT in "$@"; do
			OPT_NAME=`echo ${OPT} | cut -f1 -d'='`

			if [ ${NAME} = "${OPT_NAME}" ]; then
				VALUE=`echo ${OPT} | cut -f2- -d'='`
			fi
		done

		load_module ${MOD} ${VALUE} || return 1
	done

	if [ ${VERBOSE} ]; then
		echo "Successfully loaded ZFS module stack"
	fi

	return 0
}
199
# Unload a single kernel module by name ($1, .ko suffix optional).
# Sets ${ERROR} and returns 1 on failure.
unload_module() {
	local NAME=`basename $1 .ko`

	if [ ${VERBOSE} ]; then
		echo "Unloading ${NAME} ($@)"
	fi

	# The old 'cmd || ERROR=... return 1' form made the assignment a
	# temporary environment prefix of 'return', so ${ERROR} was not
	# reliably visible to the caller; group the commands instead.
	${RMMOD} ${NAME} || { ERROR="Failed to unload ${NAME}"; return 1; }

	return 0
}
211
# Unload the ZFS module stack in the reverse of load order.
#
# Modules with a non-zero reference count are silently left loaded.
# When ${DUMP_LOG} is set, the SPL debug log is dumped just before the
# spl module itself is removed.  Returns 1 on any rmmod failure.
unload_modules() {
	# Reverse ${MODULES[@]} so dependent modules are removed before
	# the modules they depend on.
	local MODULES_REVERSE=( $(echo ${MODULES[@]} |
		${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )

	for MOD in ${MODULES_REVERSE[*]}; do
		local NAME=`basename ${MOD} .ko`
		# Third lsmod column is the reference count.
		local USE_COUNT=`${LSMOD} |
				egrep "^${NAME} "| ${AWK} '{print $3}'`

		if [ "${USE_COUNT}" = 0 ] ; then

			if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then
				spl_dump_log
			fi

			unload_module ${MOD} || return 1
		fi
	done

	if [ ${VERBOSE} ]; then
		echo "Successfully unloaded ZFS module stack"
	fi

	return 0
}
237
0ee8118b
BB
#
# Check that the losetup utility is installed.
# (Comment previously said "mdadm" -- a copy/paste error; this
# function only checks for losetup.)
#
check_loop_utils() {
	test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
}
244
245
#
# Find and return (echo) the first loopback device with no file
# attached; dies if every device is busy.
#
unused_loop_device() {
	# Iterate with a shell glob rather than parsing 'ls' output; skip
	# the literal pattern when no loop devices exist at all.
	for DEVICE in /dev/loop*; do
		[ -e "${DEVICE}" ] || continue

		# 'losetup <dev>' fails when the device has no backing file,
		# which means it is free for us to use.
		${LOSETUP} ${DEVICE} &>/dev/null
		if [ $? -ne 0 ]; then
			echo ${DEVICE}
			return
		fi
	done

	die "Error: Unable to find unused loopback device"
}
260
#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours.  However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' in the name.  So any damage we might
# do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
	local TMP_FILE=`mktemp`

	# Snapshot 'losetup -a' output: "<device>: [...] (<backing file>)".
	${LOSETUP} -a | tr -d '()' >${TMP_FILE}
	# Detach each zpool-related device.  Use the configured ${LOSETUP}
	# binary passed via -v; the old code ignored the awk variable and
	# invoked a literal 'losetup' found in ${PATH}.
	${AWK} -F":" -v losetup="$LOSETUP" \
	    '/zpool/ { system(losetup" -d "$1) }' ${TMP_FILE}
	# Remove the now-detached backing files (third whitespace field).
	${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}

	rm -f ${TMP_FILE}
}
278
0ee8118b
BB
#
# Destroy the passed loopback devices, this is used when you know
# the names of the loopback devices.
#
destroy_loop_devices() {
	local LODEVICES="$1"

	msg "Destroying ${LODEVICES}"
	# The old error message referenced ${FILE} and ${DEVICE}, neither
	# of which is set in this function; report what we acted on.
	${LOSETUP} -d ${LODEVICES} || \
		die "Error $? destroying ${LODEVICES} loopback"

	# NOTE(review): ${FILES} is a global presumably set by the
	# zpool-config scripts, not a parameter of this function --
	# confirm callers always define it before this cleanup runs.
	rm -f ${FILES}
	return 0
}
293
#
# Check that the mdadm and parted utilities are installed.
#
check_md_utils() {
	[ -f ${MDADM} ] || die "${MDADM} utility must be installed"
	[ -f ${PARTED} ] || die "${PARTED} utility must be installed"
}
301
# Probe whether md devices on this system support partition rescans.
#
# Builds a throw-away "faulty" md array on top of a 16 MiB sparse
# loopback file and asks blockdev to re-read its partition table.
# Returns 0 when the rereadpt succeeds, non-zero otherwise (including
# when any setup step fails); temporary devices and the backing file
# are cleaned up on every exit path.
check_md_partitionable() {
	local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
	local LODEVICE=`unused_loop_device`
	local MDDEVICE=`unused_md_device`
	local RESULT=1

	check_md_utils

	# Recreate LOFILE as a 16 MiB sparse file (count=0, seek=16).
	rm -f ${LOFILE}
	dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
		&>/dev/null || return ${RESULT}

	msg "Creating ${LODEVICE} using ${LOFILE}"
	${LOSETUP} ${LODEVICE} ${LOFILE}
	if [ $? -ne 0 ]; then
		rm -f ${LOFILE}
		return ${RESULT}
	fi

	msg "Creating ${MDDEVICE} using ${LODEVICE}"
	${MDADM} --build ${MDDEVICE} --level=faulty \
		--raid-devices=1 ${LODEVICE} &>/dev/null
	if [ $? -ne 0 ]; then
		destroy_loop_devices ${LODEVICE}
		rm -f ${LOFILE}
		return ${RESULT}
	fi
	wait_udev ${MDDEVICE} 30

	# The actual capability check: non-zero means no partition support.
	${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null
	RESULT=$?

	destroy_md_devices ${MDDEVICE}
	destroy_loop_devices ${LODEVICE}
	rm -f ${LOFILE}

	return ${RESULT}
}
340
#
# Find and return (echo) an unused /dev/md<N> device; dies when all
# 32 candidates are busy.
#
unused_md_device() {
	for (( i=0; i<32; i++ )); do
		MDDEVICE=md${i}

		# Skip active devices listed in /proc/mdstat; silence grep
		# when /proc/mdstat does not exist.
		grep -q "${MDDEVICE} " /proc/mdstat 2>/dev/null && continue

		# Device doesn't exist, use it.  (Fixed garbled path test:
		# was '$/dev/{MDDEVICE}', which never existed, so the first
		# non-mdstat device was always returned even when in use.)
		if [ ! -e /dev/${MDDEVICE} ]; then
			echo /dev/${MDDEVICE}
			return
		fi

		# Device exists but may not be in use; mdadm --detail exits
		# with 1 for a block device holding no array.
		if [ -b /dev/${MDDEVICE} ]; then
			${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
			if [ $? -eq 1 ]; then
				echo /dev/${MDDEVICE}
				return
			fi
		fi
	done

	die "Error: Unable to find unused md device"
}
369
#
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours.  However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super block
# so you should be able to reconstruct them.
#
cleanup_md_devices() {
	# Whole-disk md nodes only; 'grep -v p' filters out partition
	# nodes such as /dev/md0p1.
	destroy_md_devices "`ls /dev/md* 2>/dev/null | grep -v p`"
	udev_trigger
}
381
#
# Destroy the passed md devices, this is used when you know
# the names of the md devices.  Errors from mdadm are ignored;
# always returns 0.
#
destroy_md_devices() {
	local MDDEVICES="$1"

	msg "Destroying ${MDDEVICES}"
	for MDDEVICE in ${MDDEVICES}; do
		# Stop the array, remove the device node, then poke it once
		# more with --detail; all output is discarded.
		for ACTION in --stop --remove --detail; do
			${MDADM} ${ACTION} ${MDDEVICE} &>/dev/null
		done
	done

	return 0
}
398
#
# Check that the scsi_debug module and the lsscsi utility are
# available on this system.
#
check_sd_utils() {
	${INFOMOD} scsi_debug &>/dev/null || die "scsi_debug module required"
	[ -f ${LSSCSI} ] || die "${LSSCSI} utility must be installed"
}
406
#
# Rescan the scsi bus for scsi_debug devices.  It is preferable to use the
# scsi-rescan tool if it is installed, but if it's not we can fall back to
# removing and readding the device manually.  This rescan will only affect
# the first scsi_debug device if scsi-rescan is missing.
#
scsi_rescan() {
	local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"

	if [ -f ${SCSIRESCAN} ]; then
		${SCSIRESCAN} --forcerescan --remove &>/dev/null
	else
		# Locate the first scsi_debug device's [H:C:T:L] id from
		# lsscsi, delete it via sysfs, then ask its host to rescan.
		local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
		local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
		echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
		udev_trigger
		echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
		udev_trigger
	fi
}
427
#
# Trigger udev and wait for its event queue to settle.
#
udev_trigger() {
	if [ ! -f ${UDEVADM} ]; then
		# Pre-udevadm systems ship separate helper binaries.
		/sbin/udevtrigger
		/sbin/udevsettle
		return
	fi

	${UDEVADM} trigger
	${UDEVADM} settle
}
440
c9c0d073
BB
#
# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/zpool/<CHANNEL><RANK>
# disk mapping.  In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n.  A CHANNEL should identify a group of RANKS which are all
# attached to a single controller, each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
# given a known hardware configuration.
#
udev_setup() {
	local SRC_PATH=$1

	# When running in tree manually construct symlinks in tree to
	# the proper devices.  Symlinks are installed for all entries
	# in the config file regardless of if that device actually
	# exists.  When installed as a package udev can be relied on for
	# this and it will only create links for devices which exist.
	if [ ${INTREE} ]; then
		# Save the cwd in a local variable; the old code stashed it
		# in PWD, which bash itself rewrites on 'cd', so the final
		# 'cd ${PWD}' never returned to the original directory.
		local SAVED_PWD=`pwd`
		mkdir -p ${DEVDIR}/
		cd ${DEVDIR}/
		${AWK} '!/^#/ && /./ { system( \
		    "ln -f -s /dev/disk/by-path/"$2" "$1";" \
		    "ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
		    "ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
		    ) }' $SRC_PATH
		cd ${SAVED_PWD}
	else
		DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
		DST_PATH=/etc/zfs/${DST_FILE}

		if [ -e ${DST_PATH} ]; then
			die "Error: Config ${DST_PATH} already exists"
		fi

		cp ${SRC_PATH} ${DST_PATH}
		udev_trigger
	fi

	return 0
}
483
# Remove the in-tree symlinks created by udev_setup for every entry
# in the config file ${1}.  No-op for installed (non-INTREE) runs.
udev_cleanup() {
	local SRC_PATH=$1

	if [ ${INTREE} ]; then
		# Save the cwd in a local; the old code stashed it in PWD,
		# which bash rewrites on 'cd', so the final 'cd ${PWD}'
		# never returned to the original directory.
		local SAVED_PWD=`pwd`
		cd ${DEVDIR}/
		# Drop the disk link plus its partition-1/9 links.
		${AWK} '!/^#/ && /./ { system( \
		    "rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
		cd ${SAVED_PWD}
	fi

	return 0
}
497
# Map a (channel, rank) pair to its /dev/disk/zpool disk name:
# channel N becomes the Nth lower-case letter and the rank is
# appended, e.g. (1,2) -> "a2".
udev_cr2d() {
	# Use builtin arithmetic and printf instead of forking 'bc';
	# \xNN escapes are case-insensitive so output is unchanged.
	local CHANNEL=`printf '%x' $(($1 + 96))`
	local RANK=$2

	printf "\x${CHANNEL}${RANK}"
}
504
# Populate the global RAID0S array with one ${DEVDIR}/<disk> entry
# per channel/rank combination ($1 ranks x $2 channels).
udev_raid0_setup() {
	local RANKS=$1
	local CHANNELS=$2
	local IDX=0

	RAID0S=()
	for (( RANK=1; RANK<=${RANKS}; RANK++ )); do
		for (( CHANNEL=1; CHANNEL<=${CHANNELS}; CHANNEL++ )); do
			DISK=`udev_cr2d ${CHANNEL} ${RANK}`
			RAID0S[${IDX}]="${DEVDIR}/${DISK}"
			let IDX=IDX+1
		done
	done

	return 0
}
521
# Populate the global RAID10S array with "mirror <diskA> <diskB>"
# groups, pairing adjacent channels (1,2), (3,4), ... for each rank.
udev_raid10_setup() {
	local RANKS=$1
	local CHANNELS=$2
	local IDX=0

	RAID10S=()
	for (( RANK=1; RANK<=${RANKS}; RANK++ )); do
		for (( CHANNEL1=1; CHANNEL1<=${CHANNELS}; CHANNEL1+=2 )); do
			let CHANNEL2=CHANNEL1+1
			DISK1=`udev_cr2d ${CHANNEL1} ${RANK}`
			DISK2=`udev_cr2d ${CHANNEL2} ${RANK}`
			GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
			RAID10S[${IDX}]="mirror ${GROUP}"
			let IDX=IDX+1
		done
	done

	return 0
}
541
# Populate the global RAIDZS array with one "raidz <disks...>" group
# per rank; groups are stored starting at index 1.
udev_raidz_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZS=()
	for (( RANK=1; RANK<=${RANKS}; RANK++ )); do
		RAIDZ=("raidz")

		for (( CHANNEL=1; CHANNEL<=${CHANNELS}; CHANNEL++ )); do
			DISK=`udev_cr2d ${CHANNEL} ${RANK}`
			RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZS[${RANK}]="${RAIDZ[*]}"
	done

	return 0
}
560
# Populate the global RAIDZ2S array with one "raidz2 <disks...>" group
# per rank; groups are stored starting at index 1.
udev_raidz2_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAIDZ2S=()
	for (( RANK=1; RANK<=${RANKS}; RANK++ )); do
		RAIDZ2=("raidz2")

		for (( CHANNEL=1; CHANNEL<=${CHANNELS}; CHANNEL++ )); do
			DISK=`udev_cr2d ${CHANNEL} ${RANK}`
			RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
		done

		RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
	done

	return 0
}
325f0235
BB
579
# Print the test banner (number and name, left-aligned) then invoke
# the numbered test function test_<N>; its status is returned.
run_one_test() {
	local NUM=$1
	local NAME=$2

	printf "%-4d %-34s " ${NUM} "${NAME}"
	test_${NUM}
}
587
# Print the test banner (number and name, left-aligned) followed by
# the "Skip" marker.
skip_one_test() {
	local NUM=$1
	local NAME=$2

	printf "%-4d %-34s " ${NUM} "${NAME}"
	skip
}
595
# Decide whether to run or skip test number ${1} ("${2}" is its
# display name).  TESTS_SKIP takes precedence over TESTS_RUN, and a
# TESTS_RUN whose first entry is '*' runs everything not skipped.
run_test() {
	local NUM=$1
	local NAME=$2

	# Explicit skip list wins.
	for PATTERN in ${TESTS_SKIP[@]}; do
		if [[ ${PATTERN} == ${NUM} ]] ; then
			skip_one_test ${NUM} "${NAME}"
			return 0
		fi
	done

	# '*' in slot zero means "run everything not skipped"; propagate
	# the test's own exit status in that case.
	if [ "${TESTS_RUN[0]}" = "*" ]; then
		run_one_test ${NUM} "${NAME}"
		return
	fi

	for PATTERN in ${TESTS_RUN[@]}; do
		if [[ ${PATTERN} == ${NUM} ]] ; then
			run_one_test ${NUM} "${NAME}"
			return 0
		fi
	done

	skip_one_test ${NUM} "${NAME}"
}
2c4834f8
BB
620
# Wait up to roughly ${2} seconds for device node ${1} to appear
# after poking udev.  Returns 0 when the node exists, 1 on timeout.
wait_udev() {
	local DEVICE=$1
	local DELAY=$2
	local COUNT=0

	udev_trigger
	# Poll once per second until the node shows up or we time out.
	until [ -e ${DEVICE} ]; do
		if [ ${COUNT} -gt ${DELAY} ]; then
			return 1
		fi

		let COUNT=${COUNT}+1
		sleep 1
	done

	return 0
}