# Platform detection: pick GNU tools and the kernel core-pattern sysctl
# name, and compute side-by-side diff options sized to the terminal.
if [ `uname` = FreeBSD ]; then
    SED=gsed
    DIFFCOLOPTS=""
    KERNCORE="kern.corefile"
else
    SED=sed
    # Terminal width for side-by-side diffs; empty when stdin is not a tty.
    termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
    termwidth="-W ${termwidth}"
    DIFFCOLOPTS="-y $termwidth"
    KERNCORE="kernel.core_pattern"
fi
EXTRA_OPTS=""
##
# Kill all daemons for the test environment, collect any core files and
# logs, then remove the environment directory and any btrfs
# subvolumes that relate to it.
#
# @param dir path name of the environment
# @param dumplogs pass "1" to dump logs otherwise it will only if cores found
# @return 0 on success, 1 on error (core files were found)
#
function teardown() {
    local dir=$1
    local dumplogs=$2
    kill_daemons $dir KILL
    if [ `uname` != FreeBSD ] \
        && [ $(stat -f -c '%T' .) == "btrfs" ]; then
        __teardown_btrfs $dir
    fi
    local cores="no"
    local pattern="$(sysctl -n $KERNCORE)"
    # See if we have apport core handling
    if [ "${pattern:0:1}" = "|" ]; then
        # TODO: Where can we get the dumps?
        # Not sure where the dumps really are so this will look in the CWD
        pattern=""
    fi
    # Local we start with core and teuthology ends with core
    local coredir=$(dirname "$pattern")
    if ls "$coredir" | grep -q '^core\|core$' ; then
        cores="yes"
        if [ -n "$LOCALRUN" ]; then
            mkdir /tmp/cores.$$ 2> /dev/null || true
            # Prefix with the core directory: a bare "mv $i" only works
            # when the CWD happens to be the core directory.
            for i in $(ls "$coredir" | grep '^core\|core$'); do
                mv "$coredir/$i" /tmp/cores.$$
            done
        fi
    fi
    if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
        if [ -n "$LOCALRUN" ]; then
            display_logs $dir
        else
            # Move logs to where Teuthology will archive it
            mkdir -p $TESTDIR/archive/log
            mv $dir/*.log $TESTDIR/archive/log
        fi
    fi
    rm -fr $dir
    rm -rf $(get_asok_dir)
    if [ "$cores" = "yes" ]; then
        echo "ERROR: Failure due to cores found"
        if [ -n "$LOCALRUN" ]; then
            echo "Find saved core files in /tmp/cores.$$"
        fi
        return 1
    fi
    return 0
}
function __teardown_btrfs() {
--id $id \
--mon-osd-full-ratio=.99 \
--mon-data-avail-crit=1 \
+ --mon-data-avail-warn=5 \
--paxos-propose-interval=0.1 \
--osd-crush-chooseleaf-type=0 \
$EXTRA_OPTS \
##
# Recreate the "rbd" pool from scratch and initialize it for rbd use.
# The pool is deleted first so PG_NUM takes effect on the fresh pool.
#
# @return 0 on success, non-zero on error
#
function create_rbd_pool() {
    ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
    create_pool rbd $PG_NUM || return 1
    rbd pool init rbd
}
##
# Create a pool, forwarding all arguments to "ceph osd pool create".
# NOTE(review): the sleep presumably lets the creation settle before
# callers use the pool — confirm whether it is still required.
#
# @param ... arguments passed through to "ceph osd pool create"
#
function create_pool() {
    ceph osd pool create "$@"
    sleep 1
}
+
##
# Delete a pool, repeating the name to confirm the destructive operation.
#
# @param poolname name of the pool to delete
#
function delete_pool() {
    local poolname=$1
    ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
}
+
#######################################################################
function run_mgr() {
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
- stamp=$(get_last_scrub_stamp 2.0)
+ stamp=$(get_last_scrub_stamp 1.0)
test -n "$stamp" || return 1
teardown $dir || return 1
}
#######################################################################
# Evaluate an arithmetic expression via awk so floating point works
# (unlike the shell's integer-only $(( )) arithmetic).
calc() {
    awk "BEGIN{print $*}"
}
+
##
# Return a list of numbers that are increasingly larger and whose
# total is **timeout** seconds. It can be used to have short sleep
local i
local total="0"
i=$first_step
- while test "$(echo $total + $i \<= $timeout | bc -l)" = "1"; do
- echo -n "$i "
- total=$(echo $total + $i | bc -l)
- i=$(echo $i \* 2 | bc -l)
+ while test "$(calc $total + $i \<= $timeout)" = "1"; do
+ echo -n "$(calc $i) "
+ total=$(calc $total + $i)
+ i=$(calc $i \* 2)
done
- if test "$(echo $total \< $timeout | bc -l)" = "1"; then
- echo -n $(echo $timeout - $total | bc -l)
+ if test "$(calc $total \< $timeout)" = "1"; then
+ echo -n "$(calc $timeout - $total) "
fi
$trace && shopt -s -o xtrace
}
# Self-test for get_timeout_delays(): with the awk-based calc() every
# emitted step is followed by a space and fractional values carry a
# leading zero ("0.1", not bc's ".1").
function test_get_timeout_delays() {
    test "$(get_timeout_delays 1)" = "1 " || return 1
    test "$(get_timeout_delays 5)" = "1 2 2 " || return 1
    test "$(get_timeout_delays 6)" = "1 2 3 " || return 1
    test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
    test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1
    test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1
    test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1
    test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1
    test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1
    test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1
    test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1
}
#######################################################################
local -a delays=($(get_timeout_delays $TIMEOUT .1))
local -i loop=0
+ flush_pg_stats || return 1
while test $(get_num_pgs) == 0 ; do
sleep 1
done
#######################################################################
##
-# Wait until the cluster becomes HEALTH_OK again or if it does not make progress
-# for $TIMEOUT seconds.
+# Wait until the cluster has health condition passed as arg
+# again for $TIMEOUT seconds.
#
-# @return 0 if the cluster is HEALTHY, 1 otherwise
+# @param string to grep for in health detail
+# @return 0 if the cluster health matches request, 1 otherwise
#
function wait_for_health() {
local grepstr=$1
done
}
##
# Wait until the cluster becomes HEALTH_OK again or if it does not make progress
# for $TIMEOUT seconds.
#
# @return 0 if the cluster is HEALTHY, 1 otherwise
#
function wait_for_health_ok() {
    wait_for_health "HEALTH_OK" || return 1
}
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
- repair 2.0 || return 1
+ repair 1.0 || return 1
kill_daemons $dir KILL osd || return 1
- ! TIMEOUT=1 repair 2.0 || return 1
+ ! TIMEOUT=1 repair 1.0 || return 1
teardown $dir || return 1
}
#######################################################################
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
- pg_scrub 2.0 || return 1
+ pg_scrub 1.0 || return 1
kill_daemons $dir KILL osd || return 1
- ! TIMEOUT=1 pg_scrub 2.0 || return 1
+ ! TIMEOUT=1 pg_scrub 1.0 || return 1
teardown $dir || return 1
}
local sname=${3:-last_scrub_stamp}
for ((i=0; i < $TIMEOUT; i++)); do
- if test "$last_scrub" != "$(get_last_scrub_stamp $pgid $sname)" ; then
+ if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
return 0
fi
sleep 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
- local pgid=2.0
+ local pgid=1.0
ceph pg repair $pgid
local last_scrub=$(get_last_scrub_stamp $pgid)
wait_for_scrub $pgid "$last_scrub" || return 1
#
##
# Run a command in the background, prefixing each line of its combined
# stdout/stderr with this shell's pid, and append the job's pid to the
# named variable so callers can wait on it later.
#
# @param pid_variable name of the variable collecting background pids
# @param ... command and arguments to execute
#
function run_in_background() {
    local pid_variable=$1
    shift
    # Execute the command and prepend the output with its pid
    # We enforce to return the exit status of the command and not the sed one.
    ("$@" |& sed 's/^/'$$': /'; return "${PIPESTATUS[0]}") >&2 &
    eval "$pid_variable+=\" $!\""
}
##
# Execute a command with its stdout redirected into a file.
#
# @param out destination file for the command's stdout
# @param ... command and arguments to execute
#
function save_stdout {
    local dest=$1
    shift
    "$@" > "$dest"
}
+
function test_run_in_background() {
local pids
run_in_background pids sleep 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
rados -p rbd put obj /etc/group
- flush_pg_stats
+ flush_pg_stats || return 1
local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
raw_bytes_used=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
bytes_used=`ceph df detail --format=json | jq "$jq_filter.bytes_used"`
test $raw_bytes_used > 0 || return 1
test $raw_bytes_used == $bytes_used || return 1
+ teardown $dir
}
#######################################################################
if run $dir "$@" ; then
code=0
else
- display_logs $dir
code=1
fi
- teardown $dir || return 1
+ teardown $dir $code || return 1
return $code
}
export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
export CEPH_ARGS
- CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
export CEPH_CONF=/dev/null
local dir=td/ceph-helpers
for func in $funcs ; do
- $func $dir || return 1
+ if ! $func $dir; then
+ teardown $dir 1
+ return 1
+ fi
done
}
if test "$1" = TESTS ; then
shift
run_tests "$@"
+ exit $?
fi
# NOTE:
return 1
}
##
# Inject a read error for an object on the OSD serving a given shard,
# retrying via the admin socket until the daemon accepts the injection.
#
# @param pooltype "ec" for erasure-coded pools, anything else for replicated
# @param which error kind, spliced into the "inject<which>err" asok command
# @param poolname pool holding the object
# @param objname object to inject the error for
# @param dir test environment directory (kept for interface symmetry)
# @param shard_id index into the object's acting set selecting the OSD;
#                 passed to the injection only for ec pools
# @return 0 on success, 1 if still rejected after 10 attempts
#
function inject_eio() {
    local pooltype=$1
    shift
    local which=$1
    shift
    local poolname=$1
    shift
    local objname=$1
    shift
    local dir=$1
    shift
    local shard_id=$1
    shift

    local -a initial_osds=($(get_osds $poolname $objname))
    local osd_id=${initial_osds[$shard_id]}
    # Replicated pools take no shard argument in the asok command.
    if [ "$pooltype" != "ec" ]; then
        shard_id=""
    fi
    set_config osd $osd_id filestore_debug_inject_read_err true || return 1
    local loop=0
    # "Invalid" in the asok reply means the OSD is not ready yet; retry.
    while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
             inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
        loop=$((loop + 1))
        if [ $loop -ge 10 ]; then
            return 1
        fi
        sleep 1
    done
}
+
##
# Compare files with diff; when they differ and $DIFFCOLOPTS is set,
# re-run diff side-by-side so the mismatch is easier to read in logs.
#
# @param ... arguments passed through to diff (typically two files)
# @return 0 if identical, non-zero otherwise
#
function multidiff() {
    # Quote "$@" so file names containing spaces survive; DIFFCOLOPTS
    # must word-split into separate options, so it stays unquoted.
    if ! diff "$@" ; then
        if [ "$DIFFCOLOPTS" = "" ]; then
            return 1
        fi
        diff $DIFFCOLOPTS "$@"
    fi
}
+
# Local Variables:
# compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"
# End: