#
# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
# Copyright (C) 2014 Federico Gimenez <fgimenez@coit.es>
#
# Author: Loic Dachary <loic@dachary.org>
# Author: Federico Gimenez <fgimenez@coit.es>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
: ${CEPH_BUILD_VIRTUALENV:=/tmp}

if type xmlstarlet > /dev/null 2>&1; then
    XMLSTARLET=xmlstarlet
elif type xml > /dev/null 2>&1; then
    XMLSTARLET=xml
else
    echo "Missing xmlstarlet binary!"
    exit 1
fi

if [ `uname` = FreeBSD ]; then
    KERNCORE="kern.corefile"
else
    termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
    if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
        termwidth="-W ${termwidth}"
    fi
    DIFFCOLOPTS="-y $termwidth"
    KERNCORE="kernel.core_pattern"
fi

if [ -n "$CEPH_LIB" ]; then
    EXTRA_OPTS+=" --erasure-code-dir $CEPH_LIB"
    EXTRA_OPTS+=" --plugin-dir $CEPH_LIB"
    EXTRA_OPTS+=" --osd-class-dir $CEPH_LIB"
fi

#! @file ceph-helpers.sh
# @brief Toolbox to manage Ceph cluster dedicated to testing
#
# Example use case:
#
# ~~~~~~~~~~~~~~~~{.sh}
# source ceph-helpers.sh
#
# dir=mydir
# # cleanup leftovers and reset mydir
# setup $dir
# # create a cluster with one monitor and three osds
# run_mon $dir a
# run_osd $dir 0
# run_osd $dir 1
# run_osd $dir 2
# # put and get an object
# rados --pool rbd put GROUP /etc/group
# rados --pool rbd get GROUP /tmp/GROUP
# # stop the cluster and cleanup the directory
# teardown $dir
# ~~~~~~~~~~~~~~~~
#
# The focus is on simplicity and efficiency, in the context of
# functional tests. The output is intentionally very verbose
# and functions return as soon as an error is found. The caller
# is also expected to abort on the first error so that debugging
# can be done by looking at the end of the output.
#
# Each function is documented, implemented and tested independently.
# When modifying a helper, the test and the documentation are
# expected to be updated and it is easier if they are collocated. A
# test for a given function can be run with
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS test_get_osds
# ~~~~~~~~~~~~~~~~
#
# and all the tests (i.e. all functions matching test_*) are run
# with:
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS
# ~~~~~~~~~~~~~~~~
#
# A test function takes a single argument: the directory dedicated
# to the tests. It is expected to not create any file outside of this
# directory and remove it entirely when it completes successfully.
#
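# For example, a minimal test function following that contract could
# look like the following sketch (illustrative, not an actual test):
#
# ~~~~~~~~~~~~~~~~{.sh}
# function test_example() {
#     local dir=$1
#     setup $dir || return 1
#     run_mon $dir a || return 1
#     ceph status || return 1
#     teardown $dir || return 1
# }
# ~~~~~~~~~~~~~~~~
#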
function get_asok_dir() {
    if [ -n "$CEPH_ASOK_DIR" ]; then
        echo "$CEPH_ASOK_DIR"
    else
        echo ${TMPDIR:-/tmp}/ceph-asok.$$
    fi
}

function get_asok_path() {
    local name=$1
    if [ -n "$name" ]; then
        echo $(get_asok_dir)/ceph-$name.asok
    else
        echo $(get_asok_dir)/\$cluster-\$name.asok
    fi
}

##
# Cleanup any leftovers found in **dir** via **teardown**
# and reset **dir** as an empty environment.
#
# @param dir path name of the environment
# @return 0 on success, 1 on error
#
function setup() {
    local dir=$1
    teardown $dir || return 1
    mkdir -p $dir
    mkdir -p $(get_asok_dir)
}

function test_setup() {
    local dir=$1
    setup $dir || return 1
    test -d $dir || return 1
    setup $dir || return 1
    test -d $dir || return 1
    teardown $dir
}

#######################################################################

##
# Kill all daemons for which a .pid file exists in **dir** and remove
# **dir**. If the file system in which **dir** is stored is btrfs, delete all
# subvolumes that relate to it.
#
# @param dir path name of the environment
# @param dumplogs pass "1" to always dump logs, otherwise logs are dumped only if cores are found
# @return 0 on success, 1 on error
#
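# Example (illustrative):
#
# ~~~~~~~~~~~~~~~~{.sh}
# teardown $dir     # dump logs only if core files are found
# teardown $dir 1   # always dump logs before removing $dir
# ~~~~~~~~~~~~~~~~
#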
function teardown() {
    local dir=$1
    local dumplogs=$2

    kill_daemons $dir KILL
    if [ `uname` != FreeBSD ] \
        && [ $(stat -f -c '%T' .) == "btrfs" ]; then
        __teardown_btrfs $dir
    fi
    local cores="no"
    local pattern="$(sysctl -n $KERNCORE)"
    # See if we have apport core handling
    if [ "${pattern:0:1}" = "|" ]; then
        # TODO: Where can we get the dumps?
        # Not sure where the dumps really are so this will look in the CWD
        pattern=""
    fi
    # Local core files start with "core"; teuthology core files end with "core"
    if ls $(dirname "$pattern") | grep -q '^core\|core$' ; then
        cores="yes"
        if [ -n "$LOCALRUN" ]; then
            mkdir /tmp/cores.$$ 2> /dev/null || true
            for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
                mv $i /tmp/cores.$$
            done
        fi
    fi
    if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
        if [ -n "$LOCALRUN" ]; then
            display_logs $dir
        else
            # Move logs to where Teuthology will archive them
            mkdir -p $TESTDIR/archive/log
            mv $dir/*.log $TESTDIR/archive/log
        fi
    fi
    rm -fr $dir
    rm -rf $(get_asok_dir)
    if [ "$cores" = "yes" ]; then
        echo "ERROR: Failure due to cores found"
        if [ -n "$LOCALRUN" ]; then
            echo "Find saved core files in /tmp/cores.$$"
        fi
        return 1
    fi
    return 0
}

function __teardown_btrfs() {
    local btrfs_base_dir=$1
    local btrfs_root=$(df -P . | tail -1 | awk '{print $NF}')
    local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list . -t | awk '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/")
    for subvolume in $btrfs_dirs; do
        sudo btrfs subvolume delete $btrfs_root/$subvolume
    done
}

function test_teardown() {
    local dir=$1
    setup $dir || return 1
    teardown $dir || return 1
    ! test -d $dir || return 1
}

#######################################################################

##
# Sends a signal to a single daemon.
# This is a helper function for kill_daemons.
#
# After the daemon is sent **signal**, its actual termination
# will be verified by sending it signal 0. If the daemon is
# still alive, kill_daemon will pause for a few seconds and
# try again. This will repeat for a fixed number of times
# before kill_daemon returns on failure. The list of
# sleep intervals can be specified as **delays** and defaults
# to:
#
# 0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
#
# This sequence is designed to run first a very short sleep time (0.1)
# if the machine is fast enough and the daemon terminates in a fraction of a
# second. The increasing sleep numbers should give plenty of time for
# the daemon to die even on the slowest running machine. If a daemon
# takes more than a few minutes to stop (the sum of all sleep times),
# there probably is no point in waiting more and a number of things
# are likely to go wrong anyway: better give up and return on error.
#
# @param pidfile path of the file containing the pid of the process to signal
# @param send_signal the signal to send
# @param delays sequence of sleep times before failure
#
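# Example (illustrative): send TERM to the daemon whose pid is stored
# in $dir/osd.0.pid and wait at most one second for it to terminate:
#
# ~~~~~~~~~~~~~~~~{.sh}
# kill_daemon $dir/osd.0.pid TERM 1
# ~~~~~~~~~~~~~~~~
#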
function kill_daemon() {
    local pid=$(cat $1)
    local send_signal=$2
    local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
    local exit_code=1
    for try in $delays ; do
         if kill -$send_signal $pid 2> /dev/null ; then
            exit_code=1
         else
            exit_code=0
            break
         fi
         send_signal=0
         sleep $try
    done
    return $exit_code
}

function test_kill_daemon() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1

    name_prefix=osd
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # sending signal 0 won't kill the daemon
        # waiting just for one second instead of the default schedule
        # allows us to quickly verify what happens when kill fails
        # to stop the daemon (i.e. it must return false)
        #
        ! kill_daemon $pidfile 0 1 || return 1
        #
        # killing just the osd and verify the mon still is responsive
        #
        kill_daemon $pidfile TERM || return 1
    done

    ceph osd dump | grep "osd.0 down" || return 1

    name_prefix=mgr
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mgr
        #
        kill_daemon $pidfile TERM || return 1
    done

    name_prefix=mon
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mon and verify it cannot be reached
        #
        kill_daemon $pidfile TERM || return 1
        ! timeout 5 ceph status || return 1
    done

    teardown $dir || return 1
}

##
# Kill all daemons for which a .pid file exists in **dir**. Each
# daemon is sent a **signal** and kill_daemons waits for it to exit
# during a few minutes. By default all daemons are killed. If a
# **name_prefix** is provided, only the daemons for which a pid
# file is found matching the prefix are killed. See run_osd and
# run_mon for more information about the name conventions for
# the daemons.
#
# Send TERM to all daemons : kill_daemons $dir
# Send KILL to all daemons : kill_daemons $dir KILL
# Send KILL to all osds    : kill_daemons $dir KILL osd
# Send KILL to osd 1       : kill_daemons $dir KILL osd.1
#
# If a daemon is sent the TERM signal and does not terminate
# within a few minutes, it will still be running even after
# kill_daemons returns.
#
# If all daemons are killed successfully the function returns 0;
# if at least one daemon remains, this is treated as an
# error and the function returns 1.
#
# @param dir path name of the environment
# @param signal name of the first signal (defaults to TERM)
# @param name_prefix only kill matching daemons (defaults to all)
# @param delays sequence of sleep times before failure
# @return 0 on success, 1 on error
#
function kill_daemons() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local dir=$1
    local signal=${2:-TERM}
    local name_prefix=$3 # optional, osd, mon, osd.1
    local delays=$4 # optional timing
    local status=0
    local pids=""

    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        run_in_background pids kill_daemon $pidfile $signal $delays
    done

    wait_background pids
    status=$?

    $trace && shopt -s -o xtrace
    return $status
}

function test_kill_daemons() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    #
    # sending signal 0 won't kill the daemon
    # waiting just for one second instead of the default schedule
    # allows us to quickly verify what happens when kill fails
    # to stop the daemon (i.e. it must return false)
    #
    ! kill_daemons $dir 0 osd 1 || return 1
    #
    # killing just the osd and verify the mon still is responsive
    #
    kill_daemons $dir TERM osd || return 1
    ceph osd dump | grep "osd.0 down" || return 1
    #
    # kill the mgr
    #
    kill_daemons $dir TERM mgr || return 1
    #
    # kill the mon and verify it cannot be reached
    #
    kill_daemons $dir TERM || return 1
    ! timeout 5 ceph status || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run a monitor by the name mon.**id** with data in **dir**/**id**.
# The logs can be found in **dir**/mon.**id**.log and the pid file
# is **dir**/mon.**id**.pid and the admin socket is
# **dir**/**id**/ceph-mon.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-mon --mkfs
# and the ceph-mon daemon.
#
# Two mandatory arguments must be provided: --fsid and --mon-host.
# Instead of adding them to every call to run_mon, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_mon $dir a # spawn a mon and bind port 7018
# run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
#
# If mon_initial_members is not set, the default rbd pool is deleted
# and replaced with a replicated pool with less placement groups to
# speed up initialization. If mon_initial_members is set, no attempt
# is made to recreate the rbd pool because it would hang forever,
# waiting for other mons to join.
#
# A **dir**/ceph.conf file is created but not meant to be used by any
# function. It is convenient for debugging a failure with:
#
#     ceph --conf **dir**/ceph.conf -s
#
# @param dir path name of the environment
# @param id mon identifier
# @param ... can be any option valid for ceph-mon
# @return 0 on success, 1 on error
#
function run_mon() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mon \
        --id $id \
        --mkfs \
        --mon-data=$data \
        --run-dir=$dir \
        "$@" || return 1

    ceph-mon \
        --id $id \
        --mon-osd-full-ratio=.99 \
        --mon-data-avail-crit=1 \
        --mon-data-avail-warn=5 \
        --paxos-propose-interval=0.1 \
        --osd-crush-chooseleaf-type=0 \
        $EXTRA_OPTS \
        --mon-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --mon-cluster-log-file=$dir/log \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        --mon-allow-pool-delete \
        --mon-osd-backfillfull-ratio .99 \
        "$@" || return 1

    cat > $dir/ceph.conf <<EOF
[global]
fsid = $(get_config mon $id fsid)
mon host = $(get_config mon $id mon_host)
EOF
}

function test_run_mon() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a --mon-initial-members=a || return 1
    create_rbd_pool || return 1
    # rbd has not been deleted / created, hence it has pool id 1
    ceph osd dump | grep "pool 1 'rbd'" || return 1
    kill_daemons $dir || return 1

    run_mon $dir a || return 1
    create_rbd_pool || return 1
    # rbd has been deleted / created, hence it does not have pool id 1
    ! ceph osd dump | grep "pool 1 'rbd'" || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"3"}' || return 1

    ! CEPH_ARGS='' ceph status || return 1
    CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1

    kill_daemons $dir || return 1

    run_mon $dir a --osd_pool_default_size=1 || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"1"}' || return 1
    kill_daemons $dir || return 1

    CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
        run_mon $dir a || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"2"}' || return 1
    kill_daemons $dir || return 1

    teardown $dir || return 1
}

function create_rbd_pool() {
    ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
    create_pool rbd $PG_NUM || return 1
}

function create_pool() {
    ceph osd pool create "$@"
}

function delete_pool() {
    local poolname=$1
    ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
}

#######################################################################
function run_mgr() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mgr \
        --id $id \
        $EXTRA_OPTS \
        --debug-objecter 20 \
        --mgr-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        "$@" || return 1
}

#######################################################################

##
# Create (prepare) and run (activate) an osd by the name osd.**id**
# with data in **dir**/**id**. The logs can be found in
# **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
# the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host.
# Instead of adding them to every call to run_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The run_osd function creates the OSD data directory with ceph-disk
# prepare on the **dir**/**id** directory and relies on the
# activate_osd function to run the daemon.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function run_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    mkdir -p $osd_data
    ceph-disk $ceph_disk_args \
        prepare --filestore $osd_data || return 1

    activate_osd $dir $id "$@"
}

function run_osd_bluestore() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    mkdir -p $osd_data
    ceph-disk $ceph_disk_args \
        prepare --bluestore $osd_data || return 1

    activate_osd $dir $id "$@"
}

function test_run_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    run_osd $dir 1 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"30"}' || return 1

    teardown $dir || return 1
}

#######################################################################

##
# Shutdown and remove all traces of the osd by the name osd.**id**.
#
# The OSD is shutdown with the TERM signal. It is then removed from
# the auth list, crush map, osd map etc and the files associated with
# it are also removed.
#
# @param dir path name of the environment
# @param id osd identifier
# @return 0 on success, 1 on error
#
function destroy_osd() {
    local dir=$1
    local id=$2

    ceph osd out osd.$id || return 1
    kill_daemons $dir TERM osd.$id || return 1
    ceph osd purge osd.$id --yes-i-really-mean-it || return 1
    teardown $dir/$id || return 1
}

function test_destroy_osd() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    destroy_osd $dir 0 || return 1
    ! ceph osd dump | grep "osd.0 " || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run (activate) an osd by the name osd.**id** with data in
# **dir**/**id**. The logs can be found in **dir**/osd.**id**.log,
# the pid file is **dir**/osd.**id**.pid and the admin socket is
# **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host.
# Instead of adding them to every call to activate_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The activate_osd function expects a valid OSD data directory
# in **dir**/**id**, either just created via run_osd or re-using
# one left by a previous run of ceph-osd. The ceph-osd daemon is
# run indirectly via ceph-disk activate.
#
# The activate_osd function blocks until the monitor reports the osd
# up. If it fails to do so within $TIMEOUT seconds, activate_osd
# fails.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# activate_osd $dir 0 # activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function activate_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len 460"
    ceph_args+=" --osd-max-object-namespace-len 64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features *"
    mkdir -p $osd_data

    CEPH_ARGS="$ceph_args " ceph-disk $ceph_disk_args \
        activate \
        --mark-init=none \
        $osd_data || return 1

    [ "$id" = "$(cat $osd_data/whoami)" ] || return 1

    wait_for_osd up $id || return 1
}

function test_activate_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    kill_daemons $dir TERM osd || return 1

    activate_osd $dir 0 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    teardown $dir || return 1
}

#######################################################################

##
# Wait until the OSD **id** is either up or down, as specified by
# **state**. It fails after $TIMEOUT seconds.
#
# @param state either up or down
# @param id osd identifier
# @return 0 on success, 1 on error
#
function wait_for_osd() {
    local state=$1
    local id=$2

    status=1
    for ((i=0; i < $TIMEOUT; i++)); do
        echo $i
        if ! ceph osd dump | grep "osd.$id $state"; then
            sleep 1
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_for_osd() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_osd up 0 || return 1
    kill_daemons $dir TERM osd || return 1
    wait_for_osd down 0 || return 1
    ( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Display the list of OSD ids supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT white space separated list of OSD ids
# @return 0 on success, 1 on error
#
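# Example (illustrative; with two OSDs, as in test_get_osds below,
# the output is two ids in the 0-1 range):
#
# ~~~~~~~~~~~~~~~~{.sh}
# get_osds rbd GROUP   # e.g. prints: 0 1
# ~~~~~~~~~~~~~~~~
#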
function get_osds() {
    local poolname=$1
    local objectname=$2

    local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting | .[]')
    # get rid of the trailing space
    echo $osds
}

function test_get_osds() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait for the monitor to form quorum (optionally, of size N)
#
# @param timeout duration (lower-bound) to wait for quorum to be formed
# @param quorumsize size of quorum to wait for
# @return 0 on success, 1 on error
#
function wait_for_quorum() {
    local timeout=$1
    local quorumsize=$2

    if [[ -z "$timeout" ]]; then
        timeout=300
    fi

    if [[ -z "$quorumsize" ]]; then
        timeout $timeout ceph mon_status --format=json >&/dev/null || return 1
        return 0
    fi

    no_quorum=1
    wait_until=$((`date +%s` + $timeout))
    while [[ $(date +%s) -lt $wait_until ]]; do
        jqfilter='.quorum | length == '$quorumsize
        jqinput="$(timeout $timeout ceph mon_status --format=json 2>/dev/null)"
        res=$(echo $jqinput | jq "$jqfilter")
        if [[ "$res" == "true" ]]; then
            no_quorum=0
            break
        fi
    done
    return $no_quorum
}

#######################################################################

##
# Return the PG supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the PG id
# @return 0 on success, 1 on error
#
function get_pg() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
}

function test_get_pg() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the value of the **config**, obtained via the config get command
# of the admin socket of **daemon**.**id**.
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param STDOUT the config value
# @return 0 on success, 1 on error
#
function get_config() {
    local daemon=$1
    local id=$2
    local config=$3

    CEPH_ARGS='' \
        ceph --format json daemon $(get_asok_path $daemon.$id) \
        config get $config 2> /dev/null | \
        jq -r ".$config"
}

function test_get_config() {
    local dir=$1

    # override the default config using command line arg and check it
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    test $(get_config mon a osd_pool_default_size) = 1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 --osd_max_scrubs=3 || return 1
    test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Set the **config** to specified **value**, via the config set command
# of the admin socket of **daemon**.**id**
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param value the config value
# @return 0 on success, 1 on error
#
function set_config() {
    local daemon=$1
    local id=$2
    local config=$3
    local value=$4

    test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
               config set $config $value 2> /dev/null | \
           jq 'has("success")') == true
}

function test_set_config() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    set_config mon a ms_crc_header false || return 1
    test $(get_config mon a ms_crc_header) = false || return 1
    set_config mon a ms_crc_header true || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the OSD id of the primary OSD supporting the **objectname**
# stored in **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the primary OSD id
# @return 0 on success, 1 on error
#
function get_primary() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting_primary'
}

function test_get_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    test $(get_primary rbd GROUP) = $osd || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the id of any OSD supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map, except the primary.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the OSD id
# @return 0 on success, 1 on error
#
function get_not_primary() {
    local poolname=$1
    local objectname=$2

    local primary=$(get_primary $poolname $objectname)
    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq ".acting | map(select (. != $primary)) | .[0]"
}

function test_get_not_primary() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local primary=$(get_primary rbd GROUP)
    local not_primary=$(get_not_primary rbd GROUP)
    test $not_primary != $primary || return 1
    test $not_primary = 0 -o $not_primary = 1 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run ceph-objectstore-tool against the OSD **id** using the data path
# **dir**. The OSD is killed with TERM prior to running
# ceph-objectstore-tool because access to the data path is
# exclusive. The OSD is restarted after the command completes. The
# objectstore_tool returns after all PG are active+clean again.
#
# @param dir the data path of the OSD
# @param id the OSD id
# @param ... arguments to ceph-objectstore-tool
# @param STDIN the input of ceph-objectstore-tool
# @param STDOUT the output of ceph-objectstore-tool
# @return 0 on success, 1 on error
#
# The value of $ceph_osd_args will be passed to restarted osds
#
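# Example (illustrative, mirroring test_objectstore_tool below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# objectstore_tool $dir 0 GROUP get-bytes > /tmp/GROUP
# ~~~~~~~~~~~~~~~~
#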
function objectstore_tool() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local osd_type=$(cat $osd_data/type)

    kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1

    local journal_args
    if [ "$osd_type" == "filestore" ]; then
        journal_args=" --journal-path $osd_data/journal"
    fi
    ceph-objectstore-tool \
        --data-path $osd_data \
        $journal_args \
        "$@" || return 1
    activate_osd $dir $id $ceph_osd_args >&2 || return 1
    wait_for_clean >&2
}

function test_objectstore_tool() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    rados --pool rbd put GROUP /etc/group || return 1
    objectstore_tool $dir $osd GROUP get-bytes | \
        diff - /etc/group
    ! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Predicate checking if there is an ongoing recovery in the
# cluster. If any of the recovering_{keys,bytes,objects}_per_sec
# counters are reported by ceph status, it means recovery is in
# progress.
#
# @return 0 if recovery in progress, 1 otherwise
#
function get_is_making_recovery_progress() {
    local recovery_progress
    recovery_progress+=".recovering_keys_per_sec + "
    recovery_progress+=".recovering_bytes_per_sec + "
    recovery_progress+=".recovering_objects_per_sec"
    local progress=$(ceph --format json status 2>/dev/null | \
                     jq -r ".pgmap | $recovery_progress")
    test "$progress" != null
}

function test_get_is_making_recovery_progress() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    ! get_is_making_recovery_progress || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the number of active PGs in the cluster. A PG is active if
# ceph pg dump pgs reports it both **active** and **clean** and that
# it is not **stale**.
#
# @param STDOUT the number of active PGs
# @return 0 on success, 1 on error
#
function get_num_active_clean() {
    local expression
    expression+="select(contains(\"active\") and contains(\"clean\")) | "
    expression+="select(contains(\"stale\") | not)"
    ceph --format json pg dump pgs 2>/dev/null | \
        jq "[.[] | .state | $expression] | length"
}

function test_get_num_active_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_active_clean=$(get_num_active_clean)
    test "$num_active_clean" = $PG_NUM || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the number of PGs in the cluster, according to
# ceph status.
#
# @param STDOUT the number of PGs
# @return 0 on success, 1 on error
#
function get_num_pgs() {
    ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
}

function test_get_num_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_pgs=$(get_num_pgs)
    test "$num_pgs" -gt 0 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the OSD ids in use by at least one PG in the cluster (either
# in the up or the acting set), according to ceph pg dump pgs. Every
# OSD id shows as many times as it is used in up and acting sets.
# If an OSD id is in both the up and acting set of a given PG, it will
# show up twice.
#
# @param STDOUT a sorted list of OSD ids
# @return 0 on success, 1 on error
#
function get_osd_id_used_by_pgs() {
    ceph --format json pg dump pgs 2>/dev/null | jq '.[] | .up[], .acting[]' | sort
}

function test_get_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local osd_ids=$(get_osd_id_used_by_pgs | uniq)
    test "$osd_ids" = "0" || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait until the OSD **id** shows **count** times in the
# PGs (see get_osd_id_used_by_pgs for more information about
# how OSD ids are counted).
#
# @param id the OSD id
# @param count the number of times it must show in the PGs
# @return 0 on success, 1 on error
#
function wait_osd_id_used_by_pgs() {
    local id=$1
    local count=$2

    status=1
    for ((i=0; i < $TIMEOUT / 5; i++)); do
        echo $i
        if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then
            sleep 5
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    wait_osd_id_used_by_pgs 0 8 || return 1
    ! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the date and time of the last completed scrub for **pgid**,
# as reported by ceph pg dump pgs. Note that a repair also sets this
# timestamp.
#
# @param pgid the id of the PG
# @param STDOUT the date and time of the last scrub
# @return 0 on success, 1 on error
#
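# Example (as used by test_get_last_scrub_stamp below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# stamp=$(get_last_scrub_stamp 1.0)
# ~~~~~~~~~~~~~~~~
#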
function get_last_scrub_stamp() {
    local pgid=$1
    local sname=${2:-last_scrub_stamp}
    ceph --format json pg dump pgs 2>/dev/null | \
        jq -r ".[] | select(.pgid==\"$pgid\") | .$sname"
}

function test_get_last_scrub_stamp() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    stamp=$(get_last_scrub_stamp 1.0)
    test -n "$stamp" || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Predicate checking if the cluster is clean, i.e. all of its PGs are
# in a clean state (see get_num_active_clean for a definition).
#
# @return 0 if the cluster is clean, 1 otherwise
#
function is_clean() {
    num_pgs=$(get_num_pgs)
    test $num_pgs != 0 || return 1
    test $(get_num_active_clean) = $num_pgs || return 1
}

function test_is_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    is_clean || return 1
    teardown $dir || return 1
}

#######################################################################

calc() { awk "BEGIN{print $*}"; }

##
# Return a list of numbers that are increasingly larger and whose
# total is **timeout** seconds. It can be used to have short sleep
# delay while waiting for an event on a fast machine. But if running
# very slowly the larger delays avoid stressing the machine even
# further or spamming the logs.
#
# @param timeout sum of all delays, in seconds
# @param first_step size of the first delay (defaults to 1)
# @return a list of sleep delays
#
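# Example (values match test_get_timeout_delays below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# get_timeout_delays 6      # prints: 1 2 3
# get_timeout_delays 5 .1   # prints: 0.1 0.2 0.4 0.8 1.6 1.9
# ~~~~~~~~~~~~~~~~
#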
function get_timeout_delays() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local timeout=$1
    local first_step=${2:-1}

    local i
    local total="0"
    i=$first_step
    while test "$(calc $total + $i \<= $timeout)" = "1"; do
        echo -n "$(calc $i) "
        total=$(calc $total + $i)
        i=$(calc $i \* 2)
    done
    if test "$(calc $total \< $timeout)" = "1"; then
        echo -n "$(calc $timeout - $total) "
    fi
    $trace && shopt -s -o xtrace
}

function test_get_timeout_delays() {
    test "$(get_timeout_delays 1)" = "1 " || return 1
    test "$(get_timeout_delays 5)" = "1 2 2 " || return 1
    test "$(get_timeout_delays 6)" = "1 2 3 " || return 1
    test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
    test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1
    test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1
    test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1
    test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1
    test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1
    test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1
    test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1
}

#######################################################################

##
# Wait until the cluster becomes clean or if it does not make progress
# for $TIMEOUT seconds.
# Progress is measured either via the **get_is_making_recovery_progress**
# predicate or if the number of clean PGs changes (as returned by get_num_active_clean)
#
# @return 0 if the cluster is clean, 1 otherwise
#
function wait_for_clean() {
    local num_active_clean=-1
    local cur_active_clean
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0

    flush_pg_stats || return 1
    while test $(get_num_pgs) == 0 ; do
        sleep 1
    done

    while true ; do
        # Comparing get_num_active_clean & get_num_pgs is used to determine
        # if the cluster is clean. That's almost an inline of is_clean() to
        # get more performance by avoiding multiple calls of get_num_active_clean.
        cur_active_clean=$(get_num_active_clean)
        test $cur_active_clean = $(get_num_pgs) && break
        if test $cur_active_clean != $num_active_clean ; then
            loop=0
            num_active_clean=$cur_active_clean
        elif get_is_making_recovery_progress ; then
            loop=0
        elif (( $loop >= ${#delays[*]} )) ; then
            ceph report
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
    return 0
}

function test_wait_for_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    create_rbd_pool || return 1
    ! TIMEOUT=1 wait_for_clean || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait until the cluster reports the health condition passed as
# argument, for at most $TIMEOUT seconds.
#
# @param grepstr string to grep for in health detail
# @return 0 if the cluster health matches the request, 1 otherwise
#
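# Example (illustrative, see also wait_for_health_ok below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_health "HEALTH_OK" || return 1
# ~~~~~~~~~~~~~~~~
#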
function wait_for_health() {
    local grepstr=$1
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0

    while ! ceph health detail | grep "$grepstr" ; do
        if (( $loop >= ${#delays[*]} )) ; then
            ceph health detail
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
}

##
# Wait until the cluster becomes HEALTH_OK again or if it does not make progress
# for $TIMEOUT seconds.
#
# @return 0 if the cluster is HEALTHY, 1 otherwise
#
function wait_for_health_ok() {
    wait_for_health "HEALTH_OK" || return 1
}

function test_wait_for_health_ok() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
    run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
    run_osd $dir 0 || return 1
    kill_daemons $dir TERM osd || return 1
    ! TIMEOUT=1 wait_for_health_ok || return 1
    activate_osd $dir 0 || return 1
    wait_for_health_ok || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run repair on **pgid** and wait until it completes. The repair
# function will fail if repair does not complete within $TIMEOUT
# seconds.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
function repair() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph pg repair $pgid
    wait_for_scrub $pgid "$last_scrub"
}

function test_repair() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    repair 1.0 || return 1
    kill_daemons $dir KILL osd || return 1
    ! TIMEOUT=1 repair 1.0 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run scrub on **pgid** and wait until it completes. The pg_scrub
# function will fail if scrub does not complete within $TIMEOUT
# seconds. The pg_scrub is complete whenever the
# **get_last_scrub_stamp** function reports a timestamp different from
# the one stored before starting the scrub.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
function pg_scrub() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph pg scrub $pgid
    wait_for_scrub $pgid "$last_scrub"
}

function pg_deep_scrub() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
    ceph pg deep-scrub $pgid
    wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
}

function test_pg_scrub() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    pg_scrub 1.0 || return 1
    kill_daemons $dir KILL osd || return 1
    ! TIMEOUT=1 pg_scrub 1.0 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run the *command* and expect it to fail (i.e. return a non zero status).
# The output (stderr and stdout) is stored in a temporary file in *dir*
# and is expected to contain the string *expected*.
#
# Return 0 if the command failed and the string was found. Otherwise
# return 1 and cat the full output of the command on stderr for debug.
#
# @param dir temporary directory to store the output
# @param expected string to look for in the output
# @param command ... the command and its arguments
# @return 0 on success, 1 on error
#
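# Example (from test_expect_failure below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
# ~~~~~~~~~~~~~~~~
#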
function expect_failure() {
    local dir=$1
    shift
    local expected="$1"
    shift
    local success

    if "$@" > $dir/out 2>&1 ; then
        success=true
    else
        success=false
    fi

    if $success || ! grep --quiet "$expected" $dir/out ; then
        cat $dir/out >&2
        return 1
    else
        return 0
    fi
}

function test_expect_failure() {
    local dir=$1

    setup $dir || return 1
    expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
    # the command did not fail
    ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
    grep --quiet FAIL $dir/out || return 1
    # the command failed but the output does not contain the expected string
    ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
    ! grep --quiet FAIL $dir/out || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Given the *last_scrub*, wait for scrub to happen on **pgid**. It
# will fail if scrub does not complete within $TIMEOUT seconds. The
# scrub is complete whenever the **get_last_scrub_stamp** function
# reports a timestamp different from the one given in argument.
#
# @param pgid the id of the PG
# @param last_scrub timestamp of the last scrub for *pgid*
# @return 0 on success, 1 on error
#
function wait_for_scrub() {
    local pgid=$1
    local last_scrub="$2"
    local sname=${3:-last_scrub_stamp}

    for ((i=0; i < $TIMEOUT; i++)); do
        if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
            return 0
        fi
        sleep 1
    done
    return 1
}

function test_wait_for_scrub() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local pgid=1.0
    ceph pg repair $pgid
    local last_scrub=$(get_last_scrub_stamp $pgid)
    wait_for_scrub $pgid "$last_scrub" || return 1
    kill_daemons $dir KILL osd || return 1
    last_scrub=$(get_last_scrub_stamp $pgid)
    ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return 0 if the erasure code *plugin* is available, 1 otherwise.
#
# @param plugin erasure code plugin
# @return 0 on success, 1 on error
#
function erasure_code_plugin_exists() {
    local plugin=$1
    local grepstr
    local s
    case `uname` in
        FreeBSD) grepstr="Cannot open.*$plugin" ;;
        *) grepstr="$plugin.*No such file" ;;
    esac

    s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
    local status=$?
    if [ $status -eq 0 ]; then
        ceph osd erasure-code-profile rm TESTPROFILE
    elif ! echo $s | grep --quiet "$grepstr" ; then
        status=1
        # display why the string was rejected.
        echo $s
    fi
    return $status
}

function test_erasure_code_plugin_exists() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    erasure_code_plugin_exists jerasure || return 1
    ! erasure_code_plugin_exists FAKE || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Display all log files from **dir** on stdout.
#
# @param dir directory in which all data is stored
#
function display_logs() {
    local dir=$1

    find $dir -maxdepth 1 -name '*.log' | \
        while read file ; do
            echo "======================= $file"
            cat $file
        done
}

function test_display_logs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    kill_daemons $dir || return 1
    display_logs $dir > $dir/log.out
    grep --quiet mon.a.log $dir/log.out || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Spawn a command in background and save the pid in the variable name
# passed in argument. To make the output reading easier, the output is
# prepended with the process id.
#
# Example:
#   pids1=""
#   run_in_background pids1 bash -c 'sleep 1; exit 1'
#
# @param pid_variable the variable name (not value) where the pids will be stored
# @param ... the command to execute
# @return only the pid_variable output should be considered and used with **wait_background**
#
function run_in_background() {
    local pid_variable=$1
    shift
    # Execute the command and prepend the output with its pid
    # We enforce to return the exit status of the command and not the sed one.
    ("$@" |& sed 's/^/'$$': /'; return "${PIPESTATUS[0]}") >&2 &
    eval "$pid_variable+=\" $!\""
}

function save_stdout {
    local out="$1"
    shift
    "$@" > "$out"
}

function test_run_in_background() {
    local pids
    run_in_background pids sleep 1
    run_in_background pids sleep 1
    test $(echo $pids | wc -w) = 2 || return 1
    wait $pids || return 1
}

#######################################################################

##
# Wait for pids running in background to complete.
# This function is usually used after a **run_in_background** call.
#
# Example:
#   run_in_background pids1 bash -c 'sleep 1; exit 1'
#   wait_background pids1
#
# @param pids the variable name that contains the active PIDS. Set as empty at the end of the function.
# @return 1 if at least one process exits in error, 0 otherwise
#
function wait_background() {
    # We extract the PIDS from the variable name
    pids=${!1}

    return_code=0
    for pid in $pids; do
        if ! wait $pid; then
            # If one process failed then return 1
            return_code=1
        fi
    done

    # We empty the variable reporting that all processes ended
    eval "$1=''"

    return $return_code
}

function test_wait_background() {
    local pids=""
    run_in_background pids bash -c "sleep 1; exit 1"
    run_in_background pids bash -c "sleep 2; exit 0"
    wait_background pids
    if [ $? -ne 1 ]; then return 1; fi

    run_in_background pids bash -c "sleep 1; exit 0"
    run_in_background pids bash -c "sleep 2; exit 0"
    wait_background pids
    if [ $? -ne 0 ]; then return 1; fi

    if [ ! -z "$pids" ]; then return 1; fi
}

function flush_pg_stats() {
    local timeout=${1:-$TIMEOUT}

    ids=`ceph osd ls`
    seqs=''
    for osd in $ids; do
        seq=`ceph tell osd.$osd flush_pg_stats`
        seqs="$seqs $osd-$seq"
    done

    for s in $seqs; do
        osd=`echo $s | cut -d - -f 1`
        seq=`echo $s | cut -d - -f 2`
        echo "waiting osd.$osd seq $seq"
        while test $(ceph osd last-stat-seq $osd) -lt $seq; do
            sleep 1
            if [ $((timeout--)) -eq 0 ]; then
                return 1
            fi
        done
    done
}

function test_flush_pg_stats() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    rados -p rbd put obj /etc/group
    flush_pg_stats || return 1
    local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
    raw_bytes_used=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
    bytes_used=`ceph df detail --format=json | jq "$jq_filter.bytes_used"`
    test $raw_bytes_used -gt 0 || return 1
    test $raw_bytes_used == $bytes_used || return 1
    teardown $dir
}

#######################################################################

##
# Call the **run** function (which must be defined by the caller) with
# the **dir** argument followed by the caller argument list.
#
# If the **run** function returns on error, all logs found in **dir**
# are displayed for diagnostic purposes.
#
# **teardown** function is called when the **run** function returns
# (on success or on error), to cleanup leftovers. The CEPH_CONF is set
# to /dev/null and CEPH_ARGS is unset so that the tests are protected from
# external interferences.
#
# It is the responsibility of the **run** function to call the
# **setup** function to prepare the test environment (create a temporary
# directory etc.).
#
# The shell is required (via PS4) to display the function and line
# number whenever a statement is executed to help debugging.
#
# @param dir directory in which all data is stored
# @param ... arguments passed transparently to **run**
# @return 0 on success, 1 on error
#
function main() {
    local dir=$1
    shift

    shopt -s -o xtrace
    PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

    export PATH=${CEPH_BUILD_VIRTUALENV}/ceph-disk-virtualenv/bin:${CEPH_BUILD_VIRTUALENV}/ceph-detect-init-virtualenv/bin:.:$PATH # make sure program from sources are preferred
    #export PATH=$CEPH_ROOT/src/ceph-disk/virtualenv/bin:$CEPH_ROOT/src/ceph-detect-init/virtualenv/bin:.:$PATH # make sure program from sources are preferred

    export CEPH_CONF=/dev/null
    unset CEPH_ARGS

    local code
    if run $dir "$@" ; then
        code=0
    else
        code=1
    fi
    teardown $dir $code || return 1
    return $code
}

#######################################################################

function run_tests() {
    shopt -s -o xtrace
    PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

    export PATH=${CEPH_BUILD_VIRTUALENV}/ceph-disk-virtualenv/bin:${CEPH_BUILD_VIRTUALENV}/ceph-detect-init-virtualenv/bin:.:$PATH # make sure program from sources are preferred
    #export PATH=$CEPH_ROOT/src/ceph-disk/virtualenv/bin:$CEPH_ROOT/src/ceph-detect-init/virtualenv/bin:.:$PATH # make sure program from sources are preferred

    export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    export CEPH_CONF=/dev/null

    local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
    local dir=td/ceph-helpers

    for func in $funcs ; do
        if ! $func $dir; then
            teardown $dir
            return 1
        fi
    done
    teardown $dir
    return 0
}

if test "$1" = TESTS ; then
    shift
    run_tests "$@"
    exit $?
fi

# jq only supports --exit-status|-e from version 1.4 forwards, which makes
# returning on error waaaay prettier and straightforward.
# However, the current automated upstream build is running with v1.3,
# which has no idea what -e is. Hence the convoluted error checking we
# do below.
# The next time someone changes this code, please check if v1.4 is now
# a thing, and, if so, please change these to use -e. Thanks.
#
# jq '.all.supported | select([.[] == "foo"] | any)'
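#
# Example (illustrative; the filter shown is an assumption, not taken
# from an existing test):
#
# ~~~~~~~~~~~~~~~~{.sh}
# jq_success "$(ceph osd dump --format=json)" '.osds | length >= 1' || return 1
# ~~~~~~~~~~~~~~~~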
function jq_success() {
    input="$1"
    filter="$2"
    expects="$3"

    in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g")
    filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g")

    ret=$(echo "$in_escaped" | jq "$filter_escaped")
    if [[ "$ret" == "true" ]]; then
        return 0
    elif [[ -n "$expects" ]]; then
        if [[ "$ret" == "$expects" ]]; then
            return 0
        fi
    fi
    return 1
}

function inject_eio() {
    local pooltype=$1
    local which=$2
    local poolname=$3
    local objname=$4
    local dir=$5
    local shard_id=$6

    local -a initial_osds=($(get_osds $poolname $objname))
    local osd_id=${initial_osds[$shard_id]}
    if [ "$pooltype" != "ec" ]; then
        shard_id=""
    fi
    set_config osd $osd_id filestore_debug_inject_read_err true || return 1
    local loop=0
    while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
             inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
        loop=$(expr $loop + 1)
        if [ $loop = "10" ]; then
            return 1
        fi
        sleep 1
    done
}

function multidiff() {
    if [ "$DIFFCOLOPTS" = "" ]; then
        diff $@
    else
        diff $DIFFCOLOPTS $@
    fi
}

# Local Variables:
# compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"
# End: