#!/usr/bin/env bash
#
# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
# Copyright (C) 2014 Federico Gimenez <fgimenez@coit.es>
#
# Author: Loic Dachary <loic@dachary.org>
# Author: Federico Gimenez <fgimenez@coit.es>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
: ${CEPH_BUILD_VIRTUALENV:=/tmp}

if type xmlstarlet > /dev/null 2>&1; then
    XMLSTARLET=xmlstarlet
elif type xml > /dev/null 2>&1; then
    XMLSTARLET=xml
else
    echo "Missing xmlstarlet binary!"
    exit 1
fi

if [ `uname` = FreeBSD ]; then
    KERNCORE="kern.corefile"
else
    termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
    if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
        termwidth="-W ${termwidth}"
    fi
    DIFFCOLOPTS="-y $termwidth"
    KERNCORE="kernel.core_pattern"
fi

if [ -n "$CEPH_LIB" ]; then
    EXTRA_OPTS+=" --erasure-code-dir $CEPH_LIB"
    EXTRA_OPTS+=" --plugin-dir $CEPH_LIB"
    EXTRA_OPTS+=" --osd-class-dir $CEPH_LIB"
fi

#! @file ceph-helpers.sh
#  @brief Toolbox to manage Ceph cluster dedicated to testing
#
#  Example use case:
#
#  ~~~~~~~~~~~~~~~~{.sh}
#  source ceph-helpers.sh
#
#  dir=mydir
#  # cleanup leftovers and reset mydir
#  setup $dir
#  # create a cluster with one monitor and three osds
#  run_mon $dir a
#  run_osd $dir 0
#  run_osd $dir 1
#  run_osd $dir 2
#  # put and get an object
#  rados --pool rbd put GROUP /etc/group
#  rados --pool rbd get GROUP /tmp/GROUP
#  # stop the cluster and cleanup the directory
#  teardown $dir
#  ~~~~~~~~~~~~~~~~
#
#  The focus is on simplicity and efficiency, in the context of
#  functional tests. The output is intentionally very verbose
#  and functions return as soon as an error is found. The caller
#  is also expected to abort on the first error so that debugging
#  can be done by looking at the end of the output.
#
#  Each function is documented, implemented and tested independently.
#  When modifying a helper, the test and the documentation are
#  expected to be updated and it is easier if they are collocated. A
#  test for a given function can be run with
#
#  ~~~~~~~~~~~~~~~~{.sh}
#  ceph-helpers.sh TESTS test_get_osds
#  ~~~~~~~~~~~~~~~~
#
#  and all the tests (i.e. all functions matching test_*) are run
#  with:
#
#  ~~~~~~~~~~~~~~~~{.sh}
#  ceph-helpers.sh TESTS
#  ~~~~~~~~~~~~~~~~
#
#  A test function takes a single argument : the directory dedicated
#  to the tests. It is expected to not create any file outside of this
#  directory and remove it entirely when it completes successfully.
#
function get_asok_dir() {
    if [ -n "$CEPH_ASOK_DIR" ]; then
        echo "$CEPH_ASOK_DIR"
    else
        echo ${TMPDIR:-/tmp}/ceph-asok.$$
    fi
}

function get_asok_path() {
    local name=$1
    if [ -n "$name" ]; then
        echo $(get_asok_dir)/ceph-$name.asok
    else
        echo $(get_asok_dir)/\$cluster-\$name.asok
    fi
}

# Cleanup any leftovers found in **dir** via **teardown**
# and reset **dir** as an empty environment.
#
# @param dir path name of the environment
# @return 0 on success, 1 on error
#
function setup() {
    local dir=$1
    teardown $dir || return 1
    mkdir -p $dir
    mkdir -p $(get_asok_dir)
}

function test_setup() {
    local dir=$1
    setup $dir || return 1
    test -d $dir || return 1
    setup $dir || return 1
    test -d $dir || return 1
    teardown $dir
}

#######################################################################

# Kill all daemons for which a .pid file exists in **dir** and remove
# **dir**. If the file system in which **dir** is stored is btrfs, delete
# all subvolumes that relate to it.
#
# @param dir path name of the environment
# @return 0 on success, 1 on error
#
function teardown() {
    local dir=$1
    local dumplogs=$2
    kill_daemons $dir KILL
    if [ `uname` != FreeBSD ] \
        && [ $(stat -f -c '%T' .) == "btrfs" ]; then
        __teardown_btrfs $dir
    fi
    local cores="no"
    local pattern="$(sysctl -n $KERNCORE)"
    # See if we have apport core handling
    if [ "${pattern:0:1}" = "|" ]; then
        # TODO: Where can we get the dumps?
        # Not sure where the dumps really are so this will look in the CWD
        pattern=""
    fi
    # Locally core files start with "core"; on teuthology they end with "core"
    if ls $(dirname "$pattern") | grep -q '^core\|core$' ; then
        cores="yes"
        if [ -n "$LOCALRUN" ]; then
            mkdir /tmp/cores.$$ 2> /dev/null || true
            for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
                mv $i /tmp/cores.$$
            done
        fi
    fi
    if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
        display_logs $dir
    fi
    rm -fr $dir
    rm -rf $(get_asok_dir)
    if [ "$cores" = "yes" ]; then
        echo "ERROR: Failure due to cores found"
        if [ -n "$LOCALRUN" ]; then
            echo "Find saved core files in /tmp/cores.$$"
        fi
        return 1
    fi
    return 0
}

function __teardown_btrfs() {
    local btrfs_base_dir=$1
    local btrfs_root=$(df -P . | tail -1 | awk '{print $NF}')
    local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list . -t | awk '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/$btrfs_dir")
    for subvolume in $btrfs_dirs; do
        sudo btrfs subvolume delete $btrfs_root/$subvolume
    done
}

function test_teardown() {
    local dir=$1
    setup $dir || return 1
    teardown $dir || return 1
    ! test -d $dir || return 1
}

#######################################################################

# Sends a signal to a single daemon.
# This is a helper function for kill_daemons
#
# After the daemon is sent **signal**, its actual termination
# will be verified by sending it signal 0. If the daemon is
# still alive, kill_daemon will pause for a few seconds and
# try again. This will repeat for a fixed number of times
# before kill_daemon returns on failure. The list of
# sleep intervals can be specified as **delays** and defaults
# to:
#
#  0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
#
# This sequence is designed to run first a very short sleep time (0.1)
# if the machine is fast enough and the daemon terminates in a fraction of a
# second. The increasing sleep numbers should give plenty of time for
# the daemon to die even on the slowest running machine. If a daemon
# takes more than a few minutes to stop (the sum of all sleep times),
# there probably is no point in waiting more and a number of things
# are likely to go wrong anyway: better give up and return on error.
#
# @param pid the process id to send a signal
# @param send_signal the signal to send
# @param delays sequence of sleep times before failure
#
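# Typical use (mirrors the tests below; $pidfile is the path of a
# daemon .pid file found in the test directory):
#
# ~~~~~~~~~~~~~~~~{.sh}
# kill_daemon $pidfile TERM        # graceful stop with the default delays
# kill_daemon $pidfile 0 1         # probe liveness only, wait at most 1 second
# ~~~~~~~~~~~~~~~~
#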
function kill_daemon() {
    local pid=$(cat $1)
    local send_signal=$2
    local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
    local exit_code=1
    for try in $delays ; do
        if kill -$send_signal $pid 2> /dev/null ; then
            exit_code=1
        else
            exit_code=0
            break
        fi
        send_signal=0
        sleep $try
    done
    return $exit_code
}

function test_kill_daemon() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1

    name_prefix=osd
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # sending signal 0 won't kill the daemon
        # waiting just for one second instead of the default schedule
        # allows us to quickly verify what happens when kill fails
        # to stop the daemon (i.e. it must return false)
        #
        ! kill_daemon $pidfile 0 1 || return 1
        #
        # killing just the osd and verify the mon still is responsive
        #
        kill_daemon $pidfile TERM || return 1
    done

    ceph osd dump | grep "osd.0 down" || return 1

    name_prefix=mgr
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        kill_daemon $pidfile TERM || return 1
    done

    name_prefix=mon
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mon and verify it cannot be reached
        #
        kill_daemon $pidfile TERM || return 1
        ! timeout 5 ceph status || return 1
    done

    teardown $dir || return 1
}

# Kill all daemons for which a .pid file exists in **dir**. Each
# daemon is sent a **signal** and kill_daemons waits for it to exit
# during a few minutes. By default all daemons are killed. If a
# **name_prefix** is provided, only the daemons for which a pid
# file is found matching the prefix are killed. See run_osd and
# run_mon for more information about the name conventions for
# the pid files.
#
# Send TERM to all daemons : kill_daemons $dir
# Send KILL to all daemons : kill_daemons $dir KILL
# Send KILL to all osds : kill_daemons $dir KILL osd
# Send KILL to osd 1 : kill_daemons $dir KILL osd.1
#
# If a daemon is sent the TERM signal and does not terminate
# within a few minutes, it will still be running even after
# kill_daemons returns.
#
# If all daemons are killed successfully the function returns 0;
# if at least one daemon remains, this is treated as an
# error and the function returns 1.
#
# @param dir path name of the environment
# @param signal name of the first signal (defaults to TERM)
# @param name_prefix only kill matching daemons (defaults to all)
# @param delays sequence of sleep times before failure
# @return 0 on success, 1 on error
#
function kill_daemons() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local dir=$1
    local signal=${2:-TERM}
    local name_prefix=$3 # optional, osd, mon, osd.1
    local delays=$4 # optional timing
    local status=0
    local pids=""

    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        run_in_background pids kill_daemon $pidfile $signal $delays
    done

    wait_background pids
    status=$?

    $trace && shopt -s -o xtrace
    return $status
}

function test_kill_daemons() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    #
    # sending signal 0 won't kill the daemon
    # waiting just for one second instead of the default schedule
    # allows us to quickly verify what happens when kill fails
    # to stop the daemon (i.e. it must return false)
    #
    ! kill_daemons $dir 0 osd 1 || return 1
    #
    # killing just the osd and verify the mon still is responsive
    #
    kill_daemons $dir TERM osd || return 1
    ceph osd dump | grep "osd.0 down" || return 1
    #
    # kill the mgr
    #
    kill_daemons $dir TERM mgr || return 1
    #
    # kill the mon and verify it cannot be reached
    #
    kill_daemons $dir TERM || return 1
    ! timeout 5 ceph status || return 1
    teardown $dir || return 1
}

#######################################################################

# Run a monitor by the name mon.**id** with data in **dir**/**id**.
# The logs can be found in **dir**/mon.**id**.log and the pid file
# is **dir**/mon.**id**.pid and the admin socket is
# **dir**/**id**/ceph-mon.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-mon --mkfs
# and the ceph-mon daemon.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_mon, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_mon $dir a # spawn a mon and bind port 7018
# run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
#
# If mon_initial_members is not set, the default rbd pool is deleted
# and replaced with a replicated pool with fewer placement groups to
# speed up initialization. If mon_initial_members is set, no attempt
# is made to recreate the rbd pool because it would hang forever,
# waiting for other mons to join.
#
# A **dir**/ceph.conf file is created but not meant to be used by any
# function. It is convenient for debugging a failure with:
#
#     ceph --conf **dir**/ceph.conf -s
#
# @param dir path name of the environment
# @param id mon identifier
# @param ... can be any option valid for ceph-mon
# @return 0 on success, 1 on error
#
function run_mon() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mon \
        --id $id \
        --mkfs \
        --mon-data=$data \
        --run-dir=$dir \
        "$@" || return 1

    ceph-mon \
        --id $id \
        --mon-osd-full-ratio=.99 \
        --mon-data-avail-crit=1 \
        --mon-data-avail-warn=5 \
        --paxos-propose-interval=0.1 \
        --osd-crush-chooseleaf-type=0 \
        $EXTRA_OPTS \
        --mon-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --mon-cluster-log-file=$dir/log \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        --mon-allow-pool-delete \
        --mon-osd-backfillfull-ratio .99 \
        "$@" || return 1

    cat > $dir/ceph.conf <<EOF
[global]
fsid = $(get_config mon $id fsid)
mon host = $(get_config mon $id mon_host)
EOF
}

function test_run_mon() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a --mon-initial-members=a || return 1
    create_rbd_pool || return 1
    # rbd has not been deleted / created, hence it has pool id 0
    ceph osd dump | grep "pool 1 'rbd'" || return 1
    kill_daemons $dir || return 1

    run_mon $dir a || return 1
    create_rbd_pool || return 1
    # rbd has been deleted / created, hence it does not have pool id 0
    ! ceph osd dump | grep "pool 1 'rbd'" || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"3"}' || return 1

    ! CEPH_ARGS='' ceph status || return 1
    CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1

    kill_daemons $dir || return 1

    run_mon $dir a --osd_pool_default_size=1 || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"1"}' || return 1
    kill_daemons $dir || return 1

    CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
        run_mon $dir a || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"2"}' || return 1
    kill_daemons $dir || return 1

    teardown $dir || return 1
}

function create_rbd_pool() {
    ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
    create_pool rbd $PG_NUM || return 1
}

function create_pool() {
    ceph osd pool create "$@"
}

function delete_pool() {
    local poolname=$1
    ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
}
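
# Example (illustrative; mypool and the PG count are arbitrary):
# create_pool forwards its arguments verbatim to "ceph osd pool create".
#
# ~~~~~~~~~~~~~~~~{.sh}
# create_pool mypool 8 || return 1
# delete_pool mypool
# ~~~~~~~~~~~~~~~~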

#######################################################################

function run_mgr() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mgr \
        --id $id \
        $EXTRA_OPTS \
        --debug-objecter 20 \
        --chdir= \
        --mgr-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        "$@" || return 1
}

#######################################################################

# Create (prepare) and run (activate) an osd by the name osd.**id**
# with data in **dir**/**id**. The logs can be found in
# **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
# the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The run_osd function creates the OSD data directory with ceph-disk
# prepare on the **dir**/**id** directory and relies on the
# activate_osd function to run the daemon.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function run_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    mkdir -p $osd_data
    ceph-disk $ceph_disk_args \
        prepare --filestore $osd_data || return 1

    activate_osd $dir $id "$@"
}

function run_osd_bluestore() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    mkdir -p $osd_data
    ceph-disk $ceph_disk_args \
        prepare --bluestore $osd_data || return 1

    activate_osd $dir $id "$@"
}

function test_run_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    run_osd $dir 1 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"30"}' || return 1

    teardown $dir || return 1
}

#######################################################################

# Shutdown and remove all traces of the osd by the name osd.**id**.
#
# The OSD is shutdown with the TERM signal. It is then removed from
# the auth list, crush map, osd map etc and the files associated with
# it are also removed.
#
# @param dir path name of the environment
# @param id osd identifier
# @return 0 on success, 1 on error
#
function destroy_osd() {
    local dir=$1
    local id=$2

    ceph osd out osd.$id || return 1
    kill_daemons $dir TERM osd.$id || return 1
    ceph osd purge osd.$id --yes-i-really-mean-it || return 1
    teardown $dir/$id || return 1
}

function test_destroy_osd() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    destroy_osd $dir 0 || return 1
    ! ceph osd dump | grep "osd.0 " || return 1
    teardown $dir || return 1
}

#######################################################################

# Run (activate) an osd by the name osd.**id** with data in
# **dir**/**id**. The logs can be found in **dir**/osd.**id**.log,
# the pid file is **dir**/osd.**id**.pid and the admin socket is
# **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to activate_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The activate_osd function expects a valid OSD data directory
# in **dir**/**id**, either just created via run_osd or re-using
# one left by a previous run of ceph-osd. The ceph-osd daemon is
# run indirectly via ceph-disk activate.
#
# The activate_osd function blocks until the monitor reports the osd
# up. If it fails to do so within $TIMEOUT seconds, activate_osd
# fails.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# activate_osd $dir 0 # activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function activate_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len 460"
    ceph_args+=" --osd-max-object-namespace-len 64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features *"

    CEPH_ARGS="$ceph_args " ceph-disk $ceph_disk_args \
        activate \
        --mark-init=none \
        $osd_data || return 1

    [ "$id" = "$(cat $osd_data/whoami)" ] || return 1

    wait_for_osd up $id || return 1
}

function test_activate_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    kill_daemons $dir TERM osd || return 1

    activate_osd $dir 0 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    teardown $dir || return 1
}

#######################################################################

# Wait until the OSD **id** is either up or down, as specified by
# **state**. It fails after $TIMEOUT seconds.
#
# @param state either up or down
# @param id osd identifier
# @return 0 on success, 1 on error
#
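# Typical use (mirrors the test below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_osd up 0 || return 1          # wait for osd.0 to come up
# ( TIMEOUT=1 ; ! wait_for_osd up 0 )    # give up after one second
# ~~~~~~~~~~~~~~~~
#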
function wait_for_osd() {
    local state=$1
    local id=$2

    status=1
    for ((i=0; i < $TIMEOUT; i++)); do
        if ! ceph osd dump | grep "osd.$id $state"; then
            sleep 1
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_for_osd() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_osd up 0 || return 1
    kill_daemons $dir TERM osd || return 1
    wait_for_osd down 0 || return 1
    ( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
    teardown $dir || return 1
}

#######################################################################

# Display the list of OSD ids supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT white space separated list of OSD ids
# @return 0 on success, 1 on error
#
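# Example (illustrative; with a 2-replica pool and two OSDs the output
# is a pair of ids such as "0 1"):
#
# ~~~~~~~~~~~~~~~~{.sh}
# get_osds rbd GROUP
# ~~~~~~~~~~~~~~~~
#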
function get_osds() {
    local poolname=$1
    local objectname=$2

    local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting | .[]')
    # get rid of the trailing space
    echo $osds
}

function test_get_osds() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    create_rbd_pool || return 1
    get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
    teardown $dir || return 1
}

#######################################################################

# Wait for the monitor to form quorum (optionally, of size N)
#
# @param timeout duration (lower-bound) to wait for quorum to be formed
# @param quorumsize size of quorum to wait for
# @return 0 on success, 1 on error
#
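# Example (illustrative): wait at most 300 seconds for a quorum of 3
# monitors to form:
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_quorum 300 3 || return 1
# ~~~~~~~~~~~~~~~~
#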
function wait_for_quorum() {
    local timeout=$1
    local quorumsize=$2

    if [[ -z "$timeout" ]]; then
        timeout=300
    fi

    if [[ -z "$quorumsize" ]]; then
        timeout $timeout ceph mon_status --format=json >&/dev/null || return 1
        return 0
    fi

    no_quorum=1
    wait_until=$((`date +%s` + $timeout))
    while [[ $(date +%s) -lt $wait_until ]]; do
        jqfilter='.quorum | length == '$quorumsize
        jqinput="$(timeout $timeout ceph mon_status --format=json 2>/dev/null)"
        res=$(echo $jqinput | jq "$jqfilter")
        if [[ "$res" == "true" ]]; then
            no_quorum=0
            break
        fi
    done
    return $no_quorum
}

#######################################################################

# Return the PG supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the PG id
# @return 0 on success, 1 on error
#
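# Example (illustrative; the output is a PG id such as "1.2a"):
#
# ~~~~~~~~~~~~~~~~{.sh}
# get_pg rbd GROUP
# ~~~~~~~~~~~~~~~~
#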
function get_pg() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
}

function test_get_pg() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the value of the **config**, obtained via the config get command
# of the admin socket of **daemon**.**id**.
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param STDOUT the config value
# @return 0 on success, 1 on error
#
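# Example (mirrors the test below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# test $(get_config mon a osd_pool_default_size) = 1 || return 1
# test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
# ~~~~~~~~~~~~~~~~
#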
function get_config() {
    local daemon=$1
    local id=$2
    local config=$3

    CEPH_ARGS='' \
        ceph --format json daemon $(get_asok_path $daemon.$id) \
            config get $config 2> /dev/null | \
        jq -r ".$config"
}

function test_get_config() {
    local dir=$1

    # override the default config using command line arg and check it
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    test $(get_config mon a osd_pool_default_size) = 1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 --osd_max_scrubs=3 || return 1
    test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
    teardown $dir || return 1
}

#######################################################################

# Set the **config** to specified **value**, via the config set command
# of the admin socket of **daemon**.**id**
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param value the config value
# @return 0 on success, 1 on error
#
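# Example (mirrors the test below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# set_config mon a ms_crc_header false || return 1
# test $(get_config mon a ms_crc_header) = false || return 1
# ~~~~~~~~~~~~~~~~
#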
function set_config() {
    local daemon=$1
    local id=$2
    local config=$3
    local value=$4

    test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
               config set $config $value 2> /dev/null | \
           jq 'has("success")') == true
}

function test_set_config() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    set_config mon a ms_crc_header false || return 1
    test $(get_config mon a ms_crc_header) = false || return 1
    set_config mon a ms_crc_header true || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the OSD id of the primary OSD supporting the **objectname**
# stored in **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the primary OSD id
# @return 0 on success, 1 on error
#
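# Example (mirrors the test below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# local primary=$(get_primary rbd GROUP)
# ~~~~~~~~~~~~~~~~
#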
function get_primary() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting_primary'
}

function test_get_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    test $(get_primary rbd GROUP) = $osd || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the id of any OSD supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map, except the primary.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the OSD id
# @return 0 on success, 1 on error
#
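# Example (mirrors the test below): with a 2-replica pool the result
# is the one OSD of the acting set that is not the primary:
#
# ~~~~~~~~~~~~~~~~{.sh}
# local primary=$(get_primary rbd GROUP)
# local not_primary=$(get_not_primary rbd GROUP)
# test $not_primary != $primary || return 1
# ~~~~~~~~~~~~~~~~
#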
function get_not_primary() {
    local poolname=$1
    local objectname=$2

    local primary=$(get_primary $poolname $objectname)
    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq ".acting | map(select (. != $primary)) | .[0]"
}

function test_get_not_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local primary=$(get_primary rbd GROUP)
    local not_primary=$(get_not_primary rbd GROUP)
    test $not_primary != $primary || return 1
    test $not_primary = 0 -o $not_primary = 1 || return 1
    teardown $dir || return 1
}

#######################################################################

# Run ceph-objectstore-tool against the OSD **id** using the data path
# **dir**. The OSD is killed with TERM prior to running
# ceph-objectstore-tool because access to the data path is
# exclusive. The OSD is restarted after the command completes. The
# objectstore_tool returns after all PG are active+clean again.
#
# @param dir the data path of the OSD
# @param id the OSD id
# @param ... arguments to ceph-objectstore-tool
# @param STDIN the input of ceph-objectstore-tool
# @param STDOUT the output of ceph-objectstore-tool
# @return 0 on success, 1 on error
#
# The value of $ceph_osd_args will be passed to restarted osds
#
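# Example (mirrors the test below): read the payload of object GROUP
# from osd.0 and compare it to the original file:
#
# ~~~~~~~~~~~~~~~~{.sh}
# objectstore_tool $dir 0 GROUP get-bytes | diff - /etc/group || return 1
# ~~~~~~~~~~~~~~~~
#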
function objectstore_tool() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local osd_type=$(cat $osd_data/type)

    kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1

    local journal_args
    if [ "$osd_type" == "filestore" ]; then
        journal_args=" --journal-path $osd_data/journal"
    fi
    ceph-objectstore-tool \
        --data-path $osd_data \
        $journal_args \
        "$@" || return 1
    activate_osd $dir $id $ceph_osd_args >&2 || return 1
    wait_for_clean >&2
}

function test_objectstore_tool() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    rados --pool rbd put GROUP /etc/group || return 1
    objectstore_tool $dir $osd GROUP get-bytes | \
        diff - /etc/group || return 1
    ! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
    teardown $dir || return 1
}

#######################################################################

# Predicate checking if there is an ongoing recovery in the
# cluster. If any of the recovering_{keys,bytes,objects}_per_sec
# counters are reported by ceph status, it means recovery is in
# progress.
#
# @return 0 if recovery in progress, 1 otherwise
#
function get_is_making_recovery_progress() {
    local recovery_progress
    recovery_progress+=".recovering_keys_per_sec + "
    recovery_progress+=".recovering_bytes_per_sec + "
    recovery_progress+=".recovering_objects_per_sec"
    local progress=$(ceph --format json status 2>/dev/null | \
                     jq -r ".pgmap | $recovery_progress")
    # in jq, adding null operands yields null, so the sum is null
    # only when none of the counters are present
    test "$progress" != null
}

function test_get_is_making_recovery_progress() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    ! get_is_making_recovery_progress || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the number of active PGs in the cluster. A PG is active if
# ceph pg dump pgs reports it both **active** and **clean** and that
# it is not **stale**.
#
# @param STDOUT the number of active PGs
# @return 0 on success, 1 on error
#
function get_num_active_clean() {
    local expression
    expression+="select(contains(\"active\") and contains(\"clean\")) | "
    expression+="select(contains(\"stale\") | not)"
    ceph --format json pg dump pgs 2>/dev/null | \
        jq "[.[] | .state | $expression] | length"
}

function test_get_num_active_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_active_clean=$(get_num_active_clean)
    test "$num_active_clean" = $PG_NUM || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the number of PGs in the cluster, according to ceph status.
#
# @param STDOUT the number of PGs
# @return 0 on success, 1 on error
#
function get_num_pgs() {
    ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
}

function test_get_num_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_pgs=$(get_num_pgs)
    test "$num_pgs" -gt 0 || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the OSD ids in use by at least one PG in the cluster (either
# in the up or the acting set), according to ceph pg dump pgs. Every
# OSD id shows as many times as they are used in up and acting sets.
# If an OSD id is in both the up and acting set of a given PG, it will
# show twice.
#
# @param STDOUT a sorted list of OSD ids
# @return 0 on success, 1 on error
#
function get_osd_id_used_by_pgs() {
    ceph --format json pg dump pgs 2>/dev/null | jq '.[] | .up[], .acting[]' | sort
}

function test_get_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local osd_ids=$(get_osd_id_used_by_pgs | uniq)
    test "$osd_ids" = "0" || return 1
    teardown $dir || return 1
}

#######################################################################

# Wait until the OSD **id** shows **count** times in the
# PGs (see get_osd_id_used_by_pgs for more information about
# how OSD ids are counted).
#
# @param id the OSD id
# @param count the number of times it must show in the PGs
# @return 0 on success, 1 on error
#
function wait_osd_id_used_by_pgs() {
    local id=$1
    local count=$2

    status=1
    for ((i=0; i < $TIMEOUT / 5; i++)); do
        if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then
            sleep 5
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    wait_osd_id_used_by_pgs 0 8 || return 1
    ! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the date and time of the last completed scrub for **pgid**,
# as reported by ceph pg dump pgs. Note that a repair also sets this
# timestamp.
#
# @param pgid the id of the PG
# @param STDOUT the date and time of the last scrub
# @return 0 on success, 1 on error
#
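# Example (illustrative; the optional second argument selects the
# stamp field and defaults to last_scrub_stamp):
#
# ~~~~~~~~~~~~~~~~{.sh}
# stamp=$(get_last_scrub_stamp 1.0)
# deep_stamp=$(get_last_scrub_stamp 1.0 last_deep_scrub_stamp)
# ~~~~~~~~~~~~~~~~
#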
function get_last_scrub_stamp() {
    local pgid=$1
    local sname=${2:-last_scrub_stamp}
    ceph --format json pg dump pgs 2>/dev/null | \
        jq -r ".[] | select(.pgid==\"$pgid\") | .$sname"
}

function test_get_last_scrub_stamp() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    stamp=$(get_last_scrub_stamp 1.0)
    test -n "$stamp" || return 1
    teardown $dir || return 1
}

#######################################################################

# Predicate checking if the cluster is clean, i.e. all of its PGs are
# in a clean state (see get_num_active_clean for a definition).
#
# @return 0 if the cluster is clean, 1 otherwise
#
function is_clean() {
    num_pgs=$(get_num_pgs)
    test $num_pgs != 0 || return 1
    test $(get_num_active_clean) = $num_pgs || return 1
}

function test_is_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    is_clean || return 1
    teardown $dir || return 1
}

#######################################################################

calc() { awk "BEGIN{print $*}"; }

# Return a list of numbers that are increasingly larger and whose
# total is **timeout** seconds. It can be used to have short sleep
# delays while waiting for an event on a fast machine. But if running
# very slowly, the larger delays avoid stressing the machine even
# further or spamming the logs.
#
# @param timeout sum of all delays, in seconds
# @return a list of sleep delays
#
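# For example "get_timeout_delays 7" prints "1 2 4 " (each delay
# doubles), and "get_timeout_delays 8" prints "1 2 4 1 ": the next
# doubled step (8) would overshoot, so the remainder (8 - 7 = 1) is
# appended to preserve the total.
#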
function get_timeout_delays() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local timeout=$1
    local first_step=${2:-1}

    local i
    local total="0"
    i=$first_step
    while test "$(calc $total + $i \<= $timeout)" = "1"; do
        echo -n "$(calc $i) "
        total=$(calc $total + $i)
        i=$(calc $i \* 2)
    done
    if test "$(calc $total \< $timeout)" = "1"; then
        echo -n "$(calc $timeout - $total) "
    fi
    $trace && shopt -s -o xtrace
}

function test_get_timeout_delays() {
    test "$(get_timeout_delays 1)" = "1 " || return 1
    test "$(get_timeout_delays 5)" = "1 2 2 " || return 1
    test "$(get_timeout_delays 6)" = "1 2 3 " || return 1
    test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
    test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1
    test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1
    test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1
    test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1
    test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1
    test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1
    test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1
}

#######################################################################

# Wait until the cluster becomes clean or fail if it does not make
# progress for $TIMEOUT seconds.
# Progress is measured either via the **get_is_making_recovery_progress**
# predicate or if the number of clean PGs changes (as returned by get_num_active_clean)
#
# @return 0 if the cluster is clean, 1 otherwise
#
function wait_for_clean() {
    local num_active_clean=-1
    local cur_active_clean
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0

    flush_pg_stats || return 1
    while test $(get_num_pgs) == 0 ; do
        sleep 1
    done

    while true ; do
        # Comparing get_num_active_clean & get_num_pgs is used to determine
        # if the cluster is clean. That's almost an inline of is_clean() to
        # get more performance by avoiding multiple calls of get_num_active_clean.
        cur_active_clean=$(get_num_active_clean)
        test $cur_active_clean = $(get_num_pgs) && break
        if test $cur_active_clean != $num_active_clean ; then
            loop=0
            num_active_clean=$cur_active_clean
        elif get_is_making_recovery_progress ; then
            loop=0
        elif (( $loop >= ${#delays[*]} )) ; then
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
    return 0
}

function test_wait_for_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    create_rbd_pool || return 1
    ! TIMEOUT=1 wait_for_clean || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    teardown $dir || return 1
}

#######################################################################

# Wait until the cluster becomes HEALTH_OK again or fail if it does not
# make progress for $TIMEOUT seconds.
#
# @return 0 if the cluster is HEALTHY, 1 otherwise
#
function wait_for_health() {
    local grepstr=$1
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0

    while ! ceph health detail | grep "$grepstr" ; do
        if (( $loop >= ${#delays[*]} )) ; then
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
}

function wait_for_health_ok() {
    wait_for_health "HEALTH_OK" || return 1
}

function test_wait_for_health_ok() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
    run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
    run_osd $dir 0 || return 1
    kill_daemons $dir TERM osd || return 1
    ! TIMEOUT=1 wait_for_health_ok || return 1
    activate_osd $dir 0 || return 1
    wait_for_health_ok || return 1
    teardown $dir || return 1
}

#######################################################################

# Run repair on **pgid** and wait until it completes. The repair
# function will fail if repair does not complete within $TIMEOUT
# seconds.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
function repair() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph pg repair $pgid
    wait_for_scrub $pgid "$last_scrub"
}

function test_repair() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    repair 1.0 || return 1
    kill_daemons $dir KILL osd || return 1
    ! TIMEOUT=1 repair 1.0 || return 1
    teardown $dir || return 1
}

#######################################################################

# Run scrub on **pgid** and wait until it completes. The pg_scrub
# function will fail if scrub does not complete within $TIMEOUT
# seconds. The pg_scrub is complete whenever the
# **get_last_scrub_stamp** function reports a timestamp different from
# the one stored before starting the scrub.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
function pg_scrub() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph pg scrub $pgid
    wait_for_scrub $pgid "$last_scrub"
}

function pg_deep_scrub() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
    ceph pg deep-scrub $pgid
    wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
}

function test_pg_scrub() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    pg_scrub 1.0 || return 1
    kill_daemons $dir KILL osd || return 1
    ! TIMEOUT=1 pg_scrub 1.0 || return 1
    teardown $dir || return 1
}

#######################################################################

# Run the *command* and expect it to fail (i.e. return a non zero status).
# The output (stderr and stdout) is stored in a temporary file in *dir*
# and is expected to contain the string *expected*.
#
# Return 0 if the command failed and the string was found. Otherwise
# return 1 and cat the full output of the command on stderr for debug.
#
# @param dir temporary directory to store the output
# @param expected string to look for in the output
# @param command ... the command and its arguments
# @return 0 on success, 1 on error
#
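# Example (mirrors the test below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
# ~~~~~~~~~~~~~~~~
#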
function expect_failure() {
    local dir=$1
    shift
    local expected="$1"
    shift
    local success

    if "$@" > $dir/out 2>&1 ; then
        success=true
    else
        success=false
    fi

    if $success || ! grep --quiet "$expected" $dir/out ; then
        cat $dir/out >&2
        return 1
    else
        return 0
    fi
}

function test_expect_failure() {
    local dir=$1

    setup $dir || return 1
    expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
    # the command did not fail
    ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
    grep --quiet FAIL $dir/out || return 1
    # the command failed but the output does not contain the expected string
    ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
    ! grep --quiet FAIL $dir/out || return 1
    teardown $dir || return 1
}

#######################################################################

# Given the *last_scrub*, wait for scrub to happen on **pgid**. It
# will fail if scrub does not complete within $TIMEOUT seconds. The
# scrub is complete whenever the **get_last_scrub_stamp** function
# reports a timestamp different from the one given in argument.
#
# @param pgid the id of the PG
# @param last_scrub timestamp of the last scrub for *pgid*
# @return 0 on success, 1 on error
#
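# Example (mirrors repair() above and the test below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# local last_scrub=$(get_last_scrub_stamp $pgid)
# ceph pg repair $pgid
# wait_for_scrub $pgid "$last_scrub" || return 1
# ~~~~~~~~~~~~~~~~
#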
function wait_for_scrub() {
    local pgid=$1
    local last_scrub="$2"
    local sname=${3:-last_scrub_stamp}

    for ((i=0; i < $TIMEOUT; i++)); do
        if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
            return 0
        fi
        sleep 1
    done
    return 1
}

function test_wait_for_scrub() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local pgid=1.0
    ceph pg repair $pgid
    local last_scrub=$(get_last_scrub_stamp $pgid)
    wait_for_scrub $pgid "$last_scrub" || return 1
    kill_daemons $dir KILL osd || return 1
    last_scrub=$(get_last_scrub_stamp $pgid)
    ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
    teardown $dir || return 1
}

#######################################################################

# Return 0 if the erasure code *plugin* is available, 1 otherwise.
#
# @param plugin erasure code plugin
# @return 0 on success, 1 on error
#
function erasure_code_plugin_exists() {
    local plugin=$1
    local status
    local grepstr
    local s
    case `uname` in
        FreeBSD) grepstr="Cannot open.*$plugin" ;;
        *) grepstr="$plugin.*No such file" ;;
    esac

    s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
    status=$?
    if [ $status -eq 0 ]; then
        ceph osd erasure-code-profile rm TESTPROFILE
    elif ! echo $s | grep --quiet "$grepstr" ; then
        status=1
        # display why the string was rejected.
        echo $s
    fi
    return $status
}

function test_erasure_code_plugin_exists() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    erasure_code_plugin_exists jerasure || return 1
    ! erasure_code_plugin_exists FAKE || return 1
    teardown $dir || return 1
}

#######################################################################

# Display all log files from **dir** on stdout.
#
# @param dir directory in which all data is stored
#
function display_logs() {
    local dir=$1

    find $dir -maxdepth 1 -name '*.log' | \
        while read file ; do
            echo "======================= $file"
            cat $file
        done
}

function test_display_logs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    kill_daemons $dir || return 1
    display_logs $dir > $dir/log.out
    grep --quiet mon.a.log $dir/log.out || return 1
    teardown $dir || return 1
}

#######################################################################

# Spawn a command in background and save the pid in the variable name
# passed in argument. To make the output reading easier, the output is
# prepended with the process id.
#
# Example:
#   run_in_background pids1 bash -c 'sleep 1; exit 1'
#
# @param pid_variable the variable name (not value) where the pids will be stored
# @param ... the command to execute
# @return only the pid_variable output should be considered and used with **wait_background**
#
function run_in_background() {
    local pid_variable=$1
    shift
    # Execute the command and prepend the output with its pid
    # We enforce to return the exit status of the command and not the sed one.
    ("$@" |& sed 's/^/'$$': /'; return "${PIPESTATUS[0]}") >&2 &
    eval "$pid_variable+=\" $!\""
}

function save_stdout {
    local out="$1"
    shift
    "$@" > "$out"
}

function test_run_in_background() {
    local pids
    run_in_background pids sleep 1
    run_in_background pids sleep 1
    test $(echo $pids | wc -w) = 2 || return 1
    wait $pids || return 1
}

#######################################################################

# Wait for pids running in background to complete.
# This function is usually used after a **run_in_background** call.
# Example:
#   run_in_background pids1 bash -c 'sleep 1; exit 1'
#   wait_background pids1
#
# @param pids The variable name that contains the active PIDS. Set as empty at the end of the function.
# @return 1 if at least one process exits in error, 0 otherwise
#
function wait_background() {
    # We extract the PIDS from the variable name
    pids=${!1}

    return_code=0
    for pid in $pids; do
        if ! wait $pid; then
            # If one process failed then return 1
            return_code=1
        fi
    done

    # We empty the variable reporting that all process ended
    eval "$1=''"

    return $return_code
}

function test_wait_background() {
    local pids=""
    run_in_background pids bash -c "sleep 1; exit 1"
    run_in_background pids bash -c "sleep 2; exit 0"
    wait_background pids
    if [ $? -ne 1 ]; then return 1; fi

    run_in_background pids bash -c "sleep 1; exit 0"
    run_in_background pids bash -c "sleep 2; exit 0"
    wait_background pids
    if [ $? -ne 0 ]; then return 1; fi

    if [ ! -z "$pids" ]; then return 1; fi
}

function flush_pg_stats()
{
    local timeout=${1:-$TIMEOUT}

    ids=`ceph osd ls`
    seqs=''
    for osd in $ids; do
        seq=`ceph tell osd.$osd flush_pg_stats`
        seqs="$seqs $osd-$seq"
    done

    for s in $seqs; do
        osd=`echo $s | cut -d - -f 1`
        seq=`echo $s | cut -d - -f 2`
        echo "waiting osd.$osd seq $seq"
        while test $(ceph osd last-stat-seq $osd) -lt $seq; do
            sleep 1
            if [ $((timeout--)) -eq 0 ]; then
                return 1
            fi
        done
    done
}

function test_flush_pg_stats()
{
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    rados -p rbd put obj /etc/group
    flush_pg_stats || return 1
    local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
    raw_bytes_used=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
    bytes_used=`ceph df detail --format=json | jq "$jq_filter.bytes_used"`
    test $raw_bytes_used -gt 0 || return 1
    test $raw_bytes_used == $bytes_used || return 1
    teardown $dir
}

#######################################################################

# Call the **run** function (which must be defined by the caller) with
# the **dir** argument followed by the caller argument list.
#
# If the **run** function returns on error, all logs found in **dir**
# are displayed for diagnostic purposes.
#
# **teardown** function is called when the **run** function returns
# (on success or on error), to cleanup leftovers. The CEPH_CONF is set
# to /dev/null and CEPH_ARGS is unset so that the tests are protected from
# external interferences.
#
# It is the responsibility of the **run** function to call the
# **setup** function to prepare the test environment (create a temporary
# directory etc.).
#
# The shell is required (via PS4) to display the function and line
# number whenever a statement is executed to help debugging.
#
# @param dir directory in which all data is stored
# @param ... arguments passed transparently to **run**
# @return 0 on success, 1 on error
#
function main() {
    local dir=$1
    shift

    shopt -s -o xtrace
    PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

    export PATH=${CEPH_BUILD_VIRTUALENV}/ceph-disk-virtualenv/bin:${CEPH_BUILD_VIRTUALENV}/ceph-detect-init-virtualenv/bin:.:$PATH # make sure program from sources are preferred
    #export PATH=$CEPH_ROOT/src/ceph-disk/virtualenv/bin:$CEPH_ROOT/src/ceph-detect-init/virtualenv/bin:.:$PATH # make sure program from sources are preferred

    export CEPH_CONF=/dev/null
    unset CEPH_ARGS

    local code
    if run $dir "$@" ; then
        code=0
    else
        code=1
    fi
    teardown $dir $code || return 1
    return $code
}

#######################################################################

function run_tests() {
    shopt -s -o xtrace
    PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

    export PATH=${CEPH_BUILD_VIRTUALENV}/ceph-disk-virtualenv/bin:${CEPH_BUILD_VIRTUALENV}/ceph-detect-init-virtualenv/bin:.:$PATH # make sure program from sources are preferred
    #export PATH=$CEPH_ROOT/src/ceph-disk/virtualenv/bin:$CEPH_ROOT/src/ceph-detect-init/virtualenv/bin:.:$PATH # make sure program from sources are preferred

    export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    export CEPH_CONF=/dev/null

    local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
    local dir=td/ceph-helpers

    for func in $funcs ; do
        if ! $func $dir; then
            teardown $dir
            return 1
        fi
    done
}
if test "$1" = TESTS ; then
    shift
    run_tests "$@"
    exit $?
fi

# jq only supports --exit-status|-e from version 1.4 forwards, which makes
# returning on error waaaay prettier and straightforward.
# However, the current automated upstream build is running with v1.3,
# which has no idea what -e is. Hence the convoluted error checking we
# need.
# The next time someone changes this code, please check if v1.4 is now
# a thing, and, if so, please change these to use -e. Thanks.
#
# jq '.all.supported | select([.[] == "foo"] | any)'
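#
# Example use of the helper below (illustrative):
#
# ~~~~~~~~~~~~~~~~{.sh}
# jq_success "$(ceph osd dump --format=json)" '.osds | length > 0' || return 1
# ~~~~~~~~~~~~~~~~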
function jq_success() {
    input="$1"
    filter="$2"
    expects="$3"

    in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g")
    filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g")

    ret=$(echo "$in_escaped" | jq "$filter_escaped")
    if [[ "$ret" == "true" ]]; then
        return 0
    elif [[ -n "$expects" ]]; then
        if [[ "$ret" == "$expects" ]]; then
            return 0
        fi
    fi
    return 1
}

function inject_eio() {
    local pooltype=$1
    shift
    local which=$1
    shift
    local poolname=$1
    shift
    local objname=$1
    shift
    local dir=$1
    shift
    local shard_id=$1
    shift

    local -a initial_osds=($(get_osds $poolname $objname))
    local osd_id=${initial_osds[$shard_id]}
    if [ "$pooltype" != "ec" ]; then
        shard_id=""
    fi
    set_config osd $osd_id filestore_debug_inject_read_err true || return 1
    local loop=0
    while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
             inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
        loop=$(expr $loop + 1)
        if [ $loop = "10" ]; then
            return 1
        fi
        sleep 1
    done
}

# compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"