# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
# Copyright (C) 2014 Federico Gimenez <fgimenez@coit.es>
#
# Author: Loic Dachary <loic@dachary.org>
# Author: Federico Gimenez <fgimenez@coit.es>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
TIMEOUT=300
WAIT_FOR_CLEAN_TIMEOUT=90
MAX_TIMEOUT=300
PG_NUM=4
TMPDIR=${TMPDIR:-/tmp}
CEPH_BUILD_VIRTUALENV=${TMPDIR}
TESTDIR=${TESTDIR:-${TMPDIR}}
if type xmlstarlet > /dev/null 2>&1; then
    XMLSTARLET=xmlstarlet
elif type xml > /dev/null 2>&1; then
    XMLSTARLET=xml
else
    echo "Missing xmlstarlet binary!"
    exit 1
fi
if [ `uname` = FreeBSD ]; then
    SED=gsed
    AWK=gawk
    DIFFCOLOPTS=""
    KERNCORE="kern.corefile"
else
    SED=sed
    AWK=awk
    termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
    if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
        termwidth="-W ${termwidth}"
    fi
    DIFFCOLOPTS="-y $termwidth"
    KERNCORE="kernel.core_pattern"
fi
#! @file ceph-helpers.sh
#  @brief Toolbox to manage Ceph cluster dedicated to testing
#
#  Example use case:
#
#  ~~~~~~~~~~~~~~~~{.sh}
#  source ceph-helpers.sh
#
#  dir=mydir
#  # cleanup leftovers and reset mydir
#  setup $dir
#  # create a cluster with one monitor and three osds
#  run_mon $dir a
#  run_osd $dir 0
#  run_osd $dir 1
#  run_osd $dir 2
#  # put and get an object
#  rados --pool rbd put GROUP /etc/group
#  rados --pool rbd get GROUP /tmp/GROUP
#  # stop the cluster and cleanup the directory
#  kill_daemons $dir
#  teardown $dir
#  ~~~~~~~~~~~~~~~~
#
#  The focus is on simplicity and efficiency, in the context of
#  functional tests. The output is intentionally very verbose
#  and functions return as soon as an error is found. The caller
#  is also expected to abort on the first error so that debugging
#  can be done by looking at the end of the output.
#
#  Each function is documented, implemented and tested independently.
#  When modifying a helper, the test and the documentation are
#  expected to be updated and it is easier if they are collocated. A
#  test for a given function can be run with
#
#  ~~~~~~~~~~~~~~~~{.sh}
#    ceph-helpers.sh TESTS test_get_osds
#  ~~~~~~~~~~~~~~~~
#
#  and all the tests (i.e. all functions matching test_*) are run
#  with:
#
#  ~~~~~~~~~~~~~~~~{.sh}
#    ceph-helpers.sh TESTS
#  ~~~~~~~~~~~~~~~~
#
#  A test function takes a single argument: the directory dedicated
#  to the tests. It is expected to not create any file outside of this
#  directory and remove it entirely when it completes successfully.
#
function get_asok_dir() {
    if [ -n "$CEPH_ASOK_DIR" ]; then
        echo "$CEPH_ASOK_DIR"
    else
        echo ${TMPDIR:-/tmp}/ceph-asok.$$
    fi
}

function get_asok_path() {
    local name=$1
    if [ -n "$name" ]; then
        echo $(get_asok_dir)/ceph-$name.asok
    else
        echo $(get_asok_dir)/\$cluster-\$name.asok
    fi
}

# Cleanup any leftovers found in **dir** via **teardown**
# and reset **dir** as an empty environment.
#
# @param dir path name of the environment
# @return 0 on success, 1 on error
#
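# For illustration, the usual pattern (mydir is an arbitrary test
# directory):
#
# ~~~~~~~~~~~~~~~~{.sh}
# setup mydir || return 1
# # ... start daemons and run tests against mydir ...
# teardown mydir || return 1
# ~~~~~~~~~~~~~~~~
#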
function setup() {
    local dir=$1
    teardown $dir || return 1
    mkdir -p $dir
    mkdir -p $(get_asok_dir)
    if [ $(ulimit -n) -le 1024 ]; then
        ulimit -n 4096 || return 1
    fi
    if [ -z "$LOCALRUN" ]; then
        trap "teardown $dir 1" TERM HUP INT
    fi
}

function test_setup() {
    local dir=$1
    setup $dir || return 1
    test -d $dir || return 1
    setup $dir || return 1
    test -d $dir || return 1
    teardown $dir
}

#######################################################################

# Kill all daemons for which a .pid file exists in **dir** and remove
# **dir**. If the file system in which **dir** is stored is btrfs, delete
# all subvolumes that relate to it.
#
# @param dir path name of the environment
# @param dumplogs pass "1" to dump the logs, otherwise they are only dumped if cores are found
# @return 0 on success, 1 on error
#
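# For illustration:
#
# ~~~~~~~~~~~~~~~~{.sh}
# teardown mydir      # dump logs only if cores are found
# teardown mydir 1    # always dump the logs before removing mydir
# ~~~~~~~~~~~~~~~~
#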
function teardown() {
    local dir=$1
    local dumplogs=$2
    kill_daemons $dir KILL
    if [ `uname` != FreeBSD ] \
        && [ $(stat -f -c '%T' .) == "btrfs" ]; then
        __teardown_btrfs $dir
    fi
    local cores="no"
    local pattern="$(sysctl -n $KERNCORE)"
    # See if we have apport core handling
    if [ "${pattern:0:1}" = "|" ]; then
        # TODO: Where can we get the dumps?
        # Not sure where the dumps really are so this will look in the CWD
        pattern=""
    fi
    # Locally core files start with "core"; on teuthology they end with "core"
    if ls $(dirname "$pattern") | grep -q '^core\|core$' ; then
        cores="yes"
        if [ -n "$LOCALRUN" ]; then
            mkdir /tmp/cores.$$ 2> /dev/null || true
            for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
                mv $i /tmp/cores.$$
            done
        fi
    fi
    if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
        if [ -n "$LOCALRUN" ]; then
            display_logs $dir
        else
            # Move logs to where Teuthology will archive it
            mkdir -p $TESTDIR/archive/log
            mv $dir/*.log $TESTDIR/archive/log
        fi
    fi
    rm -fr $dir
    rm -rf $(get_asok_dir)
    if [ "$cores" = "yes" ]; then
        echo "ERROR: Failure due to cores found"
        if [ -n "$LOCALRUN" ]; then
            echo "Find saved core files in /tmp/cores.$$"
        fi
        return 1
    fi
    return 0
}

function __teardown_btrfs() {
    local btrfs_base_dir=$1
    local btrfs_root=$(df -P . | tail -1 | $AWK '{print $NF}')
    local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list -t . | $AWK '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/$btrfs_dir")
    for subvolume in $btrfs_dirs; do
        sudo btrfs subvolume delete $btrfs_root/$subvolume
    done
}

function test_teardown() {
    local dir=$1
    setup $dir || return 1
    teardown $dir || return 1
    ! test -d $dir || return 1
}

#######################################################################

# Sends a signal to a single daemon.
# This is a helper function for kill_daemons.
#
# After the daemon is sent **signal**, its actual termination
# will be verified by sending it signal 0. If the daemon is
# still alive, kill_daemon will pause for a few seconds and
# try again. This will repeat for a fixed number of times
# before kill_daemon returns on failure. The list of
# sleep intervals can be specified as **delays** and defaults
# to:
#
#  0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
#
# This sequence is designed to run first a very short sleep time (0.1)
# if the machine is fast enough and the daemon terminates in a fraction of a
# second. The increasing sleep numbers should give plenty of time for
# the daemon to die even on the slowest running machine. If a daemon
# takes more than a few minutes to stop (the sum of all sleep times),
# there probably is no point in waiting more and a number of things
# are likely to go wrong anyway: better give up and return on error.
#
# @param pid the process id to send a signal
# @param send_signal the signal to send
# @param delays sequence of sleep times before failure
# @return 0 on success, 1 on error
#
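# For illustration, assuming osd.0 was started by run_osd and left its
# pid file in mydir/osd.0.pid (per the run_osd pid file convention):
#
# ~~~~~~~~~~~~~~~~{.sh}
# kill_daemon mydir/osd.0.pid TERM || return 1    # default delays
# kill_daemon mydir/osd.0.pid KILL 1 || return 1  # wait one second only
# ~~~~~~~~~~~~~~~~
#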
function kill_daemon() {
    local pid=$(cat $1)
    local send_signal=$2
    local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
    local exit_code=1
    # In order to try after the last large sleep add 0 at the end so we check
    # one last time before dropping out of the loop
    for try in $delays 0 ; do
         if kill -$send_signal $pid 2> /dev/null ; then
            exit_code=1
         else
            exit_code=0
            break
         fi
         send_signal=0
         sleep $try
    done;
    return $exit_code
}

function test_kill_daemon() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1

    name_prefix=osd
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # sending signal 0 won't kill the daemon
        # waiting just for one second instead of the default schedule
        # allows us to quickly verify what happens when kill fails
        # to stop the daemon (i.e. it must return false)
        #
        ! kill_daemon $pidfile 0 1 || return 1
        #
        # killing just the osd and verify the mon still is responsive
        #
        kill_daemon $pidfile TERM || return 1
    done

    name_prefix=mgr
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mgr
        #
        kill_daemon $pidfile TERM || return 1
    done

    name_prefix=mon
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mon and verify it cannot be reached
        #
        kill_daemon $pidfile TERM || return 1
        ! timeout 5 ceph status || return 1
    done

    teardown $dir || return 1
}

# Kill all daemons for which a .pid file exists in **dir**. Each
# daemon is sent a **signal** and kill_daemons waits for it to exit
# during a few minutes. By default all daemons are killed. If a
# **name_prefix** is provided, only the daemons for which a pid
# file is found matching the prefix are killed. See run_osd and
# run_mon for more information about the name conventions for
# the pid files.
#
# Send TERM to all daemons : kill_daemons $dir
# Send KILL to all daemons : kill_daemons $dir KILL
# Send KILL to all osds : kill_daemons $dir KILL osd
# Send KILL to osd 1 : kill_daemons $dir KILL osd.1
#
# If a daemon is sent the TERM signal and does not terminate
# within a few minutes, it will still be running even after
# kill_daemons returns.
#
# If all daemons are killed successfully the function returns 0.
# If at least one daemon remains, this is treated as an
# error and the function returns 1.
#
# @param dir path name of the environment
# @param signal name of the first signal (defaults to TERM)
# @param name_prefix only kill daemons matching the prefix (defaults to all)
# @param delays sequence of sleep times before failure
# @return 0 on success, 1 on error
#
function kill_daemons() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local dir=$1
    local signal=${2:-TERM}
    local name_prefix=$3 # optional, osd, mon, osd.1
    local delays=$4 #optional timing
    local status=0
    local pids=""

    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        run_in_background pids kill_daemon $pidfile $signal $delays
    done

    wait_background pids
    status=$?

    $trace && shopt -s -o xtrace
    return $status
}

function test_kill_daemons() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    #
    # sending signal 0 won't kill the daemon
    # waiting just for one second instead of the default schedule
    # allows us to quickly verify what happens when kill fails
    # to stop the daemon (i.e. it must return false)
    #
    ! kill_daemons $dir 0 osd 1 || return 1
    #
    # killing just the osd and verify the mon still is responsive
    #
    kill_daemons $dir TERM osd || return 1
    #
    # kill the mgr
    #
    kill_daemons $dir TERM mgr || return 1
    #
    # kill the mon and verify it cannot be reached
    #
    kill_daemons $dir TERM || return 1
    ! timeout 5 ceph status || return 1
    teardown $dir || return 1
}

# Return a random TCP port which is not used yet
#
# Please note, there could be a race if we use this function to find
# a free port, and then try to bind on this port.
#
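# A possible use, for illustration (the address matches the examples
# used elsewhere in this file):
#
# ~~~~~~~~~~~~~~~~{.sh}
# port=$(get_unused_port)
# CEPH_ARGS+="--mon-host=127.0.0.1:$port "
# ~~~~~~~~~~~~~~~~
#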
function get_unused_port() {
    local ip=127.0.0.1
    python3 -c "import socket; s=socket.socket(); s.bind(('$ip', 0)); print(s.getsockname()[1]); s.close()"
}

#######################################################################

# Run a monitor by the name mon.**id** with data in **dir**/**id**.
# The logs can be found in **dir**/mon.**id**.log and the pid file
# is **dir**/mon.**id**.pid and the admin socket is
# **dir**/**id**/ceph-mon.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-mon --mkfs
# and the ceph-mon daemon.
#
# Two mandatory arguments must be provided: --fsid and --mon-host.
# Instead of adding them to every call to run_mon, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_mon $dir a # spawn a mon and bind port 7018
# run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
#
# If mon_initial_members is not set, the default rbd pool is deleted
# and replaced with a replicated pool with fewer placement groups to
# speed up initialization. If mon_initial_members is set, no attempt
# is made to recreate the rbd pool because it would hang forever,
# waiting for other mons to join.
#
# A **dir**/ceph.conf file is created but not meant to be used by any
# function. It is convenient for debugging a failure with:
#
#     ceph --conf **dir**/ceph.conf -s
#
# @param dir path name of the environment
# @param id mon identifier
# @param ... can be any option valid for ceph-mon
# @return 0 on success, 1 on error
#
function run_mon() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mon \
        --id $id \
        --mkfs \
        --mon-data=$data \
        --run-dir=$dir \
        "$@" || return 1

    ceph-mon \
        --id $id \
        --osd-failsafe-full-ratio=.99 \
        --mon-osd-full-ratio=.99 \
        --mon-data-avail-crit=1 \
        --mon-data-avail-warn=5 \
        --paxos-propose-interval=0.1 \
        --osd-crush-chooseleaf-type=0 \
        $EXTRA_OPTS \
        --debug-mon 20 \
        --debug-ms 20 \
        --debug-paxos 20 \
        --chdir= \
        --mon-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --mon-cluster-log-file=$dir/log \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        --mon-allow-pool-delete \
        --mon-allow-pool-size-one \
        --osd-pool-default-pg-autoscale-mode off \
        --mon-osd-backfillfull-ratio .99 \
        --mon-warn-on-insecure-global-id-reclaim-allowed=false \
        "$@" || return 1

    cat > $dir/ceph.conf <<EOF
[global]
fsid = $(get_config mon $id fsid)
mon host = $(get_config mon $id mon_host)
EOF
}

function test_run_mon() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    ceph mon dump | grep "mon.a" || return 1
    kill_daemons $dir || return 1

    run_mon $dir a --osd_pool_default_size=3 || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1
    create_rbd_pool || return 1
    ceph osd dump | grep "pool 1 'rbd'" || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"3"}' || return 1

    ! CEPH_ARGS='' ceph status || return 1
    CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1

    kill_daemons $dir || return 1

    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"1"}' || return 1
    kill_daemons $dir || return 1

    CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
        run_mon $dir a || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"2"}' || return 1
    kill_daemons $dir || return 1

    teardown $dir || return 1
}

function create_rbd_pool() {
    ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
    create_pool rbd $PG_NUM || return 1
    rbd pool init rbd
}

function create_pool() {
    ceph osd pool create "$@"
    sleep 1
}

function delete_pool() {
    local poolname=$1
    ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
}

#######################################################################

function run_mgr() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph config set mgr mgr_pool false --force
    ceph-mgr \
        --id $id \
        $EXTRA_OPTS \
        --osd-failsafe-full-ratio=.99 \
        --debug-mgr 20 \
        --debug-objecter 20 \
        --debug-ms 20 \
        --debug-paxos 20 \
        --chdir= \
        --mgr-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        --mgr-module-path=$(realpath ${CEPH_ROOT}/src/pybind/mgr) \
        "$@" || return 1
}

function run_mds() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mds \
        --id $id \
        $EXTRA_OPTS \
        --debug-mds 20 \
        --debug-objecter 20 \
        --debug-ms 20 \
        --chdir= \
        --mds-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        "$@" || return 1
}

#######################################################################

# Create (prepare) and run (activate) an osd by the name osd.**id**
# with data in **dir**/**id**. The logs can be found in
# **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
# the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host.
# Instead of adding them to every call to run_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The run_osd function creates the OSD data directory in the **dir**/**id**
# directory and relies on the activate_osd function to run the daemon.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function run_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --osd-journal=${osd_data}/journal"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --debug-ms=1"
    ceph_args+=" --debug-monc=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len=460"
    ceph_args+=" --osd-max-object-namespace-len=64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
    ceph_args+=" --osd-mclock-profile=high_recovery_ops"
    ceph_args+=" $@"
    mkdir -p $osd_data

    local uuid=`uuidgen`
    echo "add osd$id $uuid"
    OSD_SECRET=$(ceph-authtool --gen-print-key)
    echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
    ceph osd new $uuid -i $osd_data/new.json
    rm $osd_data/new.json
    ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid

    local key_fn=$osd_data/keyring
    cat > $key_fn<<EOF
[osd.$id]
key = $OSD_SECRET
EOF
    echo adding osd$id key to auth repository
    ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
    echo start osd.$id
    ceph-osd -i $id $ceph_args &

    # If noup is set, then can't wait for this osd
    if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
        return 0
    fi
    wait_for_osd up $id || return 1
}

function run_osd_filestore() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --osd-journal=${osd_data}/journal"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --debug-ms=1"
    ceph_args+=" --debug-monc=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len=460"
    ceph_args+=" --osd-max-object-namespace-len=64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
    ceph_args+=" $@"
    mkdir -p $osd_data

    local uuid=`uuidgen`
    echo "add osd$id $uuid"
    OSD_SECRET=$(ceph-authtool --gen-print-key)
    echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
    ceph osd new $uuid -i $osd_data/new.json
    rm $osd_data/new.json
    ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid --osd-objectstore=filestore

    local key_fn=$osd_data/keyring
    cat > $key_fn<<EOF
[osd.$id]
key = $OSD_SECRET
EOF
    echo adding osd$id key to auth repository
    ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
    echo start osd.$id
    ceph-osd -i $id $ceph_args &

    # If noup is set, then can't wait for this osd
    if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
        return 0
    fi
    wait_for_osd up $id || return 1
}

function test_run_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    run_osd $dir 1 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"30"}' || return 1

    teardown $dir || return 1
}

#######################################################################

# Shutdown and remove all traces of the osd by the name osd.**id**.
#
# The OSD is shutdown with the TERM signal. It is then removed from
# the auth list, crush map, osd map etc and the files associated with
# it are also removed.
#
# @param dir path name of the environment
# @param id osd identifier
# @return 0 on success, 1 on error
#
function destroy_osd() {
    local dir=$1
    local id=$2

    ceph osd out osd.$id || return 1
    kill_daemons $dir TERM osd.$id || return 1
    ceph osd down osd.$id || return 1
    ceph osd purge osd.$id --yes-i-really-mean-it || return 1
    teardown $dir/$id || return 1
    rm -fr $dir/$id
}

function test_destroy_osd() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    destroy_osd $dir 0 || return 1
    ! ceph osd dump | grep "osd.0 " || return 1
    teardown $dir || return 1
}

#######################################################################

# Run (activate) an osd by the name osd.**id** with data in
# **dir**/**id**. The logs can be found in **dir**/osd.**id**.log,
# the pid file is **dir**/osd.**id**.pid and the admin socket is
# **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host.
# Instead of adding them to every call to activate_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The activate_osd function expects a valid OSD data directory
# in **dir**/**id**, either just created via run_osd or re-using
# one left by a previous run of ceph-osd. The ceph-osd daemon is
# run directly in the foreground.
#
# The activate_osd function blocks until the monitor reports the osd
# up. If it fails to do so within $TIMEOUT seconds, activate_osd
# fails.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# activate_osd $dir 0 # activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function activate_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --osd-journal=${osd_data}/journal"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len=460"
    ceph_args+=" --osd-max-object-namespace-len=64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
    ceph_args+=" --osd-mclock-profile=high_recovery_ops"
    ceph_args+=" $@"
    mkdir -p $osd_data

    echo start osd.$id
    ceph-osd -i $id $ceph_args &

    [ "$id" = "$(cat $osd_data/whoami)" ] || return 1

    # If noup is set, then can't wait for this osd
    if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
        return 0
    fi
    wait_for_osd up $id || return 1
}

function test_activate_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    kill_daemons $dir TERM osd || return 1

    activate_osd $dir 0 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    teardown $dir || return 1
}

function test_activate_osd_after_mark_down() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    kill_daemons $dir TERM osd || return 1
    ceph osd down 0 || return 1
    wait_for_osd down 0 || return 1

    activate_osd $dir 0 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    teardown $dir || return 1
}

function test_activate_osd_skip_benchmark() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    # Skip the osd benchmark during first osd bring-up.
    run_osd $dir 0 --osd-op-queue=mclock_scheduler \
        --osd-mclock-skip-benchmark=true || return 1
    local max_iops_hdd_def=$(CEPH_ARGS='' ceph --format=json daemon \
        $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_hdd)
    local max_iops_ssd_def=$(CEPH_ARGS='' ceph --format=json daemon \
        $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_ssd)

    kill_daemons $dir TERM osd || return 1
    ceph osd down 0 || return 1
    wait_for_osd down 0 || return 1

    # Skip the osd benchmark during activation as well. Validate that
    # the max osd capacities are left unchanged.
    activate_osd $dir 0 --osd-op-queue=mclock_scheduler \
        --osd-mclock-skip-benchmark=true || return 1
    local max_iops_hdd_after_boot=$(CEPH_ARGS='' ceph --format=json daemon \
        $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_hdd)
    local max_iops_ssd_after_boot=$(CEPH_ARGS='' ceph --format=json daemon \
        $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_ssd)

    test "$max_iops_hdd_def" = "$max_iops_hdd_after_boot" || return 1
    test "$max_iops_ssd_def" = "$max_iops_ssd_after_boot" || return 1

    teardown $dir || return 1
}

#######################################################################

# Wait until the OSD **id** is either up or down, as specified by
# **state**. It fails after $TIMEOUT seconds.
#
# @param state either up or down
# @param id osd identifier
# @return 0 on success, 1 on error
#
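# For illustration, as used by the tests below:
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_osd up 0 || return 1
# wait_for_osd down 0 || return 1
# ~~~~~~~~~~~~~~~~
#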
function wait_for_osd() {
    local state=$1
    local id=$2

    status=1
    for ((i=0; i < $TIMEOUT; i++)); do
        echo $i
        if ! ceph osd dump | grep "osd.$id $state"; then
            sleep 1
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_for_osd() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    wait_for_osd up 0 || return 1
    wait_for_osd up 1 || return 1
    kill_daemons $dir TERM osd.0 || return 1
    wait_for_osd down 0 || return 1
    ( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
    teardown $dir || return 1
}

#######################################################################

# Display the list of OSD ids supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT white space separated list of OSD ids
# @return 0 on success, 1 on error
#
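# For illustration: on a two OSD cluster this prints something like
# "0 1" (the actual ids depend on the mapping):
#
# ~~~~~~~~~~~~~~~~{.sh}
# get_osds rbd GROUP       # e.g. "0 1"
# ~~~~~~~~~~~~~~~~
#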
function get_osds() {
    local poolname=$1
    local objectname=$2

    local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting | .[]')
    # get rid of the trailing space
    echo $osds
}

function test_get_osds() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    create_rbd_pool || return 1
    get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
    teardown $dir || return 1
}

#######################################################################

# Wait for the monitor to form quorum (optionally, of size N)
#
# @param timeout duration (lower-bound) to wait for quorum to be formed
# @param quorumsize size of quorum to wait for
# @return 0 on success, 1 on error
#
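# For illustration: wait up to 300 seconds for a quorum of three
# monitors (both arguments are optional):
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_quorum 300 3 || return 1
# ~~~~~~~~~~~~~~~~
#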
function wait_for_quorum() {
    local timeout=$1
    local quorumsize=$2

    if [[ -z "$timeout" ]]; then
      timeout=300
    fi

    if [[ -z "$quorumsize" ]]; then
      timeout $timeout ceph quorum_status --format=json >&/dev/null || return 1
      return 0
    fi

    no_quorum=1
    wait_until=$((`date +%s` + $timeout))
    while [[ $(date +%s) -lt $wait_until ]]; do
        jqfilter='.quorum | length == '$quorumsize
        jqinput="$(timeout $timeout ceph quorum_status --format=json 2>/dev/null)"
        res=$(echo $jqinput | jq "$jqfilter")
        if [[ "$res" == "true" ]]; then
          no_quorum=0
          break
        fi
    done
    return $no_quorum
}

#######################################################################

# Return the PG supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT a PG
# @return 0 on success, 1 on error
#
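# For illustration (the actual pgid depends on the pool id and the
# object name):
#
# ~~~~~~~~~~~~~~~~{.sh}
# pgid=$(get_pg rbd GROUP)   # e.g. "1.0"
# ~~~~~~~~~~~~~~~~
#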
function get_pg() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
}

function test_get_pg() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the value of the **config**, obtained via the config get command
# of the admin socket of **daemon**.**id**.
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param STDOUT the config value
# @return 0 on success, 1 on error
#
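# For illustration, as exercised by test_get_config below:
#
# ~~~~~~~~~~~~~~~~{.sh}
# get_config mon a osd_pool_default_size   # e.g. "1"
# get_config osd 0 osd_max_scrubs          # e.g. "3"
# ~~~~~~~~~~~~~~~~
#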
function get_config() {
    local daemon=$1
    local id=$2
    local config=$3

    CEPH_ARGS='' \
        ceph --format json daemon $(get_asok_path $daemon.$id) \
        config get $config 2> /dev/null | \
        jq -r ".$config"
}

function test_get_config() {
    local dir=$1

    # override the default config using command line arg and check it
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    test $(get_config mon a osd_pool_default_size) = 1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 --osd_max_scrubs=3 || return 1
    test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
    teardown $dir || return 1
}

#######################################################################

# Set the **config** to specified **value**, via the config set command
# of the admin socket of **daemon**.**id**
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param value the config value
# @return 0 on success, 1 on error
#
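# For illustration, as exercised by test_set_config below:
#
# ~~~~~~~~~~~~~~~~{.sh}
# set_config mon a ms_crc_header false || return 1
# ~~~~~~~~~~~~~~~~
#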
function set_config() {
    local daemon=$1
    local id=$2
    local config=$3
    local value=$4

    test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
               config set $config $value 2> /dev/null | \
           jq 'has("success")') == true
}

function test_set_config() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    set_config mon a ms_crc_header false || return 1
    test $(get_config mon a ms_crc_header) = false || return 1
    set_config mon a ms_crc_header true || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the OSD id of the primary OSD supporting the **objectname**
# stored in **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the primary OSD id
# @return 0 on success, 1 on error
#
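# For illustration:
#
# ~~~~~~~~~~~~~~~~{.sh}
# primary=$(get_primary rbd GROUP)
# ~~~~~~~~~~~~~~~~
#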
function get_primary() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting_primary'
}

function test_get_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    test $(get_primary rbd GROUP) = $osd || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the id of any OSD supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map, except the primary.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the OSD id
# @return 0 on success, 1 on error
#
function get_not_primary() {
    local poolname=$1
    local objectname=$2

    local primary=$(get_primary $poolname $objectname)
    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq ".acting | map(select (. != $primary)) | .[0]"
}

function test_get_not_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local primary=$(get_primary rbd GROUP)
    local not_primary=$(get_not_primary rbd GROUP)
    test $not_primary != $primary || return 1
    test $not_primary = 0 -o $not_primary = 1 || return 1
    teardown $dir || return 1
}

#######################################################################

function _objectstore_tool_nodown() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    ceph-objectstore-tool \
        --data-path $osd_data \
        "$@" || return 1
}

function _objectstore_tool_nowait() {
    local dir=$1
    shift
    local id=$1
    shift

    kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1

    _objectstore_tool_nodown $dir $id "$@" || return 1
    activate_osd $dir $id $ceph_osd_args >&2 || return 1
}

# Run ceph-objectstore-tool against the OSD **id** using the data path
# **dir**. The OSD is killed with TERM prior to running
# ceph-objectstore-tool because access to the data path is
# exclusive. The OSD is restarted after the command completes. The
# objectstore_tool returns after all PG are active+clean again.
#
# @param dir the data path of the OSD
# @param id the OSD id
# @param ... arguments to ceph-objectstore-tool
# @param STDIN the input of ceph-objectstore-tool
# @param STDOUT the output of ceph-objectstore-tool
# @return 0 on success, 1 on error
#
# The value of $ceph_osd_args will be passed to restarted osds
#
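# For illustration, as exercised by test_objectstore_tool below
# (GROUP is an object previously written with rados put):
#
# ~~~~~~~~~~~~~~~~{.sh}
# objectstore_tool mydir 0 GROUP get-bytes > /tmp/GROUP
# ~~~~~~~~~~~~~~~~
#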
function objectstore_tool() {
    local dir=$1
    shift
    local id=$1
    shift

    _objectstore_tool_nowait $dir $id "$@" || return 1
    wait_for_clean >&2
}

function test_objectstore_tool() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    rados --pool rbd put GROUP /etc/group || return 1
    objectstore_tool $dir $osd GROUP get-bytes | \
        diff - /etc/group || return 1
    ! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
    teardown $dir || return 1
}

#######################################################################

# Predicate checking if there is an ongoing recovery in the
# cluster. If any of the recovering_{keys,bytes,objects}_per_sec
# counters are reported by ceph status, it means recovery is in
# progress.
#
# @return 0 if recovery in progress, 1 otherwise
#
function get_is_making_recovery_progress() {
    local recovery_progress
    recovery_progress+=".recovering_keys_per_sec + "
    recovery_progress+=".recovering_bytes_per_sec + "
    recovery_progress+=".recovering_objects_per_sec"
    local progress=$(ceph --format json status 2>/dev/null | \
                     jq -r ".pgmap | $recovery_progress")
    test "$progress" != null
}

function test_get_is_making_recovery_progress() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    ! get_is_making_recovery_progress || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the number of active PGs in the cluster. A PG is active if
# ceph pg dump pgs reports it both **active** and **clean** and that
# it is not **stale**.
#
# @param STDOUT the number of active PGs
# @return 0 on success, 1 on error
#
function get_num_active_clean() {
    local expression
    expression+="select(contains(\"active\") and contains(\"clean\")) | "
    expression+="select(contains(\"stale\") | not)"
    ceph --format json pg dump pgs 2>/dev/null | \
        jq ".pg_stats | [.[] | .state | $expression] | length"
}

function test_get_num_active_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_active_clean=$(get_num_active_clean)
    test "$num_active_clean" = $PG_NUM || return 1
    teardown $dir || return 1
}

# Return the number of active or peered PGs in the cluster. A PG matches if
# ceph pg dump pgs reports it is either **active** or **peered** and that
# it is not **stale**.
#
# @param STDOUT the number of matching PGs
# @return 0 on success, 1 on error
#
function get_num_active_or_peered() {
    local expression
    expression+="select(contains(\"active\") or contains(\"peered\")) | "
    expression+="select(contains(\"stale\") | not)"
    ceph --format json pg dump pgs 2>/dev/null | \
        jq ".pg_stats | [.[] | .state | $expression] | length"
}

function test_get_num_active_or_peered() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_peered=$(get_num_active_or_peered)
    test "$num_peered" = $PG_NUM || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the number of PGs in the cluster, according to
# ceph status.
#
# @param STDOUT the number of PGs
# @return 0 on success, 1 on error
#
function get_num_pgs() {
    ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
}

function test_get_num_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_pgs=$(get_num_pgs)
    test "$num_pgs" -gt 0 || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the OSD ids in use by at least one PG in the cluster (either
# in the up or the acting set), according to ceph pg dump pgs. Every
# OSD id shows up as many times as it is used in up and acting sets.
# If an OSD id is in both the up and acting set of a given PG, it will
# show up twice.
#
# @param STDOUT a sorted list of OSD ids
# @return 0 on success, 1 on error
#
function get_osd_id_used_by_pgs() {
    ceph --format json pg dump pgs 2>/dev/null | jq '.pg_stats | .[] | .up[], .acting[]' | sort
}

function test_get_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local osd_ids=$(get_osd_id_used_by_pgs | uniq)
    test "$osd_ids" = "0" || return 1
    teardown $dir || return 1
}

#######################################################################

# Wait until the OSD **id** shows **count** times in the
# PGs (see get_osd_id_used_by_pgs for more information about
# how OSD ids are counted).
#
# @param id the OSD id
# @param count the number of times it must show in the PGs
# @return 0 on success, 1 on error
#
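# For illustration, as in the test below: on a single-OSD cluster
# with the defaults used here (4 PGs, pool size 1), osd.0 appears
# once in each up and acting set, i.e. 8 times in total:
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_osd_id_used_by_pgs 0 8 || return 1
# ~~~~~~~~~~~~~~~~
#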
function wait_osd_id_used_by_pgs() {
    local id=$1
    local count=$2

    status=1
    for ((i=0; i < $TIMEOUT / 5; i++)); do
        echo $i
        if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then
            sleep 5
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    wait_osd_id_used_by_pgs 0 8 || return 1
    ! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1
    teardown $dir || return 1
}

#######################################################################

# Return the date and time of the last completed scrub for **pgid**,
# as reported by ceph pg dump pgs. Note that a repair also sets this
# timestamp.
#
# @param pgid the id of the PG
# @param STDOUT the date and time of the last scrub
# @return 0 on success, 1 on error
#
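# For illustration (the second argument selects the stat name and
# defaults to last_scrub_stamp):
#
# ~~~~~~~~~~~~~~~~{.sh}
# stamp=$(get_last_scrub_stamp 1.0)
# deep_stamp=$(get_last_scrub_stamp 1.0 last_deep_scrub_stamp)
# ~~~~~~~~~~~~~~~~
#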
function get_last_scrub_stamp() {
    local pgid=$1
    local sname=${2:-last_scrub_stamp}
    ceph --format json pg dump pgs 2>/dev/null | \
        jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname"
}

function test_get_last_scrub_stamp() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    stamp=$(get_last_scrub_stamp 1.0)
    test -n "$stamp" || return 1
    teardown $dir || return 1
}

#######################################################################

# Predicate checking if the cluster is clean, i.e. all of its PGs are
# in a clean state (see get_num_active_clean for a definition).
#
# @return 0 if the cluster is clean, 1 otherwise
#
function is_clean() {
    num_pgs=$(get_num_pgs)
    test $num_pgs != 0 || return 1
    test $(get_num_active_clean) = $num_pgs || return 1
}

function test_is_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    is_clean || return 1
    teardown $dir || return 1
}

#######################################################################

calc() { $AWK "BEGIN{print $*}"; }

# Return a list of numbers that are increasingly larger and whose
# total is **timeout** seconds. It can be used to have short sleep
# delay while waiting for an event on a fast machine. But if running
# very slowly the larger delays avoid stressing the machine even
# further or spamming the logs.
#
# @param timeout sum of all delays, in seconds
# @return a list of sleep delays
#
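# For illustration, this is how the wait_for_* helpers below use it,
# starting with 0.1 second steps:
#
# ~~~~~~~~~~~~~~~~{.sh}
# local -a delays=($(get_timeout_delays $TIMEOUT .1))
# ~~~~~~~~~~~~~~~~
#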
function get_timeout_delays() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local timeout=$1
    local first_step=${2:-1}
    local max_timeout=${3:-$MAX_TIMEOUT}

    local i
    local total="0"
    i=$first_step
    while test "$(calc $total + $i \<= $timeout)" = "1"; do
        echo -n "$(calc $i) "
        total=$(calc $total + $i)
        i=$(calc $i \* 2)
        if [ $max_timeout -gt 0 ]; then
            # Did we reach max timeout ?
            if [ ${i%.*} -eq ${max_timeout%.*} ] && [ ${i#*.} \> ${max_timeout#*.} ] || [ ${i%.*} -gt ${max_timeout%.*} ]; then
                # Yes, so let's cap the max wait time to max
                i=$max_timeout
            fi
        fi
    done
    if test "$(calc $total \< $timeout)" = "1"; then
        echo -n "$(calc $timeout - $total) "
    fi
    $trace && shopt -s -o xtrace
}

function test_get_timeout_delays() {
    test "$(get_timeout_delays 1)" = "1 " || return 1
    test "$(get_timeout_delays 5)" = "1 2 2 " || return 1
    test "$(get_timeout_delays 6)" = "1 2 3 " || return 1
    test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
    test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1
    test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1
    test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1
    test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1
    test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1
    test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1
    test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1
    test "$(get_timeout_delays 300 .1 0)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 12.8 25.6 51.2 102.4 95.3 " || return 1
    test "$(get_timeout_delays 300 .1 10)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 7.3 " || return 1
}

#######################################################################

# Wait until the cluster becomes clean or if it does not make progress
# for $WAIT_FOR_CLEAN_TIMEOUT seconds.
# Progress is measured either via the **get_is_making_recovery_progress**
# predicate or if the number of clean PGs changes (as returned by get_num_active_clean)
#
# @return 0 if the cluster is clean, 1 otherwise
#
function wait_for_clean() {
    local cmd=$1
    local num_active_clean=-1
    local cur_active_clean
    local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
    local -i loop=0

    flush_pg_stats || return 1
    while test $(get_num_pgs) == 0 ; do
        sleep 1
    done

    while true ; do
        # Comparing get_num_active_clean & get_num_pgs is used to determine
        # if the cluster is clean. That's almost an inline of is_clean() to
        # get more performance by avoiding multiple calls of get_num_active_clean.
        cur_active_clean=$(get_num_active_clean)
        test $cur_active_clean = $(get_num_pgs) && break
        if test $cur_active_clean != $num_active_clean ; then
            loop=0
            num_active_clean=$cur_active_clean
        elif get_is_making_recovery_progress ; then
            loop=0
        elif (( $loop >= ${#delays[*]} )) ; then
            ceph report
            return 1
        fi
        # eval is a no-op if cmd is empty
        eval $cmd
        sleep ${delays[$loop]}
        loop+=1
    done
    return 0
}

function test_wait_for_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_osd $dir 0 || return 1
    run_mgr $dir x || return 1
    create_rbd_pool || return 1
    ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
    run_osd $dir 1 || return 1
    wait_for_clean || return 1
    teardown $dir || return 1
}

# Wait until the cluster becomes peered or if it does not make progress
# for $WAIT_FOR_CLEAN_TIMEOUT seconds.
# Progress is measured either via the **get_is_making_recovery_progress**
# predicate or if the number of peered PGs changes (as returned by get_num_active_or_peered)
#
# @return 0 if the cluster is peered, 1 otherwise
#
function wait_for_peered() {
    local cmd=$1
    local num_peered=-1
    local cur_peered
    local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
    local -i loop=0

    flush_pg_stats || return 1
    while test $(get_num_pgs) == 0 ; do
        sleep 1
    done

    while true ; do
        # Comparing get_num_active_or_peered & get_num_pgs is used to determine
        # if the cluster is peered. That's almost an inline of is_clean() to
        # get more performance by avoiding multiple calls of get_num_active_or_peered.
        cur_peered=$(get_num_active_or_peered)
        test $cur_peered = $(get_num_pgs) && break
        if test $cur_peered != $num_peered ; then
            loop=0
            num_peered=$cur_peered
        elif get_is_making_recovery_progress ; then
            loop=0
        elif (( $loop >= ${#delays[*]} )) ; then
            ceph report
            return 1
        fi
        # eval is a no-op if cmd is empty
        eval $cmd
        sleep ${delays[$loop]}
        loop+=1
    done
    return 0
}

function test_wait_for_peered() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_osd $dir 0 || return 1
    run_mgr $dir x || return 1
    create_rbd_pool || return 1
    ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
    run_osd $dir 1 || return 1
    wait_for_peered || return 1
    teardown $dir || return 1
}

#######################################################################

# Wait until the cluster's health condition disappears.
#
# @param string to grep for in health detail
# @return 0 if the cluster health no longer matches the request,
#         1 if the health condition remains after $TIMEOUT seconds.
#
function wait_for_health_gone() {
    local grepstr=$1
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0

    while ceph health detail | grep "$grepstr" ; do
        if (( $loop >= ${#delays[*]} )) ; then
            ceph health detail
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
}

# Wait until the cluster has the health condition passed as arg
# again for $TIMEOUT seconds.
#
# @param string to grep for in health detail
# @return 0 if the cluster health matches request, 1 otherwise
#
function wait_for_health() {
    local grepstr=$1
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0

    while ! ceph health detail | grep "$grepstr" ; do
        if (( $loop >= ${#delays[*]} )) ; then
            ceph health detail
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
}

# Wait until the cluster becomes HEALTH_OK again or if it does not make progress
# for $TIMEOUT seconds.
#
# @return 0 if the cluster is HEALTHY, 1 otherwise
#
function wait_for_health_ok() {
    wait_for_health "HEALTH_OK" || return 1
}

function test_wait_for_health_ok() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
    run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
    # start osd_pool_default_size OSDs
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1
    kill_daemons $dir TERM osd || return 1
    ceph osd down 0 || return 1
    # expect TOO_FEW_OSDS warning
    ! TIMEOUT=1 wait_for_health_ok || return 1
    # resurrect all OSDs
    activate_osd $dir 0 || return 1
    activate_osd $dir 1 || return 1
    activate_osd $dir 2 || return 1
    wait_for_health_ok || return 1
    teardown $dir || return 1
}

#######################################################################

# Run repair on **pgid** and wait until it completes. The repair
# function will fail if repair does not complete within $TIMEOUT
# seconds.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
function repair() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph pg repair $pgid
    wait_for_scrub $pgid "$last_scrub"
}

function test_repair() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    repair 1.0 || return 1
    kill_daemons $dir KILL osd || return 1
    ! TIMEOUT=1 repair 1.0 || return 1
    teardown $dir || return 1
}

#######################################################################

# Run scrub on **pgid** and wait until it completes. The pg_scrub
# function will fail if the scrub does not complete within $TIMEOUT
# seconds. The pg_scrub is complete whenever the
# **get_last_scrub_stamp** function reports a timestamp different from
# the one stored before starting the scrub.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
function pg_scrub() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph pg scrub $pgid
    wait_for_scrub $pgid "$last_scrub"
}

function pg_deep_scrub() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
    ceph pg deep-scrub $pgid
    wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
}

function test_pg_scrub() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    pg_scrub 1.0 || return 1
    kill_daemons $dir KILL osd || return 1
    ! TIMEOUT=1 pg_scrub 1.0 || return 1
    teardown $dir || return 1
}

#######################################################################

# Run the *command* and expect it to fail (i.e. return a non-zero status).
# The output (stderr and stdout) is stored in a temporary file in *dir*
# and is expected to contain the string *expected*.
#
# Return 0 if the command failed and the string was found. Otherwise
# return 1 and cat the full output of the command on stderr for debug.
#
# @param dir temporary directory to store the output
# @param expected string to look for in the output
# @param command ... the command and its arguments
# @return 0 on success, 1 on error
#
function expect_failure() {
    local dir=$1
    shift
    local expected="$1"
    shift
    local success

    if "$@" > $dir/out 2>&1 ; then
        success=true
    else
        success=false
    fi

    if $success || ! grep --quiet "$expected" $dir/out ; then
        cat $dir/out >&2
        return 1
    fi
    return 0
}

function test_expect_failure() {
    local dir=$1

    setup $dir || return 1
    expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
    # the command did not fail
    ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
    grep --quiet FAIL $dir/out || return 1
    # the command failed but the output does not contain the expected string
    ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
    ! grep --quiet FAIL $dir/out || return 1
    teardown $dir || return 1
}

#######################################################################

# Given the *last_scrub*, wait for scrub to happen on **pgid**. It
# will fail if scrub does not complete within $TIMEOUT seconds. The
# scrub is complete whenever the **get_last_scrub_stamp** function
# reports a timestamp different from the one given in argument.
#
# @param pgid the id of the PG
# @param last_scrub timestamp of the last scrub for *pgid*
# @return 0 on success, 1 on error
#
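# For illustration, this is the pattern pg_scrub above implements:
# capture the stamp, trigger the scrub, then wait for the stamp to
# change:
#
# ~~~~~~~~~~~~~~~~{.sh}
# last_scrub=$(get_last_scrub_stamp $pgid)
# ceph pg scrub $pgid
# wait_for_scrub $pgid "$last_scrub" || return 1
# ~~~~~~~~~~~~~~~~
#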
function wait_for_scrub() {
    local pgid=$1
    local last_scrub="$2"
    local sname=${3:-last_scrub_stamp}

    for ((i=0; i < $TIMEOUT; i++)); do
        if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
            return 0
        fi
        sleep 1
    done
    return 1
}

function test_wait_for_scrub() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local pgid=1.0
    ceph pg repair $pgid
    local last_scrub=$(get_last_scrub_stamp $pgid)
    wait_for_scrub $pgid "$last_scrub" || return 1
    kill_daemons $dir KILL osd || return 1
    last_scrub=$(get_last_scrub_stamp $pgid)
    ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
    teardown $dir || return 1
}

#######################################################################

# Return 0 if the erasure code *plugin* is available, 1 otherwise.
#
# @param plugin erasure code plugin
# @return 0 on success, 1 on error
#
function erasure_code_plugin_exists() {
    local plugin=$1
    local status
    local grepstr
    local s
    case `uname` in
        FreeBSD) grepstr="Cannot open.*$plugin" ;;
        *) grepstr="$plugin.*No such file" ;;
    esac

    s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
    status=$?
    if [ $status -eq 0 ]; then
        ceph osd erasure-code-profile rm TESTPROFILE
    elif ! echo $s | grep --quiet "$grepstr" ; then
        status=1
        # display why the string was rejected.
        echo $s
    fi
    return $status
}

function test_erasure_code_plugin_exists() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    erasure_code_plugin_exists jerasure || return 1
    ! erasure_code_plugin_exists FAKE || return 1
    teardown $dir || return 1
}

#######################################################################

# Display all log files from **dir** on stdout.
#
# @param dir directory in which all data is stored
#
function display_logs() {
    local dir=$1

    find $dir -maxdepth 1 -name '*.log' | \
        while read file ; do
            echo "======================= $file"
            cat $file
        done
}
function test_display_logs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    kill_daemons $dir || return 1
    display_logs $dir > $dir/log.out
    grep --quiet mon.a.log $dir/log.out || return 1
    teardown $dir || return 1
}
#######################################################################

# Spawn a command in the background and append its pid to the variable
# whose name is passed in argument. To make the output easier to read,
# each output line is prefixed with the process id.
#
# Example:
#   run_in_background pids1 bash -c 'sleep 1; exit 1'
#
# @param pid_variable the variable name (not value) where the pids will be stored
# @param ... the command to execute
# @return none; the pids accumulated in *pid_variable* are meant to be reaped with **wait_background**
#
function run_in_background() {
    local pid_variable=$1
    shift
    # Execute the command and prefix its output with its pid.
    # The subshell returns ${PIPESTATUS[0]} so that wait reports the
    # exit status of the command, not of sed.
    ("$@" |& sed 's/^/'$BASHPID': /'; return "${PIPESTATUS[0]}") >&2 &
    eval "$pid_variable+=\" $!\""
}
# Run a command, redirecting its stdout to the file named by the first
# argument.
function save_stdout {
    local out="$1"
    shift
    "$@" > "$out"
}
function test_run_in_background() {
    local pids
    run_in_background pids sleep 1
    run_in_background pids sleep 1
    test $(echo $pids | wc -w) = 2 || return 1
    wait $pids || return 1
}
#######################################################################

# Wait for pids running in the background to complete.
# This function is usually used after a **run_in_background** call.
#
# Example:
#   run_in_background pids1 bash -c 'sleep 1; exit 1'
#   wait_background pids1
#
# @param pids the variable name (not value) that contains the active pids; it is reset to the empty string before returning
# @return 1 if at least one process exited in error, 0 otherwise
#
function wait_background() {
    # Extract the pids from the variable name passed as $1
    # (indirect expansion).
    pids=${!1}

    return_code=0
    for pid in $pids; do
        if ! wait $pid; then
            # If any process failed, remember to return 1.
            return_code=1
        fi
    done

    # Empty the caller's variable to report that all processes ended.
    eval "$1=''"

    return $return_code
}
function test_wait_background() {
    local pids=""
    run_in_background pids bash -c "sleep 1; exit 1"
    run_in_background pids bash -c "sleep 2; exit 0"
    wait_background pids
    if [ $? -ne 1 ]; then return 1; fi

    run_in_background pids bash -c "sleep 1; exit 0"
    run_in_background pids bash -c "sleep 2; exit 0"
    wait_background pids
    if [ $? -ne 0 ]; then return 1; fi

    if [ ! -z "$pids" ]; then return 1; fi
}
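#######################################################################

# Tell each OSD to flush its PG stats and wait until the monitor has
# received the corresponding stat sequence number from every OSD, or
# until *timeout* seconds have elapsed.
#
# @param timeout max number of seconds to wait (defaults to $TIMEOUT)
# @return 0 on success, 1 on error
#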
function flush_pg_stats()
{
    local timeout=${1:-$TIMEOUT}

    ids=`ceph osd ls`
    seqs=''
    for osd in $ids; do
        seq=`ceph tell osd.$osd flush_pg_stats`
        if test -z "$seq"
        then
            continue
        fi
        seqs="$seqs $osd-$seq"
    done

    for s in $seqs; do
        osd=`echo $s | cut -d - -f 1`
        seq=`echo $s | cut -d - -f 2`
        echo "waiting osd.$osd seq $seq"
        while test $(ceph osd last-stat-seq $osd) -lt $seq; do
            sleep 1
            if [ $((timeout--)) -eq 0 ]; then
                return 1
            fi
        done
    done
}
function test_flush_pg_stats()
{
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    rados -p rbd put obj /etc/group
    flush_pg_stats || return 1
    local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
    stored=`ceph df detail --format=json | jq "$jq_filter.stored"`
    stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
    test $stored -gt 0 || return 1
    test $stored == $stored_raw || return 1
    teardown $dir || return 1
}
########################################################################

# Get the current op scheduler enabled on an OSD by reading the
# osd_op_queue config option.
#
# Example:
#   get_op_scheduler $osdid
#
# @param id the id of the OSD
# @return the name of the op scheduler enabled for the OSD
#
function get_op_scheduler() {
    local id=$1

    get_config osd $id osd_op_queue
}
function test_get_op_scheduler() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 --osd_op_queue=wpq || return 1
    test $(get_op_scheduler 0) = "wpq" || return 1

    run_osd $dir 1 --osd_op_queue=mclock_scheduler || return 1
    test $(get_op_scheduler 1) = "mclock_scheduler" || return 1
    teardown $dir || return 1
}
#######################################################################

# Call the **run** function (which must be defined by the caller) with
# the **dir** argument followed by the caller's argument list.
#
# If the **run** function returns an error, all logs found in **dir**
# are displayed for diagnostic purposes.
#
# The **teardown** function is called when the **run** function returns
# (on success or on error), to clean up leftovers. CEPH_CONF is set
# to /dev/null and CEPH_ARGS is unset so that the tests are protected from
# external interference.
#
# It is the responsibility of the **run** function to call the
# **setup** function to prepare the test environment (create a temporary
# directory etc.).
#
# The shell is configured (via PS4) to display the function name and line
# number whenever a statement is executed, to help debugging.
#
# @param dir directory in which all data is stored
# @param ... arguments passed transparently to **run**
# @return 0 on success, 1 on error
#
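# A minimal caller sketch: a test script sources this helper file,
# defines the mandatory **run** function and hands control over to
# **main** (the test body below is illustrative):
#
# ~~~~~~~~~~~~~~~~{.sh}
# source ceph-helpers.sh
#
# function run() {
#     local dir=$1
#     shift
#     setup $dir || return 1
#     run_mon $dir a || return 1
#     # ... test steps, each ending in || return 1 ...
#     teardown $dir || return 1
# }
#
# main my-test "$@"
# ~~~~~~~~~~~~~~~~
#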
function main() {
    local dir=td/$1
    shift

    shopt -s -o xtrace
    PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

    export PATH=.:$PATH # make sure programs from sources are preferred
    export PYTHONWARNINGS=ignore
    export CEPH_CONF=/dev/null
    unset CEPH_ARGS

    local code
    if run $dir "$@" ; then
        code=0
    else
        code=1
    fi
    teardown $dir $code || return 1 # a non-zero code makes teardown display the logs
    return $code
}
#######################################################################

function run_tests() {
    shopt -s -o xtrace
    PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

    export PATH=.:$PATH # make sure programs from sources are preferred
    export PYTHONWARNINGS=ignore
    export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    export CEPH_CONF=/dev/null

    local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
    local dir=td/ceph-helpers

    for func in $funcs ; do
        if ! $func $dir; then
            teardown $dir 1
            return 1
        fi
    done
}
if test "$1" = TESTS ; then
    shift
    run_tests "$@"
    exit $?
fi
# jq only supports --exit-status|-e from version 1.4 onwards, which makes
# returning on error waaaay prettier and more straightforward.
# However, the current automated upstream build is running with v1.3,
# which has no idea what -e is. Hence the convoluted error checking we
# need below.
# The next time someone changes this code, please check if v1.4 is now
# a thing, and, if so, please change these to use -e. Thanks.

# jq '.all.supported | select([.[] == "foo"] | any)'
function jq_success() {
  input="$1"
  filter="$2"
  expects="\"$3\""

  in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g")
  filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g")

  ret=$(echo "$in_escaped" | jq "$filter_escaped")
  if [[ "$ret" == "true" ]]; then
    return 0
  elif [[ -n "$expects" ]]; then
    if [[ "$ret" == "$expects" ]]; then
      return 0
    fi
  fi
  return 1
}
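
# For instance, a self-contained check that needs no cluster (the JSON
# document here is illustrative):
#
# ~~~~~~~~~~~~~~~~{.sh}
# jq_success '{"flag": true}' '.flag == true' || return 1
# ~~~~~~~~~~~~~~~~
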
function inject_eio() {
    local pooltype=$1
    shift
    local which=$1
    shift
    local poolname=$1
    shift
    local objname=$1
    shift
    local dir=$1
    shift
    local shard_id=$1
    shift

    local -a initial_osds=($(get_osds $poolname $objname))
    local osd_id=${initial_osds[$shard_id]}
    if [ "$pooltype" != "ec" ]; then
        shard_id=""
    fi
    type=$(cat $dir/$osd_id/type)
    set_config osd $osd_id ${type}_debug_inject_read_err true || return 1
    local loop=0
    while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
             inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
        loop=$(expr $loop + 1)
        if [ $loop = "10" ]; then
            return 1
        fi
        sleep 1
    done
}
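
# A typical call injects a data read error on shard 0 of an object
# stored in an erasure coded pool; the pool and object names below are
# illustrative:
#
# ~~~~~~~~~~~~~~~~{.sh}
# inject_eio ec data $poolname SOMETHING $dir 0 || return 1
# ~~~~~~~~~~~~~~~~
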
function multidiff() {
    # DIFFCOLOPTS may be empty (e.g. on FreeBSD); fall back to plain diff.
    if [ "$DIFFCOLOPTS" = "" ]; then
        diff $@
    else
        diff $DIFFCOLOPTS $@
    fi
}
function create_ec_pool() {
    local poolname=$1
    shift
    local allow_overwrites=$1
    shift

    ceph osd erasure-code-profile set myprofile crush-failure-domain=osd "$@" || return 1

    create_pool "$poolname" 1 1 erasure myprofile || return 1

    if [ "$allow_overwrites" = "true" ]; then
        ceph osd pool set "$poolname" allow_ec_overwrites true || return 1
    fi

    wait_for_clean || return 1
    return 0
}
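
# For example, to create a pool accepting overwrites with a k=2 m=1
# profile (the pool name and profile parameters are illustrative):
#
# ~~~~~~~~~~~~~~~~{.sh}
# create_ec_pool ecpool true k=2 m=1 || return 1
# ~~~~~~~~~~~~~~~~
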
# Local Variables:
# compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"
# End: