1 #!/usr/bin/env bash
2 #
3 # Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
4 # Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
5 # Copyright (C) 2014 Federico Gimenez <fgimenez@coit.es>
6 #
7 # Author: Loic Dachary <loic@dachary.org>
8 # Author: Federico Gimenez <fgimenez@coit.es>
9 #
10 # This program is free software; you can redistribute it and/or modify
11 # it under the terms of the GNU Library Public License as published by
12 # the Free Software Foundation; either version 2, or (at your option)
13 # any later version.
14 #
15 # This program is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 # GNU Library Public License for more details.
19 #
20 TIMEOUT=300
21 WAIT_FOR_CLEAN_TIMEOUT=90
22 MAX_TIMEOUT=15
23 PG_NUM=4
24 TMPDIR=${TMPDIR:-/tmp}
25 CEPH_BUILD_VIRTUALENV=${TMPDIR}
26 TESTDIR=${TESTDIR:-${TMPDIR}}
27
28 if type xmlstarlet > /dev/null 2>&1; then
29 XMLSTARLET=xmlstarlet
30 elif type xml > /dev/null 2>&1; then
31 XMLSTARLET=xml
32 else
33 echo "Missing xmlstarlet binary!"
34 exit 1
35 fi
36
37 if [ `uname` = FreeBSD ]; then
38 SED=gsed
39 AWK=gawk
40 DIFFCOLOPTS=""
41 KERNCORE="kern.corefile"
42 else
43 SED=sed
44 AWK=awk
45 termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
46 if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
47 termwidth="-W ${termwidth}"
48 fi
49 DIFFCOLOPTS="-y $termwidth"
50 KERNCORE="kernel.core_pattern"
51 fi
52
53 EXTRA_OPTS=""
54
55 #! @file ceph-helpers.sh
56 # @brief Toolbox to manage Ceph cluster dedicated to testing
57 #
58 # Example use case:
59 #
60 # ~~~~~~~~~~~~~~~~{.sh}
61 # source ceph-helpers.sh
62 #
63 # function mytest() {
64 # # cleanup leftovers and reset mydir
65 # setup mydir
66 # # create a cluster with one monitor and three osds
67 # run_mon mydir a
68 # run_osd mydir 0
69 # run_osd mydir 2
70 # run_osd mydir 3
71 # # put and get an object
72 # rados --pool rbd put GROUP /etc/group
73 # rados --pool rbd get GROUP /tmp/GROUP
74 # # stop the cluster and cleanup the directory
75 # teardown mydir
76 # }
77 # ~~~~~~~~~~~~~~~~
78 #
79 # The focus is on simplicity and efficiency, in the context of
80 # functional tests. The output is intentionally very verbose
81 # and functions return as soon as an error is found. The caller
82 # is also expected to abort on the first error so that debugging
83 # can be done by looking at the end of the output.
84 #
85 # Each function is documented, implemented and tested independently.
86 # When modifying a helper, the test and the documentation are
87 # expected to be updated and it is easier if they are collocated. A
88 # test for a given function can be run with
89 #
90 # ~~~~~~~~~~~~~~~~{.sh}
91 # ceph-helpers.sh TESTS test_get_osds
92 # ~~~~~~~~~~~~~~~~
93 #
94 # and all the tests (i.e. all functions matching test_*) are run
95 # with:
96 #
97 # ~~~~~~~~~~~~~~~~{.sh}
98 # ceph-helpers.sh TESTS
99 # ~~~~~~~~~~~~~~~~
100 #
101 # A test function takes a single argument: the directory dedicated
102 # to the tests. It is expected to not create any file outside of this
103 # directory and remove it entirely when it completes successfully.
104 #
105
106
107 function get_asok_dir() {
108 if [ -n "$CEPH_ASOK_DIR" ]; then
109 echo "$CEPH_ASOK_DIR"
110 else
111 echo ${TMPDIR:-/tmp}/ceph-asok.$$
112 fi
113 }
114
115 function get_asok_path() {
116 local name=$1
117 if [ -n "$name" ]; then
118 echo $(get_asok_dir)/ceph-$name.asok
119 else
120 echo $(get_asok_dir)/\$cluster-\$name.asok
121 fi
122 }
123 ##
124 # Cleanup any leftovers found in **dir** via **teardown**
125 # and reset **dir** as an empty environment.
126 #
127 # @param dir path name of the environment
128 # @return 0 on success, 1 on error
129 #
130 function setup() {
131 local dir=$1
132 teardown $dir || return 1
133 mkdir -p $dir
134 mkdir -p $(get_asok_dir)
135 if [ $(ulimit -n) -le 1024 ]; then
136 ulimit -n 4096 || return 1
137 fi
138 if [ -z "$LOCALRUN" ]; then
139 trap "teardown $dir 1" TERM HUP INT
140 fi
141 }
142
143 function test_setup() {
144     local dir=$1
145 setup $dir || return 1
146 test -d $dir || return 1
147 setup $dir || return 1
148 test -d $dir || return 1
149 teardown $dir
150 }
151
152 #######################################################################
153
154 ##
155 # Kill all daemons for which a .pid file exists in **dir** and remove
156 # **dir**. If the file system in which **dir** resides is btrfs, delete all
157 # subvolumes that relate to it.
158 #
159 # @param dir path name of the environment
160 # @param dumplogs pass "1" to always dump logs; otherwise logs are dumped only if cores are found
161 # @return 0 on success, 1 on error
162 #
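# Illustrative usage (not from the original file): dump the logs even when
# no core files are found.
#
#     teardown $dir 1 || return 1
#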
163 function teardown() {
164 local dir=$1
165 local dumplogs=$2
166 kill_daemons $dir KILL
167 if [ `uname` != FreeBSD ] \
168 && [ $(stat -f -c '%T' .) == "btrfs" ]; then
169 __teardown_btrfs $dir
170 fi
171 local cores="no"
172 local pattern="$(sysctl -n $KERNCORE)"
173 # See if we have apport core handling
174 if [ "${pattern:0:1}" = "|" ]; then
175 # TODO: Where can we get the dumps?
176 # Not sure where the dumps really are so this will look in the CWD
177 pattern=""
178 fi
179     # When running locally the core file name starts with "core"; on teuthology it ends with "core"
180 if ls $(dirname "$pattern") | grep -q '^core\|core$' ; then
181 cores="yes"
182 if [ -n "$LOCALRUN" ]; then
183 mkdir /tmp/cores.$$ 2> /dev/null || true
184 for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
185 mv $i /tmp/cores.$$
186 done
187 fi
188 fi
189 if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
190 if [ -n "$LOCALRUN" ]; then
191 display_logs $dir
192 else
193 # Move logs to where Teuthology will archive it
194 mkdir -p $TESTDIR/archive/log
195 mv $dir/*.log $TESTDIR/archive/log
196 fi
197 fi
198 rm -fr $dir
199 rm -rf $(get_asok_dir)
200 if [ "$cores" = "yes" ]; then
201 echo "ERROR: Failure due to cores found"
202 if [ -n "$LOCALRUN" ]; then
203 echo "Find saved core files in /tmp/cores.$$"
204 fi
205 return 1
206 fi
207 return 0
208 }
209
210 function __teardown_btrfs() {
211 local btrfs_base_dir=$1
212 local btrfs_root=$(df -P . | tail -1 | $AWK '{print $NF}')
213     local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list -t . | $AWK '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/")
214 for subvolume in $btrfs_dirs; do
215 sudo btrfs subvolume delete $btrfs_root/$subvolume
216 done
217 }
218
219 function test_teardown() {
220     local dir=$1
221 setup $dir || return 1
222 teardown $dir || return 1
223 ! test -d $dir || return 1
224 }
225
226 #######################################################################
227
228 ##
229 # Sends a signal to a single daemon.
230 # This is a helper function for kill_daemons
231 #
232 # After the daemon is sent **signal**, its actual termination
233 # will be verified by sending it signal 0. If the daemon is
234 # still alive, kill_daemon will pause for a few seconds and
235 # try again. This will repeat for a fixed number of times
236 # before kill_daemon returns on failure. The list of
237 # sleep intervals can be specified as **delays** and defaults
238 # to:
239 #
240 # 0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
241 #
242 # This sequence starts with a very short sleep (0.1) in case the machine
243 # is fast enough for the daemon to terminate in a fraction of a
244 # second. The increasing sleep times should give plenty of time for
245 # the daemon to die even on the slowest running machine. If a daemon
246 # takes more than a few minutes to stop (the sum of all sleep times),
247 # there is probably no point in waiting longer and a number of things
248 # are likely to go wrong anyway: better give up and return an error.
249 #
250 # @param pidfile path of the file holding the process id to signal
251 # @param send_signal the signal to send
252 # @param delays sequence of sleep times before failure
253 #
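# Illustrative usage (not from the original file), assuming osd.0 was
# started with run_osd and therefore wrote $dir/osd.0.pid:
#
#     kill_daemon $dir/osd.0.pid TERM || return 1            # default delays
#     kill_daemon $dir/osd.0.pid KILL "0.1 1 5" || return 1  # custom delays
#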
254 function kill_daemon() {
255 local pid=$(cat $1)
256 local send_signal=$2
257 local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
258 local exit_code=1
259 # In order to try after the last large sleep add 0 at the end so we check
260 # one last time before dropping out of the loop
261 for try in $delays 0 ; do
262 if kill -$send_signal $pid 2> /dev/null ; then
263 exit_code=1
264 else
265 exit_code=0
266 break
267 fi
268 send_signal=0
269 sleep $try
270 done;
271 return $exit_code
272 }
273
274 function test_kill_daemon() {
275 local dir=$1
276 setup $dir || return 1
277 run_mon $dir a --osd_pool_default_size=1 || return 1
278 run_mgr $dir x || return 1
279 run_osd $dir 0 || return 1
280
281 name_prefix=osd
282 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
283 #
284 # sending signal 0 won't kill the daemon
285 # waiting just for one second instead of the default schedule
286 # allows us to quickly verify what happens when kill fails
287 # to stop the daemon (i.e. it must return false)
288 #
289 ! kill_daemon $pidfile 0 1 || return 1
290 #
291         # kill just the osd and verify the mon is still responsive
292 #
293 kill_daemon $pidfile TERM || return 1
294 done
295
296 ceph osd dump | grep "osd.0 down" || return 1
297
298 name_prefix=mgr
299 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
300 #
301 # kill the mgr
302 #
303 kill_daemon $pidfile TERM || return 1
304 done
305
306 name_prefix=mon
307 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
308 #
309 # kill the mon and verify it cannot be reached
310 #
311 kill_daemon $pidfile TERM || return 1
312 ! timeout 5 ceph status || return 1
313 done
314
315 teardown $dir || return 1
316 }
317
318 ##
319 # Kill all daemons for which a .pid file exists in **dir**. Each
320 # daemon is sent a **signal** and kill_daemons waits for it to exit
321 # during a few minutes. By default all daemons are killed. If a
322 # **name_prefix** is provided, only the daemons for which a pid
323 # file is found matching the prefix are killed. See run_osd and
324 # run_mon for more information about the name conventions for
325 # the pid files.
326 #
327 # Send TERM to all daemons : kill_daemons $dir
328 # Send KILL to all daemons : kill_daemons $dir KILL
329 # Send KILL to all osds : kill_daemons $dir KILL osd
330 # Send KILL to osd 1 : kill_daemons $dir KILL osd.1
331 #
332 # If a daemon is sent the TERM signal and does not terminate
333 # within a few minutes, it will still be running even after
334 # kill_daemons returns.
335 #
336 # If all daemons are killed successfully the function returns 0.
337 # If at least one daemon remains, this is treated as an
338 # error and the function returns 1.
339 #
340 # @param dir path name of the environment
341 # @param signal name of the first signal (defaults to TERM)
342 # @param name_prefix only kill matching daemons (defaults to all)
343 # @param delays sequence of sleep times before failure
344 # @return 0 on success, 1 on error
345 #
346 function kill_daemons() {
347 local trace=$(shopt -q -o xtrace && echo true || echo false)
348 $trace && shopt -u -o xtrace
349 local dir=$1
350 local signal=${2:-TERM}
351 local name_prefix=$3 # optional, osd, mon, osd.1
352 local delays=$4 #optional timing
353 local status=0
354 local pids=""
355
356 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
357 run_in_background pids kill_daemon $pidfile $signal $delays
358 done
359
360 wait_background pids
361 status=$?
362
363 $trace && shopt -s -o xtrace
364 return $status
365 }
366
367 function test_kill_daemons() {
368 local dir=$1
369 setup $dir || return 1
370 run_mon $dir a --osd_pool_default_size=1 || return 1
371 run_mgr $dir x || return 1
372 run_osd $dir 0 || return 1
373 #
374 # sending signal 0 won't kill the daemon
375 # waiting just for one second instead of the default schedule
376 # allows us to quickly verify what happens when kill fails
377 # to stop the daemon (i.e. it must return false)
378 #
379 ! kill_daemons $dir 0 osd 1 || return 1
380 #
381     # kill just the osd and verify the mon is still responsive
382 #
383 kill_daemons $dir TERM osd || return 1
384 ceph osd dump | grep "osd.0 down" || return 1
385 #
386 # kill the mgr
387 #
388 kill_daemons $dir TERM mgr || return 1
389 #
390 # kill the mon and verify it cannot be reached
391 #
392 kill_daemons $dir TERM || return 1
393 ! timeout 5 ceph status || return 1
394 teardown $dir || return 1
395 }
396
397 #
398 # return a random TCP port which is not used yet
399 #
400 # please note, there is a race: another process may bind the port
401 # after this function returns it and before the caller binds to it.
402 #
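# Illustrative usage (not from the original file): pick a free port for the
# monitor before building CEPH_ARGS, keeping the race above in mind.
#
#     MON_PORT=$(get_unused_port)
#     CEPH_ARGS+="--mon-host=127.0.0.1:${MON_PORT} "
#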
403 function get_unused_port() {
404 local ip=127.0.0.1
405 python3 -c "import socket; s=socket.socket(); s.bind(('$ip', 0)); print(s.getsockname()[1]); s.close()"
406 }
407
408 #######################################################################
409
410 ##
411 # Run a monitor by the name mon.**id** with data in **dir**/**id**.
412 # The logs can be found in **dir**/mon.**id**.log and the pid file
413 # is **dir**/mon.**id**.pid and the admin socket is
414 # **dir**/**id**/ceph-mon.**id**.asok.
415 #
416 # The remaining arguments are passed verbatim to ceph-mon --mkfs
417 # and the ceph-mon daemon.
418 #
419 # Two mandatory arguments must be provided: --fsid and --mon-host
420 # Instead of adding them to every call to run_mon, they can be
421 # set in the CEPH_ARGS environment variable to be read implicitly
422 # by every ceph command.
423 #
424 # The CEPH_CONF variable is expected to be set to /dev/null to
425 # only rely on arguments for configuration.
426 #
427 # Examples:
428 #
429 # CEPH_ARGS="--fsid=$(uuidgen) "
430 # CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
431 # run_mon $dir a # spawn a mon and bind port 7018
432 # run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
433 #
434 # If mon_initial_members is not set, the default rbd pool is deleted
435 # and replaced with a replicated pool with fewer placement groups to
436 # speed up initialization. If mon_initial_members is set, no attempt
437 # is made to recreate the rbd pool because it would hang forever,
438 # waiting for other mons to join.
439 #
440 # A **dir**/ceph.conf file is created but not meant to be used by any
441 # function. It is convenient for debugging a failure with:
442 #
443 # ceph --conf **dir**/ceph.conf -s
444 #
445 # @param dir path name of the environment
446 # @param id mon identifier
447 # @param ... can be any option valid for ceph-mon
448 # @return 0 on success, 1 on error
449 #
450 function run_mon() {
451 local dir=$1
452 shift
453 local id=$1
454 shift
455 local data=$dir/$id
456
457 ceph-mon \
458 --id $id \
459 --mkfs \
460 --mon-data=$data \
461 --run-dir=$dir \
462 "$@" || return 1
463
464 ceph-mon \
465 --id $id \
466 --osd-failsafe-full-ratio=.99 \
467 --mon-osd-full-ratio=.99 \
468 --mon-data-avail-crit=1 \
469 --mon-data-avail-warn=5 \
470 --paxos-propose-interval=0.1 \
471 --osd-crush-chooseleaf-type=0 \
472 $EXTRA_OPTS \
473 --debug-mon 20 \
474 --debug-ms 20 \
475 --debug-paxos 20 \
476 --chdir= \
477 --mon-data=$data \
478 --log-file=$dir/\$name.log \
479 --admin-socket=$(get_asok_path) \
480 --mon-cluster-log-file=$dir/log \
481 --run-dir=$dir \
482 --pid-file=$dir/\$name.pid \
483 --mon-allow-pool-delete \
484 --mon-osd-backfillfull-ratio .99 \
485 "$@" || return 1
486
487 cat > $dir/ceph.conf <<EOF
488 [global]
489 fsid = $(get_config mon $id fsid)
490 mon host = $(get_config mon $id mon_host)
491 EOF
492 }
493
494 function test_run_mon() {
495 local dir=$1
496
497 setup $dir || return 1
498
499 run_mon $dir a --mon-initial-members=a || return 1
500 ceph mon dump | grep "mon.a" || return 1
501 kill_daemons $dir || return 1
502
503 run_mon $dir a --osd_pool_default_size=3 || return 1
504 run_osd $dir 0 || return 1
505 run_osd $dir 1 || return 1
506 run_osd $dir 2 || return 1
507 create_rbd_pool || return 1
508 ceph osd dump | grep "pool 1 'rbd'" || return 1
509 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
510 config get osd_pool_default_size)
511 test "$size" = '{"osd_pool_default_size":"3"}' || return 1
512
513 ! CEPH_ARGS='' ceph status || return 1
514 CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1
515
516 kill_daemons $dir || return 1
517
518 run_mon $dir a --osd_pool_default_size=1 || return 1
519 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
520 config get osd_pool_default_size)
521 test "$size" = '{"osd_pool_default_size":"1"}' || return 1
522 kill_daemons $dir || return 1
523
524 CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
525 run_mon $dir a || return 1
526 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
527 config get osd_pool_default_size)
528 test "$size" = '{"osd_pool_default_size":"2"}' || return 1
529 kill_daemons $dir || return 1
530
531 teardown $dir || return 1
532 }
533
534 function create_rbd_pool() {
535 ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
536 create_pool rbd $PG_NUM || return 1
537 rbd pool init rbd
538 }
539
540 function create_pool() {
541 ceph osd pool create "$@"
542 sleep 1
543 }
544
545 function delete_pool() {
546 local poolname=$1
547 ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
548 }
549
550 #######################################################################
551
552 function run_mgr() {
553 local dir=$1
554 shift
555 local id=$1
556 shift
557 local data=$dir/$id
558
559 ceph-mgr \
560 --id $id \
561 $EXTRA_OPTS \
562 --osd-failsafe-full-ratio=.99 \
563 --debug-mgr 20 \
564 --debug-objecter 20 \
565 --debug-ms 20 \
566 --debug-paxos 20 \
567 --chdir= \
568 --mgr-data=$data \
569 --log-file=$dir/\$name.log \
570 --admin-socket=$(get_asok_path) \
571 --run-dir=$dir \
572 --pid-file=$dir/\$name.pid \
573 --mgr-module-path=$(realpath ${CEPH_ROOT}/src/pybind/mgr) \
574 "$@" || return 1
575 }
576
577 function run_mds() {
578 local dir=$1
579 shift
580 local id=$1
581 shift
582 local data=$dir/$id
583
584 ceph-mds \
585 --id $id \
586 $EXTRA_OPTS \
587 --debug-mds 20 \
588 --debug-objecter 20 \
589 --debug-ms 20 \
590 --chdir= \
591 --mds-data=$data \
592 --log-file=$dir/\$name.log \
593 --admin-socket=$(get_asok_path) \
594 --run-dir=$dir \
595 --pid-file=$dir/\$name.pid \
596 "$@" || return 1
597 }
598
599 #######################################################################
600
601 ##
602 # Create (prepare) and run (activate) an osd by the name osd.**id**
603 # with data in **dir**/**id**. The logs can be found in
604 # **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
605 # the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
606 #
607 # The remaining arguments are passed verbatim to ceph-osd.
608 #
609 # Two mandatory arguments must be provided: --fsid and --mon-host
610 # Instead of adding them to every call to run_osd, they can be
611 # set in the CEPH_ARGS environment variable to be read implicitly
612 # by every ceph command.
613 #
614 # The CEPH_CONF variable is expected to be set to /dev/null to
615 # only rely on arguments for configuration.
616 #
617 # The run_osd function creates the OSD data directory on the **dir**/**id**
618 # directory and relies on the activate_osd function to run the daemon.
619 #
620 # Examples:
621 #
622 # CEPH_ARGS="--fsid=$(uuidgen) "
623 # CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
624 # run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
625 #
626 # @param dir path name of the environment
627 # @param id osd identifier
628 # @param ... can be any option valid for ceph-osd
629 # @return 0 on success, 1 on error
630 #
631 function run_osd() {
632 local dir=$1
633 shift
634 local id=$1
635 shift
636 local osd_data=$dir/$id
637
638 local ceph_args="$CEPH_ARGS"
639 ceph_args+=" --osd-failsafe-full-ratio=.99"
640 ceph_args+=" --osd-journal-size=100"
641 ceph_args+=" --osd-scrub-load-threshold=2000"
642 ceph_args+=" --osd-data=$osd_data"
643 ceph_args+=" --osd-journal=${osd_data}/journal"
644 ceph_args+=" --chdir="
645     ceph_args+=" $EXTRA_OPTS"
646 ceph_args+=" --run-dir=$dir"
647 ceph_args+=" --admin-socket=$(get_asok_path)"
648 ceph_args+=" --debug-osd=20"
649 ceph_args+=" --log-file=$dir/\$name.log"
650 ceph_args+=" --pid-file=$dir/\$name.pid"
651 ceph_args+=" --osd-max-object-name-len=460"
652 ceph_args+=" --osd-max-object-namespace-len=64"
653 ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
654 ceph_args+=" "
655 ceph_args+="$@"
656 mkdir -p $osd_data
657
658 local uuid=`uuidgen`
659 echo "add osd$id $uuid"
660 OSD_SECRET=$(ceph-authtool --gen-print-key)
661 echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
662 ceph osd new $uuid -i $osd_data/new.json
663 rm $osd_data/new.json
664 ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid
665
666 local key_fn=$osd_data/keyring
667 cat > $key_fn<<EOF
668 [osd.$id]
669 key = $OSD_SECRET
670 EOF
671 echo adding osd$id key to auth repository
672 ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
673 echo start osd.$id
674 ceph-osd -i $id $ceph_args &
675
676 wait_for_osd up $id || return 1
677
678 }
679
680 function run_osd_bluestore() {
681 local dir=$1
682 shift
683 local id=$1
684 shift
685 local osd_data=$dir/$id
686
687 local ceph_args="$CEPH_ARGS"
688 ceph_args+=" --osd-failsafe-full-ratio=.99"
689 ceph_args+=" --osd-journal-size=100"
690 ceph_args+=" --osd-scrub-load-threshold=2000"
691 ceph_args+=" --osd-data=$osd_data"
692 ceph_args+=" --osd-journal=${osd_data}/journal"
693 ceph_args+=" --chdir="
694     ceph_args+=" $EXTRA_OPTS"
695 ceph_args+=" --run-dir=$dir"
696 ceph_args+=" --admin-socket=$(get_asok_path)"
697 ceph_args+=" --debug-osd=20"
698 ceph_args+=" --log-file=$dir/\$name.log"
699 ceph_args+=" --pid-file=$dir/\$name.pid"
700 ceph_args+=" --osd-max-object-name-len=460"
701 ceph_args+=" --osd-max-object-namespace-len=64"
702 ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
703 ceph_args+=" "
704 ceph_args+="$@"
705 mkdir -p $osd_data
706
707 local uuid=`uuidgen`
708     echo "add osd$id $uuid"
709 OSD_SECRET=$(ceph-authtool --gen-print-key)
710 echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
711 ceph osd new $uuid -i $osd_data/new.json
712 rm $osd_data/new.json
713 ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid --osd-objectstore=bluestore
714
715 local key_fn=$osd_data/keyring
716 cat > $key_fn<<EOF
717 [osd.$id]
718 key = $OSD_SECRET
719 EOF
720 echo adding osd$id key to auth repository
721 ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
722 echo start osd.$id
723 ceph-osd -i $id $ceph_args &
724
725 wait_for_osd up $id || return 1
726
727
728 }
729
730 function test_run_osd() {
731 local dir=$1
732
733 setup $dir || return 1
734
735 run_mon $dir a || return 1
736 run_mgr $dir x || return 1
737
738 run_osd $dir 0 || return 1
739 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
740 config get osd_max_backfills)
741 echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
742
743 run_osd $dir 1 --osd-max-backfills 20 || return 1
744 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
745 config get osd_max_backfills)
746 test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
747
748 CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
749 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
750 config get osd_max_backfills)
751 test "$backfills" = '{"osd_max_backfills":"30"}' || return 1
752
753 teardown $dir || return 1
754 }
755
756 #######################################################################
757
758 ##
759 # Shutdown and remove all traces of the osd by the name osd.**id**.
760 #
761 # The OSD is shutdown with the TERM signal. It is then removed from
762 # the auth list, crush map, osd map etc and the files associated with
763 # it are also removed.
764 #
765 # @param dir path name of the environment
766 # @param id osd identifier
767 # @return 0 on success, 1 on error
768 #
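# Illustrative usage (not from the original file): remove a running osd
# and recreate one with the same id.
#
#     destroy_osd $dir 0 || return 1
#     run_osd $dir 0 || return 1
#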
769 function destroy_osd() {
770 local dir=$1
771 local id=$2
772
773 ceph osd out osd.$id || return 1
774 kill_daemons $dir TERM osd.$id || return 1
775 ceph osd purge osd.$id --yes-i-really-mean-it || return 1
776 teardown $dir/$id || return 1
777 rm -fr $dir/$id
778 }
779
780 function test_destroy_osd() {
781 local dir=$1
782
783 setup $dir || return 1
784 run_mon $dir a || return 1
785 run_mgr $dir x || return 1
786 run_osd $dir 0 || return 1
787 destroy_osd $dir 0 || return 1
788     ! ceph osd dump | grep "osd.0 " || return 1
789 teardown $dir || return 1
790 }
791
792 #######################################################################
793
794 ##
795 # Run (activate) an osd by the name osd.**id** with data in
796 # **dir**/**id**. The logs can be found in **dir**/osd.**id**.log,
797 # the pid file is **dir**/osd.**id**.pid and the admin socket is
798 # **dir**/**id**/ceph-osd.**id**.asok.
799 #
800 # The remaining arguments are passed verbatim to ceph-osd.
801 #
802 # Two mandatory arguments must be provided: --fsid and --mon-host
803 # Instead of adding them to every call to activate_osd, they can be
804 # set in the CEPH_ARGS environment variable to be read implicitly
805 # by every ceph command.
806 #
807 # The CEPH_CONF variable is expected to be set to /dev/null to
808 # only rely on arguments for configuration.
809 #
810 # The activate_osd function expects a valid OSD data directory
811 # in **dir**/**id**, either just created via run_osd or re-using
812 # one left by a previous run of ceph-osd. The ceph-osd daemon is
813 # run directly in the foreground.
814 #
815 # The activate_osd function blocks until the monitor reports the osd
816 # up. If it fails to do so within $TIMEOUT seconds, activate_osd
817 # fails.
818 #
819 # Examples:
820 #
821 # CEPH_ARGS="--fsid=$(uuidgen) "
822 # CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
823 # activate_osd $dir 0 # activate an osd using the monitor listening on 7018
824 #
825 # @param dir path name of the environment
826 # @param id osd identifier
827 # @param ... can be any option valid for ceph-osd
828 # @return 0 on success, 1 on error
829 #
830 function activate_osd() {
831 local dir=$1
832 shift
833 local id=$1
834 shift
835 local osd_data=$dir/$id
836
837 local ceph_args="$CEPH_ARGS"
838 ceph_args+=" --osd-failsafe-full-ratio=.99"
839 ceph_args+=" --osd-journal-size=100"
840 ceph_args+=" --osd-scrub-load-threshold=2000"
841 ceph_args+=" --osd-data=$osd_data"
842 ceph_args+=" --osd-journal=${osd_data}/journal"
843 ceph_args+=" --chdir="
844     ceph_args+=" $EXTRA_OPTS"
845 ceph_args+=" --run-dir=$dir"
846 ceph_args+=" --admin-socket=$(get_asok_path)"
847 ceph_args+=" --debug-osd=20"
848 ceph_args+=" --log-file=$dir/\$name.log"
849 ceph_args+=" --pid-file=$dir/\$name.pid"
850 ceph_args+=" --osd-max-object-name-len=460"
851 ceph_args+=" --osd-max-object-namespace-len=64"
852 ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
853 ceph_args+=" "
854 ceph_args+="$@"
855 mkdir -p $osd_data
856
857 echo start osd.$id
858 ceph-osd -i $id $ceph_args &
859
860 [ "$id" = "$(cat $osd_data/whoami)" ] || return 1
861
862 wait_for_osd up $id || return 1
863 }
864
865 function test_activate_osd() {
866 local dir=$1
867
868 setup $dir || return 1
869
870 run_mon $dir a || return 1
871 run_mgr $dir x || return 1
872
873 run_osd $dir 0 || return 1
874 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
875 config get osd_max_backfills)
876 echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
877
878 kill_daemons $dir TERM osd || return 1
879
880 activate_osd $dir 0 --osd-max-backfills 20 || return 1
881 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
882 config get osd_max_backfills)
883 test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
884
885 teardown $dir || return 1
886 }
887
888 #######################################################################
889
890 ##
891 # Wait until the OSD **id** is either up or down, as specified by
892 # **state**. It fails after $TIMEOUT seconds.
893 #
894 # @param state either up or down
895 # @param id osd identifier
896 # @return 0 on success, 1 on error
897 #
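# Illustrative usage (not from the original file), mirroring the test below:
#
#     kill_daemons $dir TERM osd || return 1
#     wait_for_osd down 0 || return 1
#     activate_osd $dir 0 || return 1
#     wait_for_osd up 0 || return 1
#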
898 function wait_for_osd() {
899 local state=$1
900 local id=$2
901
902 status=1
903 for ((i=0; i < $TIMEOUT; i++)); do
904 echo $i
905 if ! ceph osd dump | grep "osd.$id $state"; then
906 sleep 1
907 else
908 status=0
909 break
910 fi
911 done
912 return $status
913 }
914
915 function test_wait_for_osd() {
916 local dir=$1
917 setup $dir || return 1
918 run_mon $dir a --osd_pool_default_size=1 || return 1
919 run_mgr $dir x || return 1
920 run_osd $dir 0 || return 1
921 wait_for_osd up 0 || return 1
922 kill_daemons $dir TERM osd || return 1
923 wait_for_osd down 0 || return 1
924 ( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
925 teardown $dir || return 1
926 }
927
928 #######################################################################
929
930 ##
931 # Display the list of OSD ids supporting the **objectname** stored in
932 # **poolname**, as reported by ceph osd map.
933 #
934 # @param poolname an existing pool
935 # @param objectname an objectname (may or may not exist)
936 # @param STDOUT white space separated list of OSD ids
937 # @return 0 on success, 1 on error
938 #
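# Illustrative usage (not from the original file), assuming the rbd pool
# and the GROUP object from the example at the top of this file:
#
#     osds=$(get_osds rbd GROUP)     # e.g. "0 1"
#     for osd in $osds ; do echo "GROUP is stored on osd.$osd" ; done
#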
939 function get_osds() {
940 local poolname=$1
941 local objectname=$2
942
943 local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
944 jq '.acting | .[]')
945 # get rid of the trailing space
946 echo $osds
947 }
948
949 function test_get_osds() {
950 local dir=$1
951
952 setup $dir || return 1
953 run_mon $dir a --osd_pool_default_size=2 || return 1
954 run_mgr $dir x || return 1
955 run_osd $dir 0 || return 1
956 run_osd $dir 1 || return 1
957 create_rbd_pool || return 1
958 wait_for_clean || return 1
959 create_rbd_pool || return 1
960 get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
961 teardown $dir || return 1
962 }
963
964 #######################################################################
965
966 ##
967 # Wait for the monitor to form quorum (optionally, of size N)
968 #
969 # @param timeout duration (lower-bound) to wait for quorum to be formed
970 # @param quorumsize size of quorum to wait for
971 # @return 0 on success, 1 on error
972 #
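# Illustrative usage (not from the original file), assuming three monitors
# a, b and c have been started with run_mon:
#
#     wait_for_quorum 300 3 || return 1
#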
973 function wait_for_quorum() {
974 local timeout=$1
975 local quorumsize=$2
976
977 if [[ -z "$timeout" ]]; then
978 timeout=300
979 fi
980
981 if [[ -z "$quorumsize" ]]; then
982 timeout $timeout ceph mon_status --format=json >&/dev/null || return 1
983 return 0
984 fi
985
986 no_quorum=1
987 wait_until=$((`date +%s` + $timeout))
988 while [[ $(date +%s) -lt $wait_until ]]; do
989 jqfilter='.quorum | length == '$quorumsize
990 jqinput="$(timeout $timeout ceph mon_status --format=json 2>/dev/null)"
991 res=$(echo $jqinput | jq "$jqfilter")
992 if [[ "$res" == "true" ]]; then
993 no_quorum=0
994 break
995 fi
996 done
997 return $no_quorum
998 }
999
1000 #######################################################################
1001
1002 ##
1003 # Return the PG supporting the **objectname** stored in
1004 # **poolname**, as reported by ceph osd map.
1005 #
1006 # @param poolname an existing pool
1007 # @param objectname an objectname (may or may not exist)
1008 # @param STDOUT a PG
1009 # @return 0 on success, 1 on error
1010 #
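# Illustrative usage (not from the original file): look up the PG of an
# object and scrub it with the pg_scrub helper defined later in this file.
#
#     pgid=$(get_pg rbd GROUP)
#     pg_scrub $pgid || return 1
#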
1011 function get_pg() {
1012 local poolname=$1
1013 local objectname=$2
1014
1015 ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
1016 }
1017
1018 function test_get_pg() {
1019 local dir=$1
1020
1021 setup $dir || return 1
1022 run_mon $dir a --osd_pool_default_size=1 || return 1
1023 run_mgr $dir x || return 1
1024 run_osd $dir 0 || return 1
1025 create_rbd_pool || return 1
1026 wait_for_clean || return 1
1027 get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
1028 teardown $dir || return 1
1029 }
1030
1031 #######################################################################
1032
1033 ##
1034 # Return the value of the **config**, obtained via the config get command
1035 # of the admin socket of **daemon**.**id**.
1036 #
1037 # @param daemon mon or osd
1038 # @param id mon or osd ID
1039 # @param config the configuration variable name as found in config_opts.h
1040 # @param STDOUT the config value
1041 # @return 0 on success, 1 on error
1042 #
1043 function get_config() {
1044 local daemon=$1
1045 local id=$2
1046 local config=$3
1047
1048 CEPH_ARGS='' \
1049 ceph --format json daemon $(get_asok_path $daemon.$id) \
1050 config get $config 2> /dev/null | \
1051 jq -r ".$config"
1052 }
1053
1054 function test_get_config() {
1055 local dir=$1
1056
1057 # override the default config using command line arg and check it
1058 setup $dir || return 1
1059 run_mon $dir a --osd_pool_default_size=1 || return 1
1060 test $(get_config mon a osd_pool_default_size) = 1 || return 1
1061 run_mgr $dir x || return 1
1062 run_osd $dir 0 --osd_max_scrubs=3 || return 1
1063 test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
1064 teardown $dir || return 1
1065 }
1066
1067 #######################################################################
1068
1069 ##
1070 # Set the **config** to specified **value**, via the config set command
1071 # of the admin socket of **daemon**.**id**
1072 #
1073 # @param daemon mon or osd
1074 # @param id mon or osd ID
1075 # @param config the configuration variable name as found in config_opts.h
1076 # @param value the config value
1077 # @return 0 on success, 1 on error
1078 #
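# Illustrative usage (not from the original file), pairing set_config with
# get_config to verify the change took effect:
#
#     set_config osd 0 osd_max_scrubs 3 || return 1
#     test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
#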
1079 function set_config() {
1080 local daemon=$1
1081 local id=$2
1082 local config=$3
1083 local value=$4
1084
1085 test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
1086 config set $config $value 2> /dev/null | \
1087 jq 'has("success")') == true
1088 }
1089
1090 function test_set_config() {
1091 local dir=$1
1092
1093 setup $dir || return 1
1094 run_mon $dir a --osd_pool_default_size=1 || return 1
1095 test $(get_config mon a ms_crc_header) = true || return 1
1096 set_config mon a ms_crc_header false || return 1
1097 test $(get_config mon a ms_crc_header) = false || return 1
1098 set_config mon a ms_crc_header true || return 1
1099 test $(get_config mon a ms_crc_header) = true || return 1
1100 teardown $dir || return 1
1101 }
1102
1103 #######################################################################
1104
1105 ##
1106 # Return the OSD id of the primary OSD supporting the **objectname**
1107 # stored in **poolname**, as reported by ceph osd map.
1108 #
1109 # @param poolname an existing pool
1110 # @param objectname an objectname (may or may not exist)
1111 # @param STDOUT the primary OSD id
1112 # @return 0 on success, 1 on error
1113 #
1114 function get_primary() {
1115 local poolname=$1
1116 local objectname=$2
1117
1118 ceph --format json osd map $poolname $objectname 2>/dev/null | \
1119 jq '.acting_primary'
1120 }
1121
1122 function test_get_primary() {
1123 local dir=$1
1124
1125 setup $dir || return 1
1126 run_mon $dir a --osd_pool_default_size=1 || return 1
1127 local osd=0
1128 run_mgr $dir x || return 1
1129 run_osd $dir $osd || return 1
1130 create_rbd_pool || return 1
1131 wait_for_clean || return 1
1132 test $(get_primary rbd GROUP) = $osd || return 1
1133 teardown $dir || return 1
1134 }
1135
1136 #######################################################################
1137
1138 ##
1139 # Return the id of any OSD supporting the **objectname** stored in
1140 # **poolname**, as reported by ceph osd map, except the primary.
1141 #
1142 # @param poolname an existing pool
1143 # @param objectname an objectname (may or may not exist)
1144 # @param STDOUT the OSD id
1145 # @return 0 on success, 1 on error
1146 #
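# Illustrative usage (not from the original file): pick a replica other
# than the primary, for instance to stop it during a test.
#
#     primary=$(get_primary rbd GROUP)
#     other=$(get_not_primary rbd GROUP)
#     kill_daemons $dir TERM osd.$other || return 1
#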
1147 function get_not_primary() {
1148 local poolname=$1
1149 local objectname=$2
1150
1151 local primary=$(get_primary $poolname $objectname)
1152 ceph --format json osd map $poolname $objectname 2>/dev/null | \
1153 jq ".acting | map(select (. != $primary)) | .[0]"
1154 }
1155
1156 function test_get_not_primary() {
1157 local dir=$1
1158
1159 setup $dir || return 1
1160 run_mon $dir a --osd_pool_default_size=2 || return 1
1161 run_mgr $dir x || return 1
1162 run_osd $dir 0 || return 1
1163 run_osd $dir 1 || return 1
1164 create_rbd_pool || return 1
1165 wait_for_clean || return 1
1166 local primary=$(get_primary rbd GROUP)
1167 local not_primary=$(get_not_primary rbd GROUP)
1168 test $not_primary != $primary || return 1
1169 test $not_primary = 0 -o $not_primary = 1 || return 1
1170 teardown $dir || return 1
1171 }
1172
1173 #######################################################################
1174
1175 function _objectstore_tool_nodown() {
1176 local dir=$1
1177 shift
1178 local id=$1
1179 shift
1180 local osd_data=$dir/$id
1181
1182 local journal_args
1183 if [ "$objectstore_type" == "filestore" ]; then
1184 journal_args=" --journal-path $osd_data/journal"
1185 fi
1186 ceph-objectstore-tool \
1187 --data-path $osd_data \
1188 $journal_args \
1189 "$@" || return 1
1190 }
1191
1192 function _objectstore_tool_nowait() {
1193 local dir=$1
1194 shift
1195 local id=$1
1196 shift
1197
1198 kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1
1199
1200 _objectstore_tool_nodown $dir $id "$@" || return 1
1201 activate_osd $dir $id $ceph_osd_args >&2 || return 1
1202 }
1203
1204 ##
1205 # Run ceph-objectstore-tool against the OSD **id** using the data path
1206 # **dir**. The OSD is killed with TERM prior to running
1207 # ceph-objectstore-tool because access to the data path is
1208 # exclusive. The OSD is restarted after the command completes. The
1209 # objectstore_tool returns after all PG are active+clean again.
1210 #
1211 # @param dir the data path of the OSD
1212 # @param id the OSD id
1213 # @param ... arguments to ceph-objectstore-tool
1214 # @param STDIN the input of ceph-objectstore-tool
1215 # @param STDOUT the output of ceph-objectstore-tool
1216 # @return 0 on success, 1 on error
1217 #
1218 # The value of $ceph_osd_args will be passed to restarted osds
1219 #
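# Illustrative usage (not from the original file), mirroring the test below:
#
#     rados --pool rbd put GROUP /etc/group || return 1
#     objectstore_tool $dir 0 GROUP get-bytes > $dir/GROUP || return 1
#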
1220 function objectstore_tool() {
1221 local dir=$1
1222 shift
1223 local id=$1
1224 shift
1225
1226 _objectstore_tool_nowait $dir $id "$@" || return 1
1227 wait_for_clean >&2
1228 }
1229
1230 function test_objectstore_tool() {
1231 local dir=$1
1232
1233 setup $dir || return 1
1234 run_mon $dir a --osd_pool_default_size=1 || return 1
1235 local osd=0
1236 run_mgr $dir x || return 1
1237 run_osd $dir $osd || return 1
1238 create_rbd_pool || return 1
1239 wait_for_clean || return 1
1240 rados --pool rbd put GROUP /etc/group || return 1
1241 objectstore_tool $dir $osd GROUP get-bytes | \
1242 diff - /etc/group
1243 ! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
1244 teardown $dir || return 1
1245 }
1246
1247 #######################################################################
1248
1249 ##
1250 # Predicate checking if there is an ongoing recovery in the
1251 # cluster. If any of the recovering_{keys,bytes,objects}_per_sec
1252 # counters are reported by ceph status, it means recovery is in
1253 # progress.
1254 #
1255 # @return 0 if recovery in progress, 1 otherwise
1256 #
1257 function get_is_making_recovery_progress() {
1258 local recovery_progress
1259 recovery_progress+=".recovering_keys_per_sec + "
1260 recovery_progress+=".recovering_bytes_per_sec + "
1261 recovery_progress+=".recovering_objects_per_sec"
1262 local progress=$(ceph --format json status 2>/dev/null | \
1263 jq -r ".pgmap | $recovery_progress")
1264 test "$progress" != null
1265 }
1266
1267 function test_get_is_making_recovery_progress() {
1268 local dir=$1
1269
1270 setup $dir || return 1
1271 run_mon $dir a || return 1
1272 run_mgr $dir x || return 1
1273 ! get_is_making_recovery_progress || return 1
1274 teardown $dir || return 1
1275 }
1276
1277 #######################################################################
1278
1279 ##
1280 # Return the number of active PGs in the cluster. A PG is active if
1281 # ceph pg dump pgs reports it both **active** and **clean** and
1282 # not **stale**.
1283 #
1284 # @param STDOUT the number of active PGs
1285 # @return 0 on success, 1 on error
1286 #
1287 function get_num_active_clean() {
1288 local expression
1289 expression+="select(contains(\"active\") and contains(\"clean\")) | "
1290 expression+="select(contains(\"stale\") | not)"
1291 ceph --format json pg dump pgs 2>/dev/null | \
1292 jq ".pg_stats | [.[] | .state | $expression] | length"
1293 }
1294
1295 function test_get_num_active_clean() {
1296 local dir=$1
1297
1298 setup $dir || return 1
1299 run_mon $dir a --osd_pool_default_size=1 || return 1
1300 run_mgr $dir x || return 1
1301 run_osd $dir 0 || return 1
1302 create_rbd_pool || return 1
1303 wait_for_clean || return 1
1304 local num_active_clean=$(get_num_active_clean)
1305 test "$num_active_clean" = $PG_NUM || return 1
1306 teardown $dir || return 1
1307 }
1308
1309 #######################################################################
1310
1311 ##
1312 # Return the number of PGs in the cluster, according to
1313 # ceph pg dump pgs.
1314 #
1315 # @param STDOUT the number of PGs
1316 # @return 0 on success, 1 on error
1317 #
1318 function get_num_pgs() {
1319 ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
1320 }
1321
1322 function test_get_num_pgs() {
1323 local dir=$1
1324
1325 setup $dir || return 1
1326 run_mon $dir a --osd_pool_default_size=1 || return 1
1327 run_mgr $dir x || return 1
1328 run_osd $dir 0 || return 1
1329 create_rbd_pool || return 1
1330 wait_for_clean || return 1
1331 local num_pgs=$(get_num_pgs)
1332 test "$num_pgs" -gt 0 || return 1
1333 teardown $dir || return 1
1334 }
1335
1336 #######################################################################
1337
1338 ##
1339 # Return the OSD ids in use by at least one PG in the cluster (either
1340 # in the up or the acting set), according to ceph pg dump pgs. Each
1341 # OSD id shows up as many times as it is used in up and acting sets.
1342 # If an OSD id is in both the up and acting set of a given PG, it will
1343 # show up twice.
1344 #
1345 # @param STDOUT a sorted list of OSD ids
1346 # @return 0 on success, 1 on error
1347 #
1348 function get_osd_id_used_by_pgs() {
1349 ceph --format json pg dump pgs 2>/dev/null | jq '.pg_stats | .[] | .up[], .acting[]' | sort
1350 }
1351
1352 function test_get_osd_id_used_by_pgs() {
1353 local dir=$1
1354
1355 setup $dir || return 1
1356 run_mon $dir a --osd_pool_default_size=1 || return 1
1357 run_mgr $dir x || return 1
1358 run_osd $dir 0 || return 1
1359 create_rbd_pool || return 1
1360 wait_for_clean || return 1
1361 local osd_ids=$(get_osd_id_used_by_pgs | uniq)
1362 test "$osd_ids" = "0" || return 1
1363 teardown $dir || return 1
1364 }
1365
1366 #######################################################################
1367
1368 ##
1369 # Wait until the OSD **id** shows **count** times in the
1370 # PGs (see get_osd_id_used_by_pgs for more information about
1371 # how OSD ids are counted).
1372 #
1373 # @param id the OSD id
1374 # @param count the number of times it must show up in the PGs
1375 # @return 0 on success, 1 on error
1376 #
1377 function wait_osd_id_used_by_pgs() {
1378 local id=$1
1379 local count=$2
1380
1381 status=1
1382 for ((i=0; i < $TIMEOUT / 5; i++)); do
1383 echo $i
1384 if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then
1385 sleep 5
1386 else
1387 status=0
1388 break
1389 fi
1390 done
1391 return $status
1392 }
1393
1394 function test_wait_osd_id_used_by_pgs() {
1395 local dir=$1
1396
1397 setup $dir || return 1
1398 run_mon $dir a --osd_pool_default_size=1 || return 1
1399 run_mgr $dir x || return 1
1400 run_osd $dir 0 || return 1
1401 create_rbd_pool || return 1
1402 wait_for_clean || return 1
1403 wait_osd_id_used_by_pgs 0 8 || return 1
1404 ! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1
1405 teardown $dir || return 1
1406 }
1407
1408 #######################################################################
1409
1410 ##
1411 # Return the date and time of the last completed scrub for **pgid**,
1412 # as reported by ceph pg dump pgs. Note that a repair also sets this
1413 # date.
1414 #
1415 # @param pgid the id of the PG
1416 # @param STDOUT the date and time of the last scrub
1417 # @return 0 on success, 1 on error
1418 #
1419 function get_last_scrub_stamp() {
1420 local pgid=$1
1421 local sname=${2:-last_scrub_stamp}
1422 ceph --format json pg dump pgs 2>/dev/null | \
1423 jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname"
1424 }
1425
1426 function test_get_last_scrub_stamp() {
1427 local dir=$1
1428
1429 setup $dir || return 1
1430 run_mon $dir a --osd_pool_default_size=1 || return 1
1431 run_mgr $dir x || return 1
1432 run_osd $dir 0 || return 1
1433 create_rbd_pool || return 1
1434 wait_for_clean || return 1
1435 stamp=$(get_last_scrub_stamp 1.0)
1436 test -n "$stamp" || return 1
1437 teardown $dir || return 1
1438 }
1439
1440 #######################################################################
1441
1442 ##
1443 # Predicate checking if the cluster is clean, i.e. all of its PGs are
1444 # in a clean state (see get_num_active_clean for a definition).
1445 #
1446 # @return 0 if the cluster is clean, 1 otherwise
1447 #
1448 function is_clean() {
1449 num_pgs=$(get_num_pgs)
1450 test $num_pgs != 0 || return 1
1451 test $(get_num_active_clean) = $num_pgs || return 1
1452 }
1453
1454 function test_is_clean() {
1455 local dir=$1
1456
1457 setup $dir || return 1
1458 run_mon $dir a --osd_pool_default_size=1 || return 1
1459 run_mgr $dir x || return 1
1460 run_osd $dir 0 || return 1
1461 create_rbd_pool || return 1
1462 wait_for_clean || return 1
1463 is_clean || return 1
1464 teardown $dir || return 1
1465 }
1466
1467 #######################################################################
1468
1469 calc() { $AWK "BEGIN{print $*}"; }
1470
1471 ##
1472 # Return a list of numbers that are increasingly larger and whose
1473 # total is **timeout** seconds. It can be used to have short sleep
1474 # delays while waiting for an event on a fast machine. But if the
1475 # machine is running very slowly, the larger delays avoid stressing it
1476 # even further or spamming the logs.
1477 #
1478 # @param timeout sum of all delays, in seconds
1479 # @return a list of sleep delays
1480 #
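# Illustrative sketch (not from the original file) of the typical retry loop
# that callers such as wait_for_clean and wait_for_health build on top of
# get_timeout_delays; some_predicate stands for whatever condition is awaited.
#
#     local -a delays=($(get_timeout_delays $TIMEOUT .1))
#     local -i loop=0
#     while ! some_predicate ; do
#         (( loop >= ${#delays[*]} )) && return 1
#         sleep ${delays[$loop]}
#         loop+=1
#     done
#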
1481 function get_timeout_delays() {
1482 local trace=$(shopt -q -o xtrace && echo true || echo false)
1483 $trace && shopt -u -o xtrace
1484 local timeout=$1
1485 local first_step=${2:-1}
1486 local max_timeout=${3:-$MAX_TIMEOUT}
1487
1488 local i
1489 local total="0"
1490 i=$first_step
1491 while test "$(calc $total + $i \<= $timeout)" = "1"; do
1492 echo -n "$(calc $i) "
1493 total=$(calc $total + $i)
1494 i=$(calc $i \* 2)
1495 if [ $max_timeout -gt 0 ]; then
1496 # Did we reach max timeout ?
1497 if [ ${i%.*} -eq ${max_timeout%.*} ] && [ ${i#*.} \> ${max_timeout#*.} ] || [ ${i%.*} -gt ${max_timeout%.*} ]; then
1498 # Yes, so let's cap the max wait time to max
1499 i=$max_timeout
1500 fi
1501 fi
1502 done
1503 if test "$(calc $total \< $timeout)" = "1"; then
1504 echo -n "$(calc $timeout - $total) "
1505 fi
1506 $trace && shopt -s -o xtrace
1507 }
1508
1509 function test_get_timeout_delays() {
1510 test "$(get_timeout_delays 1)" = "1 " || return 1
1511 test "$(get_timeout_delays 5)" = "1 2 2 " || return 1
1512 test "$(get_timeout_delays 6)" = "1 2 3 " || return 1
1513 test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
1514 test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1
1515 test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1
1516 test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1
1517 test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1
1518 test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1
1519 test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1
1520 test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1
1521 test "$(get_timeout_delays 300 .1 0)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 12.8 25.6 51.2 102.4 95.3 " || return 1
1522 test "$(get_timeout_delays 300 .1 10)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 7.3 " || return 1
1523 }
1524
1525 #######################################################################
1526
1527 ##
1528 # Wait until the cluster becomes clean, failing if it does not make
1529 # progress for $WAIT_FOR_CLEAN_TIMEOUT seconds.
1530 # Progress is measured either via the **get_is_making_recovery_progress**
1531 # predicate or if the number of clean PGs changes (as returned by get_num_active_clean)
1532 #
1533 # @return 0 if the cluster is clean, 1 otherwise
1534 #
1535 function wait_for_clean() {
1536 local cmd=$1
1537 local num_active_clean=-1
1538 local cur_active_clean
1539 local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
1540 local -i loop=0
1541
1542 flush_pg_stats || return 1
1543 while test $(get_num_pgs) == 0 ; do
1544 sleep 1
1545 done
1546
1547 while true ; do
1548 # Comparing get_num_active_clean & get_num_pgs is used to determine
1549 # if the cluster is clean. That's almost an inline of is_clean() to
1550 # get more performance by avoiding multiple calls of get_num_active_clean.
1551 cur_active_clean=$(get_num_active_clean)
1552 test $cur_active_clean = $(get_num_pgs) && break
1553 if test $cur_active_clean != $num_active_clean ; then
1554 loop=0
1555 num_active_clean=$cur_active_clean
1556 elif get_is_making_recovery_progress ; then
1557 loop=0
1558 elif (( $loop >= ${#delays[*]} )) ; then
1559 ceph report
1560 return 1
1561 fi
1562 # eval is a no-op if cmd is empty
1563 eval $cmd
1564 sleep ${delays[$loop]}
1565 loop+=1
1566 done
1567 return 0
1568 }
1569
1570 function test_wait_for_clean() {
1571 local dir=$1
1572
1573 setup $dir || return 1
1574 run_mon $dir a --osd_pool_default_size=2 || return 1
1575 run_osd $dir 0 || return 1
1576 run_mgr $dir x || return 1
1577 create_rbd_pool || return 1
1578 ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
1579 run_osd $dir 1 || return 1
1580 wait_for_clean || return 1
1581 teardown $dir || return 1
1582 }
1583
1584 #######################################################################
1585
1586 ##
1587 # Wait until ceph health detail reports the condition passed as
1588 # argument, for up to $TIMEOUT seconds.
1589 #
1590 # @param string to grep for in health detail
1591 # @return 0 if the cluster health matches request, 1 otherwise
1592 #
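# Illustrative usage (not from the original file); the argument is matched
# with grep against ceph health detail, so any health string works:
#
#     wait_for_health "HEALTH_WARN" || return 1
#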
1593 function wait_for_health() {
1594 local grepstr=$1
1595 local -a delays=($(get_timeout_delays $TIMEOUT .1))
1596 local -i loop=0
1597
1598 while ! ceph health detail | grep "$grepstr" ; do
1599 if (( $loop >= ${#delays[*]} )) ; then
1600 ceph health detail
1601 return 1
1602 fi
1603 sleep ${delays[$loop]}
1604 loop+=1
1605 done
1606 }
1607
1608 ##
1609 # Wait until the cluster becomes HEALTH_OK again, failing if that does
1610 # not happen within $TIMEOUT seconds.
1611 #
1612 # @return 0 if the cluster is HEALTHY, 1 otherwise
1613 #
1614 function wait_for_health_ok() {
1615 wait_for_health "HEALTH_OK" || return 1
1616 }
1617
1618 function test_wait_for_health_ok() {
1619 local dir=$1
1620
1621 setup $dir || return 1
1622 run_mon $dir a --osd_pool_default_size=1 --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
1623 run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
1624 run_osd $dir 0 || return 1
1625 kill_daemons $dir TERM osd || return 1
1626 ceph osd down 0 || return 1
1627 ! TIMEOUT=1 wait_for_health_ok || return 1
1628 activate_osd $dir 0 || return 1
1629 wait_for_health_ok || return 1
1630 teardown $dir || return 1
1631 }
1632
1633
1634 #######################################################################
1635
1636 ##
1637 # Run repair on **pgid** and wait until it completes. The repair
1638 # function will fail if repair does not complete within $TIMEOUT
1639 # seconds.
1640 #
1641 # @param pgid the id of the PG
1642 # @return 0 on success, 1 on error
1643 #
1644 function repair() {
1645 local pgid=$1
1646 local last_scrub=$(get_last_scrub_stamp $pgid)
1647 ceph pg repair $pgid
1648 wait_for_scrub $pgid "$last_scrub"
1649 }
1650
1651 function test_repair() {
1652 local dir=$1
1653
1654 setup $dir || return 1
1655 run_mon $dir a --osd_pool_default_size=1 || return 1
1656 run_mgr $dir x || return 1
1657 run_osd $dir 0 || return 1
1658 create_rbd_pool || return 1
1659 wait_for_clean || return 1
1660 repair 1.0 || return 1
1661 kill_daemons $dir KILL osd || return 1
1662 ! TIMEOUT=1 repair 1.0 || return 1
1663 teardown $dir || return 1
1664 }
1665 #######################################################################
1666
1667 ##
1668 # Run scrub on **pgid** and wait until it completes. The pg_scrub
1669 # function will fail if the scrub does not complete within $TIMEOUT
1670 # seconds. The pg_scrub is complete whenever the
1671 # **get_last_scrub_stamp** function reports a timestamp different from
1672 # the one stored before starting the scrub.
1673 #
1674 # @param pgid the id of the PG
1675 # @return 0 on success, 1 on error
1676 #
1677 function pg_scrub() {
1678 local pgid=$1
1679 local last_scrub=$(get_last_scrub_stamp $pgid)
1680 ceph pg scrub $pgid
1681 wait_for_scrub $pgid "$last_scrub"
1682 }
1683
1684 function pg_deep_scrub() {
1685 local pgid=$1
1686 local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
1687 ceph pg deep-scrub $pgid
1688 wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
1689 }
1690
1691 function test_pg_scrub() {
1692 local dir=$1
1693
1694 setup $dir || return 1
1695 run_mon $dir a --osd_pool_default_size=1 || return 1
1696 run_mgr $dir x || return 1
1697 run_osd $dir 0 || return 1
1698 create_rbd_pool || return 1
1699 wait_for_clean || return 1
1700 pg_scrub 1.0 || return 1
1701 kill_daemons $dir KILL osd || return 1
1702 ! TIMEOUT=1 pg_scrub 1.0 || return 1
1703 teardown $dir || return 1
1704 }
1705
1706 #######################################################################
1707
1708 ##
1709 # Run the *command* and expect it to fail (i.e. return a non-zero status).
1710 # The output (stderr and stdout) is stored in a temporary file in *dir*
1711 # and is expected to contain the string *expected*.
1712 #
1713 # Return 0 if the command failed and the string was found. Otherwise
1714 # return 1 and cat the full output of the command on stderr for debug.
1715 #
1716 # @param dir temporary directory to store the output
1717 # @param expected string to look for in the output
1718 # @param command ... the command and its arguments
1719 # @return 0 on success, 1 on error
1720 #
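# Illustrative usage (not from the original file): verify a command fails
# and that its output contains the expected error string (the path below is
# hypothetical and assumed not to exist).
#
#     expect_failure $dir "No such file" cat /etc/this-does-not-exist || return 1
#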
1721
1722 function expect_failure() {
1723 local dir=$1
1724 shift
1725 local expected="$1"
1726 shift
1727 local success
1728
1729 if "$@" > $dir/out 2>&1 ; then
1730 success=true
1731 else
1732 success=false
1733 fi
1734
1735 if $success || ! grep --quiet "$expected" $dir/out ; then
1736 cat $dir/out >&2
1737 return 1
1738 else
1739 return 0
1740 fi
1741 }
1742
1743 function test_expect_failure() {
1744 local dir=$1
1745
1746 setup $dir || return 1
1747 expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
1748 # the command did not fail
1749 ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
1750 grep --quiet FAIL $dir/out || return 1
1751 # the command failed but the output does not contain the expected string
1752 ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
1753 ! grep --quiet FAIL $dir/out || return 1
1754 teardown $dir || return 1
1755 }
1756
1757 #######################################################################
1758
1759 ##
1760 # Given the *last_scrub*, wait for scrub to happen on **pgid**. It
1761 # will fail if scrub does not complete within $TIMEOUT seconds. The
1762 # scrub is complete whenever the **get_last_scrub_stamp** function
1763 # reports a timestamp different from the one given in argument.
1764 #
1765 # @param pgid the id of the PG
1766 # @param last_scrub timestamp of the last scrub for *pgid*
1767 # @return 0 on success, 1 on error
1768 #
1769 function wait_for_scrub() {
1770 local pgid=$1
1771 local last_scrub="$2"
1772 local sname=${3:-last_scrub_stamp}
1773
1774 for ((i=0; i < $TIMEOUT; i++)); do
1775 if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
1776 return 0
1777 fi
1778 sleep 1
1779 done
1780 return 1
1781 }
1782
1783 function test_wait_for_scrub() {
1784 local dir=$1
1785
1786 setup $dir || return 1
1787 run_mon $dir a --osd_pool_default_size=1 || return 1
1788 run_mgr $dir x || return 1
1789 run_osd $dir 0 || return 1
1790 create_rbd_pool || return 1
1791 wait_for_clean || return 1
1792 local pgid=1.0
1793 ceph pg repair $pgid
1794 local last_scrub=$(get_last_scrub_stamp $pgid)
1795 wait_for_scrub $pgid "$last_scrub" || return 1
1796 kill_daemons $dir KILL osd || return 1
1797 last_scrub=$(get_last_scrub_stamp $pgid)
1798 ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
1799 teardown $dir || return 1
1800 }
1801
1802 #######################################################################
1803
1804 ##
1805 # Return 0 if the erasure code *plugin* is available, 1 otherwise.
1806 #
1807 # @param plugin erasure code plugin
1808 # @return 0 on success, 1 on error
1809 #
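# Example (illustrative, assuming a running mon): skip plugin specific tests
# when the plugin is not built on the current platform:
#
#   if ! erasure_code_plugin_exists isa ; then
#       echo "SKIP: isa plugin not available"
#       return 0
#   fi
#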
1810
1811 function erasure_code_plugin_exists() {
1812 local plugin=$1
1813 local status
1814 local grepstr
1815 local s
1816 case `uname` in
1817 FreeBSD) grepstr="Cannot open.*$plugin" ;;
1818 *) grepstr="$plugin.*No such file" ;;
1819 esac
1820
1821 s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
1822          status=$?
1823 if [ $status -eq 0 ]; then
1824 ceph osd erasure-code-profile rm TESTPROFILE
1825 elif ! echo $s | grep --quiet "$grepstr" ; then
1826 status=1
1827 # display why the string was rejected.
1828 echo $s
1829 fi
1830 return $status
1831 }
1832
1833 function test_erasure_code_plugin_exists() {
1834 local dir=$1
1835
1836 setup $dir || return 1
1837 run_mon $dir a || return 1
1838 run_mgr $dir x || return 1
1839 erasure_code_plugin_exists jerasure || return 1
1840 ! erasure_code_plugin_exists FAKE || return 1
1841 teardown $dir || return 1
1842 }
1843
1844 #######################################################################
1845
1846 ##
1847 # Display all log files from **dir** on stdout.
1848 #
1849 # @param dir directory in which all data is stored
1850 #
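# Example (illustrative): dump the daemon logs when an assertion fails so
# the failure can be diagnosed from the test output alone:
#
#   wait_for_clean || { display_logs $dir ; return 1 ; }
#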
1851
1852 function display_logs() {
1853 local dir=$1
1854
1855 find $dir -maxdepth 1 -name '*.log' | \
1856 while read file ; do
1857 echo "======================= $file"
1858 cat $file
1859 done
1860 }
1861
1862 function test_display_logs() {
1863 local dir=$1
1864
1865 setup $dir || return 1
1866 run_mon $dir a || return 1
1867 kill_daemons $dir || return 1
1868 display_logs $dir > $dir/log.out
1869 grep --quiet mon.a.log $dir/log.out || return 1
1870 teardown $dir || return 1
1871 }
1872
1873 #######################################################################
1874 ##
1875 # Spawn a command in the background and append its pid to the variable
1876 # whose name is passed in argument. To make the output easier to read,
1877 # every line of output is prefixed with a process id.
1878 #
1879 # Example:
1880 # pids1=""
1881 # run_in_background pids1 bash -c 'sleep 1; exit 1'
1882 #
1883 # @param pid_variable the variable name (not value) where the pids will be stored
1884 # @param ... the command to execute
1885 # @return 0; the pids accumulated in *pid_variable* are meant to be passed to **wait_background**
1886 #
1887 function run_in_background() {
1888 local pid_variable=$1
1889 shift
1890      # Execute the command and prefix each line of its output with a process id.
1891      # Make sure the subshell returns the exit status of the command, not that of sed.
1892 ("$@" |& sed 's/^/'$$': /'; return "${PIPESTATUS[0]}") >&2 &
1893 eval "$pid_variable+=\" $!\""
1894 }
1895
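##
# Run a command with its stdout redirected to the file *out*; stderr is left
# untouched.
#
# @param out file receiving the command stdout
# @param ... the command and its arguments
#
# Example (illustrative):
#
#   save_stdout $dir/osd-tree.json ceph osd tree --format=json
#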
1896 function save_stdout {
1897 local out="$1"
1898 shift
1899 "$@" > "$out"
1900 }
1901
1902 function test_run_in_background() {
1903 local pids
1904 run_in_background pids sleep 1
1905 run_in_background pids sleep 1
1906 test $(echo $pids | wc -w) = 2 || return 1
1907 wait $pids || return 1
1908 }
1909
1910 #######################################################################
1911 ##
1912 # Wait for pids running in background to complete.
1913 # This function is usually used after a **run_in_background** call
1914 # Example:
1915 # pids1=""
1916 # run_in_background pids1 bash -c 'sleep 1; exit 1'
1917 # wait_background pids1
1918 #
1919 # @param pids the name of the variable that contains the active pids; it is emptied at the end of the function
1920 # @return 1 if at least one process exited with an error, 0 otherwise
1921 #
1922 function wait_background() {
1923      # We extract the pids from the variable name
1924 pids=${!1}
1925
1926 return_code=0
1927 for pid in $pids; do
1928 if ! wait $pid; then
1929 # If one process failed then return 1
1930 return_code=1
1931 fi
1932 done
1933
1934      # We empty the variable to report that all processes have ended
1935 eval "$1=''"
1936
1937 return $return_code
1938 }
1939
1940
1941 function test_wait_background() {
1942 local pids=""
1943 run_in_background pids bash -c "sleep 1; exit 1"
1944 run_in_background pids bash -c "sleep 2; exit 0"
1945 wait_background pids
1946 if [ $? -ne 1 ]; then return 1; fi
1947
1948 run_in_background pids bash -c "sleep 1; exit 0"
1949 run_in_background pids bash -c "sleep 2; exit 0"
1950 wait_background pids
1951 if [ $? -ne 0 ]; then return 1; fi
1952
1953 if [ ! -z "$pids" ]; then return 1; fi
1954 }
1955
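##
# Tell every OSD to flush its PG stats and wait until each OSD's
# last-stat-seq has caught up with the sequence number returned by the
# flush, so that subsequent stat queries (ceph df, pg dump, ...) reflect
# recent IO.
#
# @param timeout (optional) seconds to wait, $TIMEOUT by default
# @return 0 on success, 1 on timeout
#
# Example (illustrative):
#
#   rados -p rbd put obj /etc/group
#   flush_pg_stats || return 1
#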
1956 function flush_pg_stats()
1957 {
1958 local timeout=${1:-$TIMEOUT}
1959
1960 ids=`ceph osd ls`
1961 seqs=''
1962 for osd in $ids; do
1963 seq=`ceph tell osd.$osd flush_pg_stats`
1964 seqs="$seqs $osd-$seq"
1965 done
1966
1967 for s in $seqs; do
1968 osd=`echo $s | cut -d - -f 1`
1969 seq=`echo $s | cut -d - -f 2`
1970 echo "waiting osd.$osd seq $seq"
1971 while test $(ceph osd last-stat-seq $osd) -lt $seq; do
1972 sleep 1
1973 if [ $((timeout--)) -eq 0 ]; then
1974 return 1
1975 fi
1976 done
1977 done
1978 }
1979
1980 function test_flush_pg_stats()
1981 {
1982 local dir=$1
1983
1984 setup $dir || return 1
1985 run_mon $dir a --osd_pool_default_size=1 || return 1
1986 run_mgr $dir x || return 1
1987 run_osd $dir 0 || return 1
1988 create_rbd_pool || return 1
1989 rados -p rbd put obj /etc/group
1990 flush_pg_stats || return 1
1991 local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
1992 stored=`ceph df detail --format=json | jq "$jq_filter.stored"`
1993 stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
1994 test $stored -gt 0 || return 1
1995 test $stored == $stored_raw || return 1
1996 teardown $dir
1997 }
1998
1999 #######################################################################
2000
2001 ##
2002 # Call the **run** function (which must be defined by the caller) with
2003 # the **dir** argument followed by the caller argument list.
2004 #
2005 # If the **run** function returns on error, all logs found in **dir**
2006 # are displayed for diagnostic purposes.
2007 #
2008 # The **teardown** function is called when the **run** function returns
2009 # (on success or on error) to clean up leftovers. CEPH_CONF is set
2010 # to /dev/null and CEPH_ARGS is unset so that the tests are protected from
2011 # external interference.
2012 #
2013 # It is the responsibility of the **run** function to call the
2014 # **setup** function to prepare the test environment (create a temporary
2015 # directory etc.).
2016 #
2017 # The shell is configured (via PS4) to display the function name and line
2018 # number of every statement as it executes, to help debugging.
2019 #
2020 # @param dir directory in which all data is stored
2021 # @param ... arguments passed transparently to **run**
2022 # @return 0 on success, 1 on error
2023 #
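# Example (illustrative): a standalone test script typically defines **run**
# and ends with a call like the following (the test name is hypothetical):
#
#   main my-standalone-test "$@"
#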
2024 function main() {
2025 local dir=td/$1
2026 shift
2027
2028 shopt -s -o xtrace
2029 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
2030
2031     export PATH=.:$PATH # make sure programs from the source tree are preferred
2032 export PYTHONWARNINGS=ignore
2033 export CEPH_CONF=/dev/null
2034 unset CEPH_ARGS
2035
2036 local code
2037 if run $dir "$@" ; then
2038 code=0
2039 else
2040 code=1
2041 fi
2042 teardown $dir $code || return 1
2043 return $code
2044 }
2045
2046 #######################################################################
2047
2048 function run_tests() {
2049 shopt -s -o xtrace
2050 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
2051
2052     export PATH=.:$PATH # make sure programs from the source tree are preferred
2053
2054 export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
2055 export CEPH_ARGS
2056 CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
2057 CEPH_ARGS+="--mon-host=$CEPH_MON "
2058 export CEPH_CONF=/dev/null
2059
2060 local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
2061 local dir=td/ceph-helpers
2062
2063 for func in $funcs ; do
2064 if ! $func $dir; then
2065 teardown $dir 1
2066 return 1
2067 fi
2068 done
2069 }
2070
2071 if test "$1" = TESTS ; then
2072 shift
2073 run_tests "$@"
2074 exit $?
2075 fi
2076
2077 # NOTE:
2078 # jq only supports --exit-status|-e from version 1.4 onwards, which makes
2079 # returning on error much prettier and more straightforward.
2080 # However, the current automated upstream build is running with v1.3,
2081 # which has no idea what -e is. Hence the convoluted error checking we
2082 # need. Sad.
2083 # The next time someone changes this code, please check whether v1.4 is
2084 # available and, if so, please change these helpers to use -e. Thanks.
2085
2086 # jq '.all.supported | select([.[] == "foo"] | any)'
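#
# Example (illustrative; the JSON document and filter are made up):
#
#   jq_success '{"state": "clean"}' '.state == "clean"' || return 1
#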
2087 function jq_success() {
2088 input="$1"
2089 filter="$2"
2090 expects="\"$3\""
2091
2092 in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g")
2093 filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g")
2094
2095 ret=$(echo "$in_escaped" | jq "$filter_escaped")
2096 if [[ "$ret" == "true" ]]; then
2097 return 0
2098 elif [[ -n "$expects" ]]; then
2099 if [[ "$ret" == "$expects" ]]; then
2100 return 0
2101 fi
2102 fi
2103 return 1
2115 }
2116
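##
# Use the admin socket of the OSD holding shard *shard_id* of *objname* to
# inject a read error for that object, retrying up to 10 times (one second
# apart) while the OSD still reports the injection as Invalid.
#
# @param pooltype "ec" for erasure coded pools; anything else is treated as replicated
# @param which error hook to use, e.g. "data" or "mdata" (assumption, based on the inject${which}err socket command)
# @param poolname pool containing the object
# @param objname object for which reads should fail
# @param dir temporary directory of the test
# @param shard_id index in the object's acting set selecting the OSD to target
# @return 0 on success, 1 on error
#
# Example (illustrative; the pool and object names are hypothetical):
#
#   inject_eio ec data ecpool SOMETHING $dir 0 || return 1
#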
2117 function inject_eio() {
2118 local pooltype=$1
2119 shift
2120 local which=$1
2121 shift
2122 local poolname=$1
2123 shift
2124 local objname=$1
2125 shift
2126 local dir=$1
2127 shift
2128 local shard_id=$1
2129 shift
2130
2131 local -a initial_osds=($(get_osds $poolname $objname))
2132 local osd_id=${initial_osds[$shard_id]}
2133 if [ "$pooltype" != "ec" ]; then
2134 shard_id=""
2135 fi
2136 set_config osd $osd_id filestore_debug_inject_read_err true || return 1
2137 local loop=0
2138 while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
2139 inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
2140 loop=$(expr $loop + 1)
2141 if [ $loop = "10" ]; then
2142 return 1
2143 fi
2144 sleep 1
2145 done
2146 }
2147
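##
# diff the given files and, when they differ, diff them again with
# $DIFFCOLOPTS (side by side where the platform defines it) so the mismatch
# is easier to spot in the test output. The exit status is non-zero when the
# files differ.
#
# Example (illustrative; the file names are hypothetical):
#
#   multidiff $dir/expected.json $dir/actual.json || return 1
#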
2148 function multidiff() {
2149     if ! diff "$@" ; then
2150 if [ "$DIFFCOLOPTS" = "" ]; then
2151 return 1
2152 fi
2153         diff $DIFFCOLOPTS "$@"
2154 fi
2155 }
2156
2157 # Local Variables:
2158 # compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"
2159 # End: