1 #!/usr/bin/env bash
2 #
3 # Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
4 # Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
5 # Copyright (C) 2014 Federico Gimenez <fgimenez@coit.es>
6 #
7 # Author: Loic Dachary <loic@dachary.org>
8 # Author: Federico Gimenez <fgimenez@coit.es>
9 #
10 # This program is free software; you can redistribute it and/or modify
11 # it under the terms of the GNU Library Public License as published by
12 # the Free Software Foundation; either version 2, or (at your option)
13 # any later version.
14 #
15 # This program is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 # GNU Library Public License for more details.
19 #
20 TIMEOUT=300
21 WAIT_FOR_CLEAN_TIMEOUT=90
22 MAX_TIMEOUT=15
23 PG_NUM=4
24 TMPDIR=${TMPDIR:-/tmp}
25 CEPH_BUILD_VIRTUALENV=${TMPDIR}
26 TESTDIR=${TESTDIR:-${TMPDIR}}
27
28 if type xmlstarlet > /dev/null 2>&1; then
29 XMLSTARLET=xmlstarlet
30 elif type xml > /dev/null 2>&1; then
31 XMLSTARLET=xml
32 else
33 echo "Missing xmlstarlet binary!"
34 exit 1
35 fi
36
37 if [ `uname` = FreeBSD ]; then
38 SED=gsed
39 AWK=gawk
40 DIFFCOLOPTS=""
41 KERNCORE="kern.corefile"
42 else
43 SED=sed
44 AWK=awk
45 termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
46 if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
47 termwidth="-W ${termwidth}"
48 fi
49 DIFFCOLOPTS="-y $termwidth"
50 KERNCORE="kernel.core_pattern"
51 fi
52
53 EXTRA_OPTS=""
54
55 #! @file ceph-helpers.sh
56 # @brief Toolbox to manage Ceph cluster dedicated to testing
57 #
58 # Example use case:
59 #
60 # ~~~~~~~~~~~~~~~~{.sh}
61 # source ceph-helpers.sh
62 #
63 # function mytest() {
64 # # cleanup leftovers and reset mydir
65 # setup mydir
66 # # create a cluster with one monitor and three osds
67 # run_mon mydir a
68 # run_osd mydir 0
69 # run_osd mydir 2
70 # run_osd mydir 3
71 # # put and get an object
72 # rados --pool rbd put GROUP /etc/group
73 # rados --pool rbd get GROUP /tmp/GROUP
74 # # stop the cluster and cleanup the directory
75 # teardown mydir
76 # }
77 # ~~~~~~~~~~~~~~~~
78 #
79 # The focus is on simplicity and efficiency, in the context of
80 # functional tests. The output is intentionally very verbose
81 # and functions return as soon as an error is found. The caller
82 # is also expected to abort on the first error so that debugging
83 # can be done by looking at the end of the output.
84 #
85 # Each function is documented, implemented and tested independently.
86 # When modifying a helper, the test and the documentation are
87 # expected to be updated and it is easier if they are collocated. A
88 # test for a given function can be run with
89 #
90 # ~~~~~~~~~~~~~~~~{.sh}
91 # ceph-helpers.sh TESTS test_get_osds
92 # ~~~~~~~~~~~~~~~~
93 #
94 # and all the tests (i.e. all functions matching test_*) are run
95 # with:
96 #
97 # ~~~~~~~~~~~~~~~~{.sh}
98 # ceph-helpers.sh TESTS
99 # ~~~~~~~~~~~~~~~~
100 #
101 # A test function takes a single argument: the directory dedicated
102 # to the tests. It is expected not to create any file outside of this
103 # directory and to remove it entirely when it completes successfully.
104 #
105
106
107 function get_asok_dir() {
108 if [ -n "$CEPH_ASOK_DIR" ]; then
109 echo "$CEPH_ASOK_DIR"
110 else
111 echo ${TMPDIR:-/tmp}/ceph-asok.$$
112 fi
113 }
114
115 function get_asok_path() {
116 local name=$1
117 if [ -n "$name" ]; then
118 echo $(get_asok_dir)/ceph-$name.asok
119 else
120 echo $(get_asok_dir)/\$cluster-\$name.asok
121 fi
122 }
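##
# Example (a usage sketch, mirroring how the tests below query daemons):
# the admin socket path returned by get_asok_path can be passed to
# "ceph daemon" to talk to a running daemon directly.
#
# ~~~~~~~~~~~~~~~~{.sh}
# CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
#     config get osd_pool_default_size
# ~~~~~~~~~~~~~~~~
#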
123 ##
124 # Cleanup any leftovers found in **dir** via **teardown**
125 # and reset **dir** as an empty environment.
126 #
127 # @param dir path name of the environment
128 # @return 0 on success, 1 on error
129 #
130 function setup() {
131 local dir=$1
132 teardown $dir || return 1
133 mkdir -p $dir
134 mkdir -p $(get_asok_dir)
135 if [ $(ulimit -n) -le 1024 ]; then
136 ulimit -n 4096 || return 1
137 fi
138 if [ -z "$LOCALRUN" ]; then
139 trap "teardown $dir 1" TERM HUP INT
140 fi
141 }
142
143 function test_setup() {
144 local dir=$1
145 setup $dir || return 1
146 test -d $dir || return 1
147 setup $dir || return 1
148 test -d $dir || return 1
149 teardown $dir
150 }
151
152 #######################################################################
153
154 ##
155 # Kill all daemons for which a .pid file exists in **dir** and remove
156 # **dir**. If the file system in which **dir** resides is btrfs, delete all
157 # subvolumes that relate to it.
158 #
159 # @param dir path name of the environment
160 # @param dumplogs pass "1" to always dump logs; otherwise logs are dumped only if cores are found
161 # @return 0 on success, 1 on error
162 #
163 function teardown() {
164 local dir=$1
165 local dumplogs=$2
166 kill_daemons $dir KILL
167 if [ `uname` != FreeBSD ] \
168 && [ $(stat -f -c '%T' .) == "btrfs" ]; then
169 __teardown_btrfs $dir
170 fi
171 local cores="no"
172 local pattern="$(sysctl -n $KERNCORE)"
173 # See if we have apport core handling
174 if [ "${pattern:0:1}" = "|" ]; then
175 # TODO: Where can we get the dumps?
176 # Not sure where the dumps really are so this will look in the CWD
177 pattern=""
178 fi
179 # Locally core file names start with "core"; on teuthology they end with "core"
180 if ls $(dirname "$pattern") | grep -q '^core\|core$' ; then
181 cores="yes"
182 if [ -n "$LOCALRUN" ]; then
183 mkdir /tmp/cores.$$ 2> /dev/null || true
184 for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
185 mv $i /tmp/cores.$$
186 done
187 fi
188 fi
189 if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
190 if [ -n "$LOCALRUN" ]; then
191 display_logs $dir
192 else
193 # Move logs to where Teuthology will archive it
194 mkdir -p $TESTDIR/archive/log
195 mv $dir/*.log $TESTDIR/archive/log
196 fi
197 fi
198 rm -fr $dir
199 rm -rf $(get_asok_dir)
200 if [ "$cores" = "yes" ]; then
201 echo "ERROR: Failure due to cores found"
202 if [ -n "$LOCALRUN" ]; then
203 echo "Find saved core files in /tmp/cores.$$"
204 fi
205 return 1
206 fi
207 return 0
208 }
209
210 function __teardown_btrfs() {
211 local btrfs_base_dir=$1
212 local btrfs_root=$(df -P . | tail -1 | $AWK '{print $NF}')
213 local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list -t . | $AWK '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/$btrfs_dir")
214 for subvolume in $btrfs_dirs; do
215 sudo btrfs subvolume delete $btrfs_root/$subvolume
216 done
217 }
218
219 function test_teardown() {
220 local dir=$1
221 setup $dir || return 1
222 teardown $dir || return 1
223 ! test -d $dir || return 1
224 }
225
226 #######################################################################
227
228 ##
229 # Sends a signal to a single daemon.
230 # This is a helper function for kill_daemons
231 #
232 # After the daemon is sent **signal**, its actual termination
233 # will be verified by sending it signal 0. If the daemon is
234 # still alive, kill_daemon will pause for a few seconds and
235 # try again. This will repeat for a fixed number of times
236 # before kill_daemon returns on failure. The list of
237 # sleep intervals can be specified as **delays** and defaults
238 # to:
239 #
240 # 0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
241 #
242 # This sequence is designed to start with a very short sleep (0.1) in case
243 # the machine is fast enough for the daemon to terminate in a fraction of a
244 # second. The increasing sleep times should give plenty of time for
245 # the daemon to die even on the slowest running machine. If a daemon
246 # takes more than a few minutes to stop (the sum of all sleep times),
247 # there probably is no point in waiting longer and a number of things
248 # are likely to go wrong anyway: better to give up and return an error.
249 #
250 # @param pid path of the pid file of the daemon to signal
251 # @param send_signal the signal to send
252 # @param delays sequence of sleep times before failure
253 #
254 function kill_daemon() {
255 local pid=$(cat $1)
256 local send_signal=$2
257 local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
258 local exit_code=1
259 # In order to try after the last large sleep add 0 at the end so we check
260 # one last time before dropping out of the loop
261 for try in $delays 0 ; do
262 if kill -$send_signal $pid 2> /dev/null ; then
263 exit_code=1
264 else
265 exit_code=0
266 break
267 fi
268 send_signal=0
269 sleep $try
270 done;
271 return $exit_code
272 }
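##
# Example (a usage sketch, assuming osd.0 was started with run_osd and
# therefore has its pid file in $dir/osd.0.pid):
#
# ~~~~~~~~~~~~~~~~{.sh}
# kill_daemon $dir/osd.0.pid TERM || return 1
# ~~~~~~~~~~~~~~~~
#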
273
274 function test_kill_daemon() {
275 local dir=$1
276 setup $dir || return 1
277 run_mon $dir a --osd_pool_default_size=1 || return 1
278 run_mgr $dir x || return 1
279 run_osd $dir 0 || return 1
280
281 name_prefix=osd
282 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
283 #
284 # sending signal 0 won't kill the daemon
285 # waiting just for one second instead of the default schedule
286 # allows us to quickly verify what happens when kill fails
287 # to stop the daemon (i.e. it must return false)
288 #
289 ! kill_daemon $pidfile 0 1 || return 1
290 #
291 # kill just the osd and verify the mon is still responsive
292 #
293 kill_daemon $pidfile TERM || return 1
294 done
295
296 name_prefix=mgr
297 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
298 #
299 # kill the mgr
300 #
301 kill_daemon $pidfile TERM || return 1
302 done
303
304 name_prefix=mon
305 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
306 #
307 # kill the mon and verify it cannot be reached
308 #
309 kill_daemon $pidfile TERM || return 1
310 ! timeout 5 ceph status || return 1
311 done
312
313 teardown $dir || return 1
314 }
315
316 ##
317 # Kill all daemons for which a .pid file exists in **dir**. Each
318 # daemon is sent a **signal** and kill_daemons waits up to a few
319 # minutes for it to exit. By default all daemons are killed. If a
320 # **name_prefix** is provided, only the daemons for which a pid
321 # file is found matching the prefix are killed. See run_osd and
322 # run_mon for more information about the name conventions for
323 # the pid files.
324 #
325 # Send TERM to all daemons : kill_daemons $dir
326 # Send KILL to all daemons : kill_daemons $dir KILL
327 # Send KILL to all osds : kill_daemons $dir KILL osd
328 # Send KILL to osd 1 : kill_daemons $dir KILL osd.1
329 #
330 # If a daemon is sent the TERM signal and does not terminate
331 # within a few minutes, it will still be running even after
332 # kill_daemons returns.
333 #
334 # If all daemons are killed successfully the function returns 0;
335 # if at least one daemon remains, this is treated as an
336 # error and the function returns 1.
337 #
338 # @param dir path name of the environment
339 # @param signal name of the first signal (defaults to TERM)
340 # @param name_prefix only kill matching daemons (defaults to all)
341 # @param delays sequence of sleep times before failure
342 # @return 0 on success, 1 on error
343 #
344 function kill_daemons() {
345 local trace=$(shopt -q -o xtrace && echo true || echo false)
346 $trace && shopt -u -o xtrace
347 local dir=$1
348 local signal=${2:-TERM}
349 local name_prefix=$3 # optional, osd, mon, osd.1
350 local delays=$4 #optional timing
351 local status=0
352 local pids=""
353
354 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
355 run_in_background pids kill_daemon $pidfile $signal $delays
356 done
357
358 wait_background pids
359 status=$?
360
361 $trace && shopt -s -o xtrace
362 return $status
363 }
364
365 function test_kill_daemons() {
366 local dir=$1
367 setup $dir || return 1
368 run_mon $dir a --osd_pool_default_size=1 || return 1
369 run_mgr $dir x || return 1
370 run_osd $dir 0 || return 1
371 #
372 # sending signal 0 won't kill the daemon
373 # waiting just for one second instead of the default schedule
374 # allows us to quickly verify what happens when kill fails
375 # to stop the daemon (i.e. it must return false)
376 #
377 ! kill_daemons $dir 0 osd 1 || return 1
378 #
379 # kill just the osd and verify the mon is still responsive
380 #
381 kill_daemons $dir TERM osd || return 1
382 #
383 # kill the mgr
384 #
385 kill_daemons $dir TERM mgr || return 1
386 #
387 # kill the mon and verify it cannot be reached
388 #
389 kill_daemons $dir TERM || return 1
390 ! timeout 5 ceph status || return 1
391 teardown $dir || return 1
392 }
393
394 #
395 # return a random TCP port which is not used yet
396 #
397 # please note, there could be a race if we use this function to find
398 # a free port and then try to bind to it.
399 #
400 function get_unused_port() {
401 local ip=127.0.0.1
402 python3 -c "import socket; s=socket.socket(); s.bind(('$ip', 0)); print(s.getsockname()[1]); s.close()"
403 }
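##
# Example (a usage sketch): pick a free port before building CEPH_ARGS,
# keeping in mind the race mentioned above.
#
# ~~~~~~~~~~~~~~~~{.sh}
# port=$(get_unused_port)
# CEPH_ARGS+="--mon-host=127.0.0.1:$port "
# ~~~~~~~~~~~~~~~~
#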
404
405 #######################################################################
406
407 ##
408 # Run a monitor by the name mon.**id** with data in **dir**/**id**.
409 # The logs can be found in **dir**/mon.**id**.log and the pid file
410 # is **dir**/mon.**id**.pid and the admin socket is
411 # **dir**/**id**/ceph-mon.**id**.asok.
412 #
413 # The remaining arguments are passed verbatim to ceph-mon --mkfs
414 # and the ceph-mon daemon.
415 #
416 # Two mandatory arguments must be provided: --fsid and --mon-host
417 # Instead of adding them to every call to run_mon, they can be
418 # set in the CEPH_ARGS environment variable to be read implicitly
419 # by every ceph command.
420 #
421 # The CEPH_CONF variable is expected to be set to /dev/null to
422 # only rely on arguments for configuration.
423 #
424 # Examples:
425 #
426 # CEPH_ARGS="--fsid=$(uuidgen) "
427 # CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
428 # run_mon $dir a # spawn a mon and bind port 7018
429 # run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
430 #
431 # If mon_initial_members is not set, the default rbd pool is deleted
432 # and replaced with a replicated pool with fewer placement groups to
433 # speed up initialization. If mon_initial_members is set, no attempt
434 # is made to recreate the rbd pool because it would hang forever,
435 # waiting for other mons to join.
436 #
437 # A **dir**/ceph.conf file is created but not meant to be used by any
438 # function. It is convenient for debugging a failure with:
439 #
440 # ceph --conf **dir**/ceph.conf -s
441 #
442 # @param dir path name of the environment
443 # @param id mon identifier
444 # @param ... can be any option valid for ceph-mon
445 # @return 0 on success, 1 on error
446 #
447 function run_mon() {
448 local dir=$1
449 shift
450 local id=$1
451 shift
452 local data=$dir/$id
453
454 ceph-mon \
455 --id $id \
456 --mkfs \
457 --mon-data=$data \
458 --run-dir=$dir \
459 "$@" || return 1
460
461 ceph-mon \
462 --id $id \
463 --osd-failsafe-full-ratio=.99 \
464 --mon-osd-full-ratio=.99 \
465 --mon-data-avail-crit=1 \
466 --mon-data-avail-warn=5 \
467 --paxos-propose-interval=0.1 \
468 --osd-crush-chooseleaf-type=0 \
469 $EXTRA_OPTS \
470 --debug-mon 20 \
471 --debug-ms 20 \
472 --debug-paxos 20 \
473 --chdir= \
474 --mon-data=$data \
475 --log-file=$dir/\$name.log \
476 --admin-socket=$(get_asok_path) \
477 --mon-cluster-log-file=$dir/log \
478 --run-dir=$dir \
479 --pid-file=$dir/\$name.pid \
480 --mon-allow-pool-delete \
481 --osd-pool-default-pg-autoscale-mode off \
482 --mon-osd-backfillfull-ratio .99 \
483 "$@" || return 1
484
485 cat > $dir/ceph.conf <<EOF
486 [global]
487 fsid = $(get_config mon $id fsid)
488 mon host = $(get_config mon $id mon_host)
489 EOF
490 }
491
492 function test_run_mon() {
493 local dir=$1
494
495 setup $dir || return 1
496
497 run_mon $dir a --mon-initial-members=a || return 1
498 ceph mon dump | grep "mon.a" || return 1
499 kill_daemons $dir || return 1
500
501 run_mon $dir a --osd_pool_default_size=3 || return 1
502 run_osd $dir 0 || return 1
503 run_osd $dir 1 || return 1
504 run_osd $dir 2 || return 1
505 create_rbd_pool || return 1
506 ceph osd dump | grep "pool 1 'rbd'" || return 1
507 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
508 config get osd_pool_default_size)
509 test "$size" = '{"osd_pool_default_size":"3"}' || return 1
510
511 ! CEPH_ARGS='' ceph status || return 1
512 CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1
513
514 kill_daemons $dir || return 1
515
516 run_mon $dir a --osd_pool_default_size=1 || return 1
517 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
518 config get osd_pool_default_size)
519 test "$size" = '{"osd_pool_default_size":"1"}' || return 1
520 kill_daemons $dir || return 1
521
522 CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
523 run_mon $dir a || return 1
524 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
525 config get osd_pool_default_size)
526 test "$size" = '{"osd_pool_default_size":"2"}' || return 1
527 kill_daemons $dir || return 1
528
529 teardown $dir || return 1
530 }
531
532 function create_rbd_pool() {
533 ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
534 create_pool rbd $PG_NUM || return 1
535 rbd pool init rbd
536 }
537
538 function create_pool() {
539 ceph osd pool create "$@"
540 sleep 1
541 }
542
543 function delete_pool() {
544 local poolname=$1
545 ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
546 }
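##
# Example (a usage sketch): create a replicated pool with $PG_NUM placement
# groups, store an object in it and delete it. The pool name "mypool" is
# arbitrary.
#
# ~~~~~~~~~~~~~~~~{.sh}
# create_pool mypool $PG_NUM || return 1
# rados --pool mypool put GROUP /etc/group || return 1
# delete_pool mypool || return 1
# ~~~~~~~~~~~~~~~~
#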
547
548 #######################################################################
549
550 function run_mgr() {
551 local dir=$1
552 shift
553 local id=$1
554 shift
555 local data=$dir/$id
556
557 ceph config set mgr mgr/devicehealth/enable_monitoring off --force
558 ceph-mgr \
559 --id $id \
560 $EXTRA_OPTS \
561 --osd-failsafe-full-ratio=.99 \
562 --debug-mgr 20 \
563 --debug-objecter 20 \
564 --debug-ms 20 \
565 --debug-paxos 20 \
566 --chdir= \
567 --mgr-data=$data \
568 --log-file=$dir/\$name.log \
569 --admin-socket=$(get_asok_path) \
570 --run-dir=$dir \
571 --pid-file=$dir/\$name.pid \
572 --mgr-module-path=$(realpath ${CEPH_ROOT}/src/pybind/mgr) \
573 "$@" || return 1
574 }
575
576 function run_mds() {
577 local dir=$1
578 shift
579 local id=$1
580 shift
581 local data=$dir/$id
582
583 ceph-mds \
584 --id $id \
585 $EXTRA_OPTS \
586 --debug-mds 20 \
587 --debug-objecter 20 \
588 --debug-ms 20 \
589 --chdir= \
590 --mds-data=$data \
591 --log-file=$dir/\$name.log \
592 --admin-socket=$(get_asok_path) \
593 --run-dir=$dir \
594 --pid-file=$dir/\$name.pid \
595 "$@" || return 1
596 }
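##
# run_mgr and run_mds follow the same conventions as run_mon and run_osd:
# logs go to **dir**/$name.log, the pid file is **dir**/$name.pid and the
# admin socket lives under the directory returned by get_asok_dir.
#
# Example (a usage sketch; the ids "x" and "a" are arbitrary):
#
# ~~~~~~~~~~~~~~~~{.sh}
# run_mgr $dir x || return 1
# run_mds $dir a || return 1
# ~~~~~~~~~~~~~~~~
#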
597
598 #######################################################################
599
600 ##
601 # Create (prepare) and run (activate) an osd by the name osd.**id**
602 # with data in **dir**/**id**. The logs can be found in
603 # **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
604 # the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
605 #
606 # The remaining arguments are passed verbatim to ceph-osd.
607 #
608 # Two mandatory arguments must be provided: --fsid and --mon-host
609 # Instead of adding them to every call to run_osd, they can be
610 # set in the CEPH_ARGS environment variable to be read implicitly
611 # by every ceph command.
612 #
613 # The CEPH_CONF variable is expected to be set to /dev/null to
614 # only rely on arguments for configuration.
615 #
616 # The run_osd function creates the OSD data directory in the **dir**/**id**
617 # directory and relies on the activate_osd function to run the daemon.
618 #
619 # Examples:
620 #
621 # CEPH_ARGS="--fsid=$(uuidgen) "
622 # CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
623 # run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
624 #
625 # @param dir path name of the environment
626 # @param id osd identifier
627 # @param ... can be any option valid for ceph-osd
628 # @return 0 on success, 1 on error
629 #
630 function run_osd() {
631 local dir=$1
632 shift
633 local id=$1
634 shift
635 local osd_data=$dir/$id
636
637 local ceph_args="$CEPH_ARGS"
638 ceph_args+=" --osd-failsafe-full-ratio=.99"
639 ceph_args+=" --osd-journal-size=100"
640 ceph_args+=" --osd-scrub-load-threshold=2000"
641 ceph_args+=" --osd-data=$osd_data"
642 ceph_args+=" --osd-journal=${osd_data}/journal"
643 ceph_args+=" --chdir="
644 ceph_args+=$EXTRA_OPTS
645 ceph_args+=" --run-dir=$dir"
646 ceph_args+=" --admin-socket=$(get_asok_path)"
647 ceph_args+=" --debug-osd=20"
648 ceph_args+=" --debug-ms=1"
649 ceph_args+=" --debug-monc=20"
650 ceph_args+=" --log-file=$dir/\$name.log"
651 ceph_args+=" --pid-file=$dir/\$name.pid"
652 ceph_args+=" --osd-max-object-name-len=460"
653 ceph_args+=" --osd-max-object-namespace-len=64"
654 ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
655 ceph_args+=" "
656 ceph_args+="$@"
657 mkdir -p $osd_data
658
659 local uuid=`uuidgen`
660 echo "add osd$id $uuid"
661 OSD_SECRET=$(ceph-authtool --gen-print-key)
662 echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
663 ceph osd new $uuid -i $osd_data/new.json
664 rm $osd_data/new.json
665 ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid
666
667 local key_fn=$osd_data/keyring
668 cat > $key_fn<<EOF
669 [osd.$id]
670 key = $OSD_SECRET
671 EOF
672 echo adding osd$id key to auth repository
673 ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
674 echo start osd.$id
675 ceph-osd -i $id $ceph_args &
676
677 # If noup is set, then can't wait for this osd
678 if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
679 return 0
680 fi
681 wait_for_osd up $id || return 1
682
683 }
684
685 function run_osd_filestore() {
686 local dir=$1
687 shift
688 local id=$1
689 shift
690 local osd_data=$dir/$id
691
692 local ceph_args="$CEPH_ARGS"
693 ceph_args+=" --osd-failsafe-full-ratio=.99"
694 ceph_args+=" --osd-journal-size=100"
695 ceph_args+=" --osd-scrub-load-threshold=2000"
696 ceph_args+=" --osd-data=$osd_data"
697 ceph_args+=" --osd-journal=${osd_data}/journal"
698 ceph_args+=" --chdir="
699 ceph_args+=$EXTRA_OPTS
700 ceph_args+=" --run-dir=$dir"
701 ceph_args+=" --admin-socket=$(get_asok_path)"
702 ceph_args+=" --debug-osd=20"
703 ceph_args+=" --debug-ms=1"
704 ceph_args+=" --debug-monc=20"
705 ceph_args+=" --log-file=$dir/\$name.log"
706 ceph_args+=" --pid-file=$dir/\$name.pid"
707 ceph_args+=" --osd-max-object-name-len=460"
708 ceph_args+=" --osd-max-object-namespace-len=64"
709 ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
710 ceph_args+=" "
711 ceph_args+="$@"
712 mkdir -p $osd_data
713
714 local uuid=`uuidgen`
715 echo "add osd$osd $uuid"
716 OSD_SECRET=$(ceph-authtool --gen-print-key)
717 echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
718 ceph osd new $uuid -i $osd_data/new.json
719 rm $osd_data/new.json
720 ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid --osd-objectstore=filestore
721
722 local key_fn=$osd_data/keyring
723 cat > $key_fn<<EOF
724 [osd.$id]
725 key = $OSD_SECRET
726 EOF
727 echo adding osd$id key to auth repository
728 ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
729 echo start osd.$id
730 ceph-osd -i $id $ceph_args &
731
732 # If noup is set, then can't wait for this osd
733 if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
734 return 0
735 fi
736 wait_for_osd up $id || return 1
737
738
739 }
740
741 function test_run_osd() {
742 local dir=$1
743
744 setup $dir || return 1
745
746 run_mon $dir a || return 1
747 run_mgr $dir x || return 1
748
749 run_osd $dir 0 || return 1
750 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
751 config get osd_max_backfills)
752 echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
753
754 run_osd $dir 1 --osd-max-backfills 20 || return 1
755 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
756 config get osd_max_backfills)
757 test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
758
759 CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
760 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
761 config get osd_max_backfills)
762 test "$backfills" = '{"osd_max_backfills":"30"}' || return 1
763
764 teardown $dir || return 1
765 }
766
767 #######################################################################
768
769 ##
770 # Shutdown and remove all traces of the osd by the name osd.**id**.
771 #
772 # The OSD is shutdown with the TERM signal. It is then removed from
773 # the auth list, crush map, osd map etc and the files associated with
774 # it are also removed.
775 #
776 # @param dir path name of the environment
777 # @param id osd identifier
778 # @return 0 on success, 1 on error
779 #
780 function destroy_osd() {
781 local dir=$1
782 local id=$2
783
784 ceph osd out osd.$id || return 1
785 kill_daemons $dir TERM osd.$id || return 1
786 ceph osd down osd.$id || return 1
787 ceph osd purge osd.$id --yes-i-really-mean-it || return 1
788 teardown $dir/$id || return 1
789 rm -fr $dir/$id
790 }
791
792 function test_destroy_osd() {
793 local dir=$1
794
795 setup $dir || return 1
796 run_mon $dir a || return 1
797 run_mgr $dir x || return 1
798 run_osd $dir 0 || return 1
799 destroy_osd $dir 0 || return 1
800 ! ceph osd dump | grep "osd.$id " || return 1
801 teardown $dir || return 1
802 }
803
804 #######################################################################
805
806 ##
807 # Run (activate) an osd by the name osd.**id** with data in
808 # **dir**/**id**. The logs can be found in **dir**/osd.**id**.log,
809 # the pid file is **dir**/osd.**id**.pid and the admin socket is
810 # **dir**/**id**/ceph-osd.**id**.asok.
811 #
812 # The remaining arguments are passed verbatim to ceph-osd.
813 #
814 # Two mandatory arguments must be provided: --fsid and --mon-host
815 # Instead of adding them to every call to activate_osd, they can be
816 # set in the CEPH_ARGS environment variable to be read implicitly
817 # by every ceph command.
818 #
819 # The CEPH_CONF variable is expected to be set to /dev/null to
820 # only rely on arguments for configuration.
821 #
822 # The activate_osd function expects a valid OSD data directory
823 # in **dir**/**id**, either just created via run_osd or re-using
824 # one left by a previous run of ceph-osd. The ceph-osd daemon is
825 # run directly in the foreground.
826 #
827 # The activate_osd function blocks until the monitor reports the osd
828 # up. If it fails to do so within $TIMEOUT seconds, activate_osd
829 # fails.
830 #
831 # Examples:
832 #
833 # CEPH_ARGS="--fsid=$(uuidgen) "
834 # CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
835 # activate_osd $dir 0 # activate an osd using the monitor listening on 7018
836 #
837 # @param dir path name of the environment
838 # @param id osd identifier
839 # @param ... can be any option valid for ceph-osd
840 # @return 0 on success, 1 on error
841 #
842 function activate_osd() {
843 local dir=$1
844 shift
845 local id=$1
846 shift
847 local osd_data=$dir/$id
848
849 local ceph_args="$CEPH_ARGS"
850 ceph_args+=" --osd-failsafe-full-ratio=.99"
851 ceph_args+=" --osd-journal-size=100"
852 ceph_args+=" --osd-scrub-load-threshold=2000"
853 ceph_args+=" --osd-data=$osd_data"
854 ceph_args+=" --osd-journal=${osd_data}/journal"
855 ceph_args+=" --chdir="
856 ceph_args+=$EXTRA_OPTS
857 ceph_args+=" --run-dir=$dir"
858 ceph_args+=" --admin-socket=$(get_asok_path)"
859 ceph_args+=" --debug-osd=20"
860 ceph_args+=" --log-file=$dir/\$name.log"
861 ceph_args+=" --pid-file=$dir/\$name.pid"
862 ceph_args+=" --osd-max-object-name-len=460"
863 ceph_args+=" --osd-max-object-namespace-len=64"
864 ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
865 ceph_args+=" "
866 ceph_args+="$@"
867 mkdir -p $osd_data
868
869 echo start osd.$id
870 ceph-osd -i $id $ceph_args &
871
872 [ "$id" = "$(cat $osd_data/whoami)" ] || return 1
873
874 # If noup is set, then can't wait for this osd
875 if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
876 return 0
877 fi
878 wait_for_osd up $id || return 1
879 }
880
881 function test_activate_osd() {
882 local dir=$1
883
884 setup $dir || return 1
885
886 run_mon $dir a || return 1
887 run_mgr $dir x || return 1
888
889 run_osd $dir 0 || return 1
890 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
891 config get osd_max_backfills)
892 echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
893
894 kill_daemons $dir TERM osd || return 1
895
896 activate_osd $dir 0 --osd-max-backfills 20 || return 1
897 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
898 config get osd_max_backfills)
899 test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
900
901 teardown $dir || return 1
902 }
903
904 #######################################################################
905
906 ##
907 # Wait until the OSD **id** is either up or down, as specified by
908 # **state**. It fails after $TIMEOUT seconds.
909 #
910 # @param state either up or down
911 # @param id osd identifier
912 # @return 0 on success, 1 on error
913 #
914 function wait_for_osd() {
915 local state=$1
916 local id=$2
917
918 status=1
919 for ((i=0; i < $TIMEOUT; i++)); do
920 echo $i
921 if ! ceph osd dump | grep "osd.$id $state"; then
922 sleep 1
923 else
924 status=0
925 break
926 fi
927 done
928 return $status
929 }
930
931 function test_wait_for_osd() {
932 local dir=$1
933 setup $dir || return 1
934 run_mon $dir a --osd_pool_default_size=1 || return 1
935 run_mgr $dir x || return 1
936 run_osd $dir 0 || return 1
937 run_osd $dir 1 || return 1
938 wait_for_osd up 0 || return 1
939 wait_for_osd up 1 || return 1
940 kill_daemons $dir TERM osd.0 || return 1
941 wait_for_osd down 0 || return 1
942 ( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
943 teardown $dir || return 1
944 }
945
946 #######################################################################
947
948 ##
949 # Display the list of OSD ids supporting the **objectname** stored in
950 # **poolname**, as reported by ceph osd map.
951 #
952 # @param poolname an existing pool
953 # @param objectname an objectname (may or may not exist)
954 # @param STDOUT white space separated list of OSD ids
955 # @return 0 on success, 1 on error
956 #
957 function get_osds() {
958 local poolname=$1
959 local objectname=$2
960
961 local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
962 jq '.acting | .[]')
963 # get rid of the trailing space
964 echo $osds
965 }
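##
# Example (a usage sketch): iterate over the acting set of an object.
#
# ~~~~~~~~~~~~~~~~{.sh}
# for osd in $(get_osds rbd GROUP) ; do
#     echo "osd.$osd is in the acting set of GROUP"
# done
# ~~~~~~~~~~~~~~~~
#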
966
967 function test_get_osds() {
968 local dir=$1
969
970 setup $dir || return 1
971 run_mon $dir a --osd_pool_default_size=2 || return 1
972 run_mgr $dir x || return 1
973 run_osd $dir 0 || return 1
974 run_osd $dir 1 || return 1
975 create_rbd_pool || return 1
976 wait_for_clean || return 1
977 create_rbd_pool || return 1
978 get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
979 teardown $dir || return 1
980 }
981
982 #######################################################################
983
984 ##
985 # Wait for the monitor to form quorum (optionally, of size N)
986 #
987 # @param timeout duration (lower-bound) to wait for quorum to be formed
988 # @param quorumsize size of quorum to wait for
989 # @return 0 on success, 1 on error
990 #
991 function wait_for_quorum() {
992 local timeout=$1
993 local quorumsize=$2
994
995 if [[ -z "$timeout" ]]; then
996 timeout=300
997 fi
998
999 if [[ -z "$quorumsize" ]]; then
1000 timeout $timeout ceph quorum_status --format=json >&/dev/null || return 1
1001 return 0
1002 fi
1003
1004 no_quorum=1
1005 wait_until=$((`date +%s` + $timeout))
1006 while [[ $(date +%s) -lt $wait_until ]]; do
1007 jqfilter='.quorum | length == '$quorumsize
1008 jqinput="$(timeout $timeout ceph quorum_status --format=json 2>/dev/null)"
1009 res=$(echo $jqinput | jq "$jqfilter")
1010 if [[ "$res" == "true" ]]; then
1011 no_quorum=0
1012 break
1013 fi
1014 done
1015 return $no_quorum
1016 }
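##
# Example (a usage sketch): wait at most 300 seconds for a quorum of three
# monitors to form.
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_quorum 300 3 || return 1
# ~~~~~~~~~~~~~~~~
#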
1017
1018 #######################################################################
1019
1020 ##
1021 # Return the PG supporting the **objectname** stored in
1022 # **poolname**, as reported by ceph osd map.
1023 #
1024 # @param poolname an existing pool
1025 # @param objectname an objectname (may or may not exist)
1026 # @param STDOUT a PG
1027 # @return 0 on success, 1 on error
1028 #
1029 function get_pg() {
1030 local poolname=$1
1031 local objectname=$2
1032
1033 ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
1034 }
1035
1036 function test_get_pg() {
1037 local dir=$1
1038
1039 setup $dir || return 1
1040 run_mon $dir a --osd_pool_default_size=1 || return 1
1041 run_mgr $dir x || return 1
1042 run_osd $dir 0 || return 1
1043 create_rbd_pool || return 1
1044 wait_for_clean || return 1
1045 get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
1046 teardown $dir || return 1
1047 }
1048
1049 #######################################################################
1050
1051 ##
1052 # Return the value of the **config**, obtained via the config get command
1053 # of the admin socket of **daemon**.**id**.
1054 #
1055 # @param daemon mon or osd
1056 # @param id mon or osd ID
1057 # @param config the configuration variable name as found in config_opts.h
1058 # @param STDOUT the config value
1059 # @return 0 on success, 1 on error
1060 #
1061 function get_config() {
1062 local daemon=$1
1063 local id=$2
1064 local config=$3
1065
1066 CEPH_ARGS='' \
1067 ceph --format json daemon $(get_asok_path $daemon.$id) \
1068 config get $config 2> /dev/null | \
1069 jq -r ".$config"
1070 }
1071
1072 function test_get_config() {
1073 local dir=$1
1074
1075 # override the default config using command line arg and check it
1076 setup $dir || return 1
1077 run_mon $dir a --osd_pool_default_size=1 || return 1
1078 test $(get_config mon a osd_pool_default_size) = 1 || return 1
1079 run_mgr $dir x || return 1
1080 run_osd $dir 0 --osd_max_scrubs=3 || return 1
1081 test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
1082 teardown $dir || return 1
1083 }
1084
1085 #######################################################################
1086
1087 ##
1088 # Set the **config** to specified **value**, via the config set command
1089 # of the admin socket of **daemon**.**id**
1090 #
1091 # @param daemon mon or osd
1092 # @param id mon or osd ID
1093 # @param config the configuration variable name as found in config_opts.h
1094 # @param value the config value
1095 # @return 0 on success, 1 on error
1096 #
1097 function set_config() {
1098 local daemon=$1
1099 local id=$2
1100 local config=$3
1101 local value=$4
1102
1103 test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
1104 config set $config $value 2> /dev/null | \
1105 jq 'has("success")') == true
1106 }
1107
1108 function test_set_config() {
1109 local dir=$1
1110
1111 setup $dir || return 1
1112 run_mon $dir a --osd_pool_default_size=1 || return 1
1113 test $(get_config mon a ms_crc_header) = true || return 1
1114 set_config mon a ms_crc_header false || return 1
1115 test $(get_config mon a ms_crc_header) = false || return 1
1116 set_config mon a ms_crc_header true || return 1
1117 test $(get_config mon a ms_crc_header) = true || return 1
1118 teardown $dir || return 1
1119 }
1120
1121 #######################################################################
1122
1123 ##
1124 # Return the OSD id of the primary OSD supporting the **objectname**
1125 # stored in **poolname**, as reported by ceph osd map.
1126 #
1127 # @param poolname an existing pool
1128 # @param objectname an objectname (may or may not exist)
1129 # @param STDOUT the primary OSD id
1130 # @return 0 on success, 1 on error
1131 #
1132 function get_primary() {
1133 local poolname=$1
1134 local objectname=$2
1135
1136 ceph --format json osd map $poolname $objectname 2>/dev/null | \
1137 jq '.acting_primary'
1138 }
1139
1140 function test_get_primary() {
1141 local dir=$1
1142
1143 setup $dir || return 1
1144 run_mon $dir a --osd_pool_default_size=1 || return 1
1145 local osd=0
1146 run_mgr $dir x || return 1
1147 run_osd $dir $osd || return 1
1148 create_rbd_pool || return 1
1149 wait_for_clean || return 1
1150 test $(get_primary rbd GROUP) = $osd || return 1
1151 teardown $dir || return 1
1152 }
1153
1154 #######################################################################
1155
1156 ##
1157 # Return the id of any OSD supporting the **objectname** stored in
1158 # **poolname**, as reported by ceph osd map, except the primary.
1159 #
1160 # @param poolname an existing pool
1161 # @param objectname an objectname (may or may not exist)
1162 # @param STDOUT the OSD id
1163 # @return 0 on success, 1 on error
1164 #
1165 function get_not_primary() {
1166 local poolname=$1
1167 local objectname=$2
1168
1169 local primary=$(get_primary $poolname $objectname)
1170 ceph --format json osd map $poolname $objectname 2>/dev/null | \
1171 jq ".acting | map(select (. != $primary)) | .[0]"
1172 }
1173
1174 function test_get_not_primary() {
1175 local dir=$1
1176
1177 setup $dir || return 1
1178 run_mon $dir a --osd_pool_default_size=2 || return 1
1179 run_mgr $dir x || return 1
1180 run_osd $dir 0 || return 1
1181 run_osd $dir 1 || return 1
1182 create_rbd_pool || return 1
1183 wait_for_clean || return 1
1184 local primary=$(get_primary rbd GROUP)
1185 local not_primary=$(get_not_primary rbd GROUP)
1186 test $not_primary != $primary || return 1
1187 test $not_primary = 0 -o $not_primary = 1 || return 1
1188 teardown $dir || return 1
1189 }
1190
1191 #######################################################################
1192
1193 function _objectstore_tool_nodown() {
1194 local dir=$1
1195 shift
1196 local id=$1
1197 shift
1198 local osd_data=$dir/$id
1199
1200 ceph-objectstore-tool \
1201 --data-path $osd_data \
1202 "$@" || return 1
1203 }
1204
1205 function _objectstore_tool_nowait() {
1206 local dir=$1
1207 shift
1208 local id=$1
1209 shift
1210
1211 kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1
1212
1213 _objectstore_tool_nodown $dir $id "$@" || return 1
1214 activate_osd $dir $id $ceph_osd_args >&2 || return 1
1215 }
1216
1217 ##
1218 # Run ceph-objectstore-tool against the OSD **id** using the data path
1219 # **dir**. The OSD is killed with TERM prior to running
1220 # ceph-objectstore-tool because access to the data path is
1221 # exclusive. The OSD is restarted after the command completes. The
1222 # objectstore_tool returns after all PG are active+clean again.
1223 #
1224 # @param dir the data path of the OSD
1225 # @param id the OSD id
1226 # @param ... arguments to ceph-objectstore-tool
1227 # @param STDIN the input of ceph-objectstore-tool
1228 # @param STDOUT the output of ceph-objectstore-tool
1229 # @return 0 on success, 1 on error
1230 #
1231 # The value of $ceph_osd_args will be passed to restarted osds
1232 #
1233 function objectstore_tool() {
1234 local dir=$1
1235 shift
1236 local id=$1
1237 shift
1238
1239 _objectstore_tool_nowait $dir $id "$@" || return 1
1240 wait_for_clean >&2
1241 }
1242
1243 function test_objectstore_tool() {
1244 local dir=$1
1245
1246 setup $dir || return 1
1247 run_mon $dir a --osd_pool_default_size=1 || return 1
1248 local osd=0
1249 run_mgr $dir x || return 1
1250 run_osd $dir $osd || return 1
1251 create_rbd_pool || return 1
1252 wait_for_clean || return 1
1253 rados --pool rbd put GROUP /etc/group || return 1
1254 objectstore_tool $dir $osd GROUP get-bytes | \
1255 diff - /etc/group
1256 ! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
1257 teardown $dir || return 1
1258 }
1259
1260 #######################################################################
1261
1262 ##
1263 # Predicate checking if there is an ongoing recovery in the
1264 # cluster. If any of the recovering_{keys,bytes,objects}_per_sec
1265 # counters are reported by ceph status, it means recovery is in
1266 # progress.
1267 #
1268 # @return 0 if recovery in progress, 1 otherwise
1269 #
1270 function get_is_making_recovery_progress() {
1271 local recovery_progress
1272 recovery_progress+=".recovering_keys_per_sec + "
1273 recovery_progress+=".recovering_bytes_per_sec + "
1274 recovery_progress+=".recovering_objects_per_sec"
1275 local progress=$(ceph --format json status 2>/dev/null | \
1276 jq -r ".pgmap | $recovery_progress")
1277 test "$progress" != null
1278 }
1279
1280 function test_get_is_making_recovery_progress() {
1281 local dir=$1
1282
1283 setup $dir || return 1
1284 run_mon $dir a || return 1
1285 run_mgr $dir x || return 1
1286 ! get_is_making_recovery_progress || return 1
1287 teardown $dir || return 1
1288 }
1289
1290 #######################################################################
1291
1292 ##
1293 # Return the number of active PGs in the cluster. A PG is active if
1294 # ceph pg dump pgs reports it as both **active** and **clean** and
1295 # not **stale**.
1296 #
1297 # @param STDOUT the number of active PGs
1298 # @return 0 on success, 1 on error
1299 #
1300 function get_num_active_clean() {
1301 local expression
1302 expression+="select(contains(\"active\") and contains(\"clean\")) | "
1303 expression+="select(contains(\"stale\") | not)"
1304 ceph --format json pg dump pgs 2>/dev/null | \
1305 jq ".pg_stats | [.[] | .state | $expression] | length"
1306 }
1307
1308 function test_get_num_active_clean() {
1309 local dir=$1
1310
1311 setup $dir || return 1
1312 run_mon $dir a --osd_pool_default_size=1 || return 1
1313 run_mgr $dir x || return 1
1314 run_osd $dir 0 || return 1
1315 create_rbd_pool || return 1
1316 wait_for_clean || return 1
1317 local num_active_clean=$(get_num_active_clean)
1318 test "$num_active_clean" = $PG_NUM || return 1
1319 teardown $dir || return 1
1320 }
1321
1322 ##
1323 # Return the number of active or peered PGs in the cluster. A PG matches if
1324 # ceph pg dump pgs reports it as either **active** or **peered** and
1325 # not **stale**.
1326 #
1327 # @param STDOUT the number of active PGs
1328 # @return 0 on success, 1 on error
1329 #
1330 function get_num_active_or_peered() {
1331 local expression
1332 expression+="select(contains(\"active\") or contains(\"peered\")) | "
1333 expression+="select(contains(\"stale\") | not)"
1334 ceph --format json pg dump pgs 2>/dev/null | \
1335 jq ".pg_stats | [.[] | .state | $expression] | length"
1336 }
1337
1338 function test_get_num_active_or_peered() {
1339 local dir=$1
1340
1341 setup $dir || return 1
1342 run_mon $dir a --osd_pool_default_size=1 || return 1
1343 run_mgr $dir x || return 1
1344 run_osd $dir 0 || return 1
1345 create_rbd_pool || return 1
1346 wait_for_clean || return 1
1347 local num_peered=$(get_num_active_or_peered)
1348 test "$num_peered" = $PG_NUM || return 1
1349 teardown $dir || return 1
1350 }
1351
1352 #######################################################################
1353
1354 ##
1355 # Return the number of PGs in the cluster, according to
1356 # ceph pg dump pgs.
1357 #
1358 # @param STDOUT the number of PGs
1359 # @return 0 on success, 1 on error
1360 #
1361 function get_num_pgs() {
1362 ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
1363 }
1364
1365 function test_get_num_pgs() {
1366 local dir=$1
1367
1368 setup $dir || return 1
1369 run_mon $dir a --osd_pool_default_size=1 || return 1
1370 run_mgr $dir x || return 1
1371 run_osd $dir 0 || return 1
1372 create_rbd_pool || return 1
1373 wait_for_clean || return 1
1374 local num_pgs=$(get_num_pgs)
1375 test "$num_pgs" -gt 0 || return 1
1376 teardown $dir || return 1
1377 }
1378
1379 #######################################################################
1380
1381 ##
1382 # Return the OSD ids in use by at least one PG in the cluster (either
1383 # in the up or the acting set), according to ceph pg dump pgs. Every
1384 # OSD id shows up as many times as it is used in the up and acting sets.
1385 # If an OSD id is in both the up and acting set of a given PG, it will
1386 # show twice.
1387 #
1388 # @param STDOUT a sorted list of OSD ids
1389 # @return 0 on success, 1 on error
1390 #
1391 function get_osd_id_used_by_pgs() {
1392 ceph --format json pg dump pgs 2>/dev/null | jq '.pg_stats | .[] | .up[], .acting[]' | sort
1393 }
1394
1395 function test_get_osd_id_used_by_pgs() {
1396 local dir=$1
1397
1398 setup $dir || return 1
1399 run_mon $dir a --osd_pool_default_size=1 || return 1
1400 run_mgr $dir x || return 1
1401 run_osd $dir 0 || return 1
1402 create_rbd_pool || return 1
1403 wait_for_clean || return 1
1404 local osd_ids=$(get_osd_id_used_by_pgs | uniq)
1405 test "$osd_ids" = "0" || return 1
1406 teardown $dir || return 1
1407 }
1408
1409 #######################################################################
1410
1411 ##
1412 # Wait until the OSD **id** shows **count** times in the
1413 # PGs (see get_osd_id_used_by_pgs for more information about
1414 # how OSD ids are counted).
1415 #
1416 # @param id the OSD id
1417 # @param count the number of times it must show in the PGs
1418 # @return 0 on success, 1 on error
1419 #
1420 function wait_osd_id_used_by_pgs() {
1421 local id=$1
1422 local count=$2
1423
1424 status=1
1425 for ((i=0; i < $TIMEOUT / 5; i++)); do
1426 echo $i
1427 if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then
1428 sleep 5
1429 else
1430 status=0
1431 break
1432 fi
1433 done
1434 return $status
1435 }
1436
1437 function test_wait_osd_id_used_by_pgs() {
1438 local dir=$1
1439
1440 setup $dir || return 1
1441 run_mon $dir a --osd_pool_default_size=1 || return 1
1442 run_mgr $dir x || return 1
1443 run_osd $dir 0 || return 1
1444 create_rbd_pool || return 1
1445 wait_for_clean || return 1
1446 wait_osd_id_used_by_pgs 0 8 || return 1
1447 ! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1
1448 teardown $dir || return 1
1449 }
1450
1451 #######################################################################
1452
1453 ##
1454 # Return the date and time of the last completed scrub for **pgid**,
1455 # as reported by ceph pg dump pgs. Note that a repair also sets this
1456 # date.
1457 #
1458 # @param pgid the id of the PG
1459 # @param STDOUT the date and time of the last scrub
1460 # @return 0 on success, 1 on error
1461 #
1462 function get_last_scrub_stamp() {
1463 local pgid=$1
1464 local sname=${2:-last_scrub_stamp}
1465 ceph --format json pg dump pgs 2>/dev/null | \
1466 jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname"
1467 }
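##
# Example (a usage sketch): the optional second argument selects another
# stamp field, e.g. last_deep_scrub_stamp as used by pg_deep_scrub below.
#
# ~~~~~~~~~~~~~~~~{.sh}
# get_last_scrub_stamp 1.0
# get_last_scrub_stamp 1.0 last_deep_scrub_stamp
# ~~~~~~~~~~~~~~~~
#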
1468
1469 function test_get_last_scrub_stamp() {
1470 local dir=$1
1471
1472 setup $dir || return 1
1473 run_mon $dir a --osd_pool_default_size=1 || return 1
1474 run_mgr $dir x || return 1
1475 run_osd $dir 0 || return 1
1476 create_rbd_pool || return 1
1477 wait_for_clean || return 1
1478 stamp=$(get_last_scrub_stamp 1.0)
1479 test -n "$stamp" || return 1
1480 teardown $dir || return 1
1481 }
1482
1483 #######################################################################
1484
1485 ##
1486 # Predicate checking if the cluster is clean, i.e. all of its PGs are
1487 # in a clean state (see get_num_active_clean for a definition).
1488 #
1489 # @return 0 if the cluster is clean, 1 otherwise
1490 #
1491 function is_clean() {
1492 num_pgs=$(get_num_pgs)
1493 test $num_pgs != 0 || return 1
1494 test $(get_num_active_clean) = $num_pgs || return 1
1495 }
1496
1497 function test_is_clean() {
1498 local dir=$1
1499
1500 setup $dir || return 1
1501 run_mon $dir a --osd_pool_default_size=1 || return 1
1502 run_mgr $dir x || return 1
1503 run_osd $dir 0 || return 1
1504 create_rbd_pool || return 1
1505 wait_for_clean || return 1
1506 is_clean || return 1
1507 teardown $dir || return 1
1508 }
1509
1510 #######################################################################
1511
1512 calc() { $AWK "BEGIN{print $*}"; }
1513
1514 ##
1515 # Return a list of increasingly larger numbers whose
1516 # total is **timeout** seconds. It can be used to get a short sleep
1517 # delay while waiting for an event on a fast machine. If the machine is
1518 # running very slowly, the larger delays avoid stressing it even
1519 # further or spamming the logs.
1520 #
1521 # @param timeout sum of all delays, in seconds
1522 # @return a list of sleep delays
1523 #
1524 function get_timeout_delays() {
1525 local trace=$(shopt -q -o xtrace && echo true || echo false)
1526 $trace && shopt -u -o xtrace
1527 local timeout=$1
1528 local first_step=${2:-1}
1529 local max_timeout=${3:-$MAX_TIMEOUT}
1530
1531 local i
1532 local total="0"
1533 i=$first_step
1534 while test "$(calc $total + $i \<= $timeout)" = "1"; do
1535 echo -n "$(calc $i) "
1536 total=$(calc $total + $i)
1537 i=$(calc $i \* 2)
1538 if [ $max_timeout -gt 0 ]; then
1539 # Did we reach max timeout ?
1540 if [ ${i%.*} -eq ${max_timeout%.*} ] && [ ${i#*.} \> ${max_timeout#*.} ] || [ ${i%.*} -gt ${max_timeout%.*} ]; then
1541 # Yes, so let's cap the max wait time to max
1542 i=$max_timeout
1543 fi
1544 fi
1545 done
1546 if test "$(calc $total \< $timeout)" = "1"; then
1547 echo -n "$(calc $timeout - $total) "
1548 fi
1549 $trace && shopt -s -o xtrace
1550 }
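##
# Example (a usage sketch, mirroring how wait_for_clean and wait_for_health
# consume the delays; some_predicate stands for any test command):
#
# ~~~~~~~~~~~~~~~~{.sh}
# local -a delays=($(get_timeout_delays $TIMEOUT .1))
# local -i loop=0
# while ! some_predicate ; do
#     (( loop >= ${#delays[*]} )) && return 1
#     sleep ${delays[$loop]}
#     loop+=1
# done
# ~~~~~~~~~~~~~~~~
#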
1551
1552 function test_get_timeout_delays() {
1553 test "$(get_timeout_delays 1)" = "1 " || return 1
1554 test "$(get_timeout_delays 5)" = "1 2 2 " || return 1
1555 test "$(get_timeout_delays 6)" = "1 2 3 " || return 1
1556 test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
1557 test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1
1558 test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1
1559 test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1
1560 test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1
1561 test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1
1562 test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1
1563 test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1
1564 test "$(get_timeout_delays 300 .1 0)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 12.8 25.6 51.2 102.4 95.3 " || return 1
1565 test "$(get_timeout_delays 300 .1 10)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 7.3 " || return 1
1566 }
1567
1568 #######################################################################
1569
1570 ##
1571 # Wait until the cluster becomes clean, or fail if it does not make progress
1572 # for $WAIT_FOR_CLEAN_TIMEOUT seconds.
1573 # Progress is measured either via the **get_is_making_recovery_progress**
1574 # predicate or if the number of clean PGs changes (as returned by get_num_active_clean)
1575 #
1576 # @return 0 if the cluster is clean, 1 otherwise
1577 #
1578 function wait_for_clean() {
1579 local cmd=$1
1580 local num_active_clean=-1
1581 local cur_active_clean
1582 local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
1583 local -i loop=0
1584
1585 flush_pg_stats || return 1
1586 while test $(get_num_pgs) == 0 ; do
1587 sleep 1
1588 done
1589
1590 while true ; do
1591 # Comparing get_num_active_clean & get_num_pgs is used to determine
1592 # if the cluster is clean. That's almost an inline of is_clean() to
1593 # get more performance by avoiding multiple calls of get_num_active_clean.
1594 cur_active_clean=$(get_num_active_clean)
1595 test $cur_active_clean = $(get_num_pgs) && break
1596 if test $cur_active_clean != $num_active_clean ; then
1597 loop=0
1598 num_active_clean=$cur_active_clean
1599 elif get_is_making_recovery_progress ; then
1600 loop=0
1601 elif (( $loop >= ${#delays[*]} )) ; then
1602 ceph report
1603 return 1
1604 fi
1605 # eval is a no-op if cmd is empty
1606 eval $cmd
1607 sleep ${delays[$loop]}
1608 loop+=1
1609 done
1610 return 0
1611 }
1612
1613 function test_wait_for_clean() {
1614 local dir=$1
1615
1616 setup $dir || return 1
1617 run_mon $dir a --osd_pool_default_size=2 || return 1
1618 run_osd $dir 0 || return 1
1619 run_mgr $dir x || return 1
1620 create_rbd_pool || return 1
1621 ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
1622 run_osd $dir 1 || return 1
1623 wait_for_clean || return 1
1624 teardown $dir || return 1
1625 }
1626
1627 ##
1628 # Wait until the cluster becomes peered, or fail if it does not make progress
1629 # for $WAIT_FOR_CLEAN_TIMEOUT seconds.
1630 # Progress is measured either via the **get_is_making_recovery_progress**
1631 # predicate or if the number of peered PGs changes (as returned by get_num_active_or_peered)
1632 #
1633 # @return 0 if the cluster is peered, 1 otherwise
1634 #
1635 function wait_for_peered() {
1636 local cmd=$1
1637 local num_peered=-1
1638 local cur_peered
1639 local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
1640 local -i loop=0
1641
1642 flush_pg_stats || return 1
1643 while test $(get_num_pgs) == 0 ; do
1644 sleep 1
1645 done
1646
1647 while true ; do
1648 # Comparing get_num_active_or_peered & get_num_pgs is used to determine
1649 # if the cluster is peered. That's almost an inline of is_clean() to
1650 # get more performance by avoiding multiple calls of get_num_active_or_peered.
1651 cur_peered=$(get_num_active_or_peered)
1652 test $cur_peered = $(get_num_pgs) && break
1653 if test $cur_peered != $num_peered ; then
1654 loop=0
1655 num_peered=$cur_peered
1656 elif get_is_making_recovery_progress ; then
1657 loop=0
1658 elif (( $loop >= ${#delays[*]} )) ; then
1659 ceph report
1660 return 1
1661 fi
1662 # eval is a no-op if cmd is empty
1663 eval $cmd
1664 sleep ${delays[$loop]}
1665 loop+=1
1666 done
1667 return 0
1668 }
1669
1670 function test_wait_for_peered() {
1671 local dir=$1
1672
1673 setup $dir || return 1
1674 run_mon $dir a --osd_pool_default_size=2 || return 1
1675 run_osd $dir 0 || return 1
1676 run_mgr $dir x || return 1
1677 create_rbd_pool || return 1
1678 ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
1679 run_osd $dir 1 || return 1
1680 wait_for_peered || return 1
1681 teardown $dir || return 1
1682 }
1683
1684
1685 #######################################################################
1686
1687 ##
1688 # Wait up to $TIMEOUT seconds until the cluster health detail
1689 # matches the condition passed as argument.
1690 #
1691 # @param string to grep for in health detail
1692 # @return 0 if the cluster health matches request, 1 otherwise
1693 #
1694 function wait_for_health() {
1695 local grepstr=$1
1696 local -a delays=($(get_timeout_delays $TIMEOUT .1))
1697 local -i loop=0
1698
1699 while ! ceph health detail | grep "$grepstr" ; do
1700 if (( $loop >= ${#delays[*]} )) ; then
1701 ceph health detail
1702 return 1
1703 fi
1704 sleep ${delays[$loop]}
1705 loop+=1
1706 done
1707 }
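##
# Example (a usage sketch): block until "ceph health detail" reports a
# warning, e.g. after stopping an OSD.
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_health "HEALTH_WARN" || return 1
# ~~~~~~~~~~~~~~~~
#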
1708
1709 ##
1710 # Wait until the cluster becomes HEALTH_OK again, or fail if it does not
1711 # within $TIMEOUT seconds.
1712 #
1713 # @return 0 if the cluster is HEALTHY, 1 otherwise
1714 #
1715 function wait_for_health_ok() {
1716 wait_for_health "HEALTH_OK" || return 1
1717 }
1718
1719 function test_wait_for_health_ok() {
1720 local dir=$1
1721
1722 setup $dir || return 1
1723 run_mon $dir a --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
1724 run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
1725 # start osd_pool_default_size OSDs
1726 run_osd $dir 0 || return 1
1727 run_osd $dir 1 || return 1
1728 run_osd $dir 2 || return 1
1729 kill_daemons $dir TERM osd || return 1
1730 ceph osd down 0 || return 1
1731 # expect TOO_FEW_OSDS warning
1732 ! TIMEOUT=1 wait_for_health_ok || return 1
1733 # resurrect all OSDs
1734 activate_osd $dir 0 || return 1
1735 activate_osd $dir 1 || return 1
1736 activate_osd $dir 2 || return 1
1737 wait_for_health_ok || return 1
1738 teardown $dir || return 1
1739 }
1740
1741
1742 #######################################################################
1743
1744 ##
1745 # Run repair on **pgid** and wait until it completes. The repair
1746 # function will fail if repair does not complete within $TIMEOUT
1747 # seconds.
1748 #
1749 # @param pgid the id of the PG
1750 # @return 0 on success, 1 on error
1751 #
1752 function repair() {
1753 local pgid=$1
1754 local last_scrub=$(get_last_scrub_stamp $pgid)
1755 ceph pg repair $pgid
1756 wait_for_scrub $pgid "$last_scrub"
1757 }
1758
1759 function test_repair() {
1760 local dir=$1
1761
1762 setup $dir || return 1
1763 run_mon $dir a --osd_pool_default_size=1 || return 1
1764 run_mgr $dir x || return 1
1765 run_osd $dir 0 || return 1
1766 create_rbd_pool || return 1
1767 wait_for_clean || return 1
1768 repair 1.0 || return 1
1769 kill_daemons $dir KILL osd || return 1
1770 ! TIMEOUT=1 repair 1.0 || return 1
1771 teardown $dir || return 1
1772 }
1773 #######################################################################
1774
1775 ##
1776 # Run scrub on **pgid** and wait until it completes. The pg_scrub
1777 # function will fail if the scrub does not complete within $TIMEOUT
1778 # seconds. The pg_scrub is complete whenever the
1779 # **get_last_scrub_stamp** function reports a timestamp different from
1780 # the one stored before starting the scrub.
1781 #
1782 # @param pgid the id of the PG
1783 # @return 0 on success, 1 on error
1784 #
1785 function pg_scrub() {
1786 local pgid=$1
1787 local last_scrub=$(get_last_scrub_stamp $pgid)
1788 ceph pg scrub $pgid
1789 wait_for_scrub $pgid "$last_scrub"
1790 }
1791
1792 function pg_deep_scrub() {
1793 local pgid=$1
1794 local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
1795 ceph pg deep-scrub $pgid
1796 wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
1797 }
1798
1799 function test_pg_scrub() {
1800 local dir=$1
1801
1802 setup $dir || return 1
1803 run_mon $dir a --osd_pool_default_size=1 || return 1
1804 run_mgr $dir x || return 1
1805 run_osd $dir 0 || return 1
1806 create_rbd_pool || return 1
1807 wait_for_clean || return 1
1808 pg_scrub 1.0 || return 1
1809 kill_daemons $dir KILL osd || return 1
1810 ! TIMEOUT=1 pg_scrub 1.0 || return 1
1811 teardown $dir || return 1
1812 }
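
# A deep-scrub counterpart to test_pg_scrub (a minimal sketch, assuming pg 1.0
# exists in the pool created by create_rbd_pool, as in the tests above).
function test_pg_deep_scrub() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    pg_deep_scrub 1.0 || return 1
    kill_daemons $dir KILL osd || return 1
    ! TIMEOUT=1 pg_deep_scrub 1.0 || return 1
    teardown $dir || return 1
}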
1813
1814 #######################################################################
1815
1816 ##
1817 # Run the *command* and expect it to fail (i.e. return a non-zero status).
1818 # The output (stderr and stdout) is stored in a temporary file in *dir*
1819 # and is expected to contain the string *expected*.
1820 #
1821 # Return 0 if the command failed and the string was found. Otherwise
1822 # return 1 and cat the full output of the command on stderr for debugging.
1823 #
1824 # @param dir temporary directory to store the output
1825 # @param expected string to look for in the output
1826 # @param command ... the command and its arguments
1827 # @return 0 on success, 1 on error
1828 #
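# Example (a usage sketch; the rados error string is illustrative):
#
#   expect_failure $dir "No such file" rados --pool rbd get MISSING $dir/MISSING || return 1
#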
1829
1830 function expect_failure() {
1831 local dir=$1
1832 shift
1833 local expected="$1"
1834 shift
1835 local success
1836
1837 if "$@" > $dir/out 2>&1 ; then
1838 success=true
1839 else
1840 success=false
1841 fi
1842
1843 if $success || ! grep --quiet "$expected" $dir/out ; then
1844 cat $dir/out >&2
1845 return 1
1846 else
1847 return 0
1848 fi
1849 }
1850
1851 function test_expect_failure() {
1852 local dir=$1
1853
1854 setup $dir || return 1
1855 expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
1856 # the command did not fail
1857 ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
1858 grep --quiet FAIL $dir/out || return 1
1859 # the command failed but the output does not contain the expected string
1860 ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
1861 ! grep --quiet FAIL $dir/out || return 1
1862 teardown $dir || return 1
1863 }
1864
1865 #######################################################################
1866
1867 ##
1868 # Given the *last_scrub*, wait for scrub to happen on **pgid**. It
1869 # will fail if scrub does not complete within $TIMEOUT seconds. The
1870 # scrub is complete whenever the **get_last_scrub_stamp** function
1871 # reports a timestamp later than the one given as argument.
1872 #
1873 # @param pgid the id of the PG
1874 # @param last_scrub timestamp of the last scrub for *pgid*
1875 # @return 0 on success, 1 on error
1876 #
1877 function wait_for_scrub() {
1878 local pgid=$1
1879 local last_scrub="$2"
1880 local sname=${3:-last_scrub_stamp}
1881
1882 for ((i=0; i < $TIMEOUT; i++)); do
1883 if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
1884 return 0
1885 fi
1886 sleep 1
1887 done
1888 return 1
1889 }
1890
1891 function test_wait_for_scrub() {
1892 local dir=$1
1893
1894 setup $dir || return 1
1895 run_mon $dir a --osd_pool_default_size=1 || return 1
1896 run_mgr $dir x || return 1
1897 run_osd $dir 0 || return 1
1898 create_rbd_pool || return 1
1899 wait_for_clean || return 1
1900 local pgid=1.0
1901 ceph pg repair $pgid
1902 local last_scrub=$(get_last_scrub_stamp $pgid)
1903 wait_for_scrub $pgid "$last_scrub" || return 1
1904 kill_daemons $dir KILL osd || return 1
1905 last_scrub=$(get_last_scrub_stamp $pgid)
1906 ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
1907 teardown $dir || return 1
1908 }
1909
1910 #######################################################################
1911
1912 ##
1913 # Return 0 if the erasure code *plugin* is available, 1 otherwise.
1914 #
1915 # @param plugin erasure code plugin
1916 # @return 0 on success, 1 on error
1917 #
1918
1919 function erasure_code_plugin_exists() {
1920 local plugin=$1
1921 local status
1922 local grepstr
1923 local s
1924 case `uname` in
1925 FreeBSD) grepstr="Cannot open.*$plugin" ;;
1926 *) grepstr="$plugin.*No such file" ;;
1927 esac
1928
1929 s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
1930 local status=$?
1931 if [ $status -eq 0 ]; then
1932 ceph osd erasure-code-profile rm TESTPROFILE
1933 elif ! echo $s | grep --quiet "$grepstr" ; then
1934 status=1
1935 # display why the string was rejected.
1936 echo $s
1937 fi
1938 return $status
1939 }
1940
1941 function test_erasure_code_plugin_exists() {
1942 local dir=$1
1943
1944 setup $dir || return 1
1945 run_mon $dir a || return 1
1946 run_mgr $dir x || return 1
1947 erasure_code_plugin_exists jerasure || return 1
1948 ! erasure_code_plugin_exists FAKE || return 1
1949 teardown $dir || return 1
1950 }
1951
1952 #######################################################################
1953
1954 ##
1955 # Display all log files from **dir** on stdout.
1956 #
1957 # @param dir directory in which all data is stored
1958 #
1959
1960 function display_logs() {
1961 local dir=$1
1962
1963 find $dir -maxdepth 1 -name '*.log' | \
1964 while read file ; do
1965 echo "======================= $file"
1966 cat $file
1967 done
1968 }
1969
1970 function test_display_logs() {
1971 local dir=$1
1972
1973 setup $dir || return 1
1974 run_mon $dir a || return 1
1975 kill_daemons $dir || return 1
1976 display_logs $dir > $dir/log.out
1977 grep --quiet mon.a.log $dir/log.out || return 1
1978 teardown $dir || return 1
1979 }
1980
1981 #######################################################################
1982 ##
1983 # Spawn a command in the background and append its pid to the variable
1984 # whose name is passed as argument. To make the output easier to read, each
1985 # output line is prefixed with a process id.
1986 #
1987 # Example:
1988 # pids1=""
1989 # run_in_background pids1 bash -c 'sleep 1; exit 1'
1990 #
1991 # @param pid_variable the variable name (not value) where the pids will be stored
1992 # @param ... the command to execute
1993 # @return not significant; the pids accumulated in pid_variable should be collected with **wait_background**
1994 #
1995 function run_in_background() {
1996 local pid_variable=$1
1997 shift
1998 # Execute the command and prepend the output with its pid
1999 # Make sure we return the exit status of the command and not that of sed.
2000 ("$@" |& sed 's/^/'$$': /'; return "${PIPESTATUS[0]}") >&2 &
2001 eval "$pid_variable+=\" $!\""
2002 }
2003
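##
# Run a command with its standard output redirected to the file given as
# first argument; standard error is left untouched.
#
# @param out path of the file receiving stdout
# @param ... the command and its arguments
# @return the exit status of the command
#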
2004 function save_stdout {
2005 local out="$1"
2006 shift
2007 "$@" > "$out"
2008 }
2009
2010 function test_run_in_background() {
2011 local pids
2012 run_in_background pids sleep 1
2013 run_in_background pids sleep 1
2014 test $(echo $pids | wc -w) = 2 || return 1
2015 wait $pids || return 1
2016 }
2017
2018 #######################################################################
2019 ##
2020 # Wait for pids running in background to complete.
2021 # This function is usually used after a **run_in_background** call
2022 # Example:
2023 # pids1=""
2024 # run_in_background pids1 bash -c 'sleep 1; exit 1'
2025 # wait_background pids1
2026 #
2027 # @param pids the variable name that contains the active PIDs. It is emptied at the end of the function.
2028 # @return 1 if at least one process exited in error, 0 otherwise
2029 #
2030 function wait_background() {
2031 # We extract the PIDS from the variable name
2032 pids=${!1}
2033
2034 return_code=0
2035 for pid in $pids; do
2036 if ! wait $pid; then
2037 # If one process failed then return 1
2038 return_code=1
2039 fi
2040 done
2041
2042 # Empty the variable to report that all processes have ended
2043 eval "$1=''"
2044
2045 return $return_code
2046 }
2047
2048
2049 function test_wait_background() {
2050 local pids=""
2051 run_in_background pids bash -c "sleep 1; exit 1"
2052 run_in_background pids bash -c "sleep 2; exit 0"
2053 wait_background pids
2054 if [ $? -ne 1 ]; then return 1; fi
2055
2056 run_in_background pids bash -c "sleep 1; exit 0"
2057 run_in_background pids bash -c "sleep 2; exit 0"
2058 wait_background pids
2059 if [ $? -ne 0 ]; then return 1; fi
2060
2061 if [ ! -z "$pids" ]; then return 1; fi
2062 }
2063
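##
# Ask every OSD to flush its PG stats and wait until, for each OSD, the
# last-stat-seq reported by the monitors catches up with the sequence number
# returned by the flush, so that subsequent stat queries are up to date.
#
# @param timeout how many seconds to wait (defaults to $TIMEOUT)
# @return 0 on success, 1 on timeout
#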
2064 function flush_pg_stats()
2065 {
2066 local timeout=${1:-$TIMEOUT}
2067
2068 ids=`ceph osd ls`
2069 seqs=''
2070 for osd in $ids; do
2071 seq=`ceph tell osd.$osd flush_pg_stats`
2072 seqs="$seqs $osd-$seq"
2073 done
2074
2075 for s in $seqs; do
2076 osd=`echo $s | cut -d - -f 1`
2077 seq=`echo $s | cut -d - -f 2`
2078 echo "waiting osd.$osd seq $seq"
2079 while test $(ceph osd last-stat-seq $osd) -lt $seq; do
2080 sleep 1
2081 if [ $((timeout--)) -eq 0 ]; then
2082 return 1
2083 fi
2084 done
2085 done
2086 }
2087
2088 function test_flush_pg_stats()
2089 {
2090 local dir=$1
2091
2092 setup $dir || return 1
2093 run_mon $dir a --osd_pool_default_size=1 || return 1
2094 run_mgr $dir x || return 1
2095 run_osd $dir 0 || return 1
2096 create_rbd_pool || return 1
2097 rados -p rbd put obj /etc/group
2098 flush_pg_stats || return 1
2099 local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
2100 stored=`ceph df detail --format=json | jq "$jq_filter.stored"`
2101 stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
2102 test $stored -gt 0 || return 1
2103 test $stored == $stored_raw || return 1
2104 teardown $dir
2105 }
2106
2107 #######################################################################
2108
2109 ##
2110 # Call the **run** function (which must be defined by the caller) with
2111 # the **dir** argument followed by the caller argument list.
2112 #
2113 # If the **run** function returns an error, all logs found in **dir**
2114 # are displayed for diagnostic purposes.
2115 #
2116 # The **teardown** function is called when the **run** function returns
2117 # (on success or on error), to clean up leftovers. CEPH_CONF is set
2118 # to /dev/null and CEPH_ARGS is unset so that the tests are protected from
2119 # external interference.
2120 #
2121 # It is the responsibility of the **run** function to call the
2122 # **setup** function to prepare the test environment (create a temporary
2123 # directory etc.).
2124 #
2125 # PS4 is set so that the shell displays the function and line number of
2126 # each statement as it is executed, to help debugging.
2127 #
2128 # @param dir directory in which all data is stored
2129 # @param ... arguments passed transparently to **run**
2130 # @return 0 on success, 1 on error
2131 #
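# Example (a sketch of how a standalone test script typically ends; the body
# of run() is illustrative):
#
# ~~~~~~~~~~~~~~~~{.sh}
# function run() {
#     local dir=$1
#     shift
#     setup $dir || return 1
#     run_mon $dir a || return 1
#     run_mgr $dir x || return 1
#     run_osd $dir 0 || return 1
#     create_rbd_pool || return 1
#     wait_for_clean || return 1
# }
#
# main my-test "$@"
# ~~~~~~~~~~~~~~~~
#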
2132 function main() {
2133 local dir=td/$1
2134 shift
2135
2136 shopt -s -o xtrace
2137 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
2138
2139 export PATH=.:$PATH # make sure programs from the sources are preferred
2140 export PYTHONWARNINGS=ignore
2141 export CEPH_CONF=/dev/null
2142 unset CEPH_ARGS
2143
2144 local code
2145 if run $dir "$@" ; then
2146 code=0
2147 else
2148 code=1
2149 fi
2150 teardown $dir $code || return 1
2151 return $code
2152 }
2153
2154 #######################################################################
2155
2156 function run_tests() {
2157 shopt -s -o xtrace
2158 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
2159
2160 export PATH=.:$PATH # make sure programs from the sources are preferred
2161
2162 export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
2163 export CEPH_ARGS
2164 CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
2165 CEPH_ARGS+="--mon-host=$CEPH_MON "
2166 export CEPH_CONF=/dev/null
2167
2168 local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
2169 local dir=td/ceph-helpers
2170
2171 for func in $funcs ; do
2172 if ! $func $dir; then
2173 teardown $dir 1
2174 return 1
2175 fi
2176 done
2177 }
2178
2179 if test "$1" = TESTS ; then
2180 shift
2181 run_tests "$@"
2182 exit $?
2183 fi
2184
2185 # NOTE:
2186 # jq only supports --exit-status|-e from version 1.4 onwards, which makes
2187 # returning on error much prettier and more straightforward.
2188 # However, the current automated upstream build is running with v1.3,
2189 # which has no idea what -e is. Hence the convoluted error checking we
2190 # need. Sad.
2191 # The next time someone changes this code, please check if v1.4 is now
2192 # a thing, and, if so, please change these to use -e. Thanks.
2193
2194 # jq '.all.supported | select([.[] == "foo"] | any)'
2195 function jq_success() {
2196 input="$1"
2197 filter="$2"
2198 expects="\"$3\""
2199
2200 in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g")
2201 filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g")
2202
2203 ret=$(echo "$in_escaped" | jq "$filter_escaped")
2204 if [[ "$ret" == "true" ]]; then
2205 return 0
2206 elif [[ -n "$expects" ]]; then
2207 if [[ "$ret" == "$expects" ]]; then
2208 return 0
2209 fi
2210 fi
2211 return 1
2223 }
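
# Example (a usage sketch; the command and filter are illustrative):
#
#   jq_success "$(ceph osd dump --format=json)" '.pools | length > 0' || return 1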
2224
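##
# Inject a read error into the copy of **objname** held by the OSD serving
# shard **shard_id** of the object: the OSD's <objectstore>_debug_inject_read_err
# option is enabled and the inject${which}err admin socket command is issued
# (*which* is typically data or mdata), retrying up to 10 times while the OSD
# reports the request as Invalid. For non-EC pools the shard id is only used
# to pick the OSD from the acting set.
#
# @param pooltype ec or replicated
# @param which kind of error to inject (e.g. data or mdata)
# @param poolname pool containing the object
# @param objname name of the object
# @param dir directory in which the cluster data is stored
# @param shard_id index into the acting set of the object
# @return 0 on success, 1 on error
#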
2225 function inject_eio() {
2226 local pooltype=$1
2227 shift
2228 local which=$1
2229 shift
2230 local poolname=$1
2231 shift
2232 local objname=$1
2233 shift
2234 local dir=$1
2235 shift
2236 local shard_id=$1
2237 shift
2238
2239 local -a initial_osds=($(get_osds $poolname $objname))
2240 local osd_id=${initial_osds[$shard_id]}
2241 if [ "$pooltype" != "ec" ]; then
2242 shard_id=""
2243 fi
2244 type=$(cat $dir/$osd_id/type)
2245 set_config osd $osd_id ${type}_debug_inject_read_err true || return 1
2246 local loop=0
2247 while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
2248 inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
2249 loop=$(expr $loop + 1)
2250 if [ $loop = "10" ]; then
2251 return 1
2252 fi
2253 sleep 1
2254 done
2255 }
2256
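##
# diff the files given in argument; if they differ, run diff again with
# $DIFFCOLOPTS so the mismatch is easier to read in the test output.
#
# @param ... arguments passed to diff
# @return 0 if the files match, non-zero otherwise
#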
2257 function multidiff() {
2258 if ! diff $@ ; then
2259 if [ "$DIFFCOLOPTS" = "" ]; then
2260 return 1
2261 fi
2262 diff $DIFFCOLOPTS $@
2263 fi
2264 }
2265
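##
# Create erasure coded pool **pool_name** with one PG, using an erasure code
# profile named myprofile built with crush-failure-domain=osd plus any extra
# attributes given after *allow_overwrites* (e.g. k=2 m=1), then wait for the
# cluster to be clean.
#
# @param pool_name name of the pool to create
# @param allow_overwrites true to enable allow_ec_overwrites on the pool
# @param ... extra attributes for the erasure code profile
# @return 0 on success, 1 on error
#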
2266 function create_ec_pool() {
2267 local pool_name=$1
2268 shift
2269 local allow_overwrites=$1
2270 shift
2271
2272 ceph osd erasure-code-profile set myprofile crush-failure-domain=osd "$@" || return 1
2273
2274 create_pool "$pool_name" 1 1 erasure myprofile || return 1
2275
2276 if [ "$allow_overwrites" = "true" ]; then
2277 ceph osd pool set "$pool_name" allow_ec_overwrites true || return 1
2278 fi
2279
2280 wait_for_clean || return 1
2281 return 0
2282 }
2283
2284 # Local Variables:
2285 # compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"
2286 # End: