#!/bin/bash
#
# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
# Copyright (C) 2014 Federico Gimenez <fgimenez@coit.es>
#
# Author: Loic Dachary <loic@dachary.org>
# Author: Federico Gimenez <fgimenez@coit.es>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
TIMEOUT=300
PG_NUM=4
: ${CEPH_BUILD_VIRTUALENV:=/tmp}

if type xmlstarlet > /dev/null 2>&1; then
    XMLSTARLET=xmlstarlet
elif type xml > /dev/null 2>&1; then
    XMLSTARLET=xml
else
    echo "Missing xmlstarlet binary!"
    exit 1
fi

if [ `uname` = FreeBSD ]; then
    SED=gsed
    DIFFCOLOPTS=""
    KERNCORE="kern.corefile"
else
    SED=sed
    termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
    if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
        termwidth="-W ${termwidth}"
    fi
    DIFFCOLOPTS="-y $termwidth"
    KERNCORE="kernel.core_pattern"
fi

EXTRA_OPTS=""
if [ -n "$CEPH_LIB" ]; then
    EXTRA_OPTS+=" --erasure-code-dir $CEPH_LIB"
    EXTRA_OPTS+=" --plugin-dir $CEPH_LIB"
    EXTRA_OPTS+=" --osd-class-dir $CEPH_LIB"
fi

#! @file ceph-helpers.sh
# @brief Toolbox to manage Ceph cluster dedicated to testing
#
# Example use case:
#
# ~~~~~~~~~~~~~~~~{.sh}
# source ceph-helpers.sh
#
# function mytest() {
#   # cleanup leftovers and reset mydir
#   setup mydir
#   # create a cluster with one monitor and three osds
#   run_mon mydir a
#   run_osd mydir 0
#   run_osd mydir 1
#   run_osd mydir 2
#   # put and get an object
#   rados --pool rbd put GROUP /etc/group
#   rados --pool rbd get GROUP /tmp/GROUP
#   # stop the cluster and cleanup the directory
#   teardown mydir
# }
# ~~~~~~~~~~~~~~~~
#
# The focus is on simplicity and efficiency, in the context of
# functional tests. The output is intentionally very verbose
# and functions return as soon as an error is found. The caller
# is also expected to abort on the first error so that debugging
# can be done by looking at the end of the output.
#
# Each function is documented, implemented and tested independently.
# When modifying a helper, the test and the documentation are
# expected to be updated and it is easier if they are collocated. A
# test for a given function can be run with
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS test_get_osds
# ~~~~~~~~~~~~~~~~
#
# and all the tests (i.e. all functions matching test_*) are run
# with:
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS
# ~~~~~~~~~~~~~~~~
#
# A test function takes a single argument: the directory dedicated
# to the tests. It is expected to not create any file outside of this
# directory and remove it entirely when it completes successfully.
#


function get_asok_dir() {
    if [ -n "$CEPH_ASOK_DIR" ]; then
        echo "$CEPH_ASOK_DIR"
    else
        echo ${TMPDIR:-/tmp}/ceph-asok.$$
    fi
}

function get_asok_path() {
    local name=$1
    if [ -n "$name" ]; then
        echo $(get_asok_dir)/ceph-$name.asok
    else
        echo $(get_asok_dir)/\$cluster-\$name.asok
    fi
}
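
##
# Example (illustrative): once a mon named "a" is running (see run_mon
# below), its admin socket can be queried directly:
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph --admin-daemon $(get_asok_path mon.a) mon_status
# ~~~~~~~~~~~~~~~~
#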
##
# Cleanup any leftovers found in **dir** via **teardown**
# and reset **dir** as an empty environment.
#
# @param dir path name of the environment
# @return 0 on success, 1 on error
#
function setup() {
    local dir=$1
    teardown $dir || return 1
    mkdir -p $dir
    mkdir -p $(get_asok_dir)
}

function test_setup() {
    local dir=$1
    setup $dir || return 1
    test -d $dir || return 1
    setup $dir || return 1
    test -d $dir || return 1
    teardown $dir
}

#######################################################################

##
# Kill all daemons for which a .pid file exists in **dir** and remove
# **dir**. If the file system in which **dir** resides is btrfs, delete
# all subvolumes that relate to it.
#
# @param dir path name of the environment
# @return 0 on success, 1 on error
#
function teardown() {
    local dir=$1
    local dumplogs=$2
    kill_daemons $dir KILL
    if [ `uname` != FreeBSD ] \
        && [ $(stat -f -c '%T' .) == "btrfs" ]; then
        __teardown_btrfs $dir
    fi
    local cores="no"
    local pattern="$(sysctl -n $KERNCORE)"
    # See if we have apport core handling
    if [ "${pattern:0:1}" = "|" ]; then
        # TODO: Where can we get the dumps?
        # Not sure where the dumps really are so this will look in the CWD
        pattern=""
    fi
    # Local core dump names start with "core" while teuthology's end with "core"
    if ls $(dirname $pattern) | grep -q '^core\|core$' ; then
        cores="yes"
        if [ -n "$LOCALRUN" ]; then
            mkdir /tmp/cores.$$ 2> /dev/null || true
            for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
                mv $i /tmp/cores.$$
            done
        fi
    fi
    if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
        display_logs $dir
    fi
    rm -fr $dir
    rm -rf $(get_asok_dir)
    if [ "$cores" = "yes" ]; then
        echo "ERROR: Failure due to cores found"
        if [ -n "$LOCALRUN" ]; then
            echo "Find saved core files in /tmp/cores.$$"
        fi
        return 1
    fi
    return 0
}

function __teardown_btrfs() {
    local btrfs_base_dir=$1
    local btrfs_root=$(df -P . | tail -1 | awk '{print $NF}')
    local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list . -t | awk '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/")
    for subvolume in $btrfs_dirs; do
        sudo btrfs subvolume delete $btrfs_root/$subvolume
    done
}

function test_teardown() {
    local dir=$1
    setup $dir || return 1
    teardown $dir || return 1
    ! test -d $dir || return 1
}

#######################################################################

##
# Sends a signal to a single daemon.
# This is a helper function for kill_daemons
#
# After the daemon is sent **signal**, its actual termination
# will be verified by sending it signal 0. If the daemon is
# still alive, kill_daemon will pause for a few seconds and
# try again. This will repeat for a fixed number of times
# before kill_daemon returns on failure. The list of
# sleep intervals can be specified as **delays** and defaults
# to:
#
# 0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
#
# This sequence is designed to run first a very short sleep time (0.1)
# if the machine is fast enough and the daemon terminates in a fraction of a
# second. The increasing sleep numbers should give plenty of time for
# the daemon to die even on the slowest running machine. If a daemon
# takes more than a few minutes to stop (the sum of all sleep times),
# there probably is no point in waiting more and a number of things
# are likely to go wrong anyway: better give up and return on error.
#
# @param pidfile path of the file containing the process id
# @param send_signal the signal to send
# @param delays sequence of sleep times before failure
#
function kill_daemon() {
    local pid=$(cat $1)
    local send_signal=$2
    local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
    local exit_code=1
    for try in $delays ; do
        if kill -$send_signal $pid 2> /dev/null ; then
            exit_code=1
        else
            exit_code=0
            break
        fi
        send_signal=0
        sleep $try
    done;
    return $exit_code
}
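
##
# Example (illustrative): given the pid file conventions used by
# run_osd, stop osd.0 with TERM and a custom (shortened) retry
# schedule:
#
# ~~~~~~~~~~~~~~~~{.sh}
# kill_daemon $dir/osd.0.pid TERM '1 2 4' || return 1
# ~~~~~~~~~~~~~~~~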

function test_kill_daemon() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1

    name_prefix=osd
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # sending signal 0 won't kill the daemon
        # waiting just for one second instead of the default schedule
        # allows us to quickly verify what happens when kill fails
        # to stop the daemon (i.e. it must return false)
        #
        ! kill_daemon $pidfile 0 1 || return 1
        #
        # kill just the osd and verify the mon is still responsive
        #
        kill_daemon $pidfile TERM || return 1
    done

    ceph osd dump | grep "osd.0 down" || return 1

    name_prefix=mgr
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mgr
        #
        kill_daemon $pidfile TERM || return 1
    done

    name_prefix=mon
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mon and verify it cannot be reached
        #
        kill_daemon $pidfile TERM || return 1
        ! timeout 5 ceph status || return 1
    done

    teardown $dir || return 1
}

##
# Kill all daemons for which a .pid file exists in **dir**.  Each
# daemon is sent a **signal** and kill_daemons waits for it to exit
# during a few minutes. By default all daemons are killed. If a
# **name_prefix** is provided, only the daemons for which a pid
# file is found matching the prefix are killed. See run_osd and
# run_mon for more information about the name conventions for
# the pid files.
#
# Send TERM to all daemons : kill_daemons $dir
# Send KILL to all daemons : kill_daemons $dir KILL
# Send KILL to all osds : kill_daemons $dir KILL osd
# Send KILL to osd 1 : kill_daemons $dir KILL osd.1
#
# If a daemon is sent the TERM signal and does not terminate
# within a few minutes, it will still be running even after
# kill_daemons returns.
#
# If all daemons are killed successfully the function returns 0.
# If at least one daemon remains, this is treated as an
# error and the function returns 1.
#
# @param dir path name of the environment
# @param signal name of the first signal (defaults to TERM)
# @param name_prefix only kill matching daemons (defaults to all)
# @param delays sequence of sleep times before failure
# @return 0 on success, 1 on error
#
function kill_daemons() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local dir=$1
    local signal=${2:-TERM}
    local name_prefix=$3 # optional, osd, mon, osd.1
    local delays=$4 # optional timing
    local status=0
    local pids=""

    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        run_in_background pids kill_daemon $pidfile $signal $delays
    done

    wait_background pids
    status=$?

    $trace && shopt -s -o xtrace
    return $status
}

function test_kill_daemons() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    #
    # sending signal 0 won't kill the daemon
    # waiting just for one second instead of the default schedule
    # allows us to quickly verify what happens when kill fails
    # to stop the daemon (i.e. it must return false)
    #
    ! kill_daemons $dir 0 osd 1 || return 1
    #
    # kill just the osd and verify the mon is still responsive
    #
    kill_daemons $dir TERM osd || return 1
    ceph osd dump | grep "osd.0 down" || return 1
    #
    # kill the mgr
    #
    kill_daemons $dir TERM mgr || return 1
    #
    # kill the mon and verify it cannot be reached
    #
    kill_daemons $dir TERM || return 1
    ! timeout 5 ceph status || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run a monitor by the name mon.**id** with data in **dir**/**id**.
# The logs can be found in **dir**/mon.**id**.log and the pid file
# is **dir**/mon.**id**.pid and the admin socket is
# **dir**/**id**/ceph-mon.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-mon --mkfs
# and the ceph-mon daemon.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_mon, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_mon $dir a # spawn a mon and bind port 7018
# run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
#
# If mon_initial_members is not set, the default rbd pool is deleted
# and replaced with a replicated pool with fewer placement groups to
# speed up initialization. If mon_initial_members is set, no attempt
# is made to recreate the rbd pool because it would hang forever,
# waiting for other mons to join.
#
# A **dir**/ceph.conf file is created but not meant to be used by any
# function. It is convenient for debugging a failure with:
#
#     ceph --conf **dir**/ceph.conf -s
#
# @param dir path name of the environment
# @param id mon identifier
# @param ... can be any option valid for ceph-mon
# @return 0 on success, 1 on error
#
function run_mon() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mon \
        --id $id \
        --mkfs \
        --mon-data=$data \
        --run-dir=$dir \
        "$@" || return 1

    ceph-mon \
        --id $id \
        --mon-osd-full-ratio=.99 \
        --mon-data-avail-crit=1 \
        --mon-data-avail-warn=5 \
        --paxos-propose-interval=0.1 \
        --osd-crush-chooseleaf-type=0 \
        $EXTRA_OPTS \
        --debug-mon 20 \
        --debug-ms 20 \
        --debug-paxos 20 \
        --chdir= \
        --mon-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --mon-cluster-log-file=$dir/log \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        --mon-allow-pool-delete \
        --mon-osd-backfillfull-ratio .99 \
        "$@" || return 1

    cat > $dir/ceph.conf <<EOF
[global]
fsid = $(get_config mon $id fsid)
mon host = $(get_config mon $id mon_host)
EOF
}

function test_run_mon() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a --mon-initial-members=a || return 1
    create_rbd_pool || return 1
    # rbd is the first pool created on this mon, hence it has pool id 1
    ceph osd dump | grep "pool 1 'rbd'" || return 1
    kill_daemons $dir || return 1

    run_mon $dir a || return 1
    create_rbd_pool || return 1
    # rbd has been deleted / created again, hence it no longer has pool id 1
    ! ceph osd dump | grep "pool 1 'rbd'" || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"3"}' || return 1

    ! CEPH_ARGS='' ceph status || return 1
    CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1

    kill_daemons $dir || return 1

    run_mon $dir a --osd_pool_default_size=1 || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"1"}' || return 1
    kill_daemons $dir || return 1

    CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
        run_mon $dir a || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"2"}' || return 1
    kill_daemons $dir || return 1

    teardown $dir || return 1
}

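##
# Delete the rbd pool (if any) and recreate it with $PG_NUM placement
# groups, then initialize it for use by rbd.
#
# @return 0 on success, 1 on error
#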
function create_rbd_pool() {
    ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
    create_pool rbd $PG_NUM || return 1
    rbd pool init rbd
}

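##
# Create a pool, passing the arguments verbatim to ceph osd pool
# create, followed by a short sleep (presumably to let the creation
# settle before the pool is used).
#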
function create_pool() {
    ceph osd pool create "$@"
    sleep 1
}

#######################################################################

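##
# Run a manager by the name mgr.**id** with data in **dir**/**id**.
# The logs can be found in **dir**/mgr.**id**.log, the pid file is
# **dir**/mgr.**id**.pid and the admin socket is obtained via
# get_asok_path, mirroring the conventions of run_mon and run_osd.
#
# @param dir path name of the environment
# @param id mgr identifier
# @param ... can be any option valid for ceph-mgr
# @return 0 on success, 1 on error
#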
function run_mgr() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mgr \
        --id $id \
        $EXTRA_OPTS \
        --debug-mgr 20 \
        --debug-objecter 20 \
        --debug-ms 20 \
        --debug-paxos 20 \
        --chdir= \
        --mgr-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        "$@" || return 1
}

#######################################################################

##
# Create (prepare) and run (activate) an osd by the name osd.**id**
# with data in **dir**/**id**.  The logs can be found in
# **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
# the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The run_osd function creates the OSD data directory with ceph-disk
# prepare on the **dir**/**id** directory and relies on the
# activate_osd function to run the daemon.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function run_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    mkdir -p $osd_data
    ceph-disk $ceph_disk_args \
        prepare --filestore $osd_data || return 1

    activate_osd $dir $id "$@"
}

function run_osd_bluestore() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    mkdir -p $osd_data
    ceph-disk $ceph_disk_args \
        prepare --bluestore $osd_data || return 1

    activate_osd $dir $id "$@"
}

function test_run_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    run_osd $dir 1 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"30"}' || return 1

    teardown $dir || return 1
}

#######################################################################

##
# Shutdown and remove all traces of the osd by the name osd.**id**.
#
# The OSD is shutdown with the TERM signal. It is then removed from
# the auth list, crush map, osd map etc and the files associated with
# it are also removed.
#
# @param dir path name of the environment
# @param id osd identifier
# @return 0 on success, 1 on error
#
function destroy_osd() {
    local dir=$1
    local id=$2

    ceph osd out osd.$id || return 1
    kill_daemons $dir TERM osd.$id || return 1
    ceph osd purge osd.$id --yes-i-really-mean-it || return 1
    teardown $dir/$id || return 1
    rm -fr $dir/$id
}

function test_destroy_osd() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    destroy_osd $dir 0 || return 1
    ! ceph osd dump | grep "osd.0 " || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run (activate) an osd by the name osd.**id** with data in
# **dir**/**id**.  The logs can be found in **dir**/osd.**id**.log,
# the pid file is **dir**/osd.**id**.pid and the admin socket is
# **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to activate_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The activate_osd function expects a valid OSD data directory
# in **dir**/**id**, either just created via run_osd or re-using
# one left by a previous run of ceph-osd. The ceph-osd daemon is
# run indirectly via ceph-disk activate.
#
# The activate_osd function blocks until the monitor reports the osd
# up. If it fails to do so within $TIMEOUT seconds, activate_osd
# fails.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# activate_osd $dir 0 # activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function activate_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len 460"
    ceph_args+=" --osd-max-object-namespace-len 64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features *"
    ceph_args+=" "
    ceph_args+="$@"
    mkdir -p $osd_data
    CEPH_ARGS="$ceph_args " ceph-disk $ceph_disk_args \
        activate \
        --mark-init=none \
        $osd_data || return 1

    [ "$id" = "$(cat $osd_data/whoami)" ] || return 1

    wait_for_osd up $id || return 1
}

function test_activate_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    kill_daemons $dir TERM osd || return 1

    activate_osd $dir 0 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    teardown $dir || return 1
}

#######################################################################

##
# Wait until the OSD **id** is either up or down, as specified by
# **state**. It fails after $TIMEOUT seconds.
#
# @param state either up or down
# @param id osd identifier
# @return 0 on success, 1 on error
#
function wait_for_osd() {
    local state=$1
    local id=$2

    status=1
    for ((i=0; i < $TIMEOUT; i++)); do
        echo $i
        if ! ceph osd dump | grep "osd.$id $state"; then
            sleep 1
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_for_osd() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_osd up 0 || return 1
    kill_daemons $dir TERM osd || return 1
    wait_for_osd down 0 || return 1
    ( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Display the list of OSD ids supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT white space separated list of OSD ids
# @return 0 on success, 1 on error
#
function get_osds() {
    local poolname=$1
    local objectname=$2

    local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting | .[]')
    # get rid of the trailing space
    echo $osds
}
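
##
# Example (illustrative): iterate over the OSDs that store the GROUP
# object and ask each to flush its PG stats:
#
# ~~~~~~~~~~~~~~~~{.sh}
# for osd in $(get_osds rbd GROUP) ; do
#     ceph tell osd.$osd flush_pg_stats
# done
# ~~~~~~~~~~~~~~~~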

function test_get_osds() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    create_rbd_pool || return 1
    get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait for the monitor to form quorum (optionally, of size N)
#
# @param timeout duration (lower-bound) to wait for quorum to be formed
# @param quorumsize size of quorum to wait for
# @return 0 on success, 1 on error
#
function wait_for_quorum() {
    local timeout=$1
    local quorumsize=$2

    if [[ -z "$timeout" ]]; then
        timeout=300
    fi

    if [[ -z "$quorumsize" ]]; then
        timeout $timeout ceph mon_status --format=json >&/dev/null || return 1
        return 0
    fi

    no_quorum=1
    wait_until=$((`date +%s` + $timeout))
    while [[ $(date +%s) -lt $wait_until ]]; do
        jqfilter='.quorum | length == '$quorumsize
        jqinput="$(timeout $timeout ceph mon_status --format=json 2>/dev/null)"
        res=$(echo $jqinput | jq "$jqfilter")
        if [[ "$res" == "true" ]]; then
            no_quorum=0
            break
        fi
    done
    return $no_quorum
}
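
##
# Example (illustrative): wait up to 300 seconds for three mons to
# form quorum:
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_quorum 300 3 || return 1
# ~~~~~~~~~~~~~~~~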

#######################################################################

##
# Return the PG supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT a PG
# @return 0 on success, 1 on error
#
function get_pg() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
}
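
##
# Example (illustrative): find the PG holding the GROUP object, then
# scrub it with the pg_scrub helper defined further down:
#
# ~~~~~~~~~~~~~~~~{.sh}
# pgid=$(get_pg rbd GROUP)
# pg_scrub $pgid || return 1
# ~~~~~~~~~~~~~~~~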

function test_get_pg() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the value of the **config**, obtained via the config get command
# of the admin socket of **daemon**.**id**.
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param STDOUT the config value
# @return 0 on success, 1 on error
#
function get_config() {
    local daemon=$1
    local id=$2
    local config=$3

    CEPH_ARGS='' \
        ceph --format json daemon $(get_asok_path $daemon.$id) \
        config get $config 2> /dev/null | \
        jq -r ".$config"
}

function test_get_config() {
    local dir=$1

    # override the default config using command line arg and check it
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    test $(get_config mon a osd_pool_default_size) = 1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 --osd_max_scrubs=3 || return 1
    test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Set the **config** to specified **value**, via the config set command
# of the admin socket of **daemon**.**id**
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param value the config value
# @return 0 on success, 1 on error
#
function set_config() {
    local daemon=$1
    local id=$2
    local config=$3
    local value=$4

    test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
               config set $config $value 2> /dev/null | \
           jq 'has("success")') == true
}

function test_set_config() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    set_config mon a ms_crc_header false || return 1
    test $(get_config mon a ms_crc_header) = false || return 1
    set_config mon a ms_crc_header true || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the OSD id of the primary OSD supporting the **objectname**
# stored in **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the primary OSD id
# @return 0 on success, 1 on error
#
function get_primary() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting_primary'
}

function test_get_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    test $(get_primary rbd GROUP) = $osd || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the id of any OSD supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map, except the primary.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the OSD id
# @return 0 on success, 1 on error
#
function get_not_primary() {
    local poolname=$1
    local objectname=$2

    local primary=$(get_primary $poolname $objectname)
    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq ".acting | map(select (. != $primary)) | .[0]"
}

function test_get_not_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local primary=$(get_primary rbd GROUP)
    local not_primary=$(get_not_primary rbd GROUP)
    test $not_primary != $primary || return 1
    test $not_primary = 0 -o $not_primary = 1 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run ceph-objectstore-tool against the OSD **id** using the data path
# **dir**. The OSD is killed with TERM prior to running
# ceph-objectstore-tool because access to the data path is
# exclusive. The OSD is restarted after the command completes. The
# objectstore_tool returns after all PG are active+clean again.
#
# @param dir the data path of the OSD
# @param id the OSD id
# @param ... arguments to ceph-objectstore-tool
# @param STDIN the input of ceph-objectstore-tool
# @param STDOUT the output of ceph-objectstore-tool
# @return 0 on success, 1 on error
#
# The value of $ceph_osd_args will be passed to restarted osds
#
function objectstore_tool() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local osd_type=$(cat $osd_data/type)

    kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1

    local journal_args
    if [ "$osd_type" == "filestore" ]; then
        journal_args=" --journal-path $osd_data/journal"
    fi
    ceph-objectstore-tool \
        --data-path $osd_data \
        $journal_args \
        "$@" || return 1
    activate_osd $dir $id $ceph_osd_args >&2 || return 1
    wait_for_clean >&2
}

function test_objectstore_tool() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    rados --pool rbd put GROUP /etc/group || return 1
    objectstore_tool $dir $osd GROUP get-bytes | \
        diff - /etc/group || return 1
    ! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Predicate checking if there is an ongoing recovery in the
# cluster. If any of the recovering_{keys,bytes,objects}_per_sec
# counters are reported by ceph status, it means recovery is in
# progress.
#
# @return 0 if recovery in progress, 1 otherwise
#
function get_is_making_recovery_progress() {
    local recovery_progress
    recovery_progress+=".recovering_keys_per_sec + "
    recovery_progress+=".recovering_bytes_per_sec + "
    recovery_progress+=".recovering_objects_per_sec"
    local progress=$(ceph --format json status 2>/dev/null | \
        jq -r ".pgmap | $recovery_progress")
    test "$progress" != null
}

function test_get_is_making_recovery_progress() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    ! get_is_making_recovery_progress || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the number of active PGs in the cluster. A PG is active if
# ceph pg dump pgs reports it as both **active** and **clean** and
# not **stale**.
#
# @param STDOUT the number of active PGs
# @return 0 on success, 1 on error
#
function get_num_active_clean() {
    local expression
    expression+="select(contains(\"active\") and contains(\"clean\")) | "
    expression+="select(contains(\"stale\") | not)"
    ceph --format json pg dump pgs 2>/dev/null | \
        jq "[.[] | .state | $expression] | length"
}

function test_get_num_active_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_active_clean=$(get_num_active_clean)
    test "$num_active_clean" = $PG_NUM || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the number of PGs in the cluster, according to
# ceph status (the pgmap num_pgs field).
#
# @param STDOUT the number of PGs
# @return 0 on success, 1 on error
#
function get_num_pgs() {
    ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
}

function test_get_num_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_pgs=$(get_num_pgs)
    test "$num_pgs" -gt 0 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the OSD ids in use by at least one PG in the cluster (either
# in the up or the acting set), according to ceph pg dump pgs. Each
# OSD id appears as many times as it is used in the up and acting
# sets. If an OSD id is in both the up and acting set of a given PG,
# it will show twice.
#
# @param STDOUT a sorted list of OSD ids
# @return 0 on success, 1 on error
#
function get_osd_id_used_by_pgs() {
    ceph --format json pg dump pgs 2>/dev/null | jq '.[] | .up[], .acting[]' | sort
}

function test_get_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local osd_ids=$(get_osd_id_used_by_pgs | uniq)
    test "$osd_ids" = "0" || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait until the OSD **id** shows **count** times in the
# PGs (see get_osd_id_used_by_pgs for more information about
# how OSD ids are counted).
#
# @param id the OSD id
# @param count the number of times it must show in the PGs
# @return 0 on success, 1 on error
#
function wait_osd_id_used_by_pgs() {
    local id=$1
    local count=$2

    status=1
    for ((i=0; i < $TIMEOUT / 5; i++)); do
        echo $i
        if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then
            sleep 5
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    wait_osd_id_used_by_pgs 0 8 || return 1
    ! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the date and time of the last completed scrub for **pgid**,
# as reported by ceph pg dump pgs. Note that a repair also sets this
# date.
#
# @param pgid the id of the PG
# @param sname the stamp field to read (defaults to last_scrub_stamp)
# @param STDOUT the date and time of the last scrub
# @return 0 on success, 1 on error
#
function get_last_scrub_stamp() {
    local pgid=$1
    local sname=${2:-last_scrub_stamp}
    ceph --format json pg dump pgs 2>/dev/null | \
        jq -r ".[] | select(.pgid==\"$pgid\") | .$sname"
}

function test_get_last_scrub_stamp() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    stamp=$(get_last_scrub_stamp 1.0)
    test -n "$stamp" || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Predicate checking if the cluster is clean, i.e. all of its PGs are
# in a clean state (see get_num_active_clean for a definition).
#
# @return 0 if the cluster is clean, 1 otherwise
#
function is_clean() {
    num_pgs=$(get_num_pgs)
    test $num_pgs != 0 || return 1
    test $(get_num_active_clean) = $num_pgs || return 1
}

function test_is_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    is_clean || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return a list of numbers that are increasingly larger and whose
# total is **timeout** seconds. It can be used to have a short sleep
# delay while waiting for an event on a fast machine. But if the
# machine is running very slowly, the larger delays avoid stressing
# it even further or spamming the logs.
#
# @param timeout sum of all delays, in seconds
# @param first_step size of the first delay (defaults to 1)
# @return a list of sleep delays
#
function get_timeout_delays() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local timeout=$1
    local first_step=${2:-1}

    local i
    local total="0"
    i=$first_step
    while test "$(echo $total + $i \<= $timeout | bc -l)" = "1"; do
        echo -n "$i "
        total=$(echo $total + $i | bc -l)
        i=$(echo $i \* 2 | bc -l)
    done
    if test "$(echo $total \< $timeout | bc -l)" = "1"; then
        echo -n $(echo $timeout - $total | bc -l)
    fi
    $trace && shopt -s -o xtrace
}

function test_get_timeout_delays() {
    test "$(get_timeout_delays 1)" = "1 " || return 1
    test "$(get_timeout_delays 5)" = "1 2 2" || return 1
    test "$(get_timeout_delays 6)" = "1 2 3" || return 1
    test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
    test "$(get_timeout_delays 8)" = "1 2 4 1" || return 1
    test "$(get_timeout_delays 1 .1)" = ".1 .2 .4 .3" || return 1
    test "$(get_timeout_delays 1.5 .1)" = ".1 .2 .4 .8 " || return 1
    test "$(get_timeout_delays 5 .1)" = ".1 .2 .4 .8 1.6 1.9" || return 1
    test "$(get_timeout_delays 6 .1)" = ".1 .2 .4 .8 1.6 2.9" || return 1
    test "$(get_timeout_delays 6.3 .1)" = ".1 .2 .4 .8 1.6 3.2 " || return 1
    test "$(get_timeout_delays 20 .1)" = ".1 .2 .4 .8 1.6 3.2 6.4 7.3" || return 1
}

#######################################################################

##
# Wait until the cluster becomes clean, failing if it makes no
# progress for $TIMEOUT seconds.  Progress is measured either via the
# **get_is_making_recovery_progress** predicate or by a change in the
# number of clean PGs (as returned by get_num_active_clean).
#
# @return 0 if the cluster is clean, 1 otherwise
#
function wait_for_clean() {
    local num_active_clean=-1
    local cur_active_clean
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0

    flush_pg_stats
    while test $(get_num_pgs) == 0 ; do
        sleep 1
    done

    while true ; do
        # Comparing get_num_active_clean & get_num_pgs is used to determine
        # if the cluster is clean. That's almost an inline of is_clean() to
        # get more performance by avoiding multiple calls of get_num_active_clean.
        cur_active_clean=$(get_num_active_clean)
        test $cur_active_clean = $(get_num_pgs) && break
        if test $cur_active_clean != $num_active_clean ; then
            loop=0
            num_active_clean=$cur_active_clean
        elif get_is_making_recovery_progress ; then
            loop=0
        elif (( $loop >= ${#delays[*]} )) ; then
            ceph report
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
    return 0
}

function test_wait_for_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    create_rbd_pool || return 1
    ! TIMEOUT=1 wait_for_clean || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait until ceph health detail matches **grepstr**, failing if it
# does not do so within $TIMEOUT seconds.
#
# @param grepstr string to look for in ceph health detail
# @return 0 if the health matched, 1 otherwise
#
function wait_for_health() {
    local grepstr=$1
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0

    while ! ceph health detail | grep "$grepstr" ; do
        if (( $loop >= ${#delays[*]} )) ; then
            ceph health detail
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
}

##
# Wait until the cluster becomes HEALTH_OK again, failing if it does
# not do so within $TIMEOUT seconds.
#
# @return 0 if the cluster is HEALTH_OK, 1 otherwise
#
function wait_for_health_ok() {
    wait_for_health "HEALTH_OK" || return 1
}

function test_wait_for_health_ok() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
    run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
    run_osd $dir 0 || return 1
    kill_daemons $dir TERM osd || return 1
    ! TIMEOUT=1 wait_for_health_ok || return 1
    activate_osd $dir 0 || return 1
    wait_for_health_ok || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run repair on **pgid** and wait until it completes. The repair
# function will fail if repair does not complete within $TIMEOUT
# seconds.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
function repair() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph pg repair $pgid
    wait_for_scrub $pgid "$last_scrub"
}

function test_repair() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    repair 1.0 || return 1
    kill_daemons $dir KILL osd || return 1
    ! TIMEOUT=1 repair 1.0 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run scrub on **pgid** and wait until it completes. The pg_scrub
# function will fail if the scrub does not complete within $TIMEOUT
# seconds. The pg_scrub is complete whenever the
# **get_last_scrub_stamp** function reports a timestamp different from
# the one stored before starting the scrub.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
function pg_scrub() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph pg scrub $pgid
    wait_for_scrub $pgid "$last_scrub"
}

function pg_deep_scrub() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
    ceph pg deep-scrub $pgid
    wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
}

function test_pg_scrub() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    pg_scrub 1.0 || return 1
    kill_daemons $dir KILL osd || return 1
    ! TIMEOUT=1 pg_scrub 1.0 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run the *command* and expect it to fail (i.e. return a non zero status).
# The output (stderr and stdout) is stored in a temporary file in *dir*
# and is expected to contain the string *expected*.
#
# Return 0 if the command failed and the string was found. Otherwise
# return 1 and cat the full output of the command on stderr for debug.
#
# @param dir temporary directory to store the output
# @param expected string to look for in the output
# @param command ... the command and its arguments
# @return 0 on success, 1 on error
#
function expect_failure() {
    local dir=$1
    shift
    local expected="$1"
    shift
    local success

    if "$@" > $dir/out 2>&1 ; then
        success=true
    else
        success=false
    fi

    if $success || ! grep --quiet "$expected" $dir/out ; then
        cat $dir/out >&2
        return 1
    else
        return 0
    fi
}

function test_expect_failure() {
    local dir=$1

    setup $dir || return 1
    expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
    # the command did not fail
    ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
    grep --quiet FAIL $dir/out || return 1
    # the command failed but the output does not contain the expected string
    ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
    ! grep --quiet FAIL $dir/out || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Given the *last_scrub*, wait for scrub to happen on **pgid**. It
# will fail if scrub does not complete within $TIMEOUT seconds. The
# scrub is complete whenever the **get_last_scrub_stamp** function
# reports a timestamp different from the one given in argument.
#
# @param pgid the id of the PG
# @param last_scrub timestamp of the last scrub for *pgid*
# @param sname the stamp field to watch (defaults to last_scrub_stamp)
# @return 0 on success, 1 on error
#
function wait_for_scrub() {
    local pgid=$1
    local last_scrub="$2"
    local sname=${3:-last_scrub_stamp}

    for ((i=0; i < $TIMEOUT; i++)); do
        if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
            return 0
        fi
        sleep 1
    done
    return 1
}

function test_wait_for_scrub() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local pgid=1.0
    ceph pg repair $pgid
    local last_scrub=$(get_last_scrub_stamp $pgid)
    wait_for_scrub $pgid "$last_scrub" || return 1
    kill_daemons $dir KILL osd || return 1
    last_scrub=$(get_last_scrub_stamp $pgid)
    ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return 0 if the erasure code *plugin* is available, 1 otherwise.
#
# @param plugin erasure code plugin
# @return 0 on success, 1 on error
#
function erasure_code_plugin_exists() {
    local plugin=$1
    local status
    local grepstr
    local s
    case `uname` in
        FreeBSD) grepstr="Cannot open.*$plugin" ;;
        *) grepstr="$plugin.*No such file" ;;
    esac

    s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
    status=$?
    if [ $status -eq 0 ]; then
        ceph osd erasure-code-profile rm TESTPROFILE
    elif ! echo $s | grep --quiet "$grepstr" ; then
        status=1
        # display why the string was rejected.
        echo $s
    fi
    return $status
}

function test_erasure_code_plugin_exists() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    erasure_code_plugin_exists jerasure || return 1
    ! erasure_code_plugin_exists FAKE || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Display all log files from **dir** on stdout.
#
# @param dir directory in which all data is stored
#
function display_logs() {
    local dir=$1

    find $dir -maxdepth 1 -name '*.log' | \
        while read file ; do
            echo "======================= $file"
            cat $file
        done
}

function test_display_logs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    kill_daemons $dir || return 1
    display_logs $dir > $dir/log.out
    grep --quiet mon.a.log $dir/log.out || return 1
    teardown $dir || return 1
}

#######################################################################
##
# Spawn a command in background and save the pid in the variable name
# passed in argument. To make the output reading easier, the output is
# prepended with the process id.
#
# Example:
#   pids1=""
#   run_in_background pids1 bash -c 'sleep 1; exit 1'
#
# @param pid_variable the variable name (not value) where the pids will be stored
# @param ... the command to execute
# @return only the pid_variable output should be considered and used with **wait_background**
#
function run_in_background() {
    local pid_variable=$1
    shift;
    # Execute the command and prepend the output with its pid
    # We make sure to return the exit status of the command, not awk's.
    ("$@" |& awk '{ a[i++] = $0 }END{for (i = 0; i in a; ++i) { print "'$$': " a[i]} }'; return ${PIPESTATUS[0]}) >&2 &
    eval "$pid_variable+=\" $!\""
}

function test_run_in_background() {
    local pids
    run_in_background pids sleep 1
    run_in_background pids sleep 1
    test $(echo $pids | wc -w) = 2 || return 1
    wait $pids || return 1
}

#######################################################################
##
# Wait for pids running in background to complete.
# This function is usually used after a **run_in_background** call.
# Example:
#   pids1=""
#   run_in_background pids1 bash -c 'sleep 1; exit 1'
#   wait_background pids1
#
# @param pids the variable name that contains the active PIDS. Set as empty at the end of the function.
# @return 1 if at least one process exited in error, 0 otherwise
#
function wait_background() {
    # We extract the PIDS from the variable name
    pids=${!1}

    return_code=0
    for pid in $pids; do
        if ! wait $pid; then
            # If one process failed then return 1
            return_code=1
        fi
    done

    # We empty the variable reporting that all processes ended
    eval "$1=''"

    return $return_code
}


function test_wait_background() {
    local pids=""
    run_in_background pids bash -c "sleep 1; exit 1"
    run_in_background pids bash -c "sleep 2; exit 0"
    wait_background pids
    if [ $? -ne 1 ]; then return 1; fi

    run_in_background pids bash -c "sleep 1; exit 0"
    run_in_background pids bash -c "sleep 2; exit 0"
    wait_background pids
    if [ $? -ne 0 ]; then return 1; fi

    if [ ! -z "$pids" ]; then return 1; fi
}

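##
# Ask every OSD to flush its PG stats to the mon, then wait until
# ceph osd last-stat-seq reports that the mon has received the
# corresponding sequence numbers, so that subsequent queries (e.g.
# ceph pg dump) reflect up to date statistics. Fails if the stats
# are not flushed within **timeout** seconds.
#
# @param timeout max number of seconds to wait (defaults to $TIMEOUT)
# @return 0 on success, 1 on error
#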
function flush_pg_stats()
{
    local timeout=${1:-$TIMEOUT}

    ids=`ceph osd ls`
    seqs=''
    for osd in $ids; do
        seq=`ceph tell osd.$osd flush_pg_stats`
        seqs="$seqs $osd-$seq"
    done

    for s in $seqs; do
        osd=`echo $s | cut -d - -f 1`
        seq=`echo $s | cut -d - -f 2`
        echo "waiting osd.$osd seq $seq"
        while test $(ceph osd last-stat-seq $osd) -lt $seq; do
            sleep 1
            if [ $((timeout--)) -eq 0 ]; then
                return 1
            fi
        done
    done
}

function test_flush_pg_stats()
{
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    rados -p rbd put obj /etc/group
    flush_pg_stats
    local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
    raw_bytes_used=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
    bytes_used=`ceph df detail --format=json | jq "$jq_filter.bytes_used"`
    test $raw_bytes_used -gt 0 || return 1
    test $raw_bytes_used = $bytes_used || return 1
    teardown $dir
}

#######################################################################

##
# Call the **run** function (which must be defined by the caller) with
# the **dir** argument followed by the caller argument list.
#
# If the **run** function returns on error, all logs found in **dir**
# are displayed for diagnostic purposes.
#
# **teardown** function is called when the **run** function returns
# (on success or on error), to cleanup leftovers. The CEPH_CONF is set
# to /dev/null and CEPH_ARGS is unset so that the tests are protected from
# external interference.
#
# It is the responsibility of the **run** function to call the
# **setup** function to prepare the test environment (create a temporary
# directory etc.).
#
# The shell is required (via PS4) to display the function and line
# number whenever a statement is executed to help debugging.
#
# @param dir directory in which all data is stored
# @param ... arguments passed transparently to **run**
# @return 0 on success, 1 on error
#
function main() {
    local dir=td/$1
    shift

    shopt -s -o xtrace
    PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

    export PATH=${CEPH_BUILD_VIRTUALENV}/ceph-disk-virtualenv/bin:${CEPH_BUILD_VIRTUALENV}/ceph-detect-init-virtualenv/bin:.:$PATH # make sure programs from sources are preferred
    #export PATH=$CEPH_ROOT/src/ceph-disk/virtualenv/bin:$CEPH_ROOT/src/ceph-detect-init/virtualenv/bin:.:$PATH # make sure programs from sources are preferred

    export CEPH_CONF=/dev/null
    unset CEPH_ARGS

    local code
    if run $dir "$@" ; then
        code=0
    else
        code=1
    fi
    teardown $dir $code || return 1
    return $code
}

#######################################################################

function run_tests() {
    shopt -s -o xtrace
    PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

    export PATH=${CEPH_BUILD_VIRTUALENV}/ceph-disk-virtualenv/bin:${CEPH_BUILD_VIRTUALENV}/ceph-detect-init-virtualenv/bin:.:$PATH # make sure programs from sources are preferred
    #export PATH=$CEPH_ROOT/src/ceph-disk/virtualenv/bin:$CEPH_ROOT/src/ceph-detect-init/virtualenv/bin:.:$PATH # make sure programs from sources are preferred

    export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    export CEPH_CONF=/dev/null

    local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
    local dir=td/ceph-helpers

    for func in $funcs ; do
        if ! $func $dir; then
            teardown $dir 1
            return 1
        fi
    done
}

if test "$1" = TESTS ; then
    shift
    run_tests "$@"
    exit $?
fi

# NOTE:
# jq only supports --exit-status|-e from version 1.4 onwards, which makes
# returning on error waaaay prettier and straightforward.
# However, the current automated upstream build is running with v1.3,
# which has no idea what -e is. Hence the convoluted error checking we
# need. Sad.
# The next time someone changes this code, please check if v1.4 is now
# a thing, and, if so, please change these to use -e. Thanks.

# jq '.all.supported | select([.[] == "foo"] | any)'
function jq_success() {
    input="$1"
    filter="$2"
    expects="\"$3\""

    in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g")
    filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g")

    ret=$(echo "$in_escaped" | jq "$filter_escaped")
    if [[ "$ret" == "true" ]]; then
        return 0
    elif [[ -n "$expects" ]]; then
        if [[ "$ret" == "$expects" ]]; then
            return 0
        fi
    fi
    return 1
}
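
##
# Example (illustrative): check that at least one mon is in quorum,
# assuming mon_status JSON output has been captured in $out:
#
# ~~~~~~~~~~~~~~~~{.sh}
# out=$(ceph mon_status --format=json 2>/dev/null)
# jq_success "$out" '.quorum | length > 0' || return 1
# ~~~~~~~~~~~~~~~~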
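
##
# Inject an error for **objname** in **poolname** via the admin
# socket of the OSD holding it: the inject${which}err command is
# called (in practice **which** is data or mdata) after enabling
# filestore_debug_inject_read_err, retrying for up to 10 seconds
# until the injection is accepted. For an erasure coded pool ("ec")
# the error is injected on the shard **shard_id**; for a replicated
# pool the shard id is ignored.
#
# @param pooltype "ec" for erasure coded pools, anything else otherwise
# @param which kind of error to inject (data or mdata)
# @param poolname an existing pool
# @param objname name of the object
# @param dir path name of the environment (accepted but not used here)
# @param shard_id shard of the object (ec pools only)
# @return 0 on success, 1 on error
#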
function inject_eio() {
    local pooltype=$1
    shift
    local which=$1
    shift
    local poolname=$1
    shift
    local objname=$1
    shift
    local dir=$1
    shift
    local shard_id=$1
    shift

    local -a initial_osds=($(get_osds $poolname $objname))
    local osd_id=${initial_osds[$shard_id]}
    if [ "$pooltype" != "ec" ]; then
        shard_id=""
    fi
    set_config osd $osd_id filestore_debug_inject_read_err true || return 1
    local loop=0
    while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
             inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
        loop=$(expr $loop + 1)
        if [ $loop = "10" ]; then
            return 1
        fi
        sleep 1
    done
}

# Local Variables:
# compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"
# End: