#!/bin/bash
#
# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
# Copyright (C) 2014 Federico Gimenez <fgimenez@coit.es>
#
# Author: Loic Dachary <loic@dachary.org>
# Author: Federico Gimenez <fgimenez@coit.es>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
TIMEOUT=300
PG_NUM=4
: ${CEPH_BUILD_VIRTUALENV:=/tmp}

if type xmlstarlet > /dev/null 2>&1; then
    XMLSTARLET=xmlstarlet
elif type xml > /dev/null 2>&1; then
    XMLSTARLET=xml
else
    echo "Missing xmlstarlet binary!"
    exit 1
fi

if [ `uname` = FreeBSD ]; then
    SED=gsed
    DIFFCOLOPTS=""
else
    SED=sed
    termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
    if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
        termwidth="-W ${termwidth}"
    fi
    DIFFCOLOPTS="-y $termwidth"
fi

#! @file ceph-helpers.sh
# @brief Toolbox to manage Ceph cluster dedicated to testing
#
# Example use case:
#
# ~~~~~~~~~~~~~~~~{.sh}
# source ceph-helpers.sh
#
# function mytest() {
#   # cleanup leftovers and reset mydir
#   setup mydir
#   # create a cluster with one monitor and three osds
#   run_mon mydir a
#   run_osd mydir 0
#   run_osd mydir 1
#   run_osd mydir 2
#   # put and get an object
#   rados --pool rbd put GROUP /etc/group
#   rados --pool rbd get GROUP /tmp/GROUP
#   # stop the cluster and cleanup the directory
#   teardown mydir
# }
# ~~~~~~~~~~~~~~~~
#
# The focus is on simplicity and efficiency, in the context of
# functional tests. The output is intentionally very verbose
# and functions return as soon as an error is found. The caller
# is also expected to abort on the first error so that debugging
# can be done by looking at the end of the output.
#
# Each function is documented, implemented and tested independently.
# When modifying a helper, the test and the documentation are
# expected to be updated and it is easier if they are collocated. A
# test for a given function can be run with
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS test_get_osds
# ~~~~~~~~~~~~~~~~
#
# and all the tests (i.e. all functions matching test_*) are run
# with:
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS
# ~~~~~~~~~~~~~~~~
#
# A test function takes a single argument: the directory dedicated
# to the tests. It is expected to not create any file outside of this
# directory and to remove it entirely when it completes successfully.
#


##
# Cleanup any leftovers found in **dir** via **teardown**
# and reset **dir** as an empty environment.
#
# @param dir path name of the environment
# @return 0 on success, 1 on error
#
function setup() {
    local dir=$1
    teardown $dir || return 1
    mkdir -p $dir
}

function test_setup() {
    local dir=$1
    setup $dir || return 1
    test -d $dir || return 1
    setup $dir || return 1
    test -d $dir || return 1
    teardown $dir
}

#######################################################################

##
# Kill all daemons for which a .pid file exists in **dir** and remove
# **dir**. If the file system in which **dir** resides is btrfs, delete
# all subvolumes that relate to it.
#
# @param dir path name of the environment
# @return 0 on success, 1 on error
#
function teardown() {
    local dir=$1
    kill_daemons $dir KILL
    if [ `uname` != FreeBSD ] \
        && [ $(stat -f -c '%T' .) == "btrfs" ]; then
        __teardown_btrfs $dir
    fi
    rm -fr $dir
}

function __teardown_btrfs() {
    local btrfs_base_dir=$1
    local btrfs_root=$(df -P . | tail -1 | awk '{print $NF}')
    local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list . -t | awk '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/")
    for subvolume in $btrfs_dirs; do
        sudo btrfs subvolume delete $btrfs_root/$subvolume
    done
}

function test_teardown() {
    local dir=$1
    setup $dir || return 1
    teardown $dir || return 1
    ! test -d $dir || return 1
}

#######################################################################

##
# Send a signal to a single daemon.
# This is a helper function for kill_daemons.
#
# After the daemon is sent **signal**, its actual termination
# is verified by sending it signal 0. If the daemon is
# still alive, kill_daemon pauses for a few seconds and
# tries again. This repeats for a fixed number of times
# before kill_daemon gives up and returns on failure. The list of
# sleep intervals can be specified as **delays** and defaults
# to:
#
#  0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
#
# This sequence starts with a very short sleep time (0.1), in case the
# machine is fast enough that the daemon terminates in a fraction of a
# second. The increasing sleep numbers should give plenty of time for
# the daemon to die even on the slowest running machine. If a daemon
# takes more than a few minutes to stop (the sum of all sleep times),
# there probably is no point in waiting longer and a number of things
# are likely to go wrong anyway: better to give up and return on error.
#
# @param pidfile path of the file that contains the process id to signal
# @param send_signal the signal to send
# @param delays sequence of sleep times before failure
#
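# Example (a sketch, assuming an osd daemon was started in $dir by run_osd):
#
#     kill_daemon $dir/osd.0.pid TERM || return 1
#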
function kill_daemon() {
    local pid=$(cat $1)
    local send_signal=$2
    local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
    local exit_code=1
    for try in $delays ; do
        if kill -$send_signal $pid 2> /dev/null ; then
            exit_code=1
        else
            exit_code=0
            break
        fi
        send_signal=0
        sleep $try
    done;
    return $exit_code
}

function test_kill_daemon() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1

    name_prefix=osd
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # sending signal 0 won't kill the daemon
        # waiting just for one second instead of the default schedule
        # allows us to quickly verify what happens when kill fails
        # to stop the daemon (i.e. it must return false)
        #
        ! kill_daemon $pidfile 0 1 || return 1
        #
        # kill just the osd and verify the mon is still responsive
        #
        kill_daemon $pidfile TERM || return 1
    done

    ceph osd dump | grep "osd.0 down" || return 1

    name_prefix=mgr
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mgr
        #
        kill_daemon $pidfile TERM || return 1
    done

    name_prefix=mon
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mon and verify it cannot be reached
        #
        kill_daemon $pidfile TERM || return 1
        ! timeout 60 ceph --connect-timeout 60 status || return 1
    done

    teardown $dir || return 1
}

##
# Kill all daemons for which a .pid file exists in **dir**. Each
# daemon is sent a **signal** and kill_daemons waits for it to exit
# during a few minutes. By default all daemons are killed. If a
# **name_prefix** is provided, only the daemons for which a pid
# file is found matching the prefix are killed. See run_osd and
# run_mon for more information about the name conventions for
# the pid files.
#
# Send TERM to all daemons : kill_daemons $dir
# Send KILL to all daemons : kill_daemons $dir KILL
# Send KILL to all osds : kill_daemons $dir KILL osd
# Send KILL to osd 1 : kill_daemons $dir KILL osd.1
#
# If a daemon is sent the TERM signal and does not terminate
# within a few minutes, it will still be running even after
# kill_daemons returns.
#
# If all daemons are killed successfully the function returns 0;
# if at least one daemon remains, this is treated as an
# error and the function returns 1.
#
# @param dir path name of the environment
# @param signal name of the first signal (defaults to TERM)
# @param name_prefix only kill matching daemons (defaults to all)
# @param delays sequence of sleep times before failure
# @return 0 on success, 1 on error
#
function kill_daemons() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local dir=$1
    local signal=${2:-TERM}
    local name_prefix=$3 # optional, osd, mon, osd.1
    local delays=$4 # optional timing
    local status=0
    local pids=""

    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        run_in_background pids kill_daemon $pidfile $signal $delays
    done

    wait_background pids
    status=$?

    $trace && shopt -s -o xtrace
    return $status
}

function test_kill_daemons() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    #
    # sending signal 0 won't kill the daemon
    # waiting just for one second instead of the default schedule
    # allows us to quickly verify what happens when kill fails
    # to stop the daemon (i.e. it must return false)
    #
    ! kill_daemons $dir 0 osd 1 || return 1
    #
    # kill just the osd and verify the mon is still responsive
    #
    kill_daemons $dir TERM osd || return 1
    ceph osd dump | grep "osd.0 down" || return 1
    #
    # kill the mgr
    #
    kill_daemons $dir TERM mgr || return 1
    #
    # kill the mon and verify it cannot be reached
    #
    kill_daemons $dir TERM || return 1
    ! timeout 60 ceph --connect-timeout 60 status || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run a monitor by the name mon.**id** with data in **dir**/**id**.
# The logs can be found in **dir**/mon.**id**.log, the pid file
# is **dir**/mon.**id**.pid and the admin socket is
# **dir**/**id**/ceph-mon.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-mon --mkfs
# and the ceph-mon daemon.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_mon, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_mon $dir a # spawn a mon and bind port 7018
# run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
#
# If mon_initial_members is not set, the default rbd pool is deleted
# and replaced with a replicated pool with fewer placement groups to
# speed up initialization. If mon_initial_members is set, no attempt
# is made to recreate the rbd pool because it would hang forever,
# waiting for other mons to join.
#
# A **dir**/ceph.conf file is created but not meant to be used by any
# function. It is convenient for debugging a failure with:
#
#     ceph --conf **dir**/ceph.conf -s
#
# @param dir path name of the environment
# @param id mon identifier
# @param ... can be any option valid for ceph-mon
# @return 0 on success, 1 on error
#
function run_mon() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mon \
        --id $id \
        --mkfs \
        --mon-data=$data \
        --run-dir=$dir \
        "$@" || return 1

    ceph-mon \
        --id $id \
        --mon-osd-full-ratio=.99 \
        --mon-data-avail-crit=1 \
        --paxos-propose-interval=0.1 \
        --osd-crush-chooseleaf-type=0 \
        --erasure-code-dir=$CEPH_LIB \
        --plugin-dir=$CEPH_LIB \
        --debug-mon 20 \
        --debug-ms 20 \
        --debug-paxos 20 \
        --chdir= \
        --mon-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$dir/\$cluster-\$name.asok \
        --mon-cluster-log-file=$dir/log \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        --mon-allow-pool-delete \
        "$@" || return 1

    cat > $dir/ceph.conf <<EOF
[global]
fsid = $(get_config mon $id fsid)
mon host = $(get_config mon $id mon_host)
EOF
    if test -z "$(get_config mon $id mon_initial_members)" ; then
        ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
        ceph osd pool create rbd $PG_NUM || return 1
        ceph osd set-backfillfull-ratio .99
    fi
}

function test_run_mon() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a --mon-initial-members=a || return 1
    # rbd has not been deleted / created, hence it has pool id 0
    ceph osd dump | grep "pool 0 'rbd'" || return 1
    kill_daemons $dir || return 1

    run_mon $dir a || return 1
    # rbd has been deleted / created, hence it does not have pool id 0
    ! ceph osd dump | grep "pool 0 'rbd'" || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $dir/ceph-mon.a.asok \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"3"}' || return 1

    ! CEPH_ARGS='' ceph status || return 1
    CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1

    kill_daemons $dir || return 1

    run_mon $dir a --osd_pool_default_size=1 || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $dir/ceph-mon.a.asok \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"1"}' || return 1
    kill_daemons $dir || return 1

    CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
        run_mon $dir a || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $dir/ceph-mon.a.asok \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"2"}' || return 1
    kill_daemons $dir || return 1

    teardown $dir || return 1
}

#######################################################################

function run_mgr() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mgr \
        --id $id \
        --erasure-code-dir=$CEPH_LIB \
        --plugin-dir=$CEPH_LIB \
        --debug-mgr 20 \
        --debug-objecter 20 \
        --debug-ms 20 \
        --debug-paxos 20 \
        --chdir= \
        --mgr-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$dir/\$cluster-\$name.asok \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        "$@" || return 1
}

#######################################################################

##
# Create (prepare) and run (activate) an osd by the name osd.**id**
# with data in **dir**/**id**. The logs can be found in
# **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
# the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The run_osd function creates the OSD data directory with ceph-disk
# prepare on the **dir**/**id** directory and relies on the
# activate_osd function to run the daemon.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function run_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    mkdir -p $osd_data
    ceph-disk $ceph_disk_args \
        prepare --filestore $osd_data || return 1

    activate_osd $dir $id "$@"
}

function run_osd_bluestore() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    mkdir -p $osd_data
    ceph-disk $ceph_disk_args \
        prepare --bluestore $osd_data || return 1

    activate_osd $dir $id "$@"
}

function test_run_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.0.asok \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    run_osd $dir 1 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.1.asok \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.2.asok \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"30"}' || return 1

    teardown $dir || return 1
}

#######################################################################

##
# Shutdown and remove all traces of the osd by the name osd.**id**.
#
# The OSD is shutdown with the TERM signal. It is then removed from
# the auth list, crush map, osd map etc and the files associated with
# it are also removed.
#
# @param dir path name of the environment
# @param id osd identifier
# @return 0 on success, 1 on error
#
function destroy_osd() {
    local dir=$1
    local id=$2

    kill_daemons $dir TERM osd.$id || return 1
    ceph osd out osd.$id || return 1
    ceph auth del osd.$id || return 1
    ceph osd crush remove osd.$id || return 1
    ceph osd rm $id || return 1
    teardown $dir/$id || return 1
    rm -fr $dir/$id
}

function test_destroy_osd() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    destroy_osd $dir 0 || return 1
    ! ceph osd dump | grep "osd.0 " || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run (activate) an osd by the name osd.**id** with data in
# **dir**/**id**. The logs can be found in **dir**/osd.**id**.log,
# the pid file is **dir**/osd.**id**.pid and the admin socket is
# **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to activate_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The activate_osd function expects a valid OSD data directory
# in **dir**/**id**, either just created via run_osd or re-using
# one left by a previous run of ceph-osd. The ceph-osd daemon is
# run indirectly via ceph-disk activate.
#
# The activate_osd function blocks until the monitor reports the osd
# up. If it fails to do so within $TIMEOUT seconds, activate_osd
# fails.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# activate_osd $dir 0 # activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function activate_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_disk_args
    ceph_disk_args+=" --statedir=$dir"
    ceph_disk_args+=" --sysconfdir=$dir"
    ceph_disk_args+=" --prepend-to-path="

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --chdir="
    ceph_args+=" --erasure-code-dir=$CEPH_LIB"
    ceph_args+=" --plugin-dir=$CEPH_LIB"
    ceph_args+=" --osd-class-dir=$CEPH_LIB"
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len 460"
    ceph_args+=" --osd-max-object-namespace-len 64"
    ceph_args+=" "
    ceph_args+="$@"
    mkdir -p $osd_data
    CEPH_ARGS="$ceph_args " ceph-disk $ceph_disk_args \
        activate \
        --mark-init=none \
        $osd_data || return 1

    [ "$id" = "$(cat $osd_data/whoami)" ] || return 1

    wait_for_osd up $id || return 1
}

function test_activate_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.0.asok \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    kill_daemons $dir TERM osd || return 1

    activate_osd $dir 0 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.0.asok \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    teardown $dir || return 1
}

#######################################################################

##
# Wait until the OSD **id** is either up or down, as specified by
# **state**. It fails after $TIMEOUT seconds.
#
# @param state either up or down
# @param id osd identifier
# @return 0 on success, 1 on error
#
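# Example (a sketch, assuming osd 0 was started with run_osd):
#
#     wait_for_osd up 0 || return 1
#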
function wait_for_osd() {
    local state=$1
    local id=$2

    status=1
    for ((i=0; i < $TIMEOUT; i++)); do
        echo $i
        if ! ceph osd dump | grep "osd.$id $state"; then
            sleep 1
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_for_osd() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_osd up 0 || return 1
    kill_daemons $dir TERM osd || return 1
    wait_for_osd down 0 || return 1
    ( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Display the list of OSD ids supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT white space separated list of OSD ids
# @return 0 on success, 1 on error
#
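# Example (a sketch; the actual ids depend on the cluster):
#
#     osds=$(get_osds rbd GROUP)   # e.g. "0 1"
#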
function get_osds() {
    local poolname=$1
    local objectname=$2

    local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting | .[]')
    # get rid of the trailing space
    echo $osds
}

function test_get_osds() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    wait_for_clean || return 1
    get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait for the monitor to form quorum (optionally, of size N)
#
# @param timeout maximum duration to wait for quorum to be formed
# @param quorumsize size of quorum to wait for
# @return 0 on success, 1 on error
#
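# Example (a sketch, assuming three mons are expected in quorum):
#
#     wait_for_quorum 300 3 || return 1
#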
function wait_for_quorum() {
    local timeout=$1
    local quorumsize=$2

    if [[ -z "$timeout" ]]; then
        timeout=300
    fi

    if [[ -z "$quorumsize" ]]; then
        timeout $timeout ceph mon_status --format=json >&/dev/null || return 1
        return 0
    fi

    no_quorum=1
    wait_until=$((`date +%s` + $timeout))
    while [[ $(date +%s) -lt $wait_until ]]; do
        jqfilter='.quorum | length == '$quorumsize
        jqinput="$(timeout $timeout ceph mon_status --format=json 2>/dev/null)"
        res=$(echo $jqinput | jq "$jqfilter")
        if [[ "$res" == "true" ]]; then
            no_quorum=0
            break
        fi
    done
    return $no_quorum
}

#######################################################################

##
# Return the PG supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT a PG
# @return 0 on success, 1 on error
#
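# Example (a sketch; the pgid depends on the pool and the object):
#
#     pgid=$(get_pg rbd GROUP)   # e.g. "1.2c"
#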
function get_pg() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
}

function test_get_pg() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the value of the **config**, obtained via the config get command
# of the admin socket of **daemon**.**id**.
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param STDOUT the config value
# @return 0 on success, 1 on error
#
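# Example (from test_get_config below):
#
#     test $(get_config mon a osd_pool_default_size) = 1 || return 1
#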
function get_config() {
    local daemon=$1
    local id=$2
    local config=$3

    CEPH_ARGS='' \
        ceph --format json daemon $dir/ceph-$daemon.$id.asok \
            config get $config 2> /dev/null | \
        jq -r ".$config"
}

function test_get_config() {
    local dir=$1

    # override the default config using command line arg and check it
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    test $(get_config mon a osd_pool_default_size) = 1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 --osd_max_scrubs=3 || return 1
    test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Set the **config** to specified **value**, via the config set command
# of the admin socket of **daemon**.**id**
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param value the config value
# @return 0 on success, 1 on error
#
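# Example (from test_set_config below):
#
#     set_config mon a ms_crc_header false || return 1
#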
function set_config() {
    local daemon=$1
    local id=$2
    local config=$3
    local value=$4

    test $(env CEPH_ARGS='' ceph --format json daemon $dir/ceph-$daemon.$id.asok \
               config set $config $value 2> /dev/null | \
           jq 'has("success")') == true
}

function test_set_config() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    set_config mon a ms_crc_header false || return 1
    test $(get_config mon a ms_crc_header) = false || return 1
    set_config mon a ms_crc_header true || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the OSD id of the primary OSD supporting the **objectname**
# stored in **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the primary OSD id
# @return 0 on success, 1 on error
#
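# Example (a sketch; the id returned depends on the cluster):
#
#     primary=$(get_primary rbd GROUP)
#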
function get_primary() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting_primary'
}

function test_get_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    wait_for_clean || return 1
    test $(get_primary rbd GROUP) = $osd || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the id of any OSD supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map, except the primary.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the OSD id
# @return 0 on success, 1 on error
#
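# Example (a sketch, assuming a pool of size 2 so that a non-primary
# OSD exists):
#
#     not_primary=$(get_not_primary rbd GROUP)
#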
function get_not_primary() {
    local poolname=$1
    local objectname=$2

    local primary=$(get_primary $poolname $objectname)
    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq ".acting | map(select (. != $primary)) | .[0]"
}

function test_get_not_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    wait_for_clean || return 1
    local primary=$(get_primary rbd GROUP)
    local not_primary=$(get_not_primary rbd GROUP)
    test $not_primary != $primary || return 1
    test $not_primary = 0 -o $not_primary = 1 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run ceph-objectstore-tool against the OSD **id** using the data path
# **dir**. The OSD is killed with TERM prior to running
# ceph-objectstore-tool because access to the data path is
# exclusive. The OSD is restarted after the command completes. The
# objectstore_tool returns after all PGs are active+clean again.
#
# @param dir the data path of the OSD
# @param id the OSD id
# @param ... arguments to ceph-objectstore-tool
# @param STDIN the input of ceph-objectstore-tool
# @param STDOUT the output of ceph-objectstore-tool
# @return 0 on success, 1 on error
#
# The value of $ceph_osd_args will be passed to restarted osds
#
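# Example (a sketch, assuming the object GROUP was written to osd 0):
#
#     objectstore_tool $dir 0 GROUP get-bytes > $dir/GROUP || return 1
#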
function objectstore_tool() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local osd_type=$(cat $osd_data/type)

    kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1

    local journal_args
    if [ "$osd_type" == "filestore" ]; then
        journal_args=" --journal-path $osd_data/journal"
    fi
    ceph-objectstore-tool \
        --data-path $osd_data \
        $journal_args \
        "$@" || return 1
    activate_osd $dir $id $ceph_osd_args >&2 || return 1
    wait_for_clean >&2
}

function test_objectstore_tool() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    wait_for_clean || return 1
    rados --pool rbd put GROUP /etc/group || return 1
    objectstore_tool $dir $osd GROUP get-bytes | \
        diff - /etc/group
    ! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Predicate checking if there is an ongoing recovery in the
# cluster. If any of the recovering_{keys,bytes,objects}_per_sec
# counters are reported by ceph status, it means recovery is in
# progress.
#
# @return 0 if recovery in progress, 1 otherwise
#
function get_is_making_recovery_progress() {
    local recovery_progress
    recovery_progress+=".recovering_keys_per_sec + "
    recovery_progress+=".recovering_bytes_per_sec + "
    recovery_progress+=".recovering_objects_per_sec"
    local progress=$(ceph --format json status 2>/dev/null | \
        jq -r ".pgmap | $recovery_progress")
    test "$progress" != null
}

function test_get_is_making_recovery_progress() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    ! get_is_making_recovery_progress || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the number of active PGs in the cluster. A PG is active if
# ceph pg dump pgs reports it as both **active** and **clean** and
# not **stale**.
#
# @param STDOUT the number of active PGs
# @return 0 on success, 1 on error
#
function get_num_active_clean() {
    local expression
    expression+="select(contains(\"active\") and contains(\"clean\")) | "
    expression+="select(contains(\"stale\") | not)"
    ceph --format json pg dump pgs 2>/dev/null | \
        jq "[.[] | .state | $expression] | length"
}

function test_get_num_active_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    local num_active_clean=$(get_num_active_clean)
    test "$num_active_clean" = $PG_NUM || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the number of PGs in the cluster, according to the
# num_pgs field of ceph status.
#
# @param STDOUT the number of PGs
# @return 0 on success, 1 on error
#
function get_num_pgs() {
    ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
}

function test_get_num_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    local num_pgs=$(get_num_pgs)
    test "$num_pgs" -gt 0 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the date and time of the last completed scrub for **pgid**,
# as reported by ceph pg dump pgs. Note that a repair also sets this
# date.
#
# @param pgid the id of the PG
# @param sname the stamp to report (defaults to last_scrub_stamp)
# @param STDOUT the date and time of the last scrub
# @return 0 on success, 1 on error
#
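# Example (from test_get_last_scrub_stamp below):
#
#     stamp=$(get_last_scrub_stamp 1.0)
#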
function get_last_scrub_stamp() {
    local pgid=$1
    local sname=${2:-last_scrub_stamp}
    ceph --format json pg dump pgs 2>/dev/null | \
        jq -r ".[] | select(.pgid==\"$pgid\") | .$sname"
}

function test_get_last_scrub_stamp() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    stamp=$(get_last_scrub_stamp 1.0)
    test -n "$stamp" || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Predicate checking if the cluster is clean, i.e. all of its PGs are
# in a clean state (see get_num_active_clean for a definition).
#
# @return 0 if the cluster is clean, 1 otherwise
#
function is_clean() {
    num_pgs=$(get_num_pgs)
    test $num_pgs != 0 || return 1
    test $(get_num_active_clean) = $num_pgs || return 1
}

function test_is_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    is_clean || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return a list of numbers that are increasingly larger and whose
# total is **timeout** seconds. It can be used to have a short sleep
# delay while waiting for an event on a fast machine. But if the
# event is slow to happen, the increasingly larger delays avoid
# stressing the machine even further or spamming the logs.
#
# @param timeout sum of all delays, in seconds
# @param first_step first delay in the list (defaults to 1)
# @return a list of sleep delays
#
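# Example (a sketch of the polling loop wait_for_clean builds on the
# delays; some_predicate is a placeholder for the condition waited for):
#
#     local -a delays=($(get_timeout_delays $TIMEOUT .1))
#     local -i loop=0
#     while ! some_predicate ; do
#         (( loop < ${#delays[*]} )) || return 1
#         sleep ${delays[$loop]}
#         loop+=1
#     done
#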
function get_timeout_delays() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local timeout=$1
    local first_step=${2:-1}

    local i
    local total="0"
    i=$first_step
    while test "$(echo $total + $i \<= $timeout | bc -l)" = "1"; do
        echo -n "$i "
        total=$(echo $total + $i | bc -l)
        i=$(echo $i \* 2 | bc -l)
    done
    if test "$(echo $total \< $timeout | bc -l)" = "1"; then
        echo -n $(echo $timeout - $total | bc -l)
    fi
    $trace && shopt -s -o xtrace
}

function test_get_timeout_delays() {
    test "$(get_timeout_delays 1)" = "1 " || return 1
    test "$(get_timeout_delays 5)" = "1 2 2" || return 1
    test "$(get_timeout_delays 6)" = "1 2 3" || return 1
    test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
    test "$(get_timeout_delays 8)" = "1 2 4 1" || return 1
    test "$(get_timeout_delays 1 .1)" = ".1 .2 .4 .3" || return 1
    test "$(get_timeout_delays 1.5 .1)" = ".1 .2 .4 .8 " || return 1
    test "$(get_timeout_delays 5 .1)" = ".1 .2 .4 .8 1.6 1.9" || return 1
    test "$(get_timeout_delays 6 .1)" = ".1 .2 .4 .8 1.6 2.9" || return 1
    test "$(get_timeout_delays 6.3 .1)" = ".1 .2 .4 .8 1.6 3.2 " || return 1
    test "$(get_timeout_delays 20 .1)" = ".1 .2 .4 .8 1.6 3.2 6.4 7.3" || return 1
}

#######################################################################

##
# Wait until the cluster becomes clean or fail if it does not make
# progress for $TIMEOUT seconds.
# Progress is measured either via the **get_is_making_recovery_progress**
# predicate or if the number of clean PGs changes (as returned by
# get_num_active_clean)
#
# @return 0 if the cluster is clean, 1 otherwise
#
function wait_for_clean() {
    local num_active_clean=-1
    local cur_active_clean
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0

    while test $(get_num_pgs) == 0 ; do
        sleep 1
    done

    while true ; do
        # Comparing get_num_active_clean & get_num_pgs is used to determine
        # if the cluster is clean. That's almost an inline of is_clean() to
        # get more performance by avoiding multiple calls of get_num_active_clean.
        cur_active_clean=$(get_num_active_clean)
        test $cur_active_clean = $(get_num_pgs) && break
        if test $cur_active_clean != $num_active_clean ; then
            loop=0
            num_active_clean=$cur_active_clean
        elif get_is_making_recovery_progress ; then
            loop=0
        elif (( $loop >= ${#delays[*]} )) ; then
            ceph report
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
    return 0
}

function test_wait_for_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    ! TIMEOUT=1 wait_for_clean || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait until ceph health detail matches **grepstr**, or fail if it
# does not do so within $TIMEOUT seconds. wait_for_health_ok is a
# shortcut that waits for the cluster to become HEALTH_OK again.
#
# @param grepstr string to look for in the health detail output
# @return 0 if the cluster is healthy, 1 otherwise
#
function wait_for_health() {
    local grepstr=$1
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0

    while ! ceph health detail | grep "$grepstr" ; do
        if (( $loop >= ${#delays[*]} )) ; then
            ceph health detail
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
}

function wait_for_health_ok() {
    wait_for_health "HEALTH_OK" || return 1
}

function test_wait_for_health_ok() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
    run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
    ! TIMEOUT=1 wait_for_health_ok || return 1
    run_osd $dir 0 || return 1
    wait_for_health_ok || return 1
    teardown $dir || return 1
}


#######################################################################

##
# Run repair on **pgid** and wait until it completes. The repair
# function will fail if repair does not complete within $TIMEOUT
# seconds.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
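# Example (a sketch, assuming PG 1.0 exists and its OSDs are up):
#
#     repair 1.0 || return 1
#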
function repair() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph pg repair $pgid
    wait_for_scrub $pgid "$last_scrub"
}

function test_repair() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    repair 1.0 || return 1
    kill_daemons $dir KILL osd || return 1
    ! TIMEOUT=1 repair 1.0 || return 1
    teardown $dir || return 1
}
#######################################################################

##
# Run scrub on **pgid** and wait until it completes. The pg_scrub
# function will fail if the scrub does not complete within $TIMEOUT
# seconds. The pg_scrub is complete whenever the
# **get_last_scrub_stamp** function reports a timestamp different from
# the one stored before starting the scrub.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
function pg_scrub() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph pg scrub $pgid
    wait_for_scrub $pgid "$last_scrub"
}

function pg_deep_scrub() {
    local pgid=$1
    local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
    ceph pg deep-scrub $pgid
    wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
}

function test_pg_scrub() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    pg_scrub 1.0 || return 1
    kill_daemons $dir KILL osd || return 1
    ! TIMEOUT=1 pg_scrub 1.0 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run the *command* and expect it to fail (i.e. return a non zero status).
# The output (stderr and stdout) is stored in a temporary file in *dir*
# and is expected to contain the string *expected*.
#
# Return 0 if the command failed and the string was found. Otherwise
# return 1 and cat the full output of the command on stderr for debug.
#
# @param dir temporary directory to store the output
# @param expected string to look for in the output
# @param command ... the command and its arguments
# @return 0 on success, 1 on error
#
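# Example (from test_expect_failure below):
#
#     expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
#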

function expect_failure() {
    local dir=$1
    shift
    local expected="$1"
    shift
    local success

    if "$@" > $dir/out 2>&1 ; then
        success=true
    else
        success=false
    fi

    if $success || ! grep --quiet "$expected" $dir/out ; then
        cat $dir/out >&2
        return 1
    else
        return 0
    fi
}

function test_expect_failure() {
    local dir=$1

    setup $dir || return 1
    expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
    # the command did not fail
    ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
    grep --quiet FAIL $dir/out || return 1
    # the command failed but the output does not contain the expected string
    ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
    ! grep --quiet FAIL $dir/out || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Given the *last_scrub*, wait for scrub to happen on **pgid**. It
# will fail if scrub does not complete within $TIMEOUT seconds. The
# scrub is complete whenever the **get_last_scrub_stamp** function
# reports a timestamp different from the one given in argument.
#
# @param pgid the id of the PG
# @param last_scrub timestamp of the last scrub for *pgid*
# @param sname the stamp to check (defaults to last_scrub_stamp)
# @return 0 on success, 1 on error
#
function wait_for_scrub() {
    local pgid=$1
    local last_scrub="$2"
    local sname=${3:-last_scrub_stamp}

    for ((i=0; i < $TIMEOUT; i++)); do
        if test "$last_scrub" != "$(get_last_scrub_stamp $pgid $sname)" ; then
            return 0
        fi
        sleep 1
    done
    return 1
}

function test_wait_for_scrub() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    wait_for_clean || return 1
    local pgid=1.0
    ceph pg repair $pgid
    local last_scrub=$(get_last_scrub_stamp $pgid)
    wait_for_scrub $pgid "$last_scrub" || return 1
    kill_daemons $dir KILL osd || return 1
    last_scrub=$(get_last_scrub_stamp $pgid)
    ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return 0 if the erasure code *plugin* is available, 1 otherwise.
#
# @param plugin erasure code plugin
# @return 0 on success, 1 on error
#
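# Example (from test_erasure_code_plugin_exists below):
#
#     erasure_code_plugin_exists jerasure || return 1
#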

function erasure_code_plugin_exists() {
    local plugin=$1
    local status
    local grepstr
    local s
    case `uname` in
        FreeBSD) grepstr="Cannot open.*$plugin" ;;
        *) grepstr="$plugin.*No such file" ;;
    esac

    s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
    local status=$?
    if [ $status -eq 0 ]; then
        ceph osd erasure-code-profile rm TESTPROFILE
    elif ! echo $s | grep --quiet "$grepstr" ; then
        status=1
        # display why the string was rejected.
        echo $s
    fi
    return $status
}

function test_erasure_code_plugin_exists() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    erasure_code_plugin_exists jerasure || return 1
    ! erasure_code_plugin_exists FAKE || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Display all log files from **dir** on stdout.
#
# @param dir directory in which all data is stored
#
function display_logs() {
    local dir=$1

    find $dir -maxdepth 1 -name '*.log' | \
        while read file ; do
            echo "======================= $file"
            cat $file
        done
}

function test_display_logs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    kill_daemons $dir || return 1
    display_logs $dir > $dir/log.out
    grep --quiet mon.a.log $dir/log.out || return 1
    teardown $dir || return 1
}

#######################################################################
##
# Spawn a command in background and save the pid in the variable name
# passed in argument. To make reading the output easier, the output is
# prepended with the process id.
#
# Example:
#   pids1=""
#   run_in_background pids1 bash -c 'sleep 1; exit 1'
#
# @param pid_variable the variable name (not value) where the pids will be stored
# @param ... the command to execute
# @return only the pid_variable output should be considered and used with **wait_background**
#
function run_in_background() {
    local pid_variable=$1
    shift;
    # Execute the command and prepend the output with its pid
    # We make sure to return the exit status of the command and not the awk one.
    ("$@" |& awk '{ a[i++] = $0 }END{for (i = 0; i in a; ++i) { print "'$$': " a[i]} }'; return ${PIPESTATUS[0]}) >&2 &
    eval "$pid_variable+=\" $!\""
}

function test_run_in_background() {
    local pids
    run_in_background pids sleep 1
    run_in_background pids sleep 1
    test $(echo $pids | wc -w) = 2 || return 1
    wait $pids || return 1
}

#######################################################################
##
# Wait for pids running in background to complete.
# This function is usually used after a **run_in_background** call.
# Example:
#   pids1=""
#   run_in_background pids1 bash -c 'sleep 1; exit 1'
#   wait_background pids1
#
# @param pids the variable name that contains the active PIDS. Set as empty at the end of the function.
# @return 1 if at least one process exited in error, 0 otherwise
#
function wait_background() {
    # We extract the PIDS from the variable name
    pids=${!1}

    return_code=0
    for pid in $pids; do
        if ! wait $pid; then
            # If one process failed then return 1
            return_code=1
        fi
    done

    # We empty the variable reporting that all processes ended
    eval "$1=''"

    return $return_code
}


function test_wait_background() {
    local pids=""
    run_in_background pids bash -c "sleep 1; exit 1"
    run_in_background pids bash -c "sleep 2; exit 0"
    wait_background pids
    if [ $? -ne 1 ]; then return 1; fi

    run_in_background pids bash -c "sleep 1; exit 0"
    run_in_background pids bash -c "sleep 2; exit 0"
    wait_background pids
    if [ $? -ne 0 ]; then return 1; fi

    if [ ! -z "$pids" ]; then return 1; fi
}

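##
# Tell each OSD to flush its PG stats and wait until, for every OSD,
# ceph osd last-stat-seq reports that the flushed stats have been
# received, so that subsequent pg dump / df output is up to date.
# Gives up after **timeout** seconds.
#
# @param timeout seconds to wait (defaults to $TIMEOUT)
# @return 0 on success, 1 on error
#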
function flush_pg_stats()
{
    local timeout=${1:-$TIMEOUT}

    ids=`ceph osd ls`
    seqs=''
    for osd in $ids; do
        seq=`ceph tell osd.$osd flush_pg_stats`
        seqs="$seqs $osd-$seq"
    done

    for s in $seqs; do
        osd=`echo $s | cut -d - -f 1`
        seq=`echo $s | cut -d - -f 2`
        echo "waiting osd.$osd seq $seq"
        while test $(ceph osd last-stat-seq $osd) -lt $seq; do
            sleep 1
            if [ $((timeout--)) -eq 0 ]; then
                return 1
            fi
        done
    done
}

function test_flush_pg_stats()
{
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    rados -p rbd put obj /etc/group
    flush_pg_stats
    local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
    raw_bytes_used=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
    bytes_used=`ceph df detail --format=json | jq "$jq_filter.bytes_used"`
    test $raw_bytes_used -gt 0 || return 1
    test $raw_bytes_used == $bytes_used || return 1
}

#######################################################################

##
# Call the **run** function (which must be defined by the caller) with
# the **dir** argument followed by the caller argument list.
#
# If the **run** function returns on error, all logs found in **dir**
# are displayed for diagnostic purposes.
#
# The **teardown** function is called when the **run** function returns
# (on success or on error), to cleanup leftovers. The CEPH_CONF is set
# to /dev/null and CEPH_ARGS is unset so that the tests are protected from
# external interferences.
#
# It is the responsibility of the **run** function to call the
# **setup** function to prepare the test environment (create a temporary
# directory etc.).
#
# The shell is required (via PS4) to display the function and line
# number whenever a statement is executed to help debugging.
#
# @param dir directory in which all data is stored
# @param ... arguments passed transparently to **run**
# @return 0 on success, 1 on error
#
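# Example (a sketch of a standalone test script built on this toolbox;
# the script name, port and test body are illustrative):
#
# ~~~~~~~~~~~~~~~~{.sh}
# source ceph-helpers.sh
#
# function run() {
#     local dir=$1
#     shift
#     export CEPH_MON="127.0.0.1:7100"
#     export CEPH_ARGS
#     CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
#     CEPH_ARGS+="--mon-host=$CEPH_MON "
#     setup $dir || return 1
#     run_mon $dir a || return 1
#     # ... test commands ...
# }
#
# main mytest "$@"
# ~~~~~~~~~~~~~~~~
#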
function main() {
    local dir=td/$1
    shift

    shopt -s -o xtrace
    PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

    export PATH=${CEPH_BUILD_VIRTUALENV}/ceph-disk-virtualenv/bin:${CEPH_BUILD_VIRTUALENV}/ceph-detect-init-virtualenv/bin:.:$PATH # make sure programs from sources are preferred
    #export PATH=$CEPH_ROOT/src/ceph-disk/virtualenv/bin:$CEPH_ROOT/src/ceph-detect-init/virtualenv/bin:.:$PATH # make sure programs from sources are preferred

    export CEPH_CONF=/dev/null
    unset CEPH_ARGS

    local code
    if run $dir "$@" ; then
        code=0
    else
        display_logs $dir
        code=1
    fi
    teardown $dir || return 1
    return $code
}

#######################################################################

function run_tests() {
    shopt -s -o xtrace
    PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

    export PATH=${CEPH_BUILD_VIRTUALENV}/ceph-disk-virtualenv/bin:${CEPH_BUILD_VIRTUALENV}/ceph-detect-init-virtualenv/bin:.:$PATH # make sure programs from sources are preferred
    #export PATH=$CEPH_ROOT/src/ceph-disk/virtualenv/bin:$CEPH_ROOT/src/ceph-detect-init/virtualenv/bin:.:$PATH # make sure programs from sources are preferred

    export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    export CEPH_CONF=/dev/null

    local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
    local dir=td/ceph-helpers

    for func in $funcs ; do
        $func $dir || return 1
    done
}

if test "$1" = TESTS ; then
    shift
    run_tests "$@"
fi

# Local Variables:
# compile-command: "cd ../../src ; make -j4 && ../qa/workunits/ceph-helpers.sh TESTS # test_get_config"
# End: