1 #!/usr/bin/env bash
2 #
3 # Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
4 # Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
5 # Copyright (C) 2014 Federico Gimenez <fgimenez@coit.es>
6 #
7 # Author: Loic Dachary <loic@dachary.org>
8 # Author: Federico Gimenez <fgimenez@coit.es>
9 #
10 # This program is free software; you can redistribute it and/or modify
11 # it under the terms of the GNU Library Public License as published by
12 # the Free Software Foundation; either version 2, or (at your option)
13 # any later version.
14 #
15 # This program is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 # GNU Library Public License for more details.
19 #
20 TIMEOUT=300
21 WAIT_FOR_CLEAN_TIMEOUT=90
22 MAX_TIMEOUT=15
23 PG_NUM=4
24 TMPDIR=${TMPDIR:-/tmp}
25 CEPH_BUILD_VIRTUALENV=${TMPDIR}
26 TESTDIR=${TESTDIR:-${TMPDIR}}
27
28 if type xmlstarlet > /dev/null 2>&1; then
29 XMLSTARLET=xmlstarlet
30 elif type xml > /dev/null 2>&1; then
31 XMLSTARLET=xml
32 else
33 echo "Missing xmlstarlet binary!"
34 exit 1
35 fi
36
37 if [ `uname` = FreeBSD ]; then
38 SED=gsed
39 AWK=gawk
40 DIFFCOLOPTS=""
41 KERNCORE="kern.corefile"
42 else
43 SED=sed
44 AWK=awk
45 termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
46 if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
47 termwidth="-W ${termwidth}"
48 fi
49 DIFFCOLOPTS="-y $termwidth"
50 KERNCORE="kernel.core_pattern"
51 fi
52
53 EXTRA_OPTS=""
54
55 #! @file ceph-helpers.sh
56 # @brief Toolbox to manage a Ceph cluster dedicated to testing
57 #
58 # Example use case:
59 #
60 # ~~~~~~~~~~~~~~~~{.sh}
61 # source ceph-helpers.sh
62 #
63 # function mytest() {
64 # # cleanup leftovers and reset mydir
65 # setup mydir
66 # # create a cluster with one monitor and three osds
67 # run_mon mydir a
68 # run_osd mydir 0
69 # run_osd mydir 2
70 # run_osd mydir 3
71 # # put and get an object
72 # rados --pool rbd put GROUP /etc/group
73 # rados --pool rbd get GROUP /tmp/GROUP
74 # # stop the cluster and cleanup the directory
75 # teardown mydir
76 # }
77 # ~~~~~~~~~~~~~~~~
78 #
79 # The focus is on simplicity and efficiency, in the context of
80 # functional tests. The output is intentionally very verbose
81 # and functions return as soon as an error is found. The caller
82 # is also expected to abort on the first error so that debugging
83 # can be done by looking at the end of the output.
84 #
85 # Each function is documented, implemented and tested independently.
86 # When modifying a helper, the test and the documentation are
87 # expected to be updated and it is easier if they are collocated. A
88 # test for a given function can be run with
89 #
90 # ~~~~~~~~~~~~~~~~{.sh}
91 # ceph-helpers.sh TESTS test_get_osds
92 # ~~~~~~~~~~~~~~~~
93 #
94 # and all the tests (i.e. all functions matching test_*) are run
95 # with:
96 #
97 # ~~~~~~~~~~~~~~~~{.sh}
98 # ceph-helpers.sh TESTS
99 # ~~~~~~~~~~~~~~~~
100 #
101 # A test function takes a single argument: the directory dedicated
102 # to the tests. It is expected to not create any file outside of this
103 # directory and remove it entirely when it completes successfully.
104 #
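#
# Before calling the helpers, the caller typically points CEPH_CONF at
# /dev/null and provides --fsid and --mon-host through CEPH_ARGS (see
# run_mon below). A minimal sketch, with an arbitrary port chosen for
# illustration:
#
# ~~~~~~~~~~~~~~~~{.sh}
# export CEPH_CONF=/dev/null
# export CEPH_ARGS="--fsid=$(uuidgen) --mon-host=127.0.0.1:7018 "
# ~~~~~~~~~~~~~~~~
#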
105
106
107 function get_asok_dir() {
108 if [ -n "$CEPH_ASOK_DIR" ]; then
109 echo "$CEPH_ASOK_DIR"
110 else
111 echo ${TMPDIR:-/tmp}/ceph-asok.$$
112 fi
113 }
114
115 function get_asok_path() {
116 local name=$1
117 if [ -n "$name" ]; then
118 echo $(get_asok_dir)/ceph-$name.asok
119 else
120 echo $(get_asok_dir)/\$cluster-\$name.asok
121 fi
122 }
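#
# A daemon admin socket created under get_asok_dir can be queried with the
# usual ceph daemon sub-commands. For example (osd.0 stands for any daemon
# name):
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph --format=json daemon $(get_asok_path osd.0) config get osd_max_backfills
# ~~~~~~~~~~~~~~~~
#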
123 ##
124 # Cleanup any leftovers found in **dir** via **teardown**
125 # and reset **dir** as an empty environment.
126 #
127 # @param dir path name of the environment
128 # @return 0 on success, 1 on error
129 #
130 function setup() {
131 local dir=$1
132 teardown $dir || return 1
133 mkdir -p $dir
134 mkdir -p $(get_asok_dir)
135 if [ $(ulimit -n) -le 1024 ]; then
136 ulimit -n 4096 || return 1
137 fi
138 if [ -z "$LOCALRUN" ]; then
139 trap "teardown $dir 1" TERM HUP INT
140 fi
141 }
142
143 function test_setup() {
144     local dir=$1
145 setup $dir || return 1
146 test -d $dir || return 1
147 setup $dir || return 1
148 test -d $dir || return 1
149 teardown $dir
150 }
151
152 #######################################################################
153
154 ##
155 # Kill all daemons for which a .pid file exists in **dir** and remove
156 # **dir**. If the file system in which **dir** resides is btrfs, delete all
157 # subvolumes that relate to it.
158 #
159 # @param dir path name of the environment
160 # @param dumplogs pass "1" to dump logs; otherwise logs are dumped only if cores are found
161 # @return 0 on success, 1 on error
162 #
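# Example (the second argument is optional; pass "1" to force a log dump):
#
# ~~~~~~~~~~~~~~~~{.sh}
# teardown $dir || return 1     # remove $dir, dump logs only if cores are found
# teardown $dir 1 || return 1   # remove $dir and always dump the logs
# ~~~~~~~~~~~~~~~~
#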
163 function teardown() {
164 local dir=$1
165 local dumplogs=$2
166 kill_daemons $dir KILL
167 if [ `uname` != FreeBSD ] \
168 && [ $(stat -f -c '%T' .) == "btrfs" ]; then
169 __teardown_btrfs $dir
170 fi
171 local cores="no"
172 local pattern="$(sysctl -n $KERNCORE)"
173 # See if we have apport core handling
174 if [ "${pattern:0:1}" = "|" ]; then
175 # TODO: Where can we get the dumps?
176 # Not sure where the dumps really are so this will look in the CWD
177 pattern=""
178 fi
179 # Locally, core file names start with "core"; on teuthology they end with "core"
180 if ls $(dirname "$pattern") | grep -q '^core\|core$' ; then
181 cores="yes"
182 if [ -n "$LOCALRUN" ]; then
183 mkdir /tmp/cores.$$ 2> /dev/null || true
184 for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
185 mv $i /tmp/cores.$$
186 done
187 fi
188 fi
189 if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
190 if [ -n "$LOCALRUN" ]; then
191 display_logs $dir
192 else
193 # Move logs to where Teuthology will archive it
194 mkdir -p $TESTDIR/archive/log
195 mv $dir/*.log $TESTDIR/archive/log
196 fi
197 fi
198 rm -fr $dir
199 rm -rf $(get_asok_dir)
200 if [ "$cores" = "yes" ]; then
201 echo "ERROR: Failure due to cores found"
202 if [ -n "$LOCALRUN" ]; then
203 echo "Find saved core files in /tmp/cores.$$"
204 fi
205 return 1
206 fi
207 return 0
208 }
209
210 function __teardown_btrfs() {
211 local btrfs_base_dir=$1
212 local btrfs_root=$(df -P . | tail -1 | $AWK '{print $NF}')
213     local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list -t . | $AWK '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/")
214 for subvolume in $btrfs_dirs; do
215 sudo btrfs subvolume delete $btrfs_root/$subvolume
216 done
217 }
218
219 function test_teardown() {
220     local dir=$1
221 setup $dir || return 1
222 teardown $dir || return 1
223 ! test -d $dir || return 1
224 }
225
226 #######################################################################
227
228 ##
229 # Sends a signal to a single daemon.
230 # This is a helper function for kill_daemons
231 #
232 # After the daemon is sent **signal**, its actual termination
233 # will be verified by sending it signal 0. If the daemon is
234 # still alive, kill_daemon will pause for a few seconds and
235 # try again. This will repeat for a fixed number of times
236 # before kill_daemon returns on failure. The list of
237 # sleep intervals can be specified as **delays** and defaults
238 # to:
239 #
240 # 0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
241 #
242 # This sequence is designed to try a very short sleep time (0.1) first, in
243 # case the machine is fast enough for the daemon to terminate in a fraction of a
244 # second. The increasing sleep numbers should give plenty of time for
245 # the daemon to die even on the slowest running machine. If a daemon
246 # takes more than a few minutes to stop (the sum of all sleep times),
247 # there probably is no point in waiting more and a number of things
248 # are likely to go wrong anyway: better give up and return on error.
249 #
250 # @param pidfile path of the file containing the pid of the process to signal
251 # @param send_signal the signal to send
252 # @param delays sequence of sleep times before failure
253 #
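# Example (a sketch; the pid file follows the $dir/$name.pid convention
# used by run_osd and run_mon):
#
# ~~~~~~~~~~~~~~~~{.sh}
# kill_daemon $dir/osd.0.pid TERM || return 1          # default backoff schedule
# kill_daemon $dir/osd.0.pid KILL "1 2 4" || return 1  # shorter custom schedule
# ~~~~~~~~~~~~~~~~
#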
254 function kill_daemon() {
255 local pid=$(cat $1)
256 local send_signal=$2
257 local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
258 local exit_code=1
259 # In order to try after the last large sleep add 0 at the end so we check
260 # one last time before dropping out of the loop
261 for try in $delays 0 ; do
262 if kill -$send_signal $pid 2> /dev/null ; then
263 exit_code=1
264 else
265 exit_code=0
266 break
267 fi
268 send_signal=0
269 sleep $try
270 done;
271 return $exit_code
272 }
273
274 function test_kill_daemon() {
275 local dir=$1
276 setup $dir || return 1
277 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
278 run_mgr $dir x || return 1
279 run_osd $dir 0 || return 1
280
281 name_prefix=osd
282 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
283 #
284 # sending signal 0 won't kill the daemon
285 # waiting just for one second instead of the default schedule
286 # allows us to quickly verify what happens when kill fails
287 # to stop the daemon (i.e. it must return false)
288 #
289 ! kill_daemon $pidfile 0 1 || return 1
290 #
291 # kill just the osd and verify the mon is still responsive
292 #
293 kill_daemon $pidfile TERM || return 1
294 done
295
296 name_prefix=mgr
297 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
298 #
299 # kill the mgr
300 #
301 kill_daemon $pidfile TERM || return 1
302 done
303
304 name_prefix=mon
305 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
306 #
307 # kill the mon and verify it cannot be reached
308 #
309 kill_daemon $pidfile TERM || return 1
310 ! timeout 5 ceph status || return 1
311 done
312
313 teardown $dir || return 1
314 }
315
316 ##
317 # Kill all daemons for which a .pid file exists in **dir**. Each
318 # daemon is sent a **signal** and kill_daemons waits for it to exit
319 # during a few minutes. By default all daemons are killed. If a
320 # **name_prefix** is provided, only the daemons for which a pid
321 # file is found matching the prefix are killed. See run_osd and
322 # run_mon for more information about the name conventions for
323 # the pid files.
324 #
325 # Send TERM to all daemons : kill_daemons $dir
326 # Send KILL to all daemons : kill_daemons $dir KILL
327 # Send KILL to all osds : kill_daemons $dir KILL osd
328 # Send KILL to osd 1 : kill_daemons $dir KILL osd.1
329 #
330 # If a daemon is sent the TERM signal and does not terminate
331 # within a few minutes, it will still be running even after
332 # kill_daemons returns.
333 #
334 # If all daemons are killed successfully the function returns 0.
335 # If at least one daemon remains, this is treated as an
336 # error and the function returns 1.
337 #
338 # @param dir path name of the environment
339 # @param signal name of the first signal (defaults to TERM)
340 # @param name_prefix only kill matching daemons (defaults to all)
341 # @param delays sequence of sleep times before failure
342 # @return 0 on success, 1 on error
343 #
344 function kill_daemons() {
345 local trace=$(shopt -q -o xtrace && echo true || echo false)
346 $trace && shopt -u -o xtrace
347 local dir=$1
348 local signal=${2:-TERM}
349 local name_prefix=$3 # optional, osd, mon, osd.1
350 local delays=$4 #optional timing
351 local status=0
352 local pids=""
353
354 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
355 run_in_background pids kill_daemon $pidfile $signal $delays
356 done
357
358 wait_background pids
359 status=$?
360
361 $trace && shopt -s -o xtrace
362 return $status
363 }
364
365 function test_kill_daemons() {
366 local dir=$1
367 setup $dir || return 1
368 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
369 run_mgr $dir x || return 1
370 run_osd $dir 0 || return 1
371 #
372 # sending signal 0 won't kill the daemon
373 # waiting just for one second instead of the default schedule
374 # allows us to quickly verify what happens when kill fails
375 # to stop the daemon (i.e. it must return false)
376 #
377 ! kill_daemons $dir 0 osd 1 || return 1
378 #
379 # kill just the osd and verify the mon is still responsive
380 #
381 kill_daemons $dir TERM osd || return 1
382 #
383 # kill the mgr
384 #
385 kill_daemons $dir TERM mgr || return 1
386 #
387 # kill the mon and verify it cannot be reached
388 #
389 kill_daemons $dir TERM || return 1
390 ! timeout 5 ceph status || return 1
391 teardown $dir || return 1
392 }
393
394 #
395 # return a random TCP port which is not used yet
396 #
397 # please note, there is a race window: another process may bind the port
398 # between the time it is reported free and the time the caller binds to it.
399 #
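# For example (subject to the race described above):
#
# ~~~~~~~~~~~~~~~~{.sh}
# port=$(get_unused_port)
# CEPH_ARGS+="--mon-host=127.0.0.1:$port "
# ~~~~~~~~~~~~~~~~
#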
400 function get_unused_port() {
401 local ip=127.0.0.1
402 python3 -c "import socket; s=socket.socket(); s.bind(('$ip', 0)); print(s.getsockname()[1]); s.close()"
403 }
404
405 #######################################################################
406
407 ##
408 # Run a monitor by the name mon.**id** with data in **dir**/**id**.
409 # The logs can be found in **dir**/mon.**id**.log and the pid file
410 # is **dir**/mon.**id**.pid and the admin socket is
411 # **dir**/**id**/ceph-mon.**id**.asok.
412 #
413 # The remaining arguments are passed verbatim to ceph-mon --mkfs
414 # and the ceph-mon daemon.
415 #
416 # Two mandatory arguments must be provided: --fsid and --mon-host
417 # Instead of adding them to every call to run_mon, they can be
418 # set in the CEPH_ARGS environment variable to be read implicitly
419 # by every ceph command.
420 #
421 # The CEPH_CONF variable is expected to be set to /dev/null to
422 # only rely on arguments for configuration.
423 #
424 # Examples:
425 #
426 # CEPH_ARGS="--fsid=$(uuidgen) "
427 # CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
428 # run_mon $dir a # spawn a mon and bind port 7018
429 # run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
430 #
431 # Note that run_mon itself does not create any pool. Tests that need
432 # the rbd pool call create_rbd_pool once enough OSDs are up; it creates
433 # a replicated pool with a small number of placement groups ($PG_NUM)
434 # to speed up initialization (see test_run_mon below).
436 #
437 # A **dir**/ceph.conf file is created but not meant to be used by any
438 # function. It is convenient for debugging a failure with:
439 #
440 # ceph --conf **dir**/ceph.conf -s
441 #
442 # @param dir path name of the environment
443 # @param id mon identifier
444 # @param ... can be any option valid for ceph-mon
445 # @return 0 on success, 1 on error
446 #
447 function run_mon() {
448 local dir=$1
449 shift
450 local id=$1
451 shift
452 local data=$dir/$id
453
454 ceph-mon \
455 --id $id \
456 --mkfs \
457 --mon-data=$data \
458 --run-dir=$dir \
459 "$@" || return 1
460
461 ceph-mon \
462 --id $id \
463 --osd-failsafe-full-ratio=.99 \
464 --mon-osd-full-ratio=.99 \
465 --mon-data-avail-crit=1 \
466 --mon-data-avail-warn=5 \
467 --paxos-propose-interval=0.1 \
468 --osd-crush-chooseleaf-type=0 \
469 $EXTRA_OPTS \
470 --debug-mon 20 \
471 --debug-ms 20 \
472 --debug-paxos 20 \
473 --chdir= \
474 --mon-data=$data \
475 --log-file=$dir/\$name.log \
476 --admin-socket=$(get_asok_path) \
477 --mon-cluster-log-file=$dir/log \
478 --run-dir=$dir \
479 --pid-file=$dir/\$name.pid \
480 --mon-allow-pool-delete \
481 --mon-allow-pool-size-one \
482 --osd-pool-default-pg-autoscale-mode off \
483 --mon-osd-backfillfull-ratio .99 \
484 --mon-warn-on-insecure-global-id-reclaim-allowed=false \
485 "$@" || return 1
486
487 cat > $dir/ceph.conf <<EOF
488 [global]
489 fsid = $(get_config mon $id fsid)
490 mon host = $(get_config mon $id mon_host)
491 EOF
492 }
493
494 function test_run_mon() {
495 local dir=$1
496
497 setup $dir || return 1
498
499 run_mon $dir a --mon-initial-members=a || return 1
500 ceph mon dump | grep "mon.a" || return 1
501 kill_daemons $dir || return 1
502
503 run_mon $dir a --osd_pool_default_size=3 || return 1
504 run_osd $dir 0 || return 1
505 run_osd $dir 1 || return 1
506 run_osd $dir 2 || return 1
507 create_rbd_pool || return 1
508 ceph osd dump | grep "pool 1 'rbd'" || return 1
509 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
510 config get osd_pool_default_size)
511 test "$size" = '{"osd_pool_default_size":"3"}' || return 1
512
513 ! CEPH_ARGS='' ceph status || return 1
514 CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1
515
516 kill_daemons $dir || return 1
517
518 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
519 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
520 config get osd_pool_default_size)
521 test "$size" = '{"osd_pool_default_size":"1"}' || return 1
522 kill_daemons $dir || return 1
523
524 CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
525 run_mon $dir a || return 1
526 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
527 config get osd_pool_default_size)
528 test "$size" = '{"osd_pool_default_size":"2"}' || return 1
529 kill_daemons $dir || return 1
530
531 teardown $dir || return 1
532 }
533
534 function create_rbd_pool() {
535 ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
536 create_pool rbd $PG_NUM || return 1
537 rbd pool init rbd
538 }
539
540 function create_pool() {
541 ceph osd pool create "$@"
542 sleep 1
543 }
544
545 function delete_pool() {
546 local poolname=$1
547 ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
548 }
549
550 #######################################################################
551
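##
# Run a manager by the name mgr.**id** with data in **dir**/**id**,
# following the same conventions as run_mon: the log is
# **dir**/mgr.**id**.log, the pid file is **dir**/mgr.**id**.pid and the
# admin socket is created under get_asok_dir. The remaining arguments are
# passed verbatim to ceph-mgr. A typical call, as used by the tests in
# this file:
#
# ~~~~~~~~~~~~~~~~{.sh}
# run_mgr $dir x || return 1
# ~~~~~~~~~~~~~~~~
#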
552 function run_mgr() {
553 local dir=$1
554 shift
555 local id=$1
556 shift
557 local data=$dir/$id
558
559 ceph config set mgr mgr_pool false --force
560 ceph-mgr \
561 --id $id \
562 $EXTRA_OPTS \
563 --osd-failsafe-full-ratio=.99 \
564 --debug-mgr 20 \
565 --debug-objecter 20 \
566 --debug-ms 20 \
567 --debug-paxos 20 \
568 --chdir= \
569 --mgr-data=$data \
570 --log-file=$dir/\$name.log \
571 --admin-socket=$(get_asok_path) \
572 --run-dir=$dir \
573 --pid-file=$dir/\$name.pid \
574 --mgr-module-path=$(realpath ${CEPH_ROOT}/src/pybind/mgr) \
575 "$@" || return 1
576 }
577
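##
# Run a metadata server by the name mds.**id** with data in
# **dir**/**id**, mirroring run_mgr above. The remaining arguments are
# passed verbatim to ceph-mds. For example (the id is arbitrary):
#
# ~~~~~~~~~~~~~~~~{.sh}
# run_mds $dir a || return 1
# ~~~~~~~~~~~~~~~~
#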
578 function run_mds() {
579 local dir=$1
580 shift
581 local id=$1
582 shift
583 local data=$dir/$id
584
585 ceph-mds \
586 --id $id \
587 $EXTRA_OPTS \
588 --debug-mds 20 \
589 --debug-objecter 20 \
590 --debug-ms 20 \
591 --chdir= \
592 --mds-data=$data \
593 --log-file=$dir/\$name.log \
594 --admin-socket=$(get_asok_path) \
595 --run-dir=$dir \
596 --pid-file=$dir/\$name.pid \
597 "$@" || return 1
598 }
599
600 #######################################################################
601
602 ##
603 # Create (prepare) and run (activate) an osd by the name osd.**id**
604 # with data in **dir**/**id**. The logs can be found in
605 # **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
606 # the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
607 #
608 # The remaining arguments are passed verbatim to ceph-osd.
609 #
610 # Two mandatory arguments must be provided: --fsid and --mon-host
611 # Instead of adding them to every call to run_osd, they can be
612 # set in the CEPH_ARGS environment variable to be read implicitly
613 # by every ceph command.
614 #
615 # The CEPH_CONF variable is expected to be set to /dev/null to
616 # only rely on arguments for configuration.
617 #
618 # The run_osd function creates the OSD data directory on the **dir**/**id**
619 # directory and relies on the activate_osd function to run the daemon.
620 #
621 # Examples:
622 #
623 # CEPH_ARGS="--fsid=$(uuidgen) "
624 # CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
625 # run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
626 #
627 # @param dir path name of the environment
628 # @param id osd identifier
629 # @param ... can be any option valid for ceph-osd
630 # @return 0 on success, 1 on error
631 #
632 function run_osd() {
633 local dir=$1
634 shift
635 local id=$1
636 shift
637 local osd_data=$dir/$id
638
639 local ceph_args="$CEPH_ARGS"
640 ceph_args+=" --osd-failsafe-full-ratio=.99"
641 ceph_args+=" --osd-journal-size=100"
642 ceph_args+=" --osd-scrub-load-threshold=2000"
643 ceph_args+=" --osd-data=$osd_data"
644 ceph_args+=" --osd-journal=${osd_data}/journal"
645 ceph_args+=" --chdir="
646 ceph_args+=$EXTRA_OPTS
647 ceph_args+=" --run-dir=$dir"
648 ceph_args+=" --admin-socket=$(get_asok_path)"
649 ceph_args+=" --debug-osd=20"
650 ceph_args+=" --debug-ms=1"
651 ceph_args+=" --debug-monc=20"
652 ceph_args+=" --log-file=$dir/\$name.log"
653 ceph_args+=" --pid-file=$dir/\$name.pid"
654 ceph_args+=" --osd-max-object-name-len=460"
655 ceph_args+=" --osd-max-object-namespace-len=64"
656 ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
657 ceph_args+=" "
658 ceph_args+="$@"
659 mkdir -p $osd_data
660
661 local uuid=`uuidgen`
662 echo "add osd$id $uuid"
663 OSD_SECRET=$(ceph-authtool --gen-print-key)
664 echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
665 ceph osd new $uuid -i $osd_data/new.json
666 rm $osd_data/new.json
667 ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid
668
669 local key_fn=$osd_data/keyring
670 cat > $key_fn<<EOF
671 [osd.$id]
672 key = $OSD_SECRET
673 EOF
674 echo adding osd$id key to auth repository
675 ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
676 echo start osd.$id
677 ceph-osd -i $id $ceph_args &
678
679 # If noup is set, then can't wait for this osd
680 if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
681 return 0
682 fi
683 wait_for_osd up $id || return 1
684
685 }
686
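##
# Same as run_osd above, except the OSD is created with
# --osd-objectstore=filestore instead of the default objectstore.
# Usage mirrors run_osd, e.g.:
#
# ~~~~~~~~~~~~~~~~{.sh}
# run_osd_filestore $dir 0 || return 1
# ~~~~~~~~~~~~~~~~
#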
687 function run_osd_filestore() {
688 local dir=$1
689 shift
690 local id=$1
691 shift
692 local osd_data=$dir/$id
693
694 local ceph_args="$CEPH_ARGS"
695 ceph_args+=" --osd-failsafe-full-ratio=.99"
696 ceph_args+=" --osd-journal-size=100"
697 ceph_args+=" --osd-scrub-load-threshold=2000"
698 ceph_args+=" --osd-data=$osd_data"
699 ceph_args+=" --osd-journal=${osd_data}/journal"
700 ceph_args+=" --chdir="
701 ceph_args+=$EXTRA_OPTS
702 ceph_args+=" --run-dir=$dir"
703 ceph_args+=" --admin-socket=$(get_asok_path)"
704 ceph_args+=" --debug-osd=20"
705 ceph_args+=" --debug-ms=1"
706 ceph_args+=" --debug-monc=20"
707 ceph_args+=" --log-file=$dir/\$name.log"
708 ceph_args+=" --pid-file=$dir/\$name.pid"
709 ceph_args+=" --osd-max-object-name-len=460"
710 ceph_args+=" --osd-max-object-namespace-len=64"
711 ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
712 ceph_args+=" "
713 ceph_args+="$@"
714 mkdir -p $osd_data
715
716 local uuid=`uuidgen`
717     echo "add osd$id $uuid"
718 OSD_SECRET=$(ceph-authtool --gen-print-key)
719 echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
720 ceph osd new $uuid -i $osd_data/new.json
721 rm $osd_data/new.json
722 ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid --osd-objectstore=filestore
723
724 local key_fn=$osd_data/keyring
725 cat > $key_fn<<EOF
726 [osd.$id]
727 key = $OSD_SECRET
728 EOF
729 echo adding osd$id key to auth repository
730 ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
731 echo start osd.$id
732 ceph-osd -i $id $ceph_args &
733
734 # If noup is set, then can't wait for this osd
735 if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
736 return 0
737 fi
738 wait_for_osd up $id || return 1
739
740
741 }
742
743 function test_run_osd() {
744 local dir=$1
745
746 setup $dir || return 1
747
748 run_mon $dir a || return 1
749 run_mgr $dir x || return 1
750
751 run_osd $dir 0 || return 1
752 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
753 config get osd_max_backfills)
754 echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
755
756 run_osd $dir 1 --osd-max-backfills 20 || return 1
757 local scheduler=$(get_op_scheduler 1)
758 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
759 config get osd_max_backfills)
760 if [ "$scheduler" = "mclock_scheduler" ]; then
761 test "$backfills" = '{"osd_max_backfills":"1000"}' || return 1
762 else
763 test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
764 fi
765
766 CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
767 local scheduler=$(get_op_scheduler 2)
768 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
769 config get osd_max_backfills)
770 if [ "$scheduler" = "mclock_scheduler" ]; then
771 test "$backfills" = '{"osd_max_backfills":"1000"}' || return 1
772 else
773 test "$backfills" = '{"osd_max_backfills":"30"}' || return 1
774 fi
775
776 teardown $dir || return 1
777 }
778
779 #######################################################################
780
781 ##
782 # Shutdown and remove all traces of the osd by the name osd.**id**.
783 #
784 # The OSD is shutdown with the TERM signal. It is then removed from
785 # the auth list, crush map, osd map etc and the files associated with
786 # it are also removed.
787 #
788 # @param dir path name of the environment
789 # @param id osd identifier
790 # @return 0 on success, 1 on error
791 #
792 function destroy_osd() {
793 local dir=$1
794 local id=$2
795
796 ceph osd out osd.$id || return 1
797 kill_daemons $dir TERM osd.$id || return 1
798 ceph osd down osd.$id || return 1
799 ceph osd purge osd.$id --yes-i-really-mean-it || return 1
800 teardown $dir/$id || return 1
801 rm -fr $dir/$id
802 }
803
804 function test_destroy_osd() {
805 local dir=$1
806
807 setup $dir || return 1
808 run_mon $dir a || return 1
809 run_mgr $dir x || return 1
810 run_osd $dir 0 || return 1
811 destroy_osd $dir 0 || return 1
812     ! ceph osd dump | grep "osd.0 " || return 1
813 teardown $dir || return 1
814 }
815
816 #######################################################################
817
818 ##
819 # Run (activate) an osd by the name osd.**id** with data in
820 # **dir**/**id**. The logs can be found in **dir**/osd.**id**.log,
821 # the pid file is **dir**/osd.**id**.pid and the admin socket is
822 # **dir**/**id**/ceph-osd.**id**.asok.
823 #
824 # The remaining arguments are passed verbatim to ceph-osd.
825 #
826 # Two mandatory arguments must be provided: --fsid and --mon-host
827 # Instead of adding them to every call to activate_osd, they can be
828 # set in the CEPH_ARGS environment variable to be read implicitly
829 # by every ceph command.
830 #
831 # The CEPH_CONF variable is expected to be set to /dev/null to
832 # only rely on arguments for configuration.
833 #
834 # The activate_osd function expects a valid OSD data directory
835 # in **dir**/**id**, either just created via run_osd or re-using
836 # one left by a previous run of ceph-osd. The ceph-osd daemon is
837 # run directly in the foreground.
838 #
839 # The activate_osd function blocks until the monitor reports the osd
840 # up. If it fails to do so within $TIMEOUT seconds, activate_osd
841 # fails.
842 #
843 # Examples:
844 #
845 # CEPH_ARGS="--fsid=$(uuidgen) "
846 # CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
847 # activate_osd $dir 0 # activate an osd using the monitor listening on 7018
848 #
849 # @param dir path name of the environment
850 # @param id osd identifier
851 # @param ... can be any option valid for ceph-osd
852 # @return 0 on success, 1 on error
853 #
854 function activate_osd() {
855 local dir=$1
856 shift
857 local id=$1
858 shift
859 local osd_data=$dir/$id
860
861 local ceph_args="$CEPH_ARGS"
862 ceph_args+=" --osd-failsafe-full-ratio=.99"
863 ceph_args+=" --osd-journal-size=100"
864 ceph_args+=" --osd-scrub-load-threshold=2000"
865 ceph_args+=" --osd-data=$osd_data"
866 ceph_args+=" --osd-journal=${osd_data}/journal"
867 ceph_args+=" --chdir="
868 ceph_args+=$EXTRA_OPTS
869 ceph_args+=" --run-dir=$dir"
870 ceph_args+=" --admin-socket=$(get_asok_path)"
871 ceph_args+=" --debug-osd=20"
872 ceph_args+=" --log-file=$dir/\$name.log"
873 ceph_args+=" --pid-file=$dir/\$name.pid"
874 ceph_args+=" --osd-max-object-name-len=460"
875 ceph_args+=" --osd-max-object-namespace-len=64"
876 ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
877 ceph_args+=" "
878 ceph_args+="$@"
879 mkdir -p $osd_data
880
881 echo start osd.$id
882 ceph-osd -i $id $ceph_args &
883
884 [ "$id" = "$(cat $osd_data/whoami)" ] || return 1
885
886 # If noup is set, then can't wait for this osd
887 if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
888 return 0
889 fi
890 wait_for_osd up $id || return 1
891 }
892
893 function test_activate_osd() {
894 local dir=$1
895
896 setup $dir || return 1
897
898 run_mon $dir a || return 1
899 run_mgr $dir x || return 1
900
901 run_osd $dir 0 || return 1
902 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
903 config get osd_max_backfills)
904 echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
905
906 kill_daemons $dir TERM osd || return 1
907
908 activate_osd $dir 0 --osd-max-backfills 20 || return 1
909 local scheduler=$(get_op_scheduler 0)
910 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
911 config get osd_max_backfills)
912 if [ "$scheduler" = "mclock_scheduler" ]; then
913 test "$backfills" = '{"osd_max_backfills":"1000"}' || return 1
914 else
915 test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
916 fi
917
918 teardown $dir || return 1
919 }
920
921 function test_activate_osd_after_mark_down() {
922 local dir=$1
923
924 setup $dir || return 1
925
926 run_mon $dir a || return 1
927 run_mgr $dir x || return 1
928
929 run_osd $dir 0 || return 1
930 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
931 config get osd_max_backfills)
932 echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
933
934 kill_daemons $dir TERM osd || return 1
935 ceph osd down 0 || return 1
936 wait_for_osd down 0 || return 1
937
938 activate_osd $dir 0 --osd-max-backfills 20 || return 1
939 local scheduler=$(get_op_scheduler 0)
940 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
941 config get osd_max_backfills)
942 if [ "$scheduler" = "mclock_scheduler" ]; then
943 test "$backfills" = '{"osd_max_backfills":"1000"}' || return 1
944 else
945 test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
946 fi
947
948 teardown $dir || return 1
949 }
950
951 function test_activate_osd_skip_benchmark() {
952 local dir=$1
953
954 setup $dir || return 1
955
956 run_mon $dir a || return 1
957 run_mgr $dir x || return 1
958
959 # Skip the osd benchmark during first osd bring-up.
960 run_osd $dir 0 --osd-op-queue=mclock_scheduler \
961 --osd-mclock-skip-benchmark=true || return 1
962 local max_iops_hdd_def=$(CEPH_ARGS='' ceph --format=json daemon \
963 $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_hdd)
964 local max_iops_ssd_def=$(CEPH_ARGS='' ceph --format=json daemon \
965 $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_ssd)
966
967 kill_daemons $dir TERM osd || return 1
968 ceph osd down 0 || return 1
969 wait_for_osd down 0 || return 1
970
971 # Skip the osd benchmark during activation as well. Validate that
972 # the max osd capacities are left unchanged.
973 activate_osd $dir 0 --osd-op-queue=mclock_scheduler \
974 --osd-mclock-skip-benchmark=true || return 1
975 local max_iops_hdd_after_boot=$(CEPH_ARGS='' ceph --format=json daemon \
976 $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_hdd)
977 local max_iops_ssd_after_boot=$(CEPH_ARGS='' ceph --format=json daemon \
978 $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_ssd)
979
980 test "$max_iops_hdd_def" = "$max_iops_hdd_after_boot" || return 1
981 test "$max_iops_ssd_def" = "$max_iops_ssd_after_boot" || return 1
982
983 teardown $dir || return 1
984 }
985 #######################################################################
986
987 ##
988 # Wait until the OSD **id** is either up or down, as specified by
989 # **state**. It fails after $TIMEOUT seconds.
990 #
991 # @param state either up or down
992 # @param id osd identifier
993 # @return 0 on success, 1 on error
994 #
995 function wait_for_osd() {
996 local state=$1
997 local id=$2
998
999 status=1
1000 for ((i=0; i < $TIMEOUT; i++)); do
1001 echo $i
1002 if ! ceph osd dump | grep "osd.$id $state"; then
1003 sleep 1
1004 else
1005 status=0
1006 break
1007 fi
1008 done
1009 return $status
1010 }
1011
1012 function test_wait_for_osd() {
1013 local dir=$1
1014 setup $dir || return 1
1015 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1016 run_mgr $dir x || return 1
1017 run_osd $dir 0 || return 1
1018 run_osd $dir 1 || return 1
1019 wait_for_osd up 0 || return 1
1020 wait_for_osd up 1 || return 1
1021 kill_daemons $dir TERM osd.0 || return 1
1022 wait_for_osd down 0 || return 1
1023 ( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
1024 teardown $dir || return 1
1025 }
1026
1027 #######################################################################
1028
1029 ##
1030 # Display the list of OSD ids supporting the **objectname** stored in
1031 # **poolname**, as reported by ceph osd map.
1032 #
1033 # @param poolname an existing pool
1034 # @param objectname an objectname (may or may not exist)
1035 # @param STDOUT white space separated list of OSD ids
1036 # @return 0 on success, 1 on error
1037 #
1038 function get_osds() {
1039 local poolname=$1
1040 local objectname=$2
1041
1042 local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
1043 jq '.acting | .[]')
1044 # get rid of the trailing space
1045 echo $osds
1046 }
1047
1048 function test_get_osds() {
1049 local dir=$1
1050
1051 setup $dir || return 1
1052 run_mon $dir a --osd_pool_default_size=2 || return 1
1053 run_mgr $dir x || return 1
1054 run_osd $dir 0 || return 1
1055 run_osd $dir 1 || return 1
1056 create_rbd_pool || return 1
1057 wait_for_clean || return 1
1058 create_rbd_pool || return 1
1059 get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
1060 teardown $dir || return 1
1061 }
1062
1063 #######################################################################
1064
1065 ##
1066 # Wait for the monitors to form a quorum (optionally, of size N)
1067 #
1068 # @param timeout maximum duration to wait for the quorum to form (defaults to 300)
1069 # @param quorumsize size of quorum to wait for
1070 # @return 0 on success, 1 on error
1071 #
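# Examples:
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_quorum || return 1        # wait up to 300 seconds for any quorum
# wait_for_quorum 300 3 || return 1  # wait up to 300 seconds for a quorum of 3 mons
# ~~~~~~~~~~~~~~~~
#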
1072 function wait_for_quorum() {
1073 local timeout=$1
1074 local quorumsize=$2
1075
1076 if [[ -z "$timeout" ]]; then
1077 timeout=300
1078 fi
1079
1080 if [[ -z "$quorumsize" ]]; then
1081 timeout $timeout ceph quorum_status --format=json >&/dev/null || return 1
1082 return 0
1083 fi
1084
1085 no_quorum=1
1086 wait_until=$((`date +%s` + $timeout))
1087 while [[ $(date +%s) -lt $wait_until ]]; do
1088 jqfilter='.quorum | length == '$quorumsize
1089 jqinput="$(timeout $timeout ceph quorum_status --format=json 2>/dev/null)"
1090 res=$(echo $jqinput | jq "$jqfilter")
1091 if [[ "$res" == "true" ]]; then
1092 no_quorum=0
1093 break
1094 fi
1095 done
1096 return $no_quorum
1097 }
1098
1099 #######################################################################
1100
1101 ##
1102 # Return the PG supporting the **objectname** stored in
1103 # **poolname**, as reported by ceph osd map.
1104 #
1105 # @param poolname an existing pool
1106 # @param objectname an objectname (may or may not exist)
1107 # @param STDOUT a PG
1108 # @return 0 on success, 1 on error
1109 #
1110 function get_pg() {
1111 local poolname=$1
1112 local objectname=$2
1113
1114 ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
1115 }
1116
1117 function test_get_pg() {
1118 local dir=$1
1119
1120 setup $dir || return 1
1121 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1122 run_mgr $dir x || return 1
1123 run_osd $dir 0 || return 1
1124 create_rbd_pool || return 1
1125 wait_for_clean || return 1
1126 get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
1127 teardown $dir || return 1
1128 }
1129
1130 #######################################################################
1131
1132 ##
1133 # Return the value of the **config**, obtained via the config get command
1134 # of the admin socket of **daemon**.**id**.
1135 #
1136 # @param daemon mon or osd
1137 # @param id mon or osd ID
1138 # @param config the configuration variable name as found in config_opts.h
1139 # @param STDOUT the config value
1140 # @return 0 on success, 1 on error
1141 #
1142 function get_config() {
1143 local daemon=$1
1144 local id=$2
1145 local config=$3
1146
1147 CEPH_ARGS='' \
1148 ceph --format json daemon $(get_asok_path $daemon.$id) \
1149 config get $config 2> /dev/null | \
1150 jq -r ".$config"
1151 }
1152
1153 function test_get_config() {
1154 local dir=$1
1155
1156 # override the default config using command line arg and check it
1157 setup $dir || return 1
1158 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1159 test $(get_config mon a osd_pool_default_size) = 1 || return 1
1160 run_mgr $dir x || return 1
1161 run_osd $dir 0 --osd_max_scrubs=3 || return 1
1162 test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
1163 teardown $dir || return 1
1164 }
1165
1166 #######################################################################
1167
1168 ##
1169 # Set the **config** to specified **value**, via the config set command
1170 # of the admin socket of **daemon**.**id**
1171 #
1172 # @param daemon mon or osd
1173 # @param id mon or osd ID
1174 # @param config the configuration variable name as found in config_opts.h
1175 # @param value the config value
1176 # @return 0 on success, 1 on error
1177 #
1178 function set_config() {
1179 local daemon=$1
1180 local id=$2
1181 local config=$3
1182 local value=$4
1183
1184 test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
1185 config set $config $value 2> /dev/null | \
1186 jq 'has("success")') == true
1187 }
1188
1189 function test_set_config() {
1190 local dir=$1
1191
1192 setup $dir || return 1
1193 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1194 test $(get_config mon a ms_crc_header) = true || return 1
1195 set_config mon a ms_crc_header false || return 1
1196 test $(get_config mon a ms_crc_header) = false || return 1
1197 set_config mon a ms_crc_header true || return 1
1198 test $(get_config mon a ms_crc_header) = true || return 1
1199 teardown $dir || return 1
1200 }
1201
1202 #######################################################################
1203
1204 ##
1205 # Return the OSD id of the primary OSD supporting the **objectname**
1206 # stored in **poolname**, as reported by ceph osd map.
1207 #
1208 # @param poolname an existing pool
1209 # @param objectname an objectname (may or may not exist)
1210 # @param STDOUT the primary OSD id
1211 # @return 0 on success, 1 on error
1212 #
1213 function get_primary() {
1214 local poolname=$1
1215 local objectname=$2
1216
1217 ceph --format json osd map $poolname $objectname 2>/dev/null | \
1218 jq '.acting_primary'
1219 }
1220
1221 function test_get_primary() {
1222 local dir=$1
1223
1224 setup $dir || return 1
1225 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1226 local osd=0
1227 run_mgr $dir x || return 1
1228 run_osd $dir $osd || return 1
1229 create_rbd_pool || return 1
1230 wait_for_clean || return 1
1231 test $(get_primary rbd GROUP) = $osd || return 1
1232 teardown $dir || return 1
1233 }
1234
1235 #######################################################################
1236
1237 ##
1238 # Return the id of any OSD supporting the **objectname** stored in
1239 # **poolname**, as reported by ceph osd map, except the primary.
1240 #
1241 # @param poolname an existing pool
1242 # @param objectname an objectname (may or may not exist)
1243 # @param STDOUT the OSD id
1244 # @return 0 on success, 1 on error
1245 #
1246 function get_not_primary() {
1247 local poolname=$1
1248 local objectname=$2
1249
1250 local primary=$(get_primary $poolname $objectname)
1251 ceph --format json osd map $poolname $objectname 2>/dev/null | \
1252 jq ".acting | map(select (. != $primary)) | .[0]"
1253 }
1254
1255 function test_get_not_primary() {
1256 local dir=$1
1257
1258 setup $dir || return 1
1259 run_mon $dir a --osd_pool_default_size=2 || return 1
1260 run_mgr $dir x || return 1
1261 run_osd $dir 0 || return 1
1262 run_osd $dir 1 || return 1
1263 create_rbd_pool || return 1
1264 wait_for_clean || return 1
1265 local primary=$(get_primary rbd GROUP)
1266 local not_primary=$(get_not_primary rbd GROUP)
1267 test $not_primary != $primary || return 1
1268 test $not_primary = 0 -o $not_primary = 1 || return 1
1269 teardown $dir || return 1
1270 }
1271
1272 #######################################################################
1273
1274 function _objectstore_tool_nodown() {
1275 local dir=$1
1276 shift
1277 local id=$1
1278 shift
1279 local osd_data=$dir/$id
1280
1281 ceph-objectstore-tool \
1282 --data-path $osd_data \
1283 "$@" || return 1
1284 }
1285
1286 function _objectstore_tool_nowait() {
1287 local dir=$1
1288 shift
1289 local id=$1
1290 shift
1291
1292 kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1
1293
1294 _objectstore_tool_nodown $dir $id "$@" || return 1
1295 activate_osd $dir $id $ceph_osd_args >&2 || return 1
1296 }
1297
1298 ##
1299 # Run ceph-objectstore-tool against the OSD **id** using the data path
1300 # **dir**. The OSD is killed with TERM prior to running
1301 # ceph-objectstore-tool because access to the data path is
1302 # exclusive. The OSD is restarted after the command completes. The
1303 # objectstore_tool returns after all PG are active+clean again.
1304 #
1305 # @param dir the data path of the OSD
1306 # @param id the OSD id
1307 # @param ... arguments to ceph-objectstore-tool
1308 # @param STDIN the input of ceph-objectstore-tool
1309 # @param STDOUT the output of ceph-objectstore-tool
1310 # @return 0 on success, 1 on error
1311 #
1312 # The value of $ceph_osd_args will be passed to restarted osds
1313 #
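# Example (get-bytes as used by the test below; the set-bytes call via
# STDIN is only an illustration and assumes the object already exists):
#
# ~~~~~~~~~~~~~~~~{.sh}
# objectstore_tool $dir 0 GROUP get-bytes > $dir/GROUP || return 1
# objectstore_tool $dir 0 GROUP set-bytes < /etc/group || return 1
# ~~~~~~~~~~~~~~~~
#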
1314 function objectstore_tool() {
1315 local dir=$1
1316 shift
1317 local id=$1
1318 shift
1319
1320 _objectstore_tool_nowait $dir $id "$@" || return 1
1321 wait_for_clean >&2
1322 }
1323
1324 function test_objectstore_tool() {
1325 local dir=$1
1326
1327 setup $dir || return 1
1328 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1329 local osd=0
1330 run_mgr $dir x || return 1
1331 run_osd $dir $osd || return 1
1332 create_rbd_pool || return 1
1333 wait_for_clean || return 1
1334 rados --pool rbd put GROUP /etc/group || return 1
1335 objectstore_tool $dir $osd GROUP get-bytes | \
1336 diff - /etc/group
1337 ! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
1338 teardown $dir || return 1
1339 }
1340
1341 #######################################################################
1342
1343 ##
1344 # Predicate checking if there is an ongoing recovery in the
1345 # cluster. If any of the recovering_{keys,bytes,objects}_per_sec
1346 # counters are reported by ceph status, it means recovery is in
1347 # progress.
1348 #
1349 # @return 0 if recovery in progress, 1 otherwise
1350 #
1351 function get_is_making_recovery_progress() {
1352 local recovery_progress
1353 recovery_progress+=".recovering_keys_per_sec + "
1354 recovery_progress+=".recovering_bytes_per_sec + "
1355 recovery_progress+=".recovering_objects_per_sec"
1356 local progress=$(ceph --format json status 2>/dev/null | \
1357 jq -r ".pgmap | $recovery_progress")
1358 test "$progress" != null
1359 }
1360
1361 function test_get_is_making_recovery_progress() {
1362 local dir=$1
1363
1364 setup $dir || return 1
1365 run_mon $dir a || return 1
1366 run_mgr $dir x || return 1
1367 ! get_is_making_recovery_progress || return 1
1368 teardown $dir || return 1
1369 }
1370
1371 #######################################################################
1372
1373 ##
1374 # Return the number of active and clean PGs in the cluster. A PG is counted if
1375 # ceph pg dump pgs reports it both **active** and **clean** and
1376 # not **stale**.
1377 #
1378 # @param STDOUT the number of active PGs
1379 # @return 0 on success, 1 on error
1380 #
1381 function get_num_active_clean() {
1382 local expression
1383 expression+="select(contains(\"active\") and contains(\"clean\")) | "
1384 expression+="select(contains(\"stale\") | not)"
1385 ceph --format json pg dump pgs 2>/dev/null | \
1386 jq ".pg_stats | [.[] | .state | $expression] | length"
1387 }
1388
1389 function test_get_num_active_clean() {
1390 local dir=$1
1391
1392 setup $dir || return 1
1393 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1394 run_mgr $dir x || return 1
1395 run_osd $dir 0 || return 1
1396 create_rbd_pool || return 1
1397 wait_for_clean || return 1
1398 local num_active_clean=$(get_num_active_clean)
1399 test "$num_active_clean" = $PG_NUM || return 1
1400 teardown $dir || return 1
1401 }
1402
1403 ##
1404 # Return the number of active or peered PGs in the cluster. A PG matches if
1405 # ceph pg dump pgs reports it is either **active** or **peered** and
1406 # not **stale**.
1407 #
1408 # @param STDOUT the number of active PGs
1409 # @return 0 on success, 1 on error
1410 #
1411 function get_num_active_or_peered() {
1412 local expression
1413 expression+="select(contains(\"active\") or contains(\"peered\")) | "
1414 expression+="select(contains(\"stale\") | not)"
1415 ceph --format json pg dump pgs 2>/dev/null | \
1416 jq ".pg_stats | [.[] | .state | $expression] | length"
1417 }
1418
1419 function test_get_num_active_or_peered() {
1420 local dir=$1
1421
1422 setup $dir || return 1
1423 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1424 run_mgr $dir x || return 1
1425 run_osd $dir 0 || return 1
1426 create_rbd_pool || return 1
1427 wait_for_clean || return 1
1428 local num_peered=$(get_num_active_or_peered)
1429 test "$num_peered" = $PG_NUM || return 1
1430 teardown $dir || return 1
1431 }
1432
1433 #######################################################################
1434
1435 ##
1436 # Return the number of PGs in the cluster, according to
1437 # ceph pg dump pgs.
1438 #
1439 # @param STDOUT the number of PGs
1440 # @return 0 on success, 1 on error
1441 #
1442 function get_num_pgs() {
1443 ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
1444 }
1445
1446 function test_get_num_pgs() {
1447 local dir=$1
1448
1449 setup $dir || return 1
1450 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1451 run_mgr $dir x || return 1
1452 run_osd $dir 0 || return 1
1453 create_rbd_pool || return 1
1454 wait_for_clean || return 1
1455 local num_pgs=$(get_num_pgs)
1456 test "$num_pgs" -gt 0 || return 1
1457 teardown $dir || return 1
1458 }
1459
1460 #######################################################################
1461
1462 ##
1463 # Return the OSD ids in use by at least one PG in the cluster (either
1464 # in the up or the acting set), according to ceph pg dump pgs. Every
1465 # OSD id appears as many times as it is used in the up and acting sets.
1466 # If an OSD id is in both the up and acting set of a given PG, it will
1467 # show twice.
1468 #
1469 # @param STDOUT a sorted list of OSD ids
1470 # @return 0 on success, 1 on error
1471 #
1472 function get_osd_id_used_by_pgs() {
1473 ceph --format json pg dump pgs 2>/dev/null | jq '.pg_stats | .[] | .up[], .acting[]' | sort
1474 }
1475
1476 function test_get_osd_id_used_by_pgs() {
1477 local dir=$1
1478
1479 setup $dir || return 1
1480 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1481 run_mgr $dir x || return 1
1482 run_osd $dir 0 || return 1
1483 create_rbd_pool || return 1
1484 wait_for_clean || return 1
1485 local osd_ids=$(get_osd_id_used_by_pgs | uniq)
1486 test "$osd_ids" = "0" || return 1
1487 teardown $dir || return 1
1488 }
1489
1490 #######################################################################
1491
1492 ##
1493 # Wait until the OSD **id** appears **count** times in the
1494 # PGs (see get_osd_id_used_by_pgs for more information about
1495 # how OSD ids are counted).
1496 #
1497 # @param id the OSD id
1498 # @param count the number of time it must show in the PGs
1499 # @return 0 on success, 1 on error
1500 #
1501 function wait_osd_id_used_by_pgs() {
1502 local id=$1
1503 local count=$2
1504
1505 status=1
1506 for ((i=0; i < $TIMEOUT / 5; i++)); do
1507 echo $i
1508 if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then
1509 sleep 5
1510 else
1511 status=0
1512 break
1513 fi
1514 done
1515 return $status
1516 }
1517
1518 function test_wait_osd_id_used_by_pgs() {
1519 local dir=$1
1520
1521 setup $dir || return 1
1522 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1523 run_mgr $dir x || return 1
1524 run_osd $dir 0 || return 1
1525 create_rbd_pool || return 1
1526 wait_for_clean || return 1
1527 wait_osd_id_used_by_pgs 0 8 || return 1
1528 ! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1
1529 teardown $dir || return 1
1530 }
1531
1532 #######################################################################
1533
1534 ##
1535 # Return the date and time of the last completed scrub for **pgid**,
1536 # as reported by ceph pg dump pgs. Note that a repair also sets this
1537 # date.
1538 #
1539 # @param pgid the id of the PG
# @param sname optional name of the stamp to report (defaults to last_scrub_stamp)
1540 # @param STDOUT the date and time of the last scrub
1541 # @return 0 on success, 1 on error
1542 #
1543 function get_last_scrub_stamp() {
1544 local pgid=$1
1545 local sname=${2:-last_scrub_stamp}
1546 ceph --format json pg dump pgs 2>/dev/null | \
1547 jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname"
1548 }
1549
1550 function test_get_last_scrub_stamp() {
1551 local dir=$1
1552
1553 setup $dir || return 1
1554 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1555 run_mgr $dir x || return 1
1556 run_osd $dir 0 || return 1
1557 create_rbd_pool || return 1
1558 wait_for_clean || return 1
1559 stamp=$(get_last_scrub_stamp 1.0)
1560 test -n "$stamp" || return 1
1561 teardown $dir || return 1
1562 }
1563
1564 #######################################################################
1565
1566 ##
1567 # Predicate checking if the cluster is clean, i.e. all of its PGs are
1568 # in a clean state (see get_num_active_clean for a definition).
1569 #
1570 # @return 0 if the cluster is clean, 1 otherwise
1571 #
1572 function is_clean() {
1573 num_pgs=$(get_num_pgs)
1574 test $num_pgs != 0 || return 1
1575 test $(get_num_active_clean) = $num_pgs || return 1
1576 }
1577
1578 function test_is_clean() {
1579 local dir=$1
1580
1581 setup $dir || return 1
1582 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1583 run_mgr $dir x || return 1
1584 run_osd $dir 0 || return 1
1585 create_rbd_pool || return 1
1586 wait_for_clean || return 1
1587 is_clean || return 1
1588 teardown $dir || return 1
1589 }
1590
1591 #######################################################################
1592
1593 calc() { $AWK "BEGIN{print $*}"; }
1594
1595 ##
1596 # Return a list of numbers that are increasingly larger and whose
1597 # total is **timeout** seconds. It can be used to keep the sleep
1598 # delay short while waiting for an event on a fast machine, while on a
1599 # very slow machine the larger delays avoid stressing it even
1600 # further or spamming the logs.
1601 #
1602 # @param timeout sum of all delays, in seconds
1603 # @return a list of sleep delays
1604 #
1605 function get_timeout_delays() {
1606 local trace=$(shopt -q -o xtrace && echo true || echo false)
1607 $trace && shopt -u -o xtrace
1608 local timeout=$1
1609 local first_step=${2:-1}
1610 local max_timeout=${3:-$MAX_TIMEOUT}
1611
1612 local i
1613 local total="0"
1614 i=$first_step
1615 while test "$(calc $total + $i \<= $timeout)" = "1"; do
1616 echo -n "$(calc $i) "
1617 total=$(calc $total + $i)
1618 i=$(calc $i \* 2)
1619 if [ $max_timeout -gt 0 ]; then
1620 # Did we reach max timeout ?
1621 if [ ${i%.*} -eq ${max_timeout%.*} ] && [ ${i#*.} \> ${max_timeout#*.} ] || [ ${i%.*} -gt ${max_timeout%.*} ]; then
1622 # Yes, so let's cap the max wait time to max
1623 i=$max_timeout
1624 fi
1625 fi
1626 done
1627 if test "$(calc $total \< $timeout)" = "1"; then
1628 echo -n "$(calc $timeout - $total) "
1629 fi
1630 $trace && shopt -s -o xtrace
1631 }
1632
1633 function test_get_timeout_delays() {
1634 test "$(get_timeout_delays 1)" = "1 " || return 1
1635 test "$(get_timeout_delays 5)" = "1 2 2 " || return 1
1636 test "$(get_timeout_delays 6)" = "1 2 3 " || return 1
1637 test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
1638 test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1
1639 test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1
1640 test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1
1641 test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1
1642 test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1
1643 test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1
1644 test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1
1645 test "$(get_timeout_delays 300 .1 0)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 12.8 25.6 51.2 102.4 95.3 " || return 1
1646 test "$(get_timeout_delays 300 .1 10)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 7.3 " || return 1
1647 }
1648
1649 #######################################################################
1650
1651 ##
1652 # Wait until the cluster becomes clean, failing if it does not make progress
1653 # for $WAIT_FOR_CLEAN_TIMEOUT seconds.
1654 # Progress is measured either via the **get_is_making_recovery_progress**
1655 # predicate or if the number of clean PGs changes (as returned by get_num_active_clean).
1656 #
# @param cmd optional command evaluated on every polling iteration (may be empty)
1657 # @return 0 if the cluster is clean, 1 otherwise
1658 #
1659 function wait_for_clean() {
1660 local cmd=$1
1661 local num_active_clean=-1
1662 local cur_active_clean
1663 local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
1664 local -i loop=0
1665
1666 flush_pg_stats || return 1
1667 while test $(get_num_pgs) == 0 ; do
1668 sleep 1
1669 done
1670
1671 while true ; do
1672 # Comparing get_num_active_clean & get_num_pgs is used to determine
1673 # if the cluster is clean. That's almost an inline of is_clean() to
1674 # get more performance by avoiding multiple calls of get_num_active_clean.
1675 cur_active_clean=$(get_num_active_clean)
1676 test $cur_active_clean = $(get_num_pgs) && break
1677 if test $cur_active_clean != $num_active_clean ; then
1678 loop=0
1679 num_active_clean=$cur_active_clean
1680 elif get_is_making_recovery_progress ; then
1681 loop=0
1682 elif (( $loop >= ${#delays[*]} )) ; then
1683 ceph report
1684 return 1
1685 fi
1686 # eval is a no-op if cmd is empty
1687 eval $cmd
1688 sleep ${delays[$loop]}
1689 loop+=1
1690 done
1691 return 0
1692 }
1693
1694 function test_wait_for_clean() {
1695 local dir=$1
1696
1697 setup $dir || return 1
1698 run_mon $dir a --osd_pool_default_size=2 || return 1
1699 run_osd $dir 0 || return 1
1700 run_mgr $dir x || return 1
1701 create_rbd_pool || return 1
1702 ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
1703 run_osd $dir 1 || return 1
1704 wait_for_clean || return 1
1705 teardown $dir || return 1
1706 }
1707
1708 ##
1709 # Wait until the cluster becomes active or peered, failing if it does not
1710 # make progress for $WAIT_FOR_CLEAN_TIMEOUT seconds.
1711 # Progress is measured either via the **get_is_making_recovery_progress**
1712 # predicate or if the number of peered PGs changes (as returned by get_num_active_or_peered).
1713 #
# @param cmd optional command evaluated on every polling iteration (may be empty)
1714 # @return 0 if the cluster is active or peered, 1 otherwise
1715 #
1716 function wait_for_peered() {
1717 local cmd=$1
1718 local num_peered=-1
1719 local cur_peered
1720 local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
1721 local -i loop=0
1722
1723 flush_pg_stats || return 1
1724 while test $(get_num_pgs) == 0 ; do
1725 sleep 1
1726 done
1727
1728 while true ; do
1729 # Comparing get_num_active_or_peered & get_num_pgs is used to determine
1730 # if the cluster is peered, while avoiding multiple calls of
1731 # get_num_active_or_peered per iteration.
1732 cur_peered=$(get_num_active_or_peered)
1733 test $cur_peered = $(get_num_pgs) && break
1734 if test $cur_peered != $num_peered ; then
1735 loop=0
1736 num_peered=$cur_peered
1737 elif get_is_making_recovery_progress ; then
1738 loop=0
1739 elif (( $loop >= ${#delays[*]} )) ; then
1740 ceph report
1741 return 1
1742 fi
1743 # eval is a no-op if cmd is empty
1744 eval $cmd
1745 sleep ${delays[$loop]}
1746 loop+=1
1747 done
1748 return 0
1749 }
1750
1751 function test_wait_for_peered() {
1752 local dir=$1
1753
1754 setup $dir || return 1
1755 run_mon $dir a --osd_pool_default_size=2 || return 1
1756 run_osd $dir 0 || return 1
1757 run_mgr $dir x || return 1
1758 create_rbd_pool || return 1
1759 ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
1760 run_osd $dir 1 || return 1
1761 wait_for_peered || return 1
1762 teardown $dir || return 1
1763 }
1764
1765
1766 #######################################################################
1767
1768 ##
1769 # Wait until the output of "ceph health detail" contains the string
1770 # passed as argument, retrying for up to $TIMEOUT seconds.
1771 #
1772 # @param string to grep for in health detail
1773 # @return 0 if the cluster health matches request, 1 otherwise
1774 #
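# Example (illustrative; any substring of "ceph health detail" output works,
# e.g. a health check code such as OSD_DOWN):
#
#   wait_for_health "HEALTH_WARN" || return 1
#   wait_for_health "OSD_DOWN" || return 1
#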
1775 function wait_for_health() {
1776 local grepstr=$1
1777 local -a delays=($(get_timeout_delays $TIMEOUT .1))
1778 local -i loop=0
1779
1780 while ! ceph health detail | grep "$grepstr" ; do
1781 if (( $loop >= ${#delays[*]} )) ; then
1782 ceph health detail
1783 return 1
1784 fi
1785 sleep ${delays[$loop]}
1786 loop+=1
1787 done
1788 }
1789
1790 ##
1791 # Wait until the cluster becomes HEALTH_OK again, failing if it does not
1792 # do so within $TIMEOUT seconds.
1793 #
1794 # @return 0 if the cluster is HEALTHY, 1 otherwise
1795 #
1796 function wait_for_health_ok() {
1797 wait_for_health "HEALTH_OK" || return 1
1798 }
1799
1800 function test_wait_for_health_ok() {
1801 local dir=$1
1802
1803 setup $dir || return 1
1804 run_mon $dir a --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
1805 run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
1806 # start osd_pool_default_size OSDs
1807 run_osd $dir 0 || return 1
1808 run_osd $dir 1 || return 1
1809 run_osd $dir 2 || return 1
1810 kill_daemons $dir TERM osd || return 1
1811 ceph osd down 0 || return 1
1812 # expect TOO_FEW_OSDS warning
1813 ! TIMEOUT=1 wait_for_health_ok || return 1
1814 # resurrect all OSDs
1815 activate_osd $dir 0 || return 1
1816 activate_osd $dir 1 || return 1
1817 activate_osd $dir 2 || return 1
1818 wait_for_health_ok || return 1
1819 teardown $dir || return 1
1820 }
1821
1822
1823 #######################################################################
1824
1825 ##
1826 # Run repair on **pgid** and wait until it completes. The repair
1827 # function will fail if repair does not complete within $TIMEOUT
1828 # seconds.
1829 #
1830 # @param pgid the id of the PG
1831 # @return 0 on success, 1 on error
1832 #
1833 function repair() {
1834 local pgid=$1
1835 local last_scrub=$(get_last_scrub_stamp $pgid)
1836 ceph pg repair $pgid
1837 wait_for_scrub $pgid "$last_scrub"
1838 }
1839
1840 function test_repair() {
1841 local dir=$1
1842
1843 setup $dir || return 1
1844 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1845 run_mgr $dir x || return 1
1846 run_osd $dir 0 || return 1
1847 create_rbd_pool || return 1
1848 wait_for_clean || return 1
1849 repair 1.0 || return 1
1850 kill_daemons $dir KILL osd || return 1
1851 ! TIMEOUT=1 repair 1.0 || return 1
1852 teardown $dir || return 1
1853 }
1854 #######################################################################
1855
1856 ##
1857 # Run scrub on **pgid** and wait until it completes. The pg_scrub
1858 # function will fail if the scrub does not complete within $TIMEOUT
1859 # seconds. The scrub is complete whenever the
1860 # **get_last_scrub_stamp** function reports a timestamp different from
1861 # the one stored before starting the scrub.
1862 #
1863 # @param pgid the id of the PG
1864 # @return 0 on success, 1 on error
1865 #
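# Example (illustrative; pg_deep_scrub below works the same way, using the
# last_deep_scrub_stamp field instead):
#
#   pg_scrub 1.0 || return 1
#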
1866 function pg_scrub() {
1867 local pgid=$1
1868 local last_scrub=$(get_last_scrub_stamp $pgid)
1869 ceph pg scrub $pgid
1870 wait_for_scrub $pgid "$last_scrub"
1871 }
1872
1873 function pg_deep_scrub() {
1874 local pgid=$1
1875 local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
1876 ceph pg deep-scrub $pgid
1877 wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
1878 }
1879
1880 function test_pg_scrub() {
1881 local dir=$1
1882
1883 setup $dir || return 1
1884 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1885 run_mgr $dir x || return 1
1886 run_osd $dir 0 || return 1
1887 create_rbd_pool || return 1
1888 wait_for_clean || return 1
1889 pg_scrub 1.0 || return 1
1890 kill_daemons $dir KILL osd || return 1
1891 ! TIMEOUT=1 pg_scrub 1.0 || return 1
1892 teardown $dir || return 1
1893 }
1894
1895 #######################################################################
1896
1897 ##
1898 # Run the *command* and expect it to fail (i.e. return a non-zero status).
1899 # The output (stderr and stdout) is stored in a temporary file in *dir*
1900 # and is expected to contain the string *expected*.
1901 #
1902 # Return 0 if the command failed and the string was found. Otherwise
1903 # return 1 and cat the full output of the command on stderr for debug.
1904 #
1905 # @param dir temporary directory to store the output
1906 # @param expected string to look for in the output
1907 # @param command ... the command and its arguments
1908 # @return 0 on success, 1 on error
1909 #
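# Example (illustrative; "cat /no/such/file" fails and prints
# "No such file or directory", so the expected string is found):
#
#   expect_failure $dir "No such file" cat /no/such/file || return 1
#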
1910
1911 function expect_failure() {
1912 local dir=$1
1913 shift
1914 local expected="$1"
1915 shift
1916 local success
1917
1918 if "$@" > $dir/out 2>&1 ; then
1919 success=true
1920 else
1921 success=false
1922 fi
1923
1924 if $success || ! grep --quiet "$expected" $dir/out ; then
1925 cat $dir/out >&2
1926 return 1
1927 else
1928 return 0
1929 fi
1930 }
1931
1932 function test_expect_failure() {
1933 local dir=$1
1934
1935 setup $dir || return 1
1936 expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
1937 # the command did not fail
1938 ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
1939 grep --quiet FAIL $dir/out || return 1
1940 # the command failed but the output does not contain the expected string
1941 ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
1942 ! grep --quiet FAIL $dir/out || return 1
1943 teardown $dir || return 1
1944 }
1945
1946 #######################################################################
1947
1948 ##
1949 # Given the *last_scrub*, wait for scrub to happen on **pgid**. It
1950 # will fail if the scrub does not complete within $TIMEOUT seconds. The
1951 # scrub is complete whenever the **get_last_scrub_stamp** function
1952 # reports a timestamp different from the one given in argument.
1953 #
1954 # @param pgid the id of the PG
1955 # @param last_scrub timestamp of the last scrub for *pgid*
1956 # @return 0 on success, 1 on error
1957 #
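# Example (illustrative; this is essentially what pg_deep_scrub does):
#
#   last_scrub=$(get_last_scrub_stamp 1.0 last_deep_scrub_stamp)
#   ceph pg deep-scrub 1.0
#   wait_for_scrub 1.0 "$last_scrub" last_deep_scrub_stamp || return 1
#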
1958 function wait_for_scrub() {
1959 local pgid=$1
1960 local last_scrub="$2"
1961 local sname=${3:-last_scrub_stamp}
1962
1963 for ((i=0; i < $TIMEOUT; i++)); do
1964 if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
1965 return 0
1966 fi
1967 sleep 1
1968 done
1969 return 1
1970 }
1971
1972 function test_wait_for_scrub() {
1973 local dir=$1
1974
1975 setup $dir || return 1
1976 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1977 run_mgr $dir x || return 1
1978 run_osd $dir 0 || return 1
1979 create_rbd_pool || return 1
1980 wait_for_clean || return 1
1981 local pgid=1.0
1982 ceph pg repair $pgid
1983 local last_scrub=$(get_last_scrub_stamp $pgid)
1984 wait_for_scrub $pgid "$last_scrub" || return 1
1985 kill_daemons $dir KILL osd || return 1
1986 last_scrub=$(get_last_scrub_stamp $pgid)
1987 ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
1988 teardown $dir || return 1
1989 }
1990
1991 #######################################################################
1992
1993 ##
1994 # Return 0 if the erasure code *plugin* is available, 1 otherwise.
1995 #
1996 # @param plugin erasure code plugin
1997 # @return 0 on success, 1 on error
1998 #
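# Example (illustrative; mirrors test_erasure_code_plugin_exists below):
#
#   erasure_code_plugin_exists jerasure || return 1
#   ! erasure_code_plugin_exists FAKE || return 1
#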
1999
2000 function erasure_code_plugin_exists() {
2001 local plugin=$1
2002 local status
2003 local grepstr
2004 local s
2005 case `uname` in
2006 FreeBSD) grepstr="Cannot open.*$plugin" ;;
2007 *) grepstr="$plugin.*No such file" ;;
2008 esac
2009
2010 s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
2011 local status=$?
2012 if [ $status -eq 0 ]; then
2013 ceph osd erasure-code-profile rm TESTPROFILE
2014 elif ! echo $s | grep --quiet "$grepstr" ; then
2015 status=1
2016 # display why the string was rejected.
2017 echo $s
2018 fi
2019 return $status
2020 }
2021
2022 function test_erasure_code_plugin_exists() {
2023 local dir=$1
2024
2025 setup $dir || return 1
2026 run_mon $dir a || return 1
2027 run_mgr $dir x || return 1
2028 erasure_code_plugin_exists jerasure || return 1
2029 ! erasure_code_plugin_exists FAKE || return 1
2030 teardown $dir || return 1
2031 }
2032
2033 #######################################################################
2034
2035 ##
2036 # Display all log files from **dir** on stdout.
2037 #
2038 # @param dir directory in which all data is stored
2039 #
2040
2041 function display_logs() {
2042 local dir=$1
2043
2044 find $dir -maxdepth 1 -name '*.log' | \
2045 while read file ; do
2046 echo "======================= $file"
2047 cat $file
2048 done
2049 }
2050
2051 function test_display_logs() {
2052 local dir=$1
2053
2054 setup $dir || return 1
2055 run_mon $dir a || return 1
2056 kill_daemons $dir || return 1
2057 display_logs $dir > $dir/log.out
2058 grep --quiet mon.a.log $dir/log.out || return 1
2059 teardown $dir || return 1
2060 }
2061
2062 #######################################################################
2063 ##
2064 # Spawn a command in background and save the pid in the variable name
2065 # passed in argument. To make the output easier to read, each output
2066 # line is prefixed with the process id.
2067 #
2068 # Example:
2069 # pids1=""
2070 # run_in_background pids1 bash -c 'sleep 1; exit 1'
2071 #
2072 # @param pid_variable the variable name (not value) where the pids will be stored
2073 # @param ... the command to execute
2074 # @return nothing meaningful; only pid_variable should be used, typically with **wait_background**
2075 #
2076 function run_in_background() {
2077 local pid_variable=$1
2078 shift
2079 # Execute the command and prefix each output line with its pid.
2080 # We make sure to return the exit status of the command and not the sed one.
2081 ("$@" |& sed 's/^/'$BASHPID': /'; return "${PIPESTATUS[0]}") >&2 &
2082 eval "$pid_variable+=\" $!\""
2083 }
2084
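##
# Run the given command with its stdout redirected to the file passed as
# first argument.
#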
2085 function save_stdout {
2086 local out="$1"
2087 shift
2088 "$@" > "$out"
2089 }
2090
2091 function test_run_in_background() {
2092 local pids
2093 run_in_background pids sleep 1
2094 run_in_background pids sleep 1
2095 test $(echo $pids | wc -w) = 2 || return 1
2096 wait $pids || return 1
2097 }
2098
2099 #######################################################################
2100 ##
2101 # Wait for pids running in background to complete.
2102 # This function is usually used after a **run_in_background** call.
2103 # Example:
2104 # pids1=""
2105 # run_in_background pids1 bash -c 'sleep 1; exit 1'
2106 # wait_background pids1
2107 #
2108 # @param pids The variable name that contains the active PIDS. Set to empty at the end of the function.
2109 # @return 1 if at least one process exits in error, 0 otherwise
2110 #
2111 function wait_background() {
2112 # We extract the PIDS from the variable name
2113 pids=${!1}
2114
2115 return_code=0
2116 for pid in $pids; do
2117 if ! wait $pid; then
2118 # If one process failed then return 1
2119 return_code=1
2120 fi
2121 done
2122
2123 # We empty the variable, reporting that all processes ended
2124 eval "$1=''"
2125
2126 return $return_code
2127 }
2128
2129
2130 function test_wait_background() {
2131 local pids=""
2132 run_in_background pids bash -c "sleep 1; exit 1"
2133 run_in_background pids bash -c "sleep 2; exit 0"
2134 wait_background pids
2135 if [ $? -ne 1 ]; then return 1; fi
2136
2137 run_in_background pids bash -c "sleep 1; exit 0"
2138 run_in_background pids bash -c "sleep 2; exit 0"
2139 wait_background pids
2140 if [ $? -ne 0 ]; then return 1; fi
2141
2142 if [ ! -z "$pids" ]; then return 1; fi
2143 }
2144
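##
# Ask every OSD to flush its PG stats and wait until "ceph osd last-stat-seq"
# reports, for each OSD, a sequence number at least as large as the one
# returned by its flush_pg_stats command.
#
# @param timeout max number of seconds to wait per OSD (defaults to $TIMEOUT)
# @return 0 on success, 1 if an OSD does not catch up in time
#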
2145 function flush_pg_stats()
2146 {
2147 local timeout=${1:-$TIMEOUT}
2148
2149 ids=`ceph osd ls`
2150 seqs=''
2151 for osd in $ids; do
2152 seq=`ceph tell osd.$osd flush_pg_stats`
2153 if test -z "$seq"
2154 then
2155 continue
2156 fi
2157 seqs="$seqs $osd-$seq"
2158 done
2159
2160 for s in $seqs; do
2161 osd=`echo $s | cut -d - -f 1`
2162 seq=`echo $s | cut -d - -f 2`
2163 echo "waiting osd.$osd seq $seq"
2164 while test $(ceph osd last-stat-seq $osd) -lt $seq; do
2165 sleep 1
2166 if [ $((timeout--)) -eq 0 ]; then
2167 return 1
2168 fi
2169 done
2170 done
2171 }
2172
2173 function test_flush_pg_stats()
2174 {
2175 local dir=$1
2176
2177 setup $dir || return 1
2178 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
2179 run_mgr $dir x || return 1
2180 run_osd $dir 0 || return 1
2181 create_rbd_pool || return 1
2182 rados -p rbd put obj /etc/group
2183 flush_pg_stats || return 1
2184 local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
2185 stored=`ceph df detail --format=json | jq "$jq_filter.stored"`
2186 stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
2187 test $stored -gt 0 || return 1
2188 test $stored == $stored_raw || return 1
2189 teardown $dir
2190 }
2191
2192 ########################################################################
2193 ##
2194 # Get the current op scheduler enabled on an osd by reading the
2195 # osd_op_queue config option
2196 #
2197 # Example:
2198 # get_op_scheduler $osdid
2199 #
2200 # @param id the id of the OSD
2201 # @return the name of the op scheduler enabled for the OSD
2202 #
2203 function get_op_scheduler() {
2204 local id=$1
2205
2206 get_config osd $id osd_op_queue
2207 }
2208
2209 function test_get_op_scheduler() {
2210 local dir=$1
2211
2212 setup $dir || return 1
2213
2214 run_mon $dir a || return 1
2215 run_mgr $dir x || return 1
2216
2217 run_osd $dir 0 --osd_op_queue=wpq || return 1
2218 test $(get_op_scheduler 0) = "wpq" || return 1
2219
2220 run_osd $dir 1 --osd_op_queue=mclock_scheduler || return 1
2221 test $(get_op_scheduler 1) = "mclock_scheduler" || return 1
2222 teardown $dir || return 1
2223 }
2224
2225 #######################################################################
2226
2227 ##
2228 # Call the **run** function (which must be defined by the caller) with
2229 # the **dir** argument followed by the caller argument list.
2230 #
2231 # If the **run** function returns an error, all logs found in **dir**
2232 # are displayed for diagnostic purposes.
2233 #
2234 # The **teardown** function is called when the **run** function returns
2235 # (on success or on error), to clean up leftovers. CEPH_CONF is set
2236 # to /dev/null and CEPH_ARGS is unset so that the tests are protected from
2237 # external interference.
2238 #
2239 # It is the responsibility of the **run** function to call the
2240 # **setup** function to prepare the test environment (create a temporary
2241 # directory etc.).
2242 #
2243 # The shell is required (via PS4) to display the function and line
2244 # number whenever a statement is executed to help debugging.
2245 #
2246 # @param dir directory in which all data is stored
2247 # @param ... arguments passed transparently to **run**
2248 # @return 0 on success, 1 on error
2249 #
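# Example (illustrative; the typical boilerplate at the end of a standalone
# test script that sources ceph-helpers.sh and defines its own run()):
#
#   function run() {
#       local dir=$1
#       shift
#       setup $dir || return 1
#       # ... test body using the helpers above ...
#   }
#
#   main my-test "$@"
#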
2250 function main() {
2251 local dir=td/$1
2252 shift
2253
2254 shopt -s -o xtrace
2255 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
2256
2257 export PATH=.:$PATH # make sure programs from sources are preferred
2258 export PYTHONWARNINGS=ignore
2259 export CEPH_CONF=/dev/null
2260 unset CEPH_ARGS
2261
2262 local code
2263 if run $dir "$@" ; then
2264 code=0
2265 else
2266 code=1
2267 fi
2268 teardown $dir $code || return 1
2269 return $code
2270 }
2271
2272 #######################################################################
2273
2274 function run_tests() {
2275 shopt -s -o xtrace
2276 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
2277
2278 export PATH=.:$PATH # make sure programs from sources are preferred
2279
2280 export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
2281 export CEPH_ARGS
2282 CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
2283 CEPH_ARGS+="--mon-host=$CEPH_MON "
2284 export CEPH_CONF=/dev/null
2285
2286 local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
2287 local dir=td/ceph-helpers
2288
2289 for func in $funcs ; do
2290 if ! $func $dir; then
2291 teardown $dir 1
2292 return 1
2293 fi
2294 done
2295 }
2296
2297 if test "$1" = TESTS ; then
2298 shift
2299 run_tests "$@"
2300 exit $?
2301 fi
2302
2303 # NOTE:
2304 # jq only supports --exit-status|-e from version 1.4 onwards, which makes
2305 # returning on error waaaay prettier and more straightforward.
2306 # However, the current automated upstream build is running with v1.3,
2307 # which has no idea what -e is. Hence the convoluted error checking we
2308 # need. Sad.
2309 # The next time someone changes this code, please check if v1.4 is now
2310 # a thing, and, if so, please change these to use -e. Thanks.
2311
2312 # jq '.all.supported | select([.[] == "foo"] | any)'
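# Example (illustrative; the first call succeeds because the filter evaluates
# to true, the second because the result matches the expected string):
#
#   jq_success '{"ok": true}' '.ok' || return 1
#   jq_success '{"state": "active"}' '.state' active || return 1
#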
2313 function jq_success() {
2314 input="$1"
2315 filter="$2"
2316 expects="\"$3\""
2317
2318 in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g")
2319 filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g")
2320
2321 ret=$(echo "$in_escaped" | jq "$filter_escaped")
2322 if [[ "$ret" == "true" ]]; then
2323 return 0
2324 elif [[ -n "$expects" ]]; then
2325 if [[ "$ret" == "$expects" ]]; then
2326 return 0
2327 fi
2328 fi
2329 return 1
2341 }
2342
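##
# Arrange for reads of one copy/shard of **objname** to fail: enable the
# objectstore's debug read error injection on the OSD selected by
# **shard_id** and register the object via the injectdataerr/injectmdataerr
# admin socket command.
#
# @param pooltype "ec" for erasure coded pools, anything else for replicated
# @param which "data" or "mdata", selecting injectdataerr or injectmdataerr
# @param poolname pool containing the object
# @param objname name of the object to inject the error on
# @param dir directory in which all data is stored
# @param shard_id index into the OSDs returned by get_osds for the object
# @return 0 on success, 1 on error
#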
2343 function inject_eio() {
2344 local pooltype=$1
2345 shift
2346 local which=$1
2347 shift
2348 local poolname=$1
2349 shift
2350 local objname=$1
2351 shift
2352 local dir=$1
2353 shift
2354 local shard_id=$1
2355 shift
2356
2357 local -a initial_osds=($(get_osds $poolname $objname))
2358 local osd_id=${initial_osds[$shard_id]}
2359 if [ "$pooltype" != "ec" ]; then
2360 shard_id=""
2361 fi
2362 type=$(cat $dir/$osd_id/type)
2363 set_config osd $osd_id ${type}_debug_inject_read_err true || return 1
2364 local loop=0
2365 while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
2366 inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
2367 loop=$(expr $loop + 1)
2368 if [ $loop = "10" ]; then
2369 return 1
2370 fi
2371 sleep 1
2372 done
2373 }
2374
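##
# diff the given files; if they differ and DIFFCOLOPTS is set, run diff again
# in side-by-side mode so the mismatch is easier to spot in the test log.
# Returns non-zero when the files differ.
#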
2375 function multidiff() {
2376 if ! diff $@ ; then
2377 if [ "$DIFFCOLOPTS" = "" ]; then
2378 return 1
2379 fi
2380 diff $DIFFCOLOPTS $@
2381 fi
2382 }
2383
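##
# Create an erasure coded pool named **pool_name** using a profile with
# crush-failure-domain=osd; any extra arguments are added to the profile
# (e.g. k=2 m=1). Optionally allow overwrites on the pool.
#
# @param pool_name name of the pool to create
# @param allow_overwrites "true" to set allow_ec_overwrites on the pool
# @param ... additional erasure-code-profile settings
# @return 0 on success, 1 on error
#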
2384 function create_ec_pool() {
2385 local pool_name=$1
2386 shift
2387 local allow_overwrites=$1
2388 shift
2389
2390 ceph osd erasure-code-profile set myprofile crush-failure-domain=osd "$@" || return 1
2391
2392 create_pool "$pool_name" 1 1 erasure myprofile || return 1
2393
2394 if [ "$allow_overwrites" = "true" ]; then
2395 ceph osd pool set "$pool_name" allow_ec_overwrites true || return 1
2396 fi
2397
2398 wait_for_clean || return 1
2399 return 0
2400 }
2401
2402 # Local Variables:
2403 # compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"
2404 # End: