1#!/bin/bash
2#
3# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
4# Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
5# Copyright (C) 2014 Federico Gimenez <fgimenez@coit.es>
6#
7# Author: Loic Dachary <loic@dachary.org>
8# Author: Federico Gimenez <fgimenez@coit.es>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU Library Public License as published by
12# the Free Software Foundation; either version 2, or (at your option)
13# any later version.
14#
15# This program is distributed in the hope that it will be useful,
16# but WITHOUT ANY WARRANTY; without even the implied warranty of
17# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18# GNU Library Public License for more details.
19#
20TIMEOUT=300
21PG_NUM=4
22: ${CEPH_BUILD_VIRTUALENV:=/tmp}
23
24if type xmlstarlet > /dev/null 2>&1; then
25 XMLSTARLET=xmlstarlet
26elif type xml > /dev/null 2>&1; then
27 XMLSTARLET=xml
28else
29 echo "Missing xmlstarlet binary!"
30 exit 1
31fi
32
33if [ `uname` = FreeBSD ]; then
34 SED=gsed
35 DIFFCOLOPTS=""
36 KERNCORE="kern.corefile"
37else
38 SED=sed
39 termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
40 if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
41 termwidth="-W ${termwidth}"
42 fi
43 DIFFCOLOPTS="-y $termwidth"
44 KERNCORE="kernel.core_pattern"
45fi
46
47EXTRA_OPTS=""
48if [ -n "$CEPH_LIB" ]; then
49 EXTRA_OPTS+=" --erasure-code-dir $CEPH_LIB"
50 EXTRA_OPTS+=" --plugin-dir $CEPH_LIB"
51 EXTRA_OPTS+=" --osd-class-dir $CEPH_LIB"
52fi
53
54#! @file ceph-helpers.sh
55# @brief Toolbox to manage Ceph cluster dedicated to testing
56#
57# Example use case:
58#
59# ~~~~~~~~~~~~~~~~{.sh}
60# source ceph-helpers.sh
61#
62# function mytest() {
63# # cleanup leftovers and reset mydir
64# setup mydir
65# # create a cluster with one monitor and three osds
66# run_mon mydir a
67# run_osd mydir 0
68# run_osd mydir 2
69# run_osd mydir 3
70# # put and get an object
71# rados --pool rbd put GROUP /etc/group
72# rados --pool rbd get GROUP /tmp/GROUP
73# # stop the cluster and cleanup the directory
74# teardown mydir
75# }
76# ~~~~~~~~~~~~~~~~
77#
78# The focus is on simplicity and efficiency, in the context of
79# functional tests. The output is intentionally very verbose
80# and functions return as soon as an error is found. The caller
81# is also expected to abort on the first error so that debugging
82# can be done by looking at the end of the output.
83#
84# Each function is documented, implemented and tested independently.
85# When modifying a helper, the test and the documentation are
86# expected to be updated and it is easier if they are collocated. A
87# test for a given function can be run with
88#
89# ~~~~~~~~~~~~~~~~{.sh}
90# ceph-helpers.sh TESTS test_get_osds
91# ~~~~~~~~~~~~~~~~
92#
93# and all the tests (i.e. all functions matching test_*) are run
94# with:
95#
96# ~~~~~~~~~~~~~~~~{.sh}
97# ceph-helpers.sh TESTS
98# ~~~~~~~~~~~~~~~~
99#
100# A test function takes a single argument: the directory dedicated
101# to the tests. It is expected to not create any file outside of this
102# directory and remove it entirely when it completes successfully.
103#
104
105
106function get_asok_dir() {
107 if [ -n "$CEPH_ASOK_DIR" ]; then
108 echo "$CEPH_ASOK_DIR"
109 else
110 echo ${TMPDIR:-/tmp}/ceph-asok.$$
111 fi
112}
113
114function get_asok_path() {
115 local name=$1
116 if [ -n "$name" ]; then
117 echo $(get_asok_dir)/ceph-$name.asok
118 else
119 echo $(get_asok_dir)/\$cluster-\$name.asok
120 fi
121}
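#
# Illustrative use of get_asok_path (not part of the original helpers;
# it mirrors the pattern used by test_run_mon and test_run_osd below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
#     config get osd_pool_default_size
# ~~~~~~~~~~~~~~~~
#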
122##
123# Cleanup any leftovers found in **dir** via **teardown**
124# and reset **dir** as an empty environment.
125#
126# @param dir path name of the environment
127# @return 0 on success, 1 on error
128#
129function setup() {
130 local dir=$1
131 teardown $dir || return 1
132 mkdir -p $dir
133 mkdir -p $(get_asok_dir)
134}
135
136function test_setup() {
137 local dir=$1
138 setup $dir || return 1
139 test -d $dir || return 1
140 setup $dir || return 1
141 test -d $dir || return 1
142 teardown $dir
143}
144
145#######################################################################
146
147##
148# Kill all daemons for which a .pid file exists in **dir** and remove
149# **dir**. If the file system in which **dir** resides is btrfs, delete all
150# subvolumes that relate to it.
151#
152# @param dir path name of the environment
153# @return 0 on success, 1 on error
154#
155function teardown() {
156 local dir=$1
157 local dumplogs=$2
158 kill_daemons $dir KILL
159 if [ `uname` != FreeBSD ] \
160 && [ $(stat -f -c '%T' .) == "btrfs" ]; then
161 __teardown_btrfs $dir
162 fi
163 local cores="no"
164 local pattern="$(sysctl -n $KERNCORE)"
165 # See if we have apport core handling
166 if [ "${pattern:0:1}" = "|" ]; then
167 # TODO: Where can we get the dumps?
168 # Not sure where the dumps really are so this will look in the CWD
169 pattern=""
170 fi
171 # Locally, core file names start with "core"; on teuthology they end with "core"
172 if ls $(dirname $pattern) | grep -q '^core\|core$' ; then
173 cores="yes"
174 if [ -n "$LOCALRUN" ]; then
175 mkdir /tmp/cores.$$ 2> /dev/null || true
176 for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
177 mv $i /tmp/cores.$$
178 done
179 fi
180 fi
181 if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
182 display_logs $dir
183 fi
184 rm -fr $dir
185 rm -rf $(get_asok_dir)
186 if [ "$cores" = "yes" ]; then
187 echo "ERROR: Failure due to cores found"
188 if [ -n "$LOCALRUN" ]; then
189 echo "Find saved core files in /tmp/cores.$$"
190 fi
191 return 1
192 fi
193 return 0
194}
195
196function __teardown_btrfs() {
197 local btrfs_base_dir=$1
198 local btrfs_root=$(df -P . | tail -1 | awk '{print $NF}')
199 local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list . -t | awk '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/$btrfs_dir")
200 for subvolume in $btrfs_dirs; do
201 sudo btrfs subvolume delete $btrfs_root/$subvolume
202 done
203}
204
205function test_teardown() {
206 local dir=$1
207 setup $dir || return 1
208 teardown $dir || return 1
209 ! test -d $dir || return 1
210}
211
212#######################################################################
213
214##
215# Sends a signal to a single daemon.
216# This is a helper function for kill_daemons
217#
218# After the daemon is sent **signal**, its actual termination
219# will be verified by sending it signal 0. If the daemon is
220# still alive, kill_daemon will pause for a few seconds and
221# try again. This will repeat for a fixed number of times
222# before kill_daemon returns on failure. The list of
223# sleep intervals can be specified as **delays** and defaults
224# to:
225#
226# 0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
227#
228# This sequence is designed to run first a very short sleep time (0.1)
229# if the machine is fast enough and the daemon terminates in a fraction of a
230# second. The increasing sleep numbers should give plenty of time for
231# the daemon to die even on the slowest running machine. If a daemon
232# takes more than a few minutes to stop (the sum of all sleep times),
233# there probably is no point in waiting more and a number of things
234# are likely to go wrong anyway: better give up and return on error.
235#
236# @param pidfile path of the file containing the process id
237# @param send_signal the signal to send
238# @param delays sequence of sleep times before failure
239#
240function kill_daemon() {
241 local pid=$(cat $1)
242 local send_signal=$2
243 local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
244 local exit_code=1
245 for try in $delays ; do
246 if kill -$send_signal $pid 2> /dev/null ; then
247 exit_code=1
248 else
249 exit_code=0
250 break
251 fi
252 send_signal=0
253 sleep $try
254 done;
255 return $exit_code
256}
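#
# Illustrative calls (added for clarity, not upstream code). The pid file
# path follows the naming used by run_mon/run_osd, e.g. $dir/osd.0.pid:
#
# ~~~~~~~~~~~~~~~~{.sh}
# kill_daemon $dir/osd.0.pid TERM || return 1         # default delay schedule
# kill_daemon $dir/osd.0.pid TERM "1 2 4" || return 1 # shorter custom schedule
# ~~~~~~~~~~~~~~~~
#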
257
258function test_kill_daemon() {
259 local dir=$1
260 setup $dir || return 1
261 run_mon $dir a --osd_pool_default_size=1 || return 1
262 run_mgr $dir x || return 1
263 run_osd $dir 0 || return 1
264
265 name_prefix=osd
266 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
267 #
268 # sending signal 0 won't kill the daemon
269 # waiting just for one second instead of the default schedule
270 # allows us to quickly verify what happens when kill fails
271 # to stop the daemon (i.e. it must return false)
272 #
273 ! kill_daemon $pidfile 0 1 || return 1
274 #
275 # kill just the osd and verify the mon is still responsive
276 #
277 kill_daemon $pidfile TERM || return 1
278 done
279
280 ceph osd dump | grep "osd.0 down" || return 1
281
282 name_prefix=mgr
283 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
284 #
285 # kill the mgr
286 #
287 kill_daemon $pidfile TERM || return 1
288 done
289
290 name_prefix=mon
291 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
292 #
293 # kill the mon and verify it cannot be reached
294 #
295 kill_daemon $pidfile TERM || return 1
296 ! timeout 5 ceph status || return 1
297 done
298
299 teardown $dir || return 1
300}
301
302##
303# Kill all daemons for which a .pid file exists in **dir**. Each
304# daemon is sent a **signal** and kill_daemons waits for it to exit
305# during a few minutes. By default all daemons are killed. If a
306# **name_prefix** is provided, only the daemons for which a pid
307# file is found matching the prefix are killed. See run_osd and
308# run_mon for more information about the name conventions for
309# the pid files.
310#
311# Send TERM to all daemons : kill_daemons $dir
312# Send KILL to all daemons : kill_daemons $dir KILL
313# Send KILL to all osds : kill_daemons $dir KILL osd
314# Send KILL to osd 1 : kill_daemons $dir KILL osd.1
315#
316# If a daemon is sent the TERM signal and does not terminate
317# within a few minutes, it will still be running even after
318# kill_daemons returns.
319#
320# If all daemons are killed successfully the function returns 0;
321# if at least one daemon remains, this is treated as an
322# error and the function returns 1.
323#
324# @param dir path name of the environment
325# @param signal name of the first signal (defaults to TERM)
326# @param name_prefix only kill matching daemons (defaults to all)
327# @param delays sequence of sleep times before failure
328# @return 0 on success, 1 on error
329#
330function kill_daemons() {
331 local trace=$(shopt -q -o xtrace && echo true || echo false)
332 $trace && shopt -u -o xtrace
333 local dir=$1
334 local signal=${2:-TERM}
335 local name_prefix=$3 # optional, osd, mon, osd.1
336 local delays=$4 #optional timing
337 local status=0
338 local pids=""
339
340 for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
341 run_in_background pids kill_daemon $pidfile $signal $delays
342 done
343
344 wait_background pids
345 status=$?
346
347 $trace && shopt -s -o xtrace
348 return $status
349}
350
351function test_kill_daemons() {
352 local dir=$1
353 setup $dir || return 1
354 run_mon $dir a --osd_pool_default_size=1 || return 1
355 run_mgr $dir x || return 1
356 run_osd $dir 0 || return 1
357 #
358 # sending signal 0 won't kill the daemon
359 # waiting just for one second instead of the default schedule
360 # allows us to quickly verify what happens when kill fails
361 # to stop the daemon (i.e. it must return false)
362 #
363 ! kill_daemons $dir 0 osd 1 || return 1
364 #
365 # kill just the osd and verify the mon is still responsive
366 #
367 kill_daemons $dir TERM osd || return 1
368 ceph osd dump | grep "osd.0 down" || return 1
369 #
370 # kill the mgr
371 #
372 kill_daemons $dir TERM mgr || return 1
373 #
374 # kill the mon and verify it cannot be reached
375 #
376 kill_daemons $dir TERM || return 1
377 ! timeout 5 ceph status || return 1
378 teardown $dir || return 1
379}
380
381#######################################################################
382
383##
384# Run a monitor by the name mon.**id** with data in **dir**/**id**.
385# The logs can be found in **dir**/mon.**id**.log and the pid file
386# is **dir**/mon.**id**.pid and the admin socket is
387# **dir**/**id**/ceph-mon.**id**.asok.
388#
389# The remaining arguments are passed verbatim to ceph-mon --mkfs
390# and the ceph-mon daemon.
391#
392# Two mandatory arguments must be provided: --fsid and --mon-host
393# Instead of adding them to every call to run_mon, they can be
394# set in the CEPH_ARGS environment variable to be read implicitly
395# by every ceph command.
396#
397# The CEPH_CONF variable is expected to be set to /dev/null to
398# only rely on arguments for configuration.
399#
400# Examples:
401#
402# CEPH_ARGS="--fsid=$(uuidgen) "
403# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
404# run_mon $dir a # spawn a mon and bind port 7018
405# run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
406#
407# If mon_initial_members is not set, the default rbd pool is deleted
408# and replaced with a replicated pool with less placement groups to
409# speed up initialization. If mon_initial_members is set, no attempt
410# is made to recreate the rbd pool because it would hang forever,
411# waiting for other mons to join.
412#
413# A **dir**/ceph.conf file is created but not meant to be used by any
414# function. It is convenient for debugging a failure with:
415#
416# ceph --conf **dir**/ceph.conf -s
417#
418# @param dir path name of the environment
419# @param id mon identifier
420# @param ... can be any option valid for ceph-mon
421# @return 0 on success, 1 on error
422#
423function run_mon() {
424 local dir=$1
425 shift
426 local id=$1
427 shift
428 local data=$dir/$id
429
430 ceph-mon \
431 --id $id \
432 --mkfs \
433 --mon-data=$data \
434 --run-dir=$dir \
435 "$@" || return 1
436
437 ceph-mon \
438 --id $id \
439 --mon-osd-full-ratio=.99 \
440 --mon-data-avail-crit=1 \
441 --mon-data-avail-warn=5 \
442 --paxos-propose-interval=0.1 \
443 --osd-crush-chooseleaf-type=0 \
444 $EXTRA_OPTS \
445 --debug-mon 20 \
446 --debug-ms 20 \
447 --debug-paxos 20 \
448 --chdir= \
449 --mon-data=$data \
450 --log-file=$dir/\$name.log \
451 --admin-socket=$(get_asok_path) \
452 --mon-cluster-log-file=$dir/log \
453 --run-dir=$dir \
454 --pid-file=$dir/\$name.pid \
455 --mon-allow-pool-delete \
456 --mon-osd-backfillfull-ratio .99 \
457 "$@" || return 1
458
459 cat > $dir/ceph.conf <<EOF
460[global]
461fsid = $(get_config mon $id fsid)
462mon host = $(get_config mon $id mon_host)
463EOF
464}
465
466function test_run_mon() {
467 local dir=$1
468
469 setup $dir || return 1
470
471 run_mon $dir a --mon-initial-members=a || return 1
472 create_rbd_pool || return 1
473 # rbd has not been deleted / created, hence it has pool id 1
474 ceph osd dump | grep "pool 1 'rbd'" || return 1
475 kill_daemons $dir || return 1
476
477 run_mon $dir a || return 1
478 create_rbd_pool || return 1
479 # rbd has been deleted / created, hence it does not have pool id 1
480 ! ceph osd dump | grep "pool 1 'rbd'" || return 1
481 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
482 config get osd_pool_default_size)
483 test "$size" = '{"osd_pool_default_size":"3"}' || return 1
484
485 ! CEPH_ARGS='' ceph status || return 1
486 CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1
487
488 kill_daemons $dir || return 1
489
490 run_mon $dir a --osd_pool_default_size=1 || return 1
491 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
492 config get osd_pool_default_size)
493 test "$size" = '{"osd_pool_default_size":"1"}' || return 1
494 kill_daemons $dir || return 1
495
496 CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
497 run_mon $dir a || return 1
498 local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
499 config get osd_pool_default_size)
500 test "$size" = '{"osd_pool_default_size":"2"}' || return 1
501 kill_daemons $dir || return 1
502
503 teardown $dir || return 1
504}
505
506function create_rbd_pool() {
507 ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
508 create_pool rbd $PG_NUM || return 1
509 rbd pool init rbd
510}
511
512function create_pool() {
513 ceph osd pool create "$@"
514 sleep 1
515}
516
517function delete_pool() {
518 local poolname=$1
519 ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
520}
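#
# Illustrative use of the pool helpers (added for clarity; "mypool" is a
# hypothetical pool name, $PG_NUM is the default defined at the top):
#
# ~~~~~~~~~~~~~~~~{.sh}
# create_pool mypool $PG_NUM || return 1
# delete_pool mypool || return 1
# ~~~~~~~~~~~~~~~~
#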
521
522#######################################################################
523
524function run_mgr() {
525 local dir=$1
526 shift
527 local id=$1
528 shift
529 local data=$dir/$id
530
531 ceph-mgr \
532 --id $id \
533 $EXTRA_OPTS \
534 --debug-mgr 20 \
535 --debug-objecter 20 \
536 --debug-ms 20 \
537 --debug-paxos 20 \
538 --chdir= \
539 --mgr-data=$data \
540 --log-file=$dir/\$name.log \
541 --admin-socket=$(get_asok_path) \
542 --run-dir=$dir \
543 --pid-file=$dir/\$name.pid \
544 "$@" || return 1
545}
546
547#######################################################################
548
549##
550# Create (prepare) and run (activate) an osd by the name osd.**id**
551# with data in **dir**/**id**. The logs can be found in
552# **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
553# the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
554#
555# The remaining arguments are passed verbatim to ceph-osd.
556#
557# Two mandatory arguments must be provided: --fsid and --mon-host
558# Instead of adding them to every call to run_osd, they can be
559# set in the CEPH_ARGS environment variable to be read implicitly
560# by every ceph command.
561#
562# The CEPH_CONF variable is expected to be set to /dev/null to
563# only rely on arguments for configuration.
564#
565# The run_osd function creates the OSD data directory with ceph-disk
566# prepare on the **dir**/**id** directory and relies on the
567# activate_osd function to run the daemon.
568#
569# Examples:
570#
571# CEPH_ARGS="--fsid=$(uuidgen) "
572# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
573# run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
574#
575# @param dir path name of the environment
576# @param id osd identifier
577# @param ... can be any option valid for ceph-osd
578# @return 0 on success, 1 on error
579#
580function run_osd() {
581 local dir=$1
582 shift
583 local id=$1
584 shift
585 local osd_data=$dir/$id
586
587 local ceph_disk_args
588 ceph_disk_args+=" --statedir=$dir"
589 ceph_disk_args+=" --sysconfdir=$dir"
590 ceph_disk_args+=" --prepend-to-path="
591
592 mkdir -p $osd_data
593 ceph-disk $ceph_disk_args \
594 prepare --filestore $osd_data || return 1
595
596 activate_osd $dir $id "$@"
597}
598
599function run_osd_bluestore() {
600 local dir=$1
601 shift
602 local id=$1
603 shift
604 local osd_data=$dir/$id
605
606 local ceph_disk_args
607 ceph_disk_args+=" --statedir=$dir"
608 ceph_disk_args+=" --sysconfdir=$dir"
609 ceph_disk_args+=" --prepend-to-path="
610
611 mkdir -p $osd_data
612 ceph-disk $ceph_disk_args \
613 prepare --bluestore $osd_data || return 1
614
615 activate_osd $dir $id "$@"
616}
617
618function test_run_osd() {
619 local dir=$1
620
621 setup $dir || return 1
622
623 run_mon $dir a || return 1
624 run_mgr $dir x || return 1
625
626 run_osd $dir 0 || return 1
627 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
628 config get osd_max_backfills)
629 echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
630
631 run_osd $dir 1 --osd-max-backfills 20 || return 1
632 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
633 config get osd_max_backfills)
634 test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
635
636 CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
637 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
638 config get osd_max_backfills)
639 test "$backfills" = '{"osd_max_backfills":"30"}' || return 1
640
641 teardown $dir || return 1
642}
643
644#######################################################################
645
646##
647# Shutdown and remove all traces of the osd by the name osd.**id**.
648#
649# The OSD is shutdown with the TERM signal. It is then removed from
650# the auth list, crush map, osd map etc and the files associated with
651# it are also removed.
652#
653# @param dir path name of the environment
654# @param id osd identifier
655# @return 0 on success, 1 on error
656#
657function destroy_osd() {
658 local dir=$1
659 local id=$2
660
661 ceph osd out osd.$id || return 1
662 kill_daemons $dir TERM osd.$id || return 1
663 ceph osd purge osd.$id --yes-i-really-mean-it || return 1
664 teardown $dir/$id || return 1
665 rm -fr $dir/$id
666}
667
668function test_destroy_osd() {
669 local dir=$1
670
671 setup $dir || return 1
672 run_mon $dir a || return 1
673 run_mgr $dir x || return 1
674 run_osd $dir 0 || return 1
675 destroy_osd $dir 0 || return 1
676 ! ceph osd dump | grep "osd.0 " || return 1
677 teardown $dir || return 1
678}
679
680#######################################################################
681
682##
683# Run (activate) an osd by the name osd.**id** with data in
684# **dir**/**id**. The logs can be found in **dir**/osd.**id**.log,
685# the pid file is **dir**/osd.**id**.pid and the admin socket is
686# **dir**/**id**/ceph-osd.**id**.asok.
687#
688# The remaining arguments are passed verbatim to ceph-osd.
689#
690# Two mandatory arguments must be provided: --fsid and --mon-host
691# Instead of adding them to every call to activate_osd, they can be
692# set in the CEPH_ARGS environment variable to be read implicitly
693# by every ceph command.
694#
695# The CEPH_CONF variable is expected to be set to /dev/null to
696# only rely on arguments for configuration.
697#
698# The activate_osd function expects a valid OSD data directory
699# in **dir**/**id**, either just created via run_osd or re-using
700# one left by a previous run of ceph-osd. The ceph-osd daemon is
701# run indirectly via ceph-disk activate.
702#
703# The activate_osd function blocks until the monitor reports the osd
704# up. If it fails to do so within $TIMEOUT seconds, activate_osd
705# fails.
706#
707# Examples:
708#
709# CEPH_ARGS="--fsid=$(uuidgen) "
710# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
711# activate_osd $dir 0 # activate an osd using the monitor listening on 7018
712#
713# @param dir path name of the environment
714# @param id osd identifier
715# @param ... can be any option valid for ceph-osd
716# @return 0 on success, 1 on error
717#
718function activate_osd() {
719 local dir=$1
720 shift
721 local id=$1
722 shift
723 local osd_data=$dir/$id
724
725 local ceph_disk_args
726 ceph_disk_args+=" --statedir=$dir"
727 ceph_disk_args+=" --sysconfdir=$dir"
728 ceph_disk_args+=" --prepend-to-path="
729
730 local ceph_args="$CEPH_ARGS"
731 ceph_args+=" --osd-failsafe-full-ratio=.99"
732 ceph_args+=" --osd-journal-size=100"
733 ceph_args+=" --osd-scrub-load-threshold=2000"
734 ceph_args+=" --osd-data=$osd_data"
735 ceph_args+=" --chdir="
736 ceph_args+=$EXTRA_OPTS
737 ceph_args+=" --run-dir=$dir"
738 ceph_args+=" --admin-socket=$(get_asok_path)"
739 ceph_args+=" --debug-osd=20"
740 ceph_args+=" --log-file=$dir/\$name.log"
741 ceph_args+=" --pid-file=$dir/\$name.pid"
742 ceph_args+=" --osd-max-object-name-len 460"
743 ceph_args+=" --osd-max-object-namespace-len 64"
744 ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features *"
745 ceph_args+=" "
746 ceph_args+="$@"
747 mkdir -p $osd_data
748 CEPH_ARGS="$ceph_args " ceph-disk $ceph_disk_args \
749 activate \
750 --mark-init=none \
751 $osd_data || return 1
752
753 [ "$id" = "$(cat $osd_data/whoami)" ] || return 1
754
755 wait_for_osd up $id || return 1
756}
757
758function test_activate_osd() {
759 local dir=$1
760
761 setup $dir || return 1
762
763 run_mon $dir a || return 1
764 run_mgr $dir x || return 1
765
766 run_osd $dir 0 || return 1
767 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
768 config get osd_max_backfills)
769 echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
770
771 kill_daemons $dir TERM osd || return 1
772
773 activate_osd $dir 0 --osd-max-backfills 20 || return 1
774 local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
775 config get osd_max_backfills)
776 test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
777
778 teardown $dir || return 1
779}
780
781#######################################################################
782
783##
784# Wait until the OSD **id** is either up or down, as specified by
785# **state**. It fails after $TIMEOUT seconds.
786#
787# @param state either up or down
788# @param id osd identifier
789# @return 0 on success, 1 on error
790#
791function wait_for_osd() {
792 local state=$1
793 local id=$2
794
795 status=1
796 for ((i=0; i < $TIMEOUT; i++)); do
797 echo $i
798 if ! ceph osd dump | grep "osd.$id $state"; then
799 sleep 1
800 else
801 status=0
802 break
803 fi
804 done
805 return $status
806}
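#
# Illustrative use (added for clarity; mirrors test_wait_for_osd below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_osd up 0 || return 1
# kill_daemons $dir TERM osd || return 1
# wait_for_osd down 0 || return 1
# ~~~~~~~~~~~~~~~~
#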
807
808function test_wait_for_osd() {
809 local dir=$1
810 setup $dir || return 1
811 run_mon $dir a --osd_pool_default_size=1 || return 1
812 run_mgr $dir x || return 1
813 run_osd $dir 0 || return 1
814 wait_for_osd up 0 || return 1
815 kill_daemons $dir TERM osd || return 1
816 wait_for_osd down 0 || return 1
817 ( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
818 teardown $dir || return 1
819}
820
821#######################################################################
822
823##
824# Display the list of OSD ids supporting the **objectname** stored in
825# **poolname**, as reported by ceph osd map.
826#
827# @param poolname an existing pool
828# @param objectname an objectname (may or may not exist)
829# @param STDOUT white space separated list of OSD ids
830# @return 0 on success, 1 on error
831#
832function get_osds() {
833 local poolname=$1
834 local objectname=$2
835
836 local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
837 jq '.acting | .[]')
838 # get rid of the trailing space
839 echo $osds
840}
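#
# Illustrative use (added for clarity; mirrors test_get_osds below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# local osds=$(get_osds rbd GROUP)   # e.g. "0 1" on a two OSD cluster
# ~~~~~~~~~~~~~~~~
#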
841
842function test_get_osds() {
843 local dir=$1
844
845 setup $dir || return 1
846 run_mon $dir a --osd_pool_default_size=2 || return 1
847 run_mgr $dir x || return 1
848 run_osd $dir 0 || return 1
849 run_osd $dir 1 || return 1
850 create_rbd_pool || return 1
851 wait_for_clean || return 1
852 create_rbd_pool || return 1
853 get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
854 teardown $dir || return 1
855}
856
857#######################################################################
858
859##
860# Wait for the monitor to form quorum (optionally, of size N)
861#
862# @param timeout duration (lower-bound) to wait for quorum to be formed
863# @param quorumsize size of quorum to wait for
864# @return 0 on success, 1 on error
865#
866function wait_for_quorum() {
867 local timeout=$1
868 local quorumsize=$2
869
870 if [[ -z "$timeout" ]]; then
871 timeout=300
872 fi
873
874 if [[ -z "$quorumsize" ]]; then
875 timeout $timeout ceph mon_status --format=json >&/dev/null || return 1
876 return 0
877 fi
878
879 no_quorum=1
880 wait_until=$((`date +%s` + $timeout))
881 while [[ $(date +%s) -lt $wait_until ]]; do
882 jqfilter='.quorum | length == '$quorumsize
883 jqinput="$(timeout $timeout ceph mon_status --format=json 2>/dev/null)"
884 res=$(echo $jqinput | jq "$jqfilter")
885 if [[ "$res" == "true" ]]; then
886 no_quorum=0
887 break
888 fi
889 done
890 return $no_quorum
891}
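#
# Illustrative use (added for clarity; the values are hypothetical):
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_quorum 300 3 || return 1  # wait up to 300s for a quorum of 3 mons
# wait_for_quorum || return 1        # just wait for mon_status to answer
# ~~~~~~~~~~~~~~~~
#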
892
893#######################################################################
894
895##
896# Return the PG supporting the **objectname** stored in
897# **poolname**, as reported by ceph osd map.
898#
899# @param poolname an existing pool
900# @param objectname an objectname (may or may not exist)
901# @param STDOUT a PG
902# @return 0 on success, 1 on error
903#
904function get_pg() {
905 local poolname=$1
906 local objectname=$2
907
908 ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
909}
910
911function test_get_pg() {
912 local dir=$1
913
914 setup $dir || return 1
915 run_mon $dir a --osd_pool_default_size=1 || return 1
916 run_mgr $dir x || return 1
917 run_osd $dir 0 || return 1
918 create_rbd_pool || return 1
919 wait_for_clean || return 1
920 get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
921 teardown $dir || return 1
922}
923
924#######################################################################
925
926##
927# Return the value of the **config**, obtained via the config get command
928# of the admin socket of **daemon**.**id**.
929#
930# @param daemon mon or osd
931# @param id mon or osd ID
932# @param config the configuration variable name as found in config_opts.h
933# @param STDOUT the config value
934# @return 0 on success, 1 on error
935#
936function get_config() {
937 local daemon=$1
938 local id=$2
939 local config=$3
940
941 CEPH_ARGS='' \
942 ceph --format json daemon $(get_asok_path $daemon.$id) \
943 config get $config 2> /dev/null | \
944 jq -r ".$config"
945}
946
947function test_get_config() {
948 local dir=$1
949
950 # override the default config using command line arg and check it
951 setup $dir || return 1
952 run_mon $dir a --osd_pool_default_size=1 || return 1
953 test $(get_config mon a osd_pool_default_size) = 1 || return 1
954 run_mgr $dir x || return 1
955 run_osd $dir 0 --osd_max_scrubs=3 || return 1
956 test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
957 teardown $dir || return 1
958}
959
960#######################################################################
961
962##
963# Set the **config** to specified **value**, via the config set command
964# of the admin socket of **daemon**.**id**
965#
966# @param daemon mon or osd
967# @param id mon or osd ID
968# @param config the configuration variable name as found in config_opts.h
969# @param value the config value
970# @return 0 on success, 1 on error
971#
972function set_config() {
973 local daemon=$1
974 local id=$2
975 local config=$3
976 local value=$4
977
978 test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
979 config set $config $value 2> /dev/null | \
980 jq 'has("success")') == true
981}
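#
# Illustrative use (added for clarity; mirrors test_set_config below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# set_config mon a ms_crc_header false || return 1
# test $(get_config mon a ms_crc_header) = false || return 1
# ~~~~~~~~~~~~~~~~
#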
982
983function test_set_config() {
984 local dir=$1
985
986 setup $dir || return 1
987 run_mon $dir a --osd_pool_default_size=1 || return 1
988 test $(get_config mon a ms_crc_header) = true || return 1
989 set_config mon a ms_crc_header false || return 1
990 test $(get_config mon a ms_crc_header) = false || return 1
991 set_config mon a ms_crc_header true || return 1
992 test $(get_config mon a ms_crc_header) = true || return 1
993 teardown $dir || return 1
994}
995
996#######################################################################
997
998##
999# Return the OSD id of the primary OSD supporting the **objectname**
1000# stored in **poolname**, as reported by ceph osd map.
1001#
1002# @param poolname an existing pool
1003# @param objectname an objectname (may or may not exist)
1004# @param STDOUT the primary OSD id
1005# @return 0 on success, 1 on error
1006#
1007function get_primary() {
1008 local poolname=$1
1009 local objectname=$2
1010
1011 ceph --format json osd map $poolname $objectname 2>/dev/null | \
1012 jq '.acting_primary'
1013}
1014
1015function test_get_primary() {
1016 local dir=$1
1017
1018 setup $dir || return 1
1019 run_mon $dir a --osd_pool_default_size=1 || return 1
1020 local osd=0
1021 run_mgr $dir x || return 1
1022 run_osd $dir $osd || return 1
1023 create_rbd_pool || return 1
1024 wait_for_clean || return 1
1025 test $(get_primary rbd GROUP) = $osd || return 1
1026 teardown $dir || return 1
1027}
1028
1029#######################################################################
1030
1031##
1032# Return the id of any OSD supporting the **objectname** stored in
1033# **poolname**, as reported by ceph osd map, except the primary.
1034#
1035# @param poolname an existing pool
1036# @param objectname an objectname (may or may not exist)
1037# @param STDOUT the OSD id
1038# @return 0 on success, 1 on error
1039#
1040function get_not_primary() {
1041 local poolname=$1
1042 local objectname=$2
1043
1044 local primary=$(get_primary $poolname $objectname)
1045 ceph --format json osd map $poolname $objectname 2>/dev/null | \
1046 jq ".acting | map(select (. != $primary)) | .[0]"
1047}
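#
# Illustrative use (added for clarity; mirrors test_get_not_primary below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# local primary=$(get_primary rbd GROUP)
# local not_primary=$(get_not_primary rbd GROUP)
# ~~~~~~~~~~~~~~~~
#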
1048
1049function test_get_not_primary() {
1050 local dir=$1
1051
1052 setup $dir || return 1
1053 run_mon $dir a --osd_pool_default_size=2 || return 1
1054 run_mgr $dir x || return 1
1055 run_osd $dir 0 || return 1
1056 run_osd $dir 1 || return 1
1057 create_rbd_pool || return 1
1058 wait_for_clean || return 1
1059 local primary=$(get_primary rbd GROUP)
1060 local not_primary=$(get_not_primary rbd GROUP)
1061 test $not_primary != $primary || return 1
1062 test $not_primary = 0 -o $not_primary = 1 || return 1
1063 teardown $dir || return 1
1064}
1065
1066#######################################################################
1067
1068##
1069# Run ceph-objectstore-tool against the OSD **id** using the data path
1070# **dir**. The OSD is killed with TERM prior to running
1071# ceph-objectstore-tool because access to the data path is
1072# exclusive. The OSD is restarted after the command completes. The
1073# objectstore_tool returns after all PGs are active+clean again.
1074#
1075# @param dir the data path of the OSD
1076# @param id the OSD id
1077# @param ... arguments to ceph-objectstore-tool
1078# @param STDIN the input of ceph-objectstore-tool
1079# @param STDOUT the output of ceph-objectstore-tool
1080# @return 0 on success, 1 on error
1081#
1082# The value of $ceph_osd_args will be passed to restarted osds
1083#
1084function objectstore_tool() {
1085 local dir=$1
1086 shift
1087 local id=$1
1088 shift
1089 local osd_data=$dir/$id
1090
1091 local osd_type=$(cat $osd_data/type)
1092
1093 kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1
1094
1095 local journal_args
1096 if [ "$objectstore_type" == "filestore" ]; then
1097 journal_args=" --journal-path $osd_data/journal"
1098 fi
1099 ceph-objectstore-tool \
1100 --data-path $osd_data \
1101 $journal_args \
1102 "$@" || return 1
1103 activate_osd $dir $id $ceph_osd_args >&2 || return 1
1104 wait_for_clean >&2
1105}
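#
# Illustrative use (added for clarity; mirrors test_objectstore_tool below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# objectstore_tool $dir 0 GROUP get-bytes | diff - /etc/group || return 1
# ~~~~~~~~~~~~~~~~
#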
1106
1107function test_objectstore_tool() {
1108 local dir=$1
1109
1110 setup $dir || return 1
1111 run_mon $dir a --osd_pool_default_size=1 || return 1
1112 local osd=0
1113 run_mgr $dir x || return 1
1114 run_osd $dir $osd || return 1
1115 create_rbd_pool || return 1
1116 wait_for_clean || return 1
1117 rados --pool rbd put GROUP /etc/group || return 1
1118 objectstore_tool $dir $osd GROUP get-bytes | \
1119 diff - /etc/group
1120 ! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
1121 teardown $dir || return 1
1122}
1123
1124#######################################################################
1125
1126##
1127# Predicate checking if there is an ongoing recovery in the
1128# cluster. If any of the recovering_{keys,bytes,objects}_per_sec
1129# counters are reported by ceph status, it means recovery is in
1130# progress.
1131#
1132# @return 0 if recovery in progress, 1 otherwise
1133#
1134function get_is_making_recovery_progress() {
1135 local recovery_progress
1136 recovery_progress+=".recovering_keys_per_sec + "
1137 recovery_progress+=".recovering_bytes_per_sec + "
1138 recovery_progress+=".recovering_objects_per_sec"
1139 local progress=$(ceph --format json status 2>/dev/null | \
1140 jq -r ".pgmap | $recovery_progress")
1141 test "$progress" != null
1142}
1143
1144function test_get_is_making_recovery_progress() {
1145 local dir=$1
1146
1147 setup $dir || return 1
1148 run_mon $dir a || return 1
1149 run_mgr $dir x || return 1
1150 ! get_is_making_recovery_progress || return 1
1151 teardown $dir || return 1
1152}
1153
1154#######################################################################
1155
1156##
1157# Return the number of active+clean PGs in the cluster. A PG is counted if
1158# ceph pg dump pgs reports it both **active** and **clean** and
1159# not **stale**.
1160#
1161# @param STDOUT the number of active PGs
1162# @return 0 on success, 1 on error
1163#
1164function get_num_active_clean() {
1165 local expression
1166 expression+="select(contains(\"active\") and contains(\"clean\")) | "
1167 expression+="select(contains(\"stale\") | not)"
1168 ceph --format json pg dump pgs 2>/dev/null | \
1169 jq "[.[] | .state | $expression] | length"
1170}
1171
1172function test_get_num_active_clean() {
1173 local dir=$1
1174
1175 setup $dir || return 1
1176 run_mon $dir a --osd_pool_default_size=1 || return 1
1177 run_mgr $dir x || return 1
1178 run_osd $dir 0 || return 1
1179 create_rbd_pool || return 1
1180 wait_for_clean || return 1
1181 local num_active_clean=$(get_num_active_clean)
1182 test "$num_active_clean" = $PG_NUM || return 1
1183 teardown $dir || return 1
1184}
1185
1186#######################################################################
1187
1188##
1189# Return the number of PGs in the cluster, according to
1190# ceph pg dump pgs.
1191#
1192# @param STDOUT the number of PGs
1193# @return 0 on success, 1 on error
1194#
1195function get_num_pgs() {
1196 ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
1197}
1198
1199function test_get_num_pgs() {
1200 local dir=$1
1201
1202 setup $dir || return 1
1203 run_mon $dir a --osd_pool_default_size=1 || return 1
1204 run_mgr $dir x || return 1
1205 run_osd $dir 0 || return 1
1206 create_rbd_pool || return 1
1207 wait_for_clean || return 1
1208 local num_pgs=$(get_num_pgs)
1209 test "$num_pgs" -gt 0 || return 1
1210 teardown $dir || return 1
1211}
1212
1213#######################################################################
1214
1215##
1216# Return the OSD ids in use by at least one PG in the cluster (either
1217# in the up or the acting set), according to ceph pg dump pgs. Each
1218# OSD id appears as many times as it is used in up and acting sets.
1219# If an OSD id is in both the up and the acting set of a given PG, it
1220# appears twice.
1221#
1222# @param STDOUT a sorted list of OSD ids
1223# @return 0 on success, 1 on error
1224#
1225function get_osd_id_used_by_pgs() {
1226 ceph --format json pg dump pgs 2>/dev/null | jq '.[] | .up[], .acting[]' | sort
1227}
1228
1229function test_get_osd_id_used_by_pgs() {
1230 local dir=$1
1231
1232 setup $dir || return 1
1233 run_mon $dir a --osd_pool_default_size=1 || return 1
1234 run_mgr $dir x || return 1
1235 run_osd $dir 0 || return 1
1236 create_rbd_pool || return 1
1237 wait_for_clean || return 1
1238 local osd_ids=$(get_osd_id_used_by_pgs | uniq)
1239 test "$osd_ids" = "0" || return 1
1240 teardown $dir || return 1
1241}
1242
1243#######################################################################
1244
1245##
1246# Wait until the OSD **id** shows **count** times in the
1247# PGs (see get_osd_id_used_by_pgs for more information about
1248# how OSD ids are counted).
1249#
1250# @param id the OSD id
1251# @param count the number of times it must appear in the PGs
1252# @return 0 on success, 1 on error
1253#
1254function wait_osd_id_used_by_pgs() {
1255 local id=$1
1256 local count=$2
1257
1258 status=1
1259 for ((i=0; i < $TIMEOUT / 5; i++)); do
1260 echo $i
1261 if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then
1262 sleep 5
1263 else
1264 status=0
1265 break
1266 fi
1267 done
1268 return $status
1269}
1270
1271function test_wait_osd_id_used_by_pgs() {
1272 local dir=$1
1273
1274 setup $dir || return 1
1275 run_mon $dir a --osd_pool_default_size=1 || return 1
1276 run_mgr $dir x || return 1
1277 run_osd $dir 0 || return 1
1278 create_rbd_pool || return 1
1279 wait_for_clean || return 1
1280 wait_osd_id_used_by_pgs 0 8 || return 1
1281 ! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1
1282 teardown $dir || return 1
1283}
1284
1285#######################################################################
1286
1287##
1288# Return the date and time of the last completed scrub for **pgid**,
1289# as reported by ceph pg dump pgs. Note that a repair also sets this
1290# date.
1291#
1292# @param pgid the id of the PG
1293# @param STDOUT the date and time of the last scrub
1294# @return 0 on success, 1 on error
1295#
1296function get_last_scrub_stamp() {
1297 local pgid=$1
1298 local sname=${2:-last_scrub_stamp}
1299 ceph --format json pg dump pgs 2>/dev/null | \
1300 jq -r ".[] | select(.pgid==\"$pgid\") | .$sname"
1301}
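#
# Illustrative use (added for clarity; the second argument selects the
# stamp field, as done by pg_deep_scrub below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# local stamp=$(get_last_scrub_stamp 1.0)
# local deep_stamp=$(get_last_scrub_stamp 1.0 last_deep_scrub_stamp)
# ~~~~~~~~~~~~~~~~
#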
1302
1303function test_get_last_scrub_stamp() {
1304 local dir=$1
1305
1306 setup $dir || return 1
1307 run_mon $dir a --osd_pool_default_size=1 || return 1
1308 run_mgr $dir x || return 1
1309 run_osd $dir 0 || return 1
1310 create_rbd_pool || return 1
1311 wait_for_clean || return 1
1312 stamp=$(get_last_scrub_stamp 1.0)
1313 test -n "$stamp" || return 1
1314 teardown $dir || return 1
1315}
1316
1317#######################################################################
1318
1319##
1320# Predicate checking if the cluster is clean, i.e. all of its PGs are
1321# in a clean state (see get_num_active_clean for a definition).
1322#
1323# @return 0 if the cluster is clean, 1 otherwise
1324#
1325function is_clean() {
1326 num_pgs=$(get_num_pgs)
1327 test $num_pgs != 0 || return 1
1328 test $(get_num_active_clean) = $num_pgs || return 1
1329}
1330
1331function test_is_clean() {
1332 local dir=$1
1333
1334 setup $dir || return 1
1335 run_mon $dir a --osd_pool_default_size=1 || return 1
1336 run_mgr $dir x || return 1
1337 run_osd $dir 0 || return 1
1338 create_rbd_pool || return 1
1339 wait_for_clean || return 1
1340 is_clean || return 1
1341 teardown $dir || return 1
1342}
1343
1344#######################################################################
1345
1346calc() { awk "BEGIN{print $*}"; }
1347
1348##
1349# Return a list of increasingly larger numbers whose total is
1350# **timeout** seconds. It can be used to sleep for a short delay
1351# while waiting for an event on a fast machine. But if the machine is
1352# running very slowly, the larger delays avoid stressing it even
1353# further or spamming the logs.
1354#
1355# @param timeout sum of all delays, in seconds
1356# @return a list of sleep delays
1357#
1358function get_timeout_delays() {
1359 local trace=$(shopt -q -o xtrace && echo true || echo false)
1360 $trace && shopt -u -o xtrace
1361 local timeout=$1
1362 local first_step=${2:-1}
1363
1364 local i
1365 local total="0"
1366 i=$first_step
1367 while test "$(calc $total + $i \<= $timeout)" = "1"; do
1368 echo -n "$(calc $i) "
1369 total=$(calc $total + $i)
1370 i=$(calc $i \* 2)
1371 done
1372 if test "$(calc $total \< $timeout)" = "1"; then
1373 echo -n "$(calc $timeout - $total) "
1374 fi
1375 $trace && shopt -s -o xtrace
1376}
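#
# Illustrative output (added for clarity; the values are taken from
# test_get_timeout_delays below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# get_timeout_delays 7      # prints "1 2 4 "
# get_timeout_delays 20 .1  # prints "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 "
# ~~~~~~~~~~~~~~~~
#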
1377
1378function test_get_timeout_delays() {
1379 test "$(get_timeout_delays 1)" = "1 " || return 1
1380 test "$(get_timeout_delays 5)" = "1 2 2 " || return 1
1381 test "$(get_timeout_delays 6)" = "1 2 3 " || return 1
1382 test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
1383 test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1
1384 test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1
1385 test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1
1386 test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1
1387 test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1
1388 test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1
1389 test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1
1390}
1391
1392#######################################################################
1393
1394##
1395# Wait until the cluster becomes clean, or fail if it does not make
1396# progress for $TIMEOUT seconds.
1397# Progress is measured either via the **get_is_making_recovery_progress**
1398# predicate or if the number of clean PGs changes (as returned by get_num_active_clean)
1399#
1400# @return 0 if the cluster is clean, 1 otherwise
1401#
1402function wait_for_clean() {
1403 local num_active_clean=-1
1404 local cur_active_clean
1405 local -a delays=($(get_timeout_delays $TIMEOUT .1))
1406 local -i loop=0
1407
1408 flush_pg_stats || return 1
1409 while test $(get_num_pgs) == 0 ; do
1410 sleep 1
1411 done
1412
1413 while true ; do
1414 # Comparing get_num_active_clean & get_num_pgs is used to determine
1415 # if the cluster is clean. That's almost an inline of is_clean() to
1416 # get more performance by avoiding multiple calls of get_num_active_clean.
1417 cur_active_clean=$(get_num_active_clean)
1418 test $cur_active_clean = $(get_num_pgs) && break
1419 if test $cur_active_clean != $num_active_clean ; then
1420 loop=0
1421 num_active_clean=$cur_active_clean
1422 elif get_is_making_recovery_progress ; then
1423 loop=0
1424 elif (( $loop >= ${#delays[*]} )) ; then
1425 ceph report
1426 return 1
1427 fi
1428 sleep ${delays[$loop]}
1429 loop+=1
1430 done
1431 return 0
1432}
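#
# Illustrative use (added for clarity; overriding TIMEOUT for a single
# call follows the pattern used by the tests in this file):
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_clean || return 1
# ! TIMEOUT=1 wait_for_clean || return 1  # expect failure on an unclean cluster
# ~~~~~~~~~~~~~~~~
#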
1433
1434function test_wait_for_clean() {
1435 local dir=$1
1436
1437 setup $dir || return 1
1438 run_mon $dir a --osd_pool_default_size=1 || return 1
1439 run_mgr $dir x || return 1
1440 create_rbd_pool || return 1
1441 ! TIMEOUT=1 wait_for_clean || return 1
1442 run_osd $dir 0 || return 1
1443 wait_for_clean || return 1
1444 teardown $dir || return 1
1445}
1446
1447#######################################################################
1448
1449##
1450# Wait until the cluster becomes HEALTH_OK again, or fail if it does not
1451# make progress for $TIMEOUT seconds.
1452#
1453# @return 0 if the cluster is HEALTHY, 1 otherwise
1454#
1455function wait_for_health() {
1456 local grepstr=$1
1457 local -a delays=($(get_timeout_delays $TIMEOUT .1))
1458 local -i loop=0
1459
1460 while ! ceph health detail | grep "$grepstr" ; do
1461 if (( $loop >= ${#delays[*]} )) ; then
1462 ceph health detail
1463 return 1
1464 fi
1465 sleep ${delays[$loop]}
1466 loop+=1
1467 done
1468}
1469
1470function wait_for_health_ok() {
1471 wait_for_health "HEALTH_OK" || return 1
1472}
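#
# Illustrative use (added for clarity; wait_for_health takes the string
# to grep for in ceph health detail):
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_health "HEALTH_OK" || return 1
# wait_for_health_ok || return 1   # equivalent shortcut
# ~~~~~~~~~~~~~~~~
#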
1473
1474function test_wait_for_health_ok() {
1475 local dir=$1
1476
1477 setup $dir || return 1
1478 run_mon $dir a --osd_pool_default_size=1 --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
1479 run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
1480 run_osd $dir 0 || return 1
1481 kill_daemons $dir TERM osd || return 1
1482 ! TIMEOUT=1 wait_for_health_ok || return 1
1483 activate_osd $dir 0 || return 1
1484 wait_for_health_ok || return 1
1485 teardown $dir || return 1
1486}
1487
1488
1489#######################################################################
1490
1491##
1492# Run repair on **pgid** and wait until it completes. The repair
1493# function will fail if repair does not complete within $TIMEOUT
1494# seconds.
1495#
1496# @param pgid the id of the PG
1497# @return 0 on success, 1 on error
1498#
1499function repair() {
1500 local pgid=$1
1501 local last_scrub=$(get_last_scrub_stamp $pgid)
1502 ceph pg repair $pgid
1503 wait_for_scrub $pgid "$last_scrub"
1504}
1505
1506function test_repair() {
1507 local dir=$1
1508
1509 setup $dir || return 1
1510 run_mon $dir a --osd_pool_default_size=1 || return 1
1511 run_mgr $dir x || return 1
1512 run_osd $dir 0 || return 1
1513 create_rbd_pool || return 1
1514 wait_for_clean || return 1
1515 repair 1.0 || return 1
1516 kill_daemons $dir KILL osd || return 1
1517 ! TIMEOUT=1 repair 1.0 || return 1
1518 teardown $dir || return 1
1519}
1520#######################################################################
1521
1522##
1523# Run scrub on **pgid** and wait until it completes. The pg_scrub
1524# function will fail if the scrub does not complete within $TIMEOUT
1525# seconds. The pg_scrub is complete whenever the
1526# **get_last_scrub_stamp** function reports a timestamp different from
1527# the one stored before starting the scrub.
1528#
1529# @param pgid the id of the PG
1530# @return 0 on success, 1 on error
1531#
1532function pg_scrub() {
1533 local pgid=$1
1534 local last_scrub=$(get_last_scrub_stamp $pgid)
1535 ceph pg scrub $pgid
1536 wait_for_scrub $pgid "$last_scrub"
1537}
1538
1539function pg_deep_scrub() {
1540 local pgid=$1
1541 local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
1542 ceph pg deep-scrub $pgid
1543 wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
1544}
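#
# Illustrative use (added for clarity; pgid 1.0 is the one used by the
# tests in this file):
#
# ~~~~~~~~~~~~~~~~{.sh}
# pg_scrub 1.0 || return 1
# pg_deep_scrub 1.0 || return 1
# ~~~~~~~~~~~~~~~~
#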
1545
1546function test_pg_scrub() {
1547 local dir=$1
1548
1549 setup $dir || return 1
1550 run_mon $dir a --osd_pool_default_size=1 || return 1
1551 run_mgr $dir x || return 1
1552 run_osd $dir 0 || return 1
1553 create_rbd_pool || return 1
1554 wait_for_clean || return 1
1555 pg_scrub 1.0 || return 1
1556 kill_daemons $dir KILL osd || return 1
1557 ! TIMEOUT=1 pg_scrub 1.0 || return 1
1558 teardown $dir || return 1
1559}
1560
1561#######################################################################
1562
1563##
1564# Run the *command* and expect it to fail (i.e. return a non zero status).
1565# The output (stderr and stdout) is stored in a temporary file in *dir*
1566# and is expected to contain the string *expected*.
1567#
1568# Return 0 if the command failed and the string was found. Otherwise
1569# return 1 and cat the full output of the command on stderr for debug.
1570#
1571# @param dir temporary directory to store the output
1572# @param expected string to look for in the output
1573# @param command ... the command and its arguments
1574# @return 0 on success, 1 on error
1575#
1576
1577function expect_failure() {
1578 local dir=$1
1579 shift
1580 local expected="$1"
1581 shift
1582 local success
1583
1584 if "$@" > $dir/out 2>&1 ; then
1585 success=true
1586 else
1587 success=false
1588 fi
1589
1590 if $success || ! grep --quiet "$expected" $dir/out ; then
1591 cat $dir/out >&2
1592 return 1
1593 else
1594 return 0
1595 fi
1596}
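#
# Illustrative use (added for clarity; mirrors test_expect_failure below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
# ~~~~~~~~~~~~~~~~
#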
1597
1598function test_expect_failure() {
1599 local dir=$1
1600
1601 setup $dir || return 1
1602 expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
1603 # the command did not fail
1604 ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
1605 grep --quiet FAIL $dir/out || return 1
1606 # the command failed but the output does not contain the expected string
1607 ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
1608 ! grep --quiet FAIL $dir/out || return 1
1609 teardown $dir || return 1
1610}
1611
1612#######################################################################
1613
1614##
1615# Given the *last_scrub*, wait for scrub to happen on **pgid**. It
1616# will fail if scrub does not complete within $TIMEOUT seconds. The
1617# scrub is complete whenever the **get_last_scrub_stamp** function
1618# reports a timestamp different from the one given in argument.
1619#
1620# @param pgid the id of the PG
1621# @param last_scrub timestamp of the last scrub for *pgid*
1622# @return 0 on success, 1 on error
1623#
1624function wait_for_scrub() {
1625 local pgid=$1
1626 local last_scrub="$2"
1627 local sname=${3:-last_scrub_stamp}
1628
1629 for ((i=0; i < $TIMEOUT; i++)); do
1630 if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
1631 return 0
1632 fi
1633 sleep 1
1634 done
1635 return 1
1636}
1637
1638function test_wait_for_scrub() {
1639 local dir=$1
1640
1641 setup $dir || return 1
1642 run_mon $dir a --osd_pool_default_size=1 || return 1
1643 run_mgr $dir x || return 1
1644 run_osd $dir 0 || return 1
1645 create_rbd_pool || return 1
1646 wait_for_clean || return 1
1647 local pgid=1.0
1648 ceph pg repair $pgid
1649 local last_scrub=$(get_last_scrub_stamp $pgid)
1650 wait_for_scrub $pgid "$last_scrub" || return 1
1651 kill_daemons $dir KILL osd || return 1
1652 last_scrub=$(get_last_scrub_stamp $pgid)
1653 ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
1654 teardown $dir || return 1
1655}
1656
1657#######################################################################
1658
1659##
1660# Return 0 if the erasure code *plugin* is available, 1 otherwise.
1661#
1662# @param plugin erasure code plugin
1663# @return 0 on success, 1 on error
1664#
1665
1666function erasure_code_plugin_exists() {
1667 local plugin=$1
1668 local status
1669 local grepstr
1670 local s
1671 case `uname` in
1672 FreeBSD) grepstr="Cannot open.*$plugin" ;;
1673 *) grepstr="$plugin.*No such file" ;;
1674 esac
1675
1676 s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
1677 local status=$?
1678 if [ $status -eq 0 ]; then
1679 ceph osd erasure-code-profile rm TESTPROFILE
1680 elif ! echo $s | grep --quiet "$grepstr" ; then
1681 status=1
1682 # display why the string was rejected.
1683 echo $s
1684 fi
1685 return $status
1686}
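#
# Illustrative use (added for clarity; mirrors test_erasure_code_plugin_exists
# below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# erasure_code_plugin_exists jerasure || return 1
# ! erasure_code_plugin_exists FAKE || return 1
# ~~~~~~~~~~~~~~~~
#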
1687
1688function test_erasure_code_plugin_exists() {
1689 local dir=$1
1690
1691 setup $dir || return 1
1692 run_mon $dir a || return 1
1693 run_mgr $dir x || return 1
1694 erasure_code_plugin_exists jerasure || return 1
1695 ! erasure_code_plugin_exists FAKE || return 1
1696 teardown $dir || return 1
1697}
1698
1699#######################################################################
1700
1701##
1702# Display all log files from **dir** on stdout.
1703#
1704# @param dir directory in which all data is stored
1705#
1706
1707function display_logs() {
1708 local dir=$1
1709
1710 find $dir -maxdepth 1 -name '*.log' | \
1711 while read file ; do
1712 echo "======================= $file"
1713 cat $file
1714 done
1715}
1716
1717function test_display_logs() {
1718 local dir=$1
1719
1720 setup $dir || return 1
1721 run_mon $dir a || return 1
1722 kill_daemons $dir || return 1
1723 display_logs $dir > $dir/log.out
1724 grep --quiet mon.a.log $dir/log.out || return 1
1725 teardown $dir || return 1
1726}
1727
1728#######################################################################
1729##
1730# Spawn a command in background and save the pid in the variable name
1731# passed in argument. To make the output easier to read, the output is
1732# prepended with the process id.
1733#
1734# Example:
1735# pids1=""
1736# run_in_background pids1 bash -c 'sleep 1; exit 1'
1737#
1738# @param pid_variable the variable name (not value) where the pids will be stored
1739# @param ... the command to execute
1740# @return only the pid_variable output should be considered and used with **wait_background**
1741#
1742function run_in_background() {
1743 local pid_variable=$1
1744 shift
1745 # Execute the command and prepend the output with its pid
1746 # We make sure to return the exit status of the command and not the sed one.
94b18763 1747 ("$@" |& sed 's/^/'$$': /'; return "${PIPESTATUS[0]}") >&2 &
7c673cae
FG
1748 eval "$pid_variable+=\" $!\""
1749}
1750
1751function save_stdout {
1752 local out="$1"
1753 shift
1754 "$@" > "$out"
1755}
1756
1757function test_run_in_background() {
1758 local pids
1759 run_in_background pids sleep 1
1760 run_in_background pids sleep 1
1761 test $(echo $pids | wc -w) = 2 || return 1
1762 wait $pids || return 1
1763}
1764
1765#######################################################################
1766##
1767# Wait for pids running in background to complete.
1768# This function is usually used after a **run_in_background** call.
1769# Example:
1770# pids1=""
1771# run_in_background pids1 bash -c 'sleep 1; exit 1'
1772# wait_background pids1
1773#
1774# @param pids The variable name that contains the active PIDS. Set to empty at the end of the function.
1775# @return 1 if at least one process exited in error, 0 otherwise
1776#
1777function wait_background() {
1778 # We extract the PIDS from the variable name
1779 pids=${!1}
1780
1781 return_code=0
1782 for pid in $pids; do
1783 if ! wait $pid; then
1784 # If one process failed then return 1
1785 return_code=1
1786 fi
1787 done
1788
1789 # We empty the variable to report that all processes have ended
1790 eval "$1=''"
1791
1792 return $return_code
1793}
1794
1795
1796function test_wait_background() {
1797 local pids=""
1798 run_in_background pids bash -c "sleep 1; exit 1"
1799 run_in_background pids bash -c "sleep 2; exit 0"
1800 wait_background pids
1801 if [ $? -ne 1 ]; then return 1; fi
1802
1803 run_in_background pids bash -c "sleep 1; exit 0"
1804 run_in_background pids bash -c "sleep 2; exit 0"
1805 wait_background pids
1806 if [ $? -ne 0 ]; then return 1; fi
1807
1808 if [ ! -z "$pids" ]; then return 1; fi
1809}
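
# Taken together, **run_in_background** and **wait_background** let a
# test run several workloads in parallel and fail if any of them fails.
# A sketch (the pool and object names are illustrative):
#
# ~~~~~~~~~~~~~~~~{.sh}
# local pids=""
# for i in 1 2 3 4 ; do
#     run_in_background pids rados --pool rbd put obj-$i /etc/group
# done
# wait_background pids || return 1
# ~~~~~~~~~~~~~~~~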
1810
31f18b77
FG
1811function flush_pg_stats()
1812{
1813 local timeout=${1:-$TIMEOUT}
1814
1815 ids=`ceph osd ls`
1816 seqs=''
1817 for osd in $ids; do
1818 seq=`ceph tell osd.$osd flush_pg_stats`
1819 seqs="$seqs $osd-$seq"
1820 done
1821
1822 for s in $seqs; do
1823 osd=`echo $s | cut -d - -f 1`
1824 seq=`echo $s | cut -d - -f 2`
1825 echo "waiting osd.$osd seq $seq"
1826 while test $(ceph osd last-stat-seq $osd) -lt $seq; do
1827 sleep 1
1828 if [ $((timeout--)) -eq 0 ]; then
1829 return 1
1830 fi
1831 done
1832 done
1833}
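
# OSDs report pg stats to the mgr asynchronously, so a ceph df or
# ceph pg dump issued right after some I/O may not reflect it yet.
# A sketch of the usual pattern (the rbd pool is assumed to exist):
#
# ~~~~~~~~~~~~~~~~{.sh}
# rados --pool rbd put obj /etc/group
# flush_pg_stats || return 1
# ceph pg dump pgs
# ~~~~~~~~~~~~~~~~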
1834
1835function test_flush_pg_stats()
1836{
1837 local dir=$1
1838
1839 setup $dir || return 1
1840 run_mon $dir a --osd_pool_default_size=1 || return 1
1841 run_mgr $dir x || return 1
1842 run_osd $dir 0 || return 1
c07f9fc5 1843 create_rbd_pool || return 1
31f18b77 1844 rados -p rbd put obj /etc/group
3a9019d9 1845 flush_pg_stats || return 1
31f18b77
FG
1846 local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
1847 raw_bytes_used=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
1848 bytes_used=`ceph df detail --format=json | jq "$jq_filter.bytes_used"`
1849 test $raw_bytes_used -gt 0 || return 1
1850 test $raw_bytes_used == $bytes_used || return 1
b5b8bbf5 1851 teardown $dir
31f18b77
FG
1852}
1853
7c673cae
FG
1854#######################################################################
1855
1856##
1857# Call the **run** function (which must be defined by the caller) with
1858# the **dir** argument followed by the caller argument list.
1859#
1860# If the **run** function returns on error, all logs found in **dir**
1861# are displayed for diagnostic purposes.
1862#
1863# The **teardown** function is called when the **run** function returns
1864# (on success or on error), to clean up leftovers. CEPH_CONF is set
1865# to /dev/null and CEPH_ARGS is unset so that the tests are protected from
1866# external interference.
1867#
1868# It is the responsibility of the **run** function to call the
1869# **setup** function to prepare the test environment (create a temporary
1870# directory etc.).
1871#
1872# The shell is configured (via PS4) to display the file, line number and
1873# function whenever a statement is executed, to help debugging.
1874#
1875# @param dir directory in which all data is stored
1876# @param ... arguments passed transparently to **run**
1877# @return 0 on success, 1 on error
1878#
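# Example of a standalone test script built on these helpers (a
# sketch; the port, script name and cluster layout are illustrative,
# and **main** prefixes the directory with td/ and calls **teardown**
# itself):
#
# ~~~~~~~~~~~~~~~~{.sh}
# function run() {
#     local dir=$1
#     shift
#     export CEPH_MON="127.0.0.1:7199" # a port no other test uses
#     export CEPH_ARGS
#     CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
#     CEPH_ARGS+="--mon-host=$CEPH_MON "
#     setup $dir || return 1
#     run_mon $dir a || return 1
#     run_mgr $dir x || return 1
#     run_osd $dir 0 || return 1
#     create_rbd_pool || return 1
#     wait_for_clean || return 1
# }
#
# main my-standalone-test "$@"
# ~~~~~~~~~~~~~~~~
#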
1879function main() {
1880 local dir=td/$1
1881 shift
1882
1883 shopt -s -o xtrace
1884 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
1885
1886 export PATH=${CEPH_BUILD_VIRTUALENV}/ceph-disk-virtualenv/bin:${CEPH_BUILD_VIRTUALENV}/ceph-detect-init-virtualenv/bin:.:$PATH # make sure programs from sources are preferred
1887 #export PATH=$CEPH_ROOT/src/ceph-disk/virtualenv/bin:$CEPH_ROOT/src/ceph-detect-init/virtualenv/bin:.:$PATH # make sure program from sources are preferred
1888
1889 export CEPH_CONF=/dev/null
1890 unset CEPH_ARGS
1891
1892 local code
1893 if run $dir "$@" ; then
1894 code=0
1895 else
7c673cae
FG
1896 code=1
1897 fi
b5b8bbf5 1898 teardown $dir $code || return 1
7c673cae
FG
1899 return $code
1900}
1901
1902#######################################################################
1903
1904function run_tests() {
1905 shopt -s -o xtrace
1906 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
1907
1908 export PATH=${CEPH_BUILD_VIRTUALENV}/ceph-disk-virtualenv/bin:${CEPH_BUILD_VIRTUALENV}/ceph-detect-init-virtualenv/bin:.:$PATH # make sure programs from sources are preferred
1909 #export PATH=$CEPH_ROOT/src/ceph-disk/virtualenv/bin:$CEPH_ROOT/src/ceph-detect-init/virtualenv/bin:.:$PATH # make sure program from sources are preferred
1910
1911 export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
1912 export CEPH_ARGS
b5b8bbf5 1913 CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
7c673cae
FG
1914 CEPH_ARGS+="--mon-host=$CEPH_MON "
1915 export CEPH_CONF=/dev/null
1916
1917 local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
1918 local dir=td/ceph-helpers
1919
1920 for func in $funcs ; do
b5b8bbf5
FG
1921 if ! $func $dir; then
1922 teardown $dir 1
1923 return 1
1924 fi
7c673cae
FG
1925 done
1926}
1927
1928if test "$1" = TESTS ; then
1929 shift
1930 run_tests "$@"
b5b8bbf5 1931 exit $?
7c673cae
FG
1932fi
1933
224ce89b
WB
1934# NOTE:
1935# jq only supports --exit-status|-e from version 1.4 onwards, which makes
1936# returning on error waaaay prettier and more straightforward.
1937# However, the current automated upstream build is running with v1.3,
1938# which has no idea what -e is. Hence the convoluted error checking we
1939# need. Sad.
1940# The next time someone changes this code, please check if v1.4 is now
1941# a thing, and, if so, please change these to use -e. Thanks.
1942
1943# jq '.all.supported | select([.[] == "foo"] | any)'
1944function jq_success() {
1945 input="$1"
1946 filter="$2"
1947 expects="\"$3\""
1948
1949 in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g")
1950 filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g")
1951
1952 ret=$(echo "$in_escaped" | jq "$filter_escaped")
1953 if [[ "$ret" == "true" ]]; then
1954 return 0
1955 elif [[ -n "$expects" ]]; then
1956 if [[ "$ret" == "$expects" ]]; then
1957 return 0
1958 fi
1959 fi
1960 return 1
1972}
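
# Example use of **jq_success** (a sketch; the query is illustrative):
#
# ~~~~~~~~~~~~~~~~{.sh}
# jq_success "$(ceph osd dump --format=json)" '.pools | length >= 1' || return 1
# ~~~~~~~~~~~~~~~~
#
# Once jq >= 1.4 can be assumed everywhere, the same check boils down to
# something like: echo "$input" | jq -e "$filter" > /dev/null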
1973
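##
# Inject an error on **objname** in **poolname**, on the OSD serving
# **shard_id** of the object's acting set, so that scrub and read
# paths can be exercised against it. This summary is best effort; the
# admin socket command invoked is inject${which}err, so **which** is
# expected to be a value such as data or mtime.
#
# A sketch (the pool, object and shard are illustrative):
#
# ~~~~~~~~~~~~~~~~{.sh}
# # inject a read error for object SOMETHING on shard 0 of an EC pool
# inject_eio ec data ecpool SOMETHING $dir 0 || return 1
# ~~~~~~~~~~~~~~~~
#
# @param pooltype ec for erasure coded pools, anything else for replicated
# @param which kind of error to inject (e.g. data or mtime)
# @param poolname pool containing the object
# @param objname name of the object to inject the error on
# @param dir directory in which all data is stored
# @param shard_id index in the object's acting set of the OSD to target
# @return 0 on success, 1 on error
#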
b5b8bbf5
FG
1974function inject_eio() {
1975 local pooltype=$1
1976 shift
1977 local which=$1
1978 shift
1979 local poolname=$1
1980 shift
1981 local objname=$1
1982 shift
1983 local dir=$1
1984 shift
1985 local shard_id=$1
1986 shift
1987
1988 local -a initial_osds=($(get_osds $poolname $objname))
1989 local osd_id=${initial_osds[$shard_id]}
1990 if [ "$pooltype" != "ec" ]; then
1991 shard_id=""
1992 fi
1993 set_config osd $osd_id filestore_debug_inject_read_err true || return 1
1994 local loop=0
1995 while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
1996 inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
1997 loop=$(expr $loop + 1)
1998 if [ $loop = "10" ]; then
1999 return 1
2000 fi
2001 sleep 1
2002 done
2003}
2004
7c673cae 2005# Local Variables:
c07f9fc5 2006# compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"
7c673cae 2007# End: