#!/usr/bin/env bash
#
# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
# Copyright (C) 2014 Federico Gimenez <fgimenez@coit.es>
#
# Author: Loic Dachary <loic@dachary.org>
# Author: Federico Gimenez <fgimenez@coit.es>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
TIMEOUT=300
WAIT_FOR_CLEAN_TIMEOUT=90
MAX_TIMEOUT=15
PG_NUM=4
TMPDIR=${TMPDIR:-/tmp}
CEPH_BUILD_VIRTUALENV=${TMPDIR}
TESTDIR=${TESTDIR:-${TMPDIR}}

if type xmlstarlet > /dev/null 2>&1; then
    XMLSTARLET=xmlstarlet
elif type xml > /dev/null 2>&1; then
    XMLSTARLET=xml
else
    echo "Missing xmlstarlet binary!"
    exit 1
fi

if [ `uname` = FreeBSD ]; then
    SED=gsed
    AWK=gawk
    DIFFCOLOPTS=""
    KERNCORE="kern.corefile"
else
    SED=sed
    AWK=awk
    termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
    if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
        termwidth="-W ${termwidth}"
    fi
    DIFFCOLOPTS="-y $termwidth"
    KERNCORE="kernel.core_pattern"
fi

EXTRA_OPTS=""

#! @file ceph-helpers.sh
# @brief Toolbox to manage Ceph cluster dedicated to testing
#
# Example use case:
#
# ~~~~~~~~~~~~~~~~{.sh}
# source ceph-helpers.sh
#
# function mytest() {
#     # cleanup leftovers and reset mydir
#     setup mydir
#     # create a cluster with one monitor and three osds
#     run_mon mydir a
#     run_osd mydir 0
#     run_osd mydir 2
#     run_osd mydir 3
#     # put and get an object
#     rados --pool rbd put GROUP /etc/group
#     rados --pool rbd get GROUP /tmp/GROUP
#     # stop the cluster and cleanup the directory
#     teardown mydir
# }
# ~~~~~~~~~~~~~~~~
#
# The focus is on simplicity and efficiency, in the context of
# functional tests. The output is intentionally very verbose
# and functions return as soon as an error is found. The caller
# is also expected to abort on the first error so that debugging
# can be done by looking at the end of the output.
#
# Each function is documented, implemented and tested independently.
# When modifying a helper, the test and the documentation are
# expected to be updated and it is easier if they are collocated. A
# test for a given function can be run with
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS test_get_osds
# ~~~~~~~~~~~~~~~~
#
# and all the tests (i.e. all functions matching test_*) are run
# with:
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS
# ~~~~~~~~~~~~~~~~
#
# A test function takes a single argument : the directory dedicated
# to the tests. It is expected to not create any file outside of this
# directory and remove it entirely when it completes successfully.
#


function get_asok_dir() {
    if [ -n "$CEPH_ASOK_DIR" ]; then
        echo "$CEPH_ASOK_DIR"
    else
        echo ${TMPDIR:-/tmp}/ceph-asok.$$
    fi
}

function get_asok_path() {
    local name=$1
    if [ -n "$name" ]; then
        echo $(get_asok_dir)/ceph-$name.asok
    else
        echo $(get_asok_dir)/\$cluster-\$name.asok
    fi
}
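
##
# Usage sketch: query a running daemon through the admin socket path
# returned by get_asok_path (assumes a mon.a started by run_mon in the
# current environment, as done by several tests in this file).
#
# ~~~~~~~~~~~~~~~~{.sh}
# CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
#     config get osd_pool_default_size
# ~~~~~~~~~~~~~~~~
#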
##
# Cleanup any leftovers found in **dir** via **teardown**
# and reset **dir** as an empty environment.
#
# @param dir path name of the environment
# @return 0 on success, 1 on error
#
function setup() {
    local dir=$1
    teardown $dir || return 1
    mkdir -p $dir
    mkdir -p $(get_asok_dir)
    if [ $(ulimit -n) -le 1024 ]; then
        ulimit -n 4096 || return 1
    fi
    if [ -z "$LOCALRUN" ]; then
        trap "teardown $dir 1" TERM HUP INT
    fi
}

function test_setup() {
    local dir=$1
    setup $dir || return 1
    test -d $dir || return 1
    setup $dir || return 1
    test -d $dir || return 1
    teardown $dir
}

#######################################################################

##
# Kill all daemons for which a .pid file exists in **dir** and remove
# **dir**. If the file system in which **dir** resides is btrfs, delete
# all subvolumes that relate to it.
#
# @param dir path name of the environment
# @param dumplogs pass "1" to dump logs, otherwise logs are only dumped
#                 if cores are found
# @return 0 on success, 1 on error
#
function teardown() {
    local dir=$1
    local dumplogs=$2
    kill_daemons $dir KILL
    if [ `uname` != FreeBSD ] \
        && [ $(stat -f -c '%T' .) == "btrfs" ]; then
        __teardown_btrfs $dir
    fi
    local cores="no"
    local pattern="$(sysctl -n $KERNCORE)"
    # See if we have apport core handling
    if [ "${pattern:0:1}" = "|" ]; then
        # TODO: Where can we get the dumps?
        # Not sure where the dumps really are so this will look in the CWD
        pattern=""
    fi
    # Locally core file names start with "core"; on teuthology they end with "core"
    if ls $(dirname "$pattern") | grep -q '^core\|core$' ; then
        cores="yes"
        if [ -n "$LOCALRUN" ]; then
            mkdir /tmp/cores.$$ 2> /dev/null || true
            for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
                mv $i /tmp/cores.$$
            done
        fi
    fi
    if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
        if [ -n "$LOCALRUN" ]; then
            display_logs $dir
        else
            # Move logs to where Teuthology will archive them
            mkdir -p $TESTDIR/archive/log
            mv $dir/*.log $TESTDIR/archive/log
        fi
    fi
    rm -fr $dir
    rm -rf $(get_asok_dir)
    if [ "$cores" = "yes" ]; then
        echo "ERROR: Failure due to cores found"
        if [ -n "$LOCALRUN" ]; then
            echo "Find saved core files in /tmp/cores.$$"
        fi
        return 1
    fi
    return 0
}

function __teardown_btrfs() {
    local btrfs_base_dir=$1
    local btrfs_root=$(df -P . | tail -1 | $AWK '{print $NF}')
    local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list -t . | $AWK '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/$btrfs_dir")
    for subvolume in $btrfs_dirs; do
       sudo btrfs subvolume delete $btrfs_root/$subvolume
    done
}

function test_teardown() {
    local dir=$1
    setup $dir || return 1
    teardown $dir || return 1
    ! test -d $dir || return 1
}

#######################################################################

##
# Sends a signal to a single daemon.
# This is a helper function for kill_daemons
#
# After the daemon is sent **signal**, its actual termination
# will be verified by sending it signal 0. If the daemon is
# still alive, kill_daemon will pause for a few seconds and
# try again. This will repeat for a fixed number of times
# before kill_daemon returns on failure. The list of
# sleep intervals can be specified as **delays** and defaults
# to:
#
#  0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
#
# This sequence is designed to run first a very short sleep time (0.1)
# if the machine is fast enough and the daemon terminates in a fraction of a
# second. The increasing sleep numbers should give plenty of time for
# the daemon to die even on the slowest running machine. If a daemon
# takes more than a few minutes to stop (the sum of all sleep times),
# there probably is no point in waiting longer and a number of things
# are likely to go wrong anyway: better give up and return on error.
#
# @param pidfile path of the file containing the process id to signal
# @param send_signal the signal to send
# @param delays sequence of sleep times before failure
#
function kill_daemon() {
    local pid=$(cat $1)
    local send_signal=$2
    local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
    local exit_code=1
    # In order to try after the last large sleep add 0 at the end so we check
    # one last time before dropping out of the loop
    for try in $delays 0 ; do
         if kill -$send_signal $pid 2> /dev/null ; then
            exit_code=1
         else
            exit_code=0
            break
         fi
         send_signal=0
         sleep $try
    done;
    return $exit_code
}
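
##
# Usage sketch: send TERM to a single osd via its pid file and wait only
# briefly before giving up. The pid file path follows the run_osd naming
# convention below; the delay list is illustrative.
#
# ~~~~~~~~~~~~~~~~{.sh}
# kill_daemon $dir/osd.0.pid TERM "0.1 1 5" || return 1
# ~~~~~~~~~~~~~~~~
#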

function test_kill_daemon() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1

    name_prefix=osd
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # sending signal 0 won't kill the daemon
        # waiting just for one second instead of the default schedule
        # allows us to quickly verify what happens when kill fails
        # to stop the daemon (i.e. it must return false)
        #
        ! kill_daemon $pidfile 0 1 || return 1
        #
        # kill just the osd and verify the mon is still responsive
        #
        kill_daemon $pidfile TERM || return 1
    done

    name_prefix=mgr
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mgr
        #
        kill_daemon $pidfile TERM || return 1
    done

    name_prefix=mon
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mon and verify it cannot be reached
        #
        kill_daemon $pidfile TERM || return 1
        ! timeout 5 ceph status || return 1
    done

    teardown $dir || return 1
}

##
# Kill all daemons for which a .pid file exists in **dir**. Each
# daemon is sent a **signal** and kill_daemons waits up to a few
# minutes for it to exit. By default all daemons are killed. If a
# **name_prefix** is provided, only the daemons for which a pid
# file is found matching the prefix are killed. See run_osd and
# run_mon for more information about the name conventions for
# the pid files.
#
# Send TERM to all daemons : kill_daemons $dir
# Send KILL to all daemons : kill_daemons $dir KILL
# Send KILL to all osds : kill_daemons $dir KILL osd
# Send KILL to osd 1 : kill_daemons $dir KILL osd.1
#
# If a daemon is sent the TERM signal and does not terminate
# within a few minutes, it will still be running even after
# kill_daemons returns.
#
# If all daemons are killed successfully the function returns 0.
# If at least one daemon remains, this is treated as an
# error and the function returns 1.
#
# @param dir path name of the environment
# @param signal name of the first signal (defaults to TERM)
# @param name_prefix only kill matching daemons (defaults to all)
# @param delays sequence of sleep times before failure
# @return 0 on success, 1 on error
#
function kill_daemons() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local dir=$1
    local signal=${2:-TERM}
    local name_prefix=$3 # optional, osd, mon, osd.1
    local delays=$4 # optional timing
    local status=0
    local pids=""

    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        run_in_background pids kill_daemon $pidfile $signal $delays
    done

    wait_background pids
    status=$?

    $trace && shopt -s -o xtrace
    return $status
}

function test_kill_daemons() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    #
    # sending signal 0 won't kill the daemon
    # waiting just for one second instead of the default schedule
    # allows us to quickly verify what happens when kill fails
    # to stop the daemon (i.e. it must return false)
    #
    ! kill_daemons $dir 0 osd 1 || return 1
    #
    # kill just the osd and verify the mon is still responsive
    #
    kill_daemons $dir TERM osd || return 1
    #
    # kill the mgr
    #
    kill_daemons $dir TERM mgr || return 1
    #
    # kill the mon and verify it cannot be reached
    #
    kill_daemons $dir TERM || return 1
    ! timeout 5 ceph status || return 1
    teardown $dir || return 1
}

#
# return a random TCP port which is not used yet
#
# please note, there is a race if we use this function to pick
# a free port and only later try to bind to it.
#
function get_unused_port() {
    local ip=127.0.0.1
    python3 -c "import socket; s=socket.socket(); s.bind(('$ip', 0)); print(s.getsockname()[1]); s.close()"
}

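#
# Usage sketch (illustrative, not used by the helpers themselves):
# reserve a port for the monitor before building CEPH_ARGS; MON_PORT
# is a hypothetical variable name.
#
# ~~~~~~~~~~~~~~~~{.sh}
# MON_PORT=$(get_unused_port)
# CEPH_ARGS+="--mon-host=127.0.0.1:$MON_PORT "
# ~~~~~~~~~~~~~~~~
#
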
#######################################################################

##
# Run a monitor by the name mon.**id** with data in **dir**/**id**.
# The logs can be found in **dir**/mon.**id**.log and the pid file
# is **dir**/mon.**id**.pid and the admin socket is
# **dir**/**id**/ceph-mon.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-mon --mkfs
# and the ceph-mon daemon.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_mon, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_mon $dir a # spawn a mon and bind port 7018
# run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
#
# If mon_initial_members is not set, the default rbd pool is deleted
# and replaced with a replicated pool with fewer placement groups to
# speed up initialization. If mon_initial_members is set, no attempt
# is made to recreate the rbd pool because it would hang forever,
# waiting for other mons to join.
#
# A **dir**/ceph.conf file is created but not meant to be used by any
# function. It is convenient for debugging a failure with:
#
# ceph --conf **dir**/ceph.conf -s
#
# @param dir path name of the environment
# @param id mon identifier
# @param ... can be any option valid for ceph-mon
# @return 0 on success, 1 on error
#
function run_mon() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mon \
        --id $id \
        --mkfs \
        --mon-data=$data \
        --run-dir=$dir \
        "$@" || return 1

    ceph-mon \
        --id $id \
        --osd-failsafe-full-ratio=.99 \
        --mon-osd-full-ratio=.99 \
        --mon-data-avail-crit=1 \
        --mon-data-avail-warn=5 \
        --paxos-propose-interval=0.1 \
        --osd-crush-chooseleaf-type=0 \
        $EXTRA_OPTS \
        --debug-mon 20 \
        --debug-ms 20 \
        --debug-paxos 20 \
        --chdir= \
        --mon-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --mon-cluster-log-file=$dir/log \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        --mon-allow-pool-delete \
        --mon-osd-backfillfull-ratio .99 \
        "$@" || return 1

    cat > $dir/ceph.conf <<EOF
[global]
fsid = $(get_config mon $id fsid)
mon host = $(get_config mon $id mon_host)
EOF
}

function test_run_mon() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a --mon-initial-members=a || return 1
    ceph mon dump | grep "mon.a" || return 1
    kill_daemons $dir || return 1

    run_mon $dir a --osd_pool_default_size=3 || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1
    create_rbd_pool || return 1
    ceph osd dump | grep "pool 1 'rbd'" || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"3"}' || return 1

    ! CEPH_ARGS='' ceph status || return 1
    CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1

    kill_daemons $dir || return 1

    run_mon $dir a --osd_pool_default_size=1 || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"1"}' || return 1
    kill_daemons $dir || return 1

    CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
        run_mon $dir a || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"2"}' || return 1
    kill_daemons $dir || return 1

    teardown $dir || return 1
}

function create_rbd_pool() {
    ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
    create_pool rbd $PG_NUM || return 1
    rbd pool init rbd
}

function create_pool() {
    ceph osd pool create "$@"
    sleep 1
}

function delete_pool() {
    local poolname=$1
    ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
}

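##
# Usage sketch: create a replicated pool with $PG_NUM placement groups
# and delete it when done (the pool name is illustrative).
#
# ~~~~~~~~~~~~~~~~{.sh}
# create_pool mypool $PG_NUM || return 1
# delete_pool mypool || return 1
# ~~~~~~~~~~~~~~~~
#
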
#######################################################################

function run_mgr() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mgr \
        --id $id \
        $EXTRA_OPTS \
        --osd-failsafe-full-ratio=.99 \
        --debug-mgr 20 \
        --debug-objecter 20 \
        --debug-ms 20 \
        --debug-paxos 20 \
        --chdir= \
        --mgr-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        --mgr-module-path=$(realpath ${CEPH_ROOT}/src/pybind/mgr) \
        "$@" || return 1
}

function run_mds() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mds \
        --id $id \
        $EXTRA_OPTS \
        --debug-mds 20 \
        --debug-objecter 20 \
        --debug-ms 20 \
        --chdir= \
        --mds-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        "$@" || return 1
}

#######################################################################

##
# Create (prepare) and run (activate) an osd by the name osd.**id**
# with data in **dir**/**id**. The logs can be found in
# **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
# the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The run_osd function creates the OSD data directory in the **dir**/**id**
# directory and relies on the activate_osd function to run the daemon.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function run_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --osd-journal=${osd_data}/journal"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len=460"
    ceph_args+=" --osd-max-object-namespace-len=64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
    ceph_args+=" "
    ceph_args+="$@"
    mkdir -p $osd_data

    local uuid=`uuidgen`
    echo "add osd$id $uuid"
    OSD_SECRET=$(ceph-authtool --gen-print-key)
    echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
    ceph osd new $uuid -i $osd_data/new.json
    rm $osd_data/new.json
    ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid

    local key_fn=$osd_data/keyring
    cat > $key_fn<<EOF
[osd.$id]
key = $OSD_SECRET
EOF
    echo adding osd$id key to auth repository
    ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
    echo start osd.$id
    ceph-osd -i $id $ceph_args &

    # If noup is set, then can't wait for this osd
    if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
        return 0
    fi
    wait_for_osd up $id || return 1

}

function run_osd_filestore() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --osd-journal=${osd_data}/journal"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len=460"
    ceph_args+=" --osd-max-object-namespace-len=64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
    ceph_args+=" "
    ceph_args+="$@"
    mkdir -p $osd_data

    local uuid=`uuidgen`
    echo "add osd$id $uuid"
    OSD_SECRET=$(ceph-authtool --gen-print-key)
    echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
    ceph osd new $uuid -i $osd_data/new.json
    rm $osd_data/new.json
    ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid --osd-objectstore=filestore

    local key_fn=$osd_data/keyring
    cat > $key_fn<<EOF
[osd.$id]
key = $OSD_SECRET
EOF
    echo adding osd$id key to auth repository
    ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
    echo start osd.$id
    ceph-osd -i $id $ceph_args &

    # If noup is set, then can't wait for this osd
    if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
        return 0
    fi
    wait_for_osd up $id || return 1


}

function test_run_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    run_osd $dir 1 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"30"}' || return 1

    teardown $dir || return 1
}

#######################################################################

##
# Shutdown and remove all traces of the osd by the name osd.**id**.
#
# The OSD is shutdown with the TERM signal. It is then removed from
# the auth list, crush map, osd map etc and the files associated with
# it are also removed.
#
# @param dir path name of the environment
# @param id osd identifier
# @return 0 on success, 1 on error
#
function destroy_osd() {
    local dir=$1
    local id=$2

    ceph osd out osd.$id || return 1
    kill_daemons $dir TERM osd.$id || return 1
    ceph osd down osd.$id || return 1
    ceph osd purge osd.$id --yes-i-really-mean-it || return 1
    teardown $dir/$id || return 1
    rm -fr $dir/$id
}

function test_destroy_osd() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    destroy_osd $dir 0 || return 1
    ! ceph osd dump | grep "osd.0 " || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run (activate) an osd by the name osd.**id** with data in
# **dir**/**id**. The logs can be found in **dir**/osd.**id**.log,
# the pid file is **dir**/osd.**id**.pid and the admin socket is
# **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to activate_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The activate_osd function expects a valid OSD data directory
# in **dir**/**id**, either just created via run_osd or re-using
# one left by a previous run of ceph-osd. The ceph-osd daemon is
# run directly in the foreground.
#
# The activate_osd function blocks until the monitor reports the osd
# up. If it fails to do so within $TIMEOUT seconds, activate_osd
# fails.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# activate_osd $dir 0 # activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function activate_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --osd-journal=${osd_data}/journal"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len=460"
    ceph_args+=" --osd-max-object-namespace-len=64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
    ceph_args+=" "
    ceph_args+="$@"
    mkdir -p $osd_data

    echo start osd.$id
    ceph-osd -i $id $ceph_args &

    [ "$id" = "$(cat $osd_data/whoami)" ] || return 1

    # If noup is set, then can't wait for this osd
    if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
        return 0
    fi
    wait_for_osd up $id || return 1
}

function test_activate_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    kill_daemons $dir TERM osd || return 1

    activate_osd $dir 0 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    teardown $dir || return 1
}

#######################################################################

##
# Wait until the OSD **id** is either up or down, as specified by
# **state**. It fails after $TIMEOUT seconds.
#
# @param state either up or down
# @param id osd identifier
# @return 0 on success, 1 on error
#
function wait_for_osd() {
    local state=$1
    local id=$2

    status=1
    for ((i=0; i < $TIMEOUT; i++)); do
        echo $i
        if ! ceph osd dump | grep "osd.$id $state"; then
            sleep 1
        else
            status=0
            break
        fi
    done
    return $status
}

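##
# Usage sketch: wait for osd.0 to come up, overriding TIMEOUT in a
# subshell so the global default is left untouched (the value is
# illustrative).
#
# ~~~~~~~~~~~~~~~~{.sh}
# ( TIMEOUT=60 ; wait_for_osd up 0 ) || return 1
# ~~~~~~~~~~~~~~~~
#
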
function test_wait_for_osd() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    wait_for_osd up 0 || return 1
    wait_for_osd up 1 || return 1
    kill_daemons $dir TERM osd.0 || return 1
    wait_for_osd down 0 || return 1
    ( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Display the list of OSD ids supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT white space separated list of OSD ids
# @return 0 on success, 1 on error
#
function get_osds() {
    local poolname=$1
    local objectname=$2

    local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting | .[]')
    # get rid of the trailing space
    echo $osds
}

function test_get_osds() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    create_rbd_pool || return 1
    get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait for the monitor to form quorum (optionally, of size N)
#
# @param timeout duration (lower-bound) to wait for quorum to be formed
# @param quorumsize size of quorum to wait for
# @return 0 on success, 1 on error
#
function wait_for_quorum() {
    local timeout=$1
    local quorumsize=$2

    if [[ -z "$timeout" ]]; then
        timeout=300
    fi

    if [[ -z "$quorumsize" ]]; then
        timeout $timeout ceph mon_status --format=json >&/dev/null || return 1
        return 0
    fi

    no_quorum=1
    wait_until=$((`date +%s` + $timeout))
    while [[ $(date +%s) -lt $wait_until ]]; do
        jqfilter='.quorum | length == '$quorumsize
        jqinput="$(timeout $timeout ceph mon_status --format=json 2>/dev/null)"
        res=$(echo $jqinput | jq "$jqfilter")
        if [[ "$res" == "true" ]]; then
            no_quorum=0
            break
        fi
    done
    return $no_quorum
}
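
##
# Usage sketch: wait up to 300 seconds for a quorum of three monitors
# (both values are illustrative).
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_quorum 300 3 || return 1
# ~~~~~~~~~~~~~~~~
#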

#######################################################################

##
# Return the PG supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT a PG
# @return 0 on success, 1 on error
#
function get_pg() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
}

function test_get_pg() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the value of the **config**, obtained via the config get command
# of the admin socket of **daemon**.**id**.
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param STDOUT the config value
# @return 0 on success, 1 on error
#
function get_config() {
    local daemon=$1
    local id=$2
    local config=$3

    CEPH_ARGS='' \
        ceph --format json daemon $(get_asok_path $daemon.$id) \
            config get $config 2> /dev/null | \
        jq -r ".$config"
}

function test_get_config() {
    local dir=$1

    # override the default config using command line arg and check it
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    test $(get_config mon a osd_pool_default_size) = 1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 --osd_max_scrubs=3 || return 1
    test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Set the **config** to specified **value**, via the config set command
# of the admin socket of **daemon**.**id**
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param value the config value
# @return 0 on success, 1 on error
#
function set_config() {
    local daemon=$1
    local id=$2
    local config=$3
    local value=$4

    test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
               config set $config $value 2> /dev/null | \
           jq 'has("success")') == true
}

function test_set_config() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    set_config mon a ms_crc_header false || return 1
    test $(get_config mon a ms_crc_header) = false || return 1
    set_config mon a ms_crc_header true || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the OSD id of the primary OSD supporting the **objectname**
# stored in **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the primary OSD id
# @return 0 on success, 1 on error
#
function get_primary() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting_primary'
}

function test_get_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    test $(get_primary rbd GROUP) = $osd || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the id of any OSD supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map, except the primary.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the OSD id
# @return 0 on success, 1 on error
#
function get_not_primary() {
    local poolname=$1
    local objectname=$2

    local primary=$(get_primary $poolname $objectname)
    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq ".acting | map(select (. != $primary)) | .[0]"
}

function test_get_not_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local primary=$(get_primary rbd GROUP)
    local not_primary=$(get_not_primary rbd GROUP)
    test $not_primary != $primary || return 1
    test $not_primary = 0 -o $not_primary = 1 || return 1
    teardown $dir || return 1
}

#######################################################################

function _objectstore_tool_nodown() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    ceph-objectstore-tool \
        --data-path $osd_data \
        "$@" || return 1
}

function _objectstore_tool_nowait() {
    local dir=$1
    shift
    local id=$1
    shift

    kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1

    _objectstore_tool_nodown $dir $id "$@" || return 1
    activate_osd $dir $id $ceph_osd_args >&2 || return 1
}

##
# Run ceph-objectstore-tool against the OSD **id** using the data path
# **dir**. The OSD is killed with TERM prior to running
# ceph-objectstore-tool because access to the data path is
# exclusive. The OSD is restarted after the command completes. The
# objectstore_tool returns after all PG are active+clean again.
#
# @param dir the data path of the OSD
# @param id the OSD id
# @param ... arguments to ceph-objectstore-tool
# @param STDIN the input of ceph-objectstore-tool
# @param STDOUT the output of ceph-objectstore-tool
# @return 0 on success, 1 on error
#
# The value of $ceph_osd_args will be passed to restarted osds
#
function objectstore_tool() {
    local dir=$1
    shift
    local id=$1
    shift

    _objectstore_tool_nowait $dir $id "$@" || return 1
    wait_for_clean >&2
}

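##
# Usage sketch: list the objects stored on osd.0 while it is stopped,
# using the standard ceph-objectstore-tool --op list operation.
#
# ~~~~~~~~~~~~~~~~{.sh}
# objectstore_tool $dir 0 --op list || return 1
# ~~~~~~~~~~~~~~~~
#
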
function test_objectstore_tool() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    rados --pool rbd put GROUP /etc/group || return 1
    objectstore_tool $dir $osd GROUP get-bytes | \
        diff - /etc/group
    ! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Predicate checking if there is an ongoing recovery in the
# cluster. If any of the recovering_{keys,bytes,objects}_per_sec
# counters are reported by ceph status, it means recovery is in
# progress.
#
# @return 0 if recovery in progress, 1 otherwise
#
function get_is_making_recovery_progress() {
    local recovery_progress
    recovery_progress+=".recovering_keys_per_sec + "
    recovery_progress+=".recovering_bytes_per_sec + "
    recovery_progress+=".recovering_objects_per_sec"
    local progress=$(ceph --format json status 2>/dev/null | \
        jq -r ".pgmap | $recovery_progress")
    test "$progress" != null
}

function test_get_is_making_recovery_progress() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    ! get_is_making_recovery_progress || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the number of active PGs in the cluster. A PG is counted as
# active if ceph pg dump pgs reports it both **active** and **clean**
# and not **stale**.
#
# @param STDOUT the number of active PGs
# @return 0 on success, 1 on error
#
function get_num_active_clean() {
    local expression
    expression+="select(contains(\"active\") and contains(\"clean\")) | "
    expression+="select(contains(\"stale\") | not)"
    ceph --format json pg dump pgs 2>/dev/null | \
        jq ".pg_stats | [.[] | .state | $expression] | length"
}

function test_get_num_active_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_active_clean=$(get_num_active_clean)
    test "$num_active_clean" = $PG_NUM || return 1
    teardown $dir || return 1
}

##
# Return the number of active or peered PGs in the cluster. A PG matches if
# ceph pg dump pgs reports it is either **active** or **peered** and not
# **stale**.
#
# @param STDOUT the number of active PGs
# @return 0 on success, 1 on error
#
function get_num_active_or_peered() {
    local expression
    expression+="select(contains(\"active\") or contains(\"peered\")) | "
    expression+="select(contains(\"stale\") | not)"
    ceph --format json pg dump pgs 2>/dev/null | \
        jq ".pg_stats | [.[] | .state | $expression] | length"
}

function test_get_num_active_or_peered() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_peered=$(get_num_active_or_peered)
    test "$num_peered" = $PG_NUM || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the number of PGs in the cluster, according to
# ceph pg dump pgs.
#
# @param STDOUT the number of PGs
# @return 0 on success, 1 on error
#
function get_num_pgs() {
    ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
}

function test_get_num_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_pgs=$(get_num_pgs)
    test "$num_pgs" -gt 0 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the OSD ids in use by at least one PG in the cluster (either
# in the up or the acting set), according to ceph pg dump pgs. Every
# OSD id shows up as many times as it is used in the up and acting sets.
# If an OSD id is in both the up and acting set of a given PG, it will
# show up twice.
#
# @param STDOUT a sorted list of OSD ids
# @return 0 on success, 1 on error
#
function get_osd_id_used_by_pgs() {
    ceph --format json pg dump pgs 2>/dev/null | jq '.pg_stats | .[] | .up[], .acting[]' | sort
}

function test_get_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local osd_ids=$(get_osd_id_used_by_pgs | uniq)
    test "$osd_ids" = "0" || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait until the OSD **id** shows **count** times in the
# PGs (see get_osd_id_used_by_pgs for more information about
# how OSD ids are counted).
#
# @param id the OSD id
# @param count the number of times it must show up in the PGs
# @return 0 on success, 1 on error
#
function wait_osd_id_used_by_pgs() {
    local id=$1
    local count=$2

    status=1
    for ((i=0; i < $TIMEOUT / 5; i++)); do
        echo $i
        if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then
            sleep 5
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    wait_osd_id_used_by_pgs 0 8 || return 1
    ! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the date and time of the last completed scrub for **pgid**,
# as reported by ceph pg dump pgs. Note that a repair also sets this
# date.
#
# @param pgid the id of the PG
# @param sname the stat name to read (defaults to last_scrub_stamp)
# @param STDOUT the date and time of the last scrub
# @return 0 on success, 1 on error
#
function get_last_scrub_stamp() {
    local pgid=$1
    local sname=${2:-last_scrub_stamp}
    ceph --format json pg dump pgs 2>/dev/null | \
        jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname"
}

function test_get_last_scrub_stamp() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    stamp=$(get_last_scrub_stamp 1.0)
    test -n "$stamp" || return 1
    teardown $dir || return 1
}
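
##
# Usage sketch: detect that a new scrub completed on PG 1.0 by saving
# the stamp, triggering a scrub and waiting for the stamp to change,
# which is exactly what the pg_scrub helper further below does.
#
# ~~~~~~~~~~~~~~~~{.sh}
# local last_scrub=$(get_last_scrub_stamp 1.0)
# ceph pg scrub 1.0
# wait_for_scrub 1.0 "$last_scrub" || return 1
# ~~~~~~~~~~~~~~~~
#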

#######################################################################

##
# Predicate checking if the cluster is clean, i.e. all of its PGs are
# in a clean state (see get_num_active_clean for a definition).
#
# @return 0 if the cluster is clean, 1 otherwise
#
function is_clean() {
    num_pgs=$(get_num_pgs)
    test $num_pgs != 0 || return 1
    test $(get_num_active_clean) = $num_pgs || return 1
}

function test_is_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    is_clean || return 1
    teardown $dir || return 1
}

#######################################################################

calc() { $AWK "BEGIN{print $*}"; }

##
# Return a list of numbers that are increasingly larger and whose
# total is **timeout** seconds. It can be used to have short sleep
# delays while waiting for an event on a fast machine. But if running
# very slowly the larger delays avoid stressing the machine even
# further or spamming the logs.
#
# @param timeout sum of all delays, in seconds
# @param first_step initial delay, in seconds (defaults to 1)
# @param max_timeout cap on any single delay (defaults to $MAX_TIMEOUT, 0 disables the cap)
# @return a list of sleep delays
#
function get_timeout_delays() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local timeout=$1
    local first_step=${2:-1}
    local max_timeout=${3:-$MAX_TIMEOUT}

    local i
    local total="0"
    i=$first_step
    while test "$(calc $total + $i \<= $timeout)" = "1"; do
        echo -n "$(calc $i) "
        total=$(calc $total + $i)
        i=$(calc $i \* 2)
        if [ $max_timeout -gt 0 ]; then
            # Did we reach max timeout ?
            if [ ${i%.*} -eq ${max_timeout%.*} ] && [ ${i#*.} \> ${max_timeout#*.} ] || [ ${i%.*} -gt ${max_timeout%.*} ]; then
                # Yes, so let's cap the max wait time to max
                i=$max_timeout
            fi
        fi
    done
    if test "$(calc $total \< $timeout)" = "1"; then
        echo -n "$(calc $timeout - $total) "
    fi
    $trace && shopt -s -o xtrace
}

function test_get_timeout_delays() {
    test "$(get_timeout_delays 1)" = "1 " || return 1
    test "$(get_timeout_delays 5)" = "1 2 2 " || return 1
    test "$(get_timeout_delays 6)" = "1 2 3 " || return 1
    test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
    test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1
    test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1
    test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1
    test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1
    test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1
    test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1
    test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1
    test "$(get_timeout_delays 300 .1 0)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 12.8 25.6 51.2 102.4 95.3 " || return 1
    test "$(get_timeout_delays 300 .1 10)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 7.3 " || return 1
}

#######################################################################

##
# Wait until the cluster becomes clean, or fail if it does not make
# progress for $WAIT_FOR_CLEAN_TIMEOUT seconds.
# Progress is measured either via the **get_is_making_recovery_progress**
# predicate or if the number of clean PGs changes (as returned by get_num_active_clean)
#
# @return 0 if the cluster is clean, 1 otherwise
#
function wait_for_clean() {
    local cmd=$1
    local num_active_clean=-1
    local cur_active_clean
    local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
    local -i loop=0

    flush_pg_stats || return 1
    while test $(get_num_pgs) == 0 ; do
        sleep 1
    done

    while true ; do
        # Comparing get_num_active_clean & get_num_pgs is used to determine
        # if the cluster is clean. That's almost an inline of is_clean() to
        # get more performance by avoiding multiple calls of get_num_active_clean.
        cur_active_clean=$(get_num_active_clean)
        test $cur_active_clean = $(get_num_pgs) && break
        if test $cur_active_clean != $num_active_clean ; then
            loop=0
            num_active_clean=$cur_active_clean
        elif get_is_making_recovery_progress ; then
            loop=0
        elif (( $loop >= ${#delays[*]} )) ; then
            ceph report
            return 1
        fi
        # eval is a no-op if cmd is empty
        eval $cmd
        sleep ${delays[$loop]}
        loop+=1
    done
    return 0
}

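##
# Usage sketch: wait for a clean cluster while dumping a brief pg
# listing on every retry (the extra command is optional and
# illustrative; any shell command can be passed).
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_clean "ceph pg dump pgs_brief" || return 1
# ~~~~~~~~~~~~~~~~
#
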
function test_wait_for_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_osd $dir 0 || return 1
    run_mgr $dir x || return 1
    create_rbd_pool || return 1
    ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
    run_osd $dir 1 || return 1
    wait_for_clean || return 1
    teardown $dir || return 1
}

##
# Wait until the cluster becomes peered, or fail if it does not make
# progress for $WAIT_FOR_CLEAN_TIMEOUT seconds.
# Progress is measured either via the **get_is_making_recovery_progress**
# predicate or if the number of peered PGs changes (as returned by get_num_active_or_peered)
#
# @return 0 if the cluster is peered, 1 otherwise
#
function wait_for_peered() {
    local cmd=$1
    local num_peered=-1
    local cur_peered
    local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
    local -i loop=0

    flush_pg_stats || return 1
    while test $(get_num_pgs) == 0 ; do
        sleep 1
    done

    while true ; do
        # Comparing get_num_active_or_peered & get_num_pgs is used to determine
        # if the cluster is peered. That's almost an inline of is_clean() to
        # get more performance by avoiding multiple calls of get_num_active_or_peered.
        cur_peered=$(get_num_active_or_peered)
        test $cur_peered = $(get_num_pgs) && break
        if test $cur_peered != $num_peered ; then
            loop=0
            num_peered=$cur_peered
        elif get_is_making_recovery_progress ; then
            loop=0
        elif (( $loop >= ${#delays[*]} )) ; then
            ceph report
            return 1
        fi
        # eval is a no-op if cmd is empty
        eval $cmd
        sleep ${delays[$loop]}
        loop+=1
    done
    return 0
}

function test_wait_for_peered() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_osd $dir 0 || return 1
    run_mgr $dir x || return 1
    create_rbd_pool || return 1
    ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
    run_osd $dir 1 || return 1
    wait_for_peered || return 1
    teardown $dir || return 1
}


#######################################################################

##
# Wait until the cluster health matches the condition passed as arg,
# checking for up to $TIMEOUT seconds.
#
# @param string to grep for in health detail
# @return 0 if the cluster health matches request, 1 otherwise
#
function wait_for_health() {
    local grepstr=$1
    local -a delays=($(get_timeout_delays $TIMEOUT .1))
    local -i loop=0

    while ! ceph health detail | grep "$grepstr" ; do
        if (( $loop >= ${#delays[*]} )) ; then
            ceph health detail
            return 1
        fi
        sleep ${delays[$loop]}
        loop+=1
    done
}

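##
# Usage sketch: wait until a specific warning shows up in health detail
# (the health code used here is illustrative).
#
# ~~~~~~~~~~~~~~~~{.sh}
# wait_for_health "OSD_DOWN" || return 1
# ~~~~~~~~~~~~~~~~
#
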
1adf2230
AA
1703##
1704# Wait until the cluster becomes HEALTH_OK again or if it does not make progress
1705# for $TIMEOUT seconds.
1706#
1707# @return 0 if the cluster is HEALTHY, 1 otherwise
1708#
7c673cae
FG
1709function wait_for_health_ok() {
1710 wait_for_health "HEALTH_OK" || return 1
1711}
1712
1713function test_wait_for_health_ok() {
1714 local dir=$1
1715
1716 setup $dir || return 1
eafe8130 1717 run_mon $dir a --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
31f18b77 1718 run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
eafe8130 1719 # start osd_pool_default_size OSDs
7c673cae 1720 run_osd $dir 0 || return 1
1721 run_osd $dir 1 || return 1
1722 run_osd $dir 2 || return 1
224ce89b 1723 kill_daemons $dir TERM osd || return 1
11fdf7f2 1724 ceph osd down 0 || return 1
eafe8130 1725 # expect TOO_FEW_OSDS warning
224ce89b 1726 ! TIMEOUT=1 wait_for_health_ok || return 1
eafe8130 1727 # resurrect all OSDs
224ce89b 1728 activate_osd $dir 0 || return 1
1729 activate_osd $dir 1 || return 1
1730 activate_osd $dir 2 || return 1
1731 wait_for_health_ok || return 1
1732 teardown $dir || return 1
1733}
1734
1735
1736#######################################################################
1737
1738##
1739# Run repair on **pgid** and wait until it completes. The repair
1740# function will fail if repair does not complete within $TIMEOUT
1741# seconds.
1742#
1743# @param pgid the id of the PG
1744# @return 0 on success, 1 on error
1745#
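# Example (mirroring test_repair below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# repair 1.0 || return 1
# ~~~~~~~~~~~~~~~~
#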
1746function repair() {
1747 local pgid=$1
1748 local last_scrub=$(get_last_scrub_stamp $pgid)
1749 ceph pg repair $pgid
1750 wait_for_scrub $pgid "$last_scrub"
1751}
1752
1753function test_repair() {
1754 local dir=$1
1755
1756 setup $dir || return 1
1757 run_mon $dir a --osd_pool_default_size=1 || return 1
1758 run_mgr $dir x || return 1
1759 run_osd $dir 0 || return 1
c07f9fc5 1760 create_rbd_pool || return 1
7c673cae 1761 wait_for_clean || return 1
b5b8bbf5 1762 repair 1.0 || return 1
7c673cae 1763 kill_daemons $dir KILL osd || return 1
b5b8bbf5 1764 ! TIMEOUT=1 repair 1.0 || return 1
1765 teardown $dir || return 1
1766}
1767#######################################################################
1768
1769##
1770# Run scrub on **pgid** and wait until it completes. The pg_scrub
1771# function will fail if the scrub does not complete within $TIMEOUT
1772# seconds. The scrub is complete whenever the
1773# **get_last_scrub_stamp** function reports a timestamp different from
1774# the one stored before starting the scrub.
1775#
1776# @param pgid the id of the PG
1777# @return 0 on success, 1 on error
1778#
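# Example (mirroring test_pg_scrub below; pg_deep_scrub follows the same
# pattern using last_deep_scrub_stamp):
#
# ~~~~~~~~~~~~~~~~{.sh}
# pg_scrub 1.0 || return 1
# ~~~~~~~~~~~~~~~~
#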
1779function pg_scrub() {
1780 local pgid=$1
1781 local last_scrub=$(get_last_scrub_stamp $pgid)
1782 ceph pg scrub $pgid
1783 wait_for_scrub $pgid "$last_scrub"
1784}
1785
1786function pg_deep_scrub() {
1787 local pgid=$1
1788 local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
1789 ceph pg deep-scrub $pgid
1790 wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
1791}
1792
1793function test_pg_scrub() {
1794 local dir=$1
1795
1796 setup $dir || return 1
1797 run_mon $dir a --osd_pool_default_size=1 || return 1
1798 run_mgr $dir x || return 1
1799 run_osd $dir 0 || return 1
c07f9fc5 1800 create_rbd_pool || return 1
7c673cae 1801 wait_for_clean || return 1
b5b8bbf5 1802 pg_scrub 1.0 || return 1
7c673cae 1803 kill_daemons $dir KILL osd || return 1
b5b8bbf5 1804 ! TIMEOUT=1 pg_scrub 1.0 || return 1
1805 teardown $dir || return 1
1806}
1807
1808#######################################################################
1809
1810##
1811# Run the *command* and expect it to fail (i.e. return a non-zero status).
1812# The output (stderr and stdout) is stored in a temporary file in *dir*
1813# and is expected to contain the string *expected*.
1814#
1815# Return 0 if the command failed and the string was found. Otherwise
1816# return 1 and cat the full output of the command on stderr for debugging.
1817#
1818# @param dir temporary directory to store the output
1819# @param expected string to look for in the output
1820# @param command ... the command and its arguments
1821# @return 0 on success, 1 on error
1822#
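# Example (taken from test_expect_failure below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
# ~~~~~~~~~~~~~~~~
#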
1823
1824function expect_failure() {
1825 local dir=$1
1826 shift
1827 local expected="$1"
1828 shift
1829 local success
1830
1831 if "$@" > $dir/out 2>&1 ; then
1832 success=true
1833 else
1834 success=false
1835 fi
1836
1837 if $success || ! grep --quiet "$expected" $dir/out ; then
1838 cat $dir/out >&2
1839 return 1
1840 else
1841 return 0
1842 fi
1843}
1844
1845function test_expect_failure() {
1846 local dir=$1
1847
1848 setup $dir || return 1
1849 expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
1850 # the command did not fail
1851 ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
1852 grep --quiet FAIL $dir/out || return 1
1853 # the command failed but the output does not contain the expected string
1854 ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
1855 ! grep --quiet FAIL $dir/out || return 1
1856 teardown $dir || return 1
1857}
1858
1859#######################################################################
1860
1861##
1862# Given the *last_scrub*, wait for scrub to happen on **pgid**. It
1863# will fail if the scrub does not complete within $TIMEOUT seconds. The
1864# scrub is complete whenever the **get_last_scrub_stamp** function
1865# reports a timestamp different from the one given as argument.
1866#
1867# @param pgid the id of the PG
1868# @param last_scrub timestamp of the last scrub for *pgid*
1869# @return 0 on success, 1 on error
1870#
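# Example (illustrative; this is the pattern used by pg_scrub above):
#
# ~~~~~~~~~~~~~~~~{.sh}
# local last_scrub=$(get_last_scrub_stamp $pgid)
# ceph pg scrub $pgid
# wait_for_scrub $pgid "$last_scrub" || return 1
# ~~~~~~~~~~~~~~~~
#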
1871function wait_for_scrub() {
1872 local pgid=$1
1873 local last_scrub="$2"
1874 local sname=${3:-last_scrub_stamp}
1875
1876 for ((i=0; i < $TIMEOUT; i++)); do
b5b8bbf5 1877 if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
1878 return 0
1879 fi
1880 sleep 1
1881 done
1882 return 1
1883}
1884
1885function test_wait_for_scrub() {
1886 local dir=$1
1887
1888 setup $dir || return 1
1889 run_mon $dir a --osd_pool_default_size=1 || return 1
1890 run_mgr $dir x || return 1
1891 run_osd $dir 0 || return 1
c07f9fc5 1892 create_rbd_pool || return 1
7c673cae 1893 wait_for_clean || return 1
b5b8bbf5 1894 local pgid=1.0
1895 ceph pg repair $pgid
1896 local last_scrub=$(get_last_scrub_stamp $pgid)
1897 wait_for_scrub $pgid "$last_scrub" || return 1
1898 kill_daemons $dir KILL osd || return 1
1899 last_scrub=$(get_last_scrub_stamp $pgid)
1900 ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
1901 teardown $dir || return 1
1902}
1903
1904#######################################################################
1905
1906##
1907# Return 0 if the erasure code *plugin* is available, 1 otherwise.
1908#
1909# @param plugin erasure code plugin
1910# @return 0 on success, 1 on error
1911#
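# Example (as exercised by test_erasure_code_plugin_exists below):
#
# ~~~~~~~~~~~~~~~~{.sh}
# erasure_code_plugin_exists jerasure || return 1
# ! erasure_code_plugin_exists FAKE || return 1
# ~~~~~~~~~~~~~~~~
#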
1912
1913function erasure_code_plugin_exists() {
1914 local plugin=$1
1915 local status
1916 local grepstr
1917 local s
1918 case `uname` in
1919 FreeBSD) grepstr="Cannot open.*$plugin" ;;
1920 *) grepstr="$plugin.*No such file" ;;
1921 esac
1922
1923 s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
1924 local status=$?
1925 if [ $status -eq 0 ]; then
1926 ceph osd erasure-code-profile rm TESTPROFILE
1927 elif ! echo $s | grep --quiet "$grepstr" ; then
1928 status=1
1929 # display why the string was rejected.
1930 echo $s
1931 fi
1932 return $status
1933}
1934
1935function test_erasure_code_plugin_exists() {
1936 local dir=$1
1937
1938 setup $dir || return 1
1939 run_mon $dir a || return 1
1940 run_mgr $dir x || return 1
1941 erasure_code_plugin_exists jerasure || return 1
1942 ! erasure_code_plugin_exists FAKE || return 1
1943 teardown $dir || return 1
1944}
1945
1946#######################################################################
1947
1948##
1949# Display all log files from **dir** on stdout.
1950#
1951# @param dir directory in which all data is stored
1952#
1953
1954function display_logs() {
1955 local dir=$1
1956
1957 find $dir -maxdepth 1 -name '*.log' | \
1958 while read file ; do
1959 echo "======================= $file"
1960 cat $file
1961 done
1962}
1963
1964function test_display_logs() {
1965 local dir=$1
1966
1967 setup $dir || return 1
1968 run_mon $dir a || return 1
1969 kill_daemons $dir || return 1
1970 display_logs $dir > $dir/log.out
1971 grep --quiet mon.a.log $dir/log.out || return 1
1972 teardown $dir || return 1
1973}
1974
1975#######################################################################
1976##
1977# Spawn a command in the background and append its pid to the variable
1978# whose name is passed as argument. To make reading the output easier,
1979# every output line is prepended with the process id.
1980#
1981# Example:
1982# pids1=""
1983# run_in_background pids1 bash -c 'sleep 1; exit 1'
1984#
1985# @param pid_variable the variable name (not value) where the pids will be stored
1986# @param ... the command to execute
1987# @return the return value is not meaningful; the pids stored in **pid_variable** are meant to be reaped with **wait_background**
1988#
1989function run_in_background() {
1990 local pid_variable=$1
94b18763 1991 shift
7c673cae 1992 # Execute the command and prepend the output with its pid
f64942e4 1993 # We enforce to return the exit status of the command and not the sed one.
94b18763 1994 ("$@" |& sed 's/^/'$$': /'; return "${PIPESTATUS[0]}") >&2 &
1995 eval "$pid_variable+=\" $!\""
1996}
1997
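##
# Run a command with its stdout redirected to the file given as first
# argument; a thin wrapper around '"$@" > "$out"'.
#
# @param out file that receives the command's stdout
# @param ... the command to execute
#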
1998function save_stdout {
1999 local out="$1"
2000 shift
2001 "$@" > "$out"
2002}
2003
2004function test_run_in_background() {
2005 local pids
2006 run_in_background pids sleep 1
2007 run_in_background pids sleep 1
2008 test $(echo $pids | wc -w) = 2 || return 1
2009 wait $pids || return 1
2010}
2011
2012#######################################################################
2013##
2014# Wait for pids running in background to complete.
2015# This function is usually used after a **run_in_background** call
2016# Example:
2017# pids1=""
2018# run_in_background pids1 bash -c 'sleep 1; exit 1'
2019# wait_background pids1
2020#
2021# @param pids the variable name that contains the active PIDs; it is emptied at the end of the function
2022# @return 1 if at least one process exited in error, 0 otherwise
2023#
2024function wait_background() {
2025 # We extract the PIDS from the variable name
2026 pids=${!1}
2027
2028 return_code=0
2029 for pid in $pids; do
2030 if ! wait $pid; then
2031 # If one process failed then return 1
2032 return_code=1
2033 fi
2034 done
2035
2036 # We empty the variable reporting that all process ended
2037 eval "$1=''"
2038
2039 return $return_code
2040}
2041
2042
2043function test_wait_background() {
2044 local pids=""
2045 run_in_background pids bash -c "sleep 1; exit 1"
2046 run_in_background pids bash -c "sleep 2; exit 0"
2047 wait_background pids
2048 if [ $? -ne 1 ]; then return 1; fi
2049
2050 run_in_background pids bash -c "sleep 1; exit 0"
2051 run_in_background pids bash -c "sleep 2; exit 0"
2052 wait_background pids
2053 if [ $? -ne 0 ]; then return 1; fi
2054
2055 if [ ! -z "$pids" ]; then return 1; fi
2056}
2057
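##
# Ask every OSD to flush its PG stats and wait until the monitor has
# received them, i.e. until "ceph osd last-stat-seq" reports a sequence
# number at least as large as the one returned by each flush.
#
# @param timeout max number of seconds to wait (defaults to $TIMEOUT)
# @return 0 on success, 1 if the timeout expires
#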
2058function flush_pg_stats()
2059{
2060 local timeout=${1:-$TIMEOUT}
2061
2062 ids=`ceph osd ls`
2063 seqs=''
2064 for osd in $ids; do
2065 seq=`ceph tell osd.$osd flush_pg_stats`
2066 seqs="$seqs $osd-$seq"
2067 done
2068
2069 for s in $seqs; do
2070 osd=`echo $s | cut -d - -f 1`
2071 seq=`echo $s | cut -d - -f 2`
2072 echo "waiting osd.$osd seq $seq"
2073 while test $(ceph osd last-stat-seq $osd) -lt $seq; do
2074 sleep 1
2075 if [ $((timeout--)) -eq 0 ]; then
2076 return 1
2077 fi
2078 done
2079 done
2080}
2081
2082function test_flush_pg_stats()
2083{
2084 local dir=$1
2085
2086 setup $dir || return 1
2087 run_mon $dir a --osd_pool_default_size=1 || return 1
2088 run_mgr $dir x || return 1
2089 run_osd $dir 0 || return 1
c07f9fc5 2090 create_rbd_pool || return 1
31f18b77 2091 rados -p rbd put obj /etc/group
3a9019d9 2092 flush_pg_stats || return 1
31f18b77 2093 local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
2094 stored=`ceph df detail --format=json | jq "$jq_filter.stored"`
2095 stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
2096 test $stored -gt 0 || return 1
2097 test $stored == $stored_raw || return 1
b5b8bbf5 2098 teardown $dir
2099}
2100
2101#######################################################################
2102
2103##
2104# Call the **run** function (which must be defined by the caller) with
2105# the **dir** argument followed by the caller argument list.
2106#
2107# If the **run** function returns an error, all logs found in **dir**
2108# are displayed for diagnostic purposes.
2109#
2110# The **teardown** function is called when the **run** function returns
2111# (on success or on error), to clean up leftovers. CEPH_CONF is set
2112# to /dev/null and CEPH_ARGS is unset so that the tests are protected from
2113# external interference.
2114#
2115# It is the responsibility of the **run** function to call the
2116# **setup** function to prepare the test environment (create a temporary
2117# directory etc.).
2118#
2119# The shell is configured (via PS4) to display the function and line
2120# number of every statement it executes, to help debugging.
2121#
2122# @param dir directory in which all data is stored
2123# @param ... arguments passed transparently to **run**
2124# @return 0 on success, 1 on error
2125#
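# Example (illustrative; a standalone test script typically ends with a
# call like the following, where the first argument names the test):
#
# ~~~~~~~~~~~~~~~~{.sh}
# main my-test "$@"
# ~~~~~~~~~~~~~~~~
#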
2126function main() {
2127 local dir=td/$1
2128 shift
2129
2130 shopt -s -o xtrace
2131 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
2132
2133 export PATH=.:$PATH # make sure programs from the sources are preferred
2134 export PYTHONWARNINGS=ignore
2135 export CEPH_CONF=/dev/null
2136 unset CEPH_ARGS
2137
2138 local code
2139 if run $dir "$@" ; then
2140 code=0
2141 else
2142 code=1
2143 fi
b5b8bbf5 2144 teardown $dir $code || return 1
2145 return $code
2146}
2147
2148#######################################################################
2149
2150function run_tests() {
2151 shopt -s -o xtrace
2152 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
2153
11fdf7f2 2154 export PATH=.:$PATH # make sure programs from the sources are preferred
2155
2156 export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
2157 export CEPH_ARGS
b5b8bbf5 2158 CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
2159 CEPH_ARGS+="--mon-host=$CEPH_MON "
2160 export CEPH_CONF=/dev/null
2161
2162 local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
2163 local dir=td/ceph-helpers
2164
2165 for func in $funcs ; do
2166 if ! $func $dir; then
2167 teardown $dir 1
2168 return 1
2169 fi
2170 done
2171}
2172
2173if test "$1" = TESTS ; then
2174 shift
2175 run_tests "$@"
b5b8bbf5 2176 exit $?
2177fi
2178
2179# NOTE:
2180# jq only supports --exit-status|-e from version 1.4 onwards, which makes
2181# returning on error waaaay prettier and more straightforward.
2182# However, the current automated upstream build is running with v1.3,
2183# which has no idea what -e is. Hence the convoluted error checking we
2184# need. Sad.
2185# The next time someone changes this code, please check if v1.4 is now
2186# a thing, and, if so, please change these to use -e. Thanks.
2187
2188# jq '.all.supported | select([.[] == "foo"] | any)'
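##
# Run *filter* through jq on *input* and succeed if the result is the
# literal "true" or, when a third argument is given, if it matches that
# expected value.
#
# Example (illustrative):
#
# ~~~~~~~~~~~~~~~~{.sh}
# jq_success '{"a": 1}' '.a == 1' || return 1
# ~~~~~~~~~~~~~~~~
#
# @param input JSON document to filter
# @param filter jq filter to apply
# @param expects optional expected result
# @return 0 on success, 1 on error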
2189function jq_success() {
2190 input="$1"
2191 filter="$2"
2192 expects="\"$3\""
2193
2194 in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g")
2195 filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g")
2196
2197 ret=$(echo "$in_escaped" | jq "$filter_escaped")
2198 if [[ "$ret" == "true" ]]; then
2199 return 0
2200 elif [[ -n "$expects" ]]; then
2201 if [[ "$ret" == "$expects" ]]; then
2202 return 0
2203 fi
2204 fi
2205 return 1
2217}
2218
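##
# Ask the OSD holding shard **shard_id** of **objname** in **poolname** to
# inject a read error, by calling its inject${which}err admin socket command
# (the shard id is only passed along for erasure coded pools).
#
# @param pooltype "ec" for erasure coded pools, anything else for replicated
# @param which kind of error to inject, e.g. "data" or "mdata" (assumption)
# @param poolname pool containing the object
# @param objname object to corrupt
# @param dir test directory, used to look up the OSD objectstore type
# @param shard_id index into the object's acting set
# @return 0 on success, 1 on error
#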
2219function inject_eio() {
2220 local pooltype=$1
2221 shift
2222 local which=$1
2223 shift
2224 local poolname=$1
2225 shift
2226 local objname=$1
2227 shift
2228 local dir=$1
2229 shift
2230 local shard_id=$1
2231 shift
2232
2233 local -a initial_osds=($(get_osds $poolname $objname))
2234 local osd_id=${initial_osds[$shard_id]}
2235 if [ "$pooltype" != "ec" ]; then
2236 shard_id=""
2237 fi
eafe8130
TL
2238 type=$(cat $dir/$osd_id/type)
2239 set_config osd $osd_id ${type}_debug_inject_read_err true || return 1
b5b8bbf5
FG
2240 local loop=0
2241 while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
2242 inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
2243 loop=$(expr $loop + 1)
2244 if [ $loop = "10" ]; then
2245 return 1
2246 fi
2247 sleep 1
2248 done
2249}
2250
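##
# Run diff on the given arguments and, if they differ and $DIFFCOLOPTS is
# set, run diff again with the side-by-side options so the discrepancy is
# easier to read in the test logs.
#
# @param ... arguments passed verbatim to diff
# @return 0 if the inputs are identical, non-zero otherwise
#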
2251function multidiff() {
2252 if ! diff $@ ; then
2253 if [ "$DIFFCOLOPTS" = "" ]; then
2254 return 1
2255 fi
2256 diff $DIFFCOLOPTS $@
2257 fi
2258}
2259
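##
# Create an erasure coded pool named **pool_name**, backed by a freshly
# created "myprofile" erasure code profile with crush-failure-domain=osd.
# Extra arguments are passed to "ceph osd erasure-code-profile set". When
# **allow_overwrites** is "true", allow_ec_overwrites is enabled on the pool.
#
# @param pool_name name of the pool to create
# @param allow_overwrites "true" to enable allow_ec_overwrites
# @param ... additional erasure code profile parameters
# @return 0 on success, 1 on error
#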
2260function create_ec_pool() {
2261 local pool_name=$1
2262 shift
2263 local allow_overwrites=$1
2264 shift
2265
2266 ceph osd erasure-code-profile set myprofile crush-failure-domain=osd "$@" || return 1
2267
2268 create_pool "$poolname" 1 1 erasure myprofile || return 1
2269
2270 if [ "$allow_overwrites" = "true" ]; then
2271 ceph osd pool set "$poolname" allow_ec_overwrites true || return 1
2272 fi
2273
2274 wait_for_clean || return 1
2275 return 0
2276}
2277
7c673cae 2278# Local Variables:
c07f9fc5 2279# compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"
7c673cae 2280# End: