#!/usr/bin/env bash
#
# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
# Copyright (C) 2014 Federico Gimenez <fgimenez@coit.es>
#
# Author: Loic Dachary <loic@dachary.org>
# Author: Federico Gimenez <fgimenez@coit.es>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
TIMEOUT=300
WAIT_FOR_CLEAN_TIMEOUT=90
MAX_TIMEOUT=15
PG_NUM=4
TMPDIR=${TMPDIR:-/tmp}
CEPH_BUILD_VIRTUALENV=${TMPDIR}
TESTDIR=${TESTDIR:-${TMPDIR}}

if type xmlstarlet > /dev/null 2>&1; then
    XMLSTARLET=xmlstarlet
elif type xml > /dev/null 2>&1; then
    XMLSTARLET=xml
else
    echo "Missing xmlstarlet binary!"
    exit 1
fi

if [ `uname` = FreeBSD ]; then
    SED=gsed
    AWK=gawk
    DIFFCOLOPTS=""
    KERNCORE="kern.corefile"
else
    SED=sed
    AWK=awk
    termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
    if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
        termwidth="-W ${termwidth}"
    fi
    DIFFCOLOPTS="-y $termwidth"
    KERNCORE="kernel.core_pattern"
fi

EXTRA_OPTS=""

#! @file ceph-helpers.sh
# @brief Toolbox to manage Ceph cluster dedicated to testing
#
# Example use case:
#
# ~~~~~~~~~~~~~~~~{.sh}
# source ceph-helpers.sh
#
# function mytest() {
#   # cleanup leftovers and reset mydir
#   setup mydir
#   # create a cluster with one monitor and three osds
#   run_mon mydir a
#   run_osd mydir 0
#   run_osd mydir 2
#   run_osd mydir 3
#   # put and get an object
#   rados --pool rbd put GROUP /etc/group
#   rados --pool rbd get GROUP /tmp/GROUP
#   # stop the cluster and cleanup the directory
#   teardown mydir
# }
# ~~~~~~~~~~~~~~~~
#
# The focus is on simplicity and efficiency, in the context of
# functional tests. The output is intentionally very verbose
# and functions return as soon as an error is found. The caller
# is also expected to abort on the first error so that debugging
# can be done by looking at the end of the output.
#
# Each function is documented, implemented and tested independently.
# When modifying a helper, the test and the documentation are
# expected to be updated and it is easier if they are collocated. A
# test for a given function can be run with
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS test_get_osds
# ~~~~~~~~~~~~~~~~
#
# and all the tests (i.e. all functions matching test_*) are run
# with:
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS
# ~~~~~~~~~~~~~~~~
#
# A test function takes a single argument: the directory dedicated
# to the tests. It is expected to not create any file outside of this
# directory and remove it entirely when it completes successfully.
#


function get_asok_dir() {
    if [ -n "$CEPH_ASOK_DIR" ]; then
        echo "$CEPH_ASOK_DIR"
    else
        echo ${TMPDIR:-/tmp}/ceph-asok.$$
    fi
}

function get_asok_path() {
    local name=$1
    if [ -n "$name" ]; then
        echo $(get_asok_dir)/ceph-$name.asok
    else
        echo $(get_asok_dir)/\$cluster-\$name.asok
    fi
}
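
# Illustrative example (not from the original file; the PID suffix 12345 is
# only an example value): with CEPH_ASOK_DIR unset, admin sockets live under
# ${TMPDIR:-/tmp}/ceph-asok.$$, e.g.
#
#   get_asok_path osd.0   # -> /tmp/ceph-asok.12345/ceph-osd.0.asok
#   get_asok_path         # -> /tmp/ceph-asok.12345/$cluster-$name.asok
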
##
# Cleanup any leftovers found in **dir** via **teardown**
# and reset **dir** as an empty environment.
#
# @param dir path name of the environment
# @return 0 on success, 1 on error
#
function setup() {
    local dir=$1
    teardown $dir || return 1
    mkdir -p $dir
    mkdir -p $(get_asok_dir)
    if [ $(ulimit -n) -le 1024 ]; then
        ulimit -n 4096 || return 1
    fi
    if [ -z "$LOCALRUN" ]; then
        trap "teardown $dir 1" TERM HUP INT
    fi
}

function test_setup() {
    local dir=$1
    setup $dir || return 1
    test -d $dir || return 1
    setup $dir || return 1
    test -d $dir || return 1
    teardown $dir
}

#######################################################################

##
# Kill all daemons for which a .pid file exists in **dir** and remove
# **dir**. If the file system in which **dir** resides is btrfs, delete all
# subvolumes that relate to it.
#
# @param dir path name of the environment
# @param dumplogs pass "1" to always dump logs; otherwise logs are dumped only if cores are found
# @return 0 on success, 1 on error
#
function teardown() {
    local dir=$1
    local dumplogs=$2
    kill_daemons $dir KILL
    if [ `uname` != FreeBSD ] \
        && [ $(stat -f -c '%T' .) == "btrfs" ]; then
        __teardown_btrfs $dir
    fi
    local cores="no"
    local pattern="$(sysctl -n $KERNCORE)"
    # See if we have apport core handling
    if [ "${pattern:0:1}" = "|" ]; then
        # TODO: Where can we get the dumps?
        # Not sure where the dumps really are so this will look in the CWD
        pattern=""
    fi
    # Locally, core files start with "core"; on teuthology they end with "core"
    if ls $(dirname "$pattern") | grep -q '^core\|core$' ; then
        cores="yes"
        if [ -n "$LOCALRUN" ]; then
            mkdir /tmp/cores.$$ 2> /dev/null || true
            for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
                mv $i /tmp/cores.$$
            done
        fi
    fi
    if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
        if [ -n "$LOCALRUN" ]; then
            display_logs $dir
        else
            # Move logs to where Teuthology will archive them
            mkdir -p $TESTDIR/archive/log
            mv $dir/*.log $TESTDIR/archive/log
        fi
    fi
    rm -fr $dir
    rm -rf $(get_asok_dir)
    if [ "$cores" = "yes" ]; then
        echo "ERROR: Failure due to cores found"
        if [ -n "$LOCALRUN" ]; then
            echo "Find saved core files in /tmp/cores.$$"
        fi
        return 1
    fi
    return 0
}

function __teardown_btrfs() {
    local btrfs_base_dir=$1
    local btrfs_root=$(df -P . | tail -1 | $AWK '{print $NF}')
    local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list -t . | $AWK '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/$btrfs_dir")
    for subvolume in $btrfs_dirs; do
        sudo btrfs subvolume delete $btrfs_root/$subvolume
    done
}

function test_teardown() {
    local dir=$1
    setup $dir || return 1
    teardown $dir || return 1
    ! test -d $dir || return 1
}

#######################################################################

##
# Sends a signal to a single daemon.
# This is a helper function for kill_daemons.
#
# After the daemon is sent **signal**, its actual termination
# will be verified by sending it signal 0. If the daemon is
# still alive, kill_daemon will pause for a few seconds and
# try again. This will repeat for a fixed number of times
# before kill_daemon returns on failure. The list of
# sleep intervals can be specified as **delays** and defaults
# to:
#
#  0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
#
# This sequence starts with a very short sleep (0.1) in case the
# machine is fast enough for the daemon to terminate in a fraction of a
# second. The increasing sleep numbers should give plenty of time for
# the daemon to die even on the slowest running machine. If a daemon
# takes more than a few minutes to stop (the sum of all sleep times),
# there probably is no point in waiting more and a number of things
# are likely to go wrong anyway: better give up and return on error.
#
# @param pid path of the file containing the process id to signal
# @param send_signal the signal to send
# @param delays sequence of sleep times before failure
#
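# Illustrative example (hypothetical pid file path, not from the original doc):
#
#   kill_daemon $dir/osd.0.pid TERM "1 2 4"  # give up after roughly 7 seconds
#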
function kill_daemon() {
    local pid=$(cat $1)
    local send_signal=$2
    local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
    local exit_code=1
    # In order to try after the last large sleep add 0 at the end so we check
    # one last time before dropping out of the loop
    for try in $delays 0 ; do
        if kill -$send_signal $pid 2> /dev/null ; then
            exit_code=1
        else
            exit_code=0
            break
        fi
        send_signal=0
        sleep $try
    done;
    return $exit_code
}

function test_kill_daemon() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1

    name_prefix=osd
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # sending signal 0 won't kill the daemon
        # waiting just for one second instead of the default schedule
        # allows us to quickly verify what happens when kill fails
        # to stop the daemon (i.e. it must return false)
        #
        ! kill_daemon $pidfile 0 1 || return 1
        #
        # kill just the osd and verify the mon is still responsive
        #
        kill_daemon $pidfile TERM || return 1
    done

    name_prefix=mgr
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mgr
        #
        kill_daemon $pidfile TERM || return 1
    done

    name_prefix=mon
    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        #
        # kill the mon and verify it cannot be reached
        #
        kill_daemon $pidfile TERM || return 1
        ! timeout 5 ceph status || return 1
    done

    teardown $dir || return 1
}

##
# Kill all daemons for which a .pid file exists in **dir**. Each
# daemon is sent a **signal** and kill_daemons waits up to a few
# minutes for it to exit. By default all daemons are killed. If a
# **name_prefix** is provided, only the daemons for which a pid
# file is found matching the prefix are killed. See run_osd and
# run_mon for more information about the name conventions for
# the pid files.
#
# Send TERM to all daemons : kill_daemons $dir
# Send KILL to all daemons : kill_daemons $dir KILL
# Send KILL to all osds : kill_daemons $dir KILL osd
# Send KILL to osd 1 : kill_daemons $dir KILL osd.1
#
# If a daemon is sent the TERM signal and does not terminate
# within a few minutes, it will still be running even after
# kill_daemons returns.
#
# If all daemons are killed successfully the function returns 0;
# if at least one daemon remains, this is treated as an
# error and the function returns 1.
#
# @param dir path name of the environment
# @param signal name of the first signal (defaults to TERM)
# @param name_prefix only kill matching daemons (defaults to all)
# @param delays sequence of sleep times before failure
# @return 0 on success, 1 on error
#
function kill_daemons() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local dir=$1
    local signal=${2:-TERM}
    local name_prefix=$3 # optional, osd, mon, osd.1
    local delays=$4 # optional timing
    local status=0
    local pids=""

    for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
        run_in_background pids kill_daemon $pidfile $signal $delays
    done

    wait_background pids
    status=$?

    $trace && shopt -s -o xtrace
    return $status
}

function test_kill_daemons() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    #
    # sending signal 0 won't kill the daemon
    # waiting just for one second instead of the default schedule
    # allows us to quickly verify what happens when kill fails
    # to stop the daemon (i.e. it must return false)
    #
    ! kill_daemons $dir 0 osd 1 || return 1
    #
    # kill just the osd and verify the mon is still responsive
    #
    kill_daemons $dir TERM osd || return 1
    #
    # kill the mgr
    #
    kill_daemons $dir TERM mgr || return 1
    #
    # kill the mon and verify it cannot be reached
    #
    kill_daemons $dir TERM || return 1
    ! timeout 5 ceph status || return 1
    teardown $dir || return 1
}

#
# return a random TCP port which is not used yet
#
# please note, there could be a race if we use this function to pick
# a free port and then try to bind to it.
#
function get_unused_port() {
    local ip=127.0.0.1
    python3 -c "import socket; s=socket.socket(); s.bind(('$ip', 0)); print(s.getsockname()[1]); s.close()"
}
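
# Illustrative example (the variable name is only an example); the returned
# port may still be taken by another process before it is bound:
#
#   port=$(get_unused_port)
#   CEPH_ARGS+="--mon-host=127.0.0.1:$port "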

#######################################################################

##
# Run a monitor by the name mon.**id** with data in **dir**/**id**.
# The logs can be found in **dir**/mon.**id**.log and the pid file
# is **dir**/mon.**id**.pid and the admin socket is
# **dir**/**id**/ceph-mon.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-mon --mkfs
# and the ceph-mon daemon.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_mon, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_mon $dir a # spawn a mon and bind port 7018
# run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
#
# If mon_initial_members is not set, the default rbd pool is deleted
# and replaced with a replicated pool with fewer placement groups to
# speed up initialization. If mon_initial_members is set, no attempt
# is made to recreate the rbd pool because it would hang forever,
# waiting for other mons to join.
#
# A **dir**/ceph.conf file is created but not meant to be used by any
# function. It is convenient for debugging a failure with:
#
# ceph --conf **dir**/ceph.conf -s
#
# @param dir path name of the environment
# @param id mon identifier
# @param ... can be any option valid for ceph-mon
# @return 0 on success, 1 on error
#
function run_mon() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mon \
        --id $id \
        --mkfs \
        --mon-data=$data \
        --run-dir=$dir \
        "$@" || return 1

    ceph-mon \
        --id $id \
        --osd-failsafe-full-ratio=.99 \
        --mon-osd-full-ratio=.99 \
        --mon-data-avail-crit=1 \
        --mon-data-avail-warn=5 \
        --paxos-propose-interval=0.1 \
        --osd-crush-chooseleaf-type=0 \
        $EXTRA_OPTS \
        --debug-mon 20 \
        --debug-ms 20 \
        --debug-paxos 20 \
        --chdir= \
        --mon-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --mon-cluster-log-file=$dir/log \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        --mon-allow-pool-delete \
        --mon-allow-pool-size-one \
        --osd-pool-default-pg-autoscale-mode off \
        --mon-osd-backfillfull-ratio .99 \
        --mon-warn-on-insecure-global-id-reclaim-allowed=false \
        "$@" || return 1

    cat > $dir/ceph.conf <<EOF
[global]
fsid = $(get_config mon $id fsid)
mon host = $(get_config mon $id mon_host)
EOF
}

function test_run_mon() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    ceph mon dump | grep "mon.a" || return 1
    kill_daemons $dir || return 1

    run_mon $dir a --osd_pool_default_size=3 || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1
    create_rbd_pool || return 1
    ceph osd dump | grep "pool 1 'rbd'" || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"3"}' || return 1

    ! CEPH_ARGS='' ceph status || return 1
    CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1

    kill_daemons $dir || return 1

    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"1"}' || return 1
    kill_daemons $dir || return 1

    CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
        run_mon $dir a || return 1
    local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
        config get osd_pool_default_size)
    test "$size" = '{"osd_pool_default_size":"2"}' || return 1
    kill_daemons $dir || return 1

    teardown $dir || return 1
}

function create_rbd_pool() {
    ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
    create_pool rbd $PG_NUM || return 1
    rbd pool init rbd
}

function create_pool() {
    ceph osd pool create "$@"
    sleep 1
}

function delete_pool() {
    local poolname=$1
    ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
}

#######################################################################

function run_mgr() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph config set mgr mgr_pool false --force
    ceph-mgr \
        --id $id \
        $EXTRA_OPTS \
        --osd-failsafe-full-ratio=.99 \
        --debug-mgr 20 \
        --debug-objecter 20 \
        --debug-ms 20 \
        --debug-paxos 20 \
        --chdir= \
        --mgr-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        --mgr-module-path=$(realpath ${CEPH_ROOT}/src/pybind/mgr) \
        "$@" || return 1
}

function run_mds() {
    local dir=$1
    shift
    local id=$1
    shift
    local data=$dir/$id

    ceph-mds \
        --id $id \
        $EXTRA_OPTS \
        --debug-mds 20 \
        --debug-objecter 20 \
        --debug-ms 20 \
        --chdir= \
        --mds-data=$data \
        --log-file=$dir/\$name.log \
        --admin-socket=$(get_asok_path) \
        --run-dir=$dir \
        --pid-file=$dir/\$name.pid \
        "$@" || return 1
}

#######################################################################

##
# Create (prepare) and run (activate) an osd by the name osd.**id**
# with data in **dir**/**id**. The logs can be found in
# **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
# the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The run_osd function creates the OSD data directory in the **dir**/**id**
# directory and relies on the activate_osd function to run the daemon.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function run_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --osd-journal=${osd_data}/journal"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --debug-ms=1"
    ceph_args+=" --debug-monc=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len=460"
    ceph_args+=" --osd-max-object-namespace-len=64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
    ceph_args+=" --osd-mclock-profile=high_recovery_ops"
    ceph_args+=" "
    ceph_args+="$@"
    mkdir -p $osd_data

    local uuid=`uuidgen`
    echo "add osd$id $uuid"
    OSD_SECRET=$(ceph-authtool --gen-print-key)
    echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
    ceph osd new $uuid -i $osd_data/new.json
    rm $osd_data/new.json
    ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid

    local key_fn=$osd_data/keyring
    cat > $key_fn<<EOF
[osd.$id]
key = $OSD_SECRET
EOF
    echo adding osd$id key to auth repository
    ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
    echo start osd.$id
    ceph-osd -i $id $ceph_args &

    # If noup is set, then can't wait for this osd
    if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
        return 0
    fi
    wait_for_osd up $id || return 1

}

function run_osd_filestore() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --osd-journal=${osd_data}/journal"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --debug-ms=1"
    ceph_args+=" --debug-monc=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len=460"
    ceph_args+=" --osd-max-object-namespace-len=64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
    ceph_args+=" "
    ceph_args+="$@"
    mkdir -p $osd_data

    local uuid=`uuidgen`
    echo "add osd$id $uuid"
    OSD_SECRET=$(ceph-authtool --gen-print-key)
    echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
    ceph osd new $uuid -i $osd_data/new.json
    rm $osd_data/new.json
    ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid --osd-objectstore=filestore

    local key_fn=$osd_data/keyring
    cat > $key_fn<<EOF
[osd.$id]
key = $OSD_SECRET
EOF
    echo adding osd$id key to auth repository
    ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
    echo start osd.$id
    ceph-osd -i $id $ceph_args &

    # If noup is set, then can't wait for this osd
    if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
        return 0
    fi
    wait_for_osd up $id || return 1

}

function test_run_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    run_osd $dir 1 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"30"}' || return 1

    teardown $dir || return 1
}

#######################################################################

##
# Shutdown and remove all traces of the osd by the name osd.**id**.
#
# The OSD is shutdown with the TERM signal. It is then removed from
# the auth list, crush map, osd map etc and the files associated with
# it are also removed.
#
# @param dir path name of the environment
# @param id osd identifier
# @return 0 on success, 1 on error
#
function destroy_osd() {
    local dir=$1
    local id=$2

    ceph osd out osd.$id || return 1
    kill_daemons $dir TERM osd.$id || return 1
    ceph osd down osd.$id || return 1
    ceph osd purge osd.$id --yes-i-really-mean-it || return 1
    teardown $dir/$id || return 1
    rm -fr $dir/$id
}

function test_destroy_osd() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    destroy_osd $dir 0 || return 1
    ! ceph osd dump | grep "osd.0 " || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Run (activate) an osd by the name osd.**id** with data in
# **dir**/**id**. The logs can be found in **dir**/osd.**id**.log,
# the pid file is **dir**/osd.**id**.pid and the admin socket is
# **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to activate_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The activate_osd function expects a valid OSD data directory
# in **dir**/**id**, either just created via run_osd or re-using
# one left by a previous run of ceph-osd. The ceph-osd daemon is
# run directly in the foreground.
#
# The activate_osd function blocks until the monitor reports the osd
# up. If it fails to do so within $TIMEOUT seconds, activate_osd
# fails.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# activate_osd $dir 0 # activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function activate_osd() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    local ceph_args="$CEPH_ARGS"
    ceph_args+=" --osd-failsafe-full-ratio=.99"
    ceph_args+=" --osd-journal-size=100"
    ceph_args+=" --osd-scrub-load-threshold=2000"
    ceph_args+=" --osd-data=$osd_data"
    ceph_args+=" --osd-journal=${osd_data}/journal"
    ceph_args+=" --chdir="
    ceph_args+=$EXTRA_OPTS
    ceph_args+=" --run-dir=$dir"
    ceph_args+=" --admin-socket=$(get_asok_path)"
    ceph_args+=" --debug-osd=20"
    ceph_args+=" --log-file=$dir/\$name.log"
    ceph_args+=" --pid-file=$dir/\$name.pid"
    ceph_args+=" --osd-max-object-name-len=460"
    ceph_args+=" --osd-max-object-namespace-len=64"
    ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
    ceph_args+=" --osd-mclock-profile=high_recovery_ops"
    ceph_args+=" "
    ceph_args+="$@"
    mkdir -p $osd_data

    echo start osd.$id
    ceph-osd -i $id $ceph_args &

    [ "$id" = "$(cat $osd_data/whoami)" ] || return 1

    # If noup is set, then can't wait for this osd
    if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
        return 0
    fi
    wait_for_osd up $id || return 1
}

function test_activate_osd() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    kill_daemons $dir TERM osd || return 1

    activate_osd $dir 0 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    teardown $dir || return 1
}

function test_activate_osd_after_mark_down() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    run_osd $dir 0 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1

    kill_daemons $dir TERM osd || return 1
    ceph osd down 0 || return 1
    wait_for_osd down 0 || return 1

    activate_osd $dir 0 --osd-max-backfills 20 || return 1
    local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
        config get osd_max_backfills)
    test "$backfills" = '{"osd_max_backfills":"20"}' || return 1

    teardown $dir || return 1
}

function test_activate_osd_skip_benchmark() {
    local dir=$1

    setup $dir || return 1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1

    # Skip the osd benchmark during first osd bring-up.
    run_osd $dir 0 --osd-op-queue=mclock_scheduler \
        --osd-mclock-skip-benchmark=true || return 1
    local max_iops_hdd_def=$(CEPH_ARGS='' ceph --format=json daemon \
        $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_hdd)
    local max_iops_ssd_def=$(CEPH_ARGS='' ceph --format=json daemon \
        $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_ssd)

    kill_daemons $dir TERM osd || return 1
    ceph osd down 0 || return 1
    wait_for_osd down 0 || return 1

    # Skip the osd benchmark during activation as well. Validate that
    # the max osd capacities are left unchanged.
    activate_osd $dir 0 --osd-op-queue=mclock_scheduler \
        --osd-mclock-skip-benchmark=true || return 1
    local max_iops_hdd_after_boot=$(CEPH_ARGS='' ceph --format=json daemon \
        $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_hdd)
    local max_iops_ssd_after_boot=$(CEPH_ARGS='' ceph --format=json daemon \
        $(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_ssd)

    test "$max_iops_hdd_def" = "$max_iops_hdd_after_boot" || return 1
    test "$max_iops_ssd_def" = "$max_iops_ssd_after_boot" || return 1

    teardown $dir || return 1
}
#######################################################################

##
# Wait until the OSD **id** is either up or down, as specified by
# **state**. It fails after $TIMEOUT seconds.
#
# @param state either up or down
# @param id osd identifier
# @return 0 on success, 1 on error
#
function wait_for_osd() {
    local state=$1
    local id=$2

    status=1
    for ((i=0; i < $TIMEOUT; i++)); do
        echo $i
        if ! ceph osd dump | grep "osd.$id $state"; then
            sleep 1
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_for_osd() {
    local dir=$1
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    wait_for_osd up 0 || return 1
    wait_for_osd up 1 || return 1
    kill_daemons $dir TERM osd.0 || return 1
    wait_for_osd down 0 || return 1
    ( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Display the list of OSD ids supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT white space separated list of OSD ids
# @return 0 on success, 1 on error
#
function get_osds() {
    local poolname=$1
    local objectname=$2

    local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting | .[]')
    # get rid of the trailing space
    echo $osds
}
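
# Illustrative example (IDs depend on the cluster; with two OSDs and a
# size-2 pool, as in test_get_osds below, the output looks like "0 1"):
#
#   get_osds rbd GROUP   # -> "0 1"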

function test_get_osds() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    create_rbd_pool || return 1
    get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait for the monitor to form quorum (optionally, of size N)
#
# @param timeout duration (lower-bound) to wait for quorum to be formed
# @param quorumsize size of quorum to wait for
# @return 0 on success, 1 on error
#
function wait_for_quorum() {
    local timeout=$1
    local quorumsize=$2

    if [[ -z "$timeout" ]]; then
        timeout=300
    fi

    if [[ -z "$quorumsize" ]]; then
        timeout $timeout ceph quorum_status --format=json >&/dev/null || return 1
        return 0
    fi

    no_quorum=1
    wait_until=$((`date +%s` + $timeout))
    while [[ $(date +%s) -lt $wait_until ]]; do
        jqfilter='.quorum | length == '$quorumsize
        jqinput="$(timeout $timeout ceph quorum_status --format=json 2>/dev/null)"
        res=$(echo $jqinput | jq "$jqfilter")
        if [[ "$res" == "true" ]]; then
            no_quorum=0
            break
        fi
    done
    return $no_quorum
}

#######################################################################

##
# Return the PG supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT a PG
# @return 0 on success, 1 on error
#
function get_pg() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
}

function test_get_pg() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the value of the **config**, obtained via the config get command
# of the admin socket of **daemon**.**id**.
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param STDOUT the config value
# @return 0 on success, 1 on error
#
function get_config() {
    local daemon=$1
    local id=$2
    local config=$3

    CEPH_ARGS='' \
        ceph --format json daemon $(get_asok_path $daemon.$id) \
        config get $config 2> /dev/null | \
        jq -r ".$config"
}

function test_get_config() {
    local dir=$1

    # override the default config using command line arg and check it
    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    test $(get_config mon a osd_pool_default_size) = 1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 --osd_max_scrubs=3 || return 1
    test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Set the **config** to specified **value**, via the config set command
# of the admin socket of **daemon**.**id**
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param value the config value
# @return 0 on success, 1 on error
#
function set_config() {
    local daemon=$1
    local id=$2
    local config=$3
    local value=$4

    test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
               config set $config $value 2> /dev/null | \
           jq 'has("success")') == true
}

function test_set_config() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    set_config mon a ms_crc_header false || return 1
    test $(get_config mon a ms_crc_header) = false || return 1
    set_config mon a ms_crc_header true || return 1
    test $(get_config mon a ms_crc_header) = true || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the OSD id of the primary OSD supporting the **objectname**
# stored in **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the primary OSD id
# @return 0 on success, 1 on error
#
function get_primary() {
    local poolname=$1
    local objectname=$2

    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq '.acting_primary'
}

function test_get_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    test $(get_primary rbd GROUP) = $osd || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the id of any OSD supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map, except the primary.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the OSD id
# @return 0 on success, 1 on error
#
function get_not_primary() {
    local poolname=$1
    local objectname=$2

    local primary=$(get_primary $poolname $objectname)
    ceph --format json osd map $poolname $objectname 2>/dev/null | \
        jq ".acting | map(select (. != $primary)) | .[0]"
}

function test_get_not_primary() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=2 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local primary=$(get_primary rbd GROUP)
    local not_primary=$(get_not_primary rbd GROUP)
    test $not_primary != $primary || return 1
    test $not_primary = 0 -o $not_primary = 1 || return 1
    teardown $dir || return 1
}

#######################################################################

function _objectstore_tool_nodown() {
    local dir=$1
    shift
    local id=$1
    shift
    local osd_data=$dir/$id

    ceph-objectstore-tool \
        --data-path $osd_data \
        "$@" || return 1
}

function _objectstore_tool_nowait() {
    local dir=$1
    shift
    local id=$1
    shift

    kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1

    _objectstore_tool_nodown $dir $id "$@" || return 1
    activate_osd $dir $id $ceph_osd_args >&2 || return 1
}

##
# Run ceph-objectstore-tool against the OSD **id** using the data path
# **dir**. The OSD is killed with TERM prior to running
# ceph-objectstore-tool because access to the data path is
# exclusive. The OSD is restarted after the command completes. The
# objectstore_tool returns after all PGs are active+clean again.
#
# @param dir the data path of the OSD
# @param id the OSD id
# @param ... arguments to ceph-objectstore-tool
# @param STDIN the input of ceph-objectstore-tool
# @param STDOUT the output of ceph-objectstore-tool
# @return 0 on success, 1 on error
#
# The value of $ceph_osd_args will be passed to restarted osds
#
function objectstore_tool() {
    local dir=$1
    shift
    local id=$1
    shift

    _objectstore_tool_nowait $dir $id "$@" || return 1
    wait_for_clean >&2
}

function test_objectstore_tool() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    local osd=0
    run_mgr $dir x || return 1
    run_osd $dir $osd || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    rados --pool rbd put GROUP /etc/group || return 1
    objectstore_tool $dir $osd GROUP get-bytes | \
        diff - /etc/group
    ! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Predicate checking if there is an ongoing recovery in the
# cluster. If any of the recovering_{keys,bytes,objects}_per_sec
# counters are reported by ceph status, it means recovery is in
# progress.
#
# @return 0 if recovery in progress, 1 otherwise
#
function get_is_making_recovery_progress() {
    local recovery_progress
    recovery_progress+=".recovering_keys_per_sec + "
    recovery_progress+=".recovering_bytes_per_sec + "
    recovery_progress+=".recovering_objects_per_sec"
    local progress=$(ceph --format json status 2>/dev/null | \
        jq -r ".pgmap | $recovery_progress")
    test "$progress" != null
}

function test_get_is_making_recovery_progress() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    ! get_is_making_recovery_progress || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the number of active+clean PGs in the cluster. A PG is counted if
# ceph pg dump pgs reports it as both **active** and **clean** and
# not **stale**.
#
# @param STDOUT the number of active+clean PGs
# @return 0 on success, 1 on error
#
function get_num_active_clean() {
    local expression
    expression+="select(contains(\"active\") and contains(\"clean\")) | "
    expression+="select(contains(\"stale\") | not)"
    ceph --format json pg dump pgs 2>/dev/null | \
        jq ".pg_stats | [.[] | .state | $expression] | length"
}

function test_get_num_active_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_active_clean=$(get_num_active_clean)
    test "$num_active_clean" = $PG_NUM || return 1
    teardown $dir || return 1
}

##
# Return the number of active or peered PGs in the cluster. A PG is counted if
# ceph pg dump pgs reports it as either **active** or **peered** and
# not **stale**.
#
# @param STDOUT the number of active or peered PGs
# @return 0 on success, 1 on error
#
function get_num_active_or_peered() {
    local expression
    expression+="select(contains(\"active\") or contains(\"peered\")) | "
    expression+="select(contains(\"stale\") | not)"
    ceph --format json pg dump pgs 2>/dev/null | \
        jq ".pg_stats | [.[] | .state | $expression] | length"
}

function test_get_num_active_or_peered() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_peered=$(get_num_active_or_peered)
    test "$num_peered" = $PG_NUM || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the number of PGs in the cluster, according to
# ceph status.
#
# @param STDOUT the number of PGs
# @return 0 on success, 1 on error
#
function get_num_pgs() {
    ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
}

function test_get_num_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local num_pgs=$(get_num_pgs)
    test "$num_pgs" -gt 0 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the OSD ids in use by at least one PG in the cluster (either
# in the up or the acting set), according to ceph pg dump pgs. Every
# OSD id shows as many times as it is used in up and acting sets.
# If an OSD id is in both the up and acting set of a given PG, it will
# show twice.
#
# @param STDOUT a sorted list of OSD ids
# @return 0 on success, 1 on error
#
function get_osd_id_used_by_pgs() {
    ceph --format json pg dump pgs 2>/dev/null | jq '.pg_stats | .[] | .up[], .acting[]' | sort
}

function test_get_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    local osd_ids=$(get_osd_id_used_by_pgs | uniq)
    test "$osd_ids" = "0" || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Wait until the OSD **id** shows **count** times in the
# PGs (see get_osd_id_used_by_pgs for more information about
# how OSD ids are counted).
#
# @param id the OSD id
# @param count the number of times it must show in the PGs
# @return 0 on success, 1 on error
#
function wait_osd_id_used_by_pgs() {
    local id=$1
    local count=$2

    status=1
    for ((i=0; i < $TIMEOUT / 5; i++)); do
        echo $i
        if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then
            sleep 5
        else
            status=0
            break
        fi
    done
    return $status
}

function test_wait_osd_id_used_by_pgs() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    wait_osd_id_used_by_pgs 0 8 || return 1
    ! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Return the date and time of the last completed scrub for **pgid**,
# as reported by ceph pg dump pgs. Note that a repair also sets this
# date.
#
# @param pgid the id of the PG
# @param STDOUT the date and time of the last scrub
# @return 0 on success, 1 on error
#
function get_last_scrub_stamp() {
    local pgid=$1
    local sname=${2:-last_scrub_stamp}
    ceph --format json pg dump pgs 2>/dev/null | \
        jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname"
}

function test_get_last_scrub_stamp() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    stamp=$(get_last_scrub_stamp 1.0)
    test -n "$stamp" || return 1
    teardown $dir || return 1
}

#######################################################################

##
# Predicate checking if the cluster is clean, i.e. all of its PGs are
# in a clean state (see get_num_active_clean for a definition).
#
# @return 0 if the cluster is clean, 1 otherwise
#
function is_clean() {
    num_pgs=$(get_num_pgs)
    test $num_pgs != 0 || return 1
    test $(get_num_active_clean) = $num_pgs || return 1
}

function test_is_clean() {
    local dir=$1

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1
    is_clean || return 1
    teardown $dir || return 1
}

#######################################################################

calc() { $AWK "BEGIN{print $*}"; }

##
# Return a list of numbers that are increasingly larger and whose
# total is **timeout** seconds. It can be used to have a short sleep
# delay while waiting for an event on a fast machine. But if things run
# very slowly, the larger delays avoid stressing the machine even
# further or spamming the logs.
#
# @param timeout sum of all delays, in seconds
# @return a list of sleep delays
#
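# Illustrative example (values taken from test_get_timeout_delays below):
#
#   get_timeout_delays 20 .1   # -> "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 "
#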
function get_timeout_delays() {
    local trace=$(shopt -q -o xtrace && echo true || echo false)
    $trace && shopt -u -o xtrace
    local timeout=$1
    local first_step=${2:-1}
    local max_timeout=${3:-$MAX_TIMEOUT}

    local i
    local total="0"
    i=$first_step
    while test "$(calc $total + $i \<= $timeout)" = "1"; do
        echo -n "$(calc $i) "
        total=$(calc $total + $i)
        i=$(calc $i \* 2)
        if [ $max_timeout -gt 0 ]; then
            # Did we reach max timeout ?
            if [ ${i%.*} -eq ${max_timeout%.*} ] && [ ${i#*.} \> ${max_timeout#*.} ] || [ ${i%.*} -gt ${max_timeout%.*} ]; then
                # Yes, so let's cap the max wait time to max
                i=$max_timeout
            fi
        fi
    done
    if test "$(calc $total \< $timeout)" = "1"; then
        echo -n "$(calc $timeout - $total) "
    fi
    $trace && shopt -s -o xtrace
}

function test_get_timeout_delays() {
    test "$(get_timeout_delays 1)" = "1 " || return 1
    test "$(get_timeout_delays 5)" = "1 2 2 " || return 1
    test "$(get_timeout_delays 6)" = "1 2 3 " || return 1
    test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
    test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1
    test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1
    test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1
    test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1
    test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1
    test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1
    test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1
    test "$(get_timeout_delays 300 .1 0)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 12.8 25.6 51.2 102.4 95.3 " || return 1
    test "$(get_timeout_delays 300 .1 10)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 7.3 " || return 1
}
1630
1631#######################################################################
1632
1633##
1634# Wait until the cluster becomes clean or if it does not make progress
11fdf7f2 1635# for $WAIT_FOR_CLEAN_TIMEOUT seconds.
7c673cae
FG
1636# Progress is measured either via the **get_is_making_recovery_progress**
1637# predicate or if the number of clean PGs changes (as returned by get_num_active_clean)
1638#
1639# @return 0 if the cluster is clean, 1 otherwise
1640#
1641function wait_for_clean() {
a8e16298 1642 local cmd=$1
7c673cae
FG
1643 local num_active_clean=-1
1644 local cur_active_clean
11fdf7f2 1645 local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
7c673cae 1646 local -i loop=0
31f18b77 1647
3a9019d9 1648 flush_pg_stats || return 1
31f18b77
FG
1649 while test $(get_num_pgs) == 0 ; do
1650 sleep 1
1651 done
7c673cae
FG
1652
1653 while true ; do
1654 # Comparing get_num_active_clean & get_num_pgs is used to determine
1655 # if the cluster is clean. That's almost an inline of is_clean() to
1656 # get more performance by avoiding multiple calls of get_num_active_clean.
1657 cur_active_clean=$(get_num_active_clean)
1658 test $cur_active_clean = $(get_num_pgs) && break
1659 if test $cur_active_clean != $num_active_clean ; then
1660 loop=0
1661 num_active_clean=$cur_active_clean
1662 elif get_is_making_recovery_progress ; then
1663 loop=0
1664 elif (( $loop >= ${#delays[*]} )) ; then
1665 ceph report
1666 return 1
1667 fi
1668 # eval is a no-op if cmd is empty
1669 eval $cmd
1670 sleep ${delays[$loop]}
1671 loop+=1
1672 done
1673 return 0
1674}
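#
# Example (illustrative): the optional *cmd* argument is re-evaluated on
# every retry, which is handy for dumping progress while waiting:
#
#   wait_for_clean "ceph pg dump pgs_brief" || return 1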
1675
1676function test_wait_for_clean() {
1677 local dir=$1
1678
1679 setup $dir || return 1
1680 run_mon $dir a --osd_pool_default_size=2 || return 1
1681 run_osd $dir 0 || return 1
7c673cae 1682 run_mgr $dir x || return 1
c07f9fc5 1683 create_rbd_pool || return 1
1684 ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
1685 run_osd $dir 1 || return 1
1686 wait_for_clean || return 1
1687 teardown $dir || return 1
1688}
1689
1690##
1691# Wait until the cluster becomes peered, giving up if it does not make
1692# progress for $WAIT_FOR_CLEAN_TIMEOUT seconds.
1693# Progress is measured either via the **get_is_making_recovery_progress**
1694# predicate or by a change in the number of peered PGs (as returned by get_num_active_or_peered).
1695#
1696# @return 0 if the cluster is peered, 1 otherwise
1697#
1698function wait_for_peered() {
1699 local cmd=$1
1700 local num_peered=-1
1701 local cur_peered
1702 local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
1703 local -i loop=0
1704
1705 flush_pg_stats || return 1
1706 while test $(get_num_pgs) == 0 ; do
1707 sleep 1
1708 done
1709
1710 while true ; do
1711 # Comparing get_num_active_or_peered & get_num_pgs is used to determine
1712 # if the cluster is peered. That's almost an inline of is_clean() to
1713 # get more performance by avoiding multiple calls of get_num_active_or_peered.
1714 cur_peered=$(get_num_active_or_peered)
1715 test $cur_peered = $(get_num_pgs) && break
1716 if test $cur_peered != $num_peered ; then
1717 loop=0
1718 num_peered=$cur_peered
1719 elif get_is_making_recovery_progress ; then
1720 loop=0
1721 elif (( $loop >= ${#delays[*]} )) ; then
1722 ceph report
1723 return 1
1724 fi
1725 # eval is a no-op if cmd is empty
1726 eval $cmd
1727 sleep ${delays[$loop]}
1728 loop+=1
1729 done
1730 return 0
1731}
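#
# Example (illustrative): give peering a shorter budget than the default
# $WAIT_FOR_CLEAN_TIMEOUT:
#
#   WAIT_FOR_CLEAN_TIMEOUT=30 wait_for_peered || return 1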
1732
1733function test_wait_for_peered() {
1734 local dir=$1
1735
1736 setup $dir || return 1
1737 run_mon $dir a --osd_pool_default_size=2 || return 1
1738 run_osd $dir 0 || return 1
1739 run_mgr $dir x || return 1
1740 create_rbd_pool || return 1
1741 ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
1742 run_osd $dir 1 || return 1
1743 wait_for_peered || return 1
1744 teardown $dir || return 1
1745}
1746
1747
1748#######################################################################
1749
1750##
1751# Wait until the given health condition disappears from the cluster,
1752# for up to $TIMEOUT seconds.
1753#
1754# @param string to grep for in health detail
1755# @return 0 if the health condition is gone,
1756# 1 if it still remains after $TIMEOUT seconds.
1757#
1758function wait_for_health_gone() {
1759 local grepstr=$1
1760 local -a delays=($(get_timeout_delays $TIMEOUT .1))
1761 local -i loop=0
1762
1763 while ceph health detail | grep "$grepstr" ; do
1764 if (( $loop >= ${#delays[*]} )) ; then
1765 ceph health detail
1766 return 1
1767 fi
1768 sleep ${delays[$loop]}
1769 loop+=1
1770 done
1771}
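#
# Example (illustrative): wait for a degraded-PG warning to clear after
# recovery, assuming the warning was present:
#
#   wait_for_health_gone PG_DEGRADED || return 1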
1772
7c673cae 1773##
1774# Wait until the cluster health matches the condition passed as
1775# argument, waiting up to $TIMEOUT seconds.
7c673cae 1776#
1777# @param string to grep for in health detail
1778# @return 0 if the cluster health matches the request, 1 otherwise
1779#
1780function wait_for_health() {
1781 local grepstr=$1
1782 local -a delays=($(get_timeout_delays $TIMEOUT .1))
1783 local -i loop=0
1784
1785 while ! ceph health detail | grep "$grepstr" ; do
1786 if (( $loop >= ${#delays[*]} )) ; then
1787 ceph health detail
1788 return 1
1789 fi
1790 sleep ${delays[$loop]}
1791 loop+=1
1792 done
1793}
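#
# Example (illustrative): after stopping the OSDs, wait for the expected
# warning to show up in "ceph health detail":
#
#   kill_daemons $dir TERM osd || return 1
#   wait_for_health OSD_DOWN || return 1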
1794
1795##
1796# Wait until the cluster becomes HEALTH_OK again, failing if it does
1797# not within $TIMEOUT seconds.
1798#
1799# @return 0 if the cluster is HEALTHY, 1 otherwise
1800#
1801function wait_for_health_ok() {
1802 wait_for_health "HEALTH_OK" || return 1
1803}
1804
1805function test_wait_for_health_ok() {
1806 local dir=$1
1807
1808 setup $dir || return 1
eafe8130 1809 run_mon $dir a --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
31f18b77 1810 run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
eafe8130 1811 # start osd_pool_default_size OSDs
7c673cae 1812 run_osd $dir 0 || return 1
1813 run_osd $dir 1 || return 1
1814 run_osd $dir 2 || return 1
224ce89b 1815 kill_daemons $dir TERM osd || return 1
11fdf7f2 1816 ceph osd down 0 || return 1
eafe8130 1817 # expect TOO_FEW_OSDS warning
224ce89b 1818 ! TIMEOUT=1 wait_for_health_ok || return 1
eafe8130 1819 # resurrect all OSDs
224ce89b 1820 activate_osd $dir 0 || return 1
1821 activate_osd $dir 1 || return 1
1822 activate_osd $dir 2 || return 1
1823 wait_for_health_ok || return 1
1824 teardown $dir || return 1
1825}
1826
1827
1828#######################################################################
1829
1830##
1831# Run repair on **pgid** and wait until it completes. The repair
1832# function will fail if repair does not complete within $TIMEOUT
1833# seconds.
1834#
1835# @param pgid the id of the PG
1836# @return 0 on success, 1 on error
1837#
1838function repair() {
1839 local pgid=$1
1840 local last_scrub=$(get_last_scrub_stamp $pgid)
1841 ceph pg repair $pgid
1842 wait_for_scrub $pgid "$last_scrub"
1843}
1844
1845function test_repair() {
1846 local dir=$1
1847
1848 setup $dir || return 1
f67539c2 1849 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1850 run_mgr $dir x || return 1
1851 run_osd $dir 0 || return 1
c07f9fc5 1852 create_rbd_pool || return 1
7c673cae 1853 wait_for_clean || return 1
b5b8bbf5 1854 repair 1.0 || return 1
7c673cae 1855 kill_daemons $dir KILL osd || return 1
b5b8bbf5 1856 ! TIMEOUT=1 repair 1.0 || return 1
1857 teardown $dir || return 1
1858}
1859#######################################################################
1860
1861##
1862# Run scrub on **pgid** and wait until it completes. The pg_scrub
1863# function will fail if repair does not complete within $TIMEOUT
1864# seconds. The pg_scrub is complete whenever the
1865# **get_last_scrub_stamp** function reports a timestamp different from
1866# the one stored before starting the scrub.
1867#
1868# @param pgid the id of the PG
1869# @return 0 on success, 1 on error
1870#
1871function pg_scrub() {
1872 local pgid=$1
1873 local last_scrub=$(get_last_scrub_stamp $pgid)
1874 ceph pg scrub $pgid
1875 wait_for_scrub $pgid "$last_scrub"
1876}
1877
1878function pg_deep_scrub() {
1879 local pgid=$1
1880 local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
1881 ceph pg deep-scrub $pgid
1882 wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
1883}
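#
# Example (illustrative): deep-scrub pg 1.0 and verify that the operation
# times out once the OSDs are gone (same pattern as test_pg_scrub below):
#
#   pg_deep_scrub 1.0 || return 1
#   kill_daemons $dir KILL osd || return 1
#   ! TIMEOUT=1 pg_deep_scrub 1.0 || return 1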
1884
1885function test_pg_scrub() {
1886 local dir=$1
1887
1888 setup $dir || return 1
f67539c2 1889 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1890 run_mgr $dir x || return 1
1891 run_osd $dir 0 || return 1
c07f9fc5 1892 create_rbd_pool || return 1
7c673cae 1893 wait_for_clean || return 1
b5b8bbf5 1894 pg_scrub 1.0 || return 1
7c673cae 1895 kill_daemons $dir KILL osd || return 1
b5b8bbf5 1896 ! TIMEOUT=1 pg_scrub 1.0 || return 1
1897 teardown $dir || return 1
1898}
1899
1900#######################################################################
1901
1902##
1903# Run the *command* and expect it to fail (i.e. return a non zero status).
1904# The output (stderr and stdout) is stored in a temporary file in *dir*
1905# and is expected to contain the string *expected*.
1906#
1907# Return 0 if the command failed and the string was found. Otherwise
1908# return 1 and cat the full output of the command on stderr for debug.
1909#
1910# @param dir temporary directory to store the output
1911# @param expected string to look for in the output
1912# @param command ... the command and its arguments
1913# @return 0 on success, 1 on error
1914#
1915
1916function expect_failure() {
1917 local dir=$1
1918 shift
1919 local expected="$1"
1920 shift
1921 local success
1922
1923 if "$@" > $dir/out 2>&1 ; then
1924 success=true
1925 else
1926 success=false
1927 fi
1928
1929 if $success || ! grep --quiet "$expected" $dir/out ; then
1930 cat $dir/out >&2
1931 return 1
1932 else
1933 return 0
1934 fi
1935}
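#
# Example (illustrative, using a deliberately missing path):
#
#   expect_failure $dir "No such file" cat /nonexistent-file || return 1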
1936
1937function test_expect_failure() {
1938 local dir=$1
1939
1940 setup $dir || return 1
1941 expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
1942 # the command did not fail
1943 ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
1944 grep --quiet FAIL $dir/out || return 1
1945 # the command failed but the output does not contain the expected string
1946 ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
1947 ! grep --quiet FAIL $dir/out || return 1
1948 teardown $dir || return 1
1949}
1950
1951#######################################################################
1952
1953##
1954# Given the *last_scrub*, wait for scrub to happen on **pgid**. It
1955# will fail if the scrub does not complete within $TIMEOUT seconds. The
1956# scrub is complete whenever the **get_last_scrub_stamp** function
1957# reports a timestamp different from the one given in argument.
1958#
1959# @param pgid the id of the PG
1960# @param last_scrub timestamp of the last scrub for *pgid*
1961# @return 0 on success, 1 on error
1962#
1963function wait_for_scrub() {
1964 local pgid=$1
1965 local last_scrub="$2"
1966 local sname=${3:-last_scrub_stamp}
1967
1968 for ((i=0; i < $TIMEOUT; i++)); do
b5b8bbf5 1969 if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
1970 return 0
1971 fi
1972 sleep 1
1973 done
1974 return 1
1975}
1976
1977function test_wait_for_scrub() {
1978 local dir=$1
1979
1980 setup $dir || return 1
f67539c2 1981 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
1982 run_mgr $dir x || return 1
1983 run_osd $dir 0 || return 1
c07f9fc5 1984 create_rbd_pool || return 1
7c673cae 1985 wait_for_clean || return 1
b5b8bbf5 1986 local pgid=1.0
1987 ceph pg repair $pgid
1988 local last_scrub=$(get_last_scrub_stamp $pgid)
1989 wait_for_scrub $pgid "$last_scrub" || return 1
1990 kill_daemons $dir KILL osd || return 1
1991 last_scrub=$(get_last_scrub_stamp $pgid)
1992 ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
1993 teardown $dir || return 1
1994}
1995
1996#######################################################################
1997
1998##
1999# Return 0 if the erasure code *plugin* is available, 1 otherwise.
2000#
2001# @param plugin erasure code plugin
2002# @return 0 on success, 1 on error
2003#
2004
2005function erasure_code_plugin_exists() {
2006 local plugin=$1
2007 local status
2008 local grepstr
2009 local s
2010 case `uname` in
2011 FreeBSD) grepstr="Cannot open.*$plugin" ;;
2012 *) grepstr="$plugin.*No such file" ;;
2013 esac
2014
2015 s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
2016 local status=$?
2017 if [ $status -eq 0 ]; then
2018 ceph osd erasure-code-profile rm TESTPROFILE
2019 elif ! echo $s | grep --quiet "$grepstr" ; then
2020 status=1
2021 # display why the string was rejected.
2022 echo $s
2023 fi
2024 return $status
2025}
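#
# Example (illustrative): skip an optional test when the isa plugin is not
# available on this build:
#
#   if ! erasure_code_plugin_exists isa ; then
#       echo "SKIP: isa plugin not available"
#       return 0
#   fi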
2026
2027function test_erasure_code_plugin_exists() {
2028 local dir=$1
2029
2030 setup $dir || return 1
2031 run_mon $dir a || return 1
2032 run_mgr $dir x || return 1
2033 erasure_code_plugin_exists jerasure || return 1
2034 ! erasure_code_plugin_exists FAKE || return 1
2035 teardown $dir || return 1
2036}
2037
2038#######################################################################
2039
2040##
2041# Display all log files from **dir** on stdout.
2042#
2043# @param dir directory in which all data is stored
2044#
2045
2046function display_logs() {
2047 local dir=$1
2048
2049 find $dir -maxdepth 1 -name '*.log' | \
2050 while read file ; do
2051 echo "======================= $file"
2052 cat $file
2053 done
2054}
2055
2056function test_display_logs() {
2057 local dir=$1
2058
2059 setup $dir || return 1
2060 run_mon $dir a || return 1
2061 kill_daemons $dir || return 1
2062 display_logs $dir > $dir/log.out
2063 grep --quiet mon.a.log $dir/log.out || return 1
2064 teardown $dir || return 1
2065}
2066
2067#######################################################################
2068##
2069# Spawn a command in background and save the pid in the variable name
2070# passed in argument. To make the output easier to read, each output
2071# line is prefixed with the process id.
2072#
2073# Example:
2074# pids1=""
2075# run_in_background pids1 bash -c 'sleep 1; exit 1'
2076#
2077# @param pid_variable the variable name (not value) where the pids will be stored
2078# @param ... the command to execute
2079# @return 0; rely on **pid_variable** and **wait_background** to check the outcome of the command
2080#
2081function run_in_background() {
2082 local pid_variable=$1
94b18763 2083 shift
7c673cae 2084 # Execute the command and prepend the output with its pid
f64942e4 2085 # We enforce to return the exit status of the command and not the sed one.
f67539c2 2086 ("$@" |& sed 's/^/'$BASHPID': /'; return "${PIPESTATUS[0]}") >&2 &
2087 eval "$pid_variable+=\" $!\""
2088}
2089
2090function save_stdout {
2091 local out="$1"
2092 shift
2093 "$@" > "$out"
2094}
2095
2096function test_run_in_background() {
2097 local pids
2098 run_in_background pids sleep 1
2099 run_in_background pids sleep 1
2100 test $(echo $pids | wc -w) = 2 || return 1
2101 wait $pids || return 1
2102}
2103
2104#######################################################################
2105##
2106# Wait for pids running in background to complete.
2107# This function is usually used after a **run_in_background** call
2108# Example:
2109# pids1=""
2110# run_in_background pids1 bash -c 'sleep 1; exit 1'
2111# wait_background pids1
2112#
2113# @param pids The variable name that contains the active PIDs. It is emptied at the end of the function.
2114# @return 1 if at least one process exited in error, 0 otherwise
2115#
2116function wait_background() {
2117 # We extract the PIDS from the variable name
2118 pids=${!1}
2119
2120 return_code=0
2121 for pid in $pids; do
2122 if ! wait $pid; then
2123 # If one process failed then return 1
2124 return_code=1
2125 fi
2126 done
2127
2128 # We empty the variable reporting that all process ended
2129 eval "$1=''"
2130
2131 return $return_code
2132}
2133
2134
2135function test_wait_background() {
2136 local pids=""
2137 run_in_background pids bash -c "sleep 1; exit 1"
2138 run_in_background pids bash -c "sleep 2; exit 0"
2139 wait_background pids
2140 if [ $? -ne 1 ]; then return 1; fi
2141
2142 run_in_background pids bash -c "sleep 1; exit 0"
2143 run_in_background pids bash -c "sleep 2; exit 0"
2144 wait_background pids
2145 if [ $? -ne 0 ]; then return 1; fi
2146
2147 if [ ! -z "$pids" ]; then return 1; fi
2148}
2149
2150function flush_pg_stats()
2151{
2152 local timeout=${1:-$TIMEOUT}
2153
2154 ids=`ceph osd ls`
2155 seqs=''
2156 for osd in $ids; do
2157 seq=`ceph tell osd.$osd flush_pg_stats`
2158 if test -z "$seq"
2159 then
2160 continue
2161 fi
2162 seqs="$seqs $osd-$seq"
2163 done
2164
2165 for s in $seqs; do
2166 osd=`echo $s | cut -d - -f 1`
2167 seq=`echo $s | cut -d - -f 2`
2168 echo "waiting osd.$osd seq $seq"
2169 while test $(ceph osd last-stat-seq $osd) -lt $seq; do
2170 sleep 1
2171 if [ $((timeout--)) -eq 0 ]; then
2172 return 1
2173 fi
2174 done
2175 done
2176}
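#
# Example (illustrative): force fresh pool statistics before asserting on
# them, with a shorter 30 second timeout instead of $TIMEOUT:
#
#   flush_pg_stats 30 || return 1
#   ceph df detail --format=json | jq '.pools'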
2177
2178function test_flush_pg_stats()
2179{
2180 local dir=$1
2181
2182 setup $dir || return 1
f67539c2 2183 run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
2184 run_mgr $dir x || return 1
2185 run_osd $dir 0 || return 1
c07f9fc5 2186 create_rbd_pool || return 1
31f18b77 2187 rados -p rbd put obj /etc/group
3a9019d9 2188 flush_pg_stats || return 1
31f18b77 2189 local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
2190 stored=`ceph df detail --format=json | jq "$jq_filter.stored"`
2191 stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
2192 test $stored -gt 0 || return 1
2193 test $stored == $stored_raw || return 1
b5b8bbf5 2194 teardown $dir
2195}
2196
2197########################################################################
2198##
2199# Get the current op scheduler enabled on an osd by reading the
2200# osd_op_queue config option
2201#
2202# Example:
2203# get_op_scheduler $osdid
2204#
2205# @param id the id of the OSD
2206# @return the name of the op scheduler enabled for the OSD
2207#
2208function get_op_scheduler() {
2209 local id=$1
2210
2211 get_config osd $id osd_op_queue
2212}
2213
2214function test_get_op_scheduler() {
2215 local dir=$1
2216
2217 setup $dir || return 1
2218
2219 run_mon $dir a || return 1
2220 run_mgr $dir x || return 1
2221
2222 run_osd $dir 0 --osd_op_queue=wpq || return 1
2223 test $(get_op_scheduler 0) = "wpq" || return 1
2224
2225 run_osd $dir 1 --osd_op_queue=mclock_scheduler || return 1
2226 test $(get_op_scheduler 1) = "mclock_scheduler" || return 1
2227 teardown $dir || return 1
2228}
2229
2230#######################################################################
2231
2232##
2233# Call the **run** function (which must be defined by the caller) with
2234# the **dir** argument followed by the caller argument list.
2235#
2236# If the **run** function returns on error, all logs found in **dir**
2237# are displayed for diagnostic purposes.
2238#
2239# **teardown** function is called when the **run** function returns
2240# (on success or on error), to clean up leftovers. The CEPH_CONF is set
2241# to /dev/null and CEPH_ARGS is unset so that the tests are protected from
2242# external interferences.
2243#
2244# It is the responsibility of the **run** function to call the
2245# **setup** function to prepare the test environment (create a temporary
2246# directory etc.).
2247#
2248# The shell is required (via PS4) to display the function and line
2249# number whenever a statement is executed to help debugging.
2250#
2251# @param dir directory in which all data is stored
2252# @param ... arguments passed transparently to **run**
2253# @return 0 on success, 1 on error
2254#
2255function main() {
2256 local dir=td/$1
2257 shift
2258
2259 shopt -s -o xtrace
2260 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
2261
2262 export PATH=.:$PATH # make sure programs from sources are preferred
2263 export PYTHONWARNINGS=ignore
2264 export CEPH_CONF=/dev/null
2265 unset CEPH_ARGS
2266
2267 local code
2268 if run $dir "$@" ; then
2269 code=0
2270 else
2271 code=1
2272 fi
b5b8bbf5 2273 teardown $dir $code || return 1
2274 return $code
2275}
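#
# Example (illustrative): a standalone test script built on these helpers
# typically defines a run() function and ends with:
#
#   main my-test-name "$@"
#
# where "my-test-name" (a hypothetical name) becomes the td/my-test-name
# work directory passed to run().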
2276
2277#######################################################################
2278
2279function run_tests() {
2280 shopt -s -o xtrace
2281 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
2282
11fdf7f2 2283 export PATH=.:$PATH # make sure programs from sources are preferred
2284
2285 export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
2286 export CEPH_ARGS
b5b8bbf5 2287 CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
2288 CEPH_ARGS+="--mon-host=$CEPH_MON "
2289 export CEPH_CONF=/dev/null
2290
2291 local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
2292 local dir=td/ceph-helpers
2293
2294 for func in $funcs ; do
2295 if ! $func $dir; then
2296 teardown $dir 1
2297 return 1
2298 fi
2299 done
2300}
2301
2302if test "$1" = TESTS ; then
2303 shift
2304 run_tests "$@"
b5b8bbf5 2305 exit $?
2306fi
2307
2308# NOTE:
2309# jq only supports --exit-status|-e from version 1.4 onwards, which makes
2310# returning on error waaaay prettier and straightforward.
2311# However, the current automated upstream build is running with v1.3,
2312# which has no idea what -e is. Hence the convoluted error checking we
2313# need. Sad.
2314# The next time someone changes this code, please check if v1.4 is now
2315# a thing, and, if so, please change these to use -e. Thanks.
2316
2317# jq '.all.supported | select([.[] == "foo"] | any)'
2318function jq_success() {
2319 input="$1"
2320 filter="$2"
2321 expects="\"$3\""
2322
2323 in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g")
2324 filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g")
2325
2326 ret=$(echo "$in_escaped" | jq "$filter_escaped")
2327 if [[ "$ret" == "true" ]]; then
2328 return 0
2329 elif [[ -n "$expects" ]]; then
2330 if [[ "$ret" == "$expects" ]]; then
2331 return 0
2332 fi
2333 fi
2334 return 1
2346}
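#
# Example (illustrative, assuming the health JSON carries a .status field):
#
#   out=$(ceph health --format=json)
#   jq_success "$out" '.status == "HEALTH_OK"' || return 1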
2347
2348function inject_eio() {
2349 local pooltype=$1
2350 shift
2351 local which=$1
2352 shift
2353 local poolname=$1
2354 shift
2355 local objname=$1
2356 shift
2357 local dir=$1
2358 shift
2359 local shard_id=$1
2360 shift
2361
2362 local -a initial_osds=($(get_osds $poolname $objname))
2363 local osd_id=${initial_osds[$shard_id]}
2364 if [ "$pooltype" != "ec" ]; then
2365 shard_id=""
2366 fi
2367 type=$(cat $dir/$osd_id/type)
2368 set_config osd $osd_id ${type}_debug_inject_read_err true || return 1
2369 local loop=0
2370 while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
2371 inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
2372 loop=$(expr $loop + 1)
2373 if [ $loop = "10" ]; then
2374 return 1
2375 fi
2376 sleep 1
2377 done
2378}
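#
# Example (illustrative): inject a data read error on shard 0 of object
# SOMETHING in an erasure coded pool named ecpool (both names hypothetical):
#
#   inject_eio ec data ecpool SOMETHING $dir 0 || return 1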
2379
2380function multidiff() {
2381 if ! diff $@ ; then
2382 if [ "$DIFFCOLOPTS" = "" ]; then
2383 return 1
2384 fi
2385 diff $DIFFCOLOPTS $@
2386 fi
2387}
2388
2389function create_ec_pool() {
2390 local pool_name=$1
2391 shift
2392 local allow_overwrites=$1
2393 shift
2394
2395 ceph osd erasure-code-profile set myprofile crush-failure-domain=osd "$@" || return 1
2396
2397 create_pool "$pool_name" 1 1 erasure myprofile || return 1
2398
2399 if [ "$allow_overwrites" = "true" ]; then
2400 ceph osd pool set "$pool_name" allow_ec_overwrites true || return 1
2401 fi
2402
2403 wait_for_clean || return 1
2404 return 0
2405}
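#
# Example (illustrative): create an erasure coded pool with overwrites
# enabled and a k=2 m=1 profile (pool name hypothetical):
#
#   create_ec_pool ecpool true k=2 m=1 || return 1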
2406
7c673cae 2407# Local Variables:
c07f9fc5 2408# compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"
7c673cae 2409# End: