#!/bin/bash -x
# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab

source $(dirname $0)/../../standalone/ceph-helpers.sh

set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}
export CEPH_DEV=1

function get_admin_socket()
{
  local client=$1

  if test -n "$CEPH_ASOK_DIR";
  then
    echo $(get_asok_dir)/$client.asok
  else
    local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
    echo "/var/run/ceph/$cluster-$client.asok"
  fi
}
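# Usage sketch (assuming the default cluster name "ceph"):
#   get_admin_socket osd.0   # -> /var/run/ceph/ceph-osd.0.asok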

function check_no_osd_down()
{
  ! ceph osd dump | grep ' down '
}

function wait_no_osd_down()
{
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_no_osd_down ; then
      echo "waiting for osd(s) to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  check_no_osd_down
}

function expect_false()
{
  set -x
  if "$@"; then return 1; else return 0; fi
}


TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0

TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)

#
# retry_eagain max cmd args ...
#
# retry cmd args ... if it exits on error and its output contains the
# string EAGAIN, at most $max times
#
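# Usage sketch (the command and arguments are illustrative):
#   retry_eagain 5 ceph tell osd.0 version
#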
function retry_eagain()
{
  local max=$1
  shift
  local status
  local tmpfile=$TEMP_DIR/retry_eagain.$$
  local count
  for count in $(seq 1 $max) ; do
    status=0
    "$@" > $tmpfile 2>&1 || status=$?
    if test $status = 0 ||
      ! grep --quiet EAGAIN $tmpfile ; then
      break
    fi
    sleep 1
  done
  if test $count = $max ; then
    echo retried with non-zero exit status, $max times: "$@" >&2
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}

#
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of cmd arg ... if the output contains
# ENXIO.
#
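# Usage sketch, typically wrapped by retry_eagain (as done later in this
# script):
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version
#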
function map_enxio_to_eagain()
{
  local status=0
  local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$

  "$@" > $tmpfile 2>&1 || status=$?
  if test $status != 0 &&
    grep --quiet ENXIO $tmpfile ; then
    echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}

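#
# check_response expected_string [retcode expected_retcode]
#
# Assert that $TMPFILE contains expected_string; when expected_retcode is
# given, also assert that retcode equals it. Exits the test on failure.
#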
function check_response()
{
  expected_string=$1
  retcode=$2
  expected_retcode=$3
  if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
    echo "return code invalid: got $retcode, expected $expected_retcode" >&2
    exit 1
  fi

  if ! grep --quiet -- "$expected_string" $TMPFILE ; then
    echo "Didn't find $expected_string in output" >&2
    cat $TMPFILE >&2
    exit 1
  fi
}

function get_config_value_or_die()
{
  local target config_opt raw val

  target=$1
  config_opt=$2

  raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
  if [[ $? -ne 0 ]]; then
    echo "error obtaining config opt '$config_opt' from '$target': $raw"
    exit 1
  fi

  raw=`echo $raw | sed -e 's/[{} "]//g'`
  val=`echo $raw | cut -f2 -d:`

  echo "$val"
  return 0
}

function expect_config_value()
{
  local target config_opt expected_val val
  target=$1
  config_opt=$2
  expected_val=$3

  val=$(get_config_value_or_die $target $config_opt)

  if [[ "$val" != "$expected_val" ]]; then
    echo "expected '$expected_val', got '$val'"
    exit 1
  fi
}

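# Watch-helper usage sketch (mirrors test_mon_misc below; the arguments are
# an optional log level and channel):
#   ceph_watch_start
#   ceph log "some message"
#   ceph_watch_wait "some message" 60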
function ceph_watch_start()
{
  local watch_opt=--watch

  if [ -n "$1" ]; then
    watch_opt=--watch-$1
    if [ -n "$2" ]; then
      watch_opt+=" --watch-channel $2"
    fi
  fi

  CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
  ceph $watch_opt > $CEPH_WATCH_FILE &
  CEPH_WATCH_PID=$!

  # wait until the "ceph" client is connected and receiving
  # log messages from monitor
  for i in `seq 3`; do
    grep -q "cluster" $CEPH_WATCH_FILE && break
    sleep 1
  done
}

function ceph_watch_wait()
{
  local regexp=$1
  local timeout=30

  if [ -n "$2" ]; then
    timeout=$2
  fi

  for i in `seq ${timeout}`; do
    grep -q "$regexp" $CEPH_WATCH_FILE && break
    sleep 1
  done

  kill $CEPH_WATCH_PID

  if ! grep "$regexp" $CEPH_WATCH_FILE; then
    echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
    cat $CEPH_WATCH_FILE >&2
    return 1
  fi
}

function test_mon_injectargs()
{
  CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ! grep "the.dump" $TMPFILE || return 1
  ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true'"
  ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
  expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
                 ceph tell osd.0 injectargs -- '--osd_op_history_duration'

  ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200' >& $TMPFILE || return 1
  check_response "osd_deep_scrub_interval = '2419200.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon_probe_timeout 2' >& $TMPFILE || return 1
  check_response "mon_probe_timeout = '2.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
  check_response "mon_lease = '6.000000' (not observed, change may require restart)"

  # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
  expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 >& $TMPFILE || return 1
  check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
}

function test_mon_injectargs_SI()
{
  # Test SI units during injectargs and 'config set'
  # We only aim at testing the units are parsed accordingly
  # and don't intend to test whether the options being set
  # actually expect SI units to be passed.
  # Keep in mind that all integer based options that are not based on bytes
  # (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed to
  # base 10.
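  # For example, a non-byte integer option parses SI suffixes in base 10:
  #   10K -> 10000        1G -> 1000000000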
  initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
  check_response "'10F': (22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
  expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
  expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}

function test_mon_injectargs_IEC()
{
  # Test IEC units during injectargs and 'config set'
  # We only aim at testing the units are parsed accordingly
  # and don't intend to test whether the options being set
  # actually expect IEC units to be passed.
  # Keep in mind that all integer based options that are based on bytes
  # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
  # unit modifiers (for backwards compatibility and convenience) and be parsed
  # to base 2.
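  # For example, for a byte-based option both suffix families are base 2:
  #   15G  -> 15 * 2^30 = 16106127360
  #   16Gi -> 16 * 2^30 = 17179869184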
  initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
  $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
  expect_config_value "mon.a" "mon_data_size_warn" 15000000000
  $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
  expect_config_value "mon.a" "mon_data_size_warn" 16106127360
  $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
  expect_config_value "mon.a" "mon_data_size_warn" 17179869184
  $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
  check_response "'10F': (22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
  expect_config_value "mon.a" "mon_data_size_warn" 15000000000
  ceph tell mon.a injectargs '--mon_data_size_warn 15G'
  expect_config_value "mon.a" "mon_data_size_warn" 16106127360
  ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
  expect_config_value "mon.a" "mon_data_size_warn" 17179869184
  expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
  $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
}

function test_tiering_agent()
{
  local slow=slow_eviction
  local fast=fast_eviction
  ceph osd pool create $slow 1 1
  ceph osd pool application enable $slow rados
  ceph osd pool create $fast 1 1
  ceph osd tier add $slow $fast
  ceph osd tier cache-mode $fast writeback
  ceph osd tier set-overlay $slow $fast
  ceph osd pool set $fast hit_set_type bloom
  rados -p $slow put obj1 /etc/group
  ceph osd pool set $fast target_max_objects 1
  ceph osd pool set $fast hit_set_count 1
  ceph osd pool set $fast hit_set_period 5
  # wait for the object to be evicted from the cache
  local evicted
  evicted=false
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      evicted=true
      break
    fi
    sleep 1
  done
  $evicted # assert
  # the object is proxy read and promoted to the cache
  rados -p $slow get obj1 - >/dev/null
  # wait for the promoted object to be evicted again
  evicted=false
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      evicted=true
      break
    fi
    sleep 1
  done
  $evicted # assert
  ceph osd tier remove-overlay $slow
  ceph osd tier remove $slow $fast
  ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
  ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}

function test_tiering_1()
{
  # tiering
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create slow2 2
  ceph osd pool application enable slow2 rados
  ceph osd pool create cache 2
  ceph osd pool create cache2 2
  ceph osd tier add slow cache
  ceph osd tier add slow cache2
  expect_false ceph osd tier add slow2 cache
  # test some state transitions
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache readonly
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache writeback
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  # test with dirty objects in the tier pool
  # tier pool currently set to 'writeback'
  rados -p cache put /etc/passwd /etc/passwd
  flush_pg_stats
  # 1 dirty object in pool 'cache'
  ceph osd tier cache-mode cache proxy
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache writeback
  # remove object from tier pool
  rados -p cache rm /etc/passwd
  rados -p cache cache-flush-evict-all
  flush_pg_stats
  # no dirty objects in pool 'cache'
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  TRIES=0
  while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
  do
    grep 'currently creating pgs' $TMPFILE
    TRIES=$(( $TRIES + 1 ))
    test $TRIES -ne 60
    sleep 3
  done
  expect_false ceph osd pool set cache pg_num 4
  ceph osd tier cache-mode cache none
  ceph osd tier set-overlay slow cache
  expect_false ceph osd tier set-overlay slow cache2
  expect_false ceph osd tier remove slow cache
  ceph osd tier remove-overlay slow
  ceph osd tier set-overlay slow cache2
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache
  ceph osd tier add slow2 cache
  expect_false ceph osd tier set-overlay slow cache
  ceph osd tier set-overlay slow2 cache
  ceph osd tier remove-overlay slow2
  ceph osd tier remove slow2 cache
  ceph osd tier remove slow cache2

  # make sure a non-empty pool fails
  rados -p cache2 put /etc/passwd /etc/passwd
  while ! ceph df | grep cache2 | grep ' 1 ' ; do
    echo waiting for pg stats to flush
    sleep 2
  done
  expect_false ceph osd tier add slow cache2
  ceph osd tier add slow cache2 --force-nonempty
  ceph osd tier remove slow cache2

  ceph osd pool ls | grep cache2
  ceph osd pool ls -f json-pretty | grep cache2
  ceph osd pool ls detail | grep cache2
  ceph osd pool ls detail -f json-pretty | grep cache2

  ceph osd pool delete slow slow --yes-i-really-really-mean-it
  ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
  ceph osd pool delete cache cache --yes-i-really-really-mean-it
  ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
}

function test_tiering_2()
{
  # make sure we can't clobber snapshot state
  ceph osd pool create snap_base 2
  ceph osd pool application enable snap_base rados
  ceph osd pool create snap_cache 2
  ceph osd pool mksnap snap_cache snapname
  expect_false ceph osd tier add snap_base snap_cache
  ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
  ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}

function test_tiering_3()
{
  # make sure we can't create snapshot on tier
  ceph osd pool create basex 2
  ceph osd pool application enable basex rados
  ceph osd pool create cachex 2
  ceph osd tier add basex cachex
  expect_false ceph osd pool mksnap cachex snapname
  ceph osd tier remove basex cachex
  ceph osd pool delete basex basex --yes-i-really-really-mean-it
  ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}

function test_tiering_4()
{
  # make sure we can't create an ec pool tier
  ceph osd pool create eccache 2 2 erasure
  expect_false ceph osd set-require-min-compat-client bobtail
  ceph osd pool create repbase 2
  ceph osd pool application enable repbase rados
  expect_false ceph osd tier add repbase eccache
  ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
  ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}

function test_tiering_5()
{
  # convenient add-cache command
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create cache3 2
  ceph osd tier add-cache slow cache3 1024000
  ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
  ceph osd tier remove slow cache3 2> $TMPFILE || true
  check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache3
  ceph osd pool ls | grep cache3
  ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
  ! ceph osd pool ls | grep cache3 || exit 1
  ceph osd pool delete slow slow --yes-i-really-really-mean-it
}

function test_tiering_6()
{
  # check that add-cache works
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd tier cache-mode cachepool writeback
  rados -p datapool put object /etc/passwd
  rados -p cachepool stat object
  rados -p cachepool cache-flush object
  rados -p datapool stat object
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}

function test_tiering_7()
{
  # protection against pool removal when used as tiers
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'datapool' has tiers cachepool"
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}

function test_tiering_8()
{
  ## check health check
  ceph osd set notieragent
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cache4 2
  ceph osd tier add-cache datapool cache4 1024000
  ceph osd tier cache-mode cache4 writeback
  tmpfile=$(mktemp|grep tmp)
  dd if=/dev/zero of=$tmpfile bs=4K count=1
  ceph osd pool set cache4 target_max_objects 200
  ceph osd pool set cache4 target_max_bytes 1000000
  rados -p cache4 put foo1 $tmpfile
  rados -p cache4 put foo2 $tmpfile
  rm -f $tmpfile
  flush_pg_stats
  ceph df | grep datapool | grep ' 2 '
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cache4
  ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
  ceph osd unset notieragent
}

function test_tiering_9()
{
  # make sure 'tier remove' behaves as we expect
  # i.e., removing a tier from a pool that's not its base pool only
  # results in a 'pool foo is now (or already was) not a tier of bar'
  #
  ceph osd pool create basepoolA 2
  ceph osd pool application enable basepoolA rados
  ceph osd pool create basepoolB 2
  ceph osd pool application enable basepoolB rados
  poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
  poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')

  ceph osd pool create cache5 2
  ceph osd pool create cache6 2
  ceph osd tier add basepoolA cache5
  ceph osd tier add basepoolB cache6
  ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
  ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"

  ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
  ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1

  ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
  ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1

  ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
  ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
  ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
  ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}

function test_auth()
{
  ceph auth add client.xx mon allow osd "allow *"
  ceph auth export client.xx >client.xx.keyring
  ceph auth add client.xx -i client.xx.keyring
  rm -f client.xx.keyring
  ceph auth list | grep client.xx
  ceph auth ls | grep client.xx
  ceph auth get client.xx | grep caps | grep mon
  ceph auth get client.xx | grep caps | grep osd
  ceph auth get-key client.xx
  ceph auth print-key client.xx
  ceph auth print_key client.xx
  ceph auth caps client.xx osd "allow rw"
  expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
  ceph auth get client.xx | grep osd | grep "allow rw"
  ceph auth export | grep client.xx
  ceph auth export -o authfile
  ceph auth import -i authfile
  ceph auth export -o authfile2
  diff authfile authfile2
  rm authfile authfile2
  ceph auth del client.xx
  expect_false ceph auth get client.xx

  # (almost) interactive mode
  echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
  ceph auth get client.xx
  # script mode
  echo 'auth del client.xx' | ceph
  expect_false ceph auth get client.xx

  #
  # get / set auid
  #
  local auid=444
  ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
  expect_false ceph auth import --in-file TEST-keyring
  rm TEST-keyring
  ceph-authtool --create-keyring --name client.TEST --gen-key --cap mon "allow r" --set-uid $auid TEST-keyring
  ceph auth import --in-file TEST-keyring
  rm TEST-keyring
  ceph auth get client.TEST > $TMPFILE
  check_response "auid = $auid"
  ceph --format json-pretty auth get client.TEST > $TMPFILE
  check_response '"auid": '$auid
  ceph auth ls > $TMPFILE
  check_response "auid: $auid"
  ceph --format json-pretty auth ls > $TMPFILE
  check_response '"auid": '$auid
  ceph auth del client.TEST
}

function test_auth_profiles()
{
  ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
       mgr 'allow profile read-only'
  ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
       mgr 'allow profile read-write'
  ceph auth add client.xx-profile-rd mon 'allow profile role-definer'

  ceph auth export > client.xx.keyring

  # read-only is allowed all read-only commands (auth excluded)
  ceph -n client.xx-profile-ro -k client.xx.keyring status
  ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
  ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
  # read-only gets access denied for rw commands or auth commands
  ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # read-write is allowed for all read-write commands (except auth)
  ceph -n client.xx-profile-rw -k client.xx.keyring status
  ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
  ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
  ceph -n client.xx-profile-rw -k client.xx.keyring log foo
  ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
  ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
  # read-write gets access denied for auth commands
  ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
  ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
  ceph -n client.xx-profile-rd -k client.xx.keyring auth export
  ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
  ceph -n client.xx-profile-rd -k client.xx.keyring status
  ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  # read-only 'mon' subsystem commands are allowed
  ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
  # but read-write 'mon' commands are not
  ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"

  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw

  # add a new role-definer with the existing role-definer
  ceph -n client.xx-profile-rd -k client.xx.keyring \
       auth add client.xx-profile-rd2 mon 'allow profile role-definer'
  ceph -n client.xx-profile-rd -k client.xx.keyring \
       auth export > client.xx.keyring.2
  # remove old role-definer using the new role-definer
  ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
       auth del client.xx-profile-rd
  # remove the remaining role-definer with admin
  ceph auth del client.xx-profile-rd2
  rm -f client.xx.keyring client.xx.keyring.2
}

function test_mon_caps()
{
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring

  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"

  rm -rf $TEMP_DIR/ceph.client.bug.keyring
  ceph auth del client.bug
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"
}

function test_mon_misc()
{
  # with and without verbosity
  ceph osd dump | grep '^epoch'
  ceph --concise osd dump | grep '^epoch'

  ceph osd df | grep 'MIN/MAX VAR'

  # df
  ceph df > $TMPFILE
  grep GLOBAL $TMPFILE
  grep -v DIRTY $TMPFILE
  ceph df detail > $TMPFILE
  grep DIRTY $TMPFILE
  ceph df --format json > $TMPFILE
  grep 'total_bytes' $TMPFILE
  grep -v 'dirty' $TMPFILE
  ceph df detail --format json > $TMPFILE
  grep 'rd_bytes' $TMPFILE
  grep 'dirty' $TMPFILE
  ceph df --format xml | grep '<total_bytes>'
  ceph df detail --format xml | grep '<rd_bytes>'

  ceph fsid
  ceph health
  ceph health detail
  ceph health --format json-pretty
  ceph health detail --format xml-pretty

  ceph time-sync-status

  ceph node ls
  for t in mon osd mds ; do
    ceph node ls $t
  done

  ceph_watch_start
  mymsg="this is a test log message $$.$(date)"
  ceph log "$mymsg"
  ceph log last | grep "$mymsg"
  ceph log last 100 | grep "$mymsg"
  ceph_watch_wait "$mymsg"

  ceph mgr dump
  ceph mgr module ls
  ceph mgr module enable restful
  expect_false ceph mgr module enable foodne
  ceph mgr module enable foodne --force
  ceph mgr module disable foodne
  ceph mgr module disable foodnebizbangbash

  ceph mon metadata a
  ceph mon metadata
  ceph mon count-metadata ceph_version
  ceph mon versions

  ceph mgr metadata
  ceph mgr versions
  ceph mgr count-metadata ceph_version

  ceph versions

  ceph node ls
}

function check_mds_active()
{
  fs_name=$1
  ceph fs get $fs_name | grep active
}

function wait_mds_active()
{
  fs_name=$1
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_mds_active $fs_name ; then
      echo "waiting for an active MDS daemon ($i/$max_run)"
      sleep 5
    else
      break
    fi
  done
  check_mds_active $fs_name
}

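# Print the GIDs of every MDS in the named filesystem, space-separated,
# by parsing the JSON fs map (the inline helper uses python2-style print).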
function get_mds_gids()
{
  fs_name=$1
  ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
}

function fail_all_mds()
{
  fs_name=$1
  ceph fs set $fs_name cluster_down true
  mds_gids=$(get_mds_gids $fs_name)
  for mds_gid in $mds_gids ; do
    ceph mds fail $mds_gid
  done
  if check_mds_active $fs_name ; then
    echo "An active MDS remains, something went wrong"
    ceph fs get $fs_name
    exit 1
  fi

}

function remove_all_fs()
{
  existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
  for fs_name in $existing_fs ; do
    echo "Removing fs ${fs_name}..."
    fail_all_mds $fs_name
    echo "Removing existing filesystem '${fs_name}'..."
    ceph fs rm $fs_name --yes-i-really-mean-it
    echo "Removed '${fs_name}'."
  done
}

# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
  ceph auth ls | grep "^mds"
}

# Some of the commands are just not idempotent.
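# When CEPH_CLI_TEST_DUP_COMMAND is set, the ceph CLI repeats each command
# (see the "osd create" notes further down); this wrapper temporarily unsets
# it so a single, non-idempotent command runs only once, e.g.:
#   without_test_dup_command ceph tell mds.0 respawn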
function without_test_dup_command()
{
  if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
    "$@"
  else
    local saved=${CEPH_CLI_TEST_DUP_COMMAND}
    unset CEPH_CLI_TEST_DUP_COMMAND
    "$@"
    CEPH_CLI_TEST_DUP_COMMAND=$saved
  fi
}

function test_mds_tell()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  remove_all_fs
  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  # Test injectargs by GID
  old_mds_gids=$(get_mds_gids $FS_NAME)
  echo Old GIDs: $old_mds_gids

  for mds_gid in $old_mds_gids ; do
    ceph tell mds.$mds_gid injectargs "--debug-mds 20"
  done
  expect_false ceph tell mds.a injectargs mds_max_file_recover -1

  # Test respawn by rank
  without_test_dup_command ceph tell mds.0 respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  # Test respawn by ID
  without_test_dup_command ceph tell mds.a respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  remove_all_fs
  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mon_mds()
{
  local FS_NAME=cephfs
  remove_all_fs

  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data

  ceph fs set $FS_NAME cluster_down true
  ceph fs set $FS_NAME cluster_down false

  # Legacy commands, act on default fs
  ceph mds cluster_down
  ceph mds cluster_up

  ceph mds compat rm_incompat 4
  ceph mds compat rm_incompat 4

  # We don't want any MDSs to be up, their activity can interfere with
  # the "current_epoch + 1" checking below if they're generating updates
  fail_all_mds $FS_NAME

  ceph mds compat show
  expect_false ceph mds deactivate 2
  ceph mds dump
  ceph fs dump
  ceph fs get $FS_NAME
  for mds_gid in $(get_mds_gids $FS_NAME) ; do
    ceph mds metadata $mds_gid
  done
  ceph mds metadata
  ceph mds versions
  ceph mds count-metadata os

  # XXX mds fail, but how do you undo it?
  mdsmapfile=$TEMP_DIR/mdsmap.$$
  current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
  [ -s $mdsmapfile ]
  rm $mdsmapfile

  ceph osd pool create data2 10
  ceph osd pool create data3 10
  data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
  data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
  ceph mds add_data_pool $data2_pool
  ceph mds add_data_pool $data3_pool
  ceph mds add_data_pool 100 >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph mds add_data_pool foobarbaz >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph mds remove_data_pool $data2_pool
  ceph mds remove_data_pool $data3_pool
  ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
  ceph mds set allow_multimds false
  expect_false ceph mds set_max_mds 4
  ceph mds set allow_multimds true
  ceph mds set_max_mds 4
  ceph mds set_max_mds 3
  ceph mds set_max_mds 256
  expect_false ceph mds set_max_mds 257
  ceph mds set max_mds 4
  ceph mds set max_mds 256
  expect_false ceph mds set max_mds 257
  expect_false ceph mds set max_mds asdf
  expect_false ceph mds set inline_data true
  ceph mds set inline_data true --yes-i-really-mean-it
  ceph mds set inline_data yes --yes-i-really-mean-it
  ceph mds set inline_data 1 --yes-i-really-mean-it
  expect_false ceph mds set inline_data --yes-i-really-mean-it
  ceph mds set inline_data false
  ceph mds set inline_data no
  ceph mds set inline_data 0
  expect_false ceph mds set inline_data asdf
  ceph mds set max_file_size 1048576
  expect_false ceph mds set max_file_size 123asdf

  expect_false ceph mds set allow_new_snaps
  expect_false ceph mds set allow_new_snaps true
  ceph mds set allow_new_snaps true --yes-i-really-mean-it
  ceph mds set allow_new_snaps 0
  ceph mds set allow_new_snaps false
  ceph mds set allow_new_snaps no
  expect_false ceph mds set allow_new_snaps taco

  # we should never be able to add EC pools as data or metadata pools
  # create an ec-pool...
  ceph osd pool create mds-ec-pool 10 10 erasure
  set +e
  ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e
  ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
  data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
  metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')

  fail_all_mds $FS_NAME

  set +e
  # Check that rmfailed requires confirmation
  expect_false ceph mds rmfailed 0
  ceph mds rmfailed 0 --yes-i-really-mean-it
  set -e

  # Check that `newfs` is no longer permitted
  expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE

  # Check that 'fs reset' runs
  ceph fs reset $FS_NAME --yes-i-really-mean-it

  # Check that creating a second FS fails by default
  ceph osd pool create fs_metadata2 10
  ceph osd pool create fs_data2 10
  set +e
  expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
  set -e

  # Check that setting enable_multiple enables creation of second fs
  ceph fs flag set enable_multiple true --yes-i-really-mean-it
  ceph fs new cephfs2 fs_metadata2 fs_data2

  # Clean up multi-fs stuff
  fail_all_mds cephfs2
  ceph fs rm cephfs2 --yes-i-really-mean-it
  ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
  ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it

  fail_all_mds $FS_NAME

  # Clean up to enable subsequent fs new tests
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  set +e
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e

  # ... now create a cache tier in front of the EC pool...
  ceph osd pool create mds-tier 2
  ceph osd tier add mds-ec-pool mds-tier
  ceph osd tier set-overlay mds-ec-pool mds-tier
  tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')

  # Use of a readonly tier should be forbidden
  ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
  set +e
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
  set -e

  # Use of a writeback tier should enable FS creation
  ceph osd tier cache-mode mds-tier writeback
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force

  # While a FS exists using the tiered pools, I should not be allowed
  # to remove the tier
  set +e
  ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  set -e

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ... but we should be forbidden from using the cache pool in the FS directly.
  set +e
  ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  set -e

  # Clean up tier + EC pools
  ceph osd tier remove-overlay mds-ec-pool
  ceph osd tier remove mds-ec-pool mds-tier

  # Create a FS using the 'cache' pool now that it's no longer a tier
  ceph fs new $FS_NAME fs_metadata mds-tier --force

  # We should be forbidden from using this pool as a tier now that
  # it's in use for CephFS
  set +e
  ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  set -e

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # We should be permitted to use an EC pool with overwrites enabled
  # as the data pool...
  ceph osd pool set mds-ec-pool allow_ec_overwrites true
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ...but not as the metadata pool
  set +e
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e

  ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it

  # Create a FS and check that we can subsequently add a cache tier to it
  ceph fs new $FS_NAME fs_metadata fs_data --force

  # Adding overlay to FS pool should be permitted, RADOS clients handle this.
  ceph osd tier add fs_metadata mds-tier
  ceph osd tier cache-mode mds-tier writeback
  ceph osd tier set-overlay fs_metadata mds-tier

  # Removing tier should be permitted because the underlying pool is
  # replicated (#11504 case)
  ceph osd tier cache-mode mds-tier proxy
  ceph osd tier remove-overlay fs_metadata
  ceph osd tier remove fs_metadata mds-tier
  ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it

  # Clean up FS
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it



  ceph mds stat
  # ceph mds tell mds.a getmap
  # ceph mds rm
  # ceph mds rmfailed
  # ceph mds set_state
  # ceph mds stop

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mon_mds_metadata()
{
  local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
  test "$nmons" -gt 0

  ceph mds dump |
    sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
    while read gid id rank; do
      ceph mds metadata ${gid} | grep '"hostname":'
      ceph mds metadata ${id} | grep '"hostname":'
      ceph mds metadata ${rank} | grep '"hostname":'

      local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
      test "$n" -eq "$nmons"
    done

  expect_false ceph mds metadata UNKNOWN
}

function test_mon_mon()
{
  # print help message
  ceph --help mon
  # no mon add/remove
  ceph mon dump
  ceph mon getmap -o $TEMP_DIR/monmap.$$
  [ -s $TEMP_DIR/monmap.$$ ]
  # ceph mon tell
  ceph mon_status

  # test mon features
  ceph mon feature ls
  ceph mon feature set kraken --yes-i-really-mean-it
  expect_false ceph mon feature set abcd
  expect_false ceph mon feature set abcd --yes-i-really-mean-it
}

function gen_secrets_file()
{
  # let's assume we can have the following types
  # all - generates both cephx and lockbox, with mock dm-crypt key
  # cephx - only cephx
  # no_cephx - lockbox and dm-crypt, no cephx
  # no_lockbox - dm-crypt and cephx, no lockbox
  # empty - empty file
  # empty_json - correct json, empty map
  # bad_json - bad json :)
  #
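  # For example, the "all" type yields JSON shaped like (keys freshly
  # generated by ceph-authtool --gen-print-key):
  #   { "cephx_secret": "<key>", "cephx_lockbox_secret": "<key>", "dmcrypt_key": "<key>" }
  #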
  local t=$1
  if [[ -z "$t" ]]; then
    t="all"
  fi

  fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
  echo $fn
  if [[ "$t" == "empty" ]]; then
    return 0
  fi

  echo "{" > $fn
  if [[ "$t" == "bad_json" ]]; then
    echo "asd: ; }" >> $fn
    return 0
  elif [[ "$t" == "empty_json" ]]; then
    echo "}" >> $fn
    return 0
  fi

  cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
  lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
  dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""

  if [[ "$t" == "all" ]]; then
    echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "cephx" ]]; then
    echo "$cephx_secret" >> $fn
  elif [[ "$t" == "no_cephx" ]]; then
    echo "$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "no_lockbox" ]]; then
    echo "$cephx_secret,$dmcrypt_key" >> $fn
  else
    echo "unknown gen_secrets_file() type \'$t\'"
    return 1
  fi
  echo "}" >> $fn
  return 0
}

function test_mon_osd_create_destroy()
{
  ceph osd new 2>&1 | grep 'EINVAL'
  ceph osd new '' -1 2>&1 | grep 'EINVAL'
  ceph osd new '' 10 2>&1 | grep 'EINVAL'

  old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

  old_osds=$(ceph osd ls)
  num_osds=$(ceph osd ls | wc -l)

  uuid=$(uuidgen)
  id=$(ceph osd new $uuid 2>/dev/null)

  for i in $old_osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd find $id

  id2=`ceph osd new $uuid 2>/dev/null`

  [[ $id2 == $id ]]

  ceph osd new $uuid $id

  id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST

  uuid2=$(uuidgen)
  id2=$(ceph osd new $uuid2)
  ceph osd find $id2
  [[ "$id2" != "$id" ]]

  ceph osd new $uuid $id2 2>&1 | grep EEXIST
  ceph osd new $uuid2 $id2

  # test with secrets
  empty_secrets=$(gen_secrets_file "empty")
  empty_json=$(gen_secrets_file "empty_json")
  all_secrets=$(gen_secrets_file "all")
  cephx_only=$(gen_secrets_file "cephx")
  no_cephx=$(gen_secrets_file "no_cephx")
  no_lockbox=$(gen_secrets_file "no_lockbox")
  bad_json=$(gen_secrets_file "bad_json")

  # empty secrets should be idempotent
  new_id=$(ceph osd new $uuid $id -i $empty_secrets)
  [[ "$new_id" == "$id" ]]

  # empty json, thus empty secrets
  new_id=$(ceph osd new $uuid $id -i $empty_json)
  [[ "$new_id" == "$id" ]]

  ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'

  ceph osd rm $id
  ceph osd rm $id2
  ceph osd setmaxosd $old_maxosd

  ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'

  osds=$(ceph osd ls)
  id=$(ceph osd new $uuid -i $all_secrets)
  for i in $osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd find $id

  # validate secrets and dm-crypt are set
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
      jq '.key')
  s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
  [[ $k == $s ]]
  ceph config-key exists dm-crypt/osd/$uuid/luks

  osds=$(ceph osd ls)
  id2=$(ceph osd new $uuid2 -i $cephx_only)
  for i in $osds; do
    [[ "$i" != "$id2" ]]
  done

  ceph osd find $id2
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  expect_false ceph auth get-key client.osd-lockbox.$uuid2
  expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks

  ceph osd destroy osd.$id2 --yes-i-really-mean-it
  ceph osd destroy $id2 --yes-i-really-mean-it
  ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  ceph osd dump | grep osd.$id2 | grep destroyed

  id3=$id2
  uuid3=$(uuidgen)
  ceph osd new $uuid3 $id3 -i $all_secrets
  ceph osd dump | grep osd.$id3 | expect_false grep destroyed
  ceph auth get-key client.osd-lockbox.$uuid3
  ceph auth get-key osd.$id3
  ceph config-key exists dm-crypt/osd/$uuid3/luks

  ceph osd purge osd.$id3 --yes-i-really-mean-it
  expect_false ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  expect_false ceph auth get-key client.osd-lockbox.$uuid3
  expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  ceph osd purge osd.$id3 --yes-i-really-mean-it # idempotent

  ceph osd purge osd.$id --yes-i-really-mean-it
  ceph osd purge 123456 --yes-i-really-mean-it
  expect_false ceph osd find $id
  expect_false ceph auth get-key osd.$id
  expect_false ceph auth get-key client.osd-lockbox.$uuid
  expect_false ceph config-key exists dm-crypt/osd/$uuid/luks

  rm $empty_secrets $empty_json $all_secrets $cephx_only \
     $no_cephx $no_lockbox $bad_json

  for i in $(ceph osd ls); do
    [[ "$i" != "$id" ]]
    [[ "$i" != "$id2" ]]
    [[ "$i" != "$id3" ]]
  done

  [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
  ceph osd setmaxosd $old_maxosd

}

function test_mon_config_key()
{
  key=asdfasdfqwerqwreasdfuniquesa123df
  ceph config-key list | grep -c $key | grep 0
  ceph config-key get $key | grep -c bar | grep 0
  ceph config-key set $key bar
  ceph config-key get $key | grep bar
  ceph config-key list | grep -c $key | grep 1
  ceph config-key dump | grep $key | grep bar
  ceph config-key rm $key
  expect_false ceph config-key get $key
  ceph config-key list | grep -c $key | grep 0
  ceph config-key dump | grep -c $key | grep 0
}

function test_mon_osd()
{
  #
  # osd blacklist
  #
  bl=192.168.0.1:0/1000
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
  ceph osd dump --format=json-pretty | grep $bl
  ceph osd dump | grep "^blacklist $bl"
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl

  bl=192.168.0.1
  # test without nonce, invalid nonce
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl
  expect_false "ceph osd blacklist $bl/-1"
  expect_false "ceph osd blacklist $bl/foo"

  # test with wrong address
  expect_false "ceph osd blacklist 1234.56.78.90/100"

  # Test `clear`
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist clear
  ceph osd blacklist ls | expect_false grep $bl

  #
  # osd crush
  #
  ceph osd crush reweight-all
  ceph osd crush tunables legacy
  ceph osd crush show-tunables | grep argonaut
  ceph osd crush tunables bobtail
  ceph osd crush show-tunables | grep bobtail
  ceph osd crush tunables firefly
  ceph osd crush show-tunables | grep firefly

  ceph osd crush set-tunable straw_calc_version 0
  ceph osd crush get-tunable straw_calc_version | grep 0
  ceph osd crush set-tunable straw_calc_version 1
  ceph osd crush get-tunable straw_calc_version | grep 1

  #
  # require-min-compat-client
  expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
  ceph osd set-require-min-compat-client luminous
  ceph osd dump | grep 'require_min_compat_client luminous'

  #
  # osd scrub
  #
  # how do I tell when these are done?
  ceph osd scrub 0
  ceph osd deep-scrub 0
  ceph osd repair 0

  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
  do
    ceph osd set $f
    ceph osd unset $f
  done
  expect_false ceph osd unset sortbitwise # cannot be unset
  expect_false ceph osd set bogus
  expect_false ceph osd unset bogus
  ceph osd require-osd-release luminous
  # can't lower (or use new command for anything but jewel)
  expect_false ceph osd require-osd-release jewel
  # these are no-ops but should succeed.
  ceph osd set require_jewel_osds
  ceph osd set require_kraken_osds
  expect_false ceph osd unset require_jewel_osds

  ceph osd set noup
  ceph osd down 0
  ceph osd dump | grep 'osd.0 down'
  ceph osd unset noup
  max_run=1000
  for ((i=0; i < $max_run; i++)); do
    if ! ceph osd dump | grep 'osd.0 up'; then
      echo "waiting for osd.0 to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  ceph osd dump | grep 'osd.0 up'

  ceph osd dump | grep 'osd.0 up'
  # ceph osd find expects the OsdName, so both ints and osd.n should work.
  ceph osd find 1
  ceph osd find osd.1
  expect_false ceph osd find osd.xyz
  expect_false ceph osd find xyz
  expect_false ceph osd find 0.1
  ceph --format plain osd find 1 # falls back to json-pretty
  if [ `uname` == Linux ]; then
    ceph osd metadata 1 | grep 'distro'
    ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
  fi
  ceph osd out 0
  ceph osd dump | grep 'osd.0.*out'
  ceph osd in 0
  ceph osd dump | grep 'osd.0.*in'
  ceph osd find 0

  ceph osd add-nodown 0 1
  ceph health detail | grep 'NODOWN'
  ceph osd rm-nodown 0 1
  ! ceph health detail | grep 'NODOWN'

  ceph osd out 0 # so we can mark it as noin later
  ceph osd add-noin 0
  ceph health detail | grep 'NOIN'
  ceph osd rm-noin 0
  ! ceph health detail | grep 'NOIN'
  ceph osd in 0

  ceph osd add-noout 0
  ceph health detail | grep 'NOOUT'
  ceph osd rm-noout 0
  ! ceph health detail | grep 'NOOUT'

  # test osd id parse
  expect_false ceph osd add-noup 797er
  expect_false ceph osd add-nodown u9uwer
  expect_false ceph osd add-noin 78~15
  expect_false ceph osd add-noout 0 all 1

  expect_false ceph osd rm-noup 1234567
  expect_false ceph osd rm-nodown fsadf7
  expect_false ceph osd rm-noin 0 1 any
  expect_false ceph osd rm-noout 790-fd

  ids=`ceph osd ls-tree default`
  for osd in $ids
  do
    ceph osd add-nodown $osd
    ceph osd add-noout $osd
  done
  ceph -s | grep 'NODOWN'
  ceph -s | grep 'NOOUT'
  ceph osd rm-nodown any
  ceph osd rm-noout all
  ! ceph -s | grep 'NODOWN'
  ! ceph -s | grep 'NOOUT'

  # make sure mark out preserves weight
  ceph osd reweight osd.0 .5
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
  ceph osd out 0
  ceph osd in 0
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'

7c673cae
FG
1557 ceph osd getmap -o $f
1558 [ -s $f ]
1559 rm $f
1560 save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1561 [ "$save" -gt 0 ]
1562 ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
1563 ceph osd setmaxosd 10
1564 ceph osd getmaxosd | grep 'max_osd = 10'
1565 ceph osd setmaxosd $save
1566 ceph osd getmaxosd | grep "max_osd = $save"
1567
  for id in `ceph osd ls` ; do
    retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
  done

  ceph osd rm 0 2>&1 | grep 'EBUSY'

  local old_osds=$(echo $(ceph osd ls))
  id=`ceph osd create`
  ceph osd find $id
  ceph osd lost $id --yes-i-really-mean-it
  expect_false ceph osd setmaxosd $id
  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id
  done

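  # osd create is idempotent per uuid: a second create with the same uuid
  # must return the existing id rather than allocate a new one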
  uuid=`uuidgen`
  id=`ceph osd create $uuid`
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  ceph osd rm $id

  ceph --help osd

  # reset max_osd.
  ceph osd setmaxosd $id
  ceph osd getmaxosd | grep "max_osd = $save"
  local max_osd=$save

  ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
  ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'

  id=`ceph osd create $uuid $max_osd`
  [ "$id" = "$max_osd" ]
  ceph osd find $id
  max_osd=$((max_osd + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
  ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  id2=`ceph osd create $uuid $id`
  [ "$id" = "$id2" ]

  uuid=`uuidgen`
  local gap_start=$max_osd
  id=`ceph osd create $uuid $((gap_start + 100))`
  [ "$id" = "$((gap_start + 100))" ]
  max_osd=$((id + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'

  #
  # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
  # is repeated and consumes two osd ids, not just one.
  #
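  # e.g. (hypothetical invocation, not run here):
  #   CEPH_CLI_TEST_DUP_COMMAND=1 ceph osd create   # the CLI submits it twice
  # hence next_osd below is recomputed from the id actually returned rather
  # than assumed from a running counter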
  local next_osd=$gap_start
  id=`ceph osd create $(uuidgen)`
  [ "$id" = "$next_osd" ]

  next_osd=$((id + 1))
  id=`ceph osd create $(uuidgen) $next_osd`
  [ "$id" = "$next_osd" ]

  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    [ $id -ge $save ]
    ceph osd rm $id
  done
  ceph osd setmaxosd $save

  ceph osd ls
  ceph osd pool create data 10
  ceph osd pool application enable data rados
  ceph osd lspools | grep data
  ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
  ceph osd map data foo namespace | grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pause
  ceph osd dump | grep 'flags.*pauserd,pausewr'
  ceph osd unpause

  ceph osd tree
  ceph osd tree up
  ceph osd tree down
  ceph osd tree in
  ceph osd tree out
  ceph osd tree destroyed
  ceph osd tree up in
  ceph osd tree up out
  ceph osd tree down in
  ceph osd tree down out
  ceph osd tree out down
  expect_false ceph osd tree up down
  expect_false ceph osd tree up destroyed
  expect_false ceph osd tree down destroyed
  expect_false ceph osd tree up down destroyed
  expect_false ceph osd tree in out
  expect_false ceph osd tree up foo

  ceph osd metadata
  ceph osd count-metadata os
  ceph osd versions

  ceph osd perf
  ceph osd blocked-by

  ceph osd stat | grep up,
}

function test_mon_crush()
{
  f=$TEMP_DIR/map.$$
  epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
  [ -s $f ]
  [ "$epoch" -gt 1 ]
  nextepoch=$(( $epoch + 1 ))
  echo epoch $epoch nextepoch $nextepoch
  rm -f $f.epoch
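  # setcrushmap takes the map's current epoch as a guard: an epoch that is
  # not yet current is rejected, while the correct one is accepted and the
  # osdmap advances to nextepoch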
  expect_false ceph osd setcrushmap $nextepoch -i $f
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  # should be idempotent
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  rm $f
}

function test_mon_osd_pool()
{
  #
  # osd pool
  #
  ceph osd pool create data 10
  ceph osd pool application enable data rados
  ceph osd pool mksnap data datasnap
  rados -p data lssnap | grep datasnap
  ceph osd pool rmsnap data datasnap
  expect_false ceph osd pool rmsnap pool_fake snapshot
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pool create data2 10
  ceph osd pool application enable data2 rados
  ceph osd pool rename data2 data3
  ceph osd lspools | grep data3
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it

  ceph osd pool create replicated 12 12 replicated
  ceph osd pool create replicated 12 12 replicated
  ceph osd pool create replicated 12 12 # default is replicated
  ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
  ceph osd pool application enable replicated rados
  # should fail because the type is not the same
  expect_false ceph osd pool create replicated 12 12 erasure
  ceph osd lspools | grep replicated
  ceph osd pool create ec_test 1 1 erasure
  ceph osd pool application enable ec_test rados
  set +e
  ceph osd count-metadata osd_objectstore | grep 'bluestore'
  if [ $? -eq 1 ]; then # enabling ec_overwrites on non-bluestore pools should fail
    ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
    check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
  else
    ceph osd pool set ec_test allow_ec_overwrites true || return 1
    expect_false ceph osd pool set ec_test allow_ec_overwrites false
  fi
  set -e
  ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
  ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
}

function test_mon_osd_pool_quota()
{
  #
  # test osd pool set/get quota
  #

  # create tmp pool
  ceph osd pool create tmp-quota-pool 36
  ceph osd pool application enable tmp-quota-pool rados
  #
  # set erroneous quotas
  #
  expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
  expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
  expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
  #
  # set valid quotas
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10
  ceph osd pool set-quota tmp-quota-pool max_objects 10M
  #
  # get quotas in json-pretty format
  #
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_objects":.*10000000'
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_bytes":.*10'
  #
  # get quotas
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10M objects'
  #
  # set valid quotas with unit prefix
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10K
  #
  # get quotas
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10Ki'
  #
  # set valid quotas with unit prefix
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
  #
  # get quotas
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10Ki'
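  #
  # note the asymmetry above: object quotas are reported with SI units
  # ("10M" == 10000000 objects) while byte quotas are reported with IEC
  # units, so both the "10K" and "10Ki" spellings come back as "10Ki"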
  #
  # reset pool quotas
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 0
  ceph osd pool set-quota tmp-quota-pool max_objects 0
  #
  # test N/A quotas
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
  #
  # cleanup tmp pool
  ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
}

function test_mon_pg()
{
  # Make sure we start healthy.
  wait_for_health_ok

  ceph pg debug unfound_objects_exist
  ceph pg debug degraded_pgs_exist
  ceph pg deep-scrub 1.0
  ceph pg dump
  ceph pg dump pgs_brief --format=json
  ceph pg dump pgs --format=json
  ceph pg dump pools --format=json
  ceph pg dump osds --format=json
  ceph pg dump sum --format=json
  ceph pg dump all --format=json
  ceph pg dump pgs_brief osds --format=json
  ceph pg dump pools osds pgs_brief --format=json
  ceph pg dump_json
  ceph pg dump_pools_json
  ceph pg dump_stuck inactive
  ceph pg dump_stuck unclean
  ceph pg dump_stuck stale
  ceph pg dump_stuck undersized
  ceph pg dump_stuck degraded
  ceph pg ls
  ceph pg ls 1
  ceph pg ls stale
  expect_false ceph pg ls scrubq
  ceph pg ls active stale repair recovering
  ceph pg ls 1 active
  ceph pg ls 1 active stale
  ceph pg ls-by-primary osd.0
  ceph pg ls-by-primary osd.0 1
  ceph pg ls-by-primary osd.0 active
  ceph pg ls-by-primary osd.0 active stale
  ceph pg ls-by-primary osd.0 1 active stale
  ceph pg ls-by-osd osd.0
  ceph pg ls-by-osd osd.0 1
  ceph pg ls-by-osd osd.0 active
  ceph pg ls-by-osd osd.0 active stale
  ceph pg ls-by-osd osd.0 1 active stale
  ceph pg ls-by-pool rbd
  ceph pg ls-by-pool rbd active stale
  # can't test this...
  # ceph pg force_create_pg
  ceph pg getmap -o $TEMP_DIR/map.$$
  [ -s $TEMP_DIR/map.$$ ]
  ceph pg map 1.0 | grep acting
  ceph pg repair 1.0
  ceph pg scrub 1.0

  ceph osd set-full-ratio .962
  ceph osd dump | grep '^full_ratio 0.962'
  ceph osd set-backfillfull-ratio .912
  ceph osd dump | grep '^backfillfull_ratio 0.912'
  ceph osd set-nearfull-ratio .892
  ceph osd dump | grep '^nearfull_ratio 0.892'

  # Check health status
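  # (nearfull .913 > backfillfull .912 violates the required ordering
  # nearfull <= backfillfull <= full, hence OSD_OUT_OF_ORDER_FULL below)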
  ceph osd set-nearfull-ratio .913
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-nearfull-ratio .892
  ceph osd set-backfillfull-ratio .963
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-backfillfull-ratio .912

  # Check injected full results
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
  wait_for_health "OSD_NEARFULL"
  ceph health detail | grep "osd.0 is near full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
  wait_for_health_ok

  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
  wait_for_health "OSD_BACKFILLFULL"
  ceph health detail | grep "osd.1 is backfill full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
  wait_for_health_ok

  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
  # failsafe and full are the same as far as the monitor is concerned
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.2 is full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
  wait_for_health_ok

  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.0 is full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
  wait_for_health_ok

  ceph pg stat | grep 'pgs:'
  ceph pg 1.0 query
  ceph tell 1.0 query
  ceph quorum enter
  ceph quorum_status
  ceph report | grep osd_stats
  ceph status
  ceph -s

  #
  # tell osd version
  #
  ceph tell osd.0 version
  expect_false ceph tell osd.9999 version
  expect_false ceph tell osd.foo version

  # back to pg stuff

  ceph tell osd.0 dump_pg_recovery_stats | grep Started

  ceph osd reweight 0 0.9
  expect_false ceph osd reweight 0 -1
  ceph osd reweight osd.0 1

  ceph osd primary-affinity osd.0 .9
  expect_false ceph osd primary-affinity osd.0 -2
  expect_false ceph osd primary-affinity osd.9999 .5
  ceph osd primary-affinity osd.0 1

  ceph osd pool set rbd size 2
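  # pg-temp takes a pgid followed by the temporary acting set; osds may be
  # given as bare ids or as osd.N names, and the set apparently must fit the
  # pool size (rbd was just set to size 2, so three osds are rejected)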
  ceph osd pg-temp 1.0 0 1
  ceph osd pg-temp 1.0 osd.1 osd.0
  expect_false ceph osd pg-temp 1.0 0 1 2
  expect_false ceph osd pg-temp asdf qwer
  expect_false ceph osd pg-temp 1.0 asdf
  expect_false ceph osd pg-temp 1.0

  # don't test ceph osd primary-temp for now
}

function test_mon_osd_pool_set()
{
  TEST_POOL_GETSET=pool_getset
  ceph osd pool create $TEST_POOL_GETSET 1
  ceph osd pool application enable $TEST_POOL_GETSET rados
  wait_for_clean
  ceph osd pool get $TEST_POOL_GETSET all

  for s in pg_num pgp_num size min_size crush_rule; do
    ceph osd pool get $TEST_POOL_GETSET $s
  done

  old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
  (( new_size = old_size + 1 ))
  ceph osd pool set $TEST_POOL_GETSET size $new_size
  ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
  ceph osd pool set $TEST_POOL_GETSET size $old_size

  ceph osd pool create pool_erasure 1 1 erasure
  ceph osd pool application enable pool_erasure rados
  wait_for_clean
  set +e
  ceph osd pool set pool_erasure size 4444 2>$TMPFILE
  check_response 'not change the size'
  set -e
  ceph osd pool get pool_erasure erasure_code_profile

  auid=5555
  ceph osd pool set $TEST_POOL_GETSET auid $auid
  ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
  ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
  ceph osd pool set $TEST_POOL_GETSET auid 0

  for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
    ceph osd pool set $TEST_POOL_GETSET $flag false
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
    ceph osd pool set $TEST_POOL_GETSET $flag true
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
    ceph osd pool set $TEST_POOL_GETSET $flag 1
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
    ceph osd pool set $TEST_POOL_GETSET $flag 0
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
    expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
    expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
  done

  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
  ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
  ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'

  ceph osd pool set $TEST_POOL_GETSET nopgchange 1
  expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
  expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
  ceph osd pool set $TEST_POOL_GETSET nopgchange 0
  ceph osd pool set $TEST_POOL_GETSET pg_num 10
  wait_for_clean
  ceph osd pool set $TEST_POOL_GETSET pgp_num 10

  old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
  new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
  ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
  ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
  wait_for_clean
  old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
  new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32 + 1))
  expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
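  # (a single pg_num increase appears to be capped at 32 new PGs per OSD,
  # presumably mon_osd_max_split_count; the +1 above pushes past that cap)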

  ceph osd pool set $TEST_POOL_GETSET nosizechange 1
  expect_false ceph osd pool set $TEST_POOL_GETSET size 2
  expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
  ceph osd pool set $TEST_POOL_GETSET nosizechange 0
  ceph osd pool set $TEST_POOL_GETSET size 2
  wait_for_clean
  ceph osd pool set $TEST_POOL_GETSET min_size 2

  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
  ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it

  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
  ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it

  ceph osd pool get rbd crush_rule | grep 'crush_rule: '

  ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
  ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
  ceph osd pool set $TEST_POOL_GETSET compression_mode unset
  ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
  ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
  expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
  expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
  ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
  ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
  ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
  ceph osd pool set $TEST_POOL_GETSET csum_type unset
  ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'

  for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
    ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET $size 100
    ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
    ceph osd pool set $TEST_POOL_GETSET $size 0
    ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
  done

  ceph osd pool set $TEST_POOL_GETSET nodelete 1
  expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
  ceph osd pool set $TEST_POOL_GETSET nodelete 0
  ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
}

function test_mon_osd_tiered_pool_set()
{
  # this is really a tier pool
  ceph osd pool create real-tier 2
  ceph osd tier add rbd real-tier

  ceph osd pool set real-tier hit_set_type explicit_hash
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
  ceph osd pool set real-tier hit_set_type explicit_object
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
  ceph osd pool set real-tier hit_set_type bloom
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
  expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
  ceph osd pool set real-tier hit_set_period 123
  ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
  ceph osd pool set real-tier hit_set_count 12
  ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
  ceph osd pool set real-tier hit_set_fpp .01
  ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"

  ceph osd pool set real-tier target_max_objects 123
  ceph osd pool get real-tier target_max_objects | \
    grep 'target_max_objects:[ \t]\+123'
  ceph osd pool set real-tier target_max_bytes 123456
  ceph osd pool get real-tier target_max_bytes | \
    grep 'target_max_bytes:[ \t]\+123456'
  ceph osd pool set real-tier cache_target_dirty_ratio .123
  ceph osd pool get real-tier cache_target_dirty_ratio | \
    grep 'cache_target_dirty_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
  ceph osd pool set real-tier cache_target_dirty_high_ratio .123
  ceph osd pool get real-tier cache_target_dirty_high_ratio | \
    grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
  ceph osd pool set real-tier cache_target_full_ratio .123
  ceph osd pool get real-tier cache_target_full_ratio | \
    grep 'cache_target_full_ratio:[ \t]\+0.123'
  ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
  ceph osd pool set real-tier cache_target_full_ratio 1.0
  ceph osd pool set real-tier cache_target_full_ratio 0
  expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
  ceph osd pool set real-tier cache_min_flush_age 123
  ceph osd pool get real-tier cache_min_flush_age | \
    grep 'cache_min_flush_age:[ \t]\+123'
  ceph osd pool set real-tier cache_min_evict_age 234
  ceph osd pool get real-tier cache_min_evict_age | \
    grep 'cache_min_evict_age:[ \t]\+234'

  # this is not a tier pool
  ceph osd pool create fake-tier 2
  ceph osd pool application enable fake-tier rados
  wait_for_clean

  expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type explicit_object
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type bloom
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
  expect_false ceph osd pool set fake-tier hit_set_period 123
  expect_false ceph osd pool get fake-tier hit_set_period
  expect_false ceph osd pool set fake-tier hit_set_count 12
  expect_false ceph osd pool get fake-tier hit_set_count
  expect_false ceph osd pool set fake-tier hit_set_fpp .01
  expect_false ceph osd pool get fake-tier hit_set_fpp

  expect_false ceph osd pool set fake-tier target_max_objects 123
  expect_false ceph osd pool get fake-tier target_max_objects
  expect_false ceph osd pool set fake-tier target_max_bytes 123456
  expect_false ceph osd pool get fake-tier target_max_bytes
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_full_ratio
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_min_flush_age 123
  expect_false ceph osd pool get fake-tier cache_min_flush_age
  expect_false ceph osd pool set fake-tier cache_min_evict_age 234
  expect_false ceph osd pool get fake-tier cache_min_evict_age

  ceph osd tier remove rbd real-tier
  ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
  ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
}

function test_mon_osd_erasure_code()
{

  ceph osd erasure-code-profile set fooprofile a=b c=d
  ceph osd erasure-code-profile set fooprofile a=b c=d
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
  # the legacy ruleset-* keys work on luminous only
  ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
  ceph osd erasure-code-profile set barprofile crush-failure-domain=host
  # clean up
  ceph osd erasure-code-profile rm fooprofile
  ceph osd erasure-code-profile rm barprofile
}

function test_mon_osd_misc()
{
  set +e

  # expect error about missing 'pool' argument
  ceph osd map 2>$TMPFILE; check_response 'pool' $? 22

  # expect error about unused argument foo
  ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22

  # expect "not in range" for invalid full ratio
  ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22

  # expect "higher than 100" for invalid overload percentage
  ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22

  set -e

  ceph osd reweight-by-utilization 110
  ceph osd reweight-by-utilization 110 .5
  expect_false ceph osd reweight-by-utilization 110 0
  expect_false ceph osd reweight-by-utilization 110 -0.1
  ceph osd test-reweight-by-utilization 110 .5 --no-increasing
  ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
  ceph osd reweight-by-pg 110
  ceph osd test-reweight-by-pg 110 .5
  ceph osd reweight-by-pg 110 rbd
  ceph osd reweight-by-pg 110 .5 rbd
  expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
}

function test_mon_heap_profiler()
{
  do_test=1
  set +e
  # expect 'heap' commands to be correctly parsed
  ceph heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi
  set -e

  [[ $do_test -eq 0 ]] && return 0

  ceph heap start_profiler
  ceph heap dump
  ceph heap stop_profiler
  ceph heap release
}
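
# test_admin_heap_profiler below repeats the tcmalloc probe; a shared helper
# could factor it out (a sketch only, not wired into either test):
#
#   heap_profiler_available() {
#     local out
#     out=$(ceph heap stats 2>&1) || true
#     ! echo "$out" | grep -q 'tcmalloc not enabled'
#   }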

function test_admin_heap_profiler()
{
  do_test=1
  set +e
  # expect 'heap' commands to be correctly parsed
  ceph heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi
  set -e

  [[ $do_test -eq 0 ]] && return 0

  local admin_socket=$(get_admin_socket osd.0)

  $SUDO ceph --admin-daemon $admin_socket heap start_profiler
  $SUDO ceph --admin-daemon $admin_socket heap dump
  $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
  $SUDO ceph --admin-daemon $admin_socket heap release
}

function test_osd_bench()
{
  # test osd bench limits
  # As we should not rely on defaults (as they may change over time),
  # let's inject some values and perform some simple tests
  # max iops: 10 # 10 IOPS
  # max throughput: 10485760 # 10MB/s
  # max block size: 2097152 # 2MB
  # duration: 10 # 10 seconds

  local args="\
    --osd-bench-duration 10 \
    --osd-bench-max-block-size 2097152 \
    --osd-bench-large-size-max-throughput 10485760 \
    --osd-bench-small-size-max-iops 10"
  ceph tell osd.0 injectargs ${args## }

  # anything with a bs larger than 2097152 must fail
  expect_false ceph tell osd.0 bench 1 2097153
  # but using 'osd_bench_max_bs' must succeed
  ceph tell osd.0 bench 1 2097152

  # we assume 1MB as a large bs; anything lower is a small bs
  # for a 4096-byte bs, for 10 seconds, we are limited by IOPS
  # max count: 409600 (bytes)
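  # illustrative sketch (hypothetical helper, not used by the test): the
  # IOPS-bound cap above is simply iops * duration * block size
  bench_cap_bytes() {
    echo $(( $1 * $2 * $3 ))  # bench_cap_bytes 10 10 4096 -> 409600
  }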

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 409601 4096
  # but exactly 409600 must succeed
  ceph tell osd.0 bench 409600 4096

  # for a large bs, we are limited by throughput.
  # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
  # the max count will be (10MB * 10s) = 100MB
  # max count: 104857600 (bytes)
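  # (same arithmetic: 10485760 B/s * 10 s = 104857600 B)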

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 104857601 2097152
  # up to max count must be allowed
  ceph tell osd.0 bench 104857600 2097152
}

function test_osd_negative_filestore_merge_threshold()
{
  $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
  expect_config_value "osd.0" "filestore_merge_threshold" -1
}

function test_mon_tell()
{
  ceph tell mon.a version
  ceph tell mon.b version
  expect_false ceph tell mon.foo version

  sleep 1

  ceph_watch_start debug audit
  ceph tell mon.a version
  ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'

  ceph_watch_start debug audit
  ceph tell mon.b version
  ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
}

function test_mon_ping()
{
  ceph ping mon.a
  ceph ping mon.b
  expect_false ceph ping mon.foo

  ceph ping mon.\*
}

function test_mon_deprecated_commands()
{
  # current DEPRECATED commands are:
  #  ceph compact
  #  ceph scrub
  #  ceph sync force
  #
  # Testing should be accomplished by setting
  # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
  # each one of these commands.

  ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
  expect_false ceph tell mon.a compact 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a scrub 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a sync force 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
}

function test_mon_cephdf_commands()
{
  # ceph df detail:
  # pool section:
  # RAW USED: the approximate raw space a pool consumes, i.e. its logical
  # usage multiplied across replicas

  ceph osd pool create cephdf_for_test 32 32 replicated
  ceph osd pool application enable cephdf_for_test rados
  ceph osd pool set cephdf_for_test size 2

  dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
  rados put cephdf_for_test cephdf_for_test -p cephdf_for_test

  # wait for update
  for i in `seq 1 10`; do
    rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
    sleep 1
  done
  # "rados ls" goes straight to the osd, but "ceph df" is served by the mon,
  # so we need to sync the mon with the osd
  flush_pg_stats
  local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
  cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
  raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`
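
  # with size 2 every logical byte is stored twice, so the raw_bytes_used
  # reported by "ceph df detail" should equal bytes_used * 2, which is what
  # the comparison below asserts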

  ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
  rm ./cephdf_for_test

  expect_false test $cal_raw_used_size != $raw_used_size
}

function test_mon_pool_application()
{
  ceph osd pool create app_for_test 10

  ceph osd pool application enable app_for_test rbd
  expect_false ceph osd pool application enable app_for_test rgw
  ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool ls detail | grep "application rbd,rgw"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'

  expect_false ceph osd pool application set app_for_test cephfs key value
  ceph osd pool application set app_for_test rbd key1 value1
  ceph osd pool application set app_for_test rbd key2 value2
  ceph osd pool application set app_for_test rgw key1 value1
  ceph osd pool application get app_for_test rbd key1 | grep 'value1'
  ceph osd pool application get app_for_test rbd key2 | grep 'value2'
  ceph osd pool application get app_for_test rgw key1 | grep 'value1'

  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'

  ceph osd pool application rm app_for_test rgw key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key2
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1 # should be idempotent

  expect_false ceph osd pool application disable app_for_test rgw
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
  ceph osd pool ls detail | grep "application rbd"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'

  ceph osd pool application disable app_for_test rbd --yes-i-really-mean-it
  ceph osd pool ls detail | grep -v "application "
  ceph osd pool ls detail --format=json | grep '"application_metadata":{}'

  ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
}

function test_mon_tell_help_command()
{
  ceph tell mon.a help

  # wrong target
  expect_false ceph tell mon.zzz help
}

function test_mon_stdin_stdout()
{
  echo foo | ceph config-key set test_key -i -
  ceph config-key get test_key -o - | grep -c foo | grep -q 1
}

function test_osd_tell_help_command()
{
  ceph tell osd.1 help
  expect_false ceph tell osd.100 help
}

function test_osd_compact()
{
  ceph tell osd.1 compact
  $SUDO ceph daemon osd.1 compact
}

function test_mds_tell_help_command()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  remove_all_fs
  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  ceph tell mds.a help
  expect_false ceph tell mds.z help

  remove_all_fs
  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mgr_tell()
{
  ceph tell mgr help
  #ceph tell mgr fs status # see http://tracker.ceph.com/issues/20761
  ceph tell mgr osd status
}

#
# New tests should be added to the TESTS array below
#
# Individual tests may be run using the '-t <testname>' argument
# The user can specify '-t <testname>' as many times as she wants
#
# Tests will be run in the order presented in the TESTS array, or in
# the order specified by the '-t <testname>' options.
#
# '-l' will list all the available test names
# '-h' will show usage
#
# The test maintains backward compatibility: not specifying arguments
# will run all tests following the order they appear in the TESTS array.
#

set +x
MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
for i in `seq 9`; do
  MON_TESTS+=" tiering_$i";
done
MON_TESTS+=" auth"
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
MON_TESTS+=" mon_config_key"
MON_TESTS+=" mon_crush"
MON_TESTS+=" mon_osd_create_destroy"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_pool_set"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
MON_TESTS+=" mon_heap_profiler"
MON_TESTS+=" mon_tell"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"
MON_TESTS+=" mon_tell_help_command"
MON_TESTS+=" mon_stdin_stdout"

OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"
OSD_TESTS+=" admin_heap_profiler"
OSD_TESTS+=" osd_tell_help_command"
OSD_TESTS+=" osd_compact"

MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"
MDS_TESTS+=" mds_tell_help_command"

MGR_TESTS+=" mgr_tell"

TESTS+=$MON_TESTS
TESTS+=$OSD_TESTS
TESTS+=$MDS_TESTS
TESTS+=$MGR_TESTS

#
# "main" follows
#

function list_tests()
{
  echo "AVAILABLE TESTS"
  for i in $TESTS; do
    echo "  $i"
  done
}

function usage()
{
  echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
}

tests_to_run=()
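# (note: tests_to_run is declared as an array but only ever appended to with
# 'tests_to_run+="..."', which extends element 0; since it is always expanded
# unquoted, it effectively behaves as a space-separated string accumulator)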

sanity_check=true

while [[ $# -gt 0 ]]; do
  opt=$1

  case "$opt" in
    "-l" )
      do_list=1
      ;;
    "--asok-does-not-need-root" )
      SUDO=""
      ;;
    "--no-sanity-check" )
      sanity_check=false
      ;;
    "--test-mon" )
      tests_to_run+="$MON_TESTS"
      ;;
    "--test-osd" )
      tests_to_run+="$OSD_TESTS"
      ;;
    "--test-mds" )
      tests_to_run+="$MDS_TESTS"
      ;;
    "--test-mgr" )
      tests_to_run+="$MGR_TESTS"
      ;;
    "-t" )
      shift
      if [[ -z "$1" ]]; then
        echo "missing argument to '-t'"
        usage ;
        exit 1
      fi
      tests_to_run+=" $1"
      ;;
    "-h" )
      usage ;
      exit 0
      ;;
  esac
  shift
done

if [[ $do_list -eq 1 ]]; then
  list_tests ;
  exit 0
fi

ceph osd pool create rbd 10

if test -z "$tests_to_run" ; then
  tests_to_run="$TESTS"
fi

if $sanity_check ; then
  wait_no_osd_down
fi
for i in $tests_to_run; do
  if $sanity_check ; then
    check_no_osd_down
  fi
  set -x
  test_${i}
  set +x
done
if $sanity_check ; then
  check_no_osd_down
fi

set -x

echo OK