2 # -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
3 # vim: ts=8 sw=8 ft=bash smarttab
# Load the shared cluster-test helpers that live one directory above this
# script.  Quoted so a path containing whitespace still resolves.
source "$(dirname "$0")/../ceph-helpers.sh"

# Make `set -x` traces show file:line:function for easier debugging.
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
#######################################
# Print the admin-socket path for a daemon/client.
# Globals:   CEPH_OUT_DIR (preferred socket directory, if set)
#            CEPH_ARGS (fallback: parsed for --cluster to build the
#            conventional /var/run/ceph path)
# Arguments: $1 - entity name, e.g. "osd.0" or "client.admin"
# Outputs:   socket path on stdout
#######################################
function get_admin_socket()
{
  local client=$1

  if test -n "$CEPH_OUT_DIR";
  then
    echo "$CEPH_OUT_DIR/$client.asok"
  else
    # Pull the cluster name out of CEPH_ARGS; an absent --cluster flag
    # yields an empty capture and hence an empty cluster prefix.
    local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
    echo "/var/run/ceph/$cluster-$client.asok"
  fi
}
#######################################
# Succeed (return 0) when `ceph osd dump` reports no ' down ' OSD entries;
# matching lines, if any, are printed by grep.
#######################################
function check_no_osd_down()
{
    if ceph osd dump | grep ' down ' ; then
        return 1
    fi
    return 0
}
#######################################
# Poll until no OSD is reported down, retrying up to max_run times.
# Calls:   check_no_osd_down (defined earlier in this file)
# Returns: status of the final check_no_osd_down call
#######################################
function wait_no_osd_down()
{
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_no_osd_down ; then
      echo "waiting for osd(s) to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  # Final verdict: succeed only if everything is up now.
  check_no_osd_down
}
#######################################
# Invert a command's status: succeed only when "$@" fails.
# Arguments: the command and its arguments
#######################################
function expect_false()
{
    if "$@"; then
        return 1
    fi
    return 0
}
# Scratch area for the whole test run; removed automatically on exit via
# the trap below.  Template quoted in case TMPDIR contains spaces.
TEMP_DIR=$(mktemp -d "${TMPDIR-/tmp}/cephtool.XXX")
trap "rm -fr $TEMP_DIR" 0

# Shared capture file used by check_response and friends.
TMPFILE=$(mktemp "$TEMP_DIR/test_invalid.XXX")
# retry_eagain max cmd args ...
#
# Retry "cmd args ..." if it exits non-zero and its output contains the
# string EAGAIN, at most $max times.  The last run's output is echoed and
# its exit status returned.
function retry_eagain()
{
    local max=$1
    shift
    local status
    local tmpfile=$TEMP_DIR/retry_eagain.$$
    local count
    for count in $(seq 1 $max) ; do
        status=0
        "$@" > $tmpfile 2>&1 || status=$?
        # Stop retrying on success, or on a failure that is not EAGAIN.
        if test $status = 0 ||
            ! grep --quiet EAGAIN $tmpfile ; then
            break
        fi
        sleep 1
    done
    if test $count = $max ; then
        echo retried with non zero exit status, $max times: "$@" >&2
    fi
    cat $tmpfile
    rm $tmpfile
    return $status
}
# map_enxio_to_eagain cmd arg ...
#
# Add EAGAIN to the output of "cmd arg ..." if the command fails and its
# output contains ENXIO, so callers wrapped in retry_eagain will retry.
function map_enxio_to_eagain()
{
    local status=0
    local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$

    "$@" > $tmpfile 2>&1 || status=$?
    if test $status != 0 &&
        grep --quiet ENXIO $tmpfile ; then
        echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
    fi
    cat $tmpfile
    rm $tmpfile
    return $status
}
#######################################
# Assert that $TMPFILE contains a string and, optionally, that a return
# code matches an expectation.
# Globals:   TMPFILE (read)
# Arguments: $1 - string expected in the captured output
#            $2 - actual return code (optional)
#            $3 - expected return code (optional)
# Exits:     1 on any mismatch
#######################################
function check_response()
{
	expected_string=$1
	retcode=$2
	expected_retcode=$3
	# Two separate [ ] tests joined with && instead of the obsolescent
	# and ambiguous "-a" operator.
	if [ -n "$expected_retcode" ] && [ "$retcode" != "$expected_retcode" ] ; then
		echo "return code invalid: got $retcode, expected $expected_retcode" >&2
		exit 1
	fi

	if ! grep --quiet -- "$expected_string" $TMPFILE ; then
		echo "Didn't find $expected_string in output" >&2
		cat $TMPFILE >&2
		exit 1
	fi
}
#######################################
# Fetch a config option's value via the daemon admin socket, or exit.
# Globals:   SUDO (optional command prefix)
# Arguments: $1 - target daemon, e.g. "mon.a"
#            $2 - config option name
# Outputs:   the bare value on stdout
# Exits:     1 if the fetch fails or yields an empty value
#######################################
function get_config_value_or_die()
{
  local target config_opt raw val

  target=$1
  config_opt=$2

  raw="$($SUDO ceph daemon $target config get $config_opt 2>/dev/null)"
  if [[ $? -ne 0 ]]; then
    echo "error obtaining config opt '$config_opt' from '$target': $raw"
    exit 1
  fi

  # Strip JSON decoration ({ } " and spaces) then take the value after ':'.
  raw=$(echo $raw | sed -e 's/[{} "]//g')
  val=$(echo $raw | cut -f2 -d:)

  if [[ -z "$val" ]]; then
    echo "failed to get config value for '$config_opt' from '$target'"
    exit 1
  fi

  echo "$val"
  return 0
}
#######################################
# Assert a config option has an expected value on a daemon.
# Arguments: $1 - target daemon, $2 - option name, $3 - expected value
# Exits:     1 on mismatch (also via get_config_value_or_die on failure)
#######################################
function expect_config_value()
{
  local target config_opt expected_val val

  target=$1
  config_opt=$2
  expected_val=$3

  val=$(get_config_value_or_die $target $config_opt)

  if [[ "$val" != "$expected_val" ]]; then
    echo "expected '$expected_val', got '$val'"
    exit 1
  fi
}
#######################################
# Start a background `ceph --watch[-<level>]` capture into a file.
# Globals:   TEMP_DIR (read); CEPH_WATCH_FILE, CEPH_WATCH_PID (written)
# Arguments: $1 - optional watch level, appended as --watch-$1
#######################################
function ceph_watch_start()
{
    local whatch_opt=--watch

    if [ -n "$1" ]; then
        whatch_opt=--watch-$1
    fi

    CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
    ceph $whatch_opt > $CEPH_WATCH_FILE &
    # NOTE(review): PID captured so ceph_watch_wait can reap the watcher.
    CEPH_WATCH_PID=$!

    # wait until the "ceph" client is connected and receiving
    # log messages from monitor
    for i in $(seq 30); do
        grep -q "cluster" $CEPH_WATCH_FILE && break
        sleep 1
    done
}
#######################################
# Wait for a pattern to appear in the watch file started by
# ceph_watch_start, then stop the watcher.
# Globals:   CEPH_WATCH_FILE, CEPH_WATCH_PID (read)
# Arguments: $1 - regexp to wait for; $2 - optional timeout in seconds
# Returns:   1 if the pattern never appeared
#######################################
function ceph_watch_wait()
{
    local regexp=$1
    local timeout=30

    if [ -n "$2" ]; then
        timeout=$2
    fi

    for i in $(seq ${timeout}); do
        grep -q "$regexp" $CEPH_WATCH_FILE && break
        sleep 1
    done

    kill $CEPH_WATCH_PID

    if ! grep "$regexp" $CEPH_WATCH_FILE; then
        echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
        cat $CEPH_WATCH_FILE >&2
        return 1
    fi
}
203 function test_mon_injectargs
()
205 CEPH_ARGS
='--mon_debug_dump_location the.dump' ceph tell osd
.0 injectargs
--no-osd_enable_op_tracker >& $TMPFILE ||
return 1
206 check_response
"osd_enable_op_tracker = 'false'"
207 ! grep "the.dump" $TMPFILE ||
return 1
208 ceph tell osd
.0 injectargs
'--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE ||
return 1
209 check_response
"osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
210 ceph tell osd
.0 injectargs
--no-osd_enable_op_tracker >& $TMPFILE ||
return 1
211 check_response
"osd_enable_op_tracker = 'false'"
212 ceph tell osd
.0 injectargs
-- --osd_enable_op_tracker >& $TMPFILE ||
return 1
213 check_response
"osd_enable_op_tracker = 'true'"
214 ceph tell osd
.0 injectargs
-- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE ||
return 1
215 check_response
"osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
216 expect_failure
$TEMP_DIR "Option --osd_op_history_duration requires an argument" \
217 ceph tell osd
.0 injectargs
-- '--osd_op_history_duration'
219 ceph tell osd
.0 injectargs
-- '--osd_deep_scrub_interval 2419200' >& $TMPFILE ||
return 1
220 check_response
"osd_deep_scrub_interval = '2419200.000000' (not observed, change may require restart)"
222 ceph tell osd
.0 injectargs
-- '--mon_probe_timeout 2' >& $TMPFILE ||
return 1
223 check_response
"mon_probe_timeout = '2.000000' (not observed, change may require restart)"
225 ceph tell osd
.0 injectargs
-- '--mon-lease 6' >& $TMPFILE ||
return 1
226 check_response
"mon_lease = '6.000000' (not observed, change may require restart)"
228 # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
229 expect_false ceph tell osd
.0 injectargs
--osd-scrub-auto-repair-num-errors -1 >& $TMPFILE ||
return 1
230 check_response
"Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
233 function test_mon_injectargs_SI
()
235 # Test SI units during injectargs and 'config set'
236 # We only aim at testing the units are parsed accordingly
237 # and don't intend to test whether the options being set
238 # actually expect SI units to be passed.
239 # Keep in mind that all integer based options (i.e., INT,
240 # LONG, U32, U64) will accept SI unit modifiers.
241 initial_value
=$
(get_config_value_or_die
"mon.a" "mon_pg_warn_min_objects")
242 $SUDO ceph daemon mon.a config
set mon_pg_warn_min_objects
10
243 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 10
244 $SUDO ceph daemon mon.a config
set mon_pg_warn_min_objects
10K
245 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 10240
246 $SUDO ceph daemon mon.a config
set mon_pg_warn_min_objects
1G
247 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 1073741824
248 $SUDO ceph daemon mon.a config
set mon_pg_warn_min_objects
10F
> $TMPFILE || true
249 check_response
"'10F': (22) Invalid argument"
250 # now test with injectargs
251 ceph tell mon.a injectargs
'--mon_pg_warn_min_objects 10'
252 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 10
253 ceph tell mon.a injectargs
'--mon_pg_warn_min_objects 10K'
254 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 10240
255 ceph tell mon.a injectargs
'--mon_pg_warn_min_objects 1G'
256 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 1073741824
257 expect_false ceph tell mon.a injectargs
'--mon_pg_warn_min_objects 10F'
258 expect_false ceph tell mon.a injectargs
'--mon_globalid_prealloc -1'
259 $SUDO ceph daemon mon.a config
set mon_pg_warn_min_objects
$initial_value
262 function test_tiering_agent
()
264 local slow
=slow_eviction
265 local fast
=fast_eviction
266 ceph osd pool create
$slow 1 1
267 ceph osd pool create
$fast 1 1
268 ceph osd tier add
$slow $fast
269 ceph osd tier cache-mode
$fast writeback
270 ceph osd tier set-overlay
$slow $fast
271 ceph osd pool
set $fast hit_set_type bloom
272 rados
-p $slow put obj1
/etc
/group
273 ceph osd pool
set $fast target_max_objects
1
274 ceph osd pool
set $fast hit_set_count
1
275 ceph osd pool
set $fast hit_set_period
5
276 # wait for the object to be evicted from the cache
279 for i
in `seq 1 300` ; do
280 if ! rados
-p $fast ls |
grep obj1
; then
287 # the object is proxy read and promoted to the cache
288 rados
-p $slow get obj1
- >/dev
/null
289 # wait for the promoted object to be evicted again
291 for i
in `seq 1 300` ; do
292 if ! rados
-p $fast ls |
grep obj1
; then
299 ceph osd tier remove-overlay
$slow
300 ceph osd tier remove
$slow $fast
301 ceph osd pool delete
$fast $fast --yes-i-really-really-mean-it
302 ceph osd pool delete
$slow $slow --yes-i-really-really-mean-it
305 function test_tiering_1
()
308 ceph osd pool create slow
2
309 ceph osd pool create slow2
2
310 ceph osd pool create cache
2
311 ceph osd pool create cache2
2
312 ceph osd tier add slow cache
313 ceph osd tier add slow cache2
314 expect_false ceph osd tier add slow2 cache
315 # test some state transitions
316 ceph osd tier cache-mode cache writeback
317 expect_false ceph osd tier cache-mode cache forward
318 ceph osd tier cache-mode cache forward
--yes-i-really-mean-it
319 expect_false ceph osd tier cache-mode cache
readonly
320 ceph osd tier cache-mode cache
readonly --yes-i-really-mean-it
321 expect_false ceph osd tier cache-mode cache forward
322 ceph osd tier cache-mode cache forward
--yes-i-really-mean-it
323 ceph osd tier cache-mode cache none
324 ceph osd tier cache-mode cache writeback
325 ceph osd tier cache-mode cache proxy
326 ceph osd tier cache-mode cache writeback
327 expect_false ceph osd tier cache-mode cache none
328 expect_false ceph osd tier cache-mode cache
readonly --yes-i-really-mean-it
329 # test with dirty objects in the tier pool
330 # tier pool currently set to 'writeback'
331 rados
-p cache put
/etc
/passwd
/etc
/passwd
333 # 1 dirty object in pool 'cache'
334 ceph osd tier cache-mode cache proxy
335 expect_false ceph osd tier cache-mode cache none
336 expect_false ceph osd tier cache-mode cache
readonly --yes-i-really-mean-it
337 ceph osd tier cache-mode cache writeback
338 # remove object from tier pool
339 rados
-p cache
rm /etc
/passwd
340 rados
-p cache cache-flush-evict-all
342 # no dirty objects in pool 'cache'
343 ceph osd tier cache-mode cache proxy
344 ceph osd tier cache-mode cache none
345 ceph osd tier cache-mode cache
readonly --yes-i-really-mean-it
347 while ! ceph osd pool
set cache pg_num
3 --yes-i-really-mean-it 2>$TMPFILE
349 grep 'currently creating pgs' $TMPFILE
350 TRIES
=$
(( $TRIES + 1 ))
354 expect_false ceph osd pool
set cache pg_num
4
355 ceph osd tier cache-mode cache none
356 ceph osd tier set-overlay slow cache
357 expect_false ceph osd tier set-overlay slow cache2
358 expect_false ceph osd tier remove slow cache
359 ceph osd tier remove-overlay slow
360 ceph osd tier set-overlay slow cache2
361 ceph osd tier remove-overlay slow
362 ceph osd tier remove slow cache
363 ceph osd tier add slow2 cache
364 expect_false ceph osd tier set-overlay slow cache
365 ceph osd tier set-overlay slow2 cache
366 ceph osd tier remove-overlay slow2
367 ceph osd tier remove slow2 cache
368 ceph osd tier remove slow cache2
370 # make sure a non-empty pool fails
371 rados
-p cache2 put
/etc
/passwd
/etc
/passwd
372 while ! ceph df |
grep cache2 |
grep ' 1 ' ; do
373 echo waiting
for pg stats to flush
376 expect_false ceph osd tier add slow cache2
377 ceph osd tier add slow cache2
--force-nonempty
378 ceph osd tier remove slow cache2
380 ceph osd pool
ls |
grep cache2
381 ceph osd pool
ls -f json-pretty |
grep cache2
382 ceph osd pool
ls detail |
grep cache2
383 ceph osd pool
ls detail
-f json-pretty |
grep cache2
385 ceph osd pool delete slow slow
--yes-i-really-really-mean-it
386 ceph osd pool delete slow2 slow2
--yes-i-really-really-mean-it
387 ceph osd pool delete cache cache
--yes-i-really-really-mean-it
388 ceph osd pool delete cache2 cache2
--yes-i-really-really-mean-it
function test_tiering_2()
{
    # make sure we can't clobber snapshot state
    ceph osd pool create snap_base 2
    ceph osd pool create snap_cache 2
    ceph osd pool mksnap snap_cache snapname
    expect_false ceph osd tier add snap_base snap_cache
    # teardown
    ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
    ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}
function test_tiering_3()
{
    # make sure we can't create snapshot on tier
    ceph osd pool create basex 2
    ceph osd pool create cachex 2
    ceph osd tier add basex cachex
    # NOTE(review): snapshots the pool named 'cache', not 'cachex' — looks
    # like it may be a typo carried from upstream; confirm before changing.
    expect_false ceph osd pool mksnap cache snapname
    # teardown
    ceph osd tier remove basex cachex
    ceph osd pool delete basex basex --yes-i-really-really-mean-it
    ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}
function test_tiering_4()
{
    # make sure we can't create an ec pool tier
    ceph osd pool create eccache 2 2 erasure
    expect_false ceph osd set-require-min-compat-client bobtail
    ceph osd pool create repbase 2
    expect_false ceph osd tier add repbase eccache
    # teardown
    ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
    ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}
function test_tiering_5()
{
    # convenient add-cache command
    ceph osd pool create slow 2
    ceph osd pool create cache3 2
    ceph osd tier add-cache slow cache3 1024000
    # add-cache should have configured bloom hit sets and a byte target
    ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
    # removing the tier while it is still the overlay must fail
    ceph osd tier remove slow cache3 2> $TMPFILE || true
    check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
    ceph osd tier remove-overlay slow
    ceph osd tier remove slow cache3
    ceph osd pool ls | grep cache3
    ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
    ! ceph osd pool ls | grep cache3 || exit 1
    ceph osd pool delete slow slow --yes-i-really-really-mean-it
}
function test_tiering_6()
{
    # check add-cache whether work
    ceph osd pool create datapool 2
    ceph osd pool create cachepool 2
    ceph osd tier add-cache datapool cachepool 1024000
    ceph osd tier cache-mode cachepool writeback
    # write through the base pool, verify the object lands in the cache,
    # flush it, then verify it is visible in the base pool
    rados -p datapool put object /etc/passwd
    rados -p cachepool stat object
    rados -p cachepool cache-flush object
    rados -p datapool stat object
    # teardown
    ceph osd tier remove-overlay datapool
    ceph osd tier remove datapool cachepool
    ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
    ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_7()
{
    # protection against pool removal when used as tiers
    ceph osd pool create datapool 2
    ceph osd pool create cachepool 2
    ceph osd tier add-cache datapool cachepool 1024000
    # deleting either side of a tier relationship must be refused
    ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
    check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
    ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
    check_response "EBUSY: pool 'datapool' has tiers cachepool"
    # after untiering, deletion succeeds
    ceph osd tier remove-overlay datapool
    ceph osd tier remove datapool cachepool
    ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
    ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
475 function test_tiering_8
()
477 ## check health check
478 ceph osd
set notieragent
479 ceph osd pool create datapool
2
480 ceph osd pool create cache4
2
481 ceph osd tier add-cache datapool cache4
1024000
482 ceph osd tier cache-mode cache4 writeback
483 tmpfile
=$
(mktemp|
grep tmp
)
484 dd if=/dev
/zero of
=$tmpfile bs
=4K count
=1
485 ceph osd pool
set cache4 target_max_objects
200
486 ceph osd pool
set cache4 target_max_bytes
1000000
487 rados
-p cache4 put foo1
$tmpfile
488 rados
-p cache4 put foo2
$tmpfile
491 ceph df |
grep datapool |
grep ' 2 '
492 ceph osd tier remove-overlay datapool
493 ceph osd tier remove datapool cache4
494 ceph osd pool delete cache4 cache4
--yes-i-really-really-mean-it
495 ceph osd pool delete datapool datapool
--yes-i-really-really-mean-it
496 ceph osd
unset notieragent
499 function test_tiering_9
()
501 # make sure 'tier remove' behaves as we expect
502 # i.e., removing a tier from a pool that's not its base pool only
503 # results in a 'pool foo is now (or already was) not a tier of bar'
505 ceph osd pool create basepoolA
2
506 ceph osd pool create basepoolB
2
507 poolA_id
=$
(ceph osd dump |
grep 'pool.*basepoolA' |
awk '{print $2;}')
508 poolB_id
=$
(ceph osd dump |
grep 'pool.*basepoolB' |
awk '{print $2;}')
510 ceph osd pool create cache5
2
511 ceph osd pool create cache6
2
512 ceph osd tier add basepoolA cache5
513 ceph osd tier add basepoolB cache6
514 ceph osd tier remove basepoolB cache5
2>&1 |
grep 'not a tier of'
515 ceph osd dump |
grep "pool.*'cache5'" 2>&1 |
grep "tier_of[ \t]\+$poolA_id"
516 ceph osd tier remove basepoolA cache6
2>&1 |
grep 'not a tier of'
517 ceph osd dump |
grep "pool.*'cache6'" 2>&1 |
grep "tier_of[ \t]\+$poolB_id"
519 ceph osd tier remove basepoolA cache5
2>&1 |
grep 'not a tier of'
520 ! ceph osd dump |
grep "pool.*'cache5'" 2>&1 |
grep "tier_of" ||
exit 1
521 ceph osd tier remove basepoolB cache6
2>&1 |
grep 'not a tier of'
522 ! ceph osd dump |
grep "pool.*'cache6'" 2>&1 |
grep "tier_of" ||
exit 1
524 ! ceph osd dump |
grep "pool.*'basepoolA'" 2>&1 |
grep "tiers" ||
exit 1
525 ! ceph osd dump |
grep "pool.*'basepoolB'" 2>&1 |
grep "tiers" ||
exit 1
527 ceph osd pool delete cache6 cache6
--yes-i-really-really-mean-it
528 ceph osd pool delete cache5 cache5
--yes-i-really-really-mean-it
529 ceph osd pool delete basepoolB basepoolB
--yes-i-really-really-mean-it
530 ceph osd pool delete basepoolA basepoolA
--yes-i-really-really-mean-it
535 ceph auth add client.xx mon allow osd
"allow *"
536 ceph auth
export client.xx
>client.xx.keyring
537 ceph auth add client.xx
-i client.xx.keyring
538 rm -f client.xx.keyring
539 ceph auth list |
grep client.xx
540 ceph auth get client.xx |
grep caps |
grep mon
541 ceph auth get client.xx |
grep caps |
grep osd
542 ceph auth get-key client.xx
543 ceph auth print-key client.xx
544 ceph auth print_key client.xx
545 ceph auth caps client.xx osd
"allow rw"
546 expect_false sh
<<< "ceph auth get client.xx | grep caps | grep mon"
547 ceph auth get client.xx |
grep osd |
grep "allow rw"
548 ceph auth
export |
grep client.xx
549 ceph auth
export -o authfile
550 ceph auth import
-i authfile
551 ceph auth
export -o authfile2
552 diff authfile authfile2
553 rm authfile authfile2
554 ceph auth del client.xx
555 expect_false ceph auth get client.xx
557 # (almost) interactive mode
558 echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
559 ceph auth get client.xx
561 echo 'auth del client.xx' | ceph
562 expect_false ceph auth get client.xx
568 ceph-authtool
--create-keyring --name client.TEST
--gen-key --set-uid $auid TEST-keyring
569 expect_false ceph auth import
--in-file TEST-keyring
571 ceph-authtool
--create-keyring --name client.TEST
--gen-key --cap mon
"allow r" --set-uid $auid TEST-keyring
572 ceph auth import
--in-file TEST-keyring
574 ceph auth get client.TEST
> $TMPFILE
575 check_response
"auid = $auid"
576 ceph
--format json-pretty auth get client.TEST
> $TMPFILE
577 check_response
'"auid": '$auid
578 ceph auth list
> $TMPFILE
579 check_response
"auid: $auid"
580 ceph
--format json-pretty auth list
> $TMPFILE
581 check_response
'"auid": '$auid
582 ceph auth del client.TEST
585 function test_auth_profiles
()
587 ceph auth add client.xx-profile-ro mon
'allow profile read-only' \
588 mgr
'allow profile read-only'
589 ceph auth add client.xx-profile-rw mon
'allow profile read-write' \
590 mgr
'allow profile read-write'
591 ceph auth add client.xx-profile-rd mon
'allow profile role-definer'
593 ceph auth
export > client.xx.keyring
595 # read-only is allowed all read-only commands (auth excluded)
596 ceph
-n client.xx-profile-ro
-k client.xx.keyring status
597 ceph
-n client.xx-profile-ro
-k client.xx.keyring osd dump
598 ceph
-n client.xx-profile-ro
-k client.xx.keyring pg dump
599 ceph
-n client.xx-profile-ro
-k client.xx.keyring mon dump
600 ceph
-n client.xx-profile-ro
-k client.xx.keyring mds dump
601 # read-only gets access denied for rw commands or auth commands
602 ceph
-n client.xx-profile-ro
-k client.xx.keyring log foo
>& $TMPFILE || true
603 check_response
"EACCES: access denied"
604 ceph
-n client.xx-profile-ro
-k client.xx.keyring osd
set noout
>& $TMPFILE || true
605 check_response
"EACCES: access denied"
606 ceph
-n client.xx-profile-ro
-k client.xx.keyring auth list
>& $TMPFILE || true
607 check_response
"EACCES: access denied"
609 # read-write is allowed for all read-write commands (except auth)
610 ceph
-n client.xx-profile-rw
-k client.xx.keyring status
611 ceph
-n client.xx-profile-rw
-k client.xx.keyring osd dump
612 ceph
-n client.xx-profile-rw
-k client.xx.keyring pg dump
613 ceph
-n client.xx-profile-rw
-k client.xx.keyring mon dump
614 ceph
-n client.xx-profile-rw
-k client.xx.keyring mds dump
615 ceph
-n client.xx-profile-rw
-k client.xx.keyring log foo
616 ceph
-n client.xx-profile-rw
-k client.xx.keyring osd
set noout
617 ceph
-n client.xx-profile-rw
-k client.xx.keyring osd
unset noout
618 # read-write gets access denied for auth commands
619 ceph
-n client.xx-profile-rw
-k client.xx.keyring auth list
>& $TMPFILE || true
620 check_response
"EACCES: access denied"
622 # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
623 ceph
-n client.xx-profile-rd
-k client.xx.keyring auth list
624 ceph
-n client.xx-profile-rd
-k client.xx.keyring auth
export
625 ceph
-n client.xx-profile-rd
-k client.xx.keyring auth add client.xx-profile-foo
626 ceph
-n client.xx-profile-rd
-k client.xx.keyring status
627 ceph
-n client.xx-profile-rd
-k client.xx.keyring osd dump
>& $TMPFILE || true
628 check_response
"EACCES: access denied"
629 ceph
-n client.xx-profile-rd
-k client.xx.keyring pg dump
>& $TMPFILE || true
630 check_response
"EACCES: access denied"
631 # read-only 'mon' subsystem commands are allowed
632 ceph
-n client.xx-profile-rd
-k client.xx.keyring mon dump
633 # but read-write 'mon' commands are not
634 ceph
-n client.xx-profile-rd
-k client.xx.keyring mon add foo
1.1.1.1 >& $TMPFILE || true
635 check_response
"EACCES: access denied"
636 ceph
-n client.xx-profile-rd
-k client.xx.keyring mds dump
>& $TMPFILE || true
637 check_response
"EACCES: access denied"
638 ceph
-n client.xx-profile-rd
-k client.xx.keyring log foo
>& $TMPFILE || true
639 check_response
"EACCES: access denied"
640 ceph
-n client.xx-profile-rd
-k client.xx.keyring osd
set noout
>& $TMPFILE || true
641 check_response
"EACCES: access denied"
643 ceph
-n client.xx-profile-rd
-k client.xx.keyring auth del client.xx-profile-ro
644 ceph
-n client.xx-profile-rd
-k client.xx.keyring auth del client.xx-profile-rw
646 # add a new role-definer with the existing role-definer
647 ceph
-n client.xx-profile-rd
-k client.xx.keyring \
648 auth add client.xx-profile-rd2 mon
'allow profile role-definer'
649 ceph
-n client.xx-profile-rd
-k client.xx.keyring \
650 auth
export > client.xx.keyring
.2
651 # remove old role-definer using the new role-definer
652 ceph
-n client.xx-profile-rd2
-k client.xx.keyring
.2 \
653 auth del client.xx-profile-rd
654 # remove the remaining role-definer with admin
655 ceph auth del client.xx-profile-rd2
656 rm -f client.xx.keyring client.xx.keyring
.2
659 function test_mon_caps
()
661 ceph-authtool
--create-keyring $TEMP_DIR/ceph.client.bug.keyring
662 chmod +r
$TEMP_DIR/ceph.client.bug.keyring
663 ceph-authtool
$TEMP_DIR/ceph.client.bug.keyring
-n client.bug
--gen-key
664 ceph auth add client.bug
-i $TEMP_DIR/ceph.client.bug.keyring
666 rados lspools
--keyring $TEMP_DIR/ceph.client.bug.keyring
-n client.bug
>& $TMPFILE || true
667 check_response
"Permission denied"
669 rm -rf $TEMP_DIR/ceph.client.bug.keyring
670 ceph auth del client.bug
671 ceph-authtool
--create-keyring $TEMP_DIR/ceph.client.bug.keyring
672 chmod +r
$TEMP_DIR/ceph.client.bug.keyring
673 ceph-authtool
$TEMP_DIR/ceph.client.bug.keyring
-n client.bug
--gen-key
674 ceph-authtool
-n client.bug
--cap mon
'' $TEMP_DIR/ceph.client.bug.keyring
675 ceph auth add client.bug
-i $TEMP_DIR/ceph.client.bug.keyring
676 rados lspools
--keyring $TEMP_DIR/ceph.client.bug.keyring
-n client.bug
>& $TMPFILE || true
677 check_response
"Permission denied"
680 function test_mon_misc
()
682 # with and without verbosity
683 ceph osd dump |
grep '^epoch'
684 ceph
--concise osd dump |
grep '^epoch'
686 ceph osd df |
grep 'MIN/MAX VAR'
691 grep -v DIRTY
$TMPFILE
692 ceph df detail
> $TMPFILE
694 ceph df
--format json
> $TMPFILE
695 grep 'total_bytes' $TMPFILE
696 grep -v 'dirty' $TMPFILE
697 ceph df detail
--format json
> $TMPFILE
698 grep 'rd_bytes' $TMPFILE
699 grep 'dirty' $TMPFILE
700 ceph df
--format xml |
grep '<total_bytes>'
701 ceph df detail
--format xml |
grep '<rd_bytes>'
706 ceph health
--format json-pretty
707 ceph health detail
--format xml-pretty
709 ceph time-sync-status
712 for t
in mon osd mds
; do
717 mymsg
="this is a test log message $$.$(date)"
719 ceph log last |
grep "$mymsg"
720 ceph log last
100 |
grep "$mymsg"
721 ceph_watch_wait
"$mymsg"
725 ceph mgr module
enable restful
726 expect_false ceph mgr module
enable foodne
727 ceph mgr module
enable foodne
--force
728 ceph mgr module disable foodne
729 ceph mgr module disable foodnebizbangbash
733 ceph mon count-metadata ceph_version
#######################################
# Succeed when the named filesystem reports an active MDS.
# Arguments: $1 - filesystem name
#######################################
function check_mds_active()
{
    fs_name=$1
    ceph fs get $fs_name | grep active
}
#######################################
# Poll until the named filesystem has an active MDS, up to max_run tries.
# Arguments: $1 - filesystem name
# Returns:   status of the final check_mds_active call
#######################################
function wait_mds_active()
{
  fs_name=$1
  max_run=300
  for i in $(seq 1 $max_run) ; do
      if ! check_mds_active $fs_name ; then
          echo "waiting for an active MDS daemon ($i/$max_run)"
          sleep 5
      else
          break
      fi
  done
  check_mds_active $fs_name
}
#######################################
# Print the GIDs of all MDS daemons in a filesystem, space separated.
# Arguments: $1 - filesystem name
# Outputs:   whitespace-separated GIDs on stdout
#######################################
function get_mds_gids()
{
    fs_name=$1
    ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
}
#######################################
# Mark a filesystem's cluster down and fail every MDS in it.
# Arguments: $1 - filesystem name
# Exits:     1 if an active MDS remains afterwards
#######################################
function fail_all_mds()
{
  fs_name=$1
  ceph fs set $fs_name cluster_down true
  mds_gids=$(get_mds_gids $fs_name)
  for mds_gid in $mds_gids ; do
      ceph mds fail $mds_gid
  done
  if check_mds_active $fs_name ; then
      echo "An active MDS remains, something went wrong"
      exit 1
  fi
}
#######################################
# Tear down every filesystem in the cluster: fail its MDSs, then remove it.
# Calls: fail_all_mds, and `ceph fs ls/rm`
#######################################
function remove_all_fs()
{
  existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
  for fs_name in $existing_fs ; do
      echo "Removing fs ${fs_name}..."
      fail_all_mds $fs_name
      echo "Removing existing filesystem '${fs_name}'..."
      ceph fs rm $fs_name --yes-i-really-mean-it
      echo "Removed '${fs_name}'."
  done
}
# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
    ceph auth list | grep "^mds"
}
# some of the commands are just not idempotent.
#
# Run "$@" with CEPH_CLI_TEST_DUP_COMMAND temporarily unset, restoring its
# previous value afterwards (if it was set at all).
function without_test_dup_command()
{
  if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
    $@
  else
    local saved=${CEPH_CLI_TEST_DUP_COMMAND}
    unset CEPH_CLI_TEST_DUP_COMMAND
    $@
    # BUG FIX: restore the saved value ("$saved"), not the literal
    # string "saved" as the original assignment did.
    CEPH_CLI_TEST_DUP_COMMAND="$saved"
  fi
}
814 function test_mds_tell
()
817 if ! mds_exists
; then
818 echo "Skipping test, no MDS found"
823 ceph osd pool create fs_data
10
824 ceph osd pool create fs_metadata
10
825 ceph fs new
$FS_NAME fs_metadata fs_data
826 wait_mds_active
$FS_NAME
828 # Test injectargs by GID
829 old_mds_gids
=$
(get_mds_gids
$FS_NAME)
830 echo Old GIDs
: $old_mds_gids
832 for mds_gid
in $old_mds_gids ; do
833 ceph tell mds.
$mds_gid injectargs
"--debug-mds 20"
835 expect_false ceph tell mds.a injectargs mds_max_file_recover
-1
837 # Test respawn by rank
838 without_test_dup_command ceph tell mds
.0 respawn
839 new_mds_gids
=$old_mds_gids
840 while [ $new_mds_gids -eq $old_mds_gids ] ; do
842 new_mds_gids
=$
(get_mds_gids
$FS_NAME)
844 echo New GIDs
: $new_mds_gids
847 without_test_dup_command ceph tell mds.a respawn
848 new_mds_gids
=$old_mds_gids
849 while [ $new_mds_gids -eq $old_mds_gids ] ; do
851 new_mds_gids
=$
(get_mds_gids
$FS_NAME)
853 echo New GIDs
: $new_mds_gids
856 ceph osd pool delete fs_data fs_data
--yes-i-really-really-mean-it
857 ceph osd pool delete fs_metadata fs_metadata
--yes-i-really-really-mean-it
860 function test_mon_mds
()
865 ceph osd pool create fs_data
10
866 ceph osd pool create fs_metadata
10
867 ceph fs new
$FS_NAME fs_metadata fs_data
869 ceph fs
set $FS_NAME cluster_down true
870 ceph fs
set $FS_NAME cluster_down false
872 # Legacy commands, act on default fs
873 ceph mds cluster_down
876 ceph mds compat rm_incompat
4
877 ceph mds compat rm_incompat
4
879 # We don't want any MDSs to be up, their activity can interfere with
880 # the "current_epoch + 1" checking below if they're generating updates
881 fail_all_mds
$FS_NAME
884 expect_false ceph mds deactivate
2
888 for mds_gid
in $
(get_mds_gids
$FS_NAME) ; do
889 ceph mds metadata
$mds_id
893 ceph mds count-metadata os
895 # XXX mds fail, but how do you undo it?
896 mdsmapfile
=$TEMP_DIR/mdsmap.$$
897 current_epoch
=$
(ceph mds getmap
-o $mdsmapfile --no-log-to-stderr 2>&1 |
grep epoch |
sed 's/.*epoch //')
901 ceph osd pool create data2
10
902 ceph osd pool create data3
10
903 data2_pool
=$
(ceph osd dump |
grep "pool.*'data2'" |
awk '{print $2;}')
904 data3_pool
=$
(ceph osd dump |
grep "pool.*'data3'" |
awk '{print $2;}')
905 ceph mds add_data_pool
$data2_pool
906 ceph mds add_data_pool
$data3_pool
907 ceph mds add_data_pool
100 >& $TMPFILE || true
908 check_response
"Error ENOENT"
909 ceph mds add_data_pool foobarbaz
>& $TMPFILE || true
910 check_response
"Error ENOENT"
911 ceph mds remove_data_pool
$data2_pool
912 ceph mds remove_data_pool
$data3_pool
913 ceph osd pool delete data2 data2
--yes-i-really-really-mean-it
914 ceph osd pool delete data3 data3
--yes-i-really-really-mean-it
915 ceph mds
set allow_multimds false
916 expect_false ceph mds set_max_mds
4
917 ceph mds
set allow_multimds true
918 ceph mds set_max_mds
4
919 ceph mds set_max_mds
3
920 ceph mds set_max_mds
256
921 expect_false ceph mds set_max_mds
257
922 ceph mds
set max_mds
4
923 ceph mds
set max_mds
256
924 expect_false ceph mds
set max_mds
257
925 expect_false ceph mds
set max_mds asdf
926 expect_false ceph mds
set inline_data true
927 ceph mds
set inline_data true
--yes-i-really-mean-it
928 ceph mds
set inline_data
yes --yes-i-really-mean-it
929 ceph mds
set inline_data
1 --yes-i-really-mean-it
930 expect_false ceph mds
set inline_data
--yes-i-really-mean-it
931 ceph mds
set inline_data false
932 ceph mds
set inline_data no
933 ceph mds
set inline_data
0
934 expect_false ceph mds
set inline_data asdf
935 ceph mds
set max_file_size
1048576
936 expect_false ceph mds
set max_file_size
123asdf
938 expect_false ceph mds
set allow_new_snaps
939 expect_false ceph mds
set allow_new_snaps true
940 ceph mds
set allow_new_snaps true
--yes-i-really-mean-it
941 ceph mds
set allow_new_snaps
0
942 ceph mds
set allow_new_snaps false
943 ceph mds
set allow_new_snaps no
944 expect_false ceph mds
set allow_new_snaps taco
946 # we should never be able to add EC pools as data or metadata pools
947 # create an ec-pool...
948 ceph osd pool create mds-ec-pool
10 10 erasure
950 ceph mds add_data_pool mds-ec-pool
2>$TMPFILE
951 check_response
'erasure-code' $?
22
953 ec_poolnum
=$
(ceph osd dump |
grep "pool.* 'mds-ec-pool" |
awk '{print $2;}')
954 data_poolnum
=$
(ceph osd dump |
grep "pool.* 'fs_data" |
awk '{print $2;}')
955 metadata_poolnum
=$
(ceph osd dump |
grep "pool.* 'fs_metadata" |
awk '{print $2;}')
957 fail_all_mds
$FS_NAME
960 # Check that rmfailed requires confirmation
961 expect_false ceph mds rmfailed
0
962 ceph mds rmfailed
0 --yes-i-really-mean-it
965 # Check that `newfs` is no longer permitted
966 expect_false ceph mds newfs
$metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
968 # Check that 'fs reset' runs
969 ceph fs
reset $FS_NAME --yes-i-really-mean-it
971 # Check that creating a second FS fails by default
972 ceph osd pool create fs_metadata2
10
973 ceph osd pool create fs_data2
10
975 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
978 # Check that setting enable_multiple enables creation of second fs
979 ceph fs flag
set enable_multiple true
--yes-i-really-mean-it
980 ceph fs new cephfs2 fs_metadata2 fs_data2
982 # Clean up multi-fs stuff
984 ceph fs
rm cephfs2
--yes-i-really-mean-it
985 ceph osd pool delete fs_metadata2 fs_metadata2
--yes-i-really-really-mean-it
986 ceph osd pool delete fs_data2 fs_data2
--yes-i-really-really-mean-it
988 fail_all_mds
$FS_NAME
990 # Clean up to enable subsequent fs new tests
991 ceph fs
rm $FS_NAME --yes-i-really-mean-it
994 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
995 check_response
'erasure-code' $?
22
996 ceph fs new
$FS_NAME mds-ec-pool fs_data
2>$TMPFILE
997 check_response
'erasure-code' $?
22
998 ceph fs new
$FS_NAME mds-ec-pool mds-ec-pool
2>$TMPFILE
999 check_response
'erasure-code' $?
22
1002 # ... new create a cache tier in front of the EC pool...
1003 ceph osd pool create mds-tier
2
1004 ceph osd tier add mds-ec-pool mds-tier
1005 ceph osd tier set-overlay mds-ec-pool mds-tier
1006 tier_poolnum
=$
(ceph osd dump |
grep "pool.* 'mds-tier" |
awk '{print $2;}')
1008 # Use of a readonly tier should be forbidden
1009 ceph osd tier cache-mode mds-tier
readonly --yes-i-really-mean-it
1011 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
1012 check_response
'has a write tier (mds-tier) that is configured to forward' $?
22
1015 # Use of a writeback tier should enable FS creation
1016 ceph osd tier cache-mode mds-tier writeback
1017 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force
1019 # While a FS exists using the tiered pools, I should not be allowed
1020 # to remove the tier
1022 ceph osd tier remove-overlay mds-ec-pool
2>$TMPFILE
1023 check_response
'in use by CephFS' $?
16
1024 ceph osd tier remove mds-ec-pool mds-tier
2>$TMPFILE
1025 check_response
'in use by CephFS' $?
16
1028 fail_all_mds
$FS_NAME
1029 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1031 # ... but we should be forbidden from using the cache pool in the FS directly.
1033 ceph fs new
$FS_NAME fs_metadata mds-tier
--force 2>$TMPFILE
1034 check_response
'in use as a cache tier' $?
22
1035 ceph fs new
$FS_NAME mds-tier fs_data
2>$TMPFILE
1036 check_response
'in use as a cache tier' $?
22
1037 ceph fs new
$FS_NAME mds-tier mds-tier
2>$TMPFILE
1038 check_response
'in use as a cache tier' $?
22
1041 # Clean up tier + EC pools
1042 ceph osd tier remove-overlay mds-ec-pool
1043 ceph osd tier remove mds-ec-pool mds-tier
1045 # Create a FS using the 'cache' pool now that it's no longer a tier
1046 ceph fs new
$FS_NAME fs_metadata mds-tier
--force
1048 # We should be forbidden from using this pool as a tier now that
1049 # it's in use for CephFS
1051 ceph osd tier add mds-ec-pool mds-tier
2>$TMPFILE
1052 check_response
'in use by CephFS' $?
16
1055 fail_all_mds
$FS_NAME
1056 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1058 # We should be permitted to use an EC pool with overwrites enabled
1059 # as the data pool...
1060 ceph osd pool
set mds-ec-pool allow_ec_overwrites true
1061 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
1062 fail_all_mds
$FS_NAME
1063 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1065 # ...but not as the metadata pool
1067 ceph fs new
$FS_NAME mds-ec-pool fs_data
2>$TMPFILE
1068 check_response
'erasure-code' $?
22
1071 ceph osd pool delete mds-ec-pool mds-ec-pool
--yes-i-really-really-mean-it
1073 # Create a FS and check that we can subsequently add a cache tier to it
1074 ceph fs new
$FS_NAME fs_metadata fs_data
--force
1076 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1077 ceph osd tier add fs_metadata mds-tier
1078 ceph osd tier cache-mode mds-tier writeback
1079 ceph osd tier set-overlay fs_metadata mds-tier
1081 # Removing tier should be permitted because the underlying pool is
1082 # replicated (#11504 case)
1083 ceph osd tier cache-mode mds-tier proxy
1084 ceph osd tier remove-overlay fs_metadata
1085 ceph osd tier remove fs_metadata mds-tier
1086 ceph osd pool delete mds-tier mds-tier
--yes-i-really-really-mean-it
1089 fail_all_mds
$FS_NAME
1090 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1095 # ceph mds tell mds.a getmap
1098 # ceph mds set_state
1101 ceph osd pool delete fs_data fs_data
--yes-i-really-really-mean-it
1102 ceph osd pool delete fs_metadata fs_metadata
--yes-i-really-really-mean-it
function test_mon_mds_metadata()
{
  # Verify "ceph mds metadata" answers identically whether the daemon is
  # addressed by gid, by name, or by rank, and that every mon returns it.
  local nmons=$(ceph tell 'mon.*' version | grep -c 'version')

  # NOTE(review): the command feeding this pipeline was lost in the mangled
  # source; upstream uses "ceph mds dump" here -- confirm against the repo.
  ceph mds dump |
  sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
  while read gid id rank; do
    # every addressing form must yield daemon metadata
    ceph mds metadata ${gid} | grep '"hostname":'
    ceph mds metadata ${id} | grep '"hostname":'
    ceph mds metadata ${rank} | grep '"hostname":'

    # all mons must agree: one hostname entry per mon
    local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
    test "$n" -eq "$nmons"
  done

  # unknown daemon names must be rejected
  expect_false ceph mds metadata UNKNOWN
}
function test_mon_mon()
{
  # print help message
  # NOTE(review): the actual help invocation was lost in the mangled source
  # (upstream runs "ceph --help mon") -- confirm before relying on it.
  ceph --help mon

  # Fetch the monmap to a file and make sure it is non-empty.
  ceph mon getmap -o $TEMP_DIR/monmap.$$
  [ -s $TEMP_DIR/monmap.$$ ]

  # Known features can be set (with confirmation); unknown features must be
  # rejected both with and without the confirmation flag.
  ceph mon feature set kraken --yes-i-really-mean-it
  expect_false ceph mon feature set abcd
  expect_false ceph mon feature set abcd --yes-i-really-mean-it
}
function gen_secrets_file()
{
  # Generate a JSON secrets file for "ceph osd new -i" tests and print its
  # path on stdout (callers capture it with $(gen_secrets_file TYPE)).
  # lets assume we can have the following types
  # all - generates both cephx and lockbox, with mock dm-crypt key
  # cephx - only cephx
  # no_cephx - lockbox and dm-crypt, no cephx
  # no_lockbox - dm-crypt and cephx, no lockbox
  # empty - empty file
  # empty_json - correct json, empty map
  # bad_json - bad json :)
  local t=$1
  if [[ -z "$t" ]]; then
    t="all"
  fi

  fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
  echo $fn
  if [[ "$t" == "empty" ]]; then
    return 0
  fi

  echo "{" > $fn
  if [[ "$t" == "bad_json" ]]; then
    # deliberately invalid json
    echo "asd: ; }" >> $fn
    return 0
  elif [[ "$t" == "empty_json" ]]; then
    # valid json, but an empty map
    echo "}" >> $fn
    return 0
  fi

  cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
  lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
  dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""

  if [[ "$t" == "all" ]]; then
    echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "cephx" ]]; then
    echo "$cephx_secret" >> $fn
  elif [[ "$t" == "no_cephx" ]]; then
    echo "$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "no_lockbox" ]]; then
    echo "$cephx_secret,$dmcrypt_key" >> $fn
  else
    echo "unknown gen_secrets_file() type \'$fn\'"
    return 1
  fi

  echo "}" >> $fn
  return 0
}
function test_mon_osd_create_destroy()
{
  # Exercise "osd new" / "osd destroy" / "osd purge" including the secrets
  # (-i) path.
  # NOTE(review): several short lines (uuid*=$(uuidgen), osd rm glue,
  # jq key comparisons) were lost in the mangled source and restored from
  # upstream -- diff against the repo before merging.

  # osd new with missing/invalid arguments must fail
  ceph osd new 2>&1 | grep 'EINVAL'
  ceph osd new '' -1 2>&1 | grep 'EINVAL'
  ceph osd new '' 10 2>&1 | grep 'EINVAL'

  old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

  old_osds=$(ceph osd ls)
  num_osds=$(ceph osd ls | wc -l)

  uuid=$(uuidgen)
  id=$(ceph osd new $uuid 2>/dev/null)

  # the fresh id must not collide with any pre-existing osd
  for i in $old_osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd rm $id

  # re-creating with the same uuid yields the same id
  id2=`ceph osd new $uuid 2>/dev/null`
  [[ "$id2" == "$id" ]]

  # osd new with matching uuid and id is idempotent
  ceph osd new $uuid $id

  # asking for a different id with an existing uuid must fail
  id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST

  uuid2=$(uuidgen)
  id2=$(ceph osd new $uuid2)
  [[ "$id2" != "$id" ]]
  ceph osd new $uuid $id2 2>&1 | grep EEXIST
  ceph osd new $uuid2 $id2

  # test with secrets
  empty_secrets=$(gen_secrets_file "empty")
  empty_json=$(gen_secrets_file "empty_json")
  all_secrets=$(gen_secrets_file "all")
  cephx_only=$(gen_secrets_file "cephx")
  no_cephx=$(gen_secrets_file "no_cephx")
  no_lockbox=$(gen_secrets_file "no_lockbox")
  bad_json=$(gen_secrets_file "bad_json")

  # empty secrets should be idempotent
  new_id=$(ceph osd new $uuid $id -i $empty_secrets)
  [[ "$new_id" == "$id" ]]

  # empty json, thus empty secrets
  new_id=$(ceph osd new $uuid $id -i $empty_json)
  [[ "$new_id" == "$id" ]]

  # supplying secrets for an osd that already exists must fail
  ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'

  ceph osd rm $id
  ceph osd rm $id2
  ceph osd setmaxosd $old_maxosd

  # malformed or incomplete secrets must be rejected
  ceph osd new $uuid -i $bad_json 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'

  id=$(ceph osd new $uuid -i $all_secrets)

  # validate secrets and dm-crypt are set
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
      jq '.key')
  s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
  [[ $k == $s ]]
  ceph config-key exists dm-crypt/osd/$uuid/luks

  id2=$(ceph osd new $uuid2 -i $cephx_only)
  for i in $old_osds; do
    [[ "$i" != "$id2" ]]
  done

  # cephx-only osd: no lockbox key, no dm-crypt key
  expect_false ceph auth get-key client.osd-lockbox.$uuid2
  expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks

  # destroy accepts both OsdName and plain id, and is idempotent
  ceph osd destroy osd.$id2 --yes-i-really-mean-it
  ceph osd destroy $id2 --yes-i-really-mean-it
  expect_false ceph auth get-key osd.$id2
  ceph osd dump | grep osd.$id2 | grep destroyed

  # recreating a destroyed id with new secrets clears the destroyed flag
  id3=$id2
  uuid3=$(uuidgen)
  ceph osd new $uuid3 $id3 -i $all_secrets
  ceph osd dump | grep osd.$id3 | expect_false grep destroyed
  ceph auth get-key client.osd-lockbox.$uuid3
  ceph auth get-key osd.$id3
  ceph config-key exists dm-crypt/osd/$uuid3/luks

  # purge removes the osd, its keys and its dm-crypt material
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  expect_false ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  expect_false ceph auth get-key client.osd-lockbox.$uuid3
  expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
  # purge is idempotent
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  ceph osd purge osd.$id3 --yes-i-really-mean-it

  ceph osd purge osd.$id --yes-i-really-mean-it
  expect_false ceph osd find $id
  expect_false ceph auth get-key osd.$id
  expect_false ceph auth get-key client.osd-lockbox.$uuid
  expect_false ceph config-key exists dm-crypt/osd/$uuid/luks

  rm $empty_secrets $empty_json $all_secrets $cephx_only \
     $no_cephx $no_lockbox $bad_json

  # everything we created must be gone again
  for i in $(ceph osd ls); do
    [[ "$i" != "$id2" ]]
    [[ "$i" != "$id3" ]]
  done

  [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
  ceph osd setmaxosd $old_maxosd
}
function test_mon_osd()
{
  # Broad coverage of "ceph osd ..." monitor commands: blacklist, crush
  # tunables, cluster flags, up/down & in/out state, maxosd, create/rm,
  # pools, tree filters.
  # NOTE(review): glue lines lost in the mangled source (ceph osd down/out/in,
  # loop "done"s, $f setup) were restored from upstream -- diff before merging.

  #
  # osd blacklist
  #
  bl=192.168.0.1:0/1000
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
  ceph osd dump --format=json-pretty | grep $bl
  ceph osd dump | grep "^blacklist $bl"
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl

  bl=192.168.0.1
  # test without nonce, invalid nonce
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist rm $bl
  # BUG FIX: original read "expect_false grep $expect_false bl", which greps
  # for the literal string "bl" ($expect_false is unset); the intent is to
  # assert $bl is no longer listed.
  ceph osd blacklist ls | expect_false grep $bl
  expect_false "ceph osd blacklist $bl/-1"
  expect_false "ceph osd blacklist $bl/foo"

  # test with wrong address
  expect_false "ceph osd blacklist 1234.56.78.90/100"

  # test `clear`
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist clear
  ceph osd blacklist ls | expect_false grep $bl

  #
  # osd crush
  #
  ceph osd crush reweight-all
  ceph osd crush tunables legacy
  ceph osd crush show-tunables | grep argonaut
  ceph osd crush tunables bobtail
  ceph osd crush show-tunables | grep bobtail
  ceph osd crush tunables firefly
  ceph osd crush show-tunables | grep firefly

  ceph osd crush set-tunable straw_calc_version 0
  ceph osd crush get-tunable straw_calc_version | grep 0
  ceph osd crush set-tunable straw_calc_version 1
  ceph osd crush get-tunable straw_calc_version | grep 1

  # require-min-compat-client
  expect_false ceph osd set-require-min-compat-client dumpling  # firefly tunables
  ceph osd set-require-min-compat-client luminous
  ceph osd dump | grep 'require_min_compat_client luminous'

  #
  # osd scrub
  #
  # how do I tell when these are done?
  ceph osd deep-scrub 0

  # cluster flags: each settable flag must also be unsettable
  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
  do
    ceph osd set $f
    ceph osd unset $f
  done
  expect_false ceph osd unset sortbitwise  # cannot be unset
  expect_false ceph osd set bogus
  expect_false ceph osd unset bogus
  ceph osd require-osd-release luminous
  # can't lower (or use new command for anything but jewel)
  expect_false ceph osd require-osd-release jewel
  # these are no-ops but should succeed.
  ceph osd set require_jewel_osds
  ceph osd set require_kraken_osds
  expect_false ceph osd unset require_jewel_osds

  # mark an osd down, then wait (bounded) for it to come back up
  ceph osd down 0
  ceph osd dump | grep 'osd.0 down'
  max_run=1000
  for ((i=0; i < $max_run; i++)); do
    if ! ceph osd dump | grep 'osd.0 up'; then
      echo "waiting for osd.0 to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  ceph osd dump | grep 'osd.0 up'

  ceph osd dump | grep 'osd.0 up'
  # ceph osd find expects the OsdName, so both ints and osd.n should work.
  expect_false ceph osd find osd.xyz
  expect_false ceph osd find xyz
  expect_false ceph osd find 0.1
  ceph --format plain osd find 1 # falls back to json-pretty
  if [ `uname` == Linux ]; then
    ceph osd metadata 1 | grep 'distro'
    ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
  fi
  ceph osd out 0
  ceph osd dump | grep 'osd.0.*out'
  ceph osd in 0
  ceph osd dump | grep 'osd.0.*in'

  # per-osd nodown/noin/noout overrides surface in health
  ceph osd add-nodown 0 1
  ceph health detail | grep 'NODOWN'
  ceph osd rm-nodown 0 1
  ! ceph health detail | grep 'NODOWN'

  ceph osd out 0 # so we can mark it as noin later
  ceph osd add-noin 0
  ceph health detail | grep 'NOIN'
  ceph osd rm-noin 0
  ! ceph health detail | grep 'NOIN'
  ceph osd in 0

  ceph osd add-noout 0
  ceph health detail | grep 'NOOUT'
  ceph osd rm-noout 0
  ! ceph health detail | grep 'NOOUT'

  # malformed osd ids must be rejected
  expect_false ceph osd add-noup 797er
  expect_false ceph osd add-nodown u9uwer
  expect_false ceph osd add-noin 78~15
  expect_false ceph osd add-noout 0 all 1

  expect_false ceph osd rm-noup 1234567
  expect_false ceph osd rm-nodown fsadf7
  expect_false ceph osd rm-noin 0 1 any
  expect_false ceph osd rm-noout 790-fd

  # bulk add by tree, bulk remove with any/all
  ids=`ceph osd ls-tree default`
  for osd in $ids
  do
    ceph osd add-nodown $osd
    ceph osd add-noout $osd
  done
  ceph -s | grep 'NODOWN'
  ceph -s | grep 'NOOUT'
  ceph osd rm-nodown any
  ceph osd rm-noout all
  ! ceph -s | grep 'NODOWN'
  ! ceph -s | grep 'NOOUT'

  # make sure mark out preserves weight
  ceph osd reweight osd.0 .5
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
  ceph osd out 0
  ceph osd in 0
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'

  f=$TEMP_DIR/map.$$
  ceph osd getmap -o $f
  [ -s $f ]
  rm $f

  save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  # shrinking below an in-use id must fail
  ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
  ceph osd setmaxosd 10
  ceph osd getmaxosd | grep 'max_osd = 10'
  ceph osd setmaxosd $save
  ceph osd getmaxosd | grep "max_osd = $save"

  for id in `ceph osd ls` ; do
    retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
  done

  # removing a running osd must fail
  ceph osd rm 0 2>&1 | grep 'EBUSY'

  local old_osds=$(echo $(ceph osd ls))
  id=`ceph osd create`
  ceph osd lost $id --yes-i-really-mean-it
  expect_false ceph osd setmaxosd $id
  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id
  done

  # create with uuid is idempotent
  uuid=`uuidgen`
  id=`ceph osd create $uuid`
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  ceph osd rm $id

  # reset max_osd
  ceph osd setmaxosd $id
  ceph osd getmaxosd | grep "max_osd = $save"
  local max_osd=$save

  # create with an id already used / below the allocated range must fail
  ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
  ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'

  id=`ceph osd create $uuid $max_osd`
  [ "$id" = "$max_osd" ]
  max_osd=$((max_osd + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
  ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  id2=`ceph osd create $uuid $id`
  [ "$id" = "$id2" ]

  # creating far beyond max_osd leaves a gap of unallocated ids
  uuid=`uuidgen`
  local gap_start=$max_osd
  id=`ceph osd create $uuid $((gap_start + 100))`
  [ "$id" = "$((gap_start + 100))" ]
  max_osd=$((id + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'

  #
  # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
  # is repeated and consumes two osd id, not just one.
  #
  local next_osd=$gap_start
  if test "$CEPH_CLI_TEST_DUP_COMMAND" ; then
    next_osd=$((gap_start + 1))
  fi
  id=`ceph osd create`
  [ "$id" = "$next_osd" ]

  next_osd=$((id + 1))
  id=`ceph osd create $(uuidgen)`
  [ "$id" = "$next_osd" ]

  next_osd=$((id + 1))
  id=`ceph osd create $(uuidgen) $next_osd`
  [ "$id" = "$next_osd" ]

  # clean up the osds created above and restore max_osd
  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id
  done
  ceph osd setmaxosd $save

  ceph osd pool create data 10
  ceph osd lspools | grep data
  ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
  ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pause
  ceph osd dump | grep 'flags.*pauserd,pausewr'
  ceph osd unpause

  # tree state filters: compatible combinations succeed, conflicting fail
  ceph osd tree up out
  ceph osd tree down in
  ceph osd tree down out
  ceph osd tree out down
  expect_false ceph osd tree up down
  expect_false ceph osd tree in out
  expect_false ceph osd tree up foo

  ceph osd count-metadata os

  ceph osd stat | grep up,
}
function test_mon_crush()
{
  # setcrushmap must reject a wrong "last known epoch" and accept the right
  # one; on success it prints the new epoch, and a replay is idempotent.
  # NOTE(review): the $f setup/cleanup lines were lost in the mangled
  # source; restored -- confirm against upstream.
  local f=$TEMP_DIR/crushmap.$$
  epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
  [ -s $f ]

  nextepoch=$(( $epoch + 1 ))
  echo epoch $epoch nextepoch $nextepoch

  # a stale/wrong epoch must be refused
  expect_false ceph osd setcrushmap $nextepoch -i $f
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  # should be idempotent
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo epoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]

  rm -f $f
}
function test_mon_osd_pool()
{
  # Pool lifecycle: create, snapshot, rename, type conflicts, EC overwrite
  # flag, delete.
  ceph osd pool create data 10
  ceph osd pool mksnap data datasnap
  rados -p data lssnap | grep datasnap
  ceph osd pool rmsnap data datasnap
  expect_false ceph osd pool rmsnap pool_fake snapshot
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pool create data2 10
  ceph osd pool rename data2 data3
  ceph osd lspools | grep data3
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it

  # re-creating with identical parameters is idempotent
  ceph osd pool create replicated 12 12 replicated
  ceph osd pool create replicated 12 12 replicated
  ceph osd pool create replicated 12 12 # default is replicated
  ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
  # should fail because the type is not the same
  expect_false ceph osd pool create replicated 12 12 erasure
  ceph osd lspools | grep replicated
  ceph osd pool create ec_test 1 1 erasure

  # allow_ec_overwrites is only permitted on bluestore-backed pools
  set +e
  ceph osd metadata | grep osd_objectstore_type | grep -qc bluestore
  if [ $? -eq 0 ]; then
    ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
    # BUG FIX: check_response's signature is (expected_string, retcode,
    # expected_retcode); the original passed ($?, 22, "message"), which
    # compares the message against the return code and can never match.
    check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
  else
    ceph osd pool set ec_test allow_ec_overwrites true || return 1
    expect_false ceph osd pool set ec_test allow_ec_overwrites false
  fi
  set -e

  ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
  ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
}
function test_mon_osd_pool_quota()
{
  #
  # test osd pool set/get quota
  #

  # create tmp pool
  ceph osd pool create tmp-quota-pool 36

  #
  # set erroneous quotas
  #
  expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
  expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
  expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa

  #
  # set valid quotas
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10
  ceph osd pool set-quota tmp-quota-pool max_objects 10M

  #
  # get quotas (10M objects is reported as 10240k)
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10240k objects'

  #
  # get quotas in json-pretty format
  #
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_objects":.*10485760'
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_bytes":.*10'

  #
  # reset pool quotas (0 disables the quota)
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 0
  ceph osd pool set-quota tmp-quota-pool max_objects 0

  #
  # test N/A quotas
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'

  # cleanup tmp pool
  ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
}
function test_mon_pg()
{
  # PG queries/dumps, full-ratio settings and injected-full health checks.
  # NOTE(review): helper calls lost in the mangled source (wait_for_clean,
  # wait_for_health_ok from ceph-helpers.sh) were restored -- diff before
  # merging.

  # Make sure we start healthy.
  wait_for_clean

  ceph pg debug unfound_objects_exist
  ceph pg debug degraded_pgs_exist
  ceph pg deep-scrub 1.0
  ceph pg dump pgs_brief --format=json
  ceph pg dump pgs --format=json
  ceph pg dump pools --format=json
  ceph pg dump osds --format=json
  ceph pg dump sum --format=json
  ceph pg dump all --format=json
  ceph pg dump pgs_brief osds --format=json
  ceph pg dump pools osds pgs_brief --format=json
  ceph pg dump_pools_json
  ceph pg dump_stuck inactive
  ceph pg dump_stuck unclean
  ceph pg dump_stuck stale
  ceph pg dump_stuck undersized
  ceph pg dump_stuck degraded

  # pg ls filtering: bogus state rejected, valid combinations accepted
  expect_false ceph pg ls scrubq
  ceph pg ls active stale repair recovering
  ceph pg ls 1 active stale
  ceph pg ls-by-primary osd.0
  ceph pg ls-by-primary osd.0 1
  ceph pg ls-by-primary osd.0 active
  ceph pg ls-by-primary osd.0 active stale
  ceph pg ls-by-primary osd.0 1 active stale
  ceph pg ls-by-osd osd.0
  ceph pg ls-by-osd osd.0 1
  ceph pg ls-by-osd osd.0 active
  ceph pg ls-by-osd osd.0 active stale
  ceph pg ls-by-osd osd.0 1 active stale
  ceph pg ls-by-pool rbd
  ceph pg ls-by-pool rbd active stale
  # can't test this...
  # ceph pg force_create_pg
  ceph pg getmap -o $TEMP_DIR/map.$$
  [ -s $TEMP_DIR/map.$$ ]
  ceph pg map 1.0 | grep acting

  # full ratios are reflected in the osdmap
  ceph osd set-full-ratio .962
  ceph osd dump | grep '^full_ratio 0.962'
  ceph osd set-backfillfull-ratio .912
  ceph osd dump | grep '^backfillfull_ratio 0.912'
  ceph osd set-nearfull-ratio .892
  ceph osd dump | grep '^nearfull_ratio 0.892'

  # Check health status: out-of-order ratios raise OSD_OUT_OF_ORDER_FULL
  ceph osd set-nearfull-ratio .913
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-nearfull-ratio .892
  ceph osd set-backfillfull-ratio .963
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-backfillfull-ratio .912

  # Check injected full results
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
  wait_for_health "OSD_NEARFULL"
  ceph health detail | grep "osd.0 is near full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
  wait_for_health_ok

  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
  wait_for_health "OSD_BACKFILLFULL"
  ceph health detail | grep "osd.1 is backfill full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
  wait_for_health_ok

  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
  # failsafe and full are the same as far as the monitor is concerned
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.2 is full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
  wait_for_health_ok

  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.0 is full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
  wait_for_health_ok

  ceph pg stat | grep 'pgs:'
  ceph report | grep osd_stats

  # tell osd version: valid target works, bogus targets fail
  ceph tell osd.0 version
  expect_false ceph tell osd.9999 version
  expect_false ceph tell osd.foo version

  ceph tell osd.0 dump_pg_recovery_stats | grep Started

  # reweight accepts [0,1], by id or OsdName
  ceph osd reweight 0 0.9
  expect_false ceph osd reweight 0 -1
  ceph osd reweight osd.0 1

  ceph osd primary-affinity osd.0 .9
  expect_false ceph osd primary-affinity osd.0 -2
  expect_false ceph osd primary-affinity osd.9999 .5
  ceph osd primary-affinity osd.0 1

  # pg-temp accepts ids or OsdNames, and rejects malformed mappings
  ceph osd pool set rbd size 2
  ceph osd pg-temp 1.0 0 1
  ceph osd pg-temp 1.0 osd.1 osd.0
  expect_false ceph osd pg-temp 1.0 0 1 2
  expect_false ceph osd pg-temp asdf qwer
  expect_false ceph osd pg-temp 1.0 asdf
  expect_false ceph osd pg-temp 1.0

  # don't test ceph osd primary-temp for now
}
function test_mon_osd_pool_set()
{
  # Exhaustive get/set coverage for per-pool options on a scratch pool.
  TEST_POOL_GETSET=pool_getset
  ceph osd pool create $TEST_POOL_GETSET 1
  ceph osd pool get $TEST_POOL_GETSET all

  for s in pg_num pgp_num size min_size crush_rule; do
    ceph osd pool get $TEST_POOL_GETSET $s
  done

  # size round-trips
  old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
  (( new_size = old_size + 1 ))
  ceph osd pool set $TEST_POOL_GETSET size $new_size
  ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
  ceph osd pool set $TEST_POOL_GETSET size $old_size

  # EC pools cannot change size
  ceph osd pool create pool_erasure 1 1 erasure
  set +e
  ceph osd pool set pool_erasure size 4444 2>$TMPFILE
  check_response 'not change the size'
  set -e
  ceph osd pool get pool_erasure erasure_code_profile

  # NOTE(review): the auid value assignment was lost in the mangled source;
  # restored -- confirm against upstream.
  auid=5555
  ceph osd pool set $TEST_POOL_GETSET auid $auid
  ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
  ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
  ceph osd pool set $TEST_POOL_GETSET auid 0

  # boolean flags accept true/false and 1/0, reject anything else
  for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
    ceph osd pool set $TEST_POOL_GETSET $flag false
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
    ceph osd pool set $TEST_POOL_GETSET $flag true
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
    ceph osd pool set $TEST_POOL_GETSET $flag 1
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
    ceph osd pool set $TEST_POOL_GETSET $flag 0
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
    expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
    expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
  done

  # optional numeric fields: unset prints nothing, 0 clears them
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
  ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
  ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'

  # nopgchange blocks pg_num/pgp_num changes
  ceph osd pool set $TEST_POOL_GETSET nopgchange 1
  expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
  expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
  ceph osd pool set $TEST_POOL_GETSET nopgchange 0
  ceph osd pool set $TEST_POOL_GETSET pg_num 10
  ceph osd pool set $TEST_POOL_GETSET pgp_num 10

  # growing within the per-osd creation limit works ...
  old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
  new_pgs=$(($old_pgs+$(ceph osd stat | grep osdmap | awk '{print $3}')*32))
  ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
  ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs

  # ... but one more than the limit must fail
  old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
  new_pgs=$(($old_pgs+$(ceph osd stat | grep osdmap | awk '{print $3}')*32+1))
  expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs

  # nosizechange blocks size/min_size changes
  ceph osd pool set $TEST_POOL_GETSET nosizechange 1
  expect_false ceph osd pool set $TEST_POOL_GETSET size 2
  expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
  ceph osd pool set $TEST_POOL_GETSET nosizechange 0
  ceph osd pool set $TEST_POOL_GETSET size 2
  ceph osd pool set $TEST_POOL_GETSET min_size 2

  # hashpspool requires explicit confirmation in both directions
  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
  ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
  ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it

  ceph osd pool get rbd crush_rule | grep 'crush_rule: '

  # compression options: unset prints nothing, "unset" clears them
  ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
  ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
  ceph osd pool set $TEST_POOL_GETSET compression_mode unset
  ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
  ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
  expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
  expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
  ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
  ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
  ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
  ceph osd pool set $TEST_POOL_GETSET csum_type unset
  ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'

  for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
    ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET $size 100
    ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
    ceph osd pool set $TEST_POOL_GETSET $size 0
    ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
  done

  # BUG FIX: the original deleted $TEST_POOL_GETSET in the nodelete block
  # mid-function and then kept setting/getting options on the deleted pool;
  # the nodelete test and final delete are moved to the end.
  ceph osd pool set $TEST_POOL_GETSET nodelete 1
  expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
  ceph osd pool set $TEST_POOL_GETSET nodelete 0
  ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
}
2009 function test_mon_osd_tiered_pool_set
()
2011 # this is really a tier pool
2012 ceph osd pool create real-tier
2
2013 ceph osd tier add rbd real-tier
2015 ceph osd pool
set real-tier hit_set_type explicit_hash
2016 ceph osd pool get real-tier hit_set_type |
grep "hit_set_type: explicit_hash"
2017 ceph osd pool
set real-tier hit_set_type explicit_object
2018 ceph osd pool get real-tier hit_set_type |
grep "hit_set_type: explicit_object"
2019 ceph osd pool
set real-tier hit_set_type bloom
2020 ceph osd pool get real-tier hit_set_type |
grep "hit_set_type: bloom"
2021 expect_false ceph osd pool
set real-tier hit_set_type i_dont_exist
2022 ceph osd pool
set real-tier hit_set_period
123
2023 ceph osd pool get real-tier hit_set_period |
grep "hit_set_period: 123"
2024 ceph osd pool
set real-tier hit_set_count
12
2025 ceph osd pool get real-tier hit_set_count |
grep "hit_set_count: 12"
2026 ceph osd pool
set real-tier hit_set_fpp
.01
2027 ceph osd pool get real-tier hit_set_fpp |
grep "hit_set_fpp: 0.01"
2029 ceph osd pool
set real-tier target_max_objects
123
2030 ceph osd pool get real-tier target_max_objects | \
2031 grep 'target_max_objects:[ \t]\+123'
2032 ceph osd pool
set real-tier target_max_bytes
123456
2033 ceph osd pool get real-tier target_max_bytes | \
2034 grep 'target_max_bytes:[ \t]\+123456'
2035 ceph osd pool
set real-tier cache_target_dirty_ratio
.123
2036 ceph osd pool get real-tier cache_target_dirty_ratio | \
2037 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
2038 expect_false ceph osd pool
set real-tier cache_target_dirty_ratio
-.2
2039 expect_false ceph osd pool
set real-tier cache_target_dirty_ratio
1.1
2040 ceph osd pool
set real-tier cache_target_dirty_high_ratio
.123
2041 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
2042 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
2043 expect_false ceph osd pool
set real-tier cache_target_dirty_high_ratio
-.2
2044 expect_false ceph osd pool
set real-tier cache_target_dirty_high_ratio
1.1
2045 ceph osd pool
set real-tier cache_target_full_ratio
.123
2046 ceph osd pool get real-tier cache_target_full_ratio | \
2047 grep 'cache_target_full_ratio:[ \t]\+0.123'
2048 ceph osd dump
-f json-pretty |
grep '"cache_target_full_ratio_micro": 123000'
2049 ceph osd pool
set real-tier cache_target_full_ratio
1.0
2050 ceph osd pool
set real-tier cache_target_full_ratio
0
2051 expect_false ceph osd pool
set real-tier cache_target_full_ratio
1.1
2052 ceph osd pool
set real-tier cache_min_flush_age
123
2053 ceph osd pool get real-tier cache_min_flush_age | \
2054 grep 'cache_min_flush_age:[ \t]\+123'
2055 ceph osd pool
set real-tier cache_min_evict_age
234
2056 ceph osd pool get real-tier cache_min_evict_age | \
2057 grep 'cache_min_evict_age:[ \t]\+234'
2059 # this is not a tier pool
2060 ceph osd pool create fake-tier
2
2063 expect_false ceph osd pool
set fake-tier hit_set_type explicit_hash
2064 expect_false ceph osd pool get fake-tier hit_set_type
2065 expect_false ceph osd pool
set fake-tier hit_set_type explicit_object
2066 expect_false ceph osd pool get fake-tier hit_set_type
2067 expect_false ceph osd pool
set fake-tier hit_set_type bloom
2068 expect_false ceph osd pool get fake-tier hit_set_type
2069 expect_false ceph osd pool
set fake-tier hit_set_type i_dont_exist
2070 expect_false ceph osd pool
set fake-tier hit_set_period
123
2071 expect_false ceph osd pool get fake-tier hit_set_period
2072 expect_false ceph osd pool
set fake-tier hit_set_count
12
2073 expect_false ceph osd pool get fake-tier hit_set_count
2074 expect_false ceph osd pool
set fake-tier hit_set_fpp
.01
2075 expect_false ceph osd pool get fake-tier hit_set_fpp
2077 expect_false ceph osd pool
set fake-tier target_max_objects
123
2078 expect_false ceph osd pool get fake-tier target_max_objects
2079 expect_false ceph osd pool
set fake-tier target_max_bytes
123456
2080 expect_false ceph osd pool get fake-tier target_max_bytes
2081 expect_false ceph osd pool
set fake-tier cache_target_dirty_ratio
.123
2082 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2083 expect_false ceph osd pool
set fake-tier cache_target_dirty_ratio
-.2
2084 expect_false ceph osd pool
set fake-tier cache_target_dirty_ratio
1.1
2085 expect_false ceph osd pool
set fake-tier cache_target_dirty_high_ratio
.123
2086 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2087 expect_false ceph osd pool
set fake-tier cache_target_dirty_high_ratio
-.2
2088 expect_false ceph osd pool
set fake-tier cache_target_dirty_high_ratio
1.1
2089 expect_false ceph osd pool
set fake-tier cache_target_full_ratio
.123
2090 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2091 expect_false ceph osd pool
set fake-tier cache_target_full_ratio
1.0
2092 expect_false ceph osd pool
set fake-tier cache_target_full_ratio
0
2093 expect_false ceph osd pool
set fake-tier cache_target_full_ratio
1.1
2094 expect_false ceph osd pool
set fake-tier cache_min_flush_age
123
2095 expect_false ceph osd pool get fake-tier cache_min_flush_age
2096 expect_false ceph osd pool
set fake-tier cache_min_evict_age
234
2097 expect_false ceph osd pool get fake-tier cache_min_evict_age
2099 ceph osd tier remove rbd real-tier
2100 ceph osd pool delete real-tier real-tier
--yes-i-really-really-mean-it
2101 ceph osd pool delete fake-tier fake-tier
--yes-i-really-really-mean-it
function test_mon_osd_erasure_code()
{
  # Setting a profile with identical key/value pairs is idempotent;
  # changing the pairs of an existing profile requires --force.
  ceph osd erasure-code-profile set fooprofile a=b c=d
  ceph osd erasure-code-profile set fooprofile a=b c=d
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h

  # cleanup by removing profile 'fooprofile'
  ceph osd erasure-code-profile rm fooprofile
}
function test_mon_osd_misc()
{
  # Error-path checks: each command writes its error to $TMPFILE and
  # check_response asserts both the message and the exit code (22 = EINVAL).

  # expect error about missing 'pool' argument
  ceph osd map 2>$TMPFILE; check_response 'pool' $? 22

  # expect error about unused argument foo
  ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22

  # expect "not in range" for invalid full ratio
  ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22

  # expect "not in range" for invalid overload percentage
  ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22

  # valid overload must be > 100; the optional max-change arg must be > 0
  ceph osd reweight-by-utilization 110
  ceph osd reweight-by-utilization 110 .5
  expect_false ceph osd reweight-by-utilization 110 0
  expect_false ceph osd reweight-by-utilization 110 -0.1
  ceph osd test-reweight-by-utilization 110 .5 --no-increasing
  ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
  ceph osd reweight-by-pg 110
  ceph osd test-reweight-by-pg 110 .5
  ceph osd reweight-by-pg 110 rbd
  ceph osd reweight-by-pg 110 .5 rbd
  expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
}
function test_mon_heap_profiler()
{
  # Skip gracefully when the mon was not built with tcmalloc.
  # NOTE(review): the do_test bookkeeping was lost in this copy of the
  # file and has been reconstructed — confirm against upstream.
  local do_test=1

  # expect 'heap' commands to be correctly parsed
  ceph heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi

  [[ $do_test -eq 0 ]] && return 0

  ceph heap start_profiler
  ceph heap stop_profiler
}
function test_admin_heap_profiler()
{
  # Same tcmalloc availability probe as test_mon_heap_profiler, but the
  # profiler is driven through the osd.0 admin socket instead of 'ceph tell'.
  # NOTE(review): the do_test bookkeeping was lost in this copy of the
  # file and has been reconstructed — confirm against upstream.
  local do_test=1

  # expect 'heap' commands to be correctly parsed
  ceph heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi

  [[ $do_test -eq 0 ]] && return 0

  local admin_socket=$(get_admin_socket osd.0)

  $SUDO ceph --admin-daemon $admin_socket heap start_profiler
  $SUDO ceph --admin-daemon $admin_socket heap dump
  $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
  $SUDO ceph --admin-daemon $admin_socket heap release
}
function test_osd_bench()
{
  # test osd bench limits
  # As we should not rely on defaults (as they may change over time),
  # lets inject some values and perform some simple tests
  # max iops: 10              # 100 IOPS
  # max throughput: 10485760  # 10MB/s
  # max block size: 2097152   # 2MB
  # duration: 10              # 10 seconds
  local args="\
    --osd-bench-duration 10 \
    --osd-bench-max-block-size 2097152 \
    --osd-bench-large-size-max-throughput 10485760 \
    --osd-bench-small-size-max-iops 10"
  # strip the leading whitespace so injectargs sees clean arguments
  ceph tell osd.0 injectargs ${args## }

  # anything with a bs larger than 2097152 must fail
  expect_false ceph tell osd.0 bench 1 2097153
  # but using 'osd_bench_max_bs' must succeed
  ceph tell osd.0 bench 1 2097152

  # we assume 1MB as a large bs; anything lower is a small bs
  # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
  # max count: 409600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 409601 4096
  # but 409600 must succeed
  ceph tell osd.0 bench 409600 4096

  # for a large bs, we are limited by throughput.
  # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
  # the max count will be (10MB * 10s) = 100MB
  # max count: 104857600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 104857601 2097152
  # up to max count must be allowed
  ceph tell osd.0 bench 104857600 2097152
}
function test_osd_negative_filestore_merge_threshold()
{
  # A negative merge threshold is legal (it disables merging); set it
  # via the admin socket and verify it round-trips.
  $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
  expect_config_value "osd.0" "filestore_merge_threshold" -1
}
function test_mon_tell()
{
  ceph tell mon.a version
  ceph tell mon.b version
  expect_false ceph tell mon.foo version

  # verify the command shows up in the target mon's debug log
  ceph_watch_start debug
  ceph tell mon.a version
  ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'

  ceph_watch_start debug
  ceph tell mon.b version
  ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
}
function test_mon_ping()
{
  # pinging a non-existent mon id must fail
  expect_false ceph ping mon.foo
}
function test_mon_deprecated_commands()
{
  # Testing should be accomplished by setting
  # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
  # each one of these commands.

  ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
  expect_false ceph tell mon.a compact 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a scrub 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a sync force 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  # restore normal deprecated-command handling
  ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
}
function test_mon_cephdf_commands()
{
  # ceph df detail:
  # RAW USED The near raw used per pool in raw total

  ceph osd pool create cephdf_for_test 32 32 replicated
  ceph osd pool set cephdf_for_test size 2

  dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
  rados put cephdf_for_test cephdf_for_test -p cephdf_for_test

  # wait until the object is visible to "rados ls"
  for i in `seq 1 10`; do
    rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
    sleep 1
  done
  # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
  # to sync mon with osd
  # NOTE(review): the sync command was lost in this copy of the file;
  # flush_pg_stats (from ceph-helpers.sh) is the likely original — confirm.
  flush_pg_stats

  # with size=2, raw usage must be exactly twice the logical usage
  local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
  cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
  raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`

  ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
  rm ./cephdf_for_test

  expect_false test $cal_raw_used_size != $raw_used_size
}
function test_mon_tell_help_command()
{
  ceph tell mon.a help

  # a non-existent mon id must be rejected
  expect_false ceph tell mon.zzz help
}
function test_osd_tell_help_command()
{
  ceph tell osd.1 help

  # a non-existent osd id must be rejected
  expect_false ceph tell osd.100 help
}
function test_osd_compact()
{
  # compaction must be reachable both via 'tell' and the admin socket
  ceph tell osd.1 compact
  ceph daemon osd.1 compact
}
function test_mds_tell_help_command()
{
  local FS_NAME=cephfs
  # NOTE(review): the skip-branch terminator was lost in this copy of
  # the file and has been reconstructed — confirm against upstream.
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  ceph tell mds.a help
  # a non-existent mds id must be rejected
  expect_false ceph tell mds.z help

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mgr_tell()
{
  # mgr must accept 'tell' commands for its modules
  ceph tell mgr fs status
  ceph tell mgr osd status
}
#
# New tests should be added to the TESTS array below
#
# Individual tests may be run using the '-t <testname>' argument
# The user can specify '-t <testname>' as many times as she wants
#
# Tests will be run in order presented in the TESTS array, or in
# the order specified by the '-t <testname>' options.
#
# '-l' will list all the available test names
# '-h' will show usage
#
# The test maintains backward compatibility: not specifying arguments
# will run all tests following the order they appear in the TESTS array.
#

MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
for i in `seq 9`; do
  MON_TESTS+=" tiering_$i"
done
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
MON_TESTS+=" mon_crush"
MON_TESTS+=" mon_osd_create_destroy"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_pool_set"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
MON_TESTS+=" mon_heap_profiler"
MON_TESTS+=" mon_tell"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"
MON_TESTS+=" mon_tell_help_command"

OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"
OSD_TESTS+=" admin_heap_profiler"
OSD_TESTS+=" osd_tell_help_command"
OSD_TESTS+=" osd_compact"

MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"
MDS_TESTS+=" mds_tell_help_command"

MGR_TESTS+=" mgr_tell"
2438 function list_tests
()
2440 echo "AVAILABLE TESTS"
2448 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2455 while [[ $# -gt 0 ]]; do
2462 "--asok-does-not-need-root" )
2465 "--no-sanity-check" )
2469 tests_to_run
+="$MON_TESTS"
2472 tests_to_run
+="$OSD_TESTS"
2475 tests_to_run
+="$MDS_TESTS"
2478 tests_to_run
+="$MGR_TESTS"
2482 if [[ -z "$1" ]]; then
2483 echo "missing argument to '-t'"
2497 if [[ $do_list -eq 1 ]]; then
2502 ceph osd pool create rbd
10
2504 if test -z "$tests_to_run" ; then
2505 tests_to_run
="$TESTS"
2508 if $sanity_check ; then
2511 for i
in $tests_to_run; do
2512 if $sanity_check ; then
2519 if $sanity_check ; then