# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab
# Pull in the shared test helpers (get_asok_dir, expect_failure, ...)
# relative to this script's location.
source $(dirname $0)/../../standalone/ceph-helpers.sh
# Make 'set -x' traces show file:line:function for easier debugging.
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
# Print the admin-socket path for the given client name ($1).
# Uses $CEPH_ASOK_DIR when set; otherwise derives the cluster name
# from --cluster in $CEPH_ARGS and uses the default /var/run/ceph path.
function get_admin_socket()
{
  local client=$1

  if test -n "$CEPH_ASOK_DIR";
  then
    echo $(get_asok_dir)/$client.asok
  else
    local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
    echo "/var/run/ceph/$cluster-$client.asok"
  fi
}
# Succeed iff no OSD in the osd dump is marked ' down '.
function check_no_osd_down()
{
    ! ceph osd dump | grep ' down '
}
# Poll until every OSD is up (or give up after $max_run iterations).
# The final check_no_osd_down asserts success.
function wait_no_osd_down()
{
  local max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_no_osd_down ; then
      echo "waiting for osd(s) to come back up ($i/$max_run)"
      sleep 1   # NOTE(review): loop tail reconstructed — confirm against upstream
    else
      break
    fi
  done
  check_no_osd_down
}
# Invert the exit status of the given command: return 0 when it fails,
# 1 when it (unexpectedly) succeeds.
function expect_false()
{
  if "$@"; then return 1; else return 0; fi
}
# Scratch area for the whole run; removed on exit via the trap.
# ${TMPDIR:-/tmp} also covers a set-but-empty TMPDIR.
TEMP_DIR=$(mktemp -d "${TMPDIR:-/tmp}/cephtool.XXX")
trap "rm -fr $TEMP_DIR" 0

# Shared capture file for command output checked by check_response().
TMPFILE=$(mktemp "$TEMP_DIR/test_invalid.XXX")
# retry_eagain max cmd args ...
#
# retry cmd args ... if it exits on error and its output contains the
# string EAGAIN, at most $max times
function retry_eagain()
{
    local max=$1
    shift
    local status
    local tmpfile=$TEMP_DIR/retry_eagain.$$
    local count
    for count in $(seq 1 $max) ; do
        status=0
        "$@" > $tmpfile 2>&1 || status=$?
        # stop on success, or on any failure that is not an EAGAIN
        if test $status = 0 ||
            ! grep --quiet EAGAIN $tmpfile ; then
            break
        fi
        sleep 1
    done
    if test $count = $max ; then
        echo retried with non zero exit status, $max times: "$@" >&2
    fi
    cat $tmpfile
    rm $tmpfile
    return $status
}
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of cmd arg ... if the command fails and its
# output contains ENXIO, so retry_eagain will retry it.
function map_enxio_to_eagain()
{
    local status=0
    local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$

    "$@" > $tmpfile 2>&1 || status=$?
    if test $status != 0 &&
        grep --quiet ENXIO $tmpfile ; then
        echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
    fi
    cat $tmpfile
    rm $tmpfile
    return $status
}
# check_response expected_string [retcode expected_retcode]
#
# Die unless $TMPFILE contains $expected_string; when an expected
# return code is given, also die on mismatch.
function check_response()
{
    local expected_string=$1
    local retcode=$2
    local expected_retcode=$3

    if [ "$expected_retcode" -a "$retcode" != "$expected_retcode" ] ; then
        echo "return code invalid: got $retcode, expected $expected_retcode" >&2
        exit 1
    fi

    if ! grep --quiet -- "$expected_string" $TMPFILE ; then
        echo "Didn't find $expected_string in output" >&2
        cat $TMPFILE >&2
        exit 1
    fi
}
# get_config_value_or_die target config_opt
#
# Print the value of a daemon config option via the admin socket;
# exit the script on any failure.
function get_config_value_or_die()
{
  local target config_opt raw val

  target=$1
  config_opt=$2

  raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
  if [[ $? -ne 0 ]]; then
    echo "error obtaining config opt '$config_opt' from '$target': $raw"
    exit 1
  fi

  # strip JSON punctuation, then keep the value after the colon
  raw=`echo $raw | sed -e 's/[{} "]//g'`
  val=`echo $raw | cut -f2 -d:`

  if [[ "$val" == "" ]]; then
    echo "failed to get value for '$config_opt' from '$target'"
    exit 1
  fi

  echo "$val"
  return 0
}
# expect_config_value target config_opt expected_val
#
# Die unless the daemon reports the expected value for the option.
function expect_config_value()
{
  local target config_opt expected_val val

  target=$1
  config_opt=$2
  expected_val=$3

  val=$(get_config_value_or_die $target $config_opt)

  if [[ "$val" != "$expected_val" ]]; then
    echo "expected '$expected_val', got '$val'"
    exit 1
  fi
}
# ceph_watch_start [level [channel]]
#
# Start 'ceph --watch[-level]' in the background, capturing its output
# in $CEPH_WATCH_FILE, and wait briefly until the client is connected
# and receiving log messages from the monitor.
function ceph_watch_start()
{
    local whatch_opt=--watch

    if [ -n "$1" ]; then
        whatch_opt=--watch-$1
        if [ -n "$2" ]; then
            whatch_opt+=" --watch-channel $2"
        fi
    fi

    CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
    ceph $whatch_opt > $CEPH_WATCH_FILE &
    CEPH_WATCH_PID=$!   # NOTE(review): PID capture reconstructed — used by ceph_watch_wait

    # wait until the "ceph" client is connected and receiving
    # log messages from monitor
    for i in `seq 3`; do
        grep -q "cluster" $CEPH_WATCH_FILE && break
        sleep 1
    done
}
# ceph_watch_wait regexp [timeout]
#
# Wait (default 30s) for $regexp to appear in the watch file started by
# ceph_watch_start, then stop the watcher; fail if it never appeared.
function ceph_watch_wait()
{
    local regexp=$1
    local timeout=${2:-30}

    for i in `seq ${timeout}`; do
        grep -q "$regexp" $CEPH_WATCH_FILE && break
        sleep 1
    done

    kill $CEPH_WATCH_PID   # NOTE(review): watcher teardown reconstructed

    if ! grep "$regexp" $CEPH_WATCH_FILE; then
        echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
        cat $CEPH_WATCH_FILE >&2
        return 1
    fi
}
# Exercise 'ceph tell ... injectargs' option parsing: boolean flags,
# quoted multi-option strings, '--' separators, float coercion, and
# rejection of out-of-range values.
function test_mon_injectargs()
{
  CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ! grep "the.dump" $TMPFILE || return 1
  ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true'"
  ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
  expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
      ceph tell osd.0 injectargs -- '--osd_op_history_duration'

  ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200' >& $TMPFILE || return 1
  check_response "osd_deep_scrub_interval = '2419200.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon_probe_timeout 2' >& $TMPFILE || return 1
  check_response "mon_probe_timeout = '2.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
  check_response "mon_lease = '6.000000' (not observed, change may require restart)"

  # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
  expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 >& $TMPFILE || return 1
  check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
}
function test_mon_injectargs_SI()
{
  # Test SI units during injectargs and 'config set'
  # We only aim at testing the units are parsed accordingly
  # and don't intend to test whether the options being set
  # actually expect SI units to be passed.
  # Keep in mind that all integer based options (i.e., INT,
  # LONG, U32, U64) will accept SI unit modifiers.
  initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
  check_response "'10F': (22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
  expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
  expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
  # restore the original value
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}
# Verify the tiering agent evicts objects from a small cache tier, and
# that a proxied read promotes the object back before it is evicted again.
function test_tiering_agent()
{
  local slow=slow_eviction
  local fast=fast_eviction
  ceph osd pool create $slow 1 1
  ceph osd pool application enable $slow rados
  ceph osd pool create $fast 1 1
  ceph osd tier add $slow $fast
  ceph osd tier cache-mode $fast writeback
  ceph osd tier set-overlay $slow $fast
  ceph osd pool set $fast hit_set_type bloom
  rados -p $slow put obj1 /etc/group
  ceph osd pool set $fast target_max_objects 1
  ceph osd pool set $fast hit_set_count 1
  ceph osd pool set $fast hit_set_period 5
  # wait for the object to be evicted from the cache
  local evicted=false
  for i in `seq 1 300` ; do
      if ! rados -p $fast ls | grep obj1 ; then
          evicted=true
          break
      fi
      sleep 1   # NOTE(review): loop tail reconstructed — confirm against upstream
  done
  $evicted  # assert the object was evicted
  # the object is proxy read and promoted to the cache
  rados -p $slow get obj1 - >/dev/null
  # wait for the promoted object to be evicted again
  evicted=false
  for i in `seq 1 300` ; do
      if ! rados -p $fast ls | grep obj1 ; then
          evicted=true
          break
      fi
      sleep 1
  done
  $evicted  # assert the object was evicted again
  ceph osd tier remove-overlay $slow
  ceph osd tier remove $slow $fast
  ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
  ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}
# Exercise tier add/remove, cache-mode state transitions (and the
# dirty-object restrictions on them), overlays, and non-empty-pool rules.
function test_tiering_1()
{
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create slow2 2
  ceph osd pool application enable slow2 rados
  ceph osd pool create cache 2
  ceph osd pool create cache2 2
  ceph osd tier add slow cache
  ceph osd tier add slow cache2
  expect_false ceph osd tier add slow2 cache
  # test some state transitions
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache readonly
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache writeback
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  # test with dirty objects in the tier pool
  # tier pool currently set to 'writeback'
  rados -p cache put /etc/passwd /etc/passwd
  # 1 dirty object in pool 'cache'
  ceph osd tier cache-mode cache proxy
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache writeback
  # remove object from tier pool
  rados -p cache rm /etc/passwd
  rados -p cache cache-flush-evict-all
  # no dirty objects in pool 'cache'
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  # NOTE(review): loop scaffolding below reconstructed from fragments.
  TRIES=0
  while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
  do
      grep 'currently creating pgs' $TMPFILE
      TRIES=$(( $TRIES + 1 ))
      sleep 3
  done
  expect_false ceph osd pool set cache pg_num 4
  ceph osd tier cache-mode cache none
  ceph osd tier set-overlay slow cache
  expect_false ceph osd tier set-overlay slow cache2
  expect_false ceph osd tier remove slow cache
  ceph osd tier remove-overlay slow
  ceph osd tier set-overlay slow cache2
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache
  ceph osd tier add slow2 cache
  expect_false ceph osd tier set-overlay slow cache
  ceph osd tier set-overlay slow2 cache
  ceph osd tier remove-overlay slow2
  ceph osd tier remove slow2 cache
  ceph osd tier remove slow cache2

  # make sure a non-empty pool fails
  rados -p cache2 put /etc/passwd /etc/passwd
  while ! ceph df | grep cache2 | grep ' 1 ' ; do
      echo waiting for pg stats to flush
      sleep 2
  done
  expect_false ceph osd tier add slow cache2
  ceph osd tier add slow cache2 --force-nonempty
  ceph osd tier remove slow cache2

  ceph osd pool ls | grep cache2
  ceph osd pool ls -f json-pretty | grep cache2
  ceph osd pool ls detail | grep cache2
  ceph osd pool ls detail -f json-pretty | grep cache2

  ceph osd pool delete slow slow --yes-i-really-really-mean-it
  ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
  ceph osd pool delete cache cache --yes-i-really-really-mean-it
  ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
}
function test_tiering_2()
{
  # make sure we can't clobber snapshot state
  ceph osd pool create snap_base 2
  ceph osd pool application enable snap_base rados
  ceph osd pool create snap_cache 2
  ceph osd pool mksnap snap_cache snapname
  expect_false ceph osd tier add snap_base snap_cache
  ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
  ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}
function test_tiering_3()
{
  # make sure we can't create snapshot on tier
  ceph osd pool create basex 2
  ceph osd pool application enable basex rados
  ceph osd pool create cachex 2
  ceph osd tier add basex cachex
  expect_false ceph osd pool mksnap cache snapname
  ceph osd tier remove basex cachex
  ceph osd pool delete basex basex --yes-i-really-really-mean-it
  ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}
function test_tiering_4()
{
  # make sure we can't create an ec pool tier
  ceph osd pool create eccache 2 2 erasure
  expect_false ceph osd set-require-min-compat-client bobtail
  ceph osd pool create repbase 2
  ceph osd pool application enable repbase rados
  expect_false ceph osd tier add repbase eccache
  ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
  ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}
function test_tiering_5()
{
  # convenient add-cache command
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create cache3 2
  ceph osd tier add-cache slow cache3 1024000
  ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
  ceph osd tier remove slow cache3 2> $TMPFILE || true
  check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache3
  ceph osd pool ls | grep cache3
  ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
  ! ceph osd pool ls | grep cache3 || exit 1
  ceph osd pool delete slow slow --yes-i-really-really-mean-it
}
function test_tiering_6()
{
  # check add-cache whether work
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd tier cache-mode cachepool writeback
  rados -p datapool put object /etc/passwd
  rados -p cachepool stat object
  rados -p cachepool cache-flush object
  rados -p datapool stat object
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_7()
{
  # protection against pool removal when used as tiers
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'datapool' has tiers cachepool"
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_8()
{
  ## check health check
  ceph osd set notieragent
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cache4 2
  ceph osd tier add-cache datapool cache4 1024000
  ceph osd tier cache-mode cache4 writeback
  tmpfile=$(mktemp|grep tmp)
  dd if=/dev/zero of=$tmpfile bs=4K count=1
  ceph osd pool set cache4 target_max_objects 200
  ceph osd pool set cache4 target_max_bytes 1000000
  rados -p cache4 put foo1 $tmpfile
  rados -p cache4 put foo2 $tmpfile
  rm -f $tmpfile   # NOTE(review): cleanup of the temp file; original lines 502-503 missing from excerpt
  ceph df | grep datapool | grep ' 2 '
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cache4
  ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
  ceph osd unset notieragent
}
function test_tiering_9()
{
  # make sure 'tier remove' behaves as we expect
  # i.e., removing a tier from a pool that's not its base pool only
  # results in a 'pool foo is now (or already was) not a tier of bar'
  ceph osd pool create basepoolA 2
  ceph osd pool application enable basepoolA rados
  ceph osd pool create basepoolB 2
  ceph osd pool application enable basepoolB rados
  poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
  poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')

  ceph osd pool create cache5 2
  ceph osd pool create cache6 2
  ceph osd tier add basepoolA cache5
  ceph osd tier add basepoolB cache6
  # removing a tier from the wrong base pool must be a harmless no-op
  ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
  ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"

  # removing from the correct base pool clears tier_of
  ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
  ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1

  ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
  ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1

  ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
  ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
  ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
  ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}
550 ceph auth add client.xx mon allow osd
"allow *"
551 ceph auth
export client.xx
>client.xx.keyring
552 ceph auth add client.xx
-i client.xx.keyring
553 rm -f client.xx.keyring
554 ceph auth list |
grep client.xx
555 ceph auth
ls |
grep client.xx
556 ceph auth get client.xx |
grep caps |
grep mon
557 ceph auth get client.xx |
grep caps |
grep osd
558 ceph auth get-key client.xx
559 ceph auth print-key client.xx
560 ceph auth print_key client.xx
561 ceph auth caps client.xx osd
"allow rw"
562 expect_false sh
<<< "ceph auth get client.xx | grep caps | grep mon"
563 ceph auth get client.xx |
grep osd |
grep "allow rw"
564 ceph auth
export |
grep client.xx
565 ceph auth
export -o authfile
566 ceph auth import
-i authfile
567 ceph auth
export -o authfile2
568 diff authfile authfile2
569 rm authfile authfile2
570 ceph auth del client.xx
571 expect_false ceph auth get client.xx
573 # (almost) interactive mode
574 echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
575 ceph auth get client.xx
577 echo 'auth del client.xx' | ceph
578 expect_false ceph auth get client.xx
584 ceph-authtool
--create-keyring --name client.TEST
--gen-key --set-uid $auid TEST-keyring
585 expect_false ceph auth import
--in-file TEST-keyring
587 ceph-authtool
--create-keyring --name client.TEST
--gen-key --cap mon
"allow r" --set-uid $auid TEST-keyring
588 ceph auth import
--in-file TEST-keyring
590 ceph auth get client.TEST
> $TMPFILE
591 check_response
"auid = $auid"
592 ceph
--format json-pretty auth get client.TEST
> $TMPFILE
593 check_response
'"auid": '$auid
594 ceph auth
ls > $TMPFILE
595 check_response
"auid: $auid"
596 ceph
--format json-pretty auth
ls > $TMPFILE
597 check_response
'"auid": '$auid
598 ceph auth del client.TEST
# Verify the mon auth profiles: read-only, read-write, and role-definer
# clients each get exactly the command set their profile grants.
function test_auth_profiles()
{
  ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
       mgr 'allow profile read-only'
  ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
       mgr 'allow profile read-write'
  ceph auth add client.xx-profile-rd mon 'allow profile role-definer'

  ceph auth export > client.xx.keyring

  # read-only is allowed all read-only commands (auth excluded)
  ceph -n client.xx-profile-ro -k client.xx.keyring status
  ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
  ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
  # read-only gets access denied for rw commands or auth commands
  ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # read-write is allowed for all read-write commands (except auth)
  ceph -n client.xx-profile-rw -k client.xx.keyring status
  ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
  ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
  ceph -n client.xx-profile-rw -k client.xx.keyring log foo
  ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
  ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
  # read-write gets access denied for auth commands
  ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
  ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
  ceph -n client.xx-profile-rd -k client.xx.keyring auth export
  ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
  ceph -n client.xx-profile-rd -k client.xx.keyring status
  ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  # read-only 'mon' subsystem commands are allowed
  ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
  # but read-write 'mon' commands are not
  ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"

  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw

  # add a new role-definer with the existing role-definer
  ceph -n client.xx-profile-rd -k client.xx.keyring \
    auth add client.xx-profile-rd2 mon 'allow profile role-definer'
  ceph -n client.xx-profile-rd -k client.xx.keyring \
    auth export > client.xx.keyring.2
  # remove old role-definer using the new role-definer
  ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
    auth del client.xx-profile-rd
  # remove the remaining role-definer with admin
  ceph auth del client.xx-profile-rd2
  rm -f client.xx.keyring client.xx.keyring.2
}
# A client with no mon caps (or explicitly empty mon caps) must be
# denied by the monitor rather than crash or hang it.
function test_mon_caps()
{
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring

  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"

  rm -rf $TEMP_DIR/ceph.client.bug.keyring
  ceph auth del client.bug
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"
}
# Miscellaneous monitor commands: dumps, df variants and their expected
# fields, health formats, cluster log, and mgr module enable/disable.
function test_mon_misc()
{
  # with and without verbosity
  ceph osd dump | grep '^epoch'
  ceph --concise osd dump | grep '^epoch'

  ceph osd df | grep 'MIN/MAX VAR'

  # plain 'ceph df' must not show cache-tier DIRTY data; 'df detail' must
  # NOTE(review): some 'ceph df' capture commands from the original are
  # missing in this excerpt; restore from upstream if needed.
  grep -v DIRTY $TMPFILE
  ceph df detail > $TMPFILE
  ceph df --format json > $TMPFILE
  grep 'total_bytes' $TMPFILE
  grep -v 'dirty' $TMPFILE
  ceph df detail --format json > $TMPFILE
  grep 'rd_bytes' $TMPFILE
  grep 'dirty' $TMPFILE
  ceph df --format xml | grep '<total_bytes>'
  ceph df detail --format xml | grep '<rd_bytes>'

  ceph health --format json-pretty
  ceph health detail --format xml-pretty

  ceph time-sync-status

  for t in mon osd mds ; do
    # NOTE(review): loop body (original lines 729-732) missing from this
    # excerpt — reconstructed as a per-daemon-type version query; confirm.
    ceph tell $t.* version || true
  done

  mymsg="this is a test log message $$.$(date)"
  ceph log "$mymsg"   # NOTE(review): log-injection line reconstructed — confirm upstream
  ceph log last | grep "$mymsg"
  ceph log last 100 | grep "$mymsg"
  ceph_watch_wait "$mymsg"

  ceph mgr module enable restful
  expect_false ceph mgr module enable foodne
  ceph mgr module enable foodne --force
  ceph mgr module disable foodne
  ceph mgr module disable foodnebizbangbash

  ceph mon count-metadata ceph_version
  ceph mgr count-metadata ceph_version
}
# Succeed iff the named filesystem ($1) reports an active MDS.
function check_mds_active()
{
    local fs_name=$1
    ceph fs get $fs_name | grep active
}
# Poll until the named filesystem ($1) has an active MDS; the trailing
# check_mds_active asserts success after the loop.
function wait_mds_active()
{
  local fs_name=$1
  local max_run=300
  for i in $(seq 1 $max_run) ; do
      if ! check_mds_active $fs_name ; then
          echo "waiting for an active MDS daemon ($i/$max_run)"
          sleep 5   # NOTE(review): loop tail reconstructed — confirm against upstream
      else
          break
      fi
  done
  check_mds_active $fs_name
}
# Print the space-separated GIDs of all MDS daemons in filesystem $1.
function get_mds_gids()
{
    local fs_name=$1
    ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
}
# Mark the filesystem ($1) down and fail every MDS in it; abort the
# whole script if an active MDS survives.
function fail_all_mds()
{
  local fs_name=$1
  ceph fs set $fs_name cluster_down true
  mds_gids=$(get_mds_gids $fs_name)
  for mds_gid in $mds_gids ; do
      ceph mds fail $mds_gid
  done
  if check_mds_active $fs_name ; then
      echo "An active MDS remains, something went wrong"
      exit 1
  fi
}
# Tear down every existing filesystem: fail its MDSs, then remove it.
function remove_all_fs()
{
  existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
  for fs_name in $existing_fs ; do
      echo "Removing fs ${fs_name}..."
      fail_all_mds $fs_name
      echo "Removing existing filesystem '${fs_name}'..."
      ceph fs rm $fs_name --yes-i-really-mean-it
      echo "Removed '${fs_name}'."
  done
}
# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
    ceph auth ls | grep "^mds"
}
# some of the commands are just not idempotent.
#
# Run "$@" with CEPH_CLI_TEST_DUP_COMMAND temporarily unset, restoring
# the caller's value afterwards.
function without_test_dup_command()
{
    if [ -z "${CEPH_CLI_TEST_DUP_COMMAND+x}" ]; then
        "$@"
    else
        local saved=${CEPH_CLI_TEST_DUP_COMMAND}
        unset CEPH_CLI_TEST_DUP_COMMAND
        "$@"
        # bug fix: the original assigned the literal string 'saved',
        # permanently clobbering the caller's value
        CEPH_CLI_TEST_DUP_COMMAND=$saved
    fi
}
# Exercise 'ceph tell mds.*': injectargs by GID, and respawn addressed
# both by rank and by daemon id (each respawn must produce new GIDs).
function test_mds_tell()
{
  # NOTE(review): $FS_NAME is expected to be defined elsewhere in this script.
  if ! mds_exists ; then
      echo "Skipping test, no MDS found"
      return
  fi

  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  # Test injectargs by GID
  old_mds_gids=$(get_mds_gids $FS_NAME)
  echo Old GIDs: $old_mds_gids

  for mds_gid in $old_mds_gids ; do
      ceph tell mds.$mds_gid injectargs "--debug-mds 20"
  done
  expect_false ceph tell mds.a injectargs mds_max_file_recover -1

  # Test respawn by rank
  without_test_dup_command ceph tell mds.0 respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
      sleep 5   # NOTE(review): loop tail reconstructed — confirm against upstream
      new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  # Test respawn by ID
  without_test_dup_command ceph tell mds.a respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
      sleep 5
      new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  remove_all_fs
  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
882 function test_mon_mds
()
887 ceph osd pool create fs_data
10
888 ceph osd pool create fs_metadata
10
889 ceph fs new
$FS_NAME fs_metadata fs_data
891 ceph fs
set $FS_NAME cluster_down true
892 ceph fs
set $FS_NAME cluster_down false
894 # Legacy commands, act on default fs
895 ceph mds cluster_down
898 ceph mds compat rm_incompat
4
899 ceph mds compat rm_incompat
4
901 # We don't want any MDSs to be up, their activity can interfere with
902 # the "current_epoch + 1" checking below if they're generating updates
903 fail_all_mds
$FS_NAME
906 expect_false ceph mds deactivate
2
910 for mds_gid
in $
(get_mds_gids
$FS_NAME) ; do
911 ceph mds metadata
$mds_id
915 ceph mds count-metadata os
917 # XXX mds fail, but how do you undo it?
918 mdsmapfile
=$TEMP_DIR/mdsmap.$$
919 current_epoch
=$
(ceph mds getmap
-o $mdsmapfile --no-log-to-stderr 2>&1 |
grep epoch |
sed 's/.*epoch //')
923 ceph osd pool create data2
10
924 ceph osd pool create data3
10
925 data2_pool
=$
(ceph osd dump |
grep "pool.*'data2'" |
awk '{print $2;}')
926 data3_pool
=$
(ceph osd dump |
grep "pool.*'data3'" |
awk '{print $2;}')
927 ceph mds add_data_pool
$data2_pool
928 ceph mds add_data_pool
$data3_pool
929 ceph mds add_data_pool
100 >& $TMPFILE || true
930 check_response
"Error ENOENT"
931 ceph mds add_data_pool foobarbaz
>& $TMPFILE || true
932 check_response
"Error ENOENT"
933 ceph mds remove_data_pool
$data2_pool
934 ceph mds remove_data_pool
$data3_pool
935 ceph osd pool delete data2 data2
--yes-i-really-really-mean-it
936 ceph osd pool delete data3 data3
--yes-i-really-really-mean-it
937 ceph mds
set allow_multimds false
938 expect_false ceph mds set_max_mds
4
939 ceph mds
set allow_multimds true
940 ceph mds set_max_mds
4
941 ceph mds set_max_mds
3
942 ceph mds set_max_mds
256
943 expect_false ceph mds set_max_mds
257
944 ceph mds
set max_mds
4
945 ceph mds
set max_mds
256
946 expect_false ceph mds
set max_mds
257
947 expect_false ceph mds
set max_mds asdf
948 expect_false ceph mds
set inline_data true
949 ceph mds
set inline_data true
--yes-i-really-mean-it
950 ceph mds
set inline_data
yes --yes-i-really-mean-it
951 ceph mds
set inline_data
1 --yes-i-really-mean-it
952 expect_false ceph mds
set inline_data
--yes-i-really-mean-it
953 ceph mds
set inline_data false
954 ceph mds
set inline_data no
955 ceph mds
set inline_data
0
956 expect_false ceph mds
set inline_data asdf
957 ceph mds
set max_file_size
1048576
958 expect_false ceph mds
set max_file_size
123asdf
960 expect_false ceph mds
set allow_new_snaps
961 expect_false ceph mds
set allow_new_snaps true
962 ceph mds
set allow_new_snaps true
--yes-i-really-mean-it
963 ceph mds
set allow_new_snaps
0
964 ceph mds
set allow_new_snaps false
965 ceph mds
set allow_new_snaps no
966 expect_false ceph mds
set allow_new_snaps taco
968 # we should never be able to add EC pools as data or metadata pools
969 # create an ec-pool...
970 ceph osd pool create mds-ec-pool
10 10 erasure
972 ceph mds add_data_pool mds-ec-pool
2>$TMPFILE
973 check_response
'erasure-code' $?
22
975 ec_poolnum
=$
(ceph osd dump |
grep "pool.* 'mds-ec-pool" |
awk '{print $2;}')
976 data_poolnum
=$
(ceph osd dump |
grep "pool.* 'fs_data" |
awk '{print $2;}')
977 metadata_poolnum
=$
(ceph osd dump |
grep "pool.* 'fs_metadata" |
awk '{print $2;}')
979 fail_all_mds
$FS_NAME
982 # Check that rmfailed requires confirmation
983 expect_false ceph mds rmfailed
0
984 ceph mds rmfailed
0 --yes-i-really-mean-it
987 # Check that `newfs` is no longer permitted
988 expect_false ceph mds newfs
$metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
990 # Check that 'fs reset' runs
991 ceph fs
reset $FS_NAME --yes-i-really-mean-it
993 # Check that creating a second FS fails by default
994 ceph osd pool create fs_metadata2
10
995 ceph osd pool create fs_data2
10
997 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
1000 # Check that setting enable_multiple enables creation of second fs
1001 ceph fs flag
set enable_multiple true
--yes-i-really-mean-it
1002 ceph fs new cephfs2 fs_metadata2 fs_data2
1004 # Clean up multi-fs stuff
1005 fail_all_mds cephfs2
1006 ceph fs
rm cephfs2
--yes-i-really-mean-it
1007 ceph osd pool delete fs_metadata2 fs_metadata2
--yes-i-really-really-mean-it
1008 ceph osd pool delete fs_data2 fs_data2
--yes-i-really-really-mean-it
1010 fail_all_mds
$FS_NAME
1012 # Clean up to enable subsequent fs new tests
1013 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1016 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
1017 check_response
'erasure-code' $?
22
1018 ceph fs new
$FS_NAME mds-ec-pool fs_data
2>$TMPFILE
1019 check_response
'erasure-code' $?
22
1020 ceph fs new
$FS_NAME mds-ec-pool mds-ec-pool
2>$TMPFILE
1021 check_response
'erasure-code' $?
22
1024 # ... new create a cache tier in front of the EC pool...
1025 ceph osd pool create mds-tier
2
1026 ceph osd tier add mds-ec-pool mds-tier
1027 ceph osd tier set-overlay mds-ec-pool mds-tier
1028 tier_poolnum
=$
(ceph osd dump |
grep "pool.* 'mds-tier" |
awk '{print $2;}')
1030 # Use of a readonly tier should be forbidden
1031 ceph osd tier cache-mode mds-tier
readonly --yes-i-really-mean-it
1033 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
1034 check_response
'has a write tier (mds-tier) that is configured to forward' $?
22
1037 # Use of a writeback tier should enable FS creation
1038 ceph osd tier cache-mode mds-tier writeback
1039 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force
1041 # While a FS exists using the tiered pools, I should not be allowed
1042 # to remove the tier
1044 ceph osd tier remove-overlay mds-ec-pool
2>$TMPFILE
1045 check_response
'in use by CephFS' $?
16
1046 ceph osd tier remove mds-ec-pool mds-tier
2>$TMPFILE
1047 check_response
'in use by CephFS' $?
16
1050 fail_all_mds
$FS_NAME
1051 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1053 # ... but we should be forbidden from using the cache pool in the FS directly.
1055 ceph fs new
$FS_NAME fs_metadata mds-tier
--force 2>$TMPFILE
1056 check_response
'in use as a cache tier' $?
22
1057 ceph fs new
$FS_NAME mds-tier fs_data
2>$TMPFILE
1058 check_response
'in use as a cache tier' $?
22
1059 ceph fs new
$FS_NAME mds-tier mds-tier
2>$TMPFILE
1060 check_response
'in use as a cache tier' $?
22
1063 # Clean up tier + EC pools
1064 ceph osd tier remove-overlay mds-ec-pool
1065 ceph osd tier remove mds-ec-pool mds-tier
1067 # Create a FS using the 'cache' pool now that it's no longer a tier
1068 ceph fs new
$FS_NAME fs_metadata mds-tier
--force
1070 # We should be forbidden from using this pool as a tier now that
1071 # it's in use for CephFS
1073 ceph osd tier add mds-ec-pool mds-tier
2>$TMPFILE
1074 check_response
'in use by CephFS' $?
16
1077 fail_all_mds
$FS_NAME
1078 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1080 # We should be permitted to use an EC pool with overwrites enabled
1081 # as the data pool...
1082 ceph osd pool
set mds-ec-pool allow_ec_overwrites true
1083 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
1084 fail_all_mds
$FS_NAME
1085 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1087 # ...but not as the metadata pool
1089 ceph fs new
$FS_NAME mds-ec-pool fs_data
2>$TMPFILE
1090 check_response
'erasure-code' $?
22
1093 ceph osd pool delete mds-ec-pool mds-ec-pool
--yes-i-really-really-mean-it
1095 # Create a FS and check that we can subsequently add a cache tier to it
1096 ceph fs new
$FS_NAME fs_metadata fs_data
--force
1098 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1099 ceph osd tier add fs_metadata mds-tier
1100 ceph osd tier cache-mode mds-tier writeback
1101 ceph osd tier set-overlay fs_metadata mds-tier
1103 # Removing tier should be permitted because the underlying pool is
1104 # replicated (#11504 case)
1105 ceph osd tier cache-mode mds-tier proxy
1106 ceph osd tier remove-overlay fs_metadata
1107 ceph osd tier remove fs_metadata mds-tier
1108 ceph osd pool delete mds-tier mds-tier
--yes-i-really-really-mean-it
1111 fail_all_mds
$FS_NAME
1112 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1117 # ceph mds tell mds.a getmap
1120 # ceph mds set_state
1123 ceph osd pool delete fs_data fs_data
--yes-i-really-really-mean-it
1124 ceph osd pool delete fs_metadata fs_metadata
--yes-i-really-really-mean-it
1127 function test_mon_mds_metadata
()
1129 local nmons
=$
(ceph tell
'mon.*' version |
grep -c 'version')
1133 sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
1134 while read gid id rank
; do
1135 ceph mds metadata
${gid} |
grep '"hostname":'
1136 ceph mds metadata
${id} |
grep '"hostname":'
1137 ceph mds metadata
${rank} |
grep '"hostname":'
1139 local n
=$
(ceph tell
'mon.*' mds metadata
${id} |
grep -c '"hostname":')
1140 test "$n" -eq "$nmons"
1143 expect_false ceph mds metadata UNKNOWN
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, commands split across lines, some statements missing. Code left
# byte-identical; comments only.
#
# test_mon_mon: basic monitor commands -- monmap retrieval and the
# `mon feature set` gating (valid name needs --yes-i-really-mean-it,
# bogus names fail either way).
1146 function test_mon_mon
()
1148 # print help message
# getmap must produce a non-empty monmap file.
1152 ceph mon getmap
-o $TEMP_DIR/monmap.$$
1153 [ -s $TEMP_DIR/monmap.$$
]
# Known feature succeeds only with the confirmation flag.
1159 ceph mon feature
set kraken
--yes-i-really-mean-it
# Unknown feature is rejected with and without the flag.
1160 expect_false ceph mon feature
set abcd
1161 expect_false ceph mon feature
set abcd
--yes-i-really-mean-it
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, commands split across lines, several statements missing (e.g. the
# branches that return early and the final `echo $fn`). Code left
# byte-identical; comments only.
#
# gen_secrets_file: writes a secrets fixture file of the requested type
# into $TEMP_DIR and (in the original) prints its path; used by
# test_mon_osd_create_destroy below.
1164 function gen_secrets_file
()
1166 # lets assume we can have the following types
1167 # all - generates both cephx and lockbox, with mock dm-crypt key
1168 # cephx - only cephx
1169 # no_cephx - lockbox and dm-crypt, no cephx
1170 # no_lockbox - dm-crypt and cephx, no lockbox
1171 # empty - empty file
1172 # empty_json - correct json, empty map
1173 # bad_json - bad json :)
# $t is the requested type (assignment is in the missing lines -- TODO confirm).
1176 if [[ -z "$t" ]]; then
# Unique temp file for this fixture.
1180 fn
=$
(mktemp
$TEMP_DIR/secret.XXXXXX
)
1182 if [[ "$t" == "empty" ]]; then
# Deliberately invalid JSON payload.
1187 if [[ "$t" == "bad_json" ]]; then
1188 echo "asd: ; }" >> $fn
1190 elif [[ "$t" == "empty_json" ]]; then
# Generate fresh keys for each secret component.
1195 cephx_secret
="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
1196 lb_secret
="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
1197 dmcrypt_key
="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
# Emit the combination matching the requested type.
1199 if [[ "$t" == "all" ]]; then
1200 echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
1201 elif [[ "$t" == "cephx" ]]; then
1202 echo "$cephx_secret" >> $fn
1203 elif [[ "$t" == "no_cephx" ]]; then
1204 echo "$lb_secret,$dmcrypt_key" >> $fn
1205 elif [[ "$t" == "no_lockbox" ]]; then
1206 echo "$cephx_secret,$dmcrypt_key" >> $fn
# Fallback branch for an unrecognized type.
1208 echo "unknown gen_secrets_file() type \'$fn\'"
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, commands split across lines, loop bodies / `done` / `fi` and other
# statements missing (original line numbers jump, e.g. 1229 -> 1235).
# Code left byte-identical; comments only.
#
# test_mon_osd_create_destroy: exercises `ceph osd new`, `osd destroy` and
# `osd purge`, including secrets handling (via gen_secrets_file) and the
# associated auth keys and dm-crypt config-key entries.
1215 function test_mon_osd_create_destroy
()
# Malformed `osd new` invocations must fail with EINVAL.
1217 ceph osd new
2>&1 |
grep 'EINVAL'
1218 ceph osd new
'' -1 2>&1 |
grep 'EINVAL'
1219 ceph osd new
'' 10 2>&1 |
grep 'EINVAL'
# Record current max_osd and the osd set so they can be restored/compared.
1221 old_maxosd
=$
(ceph osd getmaxosd |
sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1223 old_osds
=$
(ceph osd
ls)
1224 num_osds
=$
(ceph osd
ls |
wc -l)
# Create a new osd from a uuid ($uuid is set in the missing lines -- TODO confirm).
1227 id
=$
(ceph osd new
$uuid 2>/dev
/null
)
1229 for i
in $old_osds; do
# Re-creating with the same uuid must be idempotent (same id back).
1235 id2
=`ceph osd new $uuid 2>/dev/null`
1239 ceph osd new
$uuid $id
# Same uuid with a different (next-free) id must be rejected with EEXIST.
1241 id3
=$
(ceph osd getmaxosd |
sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1242 ceph osd new
$uuid $
((id3
+1)) 2>&1 |
grep EEXIST
# A different uuid must get a different id; mixing uuid/id pairs fails.
1245 id2
=$
(ceph osd new
$uuid2)
1247 [[ "$id2" != "$id" ]]
1249 ceph osd new
$uuid $id2 2>&1 |
grep EEXIST
1250 ceph osd new
$uuid2 $id2
# Build the secrets fixtures used below.
1253 empty_secrets
=$
(gen_secrets_file
"empty")
1254 empty_json
=$
(gen_secrets_file
"empty_json")
1255 all_secrets
=$
(gen_secrets_file
"all")
1256 cephx_only
=$
(gen_secrets_file
"cephx")
1257 no_cephx
=$
(gen_secrets_file
"no_cephx")
1258 no_lockbox
=$
(gen_secrets_file
"no_lockbox")
1259 bad_json
=$
(gen_secrets_file
"bad_json")
1261 # empty secrets should be idempotent
1262 new_id
=$
(ceph osd new
$uuid $id -i $empty_secrets)
1263 [[ "$new_id" == "$id" ]]
1265 # empty json, thus empty secrets
1266 new_id
=$
(ceph osd new
$uuid $id -i $empty_json)
1267 [[ "$new_id" == "$id" ]]
# Supplying secrets for an existing osd must fail with EEXIST.
1269 ceph osd new
$uuid $id -i $all_secrets 2>&1 |
grep 'EEXIST'
1273 ceph osd setmaxosd
$old_maxosd
# Bad/incomplete secrets payloads must fail with EINVAL.
1275 ceph osd new
$uuid -i $bad_json 2>&1 |
grep 'EINVAL'
1276 ceph osd new
$uuid -i $no_cephx 2>&1 |
grep 'EINVAL'
1277 ceph osd new
$uuid -i $no_lockbox 2>&1 |
grep 'EINVAL'
# Fresh osd with full secrets; then check auth keys and dm-crypt entry exist.
1280 id
=$
(ceph osd new
$uuid -i $all_secrets)
1287 # validate secrets and dm-crypt are set
1288 k
=$
(ceph auth get-key osd.
$id --format=json-pretty
2>/dev
/null | jq
'.key')
1289 s
=$
(cat $all_secrets | jq
'.cephx_secret')
1291 k
=$
(ceph auth get-key client.osd-lockbox.
$uuid --format=json-pretty
2>/dev
/null | \
1293 s
=$
(cat $all_secrets | jq
'.cephx_lockbox_secret')
1295 ceph config-key exists dm-crypt
/osd
/$uuid/luks
# cephx-only osd: no lockbox key, no dm-crypt config-key entry.
1298 id2
=$
(ceph osd new
$uuid2 -i $cephx_only)
1300 [[ "$i" != "$id2" ]]
1304 k
=$
(ceph auth get-key osd.
$id --format=json-pretty
2>/dev
/null | jq
'.key')
1305 s
=$
(cat $all_secrets | jq
'.cephx_secret')
1307 expect_false ceph auth get-key client.osd-lockbox.
$uuid2
1308 expect_false ceph config-key exists dm-crypt
/osd
/$uuid2/luks
# destroy (by OsdName and by id) removes the auth key but keeps the osd
# in the map flagged as destroyed.
1310 ceph osd destroy osd.
$id2 --yes-i-really-mean-it
1311 ceph osd destroy
$id2 --yes-i-really-mean-it
1313 expect_false ceph auth get-key osd.
$id2
1314 ceph osd dump |
grep osd.
$id2 |
grep destroyed
# Recreating into the destroyed slot clears the destroyed flag and
# restores keys / dm-crypt entry.
1318 ceph osd new
$uuid3 $id3 -i $all_secrets
1319 ceph osd dump |
grep osd.
$id3 | expect_false
grep destroyed
1320 ceph auth get-key client.osd-lockbox.
$uuid3
1321 ceph auth get-key osd.
$id3
1322 ceph config-key exists dm-crypt
/osd
/$uuid3/luks
# purge removes the osd, its keys and its dm-crypt entry entirely.
1324 ceph osd purge osd.
$id3 --yes-i-really-mean-it
1325 expect_false ceph osd
find $id2
1326 expect_false ceph auth get-key osd.
$id2
1327 expect_false ceph auth get-key client.osd-lockbox.
$uuid3
1328 expect_false ceph config-key exists dm-crypt
/osd
/$uuid3/luks
1329 ceph osd purge osd.
$id3 --yes-i-really-mean-it
1330 ceph osd purge osd.
$id3 --yes-i-really-mean-it # idempotent
1332 ceph osd purge osd.
$id --yes-i-really-mean-it
# Purging a nonexistent id is accepted (cleanup semantics).
1333 ceph osd purge
123456 --yes-i-really-mean-it
1334 expect_false ceph osd
find $id
1335 expect_false ceph auth get-key osd.
$id
1336 expect_false ceph auth get-key client.osd-lockbox.
$uuid
1337 expect_false ceph config-key exists dm-crypt
/osd
/$uuid/luks
# Remove the secrets fixtures.
1339 rm $empty_secrets $empty_json $all_secrets $cephx_only \
1340 $no_cephx $no_lockbox $bad_json
# The osd set must be back to its initial state.
1342 for i
in $
(ceph osd
ls); do
1344 [[ "$i" != "$id2" ]]
1345 [[ "$i" != "$id3" ]]
1348 [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
1349 ceph osd setmaxosd
$old_maxosd
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, commands split across lines. Code left byte-identical; comments only.
#
# test_mon_config_key: round-trips a unique key through
# `ceph config-key set/get/list/dump/rm` and verifies it appears and
# disappears as expected.
1353 function test_mon_config_key
()
# Deliberately unique key so the `grep -c` counts below are unambiguous.
1355 key
=asdfasdfqwerqwreasdfuniquesa123df
# Key must not exist yet.
1356 ceph config-key list |
grep -c $key |
grep 0
1357 ceph config-key get
$key |
grep -c bar |
grep 0
# Set it, then verify via get, list and dump.
1358 ceph config-key
set $key bar
1359 ceph config-key get
$key |
grep bar
1360 ceph config-key list |
grep -c $key |
grep 1
1361 ceph config-key dump |
grep $key |
grep bar
# Remove it and verify it is gone everywhere.
1362 ceph config-key
rm $key
1363 expect_false ceph config-key get
$key
1364 ceph config-key list |
grep -c $key |
grep 0
1365 ceph config-key dump |
grep -c $key |
grep 0
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, commands split across lines, many statements missing (line numbers
# jump repeatedly, e.g. 1430 -> 1435). Code left byte-identical; comments
# only. Restore from the original source before running.
#
# test_mon_osd: broad coverage of osd-related mon commands -- blacklist,
# crush tunables, cluster flags, no{up,down,in,out}, reweight, maxosd,
# osd create, pool map/lspools and osd tree filters.
1368 function test_mon_osd
()
# --- blacklist add/ls/rm round trip -------------------------------------
1373 bl
=192.168.0.1:0/1000
1374 ceph osd blacklist add
$bl
1375 ceph osd blacklist
ls |
grep $bl
# json-pretty escapes '/', so un-escape before matching the addr.
1376 ceph osd blacklist
ls --format=json-pretty |
sed 's/\\\//\//' |
grep $bl
1377 ceph osd dump
--format=json-pretty |
grep $bl
1378 ceph osd dump |
grep "^blacklist $bl"
1379 ceph osd blacklist
rm $bl
1380 ceph osd blacklist
ls | expect_false
grep $bl
1383 # test without nonce, invalid nonce
1384 ceph osd blacklist add
$bl
1385 ceph osd blacklist
ls |
grep $bl
1386 ceph osd blacklist
rm $bl
# NOTE(review): "$expect_false bl" here looks like extraction damage of
# "grep $bl" -- confirm against the original source.
1387 ceph osd blacklist
ls | expect_false
grep $expect_false bl
1388 expect_false
"ceph osd blacklist $bl/-1"
1389 expect_false
"ceph osd blacklist $bl/foo"
1391 # test with wrong address
1392 expect_false
"ceph osd blacklist 1234.56.78.90/100"
# blacklist clear empties the list.
1395 ceph osd blacklist add
$bl
1396 ceph osd blacklist
ls |
grep $bl
1397 ceph osd blacklist
clear
1398 ceph osd blacklist
ls | expect_false
grep $bl
# --- crush tunables profiles and individual tunables --------------------
1403 ceph osd crush reweight-all
1404 ceph osd crush tunables legacy
1405 ceph osd crush show-tunables |
grep argonaut
1406 ceph osd crush tunables bobtail
1407 ceph osd crush show-tunables |
grep bobtail
1408 ceph osd crush tunables firefly
1409 ceph osd crush show-tunables |
grep firefly
1411 ceph osd crush set-tunable straw_calc_version
0
1412 ceph osd crush get-tunable straw_calc_version |
grep 0
1413 ceph osd crush set-tunable straw_calc_version
1
1414 ceph osd crush get-tunable straw_calc_version |
grep 1
1417 # require-min-compat-client
1418 expect_false ceph osd set-require-min-compat-client dumpling
# firefly tunables
1419 ceph osd set-require-min-compat-client luminous
1420 ceph osd dump |
grep 'require_min_compat_client luminous'
1425 # how do I tell when these are done?
1427 ceph osd deep-scrub
0
# --- cluster flags (set/unset loop body is in the missing lines) --------
1430 for f
in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
1435 expect_false ceph osd
unset sortbitwise
# cannot be unset
1436 expect_false ceph osd
set bogus
1437 expect_false ceph osd
unset bogus
1438 ceph osd require-osd-release luminous
1439 # can't lower (or use new command for anything but jewel)
1440 expect_false ceph osd require-osd-release jewel
1441 # these are no-ops but should succeed.
1442 ceph osd
set require_jewel_osds
1443 ceph osd
set require_kraken_osds
1444 expect_false ceph osd
unset require_jewel_osds
# --- osd down/up cycle; wait for osd.0 to rejoin ------------------------
1448 ceph osd dump |
grep 'osd.0 down'
1451 for ((i
=0; i
< $max_run; i
++)); do
1452 if ! ceph osd dump |
grep 'osd.0 up'; then
1453 echo "waiting for osd.0 to come back up ($i/$max_run)"
1459 ceph osd dump |
grep 'osd.0 up'
1461 ceph osd dump |
grep 'osd.0 up'
1462 # ceph osd find expects the OsdName, so both ints and osd.n should work.
1465 expect_false ceph osd
find osd.xyz
1466 expect_false ceph osd
find xyz
1467 expect_false ceph osd
find 0.1
1468 ceph
--format plain osd
find 1 # falls back to json-pretty
# osd metadata checks only run on Linux (distro field).
1469 if [ `uname` == Linux
]; then
1470 ceph osd metadata
1 |
grep 'distro'
1471 ceph
--format plain osd metadata
1 |
grep 'distro' # falls back to json-pretty
# --- out/in and the no{down,in,out} health warnings ---------------------
1474 ceph osd dump |
grep 'osd.0.*out'
1476 ceph osd dump |
grep 'osd.0.*in'
1479 ceph osd add-nodown
0 1
1480 ceph health detail |
grep 'NODOWN'
1481 ceph osd rm-nodown
0 1
1482 ! ceph health detail |
grep 'NODOWN'
1484 ceph osd out
0 # so we can mark it as noin later
1486 ceph health detail |
grep 'NOIN'
1488 ! ceph health detail |
grep 'NOIN'
1491 ceph osd add-noout
0
1492 ceph health detail |
grep 'NOOUT'
1494 ! ceph health detail |
grep 'NOOUT'
# Invalid osd ids must be rejected by add-/rm- variants.
1497 expect_false ceph osd add-noup
797er
1498 expect_false ceph osd add-nodown u9uwer
1499 expect_false ceph osd add-noin
78~
15
1500 expect_false ceph osd add-noout
0 all
1
1502 expect_false ceph osd rm-noup
1234567
1503 expect_false ceph osd rm-nodown fsadf7
1504 expect_false ceph osd rm-noin
0 1 any
1505 expect_false ceph osd rm-noout
790-fd
# Apply flags to every osd in the default crush tree, then clear with
# the 'any' / 'all' wildcards.
1507 ids
=`ceph osd ls-tree default`
1510 ceph osd add-nodown
$osd
1511 ceph osd add-noout
$osd
1513 ceph
-s |
grep 'NODOWN'
1514 ceph
-s |
grep 'NOOUT'
1515 ceph osd rm-nodown any
1516 ceph osd rm-noout all
1517 ! ceph
-s |
grep 'NODOWN'
1518 ! ceph
-s |
grep 'NOOUT'
1520 # make sure mark out preserves weight
1521 ceph osd reweight osd
.0 .5
1522 ceph osd dump |
grep ^osd
.0 |
grep 'weight 0.5'
1525 ceph osd dump |
grep ^osd
.0 |
grep 'weight 0.5'
# --- getmap / maxosd handling -------------------------------------------
1527 ceph osd getmap
-o $f
1530 save
=$
(ceph osd getmaxosd |
sed -e 's/max_osd = //' -e 's/ in epoch.*//')
# Shrinking below an in-use id must fail with EBUSY.
1532 ceph osd setmaxosd $
((save
- 1)) 2>&1 |
grep 'EBUSY'
1533 ceph osd setmaxosd
10
1534 ceph osd getmaxosd |
grep 'max_osd = 10'
1535 ceph osd setmaxosd
$save
1536 ceph osd getmaxosd |
grep "max_osd = $save"
# Every osd must answer `version` (EAGAIN/ENXIO retried via helpers).
1538 for id
in `ceph osd ls` ; do
1539 retry_eagain
5 map_enxio_to_eagain ceph tell osd.
$id version
# Removing an up osd must fail with EBUSY.
1542 ceph osd
rm 0 2>&1 |
grep 'EBUSY'
# --- osd create id-allocation behavior ----------------------------------
1544 local old_osds
=$
(echo $
(ceph osd
ls))
1545 id
=`ceph osd create`
1547 ceph osd lost
$id --yes-i-really-mean-it
1548 expect_false ceph osd setmaxosd
$id
1549 local new_osds
=$
(echo $
(ceph osd
ls))
1550 for id
in $
(echo $new_osds |
sed -e "s/$old_osds//") ; do
1555 id
=`ceph osd create $uuid`
1556 id2
=`ceph osd create $uuid`
1563 ceph osd setmaxosd
$id
1564 ceph osd getmaxosd |
grep "max_osd = $save"
# uuid/id mismatches must fail with EINVAL / EEXIST.
1567 ceph osd create
$uuid 0 2>&1 |
grep 'EINVAL'
1568 ceph osd create
$uuid $
((max_osd
- 1)) 2>&1 |
grep 'EINVAL'
1570 id
=`ceph osd create $uuid $max_osd`
1571 [ "$id" = "$max_osd" ]
1573 max_osd
=$
((max_osd
+ 1))
1574 ceph osd getmaxosd |
grep "max_osd = $max_osd"
1576 ceph osd create
$uuid $
((id
- 1)) 2>&1 |
grep 'EEXIST'
1577 ceph osd create
$uuid $
((id
+ 1)) 2>&1 |
grep 'EEXIST'
1578 id2
=`ceph osd create $uuid`
1580 id2
=`ceph osd create $uuid $id`
# Creating far past the end leaves a gap but doesn't raise max_osd.
1584 local gap_start
=$max_osd
1585 id
=`ceph osd create $uuid $((gap_start + 100))`
1586 [ "$id" = "$((gap_start + 100))" ]
1588 ceph osd getmaxosd |
grep "max_osd = $max_osd"
1590 ceph osd create
$uuid $gap_start 2>&1 |
grep 'EEXIST'
1593 # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
1594 # is repeated and consumes two osd id, not just one.
1597 if test "$CEPH_CLI_TEST_DUP_COMMAND" ; then
1598 next_osd
=$
((gap_start
+ 1))
1602 id
=`ceph osd create`
1603 [ "$id" = "$next_osd" ]
1605 next_osd
=$
((id
+ 1))
1606 id
=`ceph osd create $(uuidgen)`
1607 [ "$id" = "$next_osd" ]
1609 next_osd
=$
((id
+ 1))
1610 id
=`ceph osd create $(uuidgen) $next_osd`
1611 [ "$id" = "$next_osd" ]
# Clean up every osd created above; restore max_osd.
1613 local new_osds
=$
(echo $
(ceph osd
ls))
1614 for id
in $
(echo $new_osds |
sed -e "s/$old_osds//") ; do
1618 ceph osd setmaxosd
$save
# --- pool create + osd map lookups --------------------------------------
1621 ceph osd pool create data
10
1622 ceph osd pool application
enable data rados
1623 ceph osd lspools |
grep data
1624 ceph osd map data foo |
grep 'pool.*data.*object.*foo.*pg.*up.*acting'
1625 ceph osd map data foo namespace|
grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
1626 ceph osd pool delete data data
--yes-i-really-really-mean-it
1629 ceph osd dump |
grep 'flags.*pauserd,pausewr'
# --- osd tree state filters: compatible combinations pass, contradictory
# ones (up+down, anything with destroyed) fail ---------------------------
1637 ceph osd tree destroyed
1639 ceph osd tree up out
1640 ceph osd tree down
in
1641 ceph osd tree down out
1642 ceph osd tree out down
1643 expect_false ceph osd tree up down
1644 expect_false ceph osd tree up destroyed
1645 expect_false ceph osd tree down destroyed
1646 expect_false ceph osd tree up down destroyed
1647 expect_false ceph osd tree
in out
1648 expect_false ceph osd tree up foo
1651 ceph osd count-metadata os
1657 ceph osd stat |
grep up
,
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, commands split across lines ($f setup is in missing lines).
# Code left byte-identical; comments only.
#
# test_mon_crush: getcrushmap/setcrushmap epoch semantics -- setting with
# a stale/wrong epoch fails, setting with the fetched epoch bumps to the
# next epoch, and repeating the same set is idempotent.
1660 function test_mon_crush
()
# Fetch the crushmap; the command's last output line is the map epoch.
1663 epoch
=$
(ceph osd getcrushmap
-o $f 2>&1 |
tail -n1)
1666 nextepoch
=$
(( $epoch + 1 ))
1667 echo epoch
$epoch nextepoch
$nextepoch
# Supplying a not-yet-current epoch must be rejected.
1669 expect_false ceph osd setcrushmap
$nextepoch -i $f
# Setting with the fetched epoch succeeds and reports the new epoch.
1670 gotepoch
=$
(ceph osd setcrushmap
$epoch -i $f 2>&1 |
tail -n1)
1671 echo gotepoch
$gotepoch
1672 [ "$gotepoch" -eq "$nextepoch" ]
1673 # should be idempotent
1674 gotepoch
=$
(ceph osd setcrushmap
$epoch -i $f 2>&1 |
tail -n1)
1675 echo epoch
$gotepoch
1676 [ "$gotepoch" -eq "$nextepoch" ]
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, commands split across lines, some statements missing. Code left
# byte-identical; comments only.
#
# test_mon_osd_pool: pool lifecycle -- create/delete, snapshots, rename,
# replicated-vs-erasure create conflicts, and allow_ec_overwrites
# (bluestore-only).
1680 function test_mon_osd_pool
()
# Pool snapshot round trip.
1685 ceph osd pool create data
10
1686 ceph osd pool application
enable data rados
1687 ceph osd pool mksnap data datasnap
1688 rados
-p data lssnap |
grep datasnap
1689 ceph osd pool rmsnap data datasnap
1690 expect_false ceph osd pool rmsnap pool_fake snapshot
1691 ceph osd pool delete data data
--yes-i-really-really-mean-it
# Pool rename.
1693 ceph osd pool create data2
10
1694 ceph osd pool application
enable data2 rados
1695 ceph osd pool rename data2 data3
1696 ceph osd lspools |
grep data3
1697 ceph osd pool delete data3 data3
--yes-i-really-really-mean-it
# Re-creating an existing pool with the same type is idempotent...
1699 ceph osd pool create replicated
12 12 replicated
1700 ceph osd pool create replicated
12 12 replicated
1701 ceph osd pool create replicated
12 12 # default is replicated
1702 ceph osd pool create replicated
12 # default is replicated, pgp_num = pg_num
1703 ceph osd pool application
enable replicated rados
1704 # should fail because the type is not the same
1705 expect_false ceph osd pool create replicated
12 12 erasure
1706 ceph osd lspools |
grep replicated
1707 ceph osd pool create ec_test
1 1 erasure
1708 ceph osd pool application
enable ec_test rados
# allow_ec_overwrites is only valid when all osds run bluestore.
1710 ceph osd count-metadata osd_objectstore |
grep 'bluestore'
1711 if [ $?
-eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
1712 ceph osd pool
set ec_test allow_ec_overwrites true
>& $TMPFILE
1713 check_response
"pool must only be stored on bluestore for scrubbing to work" $?
22
# On bluestore it must succeed, and may not be turned off again.
1715 ceph osd pool
set ec_test allow_ec_overwrites true ||
return 1
1716 expect_false ceph osd pool
set ec_test allow_ec_overwrites false
# Cleanup.
1719 ceph osd pool delete replicated replicated
--yes-i-really-really-mean-it
1720 ceph osd pool delete ec_test ec_test
--yes-i-really-really-mean-it
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, commands split across lines. Code left byte-identical; comments only.
#
# test_mon_osd_pool_quota: set/get pool quotas -- invalid values are
# rejected, valid ones are reflected in plain and json-pretty output,
# and 0 resets a quota to N/A (unlimited).
1723 function test_mon_osd_pool_quota
()
1726 # test osd pool set/get quota
1730 ceph osd pool create tmp-quota-pool
36
1731 ceph osd pool application
enable tmp-quota-pool rados
1733 # set erroneous quotas
1735 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness
10
1736 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes
-1
1737 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
# Valid quotas: 10 bytes and 10M objects (unit suffix accepted).
1741 ceph osd pool set-quota tmp-quota-pool max_bytes
10
1742 ceph osd pool set-quota tmp-quota-pool max_objects
10M
1746 ceph osd pool get-quota tmp-quota-pool |
grep 'max bytes.*10B'
1747 ceph osd pool get-quota tmp-quota-pool |
grep 'max objects.*10240k objects'
1749 # get quotas in json-pretty format
1751 ceph osd pool get-quota tmp-quota-pool
--format=json-pretty | \
1752 grep '"quota_max_objects":.*10485760'
1753 ceph osd pool get-quota tmp-quota-pool
--format=json-pretty | \
1754 grep '"quota_max_bytes":.*10'
# Zero resets both quotas back to N/A.
1758 ceph osd pool set-quota tmp-quota-pool max_bytes
0
1759 ceph osd pool set-quota tmp-quota-pool max_objects
0
1763 ceph osd pool get-quota tmp-quota-pool |
grep 'max bytes.*N/A'
1764 ceph osd pool get-quota tmp-quota-pool |
grep 'max objects.*N/A'
1767 ceph osd pool delete tmp-quota-pool tmp-quota-pool
--yes-i-really-really-mean-it
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, commands split across lines, some statements missing. Code left
# byte-identical; comments only.
#
# test_mon_pg: pg dump/ls variants, full-ratio handling (including the
# OSD_OUT_OF_ORDER_FULL health check), admin-socket injectfull states,
# osd reweight / primary-affinity, and pg-temp.
1770 function test_mon_pg
()
1772 # Make sure we start healthy.
1775 ceph pg debug unfound_objects_exist
1776 ceph pg debug degraded_pgs_exist
1777 ceph pg deep-scrub
1.0
# Every dump flavor must produce valid json output.
1779 ceph pg dump pgs_brief
--format=json
1780 ceph pg dump pgs
--format=json
1781 ceph pg dump pools
--format=json
1782 ceph pg dump osds
--format=json
1783 ceph pg dump
sum --format=json
1784 ceph pg dump all
--format=json
1785 ceph pg dump pgs_brief osds
--format=json
1786 ceph pg dump pools osds pgs_brief
--format=json
1788 ceph pg dump_pools_json
1789 ceph pg dump_stuck inactive
1790 ceph pg dump_stuck unclean
1791 ceph pg dump_stuck stale
1792 ceph pg dump_stuck undersized
1793 ceph pg dump_stuck degraded
# pg ls filters: bogus state rejected; state/pool/primary/osd combos ok.
1797 expect_false ceph pg
ls scrubq
1798 ceph pg
ls active stale repair recovering
1800 ceph pg
ls 1 active stale
1801 ceph pg ls-by-primary osd
.0
1802 ceph pg ls-by-primary osd
.0 1
1803 ceph pg ls-by-primary osd
.0 active
1804 ceph pg ls-by-primary osd
.0 active stale
1805 ceph pg ls-by-primary osd
.0 1 active stale
1806 ceph pg ls-by-osd osd
.0
1807 ceph pg ls-by-osd osd
.0 1
1808 ceph pg ls-by-osd osd
.0 active
1809 ceph pg ls-by-osd osd
.0 active stale
1810 ceph pg ls-by-osd osd
.0 1 active stale
1811 ceph pg ls-by-pool rbd
1812 ceph pg ls-by-pool rbd active stale
1813 # can't test this...
1814 # ceph pg force_create_pg
# getmap must produce a non-empty file; pg map shows the acting set.
1815 ceph pg getmap
-o $TEMP_DIR/map.$$
1816 [ -s $TEMP_DIR/map.$$
]
1817 ceph pg map
1.0 |
grep acting
# --- full/backfillfull/nearfull ratios ----------------------------------
1821 ceph osd set-full-ratio
.962
1822 ceph osd dump |
grep '^full_ratio 0.962'
1823 ceph osd set-backfillfull-ratio
.912
1824 ceph osd dump |
grep '^backfillfull_ratio 0.912'
1825 ceph osd set-nearfull-ratio
.892
1826 ceph osd dump |
grep '^nearfull_ratio 0.892'
1828 # Check health status
# nearfull > backfillfull is out of order -> health warning.
1829 ceph osd set-nearfull-ratio
.913
1830 ceph health
-f json |
grep OSD_OUT_OF_ORDER_FULL
1831 ceph health detail |
grep OSD_OUT_OF_ORDER_FULL
1832 ceph osd set-nearfull-ratio
.892
# backfillfull > full is also out of order.
1833 ceph osd set-backfillfull-ratio
.963
1834 ceph health
-f json |
grep OSD_OUT_OF_ORDER_FULL
1835 ceph health detail |
grep OSD_OUT_OF_ORDER_FULL
1836 ceph osd set-backfillfull-ratio
.912
1838 # Check injected full results
# Each injectfull state must surface the matching health warning, then
# clear once 'none' is injected.
1839 $SUDO ceph
--admin-daemon $
(get_admin_socket osd
.0) injectfull nearfull
1840 wait_for_health
"OSD_NEARFULL"
1841 ceph health detail |
grep "osd.0 is near full"
1842 $SUDO ceph
--admin-daemon $
(get_admin_socket osd
.0) injectfull none
1845 $SUDO ceph
--admin-daemon $
(get_admin_socket osd
.1) injectfull backfillfull
1846 wait_for_health
"OSD_BACKFILLFULL"
1847 ceph health detail |
grep "osd.1 is backfill full"
1848 $SUDO ceph
--admin-daemon $
(get_admin_socket osd
.1) injectfull none
1851 $SUDO ceph
--admin-daemon $
(get_admin_socket osd
.2) injectfull failsafe
1852 # failsafe and full are the same as far as the monitor is concerned
1853 wait_for_health
"OSD_FULL"
1854 ceph health detail |
grep "osd.2 is full"
1855 $SUDO ceph
--admin-daemon $
(get_admin_socket osd
.2) injectfull none
1858 $SUDO ceph
--admin-daemon $
(get_admin_socket osd
.0) injectfull full
1859 wait_for_health
"OSD_FULL"
1860 ceph health detail |
grep "osd.0 is full"
1861 $SUDO ceph
--admin-daemon $
(get_admin_socket osd
.0) injectfull none
1864 ceph pg stat |
grep 'pgs:'
1869 ceph report |
grep osd_stats
# tell must fail for nonexistent / malformed osd names.
1876 ceph tell osd
.0 version
1877 expect_false ceph tell osd
.9999 version
1878 expect_false ceph tell osd.foo version
1882 ceph tell osd
.0 dump_pg_recovery_stats |
grep Started
# --- reweight and primary-affinity bounds -------------------------------
1884 ceph osd reweight
0 0.9
1885 expect_false ceph osd reweight
0 -1
1886 ceph osd reweight osd
.0 1
1888 ceph osd primary-affinity osd
.0 .9
1889 expect_false ceph osd primary-affinity osd
.0 -2
1890 expect_false ceph osd primary-affinity osd
.9999 .5
1891 ceph osd primary-affinity osd
.0 1
# --- pg-temp: accepts ids or OsdNames matching pool size, else fails ----
1893 ceph osd pool
set rbd size
2
1894 ceph osd pg-temp
1.0 0 1
1895 ceph osd pg-temp
1.0 osd
.1 osd
.0
1896 expect_false ceph osd pg-temp
1.0 0 1 2
1897 expect_false ceph osd pg-temp asdf qwer
1898 expect_false ceph osd pg-temp
1.0 asdf
1899 expect_false ceph osd pg-temp
1.0
1901 # don't test ceph osd primary-temp for now
# NOTE(review): garbled extraction -- original line numbers fused into the
# text, commands split across lines, some statements missing. Code left
# byte-identical; comments only.
#
# test_mon_osd_pool_set: exhaustive `ceph osd pool set/get` coverage --
# size, boolean pool flags, scrub intervals/priorities, pg_num limits,
# nosizechange/nopgchange/nodelete guards, hashpspool, compression and
# checksum settings.
1904 function test_mon_osd_pool_set
()
1906 TEST_POOL_GETSET
=pool_getset
1907 ceph osd pool create
$TEST_POOL_GETSET 1
1908 ceph osd pool application
enable $TEST_POOL_GETSET rados
# `get ... all` and each individual field must be readable.
1910 ceph osd pool get
$TEST_POOL_GETSET all
1912 for s
in pg_num pgp_num size min_size crush_rule
; do
1913 ceph osd pool get
$TEST_POOL_GETSET $s
# size can be raised by one and set back.
1916 old_size
=$
(ceph osd pool get
$TEST_POOL_GETSET size |
sed -e 's/size: //')
1917 (( new_size
= old_size
+ 1 ))
1918 ceph osd pool
set $TEST_POOL_GETSET size
$new_size
1919 ceph osd pool get
$TEST_POOL_GETSET size |
grep "size: $new_size"
1920 ceph osd pool
set $TEST_POOL_GETSET size
$old_size
# Erasure pools: size cannot be changed; profile is readable.
1922 ceph osd pool create pool_erasure
1 1 erasure
1923 ceph osd pool application
enable pool_erasure rados
1926 ceph osd pool
set pool_erasure size
4444 2>$TMPFILE
1927 check_response
'not change the size'
1929 ceph osd pool get pool_erasure erasure_code_profile
# auid round trip ($auid is set in the missing lines -- TODO confirm).
1932 ceph osd pool
set $TEST_POOL_GETSET auid
$auid
1933 ceph osd pool get
$TEST_POOL_GETSET auid |
grep $auid
1934 ceph
--format=xml osd pool get
$TEST_POOL_GETSET auid |
grep $auid
1935 ceph osd pool
set $TEST_POOL_GETSET auid
0
# Boolean flags accept true/false and 1/0; anything else is rejected.
1937 for flag
in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub
; do
1938 ceph osd pool
set $TEST_POOL_GETSET $flag false
1939 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: false"
1940 ceph osd pool
set $TEST_POOL_GETSET $flag true
1941 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: true"
1942 ceph osd pool
set $TEST_POOL_GETSET $flag 1
1943 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: true"
1944 ceph osd pool
set $TEST_POOL_GETSET $flag 0
1945 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: false"
1946 expect_false ceph osd pool
set $TEST_POOL_GETSET $flag asdf
1947 expect_false ceph osd pool
set $TEST_POOL_GETSET $flag 2
# Interval/priority settings: unset -> empty output, set -> echoed back,
# 0 -> unset again. Same pattern for each field below.
1950 ceph osd pool get
$TEST_POOL_GETSET scrub_min_interval | expect_false
grep '.'
1951 ceph osd pool
set $TEST_POOL_GETSET scrub_min_interval
123456
1952 ceph osd pool get
$TEST_POOL_GETSET scrub_min_interval |
grep 'scrub_min_interval: 123456'
1953 ceph osd pool
set $TEST_POOL_GETSET scrub_min_interval
0
1954 ceph osd pool get
$TEST_POOL_GETSET scrub_min_interval | expect_false
grep '.'
1956 ceph osd pool get
$TEST_POOL_GETSET scrub_max_interval | expect_false
grep '.'
1957 ceph osd pool
set $TEST_POOL_GETSET scrub_max_interval
123456
1958 ceph osd pool get
$TEST_POOL_GETSET scrub_max_interval |
grep 'scrub_max_interval: 123456'
1959 ceph osd pool
set $TEST_POOL_GETSET scrub_max_interval
0
1960 ceph osd pool get
$TEST_POOL_GETSET scrub_max_interval | expect_false
grep '.'
1962 ceph osd pool get
$TEST_POOL_GETSET deep_scrub_interval | expect_false
grep '.'
1963 ceph osd pool
set $TEST_POOL_GETSET deep_scrub_interval
123456
1964 ceph osd pool get
$TEST_POOL_GETSET deep_scrub_interval |
grep 'deep_scrub_interval: 123456'
1965 ceph osd pool
set $TEST_POOL_GETSET deep_scrub_interval
0
1966 ceph osd pool get
$TEST_POOL_GETSET deep_scrub_interval | expect_false
grep '.'
1968 ceph osd pool get
$TEST_POOL_GETSET recovery_priority | expect_false
grep '.'
1969 ceph osd pool
set $TEST_POOL_GETSET recovery_priority
5
1970 ceph osd pool get
$TEST_POOL_GETSET recovery_priority |
grep 'recovery_priority: 5'
1971 ceph osd pool
set $TEST_POOL_GETSET recovery_priority
0
1972 ceph osd pool get
$TEST_POOL_GETSET recovery_priority | expect_false
grep '.'
1974 ceph osd pool get
$TEST_POOL_GETSET recovery_op_priority | expect_false
grep '.'
1975 ceph osd pool
set $TEST_POOL_GETSET recovery_op_priority
5
1976 ceph osd pool get
$TEST_POOL_GETSET recovery_op_priority |
grep 'recovery_op_priority: 5'
1977 ceph osd pool
set $TEST_POOL_GETSET recovery_op_priority
0
1978 ceph osd pool get
$TEST_POOL_GETSET recovery_op_priority | expect_false
grep '.'
1980 ceph osd pool get
$TEST_POOL_GETSET scrub_priority | expect_false
grep '.'
1981 ceph osd pool
set $TEST_POOL_GETSET scrub_priority
5
1982 ceph osd pool get
$TEST_POOL_GETSET scrub_priority |
grep 'scrub_priority: 5'
1983 ceph osd pool
set $TEST_POOL_GETSET scrub_priority
0
1984 ceph osd pool get
$TEST_POOL_GETSET scrub_priority | expect_false
grep '.'
# nopgchange blocks pg_num/pgp_num changes until cleared.
1986 ceph osd pool
set $TEST_POOL_GETSET nopgchange
1
1987 expect_false ceph osd pool
set $TEST_POOL_GETSET pg_num
10
1988 expect_false ceph osd pool
set $TEST_POOL_GETSET pgp_num
10
1989 ceph osd pool
set $TEST_POOL_GETSET nopgchange
0
1990 ceph osd pool
set $TEST_POOL_GETSET pg_num
10
1992 ceph osd pool
set $TEST_POOL_GETSET pgp_num
10
# pg_num may grow by at most 32 per osd in one step; one more must fail.
1994 old_pgs
=$
(ceph osd pool get
$TEST_POOL_GETSET pg_num |
sed -e 's/pg_num: //')
1995 new_pgs
=$
(($old_pgs + $
(ceph osd stat
--format json | jq
'.num_osds') * 32))
1996 ceph osd pool
set $TEST_POOL_GETSET pg_num
$new_pgs
1997 ceph osd pool
set $TEST_POOL_GETSET pgp_num
$new_pgs
1999 old_pgs
=$
(ceph osd pool get
$TEST_POOL_GETSET pg_num |
sed -e 's/pg_num: //')
2000 new_pgs
=$
(($old_pgs + $
(ceph osd stat
--format json | jq
'.num_osds') * 32 + 1))
2001 expect_false ceph osd pool
set $TEST_POOL_GETSET pg_num
$new_pgs
# nosizechange blocks size/min_size changes until cleared.
2003 ceph osd pool
set $TEST_POOL_GETSET nosizechange
1
2004 expect_false ceph osd pool
set $TEST_POOL_GETSET size
2
2005 expect_false ceph osd pool
set $TEST_POOL_GETSET min_size
2
2006 ceph osd pool
set $TEST_POOL_GETSET nosizechange
0
2007 ceph osd pool
set $TEST_POOL_GETSET size
2
2009 ceph osd pool
set $TEST_POOL_GETSET min_size
2
# hashpspool toggles require --yes-i-really-mean-it.
2011 expect_false ceph osd pool
set $TEST_POOL_GETSET hashpspool
0
2012 ceph osd pool
set $TEST_POOL_GETSET hashpspool
0 --yes-i-really-mean-it
2014 expect_false ceph osd pool
set $TEST_POOL_GETSET hashpspool
1
2015 ceph osd pool
set $TEST_POOL_GETSET hashpspool
1 --yes-i-really-mean-it
2017 ceph osd pool get rbd crush_rule |
grep 'crush_rule: '
# Compression settings: unset -> empty get; value echoed back; `unset`
# keyword clears each field.
2019 ceph osd pool get
$TEST_POOL_GETSET compression_mode | expect_false
grep '.'
2020 ceph osd pool
set $TEST_POOL_GETSET compression_mode aggressive
2021 ceph osd pool get
$TEST_POOL_GETSET compression_mode |
grep 'aggressive'
2022 ceph osd pool
set $TEST_POOL_GETSET compression_mode
unset
2023 ceph osd pool get
$TEST_POOL_GETSET compression_mode | expect_false
grep '.'
2025 ceph osd pool get
$TEST_POOL_GETSET compression_algorithm | expect_false
grep '.'
2026 ceph osd pool
set $TEST_POOL_GETSET compression_algorithm zlib
2027 ceph osd pool get
$TEST_POOL_GETSET compression_algorithm |
grep 'zlib'
2028 ceph osd pool
set $TEST_POOL_GETSET compression_algorithm
unset
2029 ceph osd pool get
$TEST_POOL_GETSET compression_algorithm | expect_false
grep '.'
# required_ratio must be within [0, 1]; 0 clears it.
2031 ceph osd pool get
$TEST_POOL_GETSET compression_required_ratio | expect_false
grep '.'
2032 expect_false ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
1.1
2033 expect_false ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
-.2
2034 ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
.2
2035 ceph osd pool get
$TEST_POOL_GETSET compression_required_ratio |
grep '.2'
2036 ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
0
2037 ceph osd pool get
$TEST_POOL_GETSET compression_required_ratio | expect_false
grep '.'
2039 ceph osd pool get
$TEST_POOL_GETSET csum_type | expect_false
grep '.'
2040 ceph osd pool
set $TEST_POOL_GETSET csum_type crc32c
2041 ceph osd pool get
$TEST_POOL_GETSET csum_type |
grep 'crc32c'
2042 ceph osd pool
set $TEST_POOL_GETSET csum_type
unset
2043 ceph osd pool get
$TEST_POOL_GETSET csum_type | expect_false
grep '.'
# Same set/clear pattern for the blob/block size limits.
2045 for size
in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block
; do
2046 ceph osd pool get
$TEST_POOL_GETSET $size | expect_false
grep '.'
2047 ceph osd pool
set $TEST_POOL_GETSET $size 100
2048 ceph osd pool get
$TEST_POOL_GETSET $size |
grep '100'
2049 ceph osd pool
set $TEST_POOL_GETSET $size 0
2050 ceph osd pool get
$TEST_POOL_GETSET $size | expect_false
grep '.'
# nodelete blocks deletion until cleared; then clean up the pool.
2053 ceph osd pool
set $TEST_POOL_GETSET nodelete
1
2054 expect_false ceph osd pool delete
$TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2055 ceph osd pool
set $TEST_POOL_GETSET nodelete
0
2056 ceph osd pool delete
$TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
function test_mon_osd_tiered_pool_set()
{
  # Exercise cache-tier pool options: they must be settable/gettable on a
  # pool that actually is a tier ('real-tier') and must be rejected on a
  # plain pool ('fake-tier').
  # NOTE(review): reconstructed from an extraction-mangled copy; line
  # structure (braces, blank lines) restored — verify against upstream.

  # this is really a tier pool
  ceph osd pool create real-tier 2
  ceph osd tier add rbd real-tier

  ceph osd pool set real-tier hit_set_type explicit_hash
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
  ceph osd pool set real-tier hit_set_type explicit_object
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
  ceph osd pool set real-tier hit_set_type bloom
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
  expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
  ceph osd pool set real-tier hit_set_period 123
  ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
  ceph osd pool set real-tier hit_set_count 12
  ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
  ceph osd pool set real-tier hit_set_fpp .01
  ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"

  ceph osd pool set real-tier target_max_objects 123
  ceph osd pool get real-tier target_max_objects | \
    grep 'target_max_objects:[ \t]\+123'
  ceph osd pool set real-tier target_max_bytes 123456
  ceph osd pool get real-tier target_max_bytes | \
    grep 'target_max_bytes:[ \t]\+123456'
  # dirty/full ratios must lie in [0, 1]
  ceph osd pool set real-tier cache_target_dirty_ratio .123
  ceph osd pool get real-tier cache_target_dirty_ratio | \
    grep 'cache_target_dirty_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
  ceph osd pool set real-tier cache_target_dirty_high_ratio .123
  ceph osd pool get real-tier cache_target_dirty_high_ratio | \
    grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
  ceph osd pool set real-tier cache_target_full_ratio .123
  ceph osd pool get real-tier cache_target_full_ratio | \
    grep 'cache_target_full_ratio:[ \t]\+0.123'
  # the ratio is stored internally in micro units (0.123 -> 123000)
  ceph osd dump -f json-pretty | \
    grep '"cache_target_full_ratio_micro": 123000'
  ceph osd pool set real-tier cache_target_full_ratio 1.0
  ceph osd pool set real-tier cache_target_full_ratio 0
  expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
  ceph osd pool set real-tier cache_min_flush_age 123
  ceph osd pool get real-tier cache_min_flush_age | \
    grep 'cache_min_flush_age:[ \t]\+123'
  ceph osd pool set real-tier cache_min_evict_age 234
  ceph osd pool get real-tier cache_min_evict_age | \
    grep 'cache_min_evict_age:[ \t]\+234'

  # this is not a tier pool
  ceph osd pool create fake-tier 2
  ceph osd pool application enable fake-tier rados
  # NOTE(review): one or two lines were dropped by the extraction here
  # (original 2113-2114, likely a wait-for-clean step) — verify upstream.

  # every tier-only option must fail on a non-tier pool, for set and get alike
  expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type explicit_object
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type bloom
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
  expect_false ceph osd pool set fake-tier hit_set_period 123
  expect_false ceph osd pool get fake-tier hit_set_period
  expect_false ceph osd pool set fake-tier hit_set_count 12
  expect_false ceph osd pool get fake-tier hit_set_count
  expect_false ceph osd pool set fake-tier hit_set_fpp .01
  expect_false ceph osd pool get fake-tier hit_set_fpp

  expect_false ceph osd pool set fake-tier target_max_objects 123
  expect_false ceph osd pool get fake-tier target_max_objects
  expect_false ceph osd pool set fake-tier target_max_bytes 123456
  expect_false ceph osd pool get fake-tier target_max_bytes
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_full_ratio
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_min_flush_age 123
  expect_false ceph osd pool get fake-tier cache_min_flush_age
  expect_false ceph osd pool set fake-tier cache_min_evict_age 234
  expect_false ceph osd pool get fake-tier cache_min_evict_age

  # cleanup
  ceph osd tier remove rbd real-tier
  ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
  ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
}
function test_mon_osd_erasure_code()
{
  # Setting an erasure-code profile is idempotent for identical contents,
  # rejects a changed profile without --force, and accepts it with --force.
  # NOTE(review): reconstructed from an extraction-mangled copy — verify
  # against upstream.

  ceph osd erasure-code-profile set fooprofile a=b c=d
  # same content again: must succeed (idempotent)
  ceph osd erasure-code-profile set fooprofile a=b c=d
  # different content without --force: must be rejected
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  # different content with --force: accepted
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
  # now identical to the stored profile again: succeeds without --force
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  # and a further change without --force is rejected again
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
  # cleanup by removing profile 'fooprofile'
  ceph osd erasure-code-profile rm fooprofile
}
function test_mon_osd_misc()
{
  # Check error reporting of malformed osd/pg commands, then exercise the
  # reweight-by-utilization / reweight-by-pg argument validation.
  # NOTE(review): reconstructed from an extraction-mangled copy; the
  # 'set +e' / 'set -e' toggles were dropped by the extraction but are
  # required for the '$?'-based check_response calls below — verify.
  set +e

  # expect error about missing 'pool' argument
  ceph osd map 2>$TMPFILE; check_response 'pool' $? 22

  # expect error about unused argument foo
  ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22

  # expect "not in range" for invalid full ratio
  ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22

  # expect "not in range" for invalid overload percentage
  ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22

  set -e

  # overload must be > 100; max_change must be positive; max_osds positive
  ceph osd reweight-by-utilization 110
  ceph osd reweight-by-utilization 110 .5
  expect_false ceph osd reweight-by-utilization 110 0
  expect_false ceph osd reweight-by-utilization 110 -0.1
  ceph osd test-reweight-by-utilization 110 .5 --no-increasing
  ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
  ceph osd reweight-by-pg 110
  ceph osd test-reweight-by-pg 110 .5
  ceph osd reweight-by-pg 110 rbd
  ceph osd reweight-by-pg 110 .5 rbd
  expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
}
function test_mon_heap_profiler()
{
  # Drive the monitor heap profiler; skip gracefully when ceph was built
  # without tcmalloc ('ceph heap stats' then exits 22 and reports
  # 'tcmalloc not enabled').
  # NOTE(review): reconstructed from an extraction-mangled copy; the
  # do_test flag setup, 'fi' and 'set' toggles were dropped by the
  # extraction but are required by the visible guard below — verify.
  local do_test=1
  set +e
  # expect 'heap' commands to be correctly parsed
  ceph heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi
  set -e

  [[ $do_test -eq 0 ]] && return 0

  ceph heap start_profiler
  # NOTE(review): one line was dropped by the extraction here (original
  # 2218, presumably 'ceph heap dump') — verify against upstream.
  ceph heap stop_profiler
}
function test_admin_heap_profiler()
{
  # Same tcmalloc availability probe as test_mon_heap_profiler, then drive
  # the heap profiler through the osd.0 admin socket.
  # NOTE(review): reconstructed from an extraction-mangled copy; the
  # do_test flag setup, 'fi' and 'set' toggles were dropped by the
  # extraction but are required by the visible guard below — verify.
  local do_test=1
  set +e
  # expect 'heap' commands to be correctly parsed
  ceph heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi
  set -e

  [[ $do_test -eq 0 ]] && return 0

  # declaration split from assignment so a get_admin_socket failure is not
  # masked by 'local'
  local admin_socket
  admin_socket=$(get_admin_socket osd.0)

  $SUDO ceph --admin-daemon $admin_socket heap start_profiler
  $SUDO ceph --admin-daemon $admin_socket heap dump
  $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
  $SUDO ceph --admin-daemon $admin_socket heap release
}
function test_osd_bench()
{
  # test osd bench limits
  # As we should not rely on defaults (as they may change over time),
  # lets inject some values and perform some simple tests
  # max iops: 10              # 100 IOPS
  # max throughput: 10485760  # 10MB/s
  # max block size: 2097152   # 2MB
  # duration: 10              # 10 seconds
  # NOTE(review): the 'local args="\' opening line was dropped by the
  # extraction; reconstructed — the visible '${args## }' below requires it.
  local args="\
    --osd-bench-duration 10 \
    --osd-bench-max-block-size 2097152 \
    --osd-bench-large-size-max-throughput 10485760 \
    --osd-bench-small-size-max-iops 10"
  # ${args## } strips the single leading space left by the continuations
  ceph tell osd.0 injectargs ${args## }

  # anything with a bs larger than 2097152 must fail
  expect_false ceph tell osd.0 bench 1 2097153
  # but using 'osd_bench_max_bs' must succeed
  ceph tell osd.0 bench 1 2097152

  # we assume 1MB as a large bs; anything lower is a small bs
  # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
  # max count: 409600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 409601 4096
  # but 409600 must be succeed
  ceph tell osd.0 bench 409600 4096

  # for a large bs, we are limited by throughput.
  # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
  # the max count will be (10MB * 10s) = 100MB
  # max count: 104857600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 104857601 2097152
  # up to max count must be allowed
  ceph tell osd.0 bench 104857600 2097152
}
function test_osd_negative_filestore_merge_threshold()
{
  # A negative filestore_merge_threshold must be accepted and stored as-is
  # (negative values are a valid setting for this option).
  $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
  expect_config_value "osd.0" "filestore_merge_threshold" -1
}
function test_mon_tell()
{
  # 'ceph tell mon.X' must reach the named monitor (and only then), and the
  # dispatch must show up in that monitor's audit log.
  ceph tell mon.a version
  ceph tell mon.b version
  expect_false ceph tell mon.foo version

  ceph_watch_start debug audit
  ceph tell mon.a version
  ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'

  ceph_watch_start debug audit
  ceph tell mon.b version
  ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
}
function test_mon_ping()
{
  # Pinging an unknown monitor must fail.
  # NOTE(review): the extraction dropped the surrounding lines (original
  # 2312-2313 and 2315-2318, presumably positive 'ceph ping mon.a/mon.b'
  # cases and a wildcard ping) — reconstructed minimally; verify upstream.
  ceph ping mon.a
  ceph ping mon.b
  expect_false ceph ping mon.foo
}
function test_mon_deprecated_commands()
{
  # current DEPRECATED commands are:
  # NOTE(review): the original list (lines 2322-2325) was dropped by the
  # extraction; from the commands exercised below it covered at least
  # 'compact', 'scrub' and 'sync force' — verify upstream.
  #
  # Testing should be accomplished by setting
  # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
  # each one of these commands.

  ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
  expect_false ceph tell mon.a compact 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a scrub 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a sync force 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  # restore the default so later tests see deprecated (not obsolete) behavior
  ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
}
function test_mon_cephdf_commands()
{
  # ceph df detail:
  # RAW USED The near raw used per pool in raw total
  # With size=2 replication, raw_bytes_used must equal bytes_used * 2.

  ceph osd pool create cephdf_for_test 32 32 replicated
  ceph osd pool application enable cephdf_for_test rados
  ceph osd pool set cephdf_for_test size 2

  dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
  rados put cephdf_for_test cephdf_for_test -p cephdf_for_test

  # wait (up to ~10s) until the object is visible to the OSDs
  for i in `seq 1 10`; do
    rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
    sleep 1  # NOTE(review): reconstructed; dropped by the extraction
  done

  # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
  # to sync mon with osd
  # NOTE(review): the actual sync call (original line 2363, presumably
  # flush_pg_stats) was dropped by the extraction — verify upstream.
  flush_pg_stats

  local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
  cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
  raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`

  ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
  rm ./cephdf_for_test

  # the double negative asserts equality: fail if the two sizes differ
  expect_false test $cal_raw_used_size != $raw_used_size
}
function test_mon_pool_application()
{
  # Exercise pool-application enable/set/rm/disable: enabling a second
  # application needs --yes-i-really-mean-it, key/value metadata is only
  # accepted for enabled applications, and rm/disable are idempotent.

  ceph osd pool create app_for_test 10

  ceph osd pool application enable app_for_test rbd
  # a second application requires explicit confirmation
  expect_false ceph osd pool application enable app_for_test rgw
  ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool ls detail | grep "application rbd,rgw"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'

  # metadata can only be set for an enabled application
  expect_false ceph osd pool application set app_for_test cephfs key value
  ceph osd pool application set app_for_test rbd key1 value1
  ceph osd pool application set app_for_test rbd key2 value2
  ceph osd pool application set app_for_test rgw key1 value1

  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'

  ceph osd pool application rm app_for_test rgw key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key2
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1 # should be idempotent

  expect_false ceph osd pool application disable app_for_test rgw
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
  ceph osd pool ls detail | grep "application rbd"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'

  # BUGFIX(review): the mangled source disabled 'rgw' here again, but rgw is
  # already disabled and rbd is still enabled, so the 'grep -v "application "'
  # check below could never pass — the remaining 'rbd' must be disabled.
  ceph osd pool application disable app_for_test rbd --yes-i-really-mean-it
  ceph osd pool ls detail | grep -v "application "
  ceph osd pool ls detail --format=json | grep '"application_metadata":{}'

  ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
}
function test_mon_tell_help_command()
{
  # 'tell help' must work against an existing monitor ...
  ceph tell mon.a help

  # ... and fail (not hang) against a non-existent one
  expect_false ceph tell mon.zzz help
}
function test_mon_stdin_stdout()
{
  # config-key must accept its value on stdin ('-i -') and emit it on
  # stdout ('-o -'); the value must round-trip exactly once.
  echo foo | ceph config-key set test_key -i -
  ceph config-key get test_key -o - | grep -c foo | grep -q 1
}
function test_osd_tell_help_command()
{
  # 'tell help' works for an existing OSD, fails for a non-existent id
  ceph tell osd.1 help
  expect_false ceph tell osd.100 help
}
function test_osd_compact()
{
  # compact must be accepted both via 'tell' and via the admin socket
  ceph tell osd.1 compact
  $SUDO ceph daemon osd.1 compact
}
function test_mds_tell_help_command()
{
  # Create a throwaway filesystem, check 'tell help' against an existing
  # and a non-existent MDS, then tear everything down.
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  # NOTE(review): a filesystem-cleanup line (original 2446) was dropped by
  # the extraction — verify against upstream.
  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  ceph tell mds.a help
  expect_false ceph tell mds.z help

  # NOTE(review): the filesystem teardown (original 2455-2456) was dropped
  # by the extraction; without it the pool deletes below may be refused
  # while the fs still references them — verify against upstream.
  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mgr_tell()
{
  # NOTE(review): the extraction dropped a line here (original 2463,
  # presumably 'ceph tell mgr help') — verify against upstream.
  #ceph tell mgr fs status # see http://tracker.ceph.com/issues/20761
  ceph tell mgr osd status
}
#
# New tests should be added to the TESTS array below
#
# Individual tests may be run using the '-t <testname>' argument
# The user can specify '-t <testname>' as many times as she wants
#
# Tests will be run in order presented in the TESTS array, or in
# the order specified by the '-t <testname>' options.
#
# '-l' will list all the available test names
# '-h' will show usage
#
# The test maintains backward compatibility: not specifying arguments
# will run all tests following the order they appear in the TESTS array.
#

# Each entry names a test_<name> function defined above; the per-daemon
# lists are later concatenated into the TESTS array used by the runner.
MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
for i in `seq 9`; do
  MON_TESTS+=" tiering_$i";
done
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
MON_TESTS+=" mon_config_key"
MON_TESTS+=" mon_crush"
MON_TESTS+=" mon_osd_create_destroy"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_pool_set"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
MON_TESTS+=" mon_heap_profiler"
MON_TESTS+=" mon_tell"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"
MON_TESTS+=" mon_tell_help_command"
MON_TESTS+=" mon_stdin_stdout"

OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"
OSD_TESTS+=" admin_heap_profiler"
OSD_TESTS+=" osd_tell_help_command"
OSD_TESTS+=" osd_compact"

MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"
MDS_TESTS+=" mds_tell_help_command"

MGR_TESTS+=" mgr_tell"
2537 function list_tests
()
2539 echo "AVAILABLE TESTS"
2547 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2554 while [[ $# -gt 0 ]]; do
2561 "--asok-does-not-need-root" )
2564 "--no-sanity-check" )
2568 tests_to_run
+="$MON_TESTS"
2571 tests_to_run
+="$OSD_TESTS"
2574 tests_to_run
+="$MDS_TESTS"
2577 tests_to_run
+="$MGR_TESTS"
2581 if [[ -z "$1" ]]; then
2582 echo "missing argument to '-t'"
2596 if [[ $do_list -eq 1 ]]; then
2601 ceph osd pool create rbd
10
2603 if test -z "$tests_to_run" ; then
2604 tests_to_run
="$TESTS"
2607 if $sanity_check ; then
2610 for i
in $tests_to_run; do
2611 if $sanity_check ; then
2618 if $sanity_check ; then