#!/usr/bin/env bash
# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab
source $(dirname $0)/../../standalone/ceph-helpers.sh

PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
function check_no_osd_down()
{
  ! ceph osd dump | grep ' down '
}
function wait_no_osd_down()
{
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_no_osd_down ; then
      echo "waiting for osd(s) to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  check_no_osd_down
}
function expect_false()
{
  if "$@"; then return 1; else return 0; fi
}
function expect_true()
{
  if ! "$@"; then return 1; else return 0; fi
}
TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0

TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
#
# retry_eagain max cmd args ...
#
# retry "cmd args ..." if it exits on error and its output contains the
# string EAGAIN, at most $max times
#
function retry_eagain()
{
  local max=$1
  shift
  local status
  local tmpfile=$TEMP_DIR/retry_eagain.$$
  local count
  for count in $(seq 1 $max) ; do
    status=0
    "$@" > $tmpfile 2>&1 || status=$?
    if test $status = 0 ||
        ! grep --quiet EAGAIN $tmpfile ; then
      break
    fi
    sleep 1
  done
  if test $count = $max ; then
    echo retried with non-zero exit status, $max times: "$@" >&2
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}
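
# Illustrative usage (a sketch, not executed as part of the tests):
#   retry_eagain 5 ceph osd dump
# would re-run "ceph osd dump" up to 5 times while its output contains EAGAIN.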
#
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of "cmd arg ..." if the output contains ENXIO.
#
function map_enxio_to_eagain()
{
  local status=0
  local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$

  "$@" > $tmpfile 2>&1 || status=$?
  if test $status != 0 &&
      grep --quiet ENXIO $tmpfile ; then
    echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}
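
# The two helpers combine, as done near the end of this file: tolerate a
# transient ENXIO from a restarting OSD by turning it into a retryable EAGAIN:
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version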
function check_response()
{
  expected_string=$1
  retcode=$2
  expected_retcode=$3
  if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
    echo "return code invalid: got $retcode, expected $expected_retcode" >&2
    exit 1
  fi

  if ! grep --quiet -- "$expected_string" $TMPFILE ; then
    echo "Didn't find $expected_string in output" >&2
    exit 1
  fi
}
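
# Typical pattern used throughout this script: capture stderr of a command
# that is expected to fail, then assert on the error text:
#   ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
#   check_response "Error ENOENT"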
function get_config_value_or_die()
{
  local target config_opt raw val

  target=$1
  config_opt=$2

  raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
  if [[ $? -ne 0 ]]; then
    echo "error obtaining config opt '$config_opt' from '$target': $raw"
    exit 1
  fi

  raw=`echo $raw | sed -e 's/[{} "]//g'`
  val=`echo $raw | cut -f2 -d:`

  echo "$val"
  return 0
}
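
# Example, as used in test_mon_injectargs_SI below:
#   initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")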
function expect_config_value()
{
  local target config_opt expected_val val
  target=$1
  config_opt=$2
  expected_val=$3

  val=$(get_config_value_or_die $target $config_opt)

  if [[ "$val" != "$expected_val" ]]; then
    echo "expected '$expected_val', got '$val'"
    exit 1
  fi
}
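
# Example: assert that a daemon option has the parsed value we expect:
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10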
function ceph_watch_start()
{
  local whatch_opt=--watch

  if [ -n "$1" ]; then
    whatch_opt=--watch-$1
    if [ -n "$2" ]; then
      whatch_opt+=" --watch-channel $2"
    fi
  fi

  CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
  ceph $whatch_opt > $CEPH_WATCH_FILE &
  CEPH_WATCH_PID=$!

  # wait until the "ceph" client is connected and receiving
  # log messages from the monitor
  for i in `seq 3`; do
    grep -q "cluster" $CEPH_WATCH_FILE && break
    sleep 1
  done
}
function ceph_watch_wait()
{
  local regexp=$1
  local timeout=30

  if [ -n "$2" ]; then
    timeout=$2
  fi

  for i in `seq ${timeout}`; do
    grep -q "$regexp" $CEPH_WATCH_FILE && break
    sleep 1
  done

  kill $CEPH_WATCH_PID

  if ! grep "$regexp" $CEPH_WATCH_FILE; then
    echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
    cat $CEPH_WATCH_FILE >&2
    return 1
  fi
}
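
# The two watch helpers pair up as in test_mon_misc below (a sketch):
#   ceph_watch_start
#   ceph log "some message"
#   ceph_watch_wait "some message"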
function test_mon_injectargs()
{
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker
  ceph tell osd.0 config get osd_enable_op_tracker | grep false
  ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500'
  ceph tell osd.0 config get osd_enable_op_tracker | grep true
  ceph tell osd.0 config get osd_op_history_duration | grep 500
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker
  ceph tell osd.0 config get osd_enable_op_tracker | grep false
  ceph tell osd.0 injectargs -- --osd_enable_op_tracker
  ceph tell osd.0 config get osd_enable_op_tracker | grep true
  ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600'
  ceph tell osd.0 config get osd_enable_op_tracker | grep true
  ceph tell osd.0 config get osd_op_history_duration | grep 600

  ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200'
  ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200

  ceph tell osd.0 injectargs -- '--mon_probe_timeout 2'
  ceph tell osd.0 config get mon_probe_timeout | grep 2

  ceph tell osd.0 injectargs -- '--mon-lease 6'
  ceph tell osd.0 config get mon_lease | grep 6

  # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
  expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 2> $TMPFILE || return 1
  check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"

  expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
      ceph tell osd.0 injectargs -- '--osd_op_history_duration'
}
function test_mon_injectargs_SI()
{
  # Test SI units during injectargs and 'config set'
  # We only aim at testing the units are parsed accordingly
  # and don't intend to test whether the options being set
  # actually expect SI units to be passed.
  # Keep in mind that all integer based options that are not based on bytes
  # (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed to
  # base 10.
  initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
  check_response "(22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
  expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
  expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}
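
# (SI parsing above: 10K == 10 * 10^3 == 10000 and 1G == 10^9, which is what
# the expect_config_value checks assert.)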
function test_mon_injectargs_IEC()
{
  # Test IEC units during injectargs and 'config set'
  # We only aim at testing the units are parsed accordingly
  # and don't intend to test whether the options being set
  # actually expect IEC units to be passed.
  # Keep in mind that all integer based options that are based on bytes
  # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
  # unit modifiers (for backwards compatibility and convenience) and be parsed
  # to base 2.
  initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
  $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
  expect_config_value "mon.a" "mon_data_size_warn" 15000000000
  $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
  expect_config_value "mon.a" "mon_data_size_warn" 16106127360
  $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
  expect_config_value "mon.a" "mon_data_size_warn" 17179869184
  $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
  check_response "(22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
  expect_config_value "mon.a" "mon_data_size_warn" 15000000000
  ceph tell mon.a injectargs '--mon_data_size_warn 15G'
  expect_config_value "mon.a" "mon_data_size_warn" 16106127360
  ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
  expect_config_value "mon.a" "mon_data_size_warn" 17179869184
  expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
  $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
}
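
# (IEC parsing above: for byte-based options "15G" is interpreted base-2,
# 15 * 2^30 == 16106127360, and "16Gi" == 16 * 2^30 == 17179869184.)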
function test_tiering_agent()
{
  local slow=slow_eviction
  local fast=fast_eviction
  ceph osd pool create $slow 1 1
  ceph osd pool application enable $slow rados
  ceph osd pool create $fast 1 1
  ceph osd tier add $slow $fast
  ceph osd tier cache-mode $fast writeback
  ceph osd tier set-overlay $slow $fast
  ceph osd pool set $fast hit_set_type bloom
  rados -p $slow put obj1 /etc/group
  ceph osd pool set $fast target_max_objects 1
  ceph osd pool set $fast hit_set_count 1
  ceph osd pool set $fast hit_set_period 5
  # wait for the object to be evicted from the cache
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      break
    fi
    sleep 1
  done
  # the object is proxy read and promoted to the cache
  rados -p $slow get obj1 - >/dev/null
  # wait for the promoted object to be evicted again
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      break
    fi
    sleep 1
  done
  ceph osd tier remove-overlay $slow
  ceph osd tier remove $slow $fast
  ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
  ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}
function test_tiering_1()
{
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create slow2 2
  ceph osd pool application enable slow2 rados
  ceph osd pool create cache 2
  ceph osd pool create cache2 2
  ceph osd tier add slow cache
  ceph osd tier add slow cache2
  expect_false ceph osd tier add slow2 cache
  # forward and proxy are removed/deprecated
  expect_false ceph osd tier cache-mode cache forward
  expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache proxy
  expect_false ceph osd tier cache-mode cache proxy --yes-i-really-mean-it
  # test some state transitions
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache readonly
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache readproxy
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  # test with dirty objects in the tier pool
  # tier pool currently set to 'writeback'
  rados -p cache put /etc/passwd /etc/passwd
  # 1 dirty object in pool 'cache'
  ceph osd tier cache-mode cache readproxy
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache writeback
  # remove object from tier pool
  rados -p cache rm /etc/passwd
  rados -p cache cache-flush-evict-all
  # no dirty objects in pool 'cache'
  ceph osd tier cache-mode cache readproxy
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  TRIES=0
  while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
  do
    grep 'currently creating pgs' $TMPFILE
    TRIES=$(( $TRIES + 1 ))
    test $TRIES -ne 60
    sleep 3
  done
  expect_false ceph osd pool set cache pg_num 4
  ceph osd tier cache-mode cache none
  ceph osd tier set-overlay slow cache
  expect_false ceph osd tier set-overlay slow cache2
  expect_false ceph osd tier remove slow cache
  ceph osd tier remove-overlay slow
  ceph osd tier set-overlay slow cache2
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache
  ceph osd tier add slow2 cache
  expect_false ceph osd tier set-overlay slow cache
  ceph osd tier set-overlay slow2 cache
  ceph osd tier remove-overlay slow2
  ceph osd tier remove slow2 cache
  ceph osd tier remove slow cache2

  # make sure a non-empty pool fails
  rados -p cache2 put /etc/passwd /etc/passwd
  while ! ceph df | grep cache2 | grep ' 1 ' ; do
    echo waiting for pg stats to flush
    sleep 2
  done
  expect_false ceph osd tier add slow cache2
  ceph osd tier add slow cache2 --force-nonempty
  ceph osd tier remove slow cache2

  ceph osd pool ls | grep cache2
  ceph osd pool ls -f json-pretty | grep cache2
  ceph osd pool ls detail | grep cache2
  ceph osd pool ls detail -f json-pretty | grep cache2

  ceph osd pool delete slow slow --yes-i-really-really-mean-it
  ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
  ceph osd pool delete cache cache --yes-i-really-really-mean-it
  ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
}
function test_tiering_2()
{
  # make sure we can't clobber snapshot state
  ceph osd pool create snap_base 2
  ceph osd pool application enable snap_base rados
  ceph osd pool create snap_cache 2
  ceph osd pool mksnap snap_cache snapname
  expect_false ceph osd tier add snap_base snap_cache
  ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
  ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}
function test_tiering_3()
{
  # make sure we can't create a snapshot on a tier
  ceph osd pool create basex 2
  ceph osd pool application enable basex rados
  ceph osd pool create cachex 2
  ceph osd tier add basex cachex
  expect_false ceph osd pool mksnap cachex snapname
  ceph osd tier remove basex cachex
  ceph osd pool delete basex basex --yes-i-really-really-mean-it
  ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}
function test_tiering_4()
{
  # make sure we can't create an ec pool tier
  ceph osd pool create eccache 2 2 erasure
  expect_false ceph osd set-require-min-compat-client bobtail
  ceph osd pool create repbase 2
  ceph osd pool application enable repbase rados
  expect_false ceph osd tier add repbase eccache
  ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
  ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}
function test_tiering_5()
{
  # convenient add-cache command
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create cache3 2
  ceph osd tier add-cache slow cache3 1024000
  ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
  ceph osd tier remove slow cache3 2> $TMPFILE || true
  check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache3
  ceph osd pool ls | grep cache3
  ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
  ! ceph osd pool ls | grep cache3 || exit 1
  ceph osd pool delete slow slow --yes-i-really-really-mean-it
}
function test_tiering_6()
{
  # check that the add-cache command works end to end
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd tier cache-mode cachepool writeback
  rados -p datapool put object /etc/passwd
  rados -p cachepool stat object
  rados -p cachepool cache-flush object
  rados -p datapool stat object
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_7()
{
  # protection against pool removal when used as tiers
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'datapool' has tiers cachepool"
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_8()
{
  ## check health check
  ceph osd set notieragent
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cache4 2
  ceph osd tier add-cache datapool cache4 1024000
  ceph osd tier cache-mode cache4 writeback
  tmpfile=$(mktemp|grep tmp)
  dd if=/dev/zero of=$tmpfile bs=4K count=1
  ceph osd pool set cache4 target_max_objects 200
  ceph osd pool set cache4 target_max_bytes 1000000
  rados -p cache4 put foo1 $tmpfile
  rados -p cache4 put foo2 $tmpfile
  ceph df | grep datapool | grep ' 2 '
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cache4
  ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
  ceph osd unset notieragent
}
function test_tiering_9()
{
  # make sure 'tier remove' behaves as we expect
  # i.e., removing a tier from a pool that's not its base pool only
  # results in a 'pool foo is now (or already was) not a tier of bar'
  #
  ceph osd pool create basepoolA 2
  ceph osd pool application enable basepoolA rados
  ceph osd pool create basepoolB 2
  ceph osd pool application enable basepoolB rados
  poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
  poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')

  ceph osd pool create cache5 2
  ceph osd pool create cache6 2
  ceph osd tier add basepoolA cache5
  ceph osd tier add basepoolB cache6
  ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
  ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"

  ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
  ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1

  ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
  ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1

  ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
  ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
  ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
  ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}
function test_auth()
{
  expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
  expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
  ceph auth add client.xx mon 'allow *' osd "allow *"
  ceph auth export client.xx >client.xx.keyring
  ceph auth add client.xx -i client.xx.keyring
  rm -f client.xx.keyring
  ceph auth list | grep client.xx
  ceph auth ls | grep client.xx
  ceph auth get client.xx | grep caps | grep mon
  ceph auth get client.xx | grep caps | grep osd
  ceph auth get-key client.xx
  ceph auth print-key client.xx
  ceph auth print_key client.xx
  ceph auth caps client.xx osd "allow rw"
  expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
  ceph auth get client.xx | grep osd | grep "allow rw"
  ceph auth caps client.xx mon 'allow command "osd tree"'
  ceph auth export | grep client.xx
  ceph auth export -o authfile
  ceph auth import -i authfile 2>$TMPFILE
  check_response "imported keyring"

  ceph auth export -o authfile2
  diff authfile authfile2
  rm authfile authfile2
  ceph auth del client.xx
  expect_false ceph auth get client.xx

  # (almost) interactive mode
  echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
  ceph auth get client.xx
  echo 'auth del client.xx' | ceph
  expect_false ceph auth get client.xx
}
function test_auth_profiles()
{
  ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
      mgr 'allow profile read-only'
  ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
      mgr 'allow profile read-write'
  ceph auth add client.xx-profile-rd mon 'allow profile role-definer'

  ceph auth export > client.xx.keyring

  # read-only is allowed all read-only commands (auth excluded)
  ceph -n client.xx-profile-ro -k client.xx.keyring status
  ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
  ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
  # read-only gets access denied for rw commands or auth commands
  ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # read-write is allowed for all read-write commands (except auth)
  ceph -n client.xx-profile-rw -k client.xx.keyring status
  ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
  ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
  ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
  ceph -n client.xx-profile-rw -k client.xx.keyring log foo
  ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
  ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
  # read-write gets access denied for auth commands
  ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
  ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
  ceph -n client.xx-profile-rd -k client.xx.keyring auth export
  ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
  ceph -n client.xx-profile-rd -k client.xx.keyring status
  ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  # read-only 'mon' subsystem commands are allowed
  ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
  # but read-write 'mon' commands are not
  ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"

  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw

  # add a new role-definer with the existing role-definer
  ceph -n client.xx-profile-rd -k client.xx.keyring \
      auth add client.xx-profile-rd2 mon 'allow profile role-definer'
  ceph -n client.xx-profile-rd -k client.xx.keyring \
      auth export > client.xx.keyring.2
  # remove old role-definer using the new role-definer
  ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
      auth del client.xx-profile-rd
  # remove the remaining role-definer with admin
  ceph auth del client.xx-profile-rd2
  rm -f client.xx.keyring client.xx.keyring.2
}
function test_mon_caps()
{
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring

  # pass --no-mon-config since we are looking for the permission denied error
  rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"

  rm -rf $TEMP_DIR/ceph.client.bug.keyring
  ceph auth del client.bug
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
  rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"
}
function test_mon_misc()
{
  # with and without verbosity
  ceph osd dump | grep '^epoch'
  ceph --concise osd dump | grep '^epoch'

  ceph osd df | grep 'MIN/MAX VAR'

  ceph df > $TMPFILE
  grep -v DIRTY $TMPFILE
  ceph df detail > $TMPFILE
  grep DIRTY $TMPFILE
  ceph df --format json > $TMPFILE
  grep 'total_bytes' $TMPFILE
  grep -v 'dirty' $TMPFILE
  ceph df detail --format json > $TMPFILE
  grep 'rd_bytes' $TMPFILE
  grep 'dirty' $TMPFILE
  ceph df --format xml | grep '<total_bytes>'
  ceph df detail --format xml | grep '<rd_bytes>'

  ceph health --format json-pretty
  ceph health detail --format xml-pretty

  ceph time-sync-status

  for t in mon osd mds mgr ; do
    ceph node ls $t
  done

  ceph_watch_start
  mymsg="this is a test log message $$.$(date)"
  ceph log "$mymsg"
  ceph log last | grep "$mymsg"
  ceph log last 100 | grep "$mymsg"
  ceph_watch_wait "$mymsg"

  ceph mgr module enable restful
  expect_false ceph mgr module enable foodne
  ceph mgr module enable foodne --force
  ceph mgr module disable foodne
  ceph mgr module disable foodnebizbangbash

  ceph mon count-metadata ceph_version
  ceph mgr count-metadata ceph_version
}
function check_mds_active()
{
  fs_name=$1
  ceph fs get $fs_name | grep active
}
function wait_mds_active()
{
  fs_name=$1
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_mds_active $fs_name ; then
      echo "waiting for an active MDS daemon ($i/$max_run)"
      sleep 5
    else
      break
    fi
  done
  check_mds_active $fs_name
}
function get_mds_gids()
{
  fs_name=$1
  ceph fs get $fs_name --format=json | python3 -c "import json; import sys; print(' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()]))"
}
function fail_all_mds()
{
  fs_name=$1
  ceph fs set $fs_name cluster_down true
  mds_gids=$(get_mds_gids $fs_name)
  for mds_gid in $mds_gids ; do
    ceph mds fail $mds_gid
  done
  if check_mds_active $fs_name ; then
    echo "An active MDS remains, something went wrong"
    exit 1
  fi
}
function remove_all_fs()
{
  existing_fs=$(ceph fs ls --format=json | python3 -c "import json; import sys; print(' '.join([fs['name'] for fs in json.load(sys.stdin)]))")
  for fs_name in $existing_fs ; do
    echo "Removing fs ${fs_name}..."
    fail_all_mds $fs_name
    echo "Removing existing filesystem '${fs_name}'..."
    ceph fs rm $fs_name --yes-i-really-mean-it
    echo "Removed '${fs_name}'."
  done
}
# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
  ceph auth ls | grep "^mds"
}
# some of the commands are just not idempotent.
function without_test_dup_command()
{
  if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
    "$@"
  else
    local saved=${CEPH_CLI_TEST_DUP_COMMAND}
    unset CEPH_CLI_TEST_DUP_COMMAND
    "$@"
    CEPH_CLI_TEST_DUP_COMMAND=$saved
  fi
}
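
# Example: "respawn" is not idempotent, so it is wrapped below as
#   without_test_dup_command ceph tell mds.0 respawn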
function test_mds_tell()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  remove_all_fs
  ceph osd pool create fs_data 16
  ceph osd pool create fs_metadata 16
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  # Test injectargs by GID
  old_mds_gids=$(get_mds_gids $FS_NAME)
  echo Old GIDs: $old_mds_gids

  for mds_gid in $old_mds_gids ; do
    ceph tell mds.$mds_gid injectargs "--debug-mds 20"
  done
  expect_false ceph tell mds.a injectargs mds_max_file_recover -1

  # Test respawn by rank
  without_test_dup_command ceph tell mds.0 respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  # Test respawn by ID
  without_test_dup_command ceph tell mds.a respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  remove_all_fs
  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mon_mds()
{
  local FS_NAME=cephfs
  remove_all_fs

  ceph osd pool create fs_data 16
  ceph osd pool create fs_metadata 16
  ceph fs new $FS_NAME fs_metadata fs_data

  ceph fs set $FS_NAME cluster_down true
  ceph fs set $FS_NAME cluster_down false

  ceph mds compat rm_incompat 4
  ceph mds compat rm_incompat 4

  # We don't want any MDSs to be up, their activity can interfere with
  # the "current_epoch + 1" checking below if they're generating updates
  fail_all_mds $FS_NAME

  for mds_gid in $(get_mds_gids $FS_NAME) ; do
    ceph mds metadata $mds_gid
  done

  ceph mds count-metadata os

  # XXX mds fail, but how do you undo it?
  mdsmapfile=$TEMP_DIR/mdsmap.$$
  current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
  rm $mdsmapfile

  ceph osd pool create data2 16
  ceph osd pool create data3 16
  data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
  data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
  ceph fs add_data_pool cephfs $data2_pool
  ceph fs add_data_pool cephfs $data3_pool
  ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph fs rm_data_pool cephfs $data2_pool
  ceph fs rm_data_pool cephfs $data3_pool
  ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
  ceph fs set cephfs max_mds 4
  ceph fs set cephfs max_mds 3
  ceph fs set cephfs max_mds 256
  expect_false ceph fs set cephfs max_mds 257
  ceph fs set cephfs max_mds 4
  ceph fs set cephfs max_mds 256
  expect_false ceph fs set cephfs max_mds 257
  expect_false ceph fs set cephfs max_mds asdf
  expect_false ceph fs set cephfs inline_data true
  ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
  ceph fs set cephfs inline_data yes --yes-i-really-really-mean-it
  ceph fs set cephfs inline_data 1 --yes-i-really-really-mean-it
  expect_false ceph fs set cephfs inline_data --yes-i-really-really-mean-it
  ceph fs set cephfs inline_data false
  ceph fs set cephfs inline_data no
  ceph fs set cephfs inline_data 0
  expect_false ceph fs set cephfs inline_data asdf
  ceph fs set cephfs max_file_size 1048576
  expect_false ceph fs set cephfs max_file_size 123asdf

  expect_false ceph fs set cephfs allow_new_snaps
  ceph fs set cephfs allow_new_snaps true
  ceph fs set cephfs allow_new_snaps 0
  ceph fs set cephfs allow_new_snaps false
  ceph fs set cephfs allow_new_snaps no
  expect_false ceph fs set cephfs allow_new_snaps taco

  # we should never be able to add EC pools as data or metadata pools
  # create an ec-pool...
  ceph osd pool create mds-ec-pool 16 16 erasure
  ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
  data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
  metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')

  fail_all_mds $FS_NAME

  # Check that rmfailed requires confirmation
  expect_false ceph mds rmfailed 0
  ceph mds rmfailed 0 --yes-i-really-mean-it

  # Check that `fs new` is no longer permitted
  expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE

  # Check that 'fs reset' runs
  ceph fs reset $FS_NAME --yes-i-really-mean-it

  # Check that creating a second FS fails by default
  ceph osd pool create fs_metadata2 16
  ceph osd pool create fs_data2 16
  expect_false ceph fs new cephfs2 fs_metadata2 fs_data2

  # Check that setting enable_multiple enables creation of second fs
  ceph fs flag set enable_multiple true --yes-i-really-mean-it
  ceph fs new cephfs2 fs_metadata2 fs_data2

  # Clean up multi-fs stuff
  fail_all_mds cephfs2
  ceph fs rm cephfs2 --yes-i-really-mean-it
  ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
  ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it

  fail_all_mds $FS_NAME

  # Clean up to enable subsequent fs new tests
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22

  # ... now create a cache tier in front of the EC pool...
  ceph osd pool create mds-tier 2
  ceph osd tier add mds-ec-pool mds-tier
  ceph osd tier set-overlay mds-ec-pool mds-tier
  tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')

  # Use of a readonly tier should be forbidden
  ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'has a write tier (mds-tier) that is configured to forward' $? 22

  # Use of a writeback tier should enable FS creation
  ceph osd tier cache-mode mds-tier writeback
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force

  # While a FS exists using the tiered pools, I should not be allowed
  # to remove the tier
  ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ... but we should be forbidden from using the cache pool in the FS directly.
  ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22

  # Clean up tier + EC pools
  ceph osd tier remove-overlay mds-ec-pool
  ceph osd tier remove mds-ec-pool mds-tier

  # Create a FS using the 'cache' pool now that it's no longer a tier
  ceph fs new $FS_NAME fs_metadata mds-tier --force

  # We should be forbidden from using this pool as a tier now that
  # it's in use for CephFS
  ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # We should be permitted to use an EC pool with overwrites enabled
  # as the data pool...
  ceph osd pool set mds-ec-pool allow_ec_overwrites true
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ...but not as the metadata pool
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22

  ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it

  # Create a FS and check that we can subsequently add a cache tier to it
  ceph fs new $FS_NAME fs_metadata fs_data --force

  # Adding overlay to FS pool should be permitted, RADOS clients handle this.
  ceph osd tier add fs_metadata mds-tier
  ceph osd tier cache-mode mds-tier writeback
  ceph osd tier set-overlay fs_metadata mds-tier

  # Removing tier should be permitted because the underlying pool is
  # replicated (#11504 case)
  ceph osd tier cache-mode mds-tier readproxy
  ceph osd tier remove-overlay fs_metadata
  ceph osd tier remove fs_metadata mds-tier
  ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ceph mds tell mds.a getmap
  # ceph mds set_state

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mon_mds_metadata()
{
  local nmons=$(ceph tell 'mon.*' version | grep -c 'version')

  ceph fs dump |
  sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
  while read gid id rank ; do
    ceph mds metadata ${gid} | grep '"hostname":'
    ceph mds metadata ${id} | grep '"hostname":'
    ceph mds metadata ${rank} | grep '"hostname":'

    local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
    test "$n" -eq "$nmons"
  done

  expect_false ceph mds metadata UNKNOWN
}
function test_mon_mon()
{
  # print help message
  ceph --help mon

  ceph mon getmap -o $TEMP_DIR/monmap.$$
  [ -s $TEMP_DIR/monmap.$$ ]

  first=$(ceph mon dump -f json | jq -r '.mons[0].name')
  ceph tell mon.$first mon_status

  ceph mon feature set kraken --yes-i-really-mean-it
  expect_false ceph mon feature set abcd
  expect_false ceph mon feature set abcd --yes-i-really-mean-it
}
function test_mon_priority_and_weight()
{
  for i in 0 1 65535; do
    ceph mon set-weight a $i
    w=$(ceph mon dump --format=json-pretty 2>/dev/null | jq '.mons[0].weight')
    [[ "$w" == "$i" ]]
  done

  for i in -1 65536; do
    expect_false ceph mon set-weight a $i
  done
}
function gen_secrets_file()
{
  # let's assume we can have the following types
  # all - generates both cephx and lockbox, with mock dm-crypt key
  # cephx - only cephx
  # no_cephx - lockbox and dm-crypt, no cephx
  # no_lockbox - dm-crypt and cephx, no lockbox
  # empty - empty file
  # empty_json - correct json, empty map
  # bad_json - bad json :)
  local t=$1
  if [[ -z "$t" ]]; then
    t=all
  fi

  fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
  echo $fn
  if [[ "$t" == "empty" ]]; then
    return 0
  fi

  echo "{" > $fn
  if [[ "$t" == "bad_json" ]]; then
    echo "asd: ; }" >> $fn
    return 0
  elif [[ "$t" == "empty_json" ]]; then
    echo "}" >> $fn
    return 0
  fi

  cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
  lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
  dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""

  if [[ "$t" == "all" ]]; then
    echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "cephx" ]]; then
    echo "$cephx_secret" >> $fn
  elif [[ "$t" == "no_cephx" ]]; then
    echo "$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "no_lockbox" ]]; then
    echo "$cephx_secret,$dmcrypt_key" >> $fn
  else
    echo "unknown gen_secrets_file() type '$t'"
    return 1
  fi

  echo "}" >> $fn
  return 0
}
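
# Example (used by test_mon_osd_create_destroy below):
#   all_secrets=$(gen_secrets_file "all")
#   ceph osd new $uuid -i $all_secrets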
function test_mon_osd_create_destroy()
{
  ceph osd new 2>&1 | grep 'EINVAL'
  ceph osd new '' -1 2>&1 | grep 'EINVAL'
  ceph osd new '' 10 2>&1 | grep 'EINVAL'

  old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

  old_osds=$(ceph osd ls)
  num_osds=$(ceph osd ls | wc -l)

  uuid=$(uuidgen)
  id=$(ceph osd new $uuid 2>/dev/null)

  for i in $old_osds; do
    [[ "$i" != "$id" ]]
  done

  id2=`ceph osd new $uuid 2>/dev/null`
  [[ "$id2" == "$id" ]]
  ceph osd new $uuid $id

  id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST

  uuid2=$(uuidgen)
  id2=$(ceph osd new $uuid2)
  [[ "$id2" != "$id" ]]

  ceph osd new $uuid $id2 2>&1 | grep EEXIST
  ceph osd new $uuid2 $id2

  empty_secrets=$(gen_secrets_file "empty")
  empty_json=$(gen_secrets_file "empty_json")
  all_secrets=$(gen_secrets_file "all")
  cephx_only=$(gen_secrets_file "cephx")
  no_cephx=$(gen_secrets_file "no_cephx")
  no_lockbox=$(gen_secrets_file "no_lockbox")
  bad_json=$(gen_secrets_file "bad_json")

  # empty secrets should be idempotent
  new_id=$(ceph osd new $uuid $id -i $empty_secrets)
  [[ "$new_id" == "$id" ]]

  # empty json, thus empty secrets
  new_id=$(ceph osd new $uuid $id -i $empty_json)
  [[ "$new_id" == "$id" ]]

  ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'

  ceph osd rm $id
  ceph osd rm $id2
  ceph osd setmaxosd $old_maxosd

  ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'

  id=$(ceph osd new $uuid -i $all_secrets)

  # validate secrets and dm-crypt are set
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
      jq '.key')
  s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
  [[ $k == $s ]]
  ceph config-key exists dm-crypt/osd/$uuid/luks

  osds=$(ceph osd ls)
  id2=$(ceph osd new $uuid2 -i $cephx_only)
  for i in $osds; do
    [[ "$i" != "$id2" ]]
  done

  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  expect_false ceph auth get-key client.osd-lockbox.$uuid2
  expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks

  ceph osd destroy osd.$id2 --yes-i-really-mean-it
  ceph osd destroy $id2 --yes-i-really-mean-it
  expect_false ceph auth get-key osd.$id2
  ceph osd dump | grep osd.$id2 | grep destroyed

  id3=$id2
  uuid3=$(uuidgen)
  ceph osd new $uuid3 $id3 -i $all_secrets
  ceph osd dump | grep osd.$id3 | expect_false grep destroyed
  ceph auth get-key client.osd-lockbox.$uuid3
  ceph auth get-key osd.$id3
  ceph config-key exists dm-crypt/osd/$uuid3/luks

  ceph osd purge-new osd.$id3 --yes-i-really-mean-it
  expect_false ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  expect_false ceph auth get-key client.osd-lockbox.$uuid3
  expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent

  ceph osd purge osd.$id --yes-i-really-mean-it
  ceph osd purge 123456 --yes-i-really-mean-it
  expect_false ceph osd find $id
  expect_false ceph auth get-key osd.$id
  expect_false ceph auth get-key client.osd-lockbox.$uuid
  expect_false ceph config-key exists dm-crypt/osd/$uuid/luks

  rm $empty_secrets $empty_json $all_secrets $cephx_only \
     $no_cephx $no_lockbox $bad_json

  for i in $(ceph osd ls); do
    [[ "$i" != "$id" ]]
    [[ "$i" != "$id2" ]]
    [[ "$i" != "$id3" ]]
  done

  [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
  ceph osd setmaxosd $old_maxosd
}
function test_mon_config_key()
{
  key=asdfasdfqwerqwreasdfuniquesa123df
  ceph config-key list | grep -c $key | grep 0
  ceph config-key get $key | grep -c bar | grep 0
  ceph config-key set $key bar
  ceph config-key get $key | grep bar
  ceph config-key list | grep -c $key | grep 1
  ceph config-key dump | grep $key | grep bar
  ceph config-key rm $key
  expect_false ceph config-key get $key
  ceph config-key list | grep -c $key | grep 0
  ceph config-key dump | grep -c $key | grep 0
}
function test_mon_osd()
{
  #
  # osd blacklist
  #
  bl=192.168.0.1:0/1000
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
  ceph osd dump --format=json-pretty | grep $bl
  ceph osd dump | grep $bl
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl

  # test without nonce, invalid nonce
  bl=192.168.0.1
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl
  expect_false "ceph osd blacklist $bl/-1"
  expect_false "ceph osd blacklist $bl/foo"

  # test with wrong address
  expect_false "ceph osd blacklist 1234.56.78.90/100"

  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist clear
  ceph osd blacklist ls | expect_false grep $bl

  ceph osd crush reweight-all
  ceph osd crush tunables legacy
  ceph osd crush show-tunables | grep argonaut
  ceph osd crush tunables bobtail
  ceph osd crush show-tunables | grep bobtail
  ceph osd crush tunables firefly
  ceph osd crush show-tunables | grep firefly

  ceph osd crush set-tunable straw_calc_version 0
  ceph osd crush get-tunable straw_calc_version | grep 0
  ceph osd crush set-tunable straw_calc_version 1
  ceph osd crush get-tunable straw_calc_version | grep 1

  # require-min-compat-client
  expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
  ceph osd set-require-min-compat-client luminous
  ceph osd get-require-min-compat-client | grep luminous
  ceph osd dump | grep 'require_min_compat_client luminous'

  ceph osd scrub 0 --block
  ceph osd deep-scrub 0 --block

  # how do I tell when these are done?
  ceph osd deep-scrub 0

  # pool scrub, force-recovery/backfill
  pool_names=`rados lspools`
  for pool_name in $pool_names
  do
    ceph osd pool scrub $pool_name
    ceph osd pool deep-scrub $pool_name
    ceph osd pool repair $pool_name
    ceph osd pool force-recovery $pool_name
    ceph osd pool cancel-force-recovery $pool_name
    ceph osd pool force-backfill $pool_name
    ceph osd pool cancel-force-backfill $pool_name
  done

  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \
      norebalance norecover notieragent
  do
    ceph osd set $f
    ceph osd unset $f
  done
  expect_false ceph osd set bogus
  expect_false ceph osd unset bogus
  for f in sortbitwise recover_deletes require_jewel_osds \
      require_kraken_osds
  do
    expect_false ceph osd set $f
    expect_false ceph osd unset $f
  done
  ceph osd require-osd-release octopus
  # can't lower the release
  expect_false ceph osd require-osd-release nautilus
  expect_false ceph osd require-osd-release mimic
  expect_false ceph osd require-osd-release luminous
  # these are no-ops but should succeed.

  ceph osd down 0
  ceph osd dump | grep 'osd.0 down'
  max_run=1000
  for ((i=0; i < $max_run; i++)); do
    if ! ceph osd dump | grep 'osd.0 up'; then
      echo "waiting for osd.0 to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  ceph osd dump | grep 'osd.0 up'

  ceph osd dump | grep 'osd.0 up'
  # ceph osd find expects the OsdName, so both ints and osd.n should work.
  ceph osd find 1
  ceph osd find osd.1
  expect_false ceph osd find osd.xyz
  expect_false ceph osd find xyz
  expect_false ceph osd find 0.1
  ceph --format plain osd find 1 # falls back to json-pretty
  if [ `uname` == Linux ]; then
    ceph osd metadata 1 | grep 'distro'
    ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
  fi
  ceph osd out 0
  ceph osd dump | grep 'osd.0.*out'
  ceph osd in 0
  ceph osd dump | grep 'osd.0.*in'

  expect_false ceph osd info osd.xyz
  expect_false ceph osd info xyz
  expect_false ceph osd info 42
  expect_false ceph osd info osd.42

  info_json=$(ceph osd info --format=json | jq -cM '.')
  dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
  [[ "${info_json}" == "${dump_json}" ]]

  info_json=$(ceph osd info 0 --format=json | jq -cM '.')
  dump_json=$(ceph osd dump --format=json | \
      jq -cM '.osds[] | select(.osd == 0)')
  [[ "${info_json}" == "${dump_json}" ]]

  info_plain="$(ceph osd info)"
  dump_plain="$(ceph osd dump | grep '^osd')"
  [[ "${info_plain}" == "${dump_plain}" ]]

  info_plain="$(ceph osd info 0)"
  dump_plain="$(ceph osd dump | grep '^osd.0')"
  [[ "${info_plain}" == "${dump_plain}" ]]

  ceph osd add-nodown 0 1
  ceph health detail | grep 'NODOWN'
  ceph osd rm-nodown 0 1
  ! ceph health detail | grep 'NODOWN'

  ceph osd out 0 # so we can mark it as noin later
  ceph osd add-noin 0
  ceph health detail | grep 'NOIN'
  ceph osd rm-noin 0
  ! ceph health detail | grep 'NOIN'
  ceph osd in 0

  ceph osd add-noout 0
  ceph health detail | grep 'NOOUT'
  ceph osd rm-noout 0
  ! ceph health detail | grep 'NOOUT'

  expect_false ceph osd add-noup 797er
  expect_false ceph osd add-nodown u9uwer
  expect_false ceph osd add-noin 78~15

  expect_false ceph osd rm-noup 1234567
  expect_false ceph osd rm-nodown fsadf7
  expect_false ceph osd rm-noout 790-fd

  ids=`ceph osd ls-tree default`
  for osd in $ids
  do
    ceph osd add-nodown $osd
    ceph osd add-noout $osd
  done
  ceph -s | grep 'NODOWN'
  ceph -s | grep 'NOOUT'
  ceph osd rm-nodown any
  ceph osd rm-noout all
  ! ceph -s | grep 'NODOWN'
  ! ceph -s | grep 'NOOUT'

  # test crush node flags
  ceph osd add-noup osd.0
  ceph osd add-nodown osd.0
  ceph osd add-noin osd.0
  ceph osd add-noout osd.0
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
  ceph osd rm-noup osd.0
  ceph osd rm-nodown osd.0
  ceph osd rm-noin osd.0
  ceph osd rm-noout osd.0
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"

  ceph osd crush add-bucket foo host root=default
  ceph osd add-noup foo
  ceph osd add-nodown foo
  ceph osd add-noin foo
  ceph osd add-noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
  ceph osd rm-noup foo
  ceph osd rm-nodown foo
  ceph osd rm-noin foo
  ceph osd rm-noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
  ceph osd add-noup foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
  ceph osd crush rm foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo

  ceph osd set-group noup osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd set-group noup,nodown osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
  ceph osd set-group noup,nodown,noin osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
  ceph osd set-group noup,nodown,noin,noout osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
  ceph osd unset-group noup osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
  ceph osd unset-group noup,nodown osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
  ceph osd unset-group noup,nodown,noin osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
  ceph osd unset-group noup,nodown,noin,noout osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'

  ceph osd set-group noup,nodown,noin,noout osd.0 osd.1
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout'
  ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
  ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout'

  ceph osd set-group noup all
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd unset-group noup all
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'

  ceph osd crush add-bucket foo host root=default
  ceph osd set-group noup foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
  ceph osd set-group noup,nodown foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
  ceph osd set-group noup,nodown,noin foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd set-group noup,nodown,noin,noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'

  ceph osd unset-group noup foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
  ceph osd unset-group noup,nodown foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
  ceph osd unset-group noup,nodown,noin foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
  ceph osd unset-group noup,nodown,noin,noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout'

  ceph osd set-group noin,noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
  ceph osd unset-group noin,noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'

  ceph osd set-group noup,nodown,noin,noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
  ceph osd crush rm foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
1706 # test device class flags
1707 osd_0_device_class
=$
(ceph osd crush get-device-class osd
.0)
  ceph osd set-group noup $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
  ceph osd set-group noup,nodown $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
  ceph osd set-group noup,nodown,noin $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
  ceph osd set-group noup,nodown,noin,noout $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
  ceph osd unset-group noup $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
  ceph osd unset-group noup,nodown $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
  ceph osd unset-group noup,nodown,noin $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
  ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout'
  ceph osd set-group noin,noout $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
  ceph osd unset-group noin,noout $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep $osd_0_device_class
  # make sure mark out preserves weight
  ceph osd reweight osd.0 .5
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
  ceph osd out 0
  ceph osd in 0
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
  ceph osd getmap -o $f

  save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
  ceph osd setmaxosd 10
  ceph osd getmaxosd | grep 'max_osd = 10'
  ceph osd setmaxosd $save
  ceph osd getmaxosd | grep "max_osd = $save"
  for id in `ceph osd ls` ; do
    retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
  done

  ceph osd rm 0 2>&1 | grep 'EBUSY'
  local old_osds=$(echo $(ceph osd ls))
  id=`ceph osd create`
  ceph osd lost $id --yes-i-really-mean-it
  expect_false ceph osd setmaxosd $id
  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id
  done
  uuid=$(uuidgen)  # fresh uuid for the create-with-uuid tests
  id=`ceph osd create $uuid`
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  ceph osd rm $id

  ceph osd setmaxosd $id
  ceph osd getmaxosd | grep "max_osd = $save"
  max_osd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
  ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'

  id=`ceph osd create $uuid $max_osd`
  [ "$id" = "$max_osd" ]

  max_osd=$((max_osd + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"
  ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
  ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  id2=`ceph osd create $uuid $id`
  [ "$id" = "$id2" ]
  ceph osd rm $id
  uuid=$(uuidgen)
  local gap_start=$max_osd
  id=`ceph osd create $uuid $((gap_start + 100))`
  [ "$id" = "$((gap_start + 100))" ]
  max_osd=$((id + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
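  # Ids are handed out lowest-free-first; creating explicitly at
  # gap_start+100 leaves ids [gap_start, gap_start+100) unused, and the
  # anonymous creates below are expected to fill that gap from the bottom.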
  # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create is repeated and
  # consumes two osd ids, not just one.
  local next_osd=$gap_start
  id=`ceph osd create $(uuidgen)`
  [ "$id" = "$next_osd" ]

  next_osd=$((id + 1))
  id=`ceph osd create $(uuidgen) $next_osd`
  [ "$id" = "$next_osd" ]
  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id
  done

  ceph osd setmaxosd $save
  ceph osd pool create data 16
  ceph osd pool application enable data rados
  ceph osd lspools | grep data
  ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
  ceph osd map data foo namespace | grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
  ceph osd pool delete data data --yes-i-really-really-mean-it
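  # The "ceph osd map" line matched above looks roughly like (abridged;
  # exact fields vary by release):
  #   osdmap e42 pool 'data' (1) object 'foo' -> pg 1.7fc1f406 (1.6) -> up ([1,0], p1) acting ([1,0], p1)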
  ceph osd pause
  ceph osd dump | grep 'flags.*pauserd,pausewr'
  ceph osd unpause
  ceph osd tree destroyed
  ceph osd tree up out
  ceph osd tree down in
  ceph osd tree down out
  ceph osd tree out down
  expect_false ceph osd tree up down
  expect_false ceph osd tree up destroyed
  expect_false ceph osd tree down destroyed
  expect_false ceph osd tree up down destroyed
  expect_false ceph osd tree in out
  expect_false ceph osd tree up foo

  ceph osd count-metadata os
  ceph osd stat | grep up
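  # up/down and in/out are independent state axes, so mixed filters like
  # "up out" are valid while contradictory ones ("up down", "in out") are
  # rejected above.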
}

function test_mon_crush()
{
  local f=$TEMP_DIR/crushmap  # scratch file for the crush map round-trip

  epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
  nextepoch=$(( $epoch + 1 ))
  echo epoch $epoch nextepoch $nextepoch
  expect_false ceph osd setcrushmap $nextepoch -i $f
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  # should be idempotent
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
}

function test_mon_osd_pool()
{
  ceph osd pool create data 16
  ceph osd pool application enable data rados
  ceph osd pool mksnap data datasnap
  rados -p data lssnap | grep datasnap
  ceph osd pool rmsnap data datasnap
  expect_false ceph osd pool rmsnap pool_fake snapshot
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pool create data2 16
  ceph osd pool application enable data2 rados
  ceph osd pool rename data2 data3
  ceph osd lspools | grep data3
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
  ceph osd pool create replicated 16 16 replicated
  ceph osd pool create replicated 1 16 replicated
  ceph osd pool create replicated 16 16 # default is replicated
  ceph osd pool create replicated 16    # default is replicated, pgp_num = pg_num
  ceph osd pool application enable replicated rados
  # should fail because the type is not the same
  expect_false ceph osd pool create replicated 16 16 erasure
  ceph osd lspools | grep replicated
  ceph osd pool create ec_test 1 1 erasure
  ceph osd pool application enable ec_test rados
  ceph osd count-metadata osd_objectstore | grep 'bluestore'
  if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
    ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
    check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
  else
    ceph osd pool set ec_test allow_ec_overwrites true || return 1
    expect_false ceph osd pool set ec_test allow_ec_overwrites false
  fi

  ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
  ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
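  # Note: allow_ec_overwrites is one-way above; once enabled on a
  # bluestore-backed pool it cannot be turned back off.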
  # test create pool with rule
  ceph osd erasure-code-profile set foo foo
  ceph osd erasure-code-profile ls | grep foo
  ceph osd crush rule create-erasure foo foo
  ceph osd pool create erasure 16 16 erasure foo
  expect_false ceph osd erasure-code-profile rm foo
  ceph osd pool delete erasure erasure --yes-i-really-really-mean-it
  ceph osd crush rule rm foo
  ceph osd erasure-code-profile rm foo
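  # An erasure-code profile cannot be removed while a rule or pool still
  # references it, which is why the first rm is expected to fail and the
  # final rm only succeeds after the pool and rule are gone.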
  ceph osd pool create modeon --autoscale-mode=on
  ceph osd dump | grep modeon | grep 'autoscale_mode on'
  ceph osd pool create modewarn --autoscale-mode=warn
  ceph osd dump | grep modewarn | grep 'autoscale_mode warn'
  ceph osd pool create modeoff --autoscale-mode=off
  ceph osd dump | grep modeoff | grep 'autoscale_mode off'
  ceph osd pool delete modeon modeon --yes-i-really-really-mean-it
  ceph osd pool delete modewarn modewarn --yes-i-really-really-mean-it
  ceph osd pool delete modeoff modeoff --yes-i-really-really-mean-it
}

function test_mon_osd_pool_quota()
{
  # test osd pool set/get quota
  ceph osd pool create tmp-quota-pool 32
  ceph osd pool application enable tmp-quota-pool rados
  # set erroneous quotas
  expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
  expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
  expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa

  ceph osd pool set-quota tmp-quota-pool max_bytes 10
  ceph osd pool set-quota tmp-quota-pool max_objects 10M

  # get quotas in json-pretty format
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_objects":.*10000000'
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_bytes":.*10'

  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects'
  # set valid quotas with unit prefix
  ceph osd pool set-quota tmp-quota-pool max_bytes 10K
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'

  # set valid quotas with unit prefix
  ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'

  ceph osd pool set-quota tmp-quota-pool max_bytes 0
  ceph osd pool set-quota tmp-quota-pool max_objects 0
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
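  # Unit semantics exercised above: object quotas take SI prefixes
  # (10M -> 10000000 objects) while byte quotas are binary either way
  # (10K and 10Ki both display as "10 Ki"), and a quota of 0 means
  # "no limit" (shown as N/A).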
  ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
}

function test_mon_pg()
{
  # Make sure we start healthy.
  wait_for_clean

  ceph pg debug unfound_objects_exist
  ceph pg debug degraded_pgs_exist
  ceph pg deep-scrub 1.0
  ceph pg dump pgs_brief --format=json
  ceph pg dump pgs --format=json
  ceph pg dump pools --format=json
  ceph pg dump osds --format=json
  ceph pg dump sum --format=json
  ceph pg dump all --format=json
  ceph pg dump pgs_brief osds --format=json
  ceph pg dump pools osds pgs_brief --format=json
  ceph pg dump_pools_json
  ceph pg dump_stuck inactive
  ceph pg dump_stuck unclean
  ceph pg dump_stuck stale
  ceph pg dump_stuck undersized
  ceph pg dump_stuck degraded
  expect_false ceph pg ls scrubq
  ceph pg ls active stale repair recovering
  ceph pg ls 1 active stale
  ceph pg ls-by-primary osd.0
  ceph pg ls-by-primary osd.0 1
  ceph pg ls-by-primary osd.0 active
  ceph pg ls-by-primary osd.0 active stale
  ceph pg ls-by-primary osd.0 1 active stale
  ceph pg ls-by-osd osd.0
  ceph pg ls-by-osd osd.0 1
  ceph pg ls-by-osd osd.0 active
  ceph pg ls-by-osd osd.0 active stale
  ceph pg ls-by-osd osd.0 1 active stale
  ceph pg ls-by-pool rbd
  ceph pg ls-by-pool rbd active stale
  # can't test this...
  # ceph pg force_create_pg
  ceph pg getmap -o $TEMP_DIR/map.$$
  [ -s $TEMP_DIR/map.$$ ]
  ceph pg map 1.0 | grep acting
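  # "ceph pg map" output looks along the lines of (a sketch):
  #   osdmap e45 pg 1.0 (1.0) -> up [2,0,1] acting [2,0,1]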
  ceph osd set-full-ratio .962
  ceph osd dump | grep '^full_ratio 0.962'
  ceph osd set-backfillfull-ratio .912
  ceph osd dump | grep '^backfillfull_ratio 0.912'
  ceph osd set-nearfull-ratio .892
  ceph osd dump | grep '^nearfull_ratio 0.892'
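  # The monitor expects nearfull < backfillfull < full; the next block
  # deliberately violates that ordering to trigger OSD_OUT_OF_ORDER_FULL.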
  # Check health status
  ceph osd set-nearfull-ratio .913
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-nearfull-ratio .892
  ceph osd set-backfillfull-ratio .963
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-backfillfull-ratio .912
  # Check injected full results
  $SUDO ceph tell osd.0 injectfull nearfull
  wait_for_health "OSD_NEARFULL"
  ceph health detail | grep "osd.0 is near full"
  $SUDO ceph tell osd.0 injectfull none
  wait_for_health_ok

  $SUDO ceph tell osd.1 injectfull backfillfull
  wait_for_health "OSD_BACKFILLFULL"
  ceph health detail | grep "osd.1 is backfill full"
  $SUDO ceph tell osd.1 injectfull none
  wait_for_health_ok

  $SUDO ceph tell osd.2 injectfull failsafe
  # failsafe and full are the same as far as the monitor is concerned
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.2 is full"
  $SUDO ceph tell osd.2 injectfull none
  wait_for_health_ok

  $SUDO ceph tell osd.0 injectfull full
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.0 is full"
  $SUDO ceph tell osd.0 injectfull none
  wait_for_health_ok
  ceph pg stat | grep 'pgs:'

  first=$(ceph mon dump -f json | jq -r '.mons[0].name')
  ceph tell mon.$first quorum enter

  ceph report | grep osd_stats

  ceph tell osd.0 version
  expect_false ceph tell osd.9999 version
  expect_false ceph tell osd.foo version

  ceph tell osd.0 dump_pg_recovery_stats | grep Started
  ceph osd reweight 0 0.9
  expect_false ceph osd reweight 0 -1
  ceph osd reweight osd.0 1

  ceph osd primary-affinity osd.0 .9
  expect_false ceph osd primary-affinity osd.0 -2
  expect_false ceph osd primary-affinity osd.9999 .5
  ceph osd primary-affinity osd.0 1
  ceph osd pool set rbd size 2
  ceph osd pg-temp 1.0 0 1
  ceph osd pg-temp 1.0 osd.1 osd.0
  expect_false ceph osd pg-temp 1.0 0 1 2
  expect_false ceph osd pg-temp asdf qwer
  expect_false ceph osd pg-temp 1.0 asdf
  ceph osd pg-temp 1.0 # cleanup pg-temp

  expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore

  # don't test ceph osd primary-temp for now
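  # pg-temp pins a temporary acting set for a pg; it accepts at most
  # "size" osds (hence the expect_false with three ids on the size-2 rbd
  # pool), and listing no osds clears the pin again.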
}

function test_mon_osd_pool_set()
{
  TEST_POOL_GETSET=pool_getset
  ceph osd pool create $TEST_POOL_GETSET 1
  ceph osd pool application enable $TEST_POOL_GETSET rados
  ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off

  ceph osd pool get $TEST_POOL_GETSET all
  for s in pg_num pgp_num size min_size crush_rule ; do
    ceph osd pool get $TEST_POOL_GETSET $s
  done

  old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
  (( new_size = old_size + 1 ))
  ceph osd pool set $TEST_POOL_GETSET size $new_size
  ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
  ceph osd pool set $TEST_POOL_GETSET size $old_size
  ceph osd pool create pool_erasure 1 1 erasure
  ceph osd pool application enable pool_erasure rados

  ceph osd pool set pool_erasure size 4444 2>$TMPFILE
  check_response 'not change the size'

  ceph osd pool get pool_erasure erasure_code_profile
  ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it
  for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub ; do
    ceph osd pool set $TEST_POOL_GETSET $flag false
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
    ceph osd pool set $TEST_POOL_GETSET $flag true
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
    ceph osd pool set $TEST_POOL_GETSET $flag 1
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
    ceph osd pool set $TEST_POOL_GETSET $flag 0
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
    expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
    expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
  done
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
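  # An unset per-pool interval prints nothing on "get", so the
  # "| expect_false grep '.'" idiom doubles as an is-unset check; setting
  # a value back to 0 removes the override again.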
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
  expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11
  expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11

  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
  ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
  ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET nopgchange 1
  expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
  expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
  ceph osd pool set $TEST_POOL_GETSET nopgchange 0
  ceph osd pool set $TEST_POOL_GETSET pg_num 10
  wait_for_clean
  ceph osd pool set $TEST_POOL_GETSET pgp_num 10
  expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0
  expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0
  old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
  new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
  ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
  ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
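  # Worked example: with 3 osds and old_pgs=10 this asks for
  # new_pgs = 10 + 3 * 32 = 106.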
  ceph osd pool set $TEST_POOL_GETSET nosizechange 1
  expect_false ceph osd pool set $TEST_POOL_GETSET size 2
  expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
  ceph osd pool set $TEST_POOL_GETSET nosizechange 0
  ceph osd pool set $TEST_POOL_GETSET size 2
  ceph osd pool set $TEST_POOL_GETSET min_size 2
  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
  ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it

  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
  ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it

  ceph osd pool get rbd crush_rule | grep 'crush_rule: '
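  # hashpspool changes how pgs are hashed onto osds and therefore moves
  # data around, so flipping it in either direction requires the
  # --yes-i-really-mean-it override above.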
  ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
  ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
  ceph osd pool set $TEST_POOL_GETSET compression_mode unset
  ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
  ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
  expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
  expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
  ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
  ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
  ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
  ceph osd pool set $TEST_POOL_GETSET csum_type unset
  ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
  for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block ; do
    ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET $size 100
    ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
    ceph osd pool set $TEST_POOL_GETSET $size 0
    ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
  done
  ceph osd pool set $TEST_POOL_GETSET nodelete 1
  expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
  ceph osd pool set $TEST_POOL_GETSET nodelete 0
  ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
}

function test_mon_osd_tiered_pool_set()
{
  # this is really a tier pool
  ceph osd pool create real-tier 2
  ceph osd tier add rbd real-tier

  # expect us to be unable to set negative values for hit_set_*
  for o in hit_set_period hit_set_count hit_set_fpp ; do
    expect_false ceph osd pool set real_tier $o -1
  done

  # and hit_set_fpp should be in range 0..1
  expect_false ceph osd pool set real_tier hit_set_fpp 2
  ceph osd pool set real-tier hit_set_type explicit_hash
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
  ceph osd pool set real-tier hit_set_type explicit_object
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
  ceph osd pool set real-tier hit_set_type bloom
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
  expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
  ceph osd pool set real-tier hit_set_period 123
  ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
  ceph osd pool set real-tier hit_set_count 12
  ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
  ceph osd pool set real-tier hit_set_fpp .01
  ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
  ceph osd pool set real-tier target_max_objects 123
  ceph osd pool get real-tier target_max_objects | \
    grep 'target_max_objects:[ \t]\+123'
  ceph osd pool set real-tier target_max_bytes 123456
  ceph osd pool get real-tier target_max_bytes | \
    grep 'target_max_bytes:[ \t]\+123456'
  ceph osd pool set real-tier cache_target_dirty_ratio .123
  ceph osd pool get real-tier cache_target_dirty_ratio | \
    grep 'cache_target_dirty_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
  ceph osd pool set real-tier cache_target_dirty_high_ratio .123
  ceph osd pool get real-tier cache_target_dirty_high_ratio | \
    grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
  ceph osd pool set real-tier cache_target_full_ratio .123
  ceph osd pool get real-tier cache_target_full_ratio | \
    grep 'cache_target_full_ratio:[ \t]\+0.123'
  ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
  ceph osd pool set real-tier cache_target_full_ratio 1.0
  ceph osd pool set real-tier cache_target_full_ratio 0
  expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
  ceph osd pool set real-tier cache_min_flush_age 123
  ceph osd pool get real-tier cache_min_flush_age | \
    grep 'cache_min_flush_age:[ \t]\+123'
  ceph osd pool set real-tier cache_min_evict_age 234
  ceph osd pool get real-tier cache_min_evict_age | \
    grep 'cache_min_evict_age:[ \t]\+234'
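  # Ratios are stored internally in micro units: cache_target_full_ratio
  # .123 surfaces as "cache_target_full_ratio_micro": 123000 in the dump
  # checked above.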
  ceph osd pool set real-tier target_max_objects 1K
  ceph osd pool get real-tier target_max_objects | grep 1000
  for o in target_max_bytes target_size_bytes compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block ; do
    ceph osd pool set real-tier $o 1Ki   # with i suffix
    val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
    [[ $val == 1024 ]]
    ceph osd pool set real-tier $o 1M    # no i suffix
    val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
    [[ $val == 1048576 ]]
  done
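  # For byte-valued pool options both suffix spellings are binary:
  # 1Ki -> 1024 and 1M -> 1048576, matching the quota behaviour exercised
  # in test_mon_osd_pool_quota; object counts (1K -> 1000) stay SI.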
  # this is not a tier pool
  ceph osd pool create fake-tier 2
  ceph osd pool application enable fake-tier rados

  expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type explicit_object
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type bloom
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
  expect_false ceph osd pool set fake-tier hit_set_period 123
  expect_false ceph osd pool get fake-tier hit_set_period
  expect_false ceph osd pool set fake-tier hit_set_count 12
  expect_false ceph osd pool get fake-tier hit_set_count
  expect_false ceph osd pool set fake-tier hit_set_fpp .01
  expect_false ceph osd pool get fake-tier hit_set_fpp
  expect_false ceph osd pool set fake-tier target_max_objects 123
  expect_false ceph osd pool get fake-tier target_max_objects
  expect_false ceph osd pool set fake-tier target_max_bytes 123456
  expect_false ceph osd pool get fake-tier target_max_bytes
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_full_ratio
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_min_flush_age 123
  expect_false ceph osd pool get fake-tier cache_min_flush_age
  expect_false ceph osd pool set fake-tier cache_min_evict_age 234
  expect_false ceph osd pool get fake-tier cache_min_evict_age

  ceph osd tier remove rbd real-tier
  ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
  ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
}

function test_mon_osd_erasure_code()
{
  ceph osd erasure-code-profile set fooprofile a=b c=d
  ceph osd erasure-code-profile set fooprofile a=b c=d
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
  # make sure ruleset-foo doesn't work anymore
  expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
  ceph osd erasure-code-profile set barprofile crush-failure-domain=host
  ceph osd erasure-code-profile rm fooprofile
  ceph osd erasure-code-profile rm barprofile
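  # Re-setting a profile with the identical key=value pairs is idempotent;
  # changing an existing profile requires --force, which is what the
  # expect_false / --force pair above exercises.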
  # try weird k and m values
  expect_false ceph osd erasure-code-profile set badk k=1 m=1
  expect_false ceph osd erasure-code-profile set badk k=1 m=2
  expect_false ceph osd erasure-code-profile set badk k=0 m=2
  expect_false ceph osd erasure-code-profile set badk k=-1 m=2
  expect_false ceph osd erasure-code-profile set badm k=2 m=0
  expect_false ceph osd erasure-code-profile set badm k=2 m=-1
  ceph osd erasure-code-profile set good k=2 m=1
  ceph osd erasure-code-profile rm good
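  # k is the number of data chunks and m the number of coding chunks; a
  # profile needs k >= 2 and m >= 1, so k=2 m=1 is the smallest accepted
  # combination above.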
}

function test_mon_osd_misc()
{
  set +e

  # expect error about missing 'pool' argument
  ceph osd map 2>$TMPFILE; check_response 'pool' $? 22

  # expect error about unused argument foo
  ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22

  # expect "not in range" for invalid overload percentage
  ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22

  set -e
  local old_bytes_per_osd=$(ceph config get mgr mon_reweight_min_bytes_per_osd)
  local old_pgs_per_osd=$(ceph config get mgr mon_reweight_min_pgs_per_osd)
  # otherwise ceph-mgr complains like:
  # Error EDOM: Refusing to reweight: we only have 5372 kb used across all osds!
  # Error EDOM: Refusing to reweight: we only have 20 PGs across 3 osds!
  ceph config set mgr mon_reweight_min_bytes_per_osd 0
  ceph config set mgr mon_reweight_min_pgs_per_osd 0
  ceph osd reweight-by-utilization 110
  ceph osd reweight-by-utilization 110 .5
  expect_false ceph osd reweight-by-utilization 110 0
  expect_false ceph osd reweight-by-utilization 110 -0.1
  ceph osd test-reweight-by-utilization 110 .5 --no-increasing
  ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
  ceph osd reweight-by-pg 110
  ceph osd test-reweight-by-pg 110 .5
  ceph osd reweight-by-pg 110 rbd
  ceph osd reweight-by-pg 110 .5 rbd
  expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
  # restore the settings
  ceph config set mgr mon_reweight_min_bytes_per_osd $old_bytes_per_osd
  ceph config set mgr mon_reweight_min_pgs_per_osd $old_pgs_per_osd
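  # The leading number is the overload threshold in percent of mean
  # utilization: only osds above 110% of the mean get reweighted here, and
  # thresholds <= 100 are rejected (the 'higher than 100' check earlier).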
}

function test_admin_heap_profiler()
{
  local do_test=1

  # expect 'heap' commands to be correctly parsed
  ceph tell osd.0 heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi

  [[ $do_test -eq 0 ]] && return 0

  $SUDO ceph tell osd.0 heap start_profiler
  $SUDO ceph tell osd.0 heap dump
  $SUDO ceph tell osd.0 heap stop_profiler
  $SUDO ceph tell osd.0 heap release
}

function test_osd_bench()
{
  # test osd bench limits
  # As we should not rely on defaults (as they may change over time),
  # let's inject some values and perform some simple tests
  # max iops: 10 (i.e. at most 100 ops over the 10 second duration)
  # max throughput: 10485760   # 10MB/s
  # max block size: 2097152    # 2MB
  # duration: 10               # 10 seconds

  local args="\
    --osd-bench-duration 10 \
    --osd-bench-max-block-size 2097152 \
    --osd-bench-large-size-max-throughput 10485760 \
    --osd-bench-small-size-max-iops 10"
  ceph tell osd.0 injectargs ${args## }
  # anything with a bs larger than 2097152 must fail
  expect_false ceph tell osd.0 bench 1 2097153
  # but using 'osd_bench_max_bs' must succeed
  ceph tell osd.0 bench 1 2097152

  # we assume 1MB as a large bs; anything lower is a small bs
  # for a 4096 byte bs, for 10 seconds, we are limited by IOPS
  # max count: 409600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 409601 4096
  # but 409600 must succeed
  ceph tell osd.0 bench 409600 4096

  # for a large bs, we are limited by throughput.
  # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
  # the max count will be (10MB * 10s) = 100MB
  # max count: 104857600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 104857601 2097152
  # up to max count must be allowed
  ceph tell osd.0 bench 104857600 2097152
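  # Worked numbers: 10 IOPS * 10 s * 4096 B = 409600 B caps the small-bs
  # case, and 10485760 B/s * 10 s = 104857600 B caps the large-bs case.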
}

function test_osd_negative_filestore_merge_threshold()
{
  $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
  expect_config_value "osd.0" "filestore_merge_threshold" -1
}

function test_mon_tell()
{
  for m in mon.a mon.b ; do
    ceph tell $m sessions
    ceph_watch_start debug audit
    ceph tell $m sessions
    ceph_watch_wait "${m} \[DBG\] from.*cmd='sessions' args=\[\]: dispatch"
  done

  expect_false ceph tell mon.foo version
}

function test_mon_ping()
{
  expect_false ceph ping mon.foo
}

function test_mon_deprecated_commands()
{
  # current DEPRECATED commands are marked with FLAG(DEPRECATED)
  #
  # Testing should be accomplished by setting
  # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
  # each one of these commands.

  ceph tell mon.\* injectargs '--mon-debug-deprecated-as-obsolete'
  expect_false ceph config-key list 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  ceph tell mon.\* injectargs '--no-mon-debug-deprecated-as-obsolete'
}

function test_mon_cephdf_commands()
{
  # RAW USED: the raw space consumed per pool, out of the raw total

  ceph osd pool create cephdf_for_test 1 1 replicated
  ceph osd pool application enable cephdf_for_test rados
  ceph osd pool set cephdf_for_test size 2

  dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
  rados put cephdf_for_test cephdf_for_test -p cephdf_for_test

  # wait for the object to land on the osds
  for i in `seq 1 10`; do
    rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
    sleep 1
  done

  # "rados ls" goes straight to the osd, but "ceph df" is served by the mon,
  # so we need to sync the mon with the osd
  flush_pg_stats

  local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
  stored=`ceph df detail --format=json | jq "$jq_filter.stored * 2"`
  stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`

  ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
  rm ./cephdf_for_test

  expect_false test $stored != $stored_raw
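  # With a size-2 replicated pool the raw usage is twice the logical
  # usage, which is why .stored is multiplied by 2 before being compared
  # against .stored_raw.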
}

function test_mon_pool_application()
{
  ceph osd pool create app_for_test 16

  ceph osd pool application enable app_for_test rbd
  expect_false ceph osd pool application enable app_for_test rgw
  ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool ls detail | grep "application rbd,rgw"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
  expect_false ceph osd pool application set app_for_test cephfs key value
  ceph osd pool application set app_for_test rbd key1 value1
  ceph osd pool application set app_for_test rbd key2 value2
  ceph osd pool application set app_for_test rgw key1 value1
  ceph osd pool application get app_for_test rbd key1 | grep 'value1'
  ceph osd pool application get app_for_test rbd key2 | grep 'value2'
  ceph osd pool application get app_for_test rgw key1 | grep 'value1'

  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'

  ceph osd pool application rm app_for_test rgw key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key2
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1 # should be idempotent
  expect_false ceph osd pool application disable app_for_test rgw
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
  ceph osd pool ls detail | grep "application rbd"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'

  ceph osd pool application disable app_for_test rbd --yes-i-really-mean-it
  ceph osd pool ls detail | grep -v "application "
  ceph osd pool ls detail --format=json | grep '"application_metadata":{}'

  ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
}

function test_mon_tell_help_command()
{
  ceph tell mon.a help | grep sync_force
  ceph tell mon.a -h | grep sync_force
  ceph tell mon.a config -h | grep 'config diff get'

  expect_false ceph tell mon.zzz help
}

function test_mon_stdin_stdout()
{
  echo foo | ceph config-key set test_key -i -
  ceph config-key get test_key -o - | grep -c foo | grep -q 1
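  # "-i -" reads the value from stdin and "-o -" writes it to stdout,
  # which is what makes the echo/grep round-trip above work.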
}

function test_osd_tell_help_command()
{
  ceph tell osd.1 help
  expect_false ceph tell osd.100 help
}

function test_osd_compact()
{
  ceph tell osd.1 compact
  $SUDO ceph daemon osd.1 compact
}

function test_mds_tell_help_command()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  ceph osd pool create fs_data 16
  ceph osd pool create fs_metadata 16
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  ceph tell mds.a help
  expect_false ceph tell mds.z help

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mgr_tell()
{
  ceph tell mgr version
}

function test_mgr_devices()
{
  expect_false ceph device info doesnotexist
  expect_false ceph device get-health-metrics doesnotexist
}

function test_per_pool_scrub_status()
{
  ceph osd pool create noscrub_pool 16
  ceph osd pool create noscrub_pool2 16
  ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
  ceph -s --format json | \
    jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
    expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
  ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_false grep -q "Pool .* has .*scrub.* flag"
  ceph health detail | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_false grep -q "Pool .* has .*scrub.* flag"
  ceph osd pool set noscrub_pool noscrub 1
  ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
  ceph -s --format json | \
    jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
    expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
  ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_true grep -q "Pool noscrub_pool has noscrub flag"
  ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
  ceph osd pool set noscrub_pool nodeep-scrub 1
  ceph osd pool set noscrub_pool2 nodeep-scrub 1
  ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
  ceph -s --format json | \
    jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
    expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
  ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_true grep -q "Pool noscrub_pool has noscrub flag"
  ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
  ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
  ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
  ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
  ceph health detail | expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"

  ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it
  ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it
}
#
# New tests should be added to the TESTS array below
#
# Individual tests may be run using the '-t <testname>' argument
# The user can specify '-t <testname>' as many times as she wants
#
# Tests will be run in the order presented in the TESTS array, or in
# the order specified by the '-t <testname>' options.
#
# '-l' will list all the available test names
# '-h' will show usage
#
# The test maintains backward compatibility: not specifying arguments
# will run all tests following the order they appear in the TESTS array.
#
MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
for i in `seq 9`; do
  MON_TESTS+=" tiering_$i"
done
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
MON_TESTS+=" mon_config_key"
MON_TESTS+=" mon_crush"
MON_TESTS+=" mon_osd_create_destroy"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_pool_set"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
MON_TESTS+=" mon_tell"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"
MON_TESTS+=" mon_tell_help_command"
MON_TESTS+=" mon_stdin_stdout"
OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"
OSD_TESTS+=" admin_heap_profiler"
OSD_TESTS+=" osd_tell_help_command"
OSD_TESTS+=" osd_compact"
OSD_TESTS+=" per_pool_scrub_status"

MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"
MDS_TESTS+=" mds_tell_help_command"

MGR_TESTS+=" mgr_tell"
MGR_TESTS+=" mgr_devices"

TESTS+=$MON_TESTS
TESTS+=$OSD_TESTS
TESTS+=$MDS_TESTS
TESTS+=$MGR_TESTS
function list_tests()
{
  echo "AVAILABLE TESTS"
  for i in $TESTS; do
    echo "  $i"
  done
}

function usage()
{
  echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
}
do_list=0
sanity_check=true
tests_to_run=()

while [[ $# -gt 0 ]]; do
  opt=$1
  case "$opt" in
    "-l" )
      do_list=1
      ;;
    "--asok-does-not-need-root" )
      SUDO=""
      ;;
    "--no-sanity-check" )
      sanity_check=false
      ;;
    "--test-mon" )
      tests_to_run+="$MON_TESTS"
      ;;
    "--test-osd" )
      tests_to_run+="$OSD_TESTS"
      ;;
    "--test-mds" )
      tests_to_run+="$MDS_TESTS"
      ;;
    "--test-mgr" )
      tests_to_run+="$MGR_TESTS"
      ;;
    "-t" )
      shift
      if [[ -z "$1" ]]; then
        echo "missing argument to '-t'"
        usage
        exit 1
      fi
      tests_to_run+=" $1"
      ;;
    "-h" )
      usage
      exit 0
      ;;
  esac
  shift
done
if [[ $do_list -eq 1 ]]; then
  list_tests
  exit 0
fi

ceph osd pool create rbd 16

if test -z "$tests_to_run" ; then
  tests_to_run="$TESTS"
fi

if $sanity_check ; then
  wait_no_osd_down
fi
for i in $tests_to_run; do
  if $sanity_check ; then
    check_no_osd_down
  fi
  test_${i}
done
if $sanity_check ; then
  check_no_osd_down
fi

echo OK