# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab

source $(dirname $0)/../../standalone/ceph-helpers.sh

PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
function check_no_osd_down()
{
  ! ceph osd dump | grep ' down '
}
function wait_no_osd_down()
{
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_no_osd_down ; then
      echo "waiting for osd(s) to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  check_no_osd_down
}
function expect_false()
{
  if "$@"; then return 1; else return 0; fi
}

function expect_true()
{
  if ! "$@"; then return 1; else return 0; fi
}
TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0

TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
#
# retry_eagain max cmd args ...
#
# retry cmd args ... if it exits on error and its output contains the
# string EAGAIN, at most $max times
#
function retry_eagain()
{
  local max=$1
  shift
  local status
  local tmpfile=$TEMP_DIR/retry_eagain.$$
  local count
  for count in $(seq 1 $max) ; do
    status=0
    "$@" > $tmpfile 2>&1 || status=$?
    if test $status = 0 ||
        ! grep --quiet EAGAIN $tmpfile ; then
      break
    fi
    sleep 1
  done
  if test $count = $max ; then
    echo retried with non zero exit status, $max times: "$@" >&2
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}
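
# A minimal usage sketch (the command below is illustrative, not part of
# the test): run a command up to 3 times, retrying only while its output
# contains EAGAIN, and keep the last exit status:
#
#   retry_eagain 3 ceph osd dump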
#
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of cmd arg ... if the output contains
# ENXIO
#
function map_enxio_to_eagain()
{
  local status=0
  local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$

  "$@" > $tmpfile 2>&1 || status=$?
  if test $status != 0 &&
      grep --quiet ENXIO $tmpfile ; then
    echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}
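
# Sketch of the intended pairing (assumed usage, not asserted anywhere in
# this file): rewrite ENXIO output so retry_eagain treats it as retryable:
#
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version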
function check_response()
{
  expected_string=$1
  retcode=$2
  expected_retcode=$3
  if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
    echo "return code invalid: got $retcode, expected $expected_retcode" >&2
    exit 1
  fi

  if ! grep --quiet -- "$expected_string" $TMPFILE ; then
    echo "Didn't find $expected_string in output" >&2
    cat $TMPFILE >&2
    exit 1
  fi
}
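
# Calling convention, as used by the tests below: capture stderr in
# $TMPFILE, then assert on the message (and optionally the return code,
# e.g. check_response 'erasure-code' $? 22):
#
#   ceph osd tier remove slow cache3 2> $TMPFILE || true
#   check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"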
function get_config_value_or_die()
{
  local target config_opt raw val

  target=$1
  config_opt=$2

  raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
  if [[ $? -ne 0 ]]; then
    echo "error obtaining config opt '$config_opt' from '$target': $raw"
    exit 1
  fi

  raw=`echo $raw | sed -e 's/[{} "]//g'`
  val=`echo $raw | cut -f2 -d:`

  echo "$val"
  return 0
}
function expect_config_value()
{
  local target config_opt expected_val val
  target=$1
  config_opt=$2
  expected_val=$3

  val=$(get_config_value_or_die $target $config_opt)

  if [[ "$val" != "$expected_val" ]]; then
    echo "expected '$expected_val', got '$val'"
    exit 1
  fi
}
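
# Example from the unit-parsing tests below: set an option on mon.a, then
# assert the daemon reports the parsed value back:
#
#   $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000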
function ceph_watch_start()
{
  local whatch_opt=--watch

  if [ -n "$1" ]; then
    whatch_opt=--watch-$1
    if [ -n "$2" ]; then
      whatch_opt+=" --watch-channel $2"
    fi
  fi

  CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
  ceph $whatch_opt > $CEPH_WATCH_FILE &
  CEPH_WATCH_PID=$!

  # wait until the "ceph" client is connected and receiving
  # log messages from monitor
  for i in `seq 3`; do
    grep -q "cluster" $CEPH_WATCH_FILE && break
    sleep 1
  done
}
function ceph_watch_wait()
{
  local regexp=$1
  local timeout=${2:-30}

  for i in `seq ${timeout}`; do
    grep -q "$regexp" $CEPH_WATCH_FILE && break
    sleep 1
  done

  kill $CEPH_WATCH_PID

  if ! grep "$regexp" $CEPH_WATCH_FILE; then
    echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
    cat $CEPH_WATCH_FILE >&2
    return 1
  fi
}
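
# Typical pairing (mirrors test_mon_misc below): start the watcher, emit a
# uniquely tagged log entry, then block until it appears in
# $CEPH_WATCH_FILE:
#
#   ceph_watch_start
#   ceph log "my unique message $$"
#   ceph_watch_wait "my unique message $$"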
function test_mon_injectargs()
{
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker
  ceph tell osd.0 config get osd_enable_op_tracker | grep false
  ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500'
  ceph tell osd.0 config get osd_enable_op_tracker | grep true
  ceph tell osd.0 config get osd_op_history_duration | grep 500
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker
  ceph tell osd.0 config get osd_enable_op_tracker | grep false
  ceph tell osd.0 injectargs -- --osd_enable_op_tracker
  ceph tell osd.0 config get osd_enable_op_tracker | grep true
  ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600'
  ceph tell osd.0 config get osd_enable_op_tracker | grep true
  ceph tell osd.0 config get osd_op_history_duration | grep 600

  ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200'
  ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200

  ceph tell osd.0 injectargs -- '--mon_probe_timeout 2'
  ceph tell osd.0 config get mon_probe_timeout | grep 2

  ceph tell osd.0 injectargs -- '--mon-lease 6'
  ceph tell osd.0 config get mon_lease | grep 6

  # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
  expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 2> $TMPFILE || return 1
  check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"

  expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
      ceph tell osd.0 injectargs -- '--osd_op_history_duration'
}
function test_mon_injectargs_SI()
{
  # Test SI units during injectargs and 'config set'
  # We only aim at testing the units are parsed accordingly
  # and don't intend to test whether the options being set
  # actually expect SI units to be passed.
  # Keep in mind that all integer based options that are not based on bytes
  # (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed to
  # base 10.
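  # For example, SI parsing is base 10: 10K becomes 10000 and 1G becomes
  # 1000000000, which is exactly what the assertions below expect.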
  initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
  check_response "(22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
  expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
  expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}
function test_mon_injectargs_IEC()
{
  # Test IEC units during injectargs and 'config set'
  # We only aim at testing the units are parsed accordingly
  # and don't intend to test whether the options being set
  # actually expect IEC units to be passed.
  # Keep in mind that all integer based options that are based on bytes
  # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
  # unit modifiers (for backwards compatibility and convenience) and be parsed
  # to base 2.
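  # For example, with a byte-based option the assertions below expect 16Gi
  # to parse to 17179869184 (16 * 2^30), and the SI-style spelling 15G to
  # also be treated as base 2, giving 16106127360.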
  initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
  $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
  expect_config_value "mon.a" "mon_data_size_warn" 15000000000
  $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
  expect_config_value "mon.a" "mon_data_size_warn" 16106127360
  $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
  expect_config_value "mon.a" "mon_data_size_warn" 17179869184
  $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
  check_response "(22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
  expect_config_value "mon.a" "mon_data_size_warn" 15000000000
  ceph tell mon.a injectargs '--mon_data_size_warn 15G'
  expect_config_value "mon.a" "mon_data_size_warn" 16106127360
  ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
  expect_config_value "mon.a" "mon_data_size_warn" 17179869184
  expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
  $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
}
function test_tiering_agent()
{
  local slow=slow_eviction
  local fast=fast_eviction
  ceph osd pool create $slow 1 1
  ceph osd pool application enable $slow rados
  ceph osd pool create $fast 1 1
  ceph osd tier add $slow $fast
  ceph osd tier cache-mode $fast writeback
  ceph osd tier set-overlay $slow $fast
  ceph osd pool set $fast hit_set_type bloom
  rados -p $slow put obj1 /etc/group
  ceph osd pool set $fast target_max_objects 1
  ceph osd pool set $fast hit_set_count 1
  ceph osd pool set $fast hit_set_period 5
  # wait for the object to be evicted from the cache
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      break
    fi
    sleep 1
  done
  # the object is proxy read and promoted to the cache
  rados -p $slow get obj1 - >/dev/null
  # wait for the promoted object to be evicted again
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      break
    fi
    sleep 1
  done
  ceph osd tier remove-overlay $slow
  ceph osd tier remove $slow $fast
  ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
  ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}
function test_tiering_1()
{
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create slow2 2
  ceph osd pool application enable slow2 rados
  ceph osd pool create cache 2
  ceph osd pool create cache2 2
  ceph osd tier add slow cache
  ceph osd tier add slow cache2
  expect_false ceph osd tier add slow2 cache
  # application metadata should propagate to the tiers
  ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow") | .application_metadata["rados"]' | grep '{}'
  ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow2") | .application_metadata["rados"]' | grep '{}'
  ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache") | .application_metadata["rados"]' | grep '{}'
  ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache2") | .application_metadata["rados"]' | grep '{}'
  # forward and proxy are removed/deprecated
  expect_false ceph osd tier cache-mode cache forward
  expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache proxy
  expect_false ceph osd tier cache-mode cache proxy --yes-i-really-mean-it
  # test some state transitions
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache readonly
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache readproxy
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  # test with dirty objects in the tier pool
  # tier pool currently set to 'writeback'
  rados -p cache put /etc/passwd /etc/passwd
  # 1 dirty object in pool 'cache'
  ceph osd tier cache-mode cache readproxy
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache writeback
  # remove object from tier pool
  rados -p cache rm /etc/passwd
  rados -p cache cache-flush-evict-all
  # no dirty objects in pool 'cache'
  ceph osd tier cache-mode cache readproxy
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it

  TRIES=0
  while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
  do
    grep 'currently creating pgs' $TMPFILE
    TRIES=$(( $TRIES + 1 ))
    sleep 3
  done
  expect_false ceph osd pool set cache pg_num 4
  ceph osd tier cache-mode cache none
  ceph osd tier set-overlay slow cache
  expect_false ceph osd tier set-overlay slow cache2
  expect_false ceph osd tier remove slow cache
  ceph osd tier remove-overlay slow
  ceph osd tier set-overlay slow cache2
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache
  ceph osd tier add slow2 cache
  expect_false ceph osd tier set-overlay slow cache
  ceph osd tier set-overlay slow2 cache
  ceph osd tier remove-overlay slow2
  ceph osd tier remove slow2 cache
  ceph osd tier remove slow cache2

  # make sure a non-empty pool fails
  rados -p cache2 put /etc/passwd /etc/passwd
  while ! ceph df | grep cache2 | grep ' 1 ' ; do
    echo waiting for pg stats to flush
    sleep 2
  done
  expect_false ceph osd tier add slow cache2
  ceph osd tier add slow cache2 --force-nonempty
  ceph osd tier remove slow cache2

  ceph osd pool ls | grep cache2
  ceph osd pool ls -f json-pretty | grep cache2
  ceph osd pool ls detail | grep cache2
  ceph osd pool ls detail -f json-pretty | grep cache2

  ceph osd pool delete slow slow --yes-i-really-really-mean-it
  ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
  ceph osd pool delete cache cache --yes-i-really-really-mean-it
  ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
}
function test_tiering_2()
{
  # make sure we can't clobber snapshot state
  ceph osd pool create snap_base 2
  ceph osd pool application enable snap_base rados
  ceph osd pool create snap_cache 2
  ceph osd pool mksnap snap_cache snapname
  expect_false ceph osd tier add snap_base snap_cache
  ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
  ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}
function test_tiering_3()
{
  # make sure we can't create snapshot on tier
  ceph osd pool create basex 2
  ceph osd pool application enable basex rados
  ceph osd pool create cachex 2
  ceph osd tier add basex cachex
  expect_false ceph osd pool mksnap cachex snapname
  ceph osd tier remove basex cachex
  ceph osd pool delete basex basex --yes-i-really-really-mean-it
  ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}
function test_tiering_4()
{
  # make sure we can't create an ec pool tier
  ceph osd pool create eccache 2 2 erasure
  expect_false ceph osd set-require-min-compat-client bobtail
  ceph osd pool create repbase 2
  ceph osd pool application enable repbase rados
  expect_false ceph osd tier add repbase eccache
  ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
  ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}
function test_tiering_5()
{
  # convenient add-cache command
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create cache3 2
  ceph osd tier add-cache slow cache3 1024000
  ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
  ceph osd tier remove slow cache3 2> $TMPFILE || true
  check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache3
  ceph osd pool ls | grep cache3
  ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
  ! ceph osd pool ls | grep cache3 || exit 1
  ceph osd pool delete slow slow --yes-i-really-really-mean-it
}
function test_tiering_6()
{
  # check whether add-cache works
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd tier cache-mode cachepool writeback
  rados -p datapool put object /etc/passwd
  rados -p cachepool stat object
  rados -p cachepool cache-flush object
  rados -p datapool stat object
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_7()
{
  # protection against pool removal when used as tiers
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'datapool' has tiers cachepool"
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_8()
{
  ## check health check
  ceph osd set notieragent
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cache4 2
  ceph osd tier add-cache datapool cache4 1024000
  ceph osd tier cache-mode cache4 writeback
  tmpfile=$(mktemp | grep tmp)
  dd if=/dev/zero of=$tmpfile bs=4K count=1
  ceph osd pool set cache4 target_max_objects 200
  ceph osd pool set cache4 target_max_bytes 1000000
  rados -p cache4 put foo1 $tmpfile
  rados -p cache4 put foo2 $tmpfile

  ceph df | grep datapool | grep ' 2 '
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cache4
  ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
  ceph osd unset notieragent
}
function test_tiering_9()
{
  # make sure 'tier remove' behaves as we expect
  # i.e., removing a tier from a pool that's not its base pool only
  # results in a 'pool foo is now (or already was) not a tier of bar'
  #
  ceph osd pool create basepoolA 2
  ceph osd pool application enable basepoolA rados
  ceph osd pool create basepoolB 2
  ceph osd pool application enable basepoolB rados
  poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
  poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')

  ceph osd pool create cache5 2
  ceph osd pool create cache6 2
  ceph osd tier add basepoolA cache5
  ceph osd tier add basepoolB cache6
  ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
  ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"

  ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
  ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1

  ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
  ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1

  ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
  ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
  ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
  ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}
function test_auth()
{
  expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
  expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
  ceph auth add client.xx mon 'allow *' osd "allow *"
  ceph auth export client.xx >client.xx.keyring
  ceph auth add client.xx -i client.xx.keyring
  rm -f client.xx.keyring
  ceph auth list | grep client.xx
  ceph auth ls | grep client.xx
  ceph auth get client.xx | grep caps | grep mon
  ceph auth get client.xx | grep caps | grep osd
  ceph auth get-key client.xx
  ceph auth print-key client.xx
  ceph auth print_key client.xx
  ceph auth caps client.xx osd "allow rw"
  expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
  ceph auth get client.xx | grep osd | grep "allow rw"
  ceph auth caps client.xx mon 'allow command "osd tree"'
  ceph auth export | grep client.xx
  ceph auth export -o authfile
  ceph auth import -i authfile 2>$TMPFILE
  check_response "imported keyring"

  ceph auth export -o authfile2
  diff authfile authfile2
  rm authfile authfile2
  ceph auth del client.xx
  expect_false ceph auth get client.xx

  # (almost) interactive mode
  echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
  ceph auth get client.xx

  echo 'auth del client.xx' | ceph
  expect_false ceph auth get client.xx
}
function test_auth_profiles()
{
  ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
       mgr 'allow profile read-only'
  ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
       mgr 'allow profile read-write'
  ceph auth add client.xx-profile-rd mon 'allow profile role-definer'

  ceph auth export > client.xx.keyring

  # read-only is allowed all read-only commands (auth excluded)
  ceph -n client.xx-profile-ro -k client.xx.keyring status
  ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
  ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
  # read-only gets access denied for rw commands or auth commands
  ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # read-write is allowed for all read-write commands (except auth)
  ceph -n client.xx-profile-rw -k client.xx.keyring status
  ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
  ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
  ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
  ceph -n client.xx-profile-rw -k client.xx.keyring log foo
  ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
  ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
  # read-write gets access denied for auth commands
  ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
  ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
  ceph -n client.xx-profile-rd -k client.xx.keyring auth export
  ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
  ceph -n client.xx-profile-rd -k client.xx.keyring status
  ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  # read-only 'mon' subsystem commands are allowed
  ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
  # but read-write 'mon' commands are not
  ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"

  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw

  # add a new role-definer with the existing role-definer
  ceph -n client.xx-profile-rd -k client.xx.keyring \
    auth add client.xx-profile-rd2 mon 'allow profile role-definer'
  ceph -n client.xx-profile-rd -k client.xx.keyring \
    auth export > client.xx.keyring.2
  # remove old role-definer using the new role-definer
  ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
    auth del client.xx-profile-rd
  # remove the remaining role-definer with admin
  ceph auth del client.xx-profile-rd2
  rm -f client.xx.keyring client.xx.keyring.2
}
function test_mon_caps()
{
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring

  # pass --no-mon-config since we are looking for the permission denied error
  rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"

  rm -rf $TEMP_DIR/ceph.client.bug.keyring
  ceph auth del client.bug
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
  rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"
}
function test_mon_misc()
{
  # with and without verbosity
  ceph osd dump | grep '^epoch'
  ceph --concise osd dump | grep '^epoch'

  ceph osd df | grep 'MIN/MAX VAR'

  ceph df > $TMPFILE
  grep -v DIRTY $TMPFILE
  ceph df detail > $TMPFILE
  grep DIRTY $TMPFILE
  ceph df --format json > $TMPFILE
  grep 'total_bytes' $TMPFILE
  grep -v 'dirty' $TMPFILE
  ceph df detail --format json > $TMPFILE
  grep 'rd_bytes' $TMPFILE
  grep 'dirty' $TMPFILE
  ceph df --format xml | grep '<total_bytes>'
  ceph df detail --format xml | grep '<rd_bytes>'

  ceph health --format json-pretty
  ceph health detail --format xml-pretty

  ceph time-sync-status

  for t in mon osd mds mgr ; do
    ceph node ls $t
  done

  ceph_watch_start
  mymsg="this is a test log message $$.$(date)"
  ceph log "$mymsg"
  ceph log last | grep "$mymsg"
  ceph log last 100 | grep "$mymsg"
  ceph_watch_wait "$mymsg"

  ceph mgr module enable restful
  expect_false ceph mgr module enable foodne
  ceph mgr module enable foodne --force
  ceph mgr module disable foodne
  ceph mgr module disable foodnebizbangbash

  ceph mon count-metadata ceph_version
  ceph mgr count-metadata ceph_version
}
function check_mds_active()
{
  fs_name=$1
  ceph fs get $fs_name | grep active
}

function wait_mds_active()
{
  fs_name=$1
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_mds_active $fs_name ; then
      echo "waiting for an active MDS daemon ($i/$max_run)"
      sleep 5
    else
      break
    fi
  done
  check_mds_active $fs_name
}
function get_mds_gids()
{
  fs_name=$1
  ceph fs get $fs_name --format=json | python3 -c "import json; import sys; print(' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()]))"
}
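
# Example (as fail_all_mds does below): iterate over the GIDs of a named
# filesystem:
#
#   for mds_gid in $(get_mds_gids cephfs) ; do
#     ceph mds fail $mds_gid
#   done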
function fail_all_mds()
{
  fs_name=$1
  ceph fs set $fs_name cluster_down true
  mds_gids=$(get_mds_gids $fs_name)
  for mds_gid in $mds_gids ; do
    ceph mds fail $mds_gid
  done
  if check_mds_active $fs_name ; then
    echo "An active MDS remains, something went wrong"
    exit 1
  fi
}
function remove_all_fs()
{
  existing_fs=$(ceph fs ls --format=json | python3 -c "import json; import sys; print(' '.join([fs['name'] for fs in json.load(sys.stdin)]))")
  for fs_name in $existing_fs ; do
    echo "Removing fs ${fs_name}..."
    fail_all_mds $fs_name
    echo "Removing existing filesystem '${fs_name}'..."
    ceph fs rm $fs_name --yes-i-really-mean-it
    echo "Removed '${fs_name}'."
  done
}
# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
  ceph auth ls | grep "^mds"
}
# some of the commands are just not idempotent.
function without_test_dup_command()
{
  if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
    "$@"
  else
    local saved=${CEPH_CLI_TEST_DUP_COMMAND}
    unset CEPH_CLI_TEST_DUP_COMMAND
    "$@"
    CEPH_CLI_TEST_DUP_COMMAND=$saved
  fi
}
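
# Example (as test_mds_tell does below): run a non-idempotent command
# without the CLI test harness re-issuing it:
#
#   without_test_dup_command ceph tell mds.0 respawn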
function test_mds_tell()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  ceph osd pool create fs_data 16
  ceph osd pool create fs_metadata 16
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  # Test injectargs by GID
  old_mds_gids=$(get_mds_gids $FS_NAME)
  echo Old GIDs: $old_mds_gids

  for mds_gid in $old_mds_gids ; do
    ceph tell mds.$mds_gid injectargs "--debug-mds 20"
  done
  expect_false ceph tell mds.a injectargs mds_max_file_recover -1

  # Test respawn by rank
  without_test_dup_command ceph tell mds.0 respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  without_test_dup_command ceph tell mds.a respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mon_mds()
{
  local FS_NAME=cephfs

  ceph osd pool create fs_data 16
  ceph osd pool create fs_metadata 16
  ceph fs new $FS_NAME fs_metadata fs_data

  ceph fs set $FS_NAME cluster_down true
  ceph fs set $FS_NAME cluster_down false

  ceph mds compat rm_incompat 4
  ceph mds compat rm_incompat 4

  # We don't want any MDSs to be up, their activity can interfere with
  # the "current_epoch + 1" checking below if they're generating updates
  fail_all_mds $FS_NAME

  for mds_gid in $(get_mds_gids $FS_NAME) ; do
    ceph mds metadata $mds_gid
  done

  ceph mds count-metadata os

  # XXX mds fail, but how do you undo it?
  mdsmapfile=$TEMP_DIR/mdsmap.$$
  current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')

  ceph osd pool create data2 16
  ceph osd pool create data3 16
  data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
  data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
  ceph fs add_data_pool cephfs $data2_pool
  ceph fs add_data_pool cephfs $data3_pool
  ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph fs rm_data_pool cephfs $data2_pool
  ceph fs rm_data_pool cephfs $data3_pool
  ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
  ceph fs set cephfs max_mds 4
  ceph fs set cephfs max_mds 3
  ceph fs set cephfs max_mds 256
  expect_false ceph fs set cephfs max_mds 257
  ceph fs set cephfs max_mds 4
  ceph fs set cephfs max_mds 256
  expect_false ceph fs set cephfs max_mds 257
  expect_false ceph fs set cephfs max_mds asdf
  expect_false ceph fs set cephfs inline_data true
  ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
  ceph fs set cephfs inline_data yes --yes-i-really-really-mean-it
  ceph fs set cephfs inline_data 1 --yes-i-really-really-mean-it
  expect_false ceph fs set cephfs inline_data --yes-i-really-really-mean-it
  ceph fs set cephfs inline_data false
  ceph fs set cephfs inline_data no
  ceph fs set cephfs inline_data 0
  expect_false ceph fs set cephfs inline_data asdf
  ceph fs set cephfs max_file_size 1048576
  expect_false ceph fs set cephfs max_file_size 123asdf

  expect_false ceph fs set cephfs allow_new_snaps
  ceph fs set cephfs allow_new_snaps true
  ceph fs set cephfs allow_new_snaps 0
  ceph fs set cephfs allow_new_snaps false
  ceph fs set cephfs allow_new_snaps no
  expect_false ceph fs set cephfs allow_new_snaps taco

  # we should never be able to add EC pools as data or metadata pools
  # create an ec-pool...
  ceph osd pool create mds-ec-pool 16 16 erasure
  ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22

  ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
  data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
  metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')

  fail_all_mds $FS_NAME

  # Check that rmfailed requires confirmation
  expect_false ceph mds rmfailed 0
  ceph mds rmfailed 0 --yes-i-really-mean-it

  # Check that `fs new` is no longer permitted
  expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE

  # Check that 'fs reset' runs
  ceph fs reset $FS_NAME --yes-i-really-mean-it

  # Check that creating a second FS fails by default
  ceph osd pool create fs_metadata2 16
  ceph osd pool create fs_data2 16
  expect_false ceph fs new cephfs2 fs_metadata2 fs_data2

  # Check that setting enable_multiple enables creation of second fs
  ceph fs flag set enable_multiple true --yes-i-really-mean-it
  ceph fs new cephfs2 fs_metadata2 fs_data2

  # Clean up multi-fs stuff
  fail_all_mds cephfs2
  ceph fs rm cephfs2 --yes-i-really-mean-it
  ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
  ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it

  fail_all_mds $FS_NAME

  # Clean up to enable subsequent fs new tests
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22

  # ... now create a cache tier in front of the EC pool...
  ceph osd pool create mds-tier 2
  ceph osd tier add mds-ec-pool mds-tier
  ceph osd tier set-overlay mds-ec-pool mds-tier
  tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')

  # Use of a readonly tier should be forbidden
  ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'has a write tier (mds-tier) that is configured to forward' $? 22

  # Use of a writeback tier should enable FS creation
  ceph osd tier cache-mode mds-tier writeback
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force

  # While a FS exists using the tiered pools, I should not be allowed
  # to remove the tier
  ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ... but we should be forbidden from using the cache pool in the FS directly.
  ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22

  # Clean up tier + EC pools
  ceph osd tier remove-overlay mds-ec-pool
  ceph osd tier remove mds-ec-pool mds-tier

  # Create a FS using the 'cache' pool now that it's no longer a tier
  ceph fs new $FS_NAME fs_metadata mds-tier --force

  # We should be forbidden from using this pool as a tier now that
  # it's in use for CephFS
  ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # We should be permitted to use an EC pool with overwrites enabled
  # as the data pool...
  ceph osd pool set mds-ec-pool allow_ec_overwrites true
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ...but not as the metadata pool
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22

  ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it

  # Create a FS and check that we can subsequently add a cache tier to it
  ceph fs new $FS_NAME fs_metadata fs_data --force

  # Adding overlay to FS pool should be permitted, RADOS clients handle this.
  ceph osd tier add fs_metadata mds-tier
  ceph osd tier cache-mode mds-tier writeback
  ceph osd tier set-overlay fs_metadata mds-tier

  # Removing tier should be permitted because the underlying pool is
  # replicated (#11504 case)
  ceph osd tier cache-mode mds-tier readproxy
  ceph osd tier remove-overlay fs_metadata
  ceph osd tier remove fs_metadata mds-tier
  ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ceph mds tell mds.a getmap
  # ceph mds set_state

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mon_mds_metadata()
{
  local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
  test "$nmons" -gt 0

  ceph fs dump |
  sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
  while read gid id rank ; do
    ceph mds metadata ${gid} | grep '"hostname":'
    ceph mds metadata ${id} | grep '"hostname":'
    ceph mds metadata ${rank} | grep '"hostname":'

    local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
    test "$n" -eq "$nmons"
  done

  expect_false ceph mds metadata UNKNOWN
}
function test_mon_mon()
{
  # print help message
  ceph --help mon
  # -h works even when some arguments are passed
  ceph osd dump -h | grep 'osd dump'
  ceph osd dump 123 -h | grep 'osd dump'

  ceph mon getmap -o $TEMP_DIR/monmap.$$
  [ -s $TEMP_DIR/monmap.$$ ]

  first=$(ceph mon dump -f json | jq -r '.mons[0].name')
  ceph tell mon.$first mon_status

  ceph mon feature set kraken --yes-i-really-mean-it
  expect_false ceph mon feature set abcd
  expect_false ceph mon feature set abcd --yes-i-really-mean-it

  expect_failure $TEMP_DIR ceph mon add disallowed_leader $first
  ceph mon set election_strategy disallow
  ceph mon add disallowed_leader $first
  ceph mon set election_strategy connectivity
  ceph mon rm disallowed_leader $first
  ceph mon set election_strategy classic
  expect_failure $TEMP_DIR ceph mon rm disallowed_leader $first

  # don't check output, just ensure it does not fail.
  ceph mon stat
  ceph mon stat -f json | jq '.'
}
function test_mon_priority_and_weight()
{
  for i in 0 1 65535; do
    ceph mon set-weight a $i
    w=$(ceph mon dump --format=json-pretty 2>/dev/null | jq '.mons[0].weight')
    [[ "$w" == "$i" ]]
  done

  for i in -1 65536; do
    expect_false ceph mon set-weight a $i
  done
}
function gen_secrets_file()
{
  # lets assume we can have the following types
  #  all - generates both cephx and lockbox, with mock dm-crypt key
  #  cephx - only cephx
  #  no_cephx - lockbox and dm-crypt, no cephx
  #  no_lockbox - dm-crypt and cephx, no lockbox
  #  empty - empty file
  #  empty_json - correct json, empty map
  #  bad_json - bad json :)
  local t=$1
  if [[ -z "$t" ]]; then
    t=all
  fi

  fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
  echo $fn
  if [[ "$t" == "empty" ]]; then
    return 0
  fi

  echo "{" > $fn
  if [[ "$t" == "bad_json" ]]; then
    echo "asd: ; }" >> $fn
    return 0
  elif [[ "$t" == "empty_json" ]]; then
    echo "}" >> $fn
    return 0
  fi

  cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
  lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
  dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""

  if [[ "$t" == "all" ]]; then
    echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "cephx" ]]; then
    echo "$cephx_secret" >> $fn
  elif [[ "$t" == "no_cephx" ]]; then
    echo "$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "no_lockbox" ]]; then
    echo "$cephx_secret,$dmcrypt_key" >> $fn
  else
    echo "unknown gen_secrets_file() type \'$fn\'"
    return 1
  fi
  echo "}" >> $fn
  return 0
}
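
# The function prints the generated file name, so callers capture it,
# e.g. (mirroring test_mon_osd_create_destroy below):
#
#   all_secrets=$(gen_secrets_file "all")
#   ceph osd new $(uuidgen) -i $all_secrets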
function test_mon_osd_create_destroy()
{
  ceph osd new 2>&1 | grep 'EINVAL'
  ceph osd new '' -1 2>&1 | grep 'EINVAL'
  ceph osd new '' 10 2>&1 | grep 'EINVAL'

  old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

  old_osds=$(ceph osd ls)
  num_osds=$(ceph osd ls | wc -l)

  uuid=$(uuidgen)
  id=$(ceph osd new $uuid 2>/dev/null)

  for i in $old_osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd rm $id

  id2=`ceph osd new $uuid 2>/dev/null`
  [[ $id2 == $id ]]
  ceph osd rm $id2

  ceph osd new $uuid $id

  id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST

  uuid2=$(uuidgen)
  id2=$(ceph osd new $uuid2)
  [[ "$id2" != "$id" ]]

  ceph osd new $uuid $id2 2>&1 | grep EEXIST
  ceph osd new $uuid2 $id2

  empty_secrets=$(gen_secrets_file "empty")
  empty_json=$(gen_secrets_file "empty_json")
  all_secrets=$(gen_secrets_file "all")
  cephx_only=$(gen_secrets_file "cephx")
  no_cephx=$(gen_secrets_file "no_cephx")
  no_lockbox=$(gen_secrets_file "no_lockbox")
  bad_json=$(gen_secrets_file "bad_json")

  # empty secrets should be idempotent
  new_id=$(ceph osd new $uuid $id -i $empty_secrets)
  [[ "$new_id" == "$id" ]]

  # empty json, thus empty secrets
  new_id=$(ceph osd new $uuid $id -i $empty_json)
  [[ "$new_id" == "$id" ]]

  ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'

  ceph osd rm $id
  ceph osd rm $id2
  ceph osd setmaxosd $old_maxosd

  ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'

  id=$(ceph osd new $uuid -i $all_secrets)

  # validate secrets and dm-crypt are set
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
      jq '.key')
  s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
  [[ $k == $s ]]
  ceph config-key exists dm-crypt/osd/$uuid/luks

  id2=$(ceph osd new $uuid2 -i $cephx_only)
  [[ "$i" != "$id2" ]]

  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  expect_false ceph auth get-key client.osd-lockbox.$uuid2
  expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks

  ceph osd destroy osd.$id2 --yes-i-really-mean-it
  ceph osd destroy $id2 --yes-i-really-mean-it

  expect_false ceph auth get-key osd.$id2
  ceph osd dump | grep osd.$id2 | grep destroyed

  uuid3=$(uuidgen)
  ceph osd new $uuid3 $id3 -i $all_secrets
  ceph osd dump | grep osd.$id3 | expect_false grep destroyed
  ceph auth get-key client.osd-lockbox.$uuid3
  ceph auth get-key osd.$id3
  ceph config-key exists dm-crypt/osd/$uuid3/luks

  ceph osd purge-new osd.$id3 --yes-i-really-mean-it
  expect_false ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  expect_false ceph auth get-key client.osd-lockbox.$uuid3
  expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent

  ceph osd purge osd.$id --yes-i-really-mean-it
  ceph osd purge 123456 --yes-i-really-mean-it
  expect_false ceph osd find $id
  expect_false ceph auth get-key osd.$id
  expect_false ceph auth get-key client.osd-lockbox.$uuid
  expect_false ceph config-key exists dm-crypt/osd/$uuid/luks

  rm $empty_secrets $empty_json $all_secrets $cephx_only \
     $no_cephx $no_lockbox $bad_json

  for i in $(ceph osd ls); do
    [[ "$i" != "$id" ]]
    [[ "$i" != "$id2" ]]
    [[ "$i" != "$id3" ]]
  done

  [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
  ceph osd setmaxosd $old_maxosd
}
function test_mon_config_key()
{
  key=asdfasdfqwerqwreasdfuniquesa123df
  ceph config-key list | grep -c $key | grep 0
  ceph config-key get $key | grep -c bar | grep 0
  ceph config-key set $key bar
  ceph config-key get $key | grep bar
  ceph config-key list | grep -c $key | grep 1
  ceph config-key dump | grep $key | grep bar
  ceph config-key rm $key
  expect_false ceph config-key get $key
  ceph config-key list | grep -c $key | grep 0
  ceph config-key dump | grep -c $key | grep 0
}
function test_mon_osd()
{
  bl=192.168.0.1:0/1000
  ceph osd blocklist add $bl
  ceph osd blocklist ls | grep $bl
  ceph osd blocklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
  ceph osd dump --format=json-pretty | grep $bl
  ceph osd dump | grep $bl
  ceph osd blocklist rm $bl
  ceph osd blocklist ls | expect_false grep $bl

  bl=192.168.0.1
  # test without nonce, invalid nonce
  ceph osd blocklist add $bl
  ceph osd blocklist ls | grep $bl
  ceph osd blocklist rm $bl
  ceph osd blocklist ls | expect_false grep $bl
  expect_false "ceph osd blocklist add $bl/-1"
  expect_false "ceph osd blocklist add $bl/foo"

  # test with invalid address
  expect_false "ceph osd blocklist add 1234.56.78.90/100"

  # test range blocklisting
  bl=192.168.0.1:0/24
  ceph osd blocklist range add $bl
  ceph osd blocklist ls | grep $bl
  ceph osd blocklist range rm $bl
  ceph osd blocklist ls | expect_false grep $bl
  bad_bl=192.168.0.1/33
  expect_false ceph osd blocklist range add $bad_bl

  ceph osd blocklist add $bl
  ceph osd blocklist ls | grep $bl
  ceph osd blocklist clear
  ceph osd blocklist ls | expect_false grep $bl

  # deprecated syntax?
  ceph osd blacklist ls
  ceph osd crush reweight-all
  ceph osd crush tunables legacy
  ceph osd crush show-tunables | grep argonaut
  ceph osd crush tunables bobtail
  ceph osd crush show-tunables | grep bobtail
  ceph osd crush tunables firefly
  ceph osd crush show-tunables | grep firefly

  ceph osd crush set-tunable straw_calc_version 0
  ceph osd crush get-tunable straw_calc_version | grep 0
  ceph osd crush set-tunable straw_calc_version 1
  ceph osd crush get-tunable straw_calc_version | grep 1

  # require-min-compat-client
  expect_false ceph osd set-require-min-compat-client dumpling  # firefly tunables
  ceph osd get-require-min-compat-client | grep luminous
  ceph osd dump | grep 'require_min_compat_client luminous'
  ceph osd scrub 0 --block
  ceph osd deep-scrub 0 --block

  # how do I tell when these are done?
  ceph osd scrub 0
  ceph osd deep-scrub 0

  # pool scrub, force-recovery/backfill
  pool_names=`rados lspools`
  for pool_name in $pool_names
  do
    ceph osd pool scrub $pool_name
    ceph osd pool deep-scrub $pool_name
    ceph osd pool repair $pool_name
    ceph osd pool force-recovery $pool_name
    ceph osd pool cancel-force-recovery $pool_name
    ceph osd pool force-backfill $pool_name
    ceph osd pool cancel-force-backfill $pool_name
  done

  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \
      norebalance norecover notieragent
  do
    ceph osd set $f
    ceph osd unset $f
  done
  expect_false ceph osd set bogus
  expect_false ceph osd unset bogus
  for f in sortbitwise recover_deletes require_jewel_osds \
      require_kraken_osds
  do
    expect_false ceph osd set $f
    expect_false ceph osd unset $f
  done
  ceph osd require-osd-release quincy
  # can't lower
  expect_false ceph osd require-osd-release pacific
  expect_false ceph osd require-osd-release octopus
  # these are no-ops but should succeed.
  ceph osd set noup
  ceph osd down 0
  ceph osd dump | grep 'osd.0 down'
  ceph osd unset noup
  max_run=1000
  for ((i=0; i < $max_run; i++)); do
    if ! ceph osd dump | grep 'osd.0 up'; then
      echo "waiting for osd.0 to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  ceph osd dump | grep 'osd.0 up'

  ceph osd dump | grep 'osd.0 up'
  # ceph osd find expects the OsdName, so both ints and osd.n should work.
  ceph osd find 1
  ceph osd find osd.1
  expect_false ceph osd find osd.xyz
  expect_false ceph osd find xyz
  expect_false ceph osd find 0.1
  ceph --format plain osd find 1 # falls back to json-pretty
  if [ `uname` == Linux ]; then
    ceph osd metadata 1 | grep 'distro'
    ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
  fi
  ceph osd out 0
  ceph osd dump | grep 'osd.0.*out'
  ceph osd in 0
  ceph osd dump | grep 'osd.0.*in'

  expect_false ceph osd info osd.xyz
  expect_false ceph osd info xyz
  expect_false ceph osd info 42
  expect_false ceph osd info osd.42
  info_json=$(ceph osd info --format=json | jq -cM '.')
  dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
  if [[ "${info_json}" != "${dump_json}" ]]; then
    echo "waiting for OSDs to settle"
    sleep 10
    info_json=$(ceph osd info --format=json | jq -cM '.')
    dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
    [[ "${info_json}" == "${dump_json}" ]]
  fi

  info_json=$(ceph osd info 0 --format=json | jq -cM '.')
  dump_json=$(ceph osd dump --format=json | \
      jq -cM '.osds[] | select(.osd == 0)')
  [[ "${info_json}" == "${dump_json}" ]]

  info_plain="$(ceph osd info)"
  dump_plain="$(ceph osd dump | grep '^osd')"
  [[ "${info_plain}" == "${dump_plain}" ]]

  info_plain="$(ceph osd info 0)"
  dump_plain="$(ceph osd dump | grep '^osd.0')"
  [[ "${info_plain}" == "${dump_plain}" ]]
  ceph osd add-nodown 0 1
  ceph health detail | grep 'NODOWN'
  ceph osd rm-nodown 0 1
  ! ceph health detail | grep 'NODOWN'

  ceph osd out 0 # so we can mark it as noin later
  ceph osd add-noin 0
  ceph health detail | grep 'NOIN'
  ceph osd rm-noin 0
  ! ceph health detail | grep 'NOIN'
  ceph osd in 0

  ceph osd add-noout 0
  ceph health detail | grep 'NOOUT'
  ceph osd rm-noout 0
  ! ceph health detail | grep 'NOOUT'

  expect_false ceph osd add-noup 797er
  expect_false ceph osd add-nodown u9uwer
  expect_false ceph osd add-noin 78~15

  expect_false ceph osd rm-noup 1234567
  expect_false ceph osd rm-nodown fsadf7
  expect_false ceph osd rm-noout 790-fd
  ids=`ceph osd ls-tree default`
  for osd in $ids
  do
    ceph osd add-nodown $osd
    ceph osd add-noout $osd
  done
  ceph -s | grep 'NODOWN'
  ceph -s | grep 'NOOUT'
  ceph osd rm-nodown any
  ceph osd rm-noout all
  ! ceph -s | grep 'NODOWN'
  ! ceph -s | grep 'NOOUT'
  # test crush node flags
  ceph osd add-noup osd.0
  ceph osd add-nodown osd.0
  ceph osd add-noin osd.0
  ceph osd add-noout osd.0
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
  ceph osd rm-noup osd.0
  ceph osd rm-nodown osd.0
  ceph osd rm-noin osd.0
  ceph osd rm-noout osd.0
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"

  ceph osd crush add-bucket foo host root=default
  ceph osd add-noup foo
  ceph osd add-nodown foo
  ceph osd add-noin foo
  ceph osd add-noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
  ceph osd rm-noup foo
  ceph osd rm-nodown foo
  ceph osd rm-noin foo
  ceph osd rm-noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
  ceph osd add-noup foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
  ceph osd crush rm foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
  ceph osd set-group noup osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd set-group noup,nodown osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
  ceph osd set-group noup,nodown,noin osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
  ceph osd set-group noup,nodown,noin,noout osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
  ceph osd unset-group noup osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
  ceph osd unset-group noup,nodown osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
  ceph osd unset-group noup,nodown,noin osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
  ceph osd unset-group noup,nodown,noin,noout osd.0
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'

  ceph osd set-group noup,nodown,noin,noout osd.0 osd.1
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout'
  ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
  ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout'

  ceph osd set-group noup all
  ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
  ceph osd unset-group noup all
  ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
  ceph osd crush add-bucket foo host root=default
  ceph osd set-group noup foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
  ceph osd set-group noup,nodown foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
  ceph osd set-group noup,nodown,noin foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd set-group noup,nodown,noin,noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'

  ceph osd unset-group noup foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
  ceph osd unset-group noup,nodown foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
  ceph osd unset-group noup,nodown,noin foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
  ceph osd unset-group noup,nodown,noin,noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout'

  ceph osd set-group noin,noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
  ceph osd unset-group noin,noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'

  ceph osd set-group noup,nodown,noin,noout foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
  ceph osd crush rm foo
  ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
  # test device class flags
  osd_0_device_class=$(ceph osd crush get-device-class osd.0)
  ceph osd set-group noup $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
  ceph osd set-group noup,nodown $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
  ceph osd set-group noup,nodown,noin $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
  ceph osd set-group noup,nodown,noin,noout $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'

  ceph osd unset-group noup $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
  ceph osd unset-group noup,nodown $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
  ceph osd unset-group noup,nodown,noin $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
  ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout'

  ceph osd set-group noin,noout $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
  ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
  ceph osd unset-group noin,noout $osd_0_device_class
  ceph osd dump -f json-pretty | jq ".device_class_flags" | expect_false grep $osd_0_device_class
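
  # Sketch of an alternative (hypothetical helper, not used above): jq -e
  # derives its exit status from the filter result, so flag membership can
  # be asserted without grepping pretty-printed JSON:
  #
  #   function node_flag_set()
  #   {
  #     local map=$1 node=$2 flag=$3  # map: crush_node_flags or device_class_flags
  #     ceph osd dump -f json | \
  #       jq -e --arg n "$node" --arg f "$flag" ".${map}[\$n] // [] | index(\$f)" >/dev/null
  #   }
  #
  #   node_flag_set device_class_flags $osd_0_device_class noout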
  # make sure mark out preserves weight
  ceph osd reweight osd.0 .5
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
  ceph osd out 0
  ceph osd in 0
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'

  ceph osd getmap -o $f

  save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
  ceph osd setmaxosd 10
  ceph osd getmaxosd | grep 'max_osd = 10'
  ceph osd setmaxosd $save
  ceph osd getmaxosd | grep "max_osd = $save"
  for id in `ceph osd ls` ; do
    retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
  done

  ceph osd rm 0 2>&1 | grep 'EBUSY'

  local old_osds=$(echo $(ceph osd ls))
  id=`ceph osd create`
  ceph osd lost $id --yes-i-really-mean-it
  expect_false ceph osd setmaxosd $id
  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id
  done

  uuid=$(uuidgen)
  id=`ceph osd create $uuid`
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]

  ceph osd setmaxosd $id
  ceph osd getmaxosd | grep "max_osd = $save"

  local max_osd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
  ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'

  id=`ceph osd create $uuid $max_osd`
  [ "$id" = "$max_osd" ]
  max_osd=$((max_osd + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
  ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  id2=`ceph osd create $uuid $id`
  [ "$id" = "$id2" ]

  local gap_start=$max_osd
  id=`ceph osd create $uuid $((gap_start + 100))`
  [ "$id" = "$((gap_start + 100))" ]
  max_osd=$((id + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'

  # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
  # is repeated and consumes two osd id, not just one.
  local next_osd=$gap_start
  id=`ceph osd create $(uuidgen)`
  [ "$id" = "$next_osd" ]

  next_osd=$((id + 1))
  id=`ceph osd create $(uuidgen) $next_osd`
  [ "$id" = "$next_osd" ]

  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id
  done

  ceph osd setmaxosd $save
  ceph osd pool create data 16
  ceph osd pool application enable data rados
  ceph osd lspools | grep data
  ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
  ceph osd map data foo namespace | grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pause
  ceph osd dump | grep 'flags.*pauserd,pausewr'
  ceph osd unpause

  ceph osd tree destroyed
  ceph osd tree up out
  ceph osd tree down in
  ceph osd tree down out
  ceph osd tree out down
  expect_false ceph osd tree up down
  expect_false ceph osd tree up destroyed
  expect_false ceph osd tree down destroyed
  expect_false ceph osd tree up down destroyed
  expect_false ceph osd tree in out
  expect_false ceph osd tree up foo

  ceph osd count-metadata os
  ceph osd stat | grep up
}
function test_mon_crush()
{
  local f=$TEMP_DIR/map.$$

  epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
  nextepoch=$(( $epoch + 1 ))
  echo epoch $epoch nextepoch $nextepoch
  expect_false ceph osd setcrushmap $nextepoch -i $f
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  # should be idempotent
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
}
function test_mon_osd_pool()
{
  ceph osd pool create data 16
  ceph osd pool application enable data rados
  ceph osd pool mksnap data datasnap
  rados -p data lssnap | grep datasnap
  ceph osd pool rmsnap data datasnap
  expect_false ceph osd pool rmsnap pool_fake snapshot
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pool create data2 16
  ceph osd pool application enable data2 rados
  ceph osd pool rename data2 data3
  ceph osd lspools | grep data3
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it

  ceph osd pool create replicated 16 16 replicated
  ceph osd pool create replicated 1 16 replicated
  ceph osd pool create replicated 16 16 # default is replicated
  ceph osd pool create replicated 16    # default is replicated, pgp_num = pg_num
  ceph osd pool application enable replicated rados
  # should fail because the type is not the same
  expect_false ceph osd pool create replicated 16 16 erasure
  ceph osd lspools | grep replicated
  ceph osd pool create ec_test 1 1 erasure
  ceph osd pool application enable ec_test rados
  ceph osd count-metadata osd_objectstore | grep 'bluestore'
  if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
    ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
    check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
  else
    ceph osd pool set ec_test allow_ec_overwrites true || return 1
    expect_false ceph osd pool set ec_test allow_ec_overwrites false
  fi
  ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
  ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it

  # test create pool with rule
  ceph osd erasure-code-profile set foo foo
  ceph osd erasure-code-profile ls | grep foo
  ceph osd crush rule create-erasure foo foo
  ceph osd pool create erasure 16 16 erasure foo
  expect_false ceph osd erasure-code-profile rm foo
  ceph osd pool delete erasure erasure --yes-i-really-really-mean-it
  ceph osd crush rule rm foo
  ceph osd erasure-code-profile rm foo

  ceph osd pool create modeon --autoscale-mode=on
  ceph osd dump | grep modeon | grep 'autoscale_mode on'
  ceph osd pool create modewarn --autoscale-mode=warn
  ceph osd dump | grep modewarn | grep 'autoscale_mode warn'
  ceph osd pool create modeoff --autoscale-mode=off
  ceph osd dump | grep modeoff | grep 'autoscale_mode off'
  ceph osd pool delete modeon modeon --yes-i-really-really-mean-it
  ceph osd pool delete modewarn modewarn --yes-i-really-really-mean-it
  ceph osd pool delete modeoff modeoff --yes-i-really-really-mean-it
}
function test_mon_osd_pool_quota()
{
  #
  # test osd pool set/get quota
  #

  ceph osd pool create tmp-quota-pool 32
  ceph osd pool application enable tmp-quota-pool rados

  #
  # set erroneous quotas
  #
  expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
  expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
  expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa

  ceph osd pool set-quota tmp-quota-pool max_bytes 10
  ceph osd pool set-quota tmp-quota-pool max_objects 10M

  #
  # get quotas in json-pretty format
  #
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_objects":.*10000000'
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_bytes":.*10'

  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects'
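
  # Equivalent check (sketch, not part of the original flow): jq -e could
  # assert the same values directly from the JSON instead of pattern-matching
  # the pretty-printed output, e.g.:
  #
  #   ceph osd pool get-quota tmp-quota-pool --format=json | \
  #     jq -e '.quota_max_bytes == 10 and .quota_max_objects == 10000000'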
  #
  # set valid quotas with unit prefix
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10K
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'

  #
  # set valid quotas with unit prefix
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'

  ceph osd pool set-quota tmp-quota-pool max_bytes 0
  ceph osd pool set-quota tmp-quota-pool max_objects 0

  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'

  ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
}
function test_mon_pg()
{
  # Make sure we start healthy.
  wait_for_clean

  ceph pg debug unfound_objects_exist
  ceph pg debug degraded_pgs_exist
  ceph pg deep-scrub 1.0

  ceph pg dump pgs_brief --format=json
  ceph pg dump pgs --format=json
  ceph pg dump pools --format=json
  ceph pg dump osds --format=json
  ceph pg dump sum --format=json
  ceph pg dump all --format=json
  ceph pg dump pgs_brief osds --format=json
  ceph pg dump pools osds pgs_brief --format=json
  ceph pg dump_pools_json
  ceph pg dump_stuck inactive
  ceph pg dump_stuck unclean
  ceph pg dump_stuck stale
  ceph pg dump_stuck undersized
  ceph pg dump_stuck degraded

  expect_false ceph pg ls scrubq
  ceph pg ls active stale repair recovering
  ceph pg ls 1 active stale
  ceph pg ls-by-primary osd.0
  ceph pg ls-by-primary osd.0 1
  ceph pg ls-by-primary osd.0 active
  ceph pg ls-by-primary osd.0 active stale
  ceph pg ls-by-primary osd.0 1 active stale
  ceph pg ls-by-osd osd.0
  ceph pg ls-by-osd osd.0 1
  ceph pg ls-by-osd osd.0 active
  ceph pg ls-by-osd osd.0 active stale
  ceph pg ls-by-osd osd.0 1 active stale
  ceph pg ls-by-pool rbd
  ceph pg ls-by-pool rbd active stale
  # can't test this...
  # ceph pg force_create_pg
  ceph pg getmap -o $TEMP_DIR/map.$$
  [ -s $TEMP_DIR/map.$$ ]
  ceph pg map 1.0 | grep acting

  ceph osd set-full-ratio .962
  ceph osd dump | grep '^full_ratio 0.962'
  ceph osd set-backfillfull-ratio .912
  ceph osd dump | grep '^backfillfull_ratio 0.912'
  ceph osd set-nearfull-ratio .892
  ceph osd dump | grep '^nearfull_ratio 0.892'

  # Check health status
  ceph osd set-nearfull-ratio .913
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-nearfull-ratio .892
  ceph osd set-backfillfull-ratio .963
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-backfillfull-ratio .912
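
  # The ordering required here is nearfull <= backfillfull <= full; nudging
  # nearfull to .913 (above backfillfull .912), and then backfillfull to .963
  # (above full .962), is what raises OSD_OUT_OF_ORDER_FULL before each
  # setting is restored.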
  # Check injected full results
  $SUDO ceph tell osd.0 injectfull nearfull
  wait_for_health "OSD_NEARFULL"
  ceph health detail | grep "osd.0 is near full"
  $SUDO ceph tell osd.0 injectfull none
  wait_for_health_ok

  $SUDO ceph tell osd.1 injectfull backfillfull
  wait_for_health "OSD_BACKFILLFULL"
  ceph health detail | grep "osd.1 is backfill full"
  $SUDO ceph tell osd.1 injectfull none
  wait_for_health_ok

  $SUDO ceph tell osd.2 injectfull failsafe
  # failsafe and full are the same as far as the monitor is concerned
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.2 is full"
  $SUDO ceph tell osd.2 injectfull none
  wait_for_health_ok

  $SUDO ceph tell osd.0 injectfull full
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.0 is full"
  $SUDO ceph tell osd.0 injectfull none
  wait_for_health_ok

  ceph pg stat | grep 'pgs:'

  first=$(ceph mon dump -f json | jq -r '.mons[0].name')
  ceph tell mon.$first quorum enter

  ceph report | grep osd_stats

  ceph tell osd.0 version
  expect_false ceph tell osd.9999 version
  expect_false ceph tell osd.foo version

  ceph tell osd.0 dump_pg_recovery_stats | grep Started

  ceph osd reweight 0 0.9
  expect_false ceph osd reweight 0 -1
  ceph osd reweight osd.0 1

  ceph osd primary-affinity osd.0 .9
  expect_false ceph osd primary-affinity osd.0 -2
  expect_false ceph osd primary-affinity osd.9999 .5
  ceph osd primary-affinity osd.0 1

  ceph osd pool set rbd size 2
  ceph osd pg-temp 1.0 0 1
  ceph osd pg-temp 1.0 osd.1 osd.0
  expect_false ceph osd pg-temp 1.0 0 1 2
  expect_false ceph osd pg-temp asdf qwer
  expect_false ceph osd pg-temp 1.0 asdf
  ceph osd pg-temp 1.0 # cleanup pg-temp

  expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore

  # don't test ceph osd primary-temp for now
}
function test_mon_osd_pool_set()
{
  TEST_POOL_GETSET=pool_getset
  expect_false ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio -0.3
  expect_true ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio 1
  ceph osd pool application enable $TEST_POOL_GETSET rados
  ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off

  ceph osd pool get $TEST_POOL_GETSET all

  for s in pg_num pgp_num size min_size crush_rule target_size_ratio; do
    ceph osd pool get $TEST_POOL_GETSET $s
  done

  old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
  (( new_size = old_size + 1 ))
  ceph osd pool set $TEST_POOL_GETSET size $new_size --yes-i-really-mean-it
  ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
  ceph osd pool set $TEST_POOL_GETSET size $old_size --yes-i-really-mean-it

  ceph osd pool create pool_erasure 1 1 erasure
  ceph osd pool application enable pool_erasure rados
  ceph osd pool set pool_erasure size 4444 2>$TMPFILE
  check_response 'not change the size'
  ceph osd pool get pool_erasure erasure_code_profile
  ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it

  for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub bulk; do
    ceph osd pool set $TEST_POOL_GETSET $flag false
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
    ceph osd pool set $TEST_POOL_GETSET $flag true
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
    ceph osd pool set $TEST_POOL_GETSET $flag 1
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
    ceph osd pool set $TEST_POOL_GETSET $flag 0
    ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
    expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
    expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
  done

  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
  expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11
  expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11

  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
  ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
  ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'

  expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio -3
  expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio abc
  expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 0.1
  expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 1
  ceph osd pool get $TEST_POOL_GETSET target_size_ratio | grep 'target_size_ratio: 1'

  ceph osd pool set $TEST_POOL_GETSET nopgchange 1
  expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
  expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
  ceph osd pool set $TEST_POOL_GETSET nopgchange 0
  ceph osd pool set $TEST_POOL_GETSET pg_num 10
  wait_for_clean
  ceph osd pool set $TEST_POOL_GETSET pgp_num 10
  expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0
  expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0

  old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
  new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
  ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
  ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
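
  # Worked example of the sizing above: with 3 OSDs reported by "ceph osd
  # stat" and old_pgs=10, new_pgs = 10 + 3 * 32 = 106, i.e. the pool grows
  # by 32 PGs per OSD.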
  wait_for_clean

  ceph osd pool set $TEST_POOL_GETSET nosizechange 1
  expect_false ceph osd pool set $TEST_POOL_GETSET size 2
  expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
  ceph osd pool set $TEST_POOL_GETSET nosizechange 0
  ceph osd pool set $TEST_POOL_GETSET size 2
  wait_for_clean
  ceph osd pool set $TEST_POOL_GETSET min_size 2

  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
  ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it

  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
  ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it

  ceph osd pool get rbd crush_rule | grep 'crush_rule: '

  ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
  ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
  ceph osd pool set $TEST_POOL_GETSET compression_mode unset
  ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
  ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
  ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
  expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
  expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
  ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
  ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
  ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
  ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
  ceph osd pool set $TEST_POOL_GETSET csum_type unset
  ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'

  for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
    ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET $size 100
    ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
    ceph osd pool set $TEST_POOL_GETSET $size 0
    ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
  done

  ceph osd pool set $TEST_POOL_GETSET nodelete 1
  expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
  ceph osd pool set $TEST_POOL_GETSET nodelete 0
  ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
}
function test_mon_osd_tiered_pool_set()
{
  # this is really a tier pool
  ceph osd pool create real-tier 2
  ceph osd tier add rbd real-tier

  # expect us to be unable to set negative values for hit_set_*
  for o in hit_set_period hit_set_count hit_set_fpp; do
    expect_false ceph osd pool set real-tier $o -1
  done

  # and hit_set_fpp should be in range 0..1
  expect_false ceph osd pool set real-tier hit_set_fpp 2

  ceph osd pool set real-tier hit_set_type explicit_hash
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
  ceph osd pool set real-tier hit_set_type explicit_object
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
  ceph osd pool set real-tier hit_set_type bloom
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
  expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
  ceph osd pool set real-tier hit_set_period 123
  ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
  ceph osd pool set real-tier hit_set_count 12
  ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
  ceph osd pool set real-tier hit_set_fpp .01
  ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"

  ceph osd pool set real-tier target_max_objects 123
  ceph osd pool get real-tier target_max_objects | \
    grep 'target_max_objects:[ \t]\+123'
  ceph osd pool set real-tier target_max_bytes 123456
  ceph osd pool get real-tier target_max_bytes | \
    grep 'target_max_bytes:[ \t]\+123456'
  ceph osd pool set real-tier cache_target_dirty_ratio .123
  ceph osd pool get real-tier cache_target_dirty_ratio | \
    grep 'cache_target_dirty_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
  ceph osd pool set real-tier cache_target_dirty_high_ratio .123
  ceph osd pool get real-tier cache_target_dirty_high_ratio | \
    grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
  ceph osd pool set real-tier cache_target_full_ratio .123
  ceph osd pool get real-tier cache_target_full_ratio | \
    grep 'cache_target_full_ratio:[ \t]\+0.123'
  ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
  ceph osd pool set real-tier cache_target_full_ratio 1.0
  ceph osd pool set real-tier cache_target_full_ratio 0
  expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
  ceph osd pool set real-tier cache_min_flush_age 123
  ceph osd pool get real-tier cache_min_flush_age | \
    grep 'cache_min_flush_age:[ \t]\+123'
  ceph osd pool set real-tier cache_min_evict_age 234
  ceph osd pool get real-tier cache_min_evict_age | \
    grep 'cache_min_evict_age:[ \t]\+234'

  ceph osd pool set real-tier target_max_objects 1K
  ceph osd pool get real-tier target_max_objects | grep 1000
  for o in target_max_bytes target_size_bytes compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
    ceph osd pool set real-tier $o 1Ki # with i suffix
    val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
    [[ $val == 1024 ]]
    ceph osd pool set real-tier $o 1M # no i suffix
    val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
    [[ $val == 1048576 ]]
  done

  # this is not a tier pool
  ceph osd pool create fake-tier 2
  ceph osd pool application enable fake-tier rados

  expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type explicit_object
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type bloom
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
  expect_false ceph osd pool set fake-tier hit_set_period 123
  expect_false ceph osd pool get fake-tier hit_set_period
  expect_false ceph osd pool set fake-tier hit_set_count 12
  expect_false ceph osd pool get fake-tier hit_set_count
  expect_false ceph osd pool set fake-tier hit_set_fpp .01
  expect_false ceph osd pool get fake-tier hit_set_fpp

  expect_false ceph osd pool set fake-tier target_max_objects 123
  expect_false ceph osd pool get fake-tier target_max_objects
  expect_false ceph osd pool set fake-tier target_max_bytes 123456
  expect_false ceph osd pool get fake-tier target_max_bytes
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_full_ratio
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_min_flush_age 123
  expect_false ceph osd pool get fake-tier cache_min_flush_age
  expect_false ceph osd pool set fake-tier cache_min_evict_age 234
  expect_false ceph osd pool get fake-tier cache_min_evict_age

  ceph osd tier remove rbd real-tier
  ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
  ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
}
function test_mon_osd_erasure_code()
{
  ceph osd erasure-code-profile set fooprofile a=b c=d
  ceph osd erasure-code-profile set fooprofile a=b c=d
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
  # make sure the old ruleset-* options don't work anymore
  expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
  ceph osd erasure-code-profile set barprofile crush-failure-domain=host

  ceph osd erasure-code-profile rm fooprofile
  ceph osd erasure-code-profile rm barprofile

  # try weird k and m values
  expect_false ceph osd erasure-code-profile set badk k=1 m=1
  expect_false ceph osd erasure-code-profile set badk k=1 m=2
  expect_false ceph osd erasure-code-profile set badk k=0 m=2
  expect_false ceph osd erasure-code-profile set badk k=-1 m=2
  expect_false ceph osd erasure-code-profile set badm k=2 m=0
  expect_false ceph osd erasure-code-profile set badm k=2 m=-1
  ceph osd erasure-code-profile set good k=2 m=1
  ceph osd erasure-code-profile rm good
}
function test_mon_osd_misc()
{
  set +e

  # expect error about missing 'pool' argument
  ceph osd map 2>$TMPFILE; check_response 'pool' $? 22

  # expect error about unused argument foo
  ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22

  # expect "not in range" for invalid overload percentage
  ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22

  set -e

  local old_bytes_per_osd=$(ceph config get mgr mon_reweight_min_bytes_per_osd)
  local old_pgs_per_osd=$(ceph config get mgr mon_reweight_min_pgs_per_osd)
  # otherwise ceph-mgr complains like:
  # Error EDOM: Refusing to reweight: we only have 5372 kb used across all osds!
  # Error EDOM: Refusing to reweight: we only have 20 PGs across 3 osds!
  ceph config set mgr mon_reweight_min_bytes_per_osd 0
  ceph config set mgr mon_reweight_min_pgs_per_osd 0
  ceph osd reweight-by-utilization 110
  ceph osd reweight-by-utilization 110 .5
  expect_false ceph osd reweight-by-utilization 110 0
  expect_false ceph osd reweight-by-utilization 110 -0.1
  ceph osd test-reweight-by-utilization 110 .5 --no-increasing
  ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
  ceph osd reweight-by-pg 110
  ceph osd test-reweight-by-pg 110 .5
  ceph osd reweight-by-pg 110 rbd
  ceph osd reweight-by-pg 110 .5 rbd
  expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
  # restore the setting
  ceph config set mgr mon_reweight_min_bytes_per_osd $old_bytes_per_osd
  ceph config set mgr mon_reweight_min_pgs_per_osd $old_pgs_per_osd
}
function test_admin_heap_profiler()
{
  local do_test=1

  # expect 'heap' commands to be correctly parsed
  ceph tell osd.0 heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi

  [[ $do_test -eq 0 ]] && return 0

  $SUDO ceph tell osd.0 heap start_profiler
  $SUDO ceph tell osd.0 heap dump
  $SUDO ceph tell osd.0 heap stop_profiler
  $SUDO ceph tell osd.0 heap release
}
function test_osd_bench()
{
  # test osd bench limits
  # As we should not rely on defaults (as they may change over time),
  # let's inject some values and perform some simple tests
  # max iops: 10
  # max throughput: 10485760 # 10 MB/s
  # max block size: 2097152  # 2 MB
  # duration: 10             # 10 seconds

  local args="\
    --osd-bench-duration 10 \
    --osd-bench-max-block-size 2097152 \
    --osd-bench-large-size-max-throughput 10485760 \
    --osd-bench-small-size-max-iops 10"
  ceph tell osd.0 injectargs ${args## }

  # anything with a bs larger than 2097152 must fail
  expect_false ceph tell osd.0 bench 1 2097153
  # but using 'osd_bench_max_bs' must succeed
  ceph tell osd.0 bench 1 2097152

  # we assume 1MB as a large bs; anything lower is a small bs
  # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
  # max count: 409600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 409601 4096
  # but 409600 must succeed
  ceph tell osd.0 bench 409600 4096

  # for a large bs, we are limited by throughput.
  # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
  # the max count will be (10MB * 10s) = 100MB
  # max count: 104857600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 104857601 2097152
  # up to max count must be allowed
  ceph tell osd.0 bench 104857600 2097152
}
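
# Worked arithmetic behind test_osd_bench (informational): with the injected
# limits, the small-block cap is iops * duration * bs = 10 * 10 * 4096 =
# 409600 bytes, and the large-block cap is throughput * duration =
# 10485760 * 10 = 104857600 bytes, which is where the boundary values used
# above come from.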
function test_osd_negative_filestore_merge_threshold()
{
  $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
  expect_config_value "osd.0" "filestore_merge_threshold" -1
}

function test_mon_tell()
{
  for m in mon.a mon.b; do
    ceph tell $m sessions
    ceph_watch_start debug audit
    ceph tell mon.a sessions
    ceph_watch_wait "${m} \[DBG\] from.*cmd='sessions' args=\[\]: dispatch"
  done
  expect_false ceph tell mon.foo version
}

function test_mon_ping()
{
  expect_false ceph ping mon.foo
}

function test_mon_deprecated_commands()
{
  # current DEPRECATED commands are marked with FLAG(DEPRECATED)
  #
  # Testing should be accomplished by setting
  # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
  # each one of these commands.

  ceph tell mon.* injectargs '--mon-debug-deprecated-as-obsolete'
  expect_false ceph config-key list 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  ceph tell mon.* injectargs '--no-mon-debug-deprecated-as-obsolete'
}
function test_mon_cephdf_commands()
{
  # RAW USED The near raw used per pool in raw total

  ceph osd pool create cephdf_for_test 1 1 replicated
  ceph osd pool application enable cephdf_for_test rados
  ceph osd pool set cephdf_for_test size 2

  dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
  rados put cephdf_for_test cephdf_for_test -p cephdf_for_test

  for i in `seq 1 10`; do
    rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
    sleep 1
  done
  # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
  # to sync mon with osd
  flush_pg_stats
  local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
  stored=`ceph df detail --format=json | jq "$jq_filter.stored * 2"`
  stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`

  ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
  rm ./cephdf_for_test

  expect_false test $stored != $stored_raw
}
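
# Sanity of the cephdf check (informational): the pool has size 2, so the raw
# usage of an object should be twice its stored size; "stored" above is
# already multiplied by 2 in the jq filter, so the final test asserts
# stored * 2 == stored_raw.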
function test_mon_pool_application()
{
  ceph osd pool create app_for_test 16

  ceph osd pool application enable app_for_test rbd
  expect_false ceph osd pool application enable app_for_test rgw
  ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool ls detail | grep "application rbd,rgw"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'

  expect_false ceph osd pool application set app_for_test cephfs key value
  ceph osd pool application set app_for_test rbd key1 value1
  ceph osd pool application set app_for_test rbd key2 value2
  ceph osd pool application set app_for_test rgw key1 value1
  ceph osd pool application get app_for_test rbd key1 | grep 'value1'
  ceph osd pool application get app_for_test rbd key2 | grep 'value2'
  ceph osd pool application get app_for_test rgw key1 | grep 'value1'

  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'

  ceph osd pool application rm app_for_test rgw key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key2
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1 # should be idempotent

  expect_false ceph osd pool application disable app_for_test rgw
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
  ceph osd pool ls detail | grep "application rbd"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'

  ceph osd pool application disable app_for_test rbd --yes-i-really-mean-it
  ceph osd pool ls detail | grep -v "application "
  ceph osd pool ls detail --format=json | grep '"application_metadata":{}'

  ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
}
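
# Sketch (informational): the raw-JSON greps above are sensitive to key
# ordering; an equivalent jq-based check could be, assuming the pool entries
# carry a "pool_name" field:
#
#   ceph osd pool ls detail --format=json | \
#     jq -e '.[] | select(.pool_name == "app_for_test")
#            | .application_metadata.rbd.key1 == "value1"'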
function test_mon_tell_help_command()
{
  ceph tell mon.a help | grep sync_force
  ceph tell mon.a -h | grep sync_force
  ceph tell mon.a config -h | grep 'config diff get'

  expect_false ceph tell mon.zzz help
}

function test_mon_stdin_stdout()
{
  echo foo | ceph config-key set test_key -i -
  ceph config-key get test_key -o - | grep -c foo | grep -q 1
}

function test_osd_tell_help_command()
{
  ceph tell osd.1 help
  expect_false ceph tell osd.100 help
}

function test_osd_compact()
{
  ceph tell osd.1 compact
  $SUDO ceph daemon osd.1 compact
}

function test_mds_tell_help_command()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  ceph osd pool create fs_data 16
  ceph osd pool create fs_metadata 16
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  ceph tell mds.a help
  expect_false ceph tell mds.z help

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mgr_tell()
{
  ceph tell mgr version
}

function test_mgr_devices()
{
  expect_false ceph device info doesnotexist
  expect_false ceph device get-health-metrics doesnotexist
}
function test_per_pool_scrub_status()
{
  ceph osd pool create noscrub_pool 16
  ceph osd pool create noscrub_pool2 16
  ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
  ceph -s --format json | \
    jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
    expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
  ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_false grep -q "Pool .* has .*scrub.* flag"
  ceph health detail | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_false grep -q "Pool .* has .*scrub.* flag"

  ceph osd pool set noscrub_pool noscrub 1
  ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
  ceph -s --format json | \
    jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
    expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
  ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_true grep -q "Pool noscrub_pool has noscrub flag"
  ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"

  ceph osd pool set noscrub_pool nodeep-scrub 1
  ceph osd pool set noscrub_pool2 nodeep-scrub 1
  ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
  ceph -s --format json | \
    jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
    expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
  ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_true grep -q "Pool noscrub_pool has noscrub flag"
  ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
  ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
    expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
  ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
  ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
  ceph health detail | expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"

  ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it
  ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it
}
#
# New tests should be added to the TESTS array below
#
# Individual tests may be run using the '-t <testname>' argument
# The user can specify '-t <testname>' as many times as she wants
#
# Tests will be run in order presented in the TESTS array, or in
# the order specified by the '-t <testname>' options.
#
# '-l' will list all the available test names
# '-h' will show usage
#
# The test maintains backward compatibility: not specifying arguments
# will run all tests following the order they appear in the TESTS array.
#
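#
# For example (script path assumed), run only two specific tests:
#
#   ./test.sh -t mon_osd_pool -t osd_bench
#
# or list everything that can be run:
#
#   ./test.sh -l
#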
MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
for i in `seq 9`; do
    MON_TESTS+=" tiering_$i";
done
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
MON_TESTS+=" mon_config_key"
MON_TESTS+=" mon_crush"
MON_TESTS+=" mon_osd_create_destroy"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_pool_set"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
MON_TESTS+=" mon_tell"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"
MON_TESTS+=" mon_tell_help_command"
MON_TESTS+=" mon_stdin_stdout"

OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"
OSD_TESTS+=" admin_heap_profiler"
OSD_TESTS+=" osd_tell_help_command"
OSD_TESTS+=" osd_compact"
OSD_TESTS+=" per_pool_scrub_status"

MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"
MDS_TESTS+=" mds_tell_help_command"

MGR_TESTS+=" mgr_tell"
MGR_TESTS+=" mgr_devices"

TESTS+=$MON_TESTS
TESTS+=$OSD_TESTS
TESTS+=$MDS_TESTS
TESTS+=$MGR_TESTS
function list_tests()
{
  echo "AVAILABLE TESTS"
  for i in $TESTS; do
    echo "  $i"
  done
}

function usage()
{
  echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
}

tests_to_run=""
do_list=0
sanity_check=true

while [[ $# -gt 0 ]]; do
  opt=$1

  case "$opt" in
    "-l" )
      do_list=1
      ;;
    "--asok-does-not-need-root" )
      SUDO=""
      ;;
    "--no-sanity-check" )
      sanity_check=false
      ;;
    "--test-mon" )
      tests_to_run+="$MON_TESTS"
      ;;
    "--test-osd" )
      tests_to_run+="$OSD_TESTS"
      ;;
    "--test-mds" )
      tests_to_run+="$MDS_TESTS"
      ;;
    "--test-mgr" )
      tests_to_run+="$MGR_TESTS"
      ;;
    "-t" )
      shift
      if [[ -z "$1" ]]; then
        echo "missing argument to '-t'"
        usage
        exit 1
      fi
      tests_to_run+=" $1"
      ;;
    "-h" )
      usage
      exit 0
      ;;
  esac
  shift
done

if [[ $do_list -eq 1 ]]; then
  list_tests
  exit 0
fi

ceph osd pool create rbd 16

if test -z "$tests_to_run" ; then
  tests_to_run="$TESTS"
fi

if $sanity_check ; then
  wait_no_osd_down
fi
for i in $tests_to_run; do
  if $sanity_check ; then
    check_no_osd_down
  fi
  set -x
  test_${i}
  set +x
done
if $sanity_check ; then
  check_no_osd_down
fi

echo OK