# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab
# Load the shared test helper library (provides get_asok_dir,
# expect_failure, and friends used throughout this script).
source "$(dirname "$0")/../../standalone/ceph-helpers.sh"

# Prefix `set -x` trace output with file:line:function for easier debugging.
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
# get_admin_socket client
#
# Print the admin-socket path for a daemon/client id (e.g. "osd.0").
function get_admin_socket()
{
  local client=$1

  if test -n "$CEPH_ASOK_DIR";
  then
    echo "$(get_asok_dir)/$client.asok"
  else
    # No explicit asok dir: derive the cluster name from --cluster in
    # CEPH_ARGS and use the conventional /var/run/ceph location.
    local cluster
    cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
    echo "/var/run/ceph/$cluster-$client.asok"
  fi
}
# Succeed only when no OSD is reported " down " in the osd dump.
function check_no_osd_down()
{
  ! ceph osd dump | grep ' down '
}
# Poll until no OSD is down, then assert that state one final time.
# NOTE(review): the loop interior (sleep/else/break/done) was reconstructed
# from fragments — confirm against the original script.
function wait_no_osd_down()
{
  local max_run=${1:-300}  # optional override; defaults to 300 attempts
  for i in $(seq 1 $max_run) ; do
    if ! check_no_osd_down ; then
      echo "waiting for osd(s) to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  check_no_osd_down
}
# Invert a command's exit status: succeed iff "$@" fails.
function expect_false()
{
  if "$@"; then return 1; else return 0; fi
}
# Scratch directory for this run, removed when the script exits.
TEMP_DIR=$(mktemp -d "${TMPDIR:-/tmp}/cephtool.XXX")
trap 'rm -fr "$TEMP_DIR"' 0

# Capture file for individual command output, inspected by check_response.
TMPFILE=$(mktemp "$TEMP_DIR/test_invalid.XXX")
# retry_eagain max cmd args ...
#
# retry cmd args ... if it exits on error and its output contains the
# string EAGAIN, at most $max times
#
# NOTE(review): argument handling and loop scaffolding reconstructed from
# fragments — confirm against the original script.
function retry_eagain()
{
  local max=$1
  shift
  local status
  local tmpfile=$TEMP_DIR/retry_eagain.$$
  local count
  for count in $(seq 1 $max) ; do
    status=0
    "$@" > $tmpfile 2>&1 || status=$?
    if test $status = 0 ||
      ! grep --quiet EAGAIN $tmpfile ; then
      break
    fi
    sleep 1
  done
  if test $count = $max ; then
    echo retried with non zero exit status, $max times: "$@" >&2
  fi
  # Replay the captured output and propagate the final exit status.
  cat $tmpfile
  rm $tmpfile
  return $status
}
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of cmd arg ... if the output contains
# ENXIO, so callers such as retry_eagain treat ENXIO as retryable.
#
# NOTE(review): trailing cat/rm/return reconstructed from fragments.
function map_enxio_to_eagain()
{
  local status=0
  local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$

  "$@" > $tmpfile 2>&1 || status=$?
  if test $status != 0 &&
    grep --quiet ENXIO $tmpfile ; then
    echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}
# check_response expected_string [retcode] [expected_retcode]
#
# Verify that $TMPFILE contains expected_string and (optionally) that
# retcode equals expected_retcode; abort the whole test run otherwise.
# NOTE(review): parameter assignments reconstructed from call sites
# (e.g. `check_response 'erasure-code' $? 22`) — confirm.
function check_response()
{
  expected_string=$1
  retcode=$2
  expected_retcode=$3
  if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
    echo "return code invalid: got $retcode, expected $expected_retcode" >&2
    exit 1
  fi

  if ! grep --quiet -- "$expected_string" $TMPFILE ; then
    echo "Didn't find $expected_string in output" >&2
    cat $TMPFILE >&2
    exit 1
  fi
}
# get_config_value_or_die target config_opt
#
# Print the value of config_opt as reported by `ceph daemon <target>
# config get`; exit the test run if the daemon query fails.
function get_config_value_or_die()
{
  local target config_opt raw val

  target=$1
  config_opt=$2

  raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
  if [[ $? -ne 0 ]]; then
    echo "error obtaining config opt '$config_opt' from '$target': $raw"
    exit 1
  fi

  # Reduce the one-line JSON ({ "opt": "value" }) down to just the value.
  raw=`echo $raw | sed -e 's/[{} "]//g'`
  val=`echo $raw | cut -f2 -d:`

  echo "$val"
  return 0
}
# expect_config_value target config_opt expected_val
#
# Fail the test run unless the daemon reports expected_val for config_opt.
function expect_config_value()
{
  local target config_opt expected_val val
  target=$1
  config_opt=$2
  expected_val=$3

  val=$(get_config_value_or_die $target $config_opt)

  if [[ "$val" != "$expected_val" ]]; then
    echo "expected '$expected_val', got '$val'"
    exit 1
  fi
}
# ceph_watch_start [level [channel]]
#
# Start `ceph --watch[-<level>] [--watch-channel <channel>]` in the
# background, logging to $CEPH_WATCH_FILE, and wait until the client is
# connected and receiving monitor log messages.
# NOTE(review): the conditionals and readiness loop were reconstructed
# from fragments — confirm against the original script.
function ceph_watch_start()
{
  local whatch_opt=--watch

  if [ -n "$1" ]; then
    whatch_opt=--watch-$1
    if [ -n "$2" ]; then
      whatch_opt+=" --watch-channel $2"
    fi
  fi

  CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
  ceph $whatch_opt > $CEPH_WATCH_FILE &
  CEPH_WATCH_PID=$!

  # wait until the "ceph" client is connected and receiving
  # log messages from monitor
  for i in $(seq 3); do
    grep -q "cluster" $CEPH_WATCH_FILE && break
    sleep 1
  done
}
# ceph_watch_wait regexp [timeout]
#
# Wait (default 30s) for regexp to appear in the watch log started by
# ceph_watch_start; dump the log and fail if it never shows up.
# NOTE(review): loop scaffolding and watcher cleanup were reconstructed
# from fragments — confirm against the original script.
function ceph_watch_wait()
{
  local regexp=$1
  local timeout=${2:-30}

  for i in `seq ${timeout}`; do
    grep -q "$regexp" $CEPH_WATCH_FILE && break
    sleep 1
  done

  if ! grep "$regexp" $CEPH_WATCH_FILE; then
    echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
    cat $CEPH_WATCH_FILE >&2
    return 1
  fi

  # Stop the background watcher started by ceph_watch_start.
  kill $CEPH_WATCH_PID
  wait $CEPH_WATCH_PID 2>/dev/null || true
}
# Exercise `ceph tell ... injectargs` argument parsing: boolean flags,
# quoted option strings, `--` separators, unit/float normalization in the
# response, and rejection of invalid values.
function test_mon_injectargs()
{
  CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ! grep "the.dump" $TMPFILE || return 1
  ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true'"
  ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
  expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
                 ceph tell osd.0 injectargs -- '--osd_op_history_duration'

  ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200' >& $TMPFILE || return 1
  check_response "osd_deep_scrub_interval = '2419200.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon_probe_timeout 2' >& $TMPFILE || return 1
  check_response "mon_probe_timeout = '2.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
  check_response "mon_lease = '6.000000' (not observed, change may require restart)"

  # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
  expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 >& $TMPFILE || return 1
  check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
}
236 function test_mon_injectargs_SI
()
238 # Test SI units during injectargs and 'config set'
239 # We only aim at testing the units are parsed accordingly
240 # and don't intend to test whether the options being set
241 # actually expect SI units to be passed.
242 # Keep in mind that all integer based options that are not based on bytes
243 # (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed to
245 initial_value
=$
(get_config_value_or_die
"mon.a" "mon_pg_warn_min_objects")
246 $SUDO ceph daemon mon.a config
set mon_pg_warn_min_objects
10
247 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 10
248 $SUDO ceph daemon mon.a config
set mon_pg_warn_min_objects
10K
249 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 10000
250 $SUDO ceph daemon mon.a config
set mon_pg_warn_min_objects
1G
251 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 1000000000
252 $SUDO ceph daemon mon.a config
set mon_pg_warn_min_objects
10F
> $TMPFILE || true
253 check_response
"'10F': (22) Invalid argument"
254 # now test with injectargs
255 ceph tell mon.a injectargs
'--mon_pg_warn_min_objects 10'
256 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 10
257 ceph tell mon.a injectargs
'--mon_pg_warn_min_objects 10K'
258 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 10000
259 ceph tell mon.a injectargs
'--mon_pg_warn_min_objects 1G'
260 expect_config_value
"mon.a" "mon_pg_warn_min_objects" 1000000000
261 expect_false ceph tell mon.a injectargs
'--mon_pg_warn_min_objects 10F'
262 expect_false ceph tell mon.a injectargs
'--mon_globalid_prealloc -1'
263 $SUDO ceph daemon mon.a config
set mon_pg_warn_min_objects
$initial_value
266 function test_mon_injectargs_IEC
()
268 # Test IEC units during injectargs and 'config set'
269 # We only aim at testing the units are parsed accordingly
270 # and don't intend to test whether the options being set
271 # actually expect IEC units to be passed.
272 # Keep in mind that all integer based options that are based on bytes
273 # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
274 # unit modifiers (for backwards compatibility and convinience) and be parsed
276 initial_value
=$
(get_config_value_or_die
"mon.a" "mon_data_size_warn")
277 $SUDO ceph daemon mon.a config
set mon_data_size_warn
15000000000
278 expect_config_value
"mon.a" "mon_data_size_warn" 15000000000
279 $SUDO ceph daemon mon.a config
set mon_data_size_warn
15G
280 expect_config_value
"mon.a" "mon_data_size_warn" 16106127360
281 $SUDO ceph daemon mon.a config
set mon_data_size_warn
16Gi
282 expect_config_value
"mon.a" "mon_data_size_warn" 17179869184
283 $SUDO ceph daemon mon.a config
set mon_data_size_warn
10F
> $TMPFILE || true
284 check_response
"'10F': (22) Invalid argument"
285 # now test with injectargs
286 ceph tell mon.a injectargs
'--mon_data_size_warn 15000000000'
287 expect_config_value
"mon.a" "mon_data_size_warn" 15000000000
288 ceph tell mon.a injectargs
'--mon_data_size_warn 15G'
289 expect_config_value
"mon.a" "mon_data_size_warn" 16106127360
290 ceph tell mon.a injectargs
'--mon_data_size_warn 16Gi'
291 expect_config_value
"mon.a" "mon_data_size_warn" 17179869184
292 expect_false ceph tell mon.a injectargs
'--mon_data_size_warn 10F'
293 $SUDO ceph daemon mon.a config
set mon_data_size_warn
$initial_value
296 function test_tiering_agent
()
298 local slow
=slow_eviction
299 local fast
=fast_eviction
300 ceph osd pool create
$slow 1 1
301 ceph osd pool application
enable $slow rados
302 ceph osd pool create
$fast 1 1
303 ceph osd tier add
$slow $fast
304 ceph osd tier cache-mode
$fast writeback
305 ceph osd tier set-overlay
$slow $fast
306 ceph osd pool
set $fast hit_set_type bloom
307 rados
-p $slow put obj1
/etc
/group
308 ceph osd pool
set $fast target_max_objects
1
309 ceph osd pool
set $fast hit_set_count
1
310 ceph osd pool
set $fast hit_set_period
5
311 # wait for the object to be evicted from the cache
314 for i
in `seq 1 300` ; do
315 if ! rados
-p $fast ls |
grep obj1
; then
322 # the object is proxy read and promoted to the cache
323 rados
-p $slow get obj1
- >/dev
/null
324 # wait for the promoted object to be evicted again
326 for i
in `seq 1 300` ; do
327 if ! rados
-p $fast ls |
grep obj1
; then
334 ceph osd tier remove-overlay
$slow
335 ceph osd tier remove
$slow $fast
336 ceph osd pool delete
$fast $fast --yes-i-really-really-mean-it
337 ceph osd pool delete
$slow $slow --yes-i-really-really-mean-it
340 function test_tiering_1
()
343 ceph osd pool create slow
2
344 ceph osd pool application
enable slow rados
345 ceph osd pool create slow2
2
346 ceph osd pool application
enable slow2 rados
347 ceph osd pool create cache
2
348 ceph osd pool create cache2
2
349 ceph osd tier add slow cache
350 ceph osd tier add slow cache2
351 expect_false ceph osd tier add slow2 cache
352 # test some state transitions
353 ceph osd tier cache-mode cache writeback
354 expect_false ceph osd tier cache-mode cache forward
355 ceph osd tier cache-mode cache forward
--yes-i-really-mean-it
356 expect_false ceph osd tier cache-mode cache
readonly
357 ceph osd tier cache-mode cache
readonly --yes-i-really-mean-it
358 expect_false ceph osd tier cache-mode cache forward
359 ceph osd tier cache-mode cache forward
--yes-i-really-mean-it
360 ceph osd tier cache-mode cache none
361 ceph osd tier cache-mode cache writeback
362 ceph osd tier cache-mode cache proxy
363 ceph osd tier cache-mode cache writeback
364 expect_false ceph osd tier cache-mode cache none
365 expect_false ceph osd tier cache-mode cache
readonly --yes-i-really-mean-it
366 # test with dirty objects in the tier pool
367 # tier pool currently set to 'writeback'
368 rados
-p cache put
/etc
/passwd
/etc
/passwd
370 # 1 dirty object in pool 'cache'
371 ceph osd tier cache-mode cache proxy
372 expect_false ceph osd tier cache-mode cache none
373 expect_false ceph osd tier cache-mode cache
readonly --yes-i-really-mean-it
374 ceph osd tier cache-mode cache writeback
375 # remove object from tier pool
376 rados
-p cache
rm /etc
/passwd
377 rados
-p cache cache-flush-evict-all
379 # no dirty objects in pool 'cache'
380 ceph osd tier cache-mode cache proxy
381 ceph osd tier cache-mode cache none
382 ceph osd tier cache-mode cache
readonly --yes-i-really-mean-it
384 while ! ceph osd pool
set cache pg_num
3 --yes-i-really-mean-it 2>$TMPFILE
386 grep 'currently creating pgs' $TMPFILE
387 TRIES
=$
(( $TRIES + 1 ))
391 expect_false ceph osd pool
set cache pg_num
4
392 ceph osd tier cache-mode cache none
393 ceph osd tier set-overlay slow cache
394 expect_false ceph osd tier set-overlay slow cache2
395 expect_false ceph osd tier remove slow cache
396 ceph osd tier remove-overlay slow
397 ceph osd tier set-overlay slow cache2
398 ceph osd tier remove-overlay slow
399 ceph osd tier remove slow cache
400 ceph osd tier add slow2 cache
401 expect_false ceph osd tier set-overlay slow cache
402 ceph osd tier set-overlay slow2 cache
403 ceph osd tier remove-overlay slow2
404 ceph osd tier remove slow2 cache
405 ceph osd tier remove slow cache2
407 # make sure a non-empty pool fails
408 rados
-p cache2 put
/etc
/passwd
/etc
/passwd
409 while ! ceph df |
grep cache2 |
grep ' 1 ' ; do
410 echo waiting
for pg stats to flush
413 expect_false ceph osd tier add slow cache2
414 ceph osd tier add slow cache2
--force-nonempty
415 ceph osd tier remove slow cache2
417 ceph osd pool
ls |
grep cache2
418 ceph osd pool
ls -f json-pretty |
grep cache2
419 ceph osd pool
ls detail |
grep cache2
420 ceph osd pool
ls detail
-f json-pretty |
grep cache2
422 ceph osd pool delete slow slow
--yes-i-really-really-mean-it
423 ceph osd pool delete slow2 slow2
--yes-i-really-really-mean-it
424 ceph osd pool delete cache cache
--yes-i-really-really-mean-it
425 ceph osd pool delete cache2 cache2
--yes-i-really-really-mean-it
function test_tiering_2()
{
  # make sure we can't clobber snapshot state
  ceph osd pool create snap_base 2
  ceph osd pool application enable snap_base rados
  ceph osd pool create snap_cache 2
  ceph osd pool mksnap snap_cache snapname
  expect_false ceph osd tier add snap_base snap_cache
  ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
  ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}
function test_tiering_3()
{
  # make sure we can't create snapshot on tier
  ceph osd pool create basex 2
  ceph osd pool application enable basex rados
  ceph osd pool create cachex 2
  ceph osd tier add basex cachex
  expect_false ceph osd pool mksnap cache snapname
  ceph osd tier remove basex cachex
  ceph osd pool delete basex basex --yes-i-really-really-mean-it
  ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}
function test_tiering_4()
{
  # make sure we can't create an ec pool tier
  ceph osd pool create eccache 2 2 erasure
  expect_false ceph osd set-require-min-compat-client bobtail
  ceph osd pool create repbase 2
  ceph osd pool application enable repbase rados
  expect_false ceph osd tier add repbase eccache
  ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
  ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}
function test_tiering_5()
{
  # convenient add-cache command
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create cache3 2
  ceph osd tier add-cache slow cache3 1024000
  # add-cache should have configured bloom hit sets and a byte target
  ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
  # removing the tier while it is still the overlay must be refused
  ceph osd tier remove slow cache3 2> $TMPFILE || true
  check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache3
  ceph osd pool ls | grep cache3
  ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
  ! ceph osd pool ls | grep cache3 || exit 1
  ceph osd pool delete slow slow --yes-i-really-really-mean-it
}
function test_tiering_6()
{
  # check add-cache whether work
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd tier cache-mode cachepool writeback
  # write through the base pool, verify the object lands in the cache,
  # then flush it back to the base pool
  rados -p datapool put object /etc/passwd
  rados -p cachepool stat object
  rados -p cachepool cache-flush object
  rados -p datapool stat object
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_7()
{
  # protection against pool removal when used as tiers
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'datapool' has tiers cachepool"
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
518 function test_tiering_8
()
520 ## check health check
521 ceph osd
set notieragent
522 ceph osd pool create datapool
2
523 ceph osd pool application
enable datapool rados
524 ceph osd pool create cache4
2
525 ceph osd tier add-cache datapool cache4
1024000
526 ceph osd tier cache-mode cache4 writeback
527 tmpfile
=$
(mktemp|
grep tmp
)
528 dd if=/dev
/zero of
=$tmpfile bs
=4K count
=1
529 ceph osd pool
set cache4 target_max_objects
200
530 ceph osd pool
set cache4 target_max_bytes
1000000
531 rados
-p cache4 put foo1
$tmpfile
532 rados
-p cache4 put foo2
$tmpfile
535 ceph df |
grep datapool |
grep ' 2 '
536 ceph osd tier remove-overlay datapool
537 ceph osd tier remove datapool cache4
538 ceph osd pool delete cache4 cache4
--yes-i-really-really-mean-it
539 ceph osd pool delete datapool datapool
--yes-i-really-really-mean-it
540 ceph osd
unset notieragent
543 function test_tiering_9
()
545 # make sure 'tier remove' behaves as we expect
546 # i.e., removing a tier from a pool that's not its base pool only
547 # results in a 'pool foo is now (or already was) not a tier of bar'
549 ceph osd pool create basepoolA
2
550 ceph osd pool application
enable basepoolA rados
551 ceph osd pool create basepoolB
2
552 ceph osd pool application
enable basepoolB rados
553 poolA_id
=$
(ceph osd dump |
grep 'pool.*basepoolA' |
awk '{print $2;}')
554 poolB_id
=$
(ceph osd dump |
grep 'pool.*basepoolB' |
awk '{print $2;}')
556 ceph osd pool create cache5
2
557 ceph osd pool create cache6
2
558 ceph osd tier add basepoolA cache5
559 ceph osd tier add basepoolB cache6
560 ceph osd tier remove basepoolB cache5
2>&1 |
grep 'not a tier of'
561 ceph osd dump |
grep "pool.*'cache5'" 2>&1 |
grep "tier_of[ \t]\+$poolA_id"
562 ceph osd tier remove basepoolA cache6
2>&1 |
grep 'not a tier of'
563 ceph osd dump |
grep "pool.*'cache6'" 2>&1 |
grep "tier_of[ \t]\+$poolB_id"
565 ceph osd tier remove basepoolA cache5
2>&1 |
grep 'not a tier of'
566 ! ceph osd dump |
grep "pool.*'cache5'" 2>&1 |
grep "tier_of" ||
exit 1
567 ceph osd tier remove basepoolB cache6
2>&1 |
grep 'not a tier of'
568 ! ceph osd dump |
grep "pool.*'cache6'" 2>&1 |
grep "tier_of" ||
exit 1
570 ! ceph osd dump |
grep "pool.*'basepoolA'" 2>&1 |
grep "tiers" ||
exit 1
571 ! ceph osd dump |
grep "pool.*'basepoolB'" 2>&1 |
grep "tiers" ||
exit 1
573 ceph osd pool delete cache6 cache6
--yes-i-really-really-mean-it
574 ceph osd pool delete cache5 cache5
--yes-i-really-really-mean-it
575 ceph osd pool delete basepoolB basepoolB
--yes-i-really-really-mean-it
576 ceph osd pool delete basepoolA basepoolA
--yes-i-really-really-mean-it
581 expect_false ceph auth add client.xx mon
'invalid' osd
"allow *"
582 expect_false ceph auth add client.xx mon
'allow *' osd
"allow *" invalid
"allow *"
583 ceph auth add client.xx mon
'allow *' osd
"allow *"
584 ceph auth
export client.xx
>client.xx.keyring
585 ceph auth add client.xx
-i client.xx.keyring
586 rm -f client.xx.keyring
587 ceph auth list |
grep client.xx
588 ceph auth
ls |
grep client.xx
589 ceph auth get client.xx |
grep caps |
grep mon
590 ceph auth get client.xx |
grep caps |
grep osd
591 ceph auth get-key client.xx
592 ceph auth print-key client.xx
593 ceph auth print_key client.xx
594 ceph auth caps client.xx osd
"allow rw"
595 expect_false sh
<<< "ceph auth get client.xx | grep caps | grep mon"
596 ceph auth get client.xx |
grep osd |
grep "allow rw"
597 ceph auth
export |
grep client.xx
598 ceph auth
export -o authfile
599 ceph auth import
-i authfile
600 ceph auth
export -o authfile2
601 diff authfile authfile2
602 rm authfile authfile2
603 ceph auth del client.xx
604 expect_false ceph auth get client.xx
606 # (almost) interactive mode
607 echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
608 ceph auth get client.xx
610 echo 'auth del client.xx' | ceph
611 expect_false ceph auth get client.xx
617 ceph-authtool
--create-keyring --name client.TEST
--gen-key --set-uid $auid TEST-keyring
618 expect_false ceph auth import
--in-file TEST-keyring
620 ceph-authtool
--create-keyring --name client.TEST
--gen-key --cap mon
"allow r" --set-uid $auid TEST-keyring
621 ceph auth import
--in-file TEST-keyring
623 ceph auth get client.TEST
> $TMPFILE
624 check_response
"auid = $auid"
625 ceph
--format json-pretty auth get client.TEST
> $TMPFILE
626 check_response
'"auid": '$auid
627 ceph auth
ls > $TMPFILE
628 check_response
"auid: $auid"
629 ceph
--format json-pretty auth
ls > $TMPFILE
630 check_response
'"auid": '$auid
631 ceph auth del client.TEST
634 function test_auth_profiles
()
636 ceph auth add client.xx-profile-ro mon
'allow profile read-only' \
637 mgr
'allow profile read-only'
638 ceph auth add client.xx-profile-rw mon
'allow profile read-write' \
639 mgr
'allow profile read-write'
640 ceph auth add client.xx-profile-rd mon
'allow profile role-definer'
642 ceph auth
export > client.xx.keyring
644 # read-only is allowed all read-only commands (auth excluded)
645 ceph
-n client.xx-profile-ro
-k client.xx.keyring status
646 ceph
-n client.xx-profile-ro
-k client.xx.keyring osd dump
647 ceph
-n client.xx-profile-ro
-k client.xx.keyring pg dump
648 ceph
-n client.xx-profile-ro
-k client.xx.keyring mon dump
649 ceph
-n client.xx-profile-ro
-k client.xx.keyring mds dump
650 # read-only gets access denied for rw commands or auth commands
651 ceph
-n client.xx-profile-ro
-k client.xx.keyring log foo
>& $TMPFILE || true
652 check_response
"EACCES: access denied"
653 ceph
-n client.xx-profile-ro
-k client.xx.keyring osd
set noout
>& $TMPFILE || true
654 check_response
"EACCES: access denied"
655 ceph
-n client.xx-profile-ro
-k client.xx.keyring auth
ls >& $TMPFILE || true
656 check_response
"EACCES: access denied"
658 # read-write is allowed for all read-write commands (except auth)
659 ceph
-n client.xx-profile-rw
-k client.xx.keyring status
660 ceph
-n client.xx-profile-rw
-k client.xx.keyring osd dump
661 ceph
-n client.xx-profile-rw
-k client.xx.keyring pg dump
662 ceph
-n client.xx-profile-rw
-k client.xx.keyring mon dump
663 ceph
-n client.xx-profile-rw
-k client.xx.keyring mds dump
664 ceph
-n client.xx-profile-rw
-k client.xx.keyring log foo
665 ceph
-n client.xx-profile-rw
-k client.xx.keyring osd
set noout
666 ceph
-n client.xx-profile-rw
-k client.xx.keyring osd
unset noout
667 # read-write gets access denied for auth commands
668 ceph
-n client.xx-profile-rw
-k client.xx.keyring auth
ls >& $TMPFILE || true
669 check_response
"EACCES: access denied"
671 # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
672 ceph
-n client.xx-profile-rd
-k client.xx.keyring auth
ls
673 ceph
-n client.xx-profile-rd
-k client.xx.keyring auth
export
674 ceph
-n client.xx-profile-rd
-k client.xx.keyring auth add client.xx-profile-foo
675 ceph
-n client.xx-profile-rd
-k client.xx.keyring status
676 ceph
-n client.xx-profile-rd
-k client.xx.keyring osd dump
>& $TMPFILE || true
677 check_response
"EACCES: access denied"
678 ceph
-n client.xx-profile-rd
-k client.xx.keyring pg dump
>& $TMPFILE || true
679 check_response
"EACCES: access denied"
680 # read-only 'mon' subsystem commands are allowed
681 ceph
-n client.xx-profile-rd
-k client.xx.keyring mon dump
682 # but read-write 'mon' commands are not
683 ceph
-n client.xx-profile-rd
-k client.xx.keyring mon add foo
1.1.1.1 >& $TMPFILE || true
684 check_response
"EACCES: access denied"
685 ceph
-n client.xx-profile-rd
-k client.xx.keyring mds dump
>& $TMPFILE || true
686 check_response
"EACCES: access denied"
687 ceph
-n client.xx-profile-rd
-k client.xx.keyring log foo
>& $TMPFILE || true
688 check_response
"EACCES: access denied"
689 ceph
-n client.xx-profile-rd
-k client.xx.keyring osd
set noout
>& $TMPFILE || true
690 check_response
"EACCES: access denied"
692 ceph
-n client.xx-profile-rd
-k client.xx.keyring auth del client.xx-profile-ro
693 ceph
-n client.xx-profile-rd
-k client.xx.keyring auth del client.xx-profile-rw
695 # add a new role-definer with the existing role-definer
696 ceph
-n client.xx-profile-rd
-k client.xx.keyring \
697 auth add client.xx-profile-rd2 mon
'allow profile role-definer'
698 ceph
-n client.xx-profile-rd
-k client.xx.keyring \
699 auth
export > client.xx.keyring
.2
700 # remove old role-definer using the new role-definer
701 ceph
-n client.xx-profile-rd2
-k client.xx.keyring
.2 \
702 auth del client.xx-profile-rd
703 # remove the remaining role-definer with admin
704 ceph auth del client.xx-profile-rd2
705 rm -f client.xx.keyring client.xx.keyring
.2
708 function test_mon_caps
()
710 ceph-authtool
--create-keyring $TEMP_DIR/ceph.client.bug.keyring
711 chmod +r
$TEMP_DIR/ceph.client.bug.keyring
712 ceph-authtool
$TEMP_DIR/ceph.client.bug.keyring
-n client.bug
--gen-key
713 ceph auth add client.bug
-i $TEMP_DIR/ceph.client.bug.keyring
715 rados lspools
--keyring $TEMP_DIR/ceph.client.bug.keyring
-n client.bug
>& $TMPFILE || true
716 check_response
"Permission denied"
718 rm -rf $TEMP_DIR/ceph.client.bug.keyring
719 ceph auth del client.bug
720 ceph-authtool
--create-keyring $TEMP_DIR/ceph.client.bug.keyring
721 chmod +r
$TEMP_DIR/ceph.client.bug.keyring
722 ceph-authtool
$TEMP_DIR/ceph.client.bug.keyring
-n client.bug
--gen-key
723 ceph-authtool
-n client.bug
--cap mon
'' $TEMP_DIR/ceph.client.bug.keyring
724 ceph auth add client.bug
-i $TEMP_DIR/ceph.client.bug.keyring
725 rados lspools
--keyring $TEMP_DIR/ceph.client.bug.keyring
-n client.bug
>& $TMPFILE || true
726 check_response
"Permission denied"
729 function test_mon_misc
()
731 # with and without verbosity
732 ceph osd dump |
grep '^epoch'
733 ceph
--concise osd dump |
grep '^epoch'
735 ceph osd df |
grep 'MIN/MAX VAR'
740 grep -v DIRTY
$TMPFILE
741 ceph df detail
> $TMPFILE
743 ceph df
--format json
> $TMPFILE
744 grep 'total_bytes' $TMPFILE
745 grep -v 'dirty' $TMPFILE
746 ceph df detail
--format json
> $TMPFILE
747 grep 'rd_bytes' $TMPFILE
748 grep 'dirty' $TMPFILE
749 ceph df
--format xml |
grep '<total_bytes>'
750 ceph df detail
--format xml |
grep '<rd_bytes>'
755 ceph health
--format json-pretty
756 ceph health detail
--format xml-pretty
758 ceph time-sync-status
761 for t
in mon osd mds
; do
766 mymsg
="this is a test log message $$.$(date)"
768 ceph log last |
grep "$mymsg"
769 ceph log last
100 |
grep "$mymsg"
770 ceph_watch_wait
"$mymsg"
774 ceph mgr module
enable restful
775 expect_false ceph mgr module
enable foodne
776 ceph mgr module
enable foodne
--force
777 ceph mgr module disable foodne
778 ceph mgr module disable foodnebizbangbash
782 ceph mon count-metadata ceph_version
787 ceph mgr count-metadata ceph_version
# check_mds_active fs_name
#
# Succeed iff the named filesystem reports an active MDS.
function check_mds_active()
{
  fs_name=$1
  ceph fs get $fs_name | grep active
}
# wait_mds_active fs_name
#
# Poll until the named fs has an active MDS, then assert it one final time.
# NOTE(review): loop interior (sleep/else/break/done) reconstructed from
# fragments — confirm against the original script.
function wait_mds_active()
{
  fs_name=$1
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_mds_active $fs_name ; then
      echo "waiting for an active MDS daemon ($i/$max_run)"
      sleep 5
    else
      break
    fi
  done
  check_mds_active $fs_name
}
# get_mds_gids fs_name
#
# Print the GIDs of all MDS daemons in the named fs, space separated.
# NOTE(review): the inline script uses python2 `print` statement syntax.
function get_mds_gids()
{
  fs_name=$1
  ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
}
# fail_all_mds fs_name
#
# Take the fs down and fail every MDS; abort the run if one stays active.
# NOTE(review): loop/guard scaffolding reconstructed from fragments.
function fail_all_mds()
{
  fs_name=$1
  ceph fs set $fs_name cluster_down true
  mds_gids=$(get_mds_gids $fs_name)
  for mds_gid in $mds_gids ; do
    ceph mds fail $mds_gid
  done
  if check_mds_active $fs_name ; then
    echo "An active MDS remains, something went wrong"
    exit -1
  fi
}
# Tear down every filesystem currently defined in the cluster.
# NOTE(review): the inline script uses python2 `print` statement syntax.
function remove_all_fs()
{
  existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
  for fs_name in $existing_fs ; do
    echo "Removing fs ${fs_name}..."
    fail_all_mds $fs_name
    echo "Removing existing filesystem '${fs_name}'..."
    ceph fs rm $fs_name --yes-i-really-mean-it
    echo "Removed '${fs_name}'."
  done
}
# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
  # Succeed iff at least one mds.* key is registered with the cluster.
  ceph auth ls | grep "^mds"
}
# some of the commands are just not idempotent.
#
# Run "$@" with CEPH_CLI_TEST_DUP_COMMAND temporarily unset, restoring the
# previous value afterwards.
function without_test_dup_command()
{
  if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
    "$@"
  else
    local saved=${CEPH_CLI_TEST_DUP_COMMAND}
    unset CEPH_CLI_TEST_DUP_COMMAND
    "$@"
    # BUG FIX: restore the saved value; the original assigned the literal
    # string "saved" (CEPH_CLI_TEST_DUP_COMMAND=saved).
    CEPH_CLI_TEST_DUP_COMMAND=$saved
  fi
}
869 function test_mds_tell
()
872 if ! mds_exists
; then
873 echo "Skipping test, no MDS found"
878 ceph osd pool create fs_data
10
879 ceph osd pool create fs_metadata
10
880 ceph fs new
$FS_NAME fs_metadata fs_data
881 wait_mds_active
$FS_NAME
883 # Test injectargs by GID
884 old_mds_gids
=$
(get_mds_gids
$FS_NAME)
885 echo Old GIDs
: $old_mds_gids
887 for mds_gid
in $old_mds_gids ; do
888 ceph tell mds.
$mds_gid injectargs
"--debug-mds 20"
890 expect_false ceph tell mds.a injectargs mds_max_file_recover
-1
892 # Test respawn by rank
893 without_test_dup_command ceph tell mds
.0 respawn
894 new_mds_gids
=$old_mds_gids
895 while [ $new_mds_gids -eq $old_mds_gids ] ; do
897 new_mds_gids
=$
(get_mds_gids
$FS_NAME)
899 echo New GIDs
: $new_mds_gids
902 without_test_dup_command ceph tell mds.a respawn
903 new_mds_gids
=$old_mds_gids
904 while [ $new_mds_gids -eq $old_mds_gids ] ; do
906 new_mds_gids
=$
(get_mds_gids
$FS_NAME)
908 echo New GIDs
: $new_mds_gids
911 ceph osd pool delete fs_data fs_data
--yes-i-really-really-mean-it
912 ceph osd pool delete fs_metadata fs_metadata
--yes-i-really-really-mean-it
915 function test_mon_mds
()
920 ceph osd pool create fs_data
10
921 ceph osd pool create fs_metadata
10
922 ceph fs new
$FS_NAME fs_metadata fs_data
924 ceph fs
set $FS_NAME cluster_down true
925 ceph fs
set $FS_NAME cluster_down false
927 # Legacy commands, act on default fs
928 ceph mds cluster_down
931 ceph mds compat rm_incompat
4
932 ceph mds compat rm_incompat
4
934 # We don't want any MDSs to be up, their activity can interfere with
935 # the "current_epoch + 1" checking below if they're generating updates
936 fail_all_mds
$FS_NAME
939 expect_false ceph mds deactivate
2
943 for mds_gid
in $
(get_mds_gids
$FS_NAME) ; do
944 ceph mds metadata
$mds_id
948 ceph mds count-metadata os
950 # XXX mds fail, but how do you undo it?
951 mdsmapfile
=$TEMP_DIR/mdsmap.$$
952 current_epoch
=$
(ceph mds getmap
-o $mdsmapfile --no-log-to-stderr 2>&1 |
grep epoch |
sed 's/.*epoch //')
956 ceph osd pool create data2
10
957 ceph osd pool create data3
10
958 data2_pool
=$
(ceph osd dump |
grep "pool.*'data2'" |
awk '{print $2;}')
959 data3_pool
=$
(ceph osd dump |
grep "pool.*'data3'" |
awk '{print $2;}')
960 ceph mds add_data_pool
$data2_pool
961 ceph mds add_data_pool
$data3_pool
962 ceph mds add_data_pool
100 >& $TMPFILE || true
963 check_response
"Error ENOENT"
964 ceph mds add_data_pool foobarbaz
>& $TMPFILE || true
965 check_response
"Error ENOENT"
966 ceph mds remove_data_pool
$data2_pool
967 ceph mds remove_data_pool
$data3_pool
968 ceph osd pool delete data2 data2
--yes-i-really-really-mean-it
969 ceph osd pool delete data3 data3
--yes-i-really-really-mean-it
970 ceph mds
set allow_multimds false
971 expect_false ceph mds set_max_mds
4
972 ceph mds
set allow_multimds true
973 ceph mds set_max_mds
4
974 ceph mds set_max_mds
3
975 ceph mds set_max_mds
256
976 expect_false ceph mds set_max_mds
257
977 ceph mds
set max_mds
4
978 ceph mds
set max_mds
256
979 expect_false ceph mds
set max_mds
257
980 expect_false ceph mds
set max_mds asdf
981 expect_false ceph mds
set inline_data true
982 ceph mds
set inline_data true
--yes-i-really-mean-it
983 ceph mds
set inline_data
yes --yes-i-really-mean-it
984 ceph mds
set inline_data
1 --yes-i-really-mean-it
985 expect_false ceph mds
set inline_data
--yes-i-really-mean-it
986 ceph mds
set inline_data false
987 ceph mds
set inline_data no
988 ceph mds
set inline_data
0
989 expect_false ceph mds
set inline_data asdf
990 ceph mds
set max_file_size
1048576
991 expect_false ceph mds
set max_file_size
123asdf
993 expect_false ceph mds
set allow_new_snaps
994 expect_false ceph mds
set allow_new_snaps true
995 ceph mds
set allow_new_snaps true
--yes-i-really-mean-it
996 ceph mds
set allow_new_snaps
0
997 ceph mds
set allow_new_snaps false
998 ceph mds
set allow_new_snaps no
999 expect_false ceph mds
set allow_new_snaps taco
1001 # we should never be able to add EC pools as data or metadata pools
1002 # create an ec-pool...
1003 ceph osd pool create mds-ec-pool
10 10 erasure
1005 ceph mds add_data_pool mds-ec-pool
2>$TMPFILE
1006 check_response
'erasure-code' $?
22
1008 ec_poolnum
=$
(ceph osd dump |
grep "pool.* 'mds-ec-pool" |
awk '{print $2;}')
1009 data_poolnum
=$
(ceph osd dump |
grep "pool.* 'fs_data" |
awk '{print $2;}')
1010 metadata_poolnum
=$
(ceph osd dump |
grep "pool.* 'fs_metadata" |
awk '{print $2;}')
1012 fail_all_mds
$FS_NAME
1015 # Check that rmfailed requires confirmation
1016 expect_false ceph mds rmfailed
0
1017 ceph mds rmfailed
0 --yes-i-really-mean-it
1020 # Check that `newfs` is no longer permitted
1021 expect_false ceph mds newfs
$metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
1023 # Check that 'fs reset' runs
1024 ceph fs
reset $FS_NAME --yes-i-really-mean-it
1026 # Check that creating a second FS fails by default
1027 ceph osd pool create fs_metadata2
10
1028 ceph osd pool create fs_data2
10
1030 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
1033 # Check that setting enable_multiple enables creation of second fs
1034 ceph fs flag
set enable_multiple true
--yes-i-really-mean-it
1035 ceph fs new cephfs2 fs_metadata2 fs_data2
1037 # Clean up multi-fs stuff
1038 fail_all_mds cephfs2
1039 ceph fs
rm cephfs2
--yes-i-really-mean-it
1040 ceph osd pool delete fs_metadata2 fs_metadata2
--yes-i-really-really-mean-it
1041 ceph osd pool delete fs_data2 fs_data2
--yes-i-really-really-mean-it
1043 fail_all_mds
$FS_NAME
1045 # Clean up to enable subsequent fs new tests
1046 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1049 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
1050 check_response
'erasure-code' $?
22
1051 ceph fs new
$FS_NAME mds-ec-pool fs_data
2>$TMPFILE
1052 check_response
'erasure-code' $?
22
1053 ceph fs new
$FS_NAME mds-ec-pool mds-ec-pool
2>$TMPFILE
1054 check_response
'erasure-code' $?
22
1057 # ... new create a cache tier in front of the EC pool...
1058 ceph osd pool create mds-tier
2
1059 ceph osd tier add mds-ec-pool mds-tier
1060 ceph osd tier set-overlay mds-ec-pool mds-tier
1061 tier_poolnum
=$
(ceph osd dump |
grep "pool.* 'mds-tier" |
awk '{print $2;}')
1063 # Use of a readonly tier should be forbidden
1064 ceph osd tier cache-mode mds-tier
readonly --yes-i-really-mean-it
1066 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
1067 check_response
'has a write tier (mds-tier) that is configured to forward' $?
22
1070 # Use of a writeback tier should enable FS creation
1071 ceph osd tier cache-mode mds-tier writeback
1072 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force
1074 # While a FS exists using the tiered pools, I should not be allowed
1075 # to remove the tier
1077 ceph osd tier remove-overlay mds-ec-pool
2>$TMPFILE
1078 check_response
'in use by CephFS' $?
16
1079 ceph osd tier remove mds-ec-pool mds-tier
2>$TMPFILE
1080 check_response
'in use by CephFS' $?
16
1083 fail_all_mds
$FS_NAME
1084 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1086 # ... but we should be forbidden from using the cache pool in the FS directly.
1088 ceph fs new
$FS_NAME fs_metadata mds-tier
--force 2>$TMPFILE
1089 check_response
'in use as a cache tier' $?
22
1090 ceph fs new
$FS_NAME mds-tier fs_data
2>$TMPFILE
1091 check_response
'in use as a cache tier' $?
22
1092 ceph fs new
$FS_NAME mds-tier mds-tier
2>$TMPFILE
1093 check_response
'in use as a cache tier' $?
22
1096 # Clean up tier + EC pools
1097 ceph osd tier remove-overlay mds-ec-pool
1098 ceph osd tier remove mds-ec-pool mds-tier
1100 # Create a FS using the 'cache' pool now that it's no longer a tier
1101 ceph fs new
$FS_NAME fs_metadata mds-tier
--force
1103 # We should be forbidden from using this pool as a tier now that
1104 # it's in use for CephFS
1106 ceph osd tier add mds-ec-pool mds-tier
2>$TMPFILE
1107 check_response
'in use by CephFS' $?
16
1110 fail_all_mds
$FS_NAME
1111 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1113 # We should be permitted to use an EC pool with overwrites enabled
1114 # as the data pool...
1115 ceph osd pool
set mds-ec-pool allow_ec_overwrites true
1116 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
1117 fail_all_mds
$FS_NAME
1118 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1120 # ...but not as the metadata pool
1122 ceph fs new
$FS_NAME mds-ec-pool fs_data
2>$TMPFILE
1123 check_response
'erasure-code' $?
22
1126 ceph osd pool delete mds-ec-pool mds-ec-pool
--yes-i-really-really-mean-it
1128 # Create a FS and check that we can subsequently add a cache tier to it
1129 ceph fs new
$FS_NAME fs_metadata fs_data
--force
1131 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1132 ceph osd tier add fs_metadata mds-tier
1133 ceph osd tier cache-mode mds-tier writeback
1134 ceph osd tier set-overlay fs_metadata mds-tier
1136 # Removing tier should be permitted because the underlying pool is
1137 # replicated (#11504 case)
1138 ceph osd tier cache-mode mds-tier proxy
1139 ceph osd tier remove-overlay fs_metadata
1140 ceph osd tier remove fs_metadata mds-tier
1141 ceph osd pool delete mds-tier mds-tier
--yes-i-really-really-mean-it
1144 fail_all_mds
$FS_NAME
1145 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1150 # ceph mds tell mds.a getmap
1153 # ceph mds set_state
1156 ceph osd pool delete fs_data fs_data
--yes-i-really-really-mean-it
1157 ceph osd pool delete fs_metadata fs_metadata
--yes-i-really-really-mean-it
# Verify that `ceph mds metadata` returns hostname info whether the MDS is
# addressed by gid, by name, or by rank, and that every mon agrees.
# NOTE(review): reconstructed from a line-mangled paste; lines absent from the
# paste (pipeline head, loop closers, braces) were restored from context --
# verify against upstream cephtool test.sh.
function test_mon_mds_metadata()
{
  local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
  test "$nmons" -gt 0

  ceph mds dump |
  sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
  while read gid id rank; do
    # each addressing form must resolve to the same daemon metadata
    ceph mds metadata ${gid} | grep '"hostname":'
    ceph mds metadata ${id} | grep '"hostname":'
    ceph mds metadata ${rank} | grep '"hostname":'

    # every monitor must serve the same metadata
    local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
    test "$n" -eq "$nmons"
  done

  expect_false ceph mds metadata UNKNOWN
}
# Exercise monitor-facing mon commands: map retrieval and feature flags.
# NOTE(review): reconstructed from a line-mangled paste; lines absent from the
# paste were restored from context -- verify against upstream test.sh.
function test_mon_mon()
{
  # print help message
  ceph --help mon
  # no mon add/remove
  ceph mon dump
  ceph mon getmap -o $TEMP_DIR/monmap.$$
  [ -s $TEMP_DIR/monmap.$$ ]

  # test mon features
  ceph mon feature ls
  ceph mon feature set kraken --yes-i-really-mean-it
  expect_false ceph mon feature set abcd
  expect_false ceph mon feature set abcd --yes-i-really-mean-it
}
# Generate a JSON secrets file for `ceph osd new -i` tests and print its path.
# NOTE(review): reconstructed from a line-mangled paste; control-flow closers
# and the write of the surrounding JSON braces were restored from context --
# verify against upstream test.sh.
function gen_secrets_file()
{
  # lets assume we can have the following types
  #  all - generates both cephx and lockbox, with mock dm-crypt key
  #  cephx - only cephx
  #  no_cephx - lockbox and dm-crypt, no cephx
  #  no_lockbox - dm-crypt and cephx, no lockbox
  #  empty - empty file
  #  empty_json - correct json, empty map
  #  bad_json - bad json :)
  local t=$1
  if [[ -z "$t" ]]; then
    t=all
  fi

  fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
  echo $fn
  if [[ "$t" == "empty" ]]; then
    return 0
  fi

  echo "{" > $fn
  if [[ "$t" == "bad_json" ]]; then
    echo "asd: ; }" >> $fn
    return 0
  elif [[ "$t" == "empty_json" ]]; then
    echo "}" >> $fn
    return 0
  fi

  cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
  lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
  dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""

  if [[ "$t" == "all" ]]; then
    echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "cephx" ]]; then
    echo "$cephx_secret" >> $fn
  elif [[ "$t" == "no_cephx" ]]; then
    echo "$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "no_lockbox" ]]; then
    echo "$cephx_secret,$dmcrypt_key" >> $fn
  else
    # bug fix: the message describes the *type*, so print $t, not the filename
    echo "unknown gen_secrets_file() type \'$t\'"
    return 1
  fi

  echo "}" >> $fn
  return 0
}
# Exercise the `osd new` / `osd destroy` / `osd purge` lifecycle, including
# cephx/lockbox/dm-crypt secret injection via -i <secrets.json>.
# NOTE(review): reconstructed from a line-mangled paste; lines absent from the
# paste (uuidgen assignments, loop closers, equality checks) were restored
# from context -- verify against upstream test.sh.
function test_mon_osd_create_destroy()
{
  # invalid argument combinations must fail with EINVAL
  ceph osd new 2>&1 | grep 'EINVAL'
  ceph osd new '' -1 2>&1 | grep 'EINVAL'
  ceph osd new '' 10 2>&1 | grep 'EINVAL'

  old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

  old_osds=$(ceph osd ls)
  num_osds=$(ceph osd ls | wc -l)

  uuid=$(uuidgen)
  id=$(ceph osd new $uuid 2>/dev/null)

  # the new id must not collide with any pre-existing osd
  for i in $old_osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd find $id

  # `osd new` with the same uuid is idempotent: same id comes back
  id2=`ceph osd new $uuid 2>/dev/null`
  [[ $id2 == $id ]]

  ceph osd new $uuid $id

  id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST

  uuid2=$(uuidgen)
  id2=$(ceph osd new $uuid2)
  ceph osd find $id2
  [[ "$id2" != "$id" ]]

  ceph osd new $uuid $id2 2>&1 | grep EEXIST
  ceph osd new $uuid2 $id2

  # test with secrets
  empty_secrets=$(gen_secrets_file "empty")
  empty_json=$(gen_secrets_file "empty_json")
  all_secrets=$(gen_secrets_file "all")
  cephx_only=$(gen_secrets_file "cephx")
  no_cephx=$(gen_secrets_file "no_cephx")
  no_lockbox=$(gen_secrets_file "no_lockbox")
  bad_json=$(gen_secrets_file "bad_json")

  # empty secrets should be idempotent
  new_id=$(ceph osd new $uuid $id -i $empty_secrets)
  [[ "$new_id" == "$id" ]]

  # empty json, thus empty secrets
  new_id=$(ceph osd new $uuid $id -i $empty_json)
  [[ "$new_id" == "$id" ]]

  ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'

  ceph osd rm $id
  ceph osd rm $id2
  ceph osd setmaxosd $old_maxosd

  # lockbox-only / dm-crypt-only secrets are rejected
  ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'

  osds=$(ceph osd ls)
  id=$(ceph osd new $uuid -i $all_secrets)
  for i in $osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd find $id

  # validate secrets and dm-crypt are set
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
      jq '.key')
  s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
  [[ $k == $s ]]
  ceph config-key exists dm-crypt/osd/$uuid/luks

  osds=$(ceph osd ls)
  id2=$(ceph osd new $uuid2 -i $cephx_only)
  for i in $osds; do
    [[ "$i" != "$id2" ]]
  done

  ceph osd find $id2

  # cephx-only: key must match, but no lockbox key and no dm-crypt entry
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]

  expect_false ceph auth get-key client.osd-lockbox.$uuid2
  expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks

  ceph osd destroy osd.$id2 --yes-i-really-mean-it
  ceph osd destroy $id2 --yes-i-really-mean-it # idempotent

  expect_false ceph auth get-key osd.$id2
  ceph osd dump | grep osd.$id2 | grep destroyed

  # recreating a destroyed id with a new uuid revives it
  id3=$id2
  uuid3=$(uuidgen)
  ceph osd new $uuid3 $id3 -i $all_secrets
  ceph osd dump | grep osd.$id3 | expect_false grep destroyed
  ceph auth get-key client.osd-lockbox.$uuid3
  ceph auth get-key osd.$id3
  ceph config-key exists dm-crypt/osd/$uuid3/luks

  # purge removes auth keys, lockbox keys and dm-crypt config
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  expect_false ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  expect_false ceph auth get-key client.osd-lockbox.$uuid3
  expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  ceph osd purge osd.$id3 --yes-i-really-mean-it # idempotent

  ceph osd purge osd.$id --yes-i-really-mean-it
  ceph osd purge 123456 --yes-i-really-mean-it
  expect_false ceph osd find $id
  expect_false ceph auth get-key osd.$id
  expect_false ceph auth get-key client.osd-lockbox.$uuid
  expect_false ceph config-key exists dm-crypt/osd/$uuid/luks

  rm $empty_secrets $empty_json $all_secrets $cephx_only \
     $no_cephx $no_lockbox $bad_json

  # cluster must be back to its original osd population
  for i in $(ceph osd ls); do
    [[ "$i" != "$id" ]]
    [[ "$i" != "$id2" ]]
    [[ "$i" != "$id3" ]]
  done

  [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
  ceph osd setmaxosd $old_maxosd
}
# Exercise the config-key store: set/get/list/dump/rm round trip.
# NOTE(review): reconstructed from a line-mangled paste (all original lines
# were present and contiguous; only re-joined and re-indented).
function test_mon_config_key()
{
  key=asdfasdfqwerqwreasdfuniquesa123df
  # key must not exist yet
  ceph config-key list | grep -c $key | grep 0
  ceph config-key get $key | grep -c bar | grep 0
  ceph config-key set $key bar
  ceph config-key get $key | grep bar
  ceph config-key list | grep -c $key | grep 1
  ceph config-key dump | grep $key | grep bar
  ceph config-key rm $key
  # key must be gone after rm
  expect_false ceph config-key get $key
  ceph config-key list | grep -c $key | grep 0
  ceph config-key dump | grep -c $key | grep 0
}
# Exercise the `ceph osd` command family: blacklist, crush tunables, cluster
# flags, up/down/in/out transitions, maxosd, osd create/rm, pools and trees.
# NOTE(review): reconstructed from a line-mangled paste; lines absent from the
# paste (loop/if closers, paired set/unset commands, blank separators) were
# restored from context -- verify against upstream test.sh.
function test_mon_osd()
{
  #
  # osd blacklist
  #
  bl=192.168.0.1:0/1000
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
  ceph osd dump --format=json-pretty | grep $bl
  ceph osd dump | grep "^blacklist $bl"
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl

  bl=192.168.0.1
  # test without nonce, invalid nonce
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist rm $bl
  # bug fix: was `grep $expect_false bl` -- an unset variable, so the pattern
  # degenerated to the literal "bl"; the intent is to check $bl is gone
  ceph osd blacklist ls | expect_false grep $bl
  expect_false "ceph osd blacklist $bl/-1"
  expect_false "ceph osd blacklist $bl/foo"

  # test with wrong address
  expect_false "ceph osd blacklist 1234.56.78.90/100"

  # Test `clear` command
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist clear
  ceph osd blacklist ls | expect_false grep $bl

  #
  # osd crush
  #
  ceph osd crush reweight-all
  ceph osd crush tunables legacy
  ceph osd crush show-tunables | grep argonaut
  ceph osd crush tunables bobtail
  ceph osd crush show-tunables | grep bobtail
  ceph osd crush tunables firefly
  ceph osd crush show-tunables | grep firefly

  ceph osd crush set-tunable straw_calc_version 0
  ceph osd crush get-tunable straw_calc_version | grep 0
  ceph osd crush set-tunable straw_calc_version 1
  ceph osd crush get-tunable straw_calc_version | grep 1

  # require-min-compat-client
  expect_false ceph osd set-require-min-compat-client dumpling  # firefly tunables
  ceph osd set-require-min-compat-client luminous
  ceph osd dump | grep 'require_min_compat_client luminous'

  #
  # osd scrub
  #
  # how do I tell when these are done?
  ceph osd scrub 0
  ceph osd deep-scrub 0
  ceph osd repair 0

  # each cluster flag can be set and unset
  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
  do
    ceph osd set $f
    ceph osd unset $f
  done
  expect_false ceph osd unset sortbitwise  # cannot be unset
  expect_false ceph osd set bogus
  expect_false ceph osd unset bogus
  ceph osd require-osd-release luminous
  # can't lower (or use new command for anything but jewel)
  expect_false ceph osd require-osd-release jewel
  # these are no-ops but should succeed.
  ceph osd set require_jewel_osds
  ceph osd set require_kraken_osds
  expect_false ceph osd unset require_jewel_osds

  # down an osd while noup is set, then let it come back
  ceph osd set noup
  ceph osd down 0
  ceph osd dump | grep 'osd.0 down'
  ceph osd unset noup
  max_run=1000
  for ((i=0; i < $max_run; i++)); do
    if ! ceph osd dump | grep 'osd.0 up'; then
      echo "waiting for osd.0 to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  ceph osd dump | grep 'osd.0 up'

  ceph osd dump | grep 'osd.0 up'
  # ceph osd find expects the OsdName, so both ints and osd.n should work.
  ceph osd find 1
  ceph osd find osd.1
  expect_false ceph osd find osd.xyz
  expect_false ceph osd find xyz
  expect_false ceph osd find 0.1
  ceph --format plain osd find 1 # falls back to json-pretty
  if [ `uname` == Linux ]; then
    ceph osd metadata 1 | grep 'distro'
    ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
  fi
  ceph osd out 0
  ceph osd dump | grep 'osd.0.*out'
  ceph osd in 0
  ceph osd dump | grep 'osd.0.*in'
  ceph osd find 0

  ceph osd add-nodown 0 1
  ceph health detail | grep 'NODOWN'
  ceph osd rm-nodown 0 1
  ! ceph health detail | grep 'NODOWN'

  ceph osd out 0 # so we can mark it as noin later
  ceph osd add-noin 0
  ceph health detail | grep 'NOIN'
  ceph osd rm-noin 0
  ! ceph health detail | grep 'NOIN'
  ceph osd in 0

  ceph osd add-noout 0
  ceph health detail | grep 'NOOUT'
  ceph osd rm-noout 0
  ! ceph health detail | grep 'NOOUT'

  # malformed osd ids must be rejected
  expect_false ceph osd add-noup 797er
  expect_false ceph osd add-nodown u9uwer
  expect_false ceph osd add-noin 78~15
  expect_false ceph osd add-noout 0 all 1

  expect_false ceph osd rm-noup 1234567
  expect_false ceph osd rm-nodown fsadf7
  expect_false ceph osd rm-noin 0 1 any
  expect_false ceph osd rm-noout 790-fd

  ids=`ceph osd ls-tree default`
  for osd in $ids
  do
    ceph osd add-nodown $osd
    ceph osd add-noout $osd
  done
  ceph -s | grep 'NODOWN'
  ceph -s | grep 'NOOUT'
  ceph osd rm-nodown any
  ceph osd rm-noout all
  ! ceph -s | grep 'NODOWN'
  ! ceph -s | grep 'NOOUT'

  # make sure mark out preserves weight
  ceph osd reweight osd.0 .5
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
  ceph osd out 0
  ceph osd in 0
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'

  f=$TEMP_DIR/map.$$
  ceph osd getmap -o $f
  [ -s $f ]
  rm $f
  save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  [ "$save" -gt 0 ]
  ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
  ceph osd setmaxosd 10
  ceph osd getmaxosd | grep 'max_osd = 10'
  ceph osd setmaxosd $save
  ceph osd getmaxosd | grep "max_osd = $save"

  for id in `ceph osd ls` ; do
    retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
  done

  ceph osd rm 0 2>&1 | grep 'EBUSY'

  local old_osds=$(echo $(ceph osd ls))
  id=`ceph osd create`
  ceph osd find $id
  ceph osd lost $id --yes-i-really-mean-it
  expect_false ceph osd setmaxosd $id
  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id
  done

  uuid=`uuidgen`
  id=`ceph osd create $uuid`
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  ceph osd rm $id

  ceph --help osd

  # reset max_osd.
  ceph osd setmaxosd $id
  ceph osd getmaxosd | grep "max_osd = $save"
  local max_osd=$save

  ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
  ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'

  id=`ceph osd create $uuid $max_osd`
  [ "$id" = "$max_osd" ]
  ceph osd find $id
  max_osd=$((max_osd + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
  ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  id2=`ceph osd create $uuid $id`
  [ "$id" = "$id2" ]

  uuid=`uuidgen`
  local gap_start=$max_osd
  id=`ceph osd create $uuid $((gap_start + 100))`
  [ "$id" = "$((gap_start + 100))" ]
  max_osd=$((id + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'

  #
  # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
  # is repeated and consumes two osd id, not just one.
  #
  local next_osd=$gap_start
  id=`ceph osd create $(uuidgen)`
  [ "$id" = "$next_osd" ]

  next_osd=$((id + 1))
  id=`ceph osd create $(uuidgen) $next_osd`
  [ "$id" = "$next_osd" ]

  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id
  done
  ceph osd setmaxosd $save

  ceph osd ls
  ceph osd pool create data 10
  ceph osd pool application enable data rados
  ceph osd lspools | grep data
  ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
  ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pause
  ceph osd dump | grep 'flags.*pauserd,pausewr'
  ceph osd unpause

  ceph osd tree
  ceph osd tree up
  ceph osd tree down
  ceph osd tree in
  ceph osd tree out
  ceph osd tree destroyed
  ceph osd tree up in
  ceph osd tree up out
  ceph osd tree down in
  ceph osd tree down out
  ceph osd tree out down
  expect_false ceph osd tree up down
  expect_false ceph osd tree up destroyed
  expect_false ceph osd tree down destroyed
  expect_false ceph osd tree up down destroyed
  expect_false ceph osd tree in out
  expect_false ceph osd tree up foo

  ceph osd metadata
  ceph osd count-metadata os
  ceph osd versions

  ceph osd perf
  ceph osd blocked-by

  ceph osd stat | grep up,
}
# Verify crushmap get/set epoch semantics: setting with a stale epoch fails,
# setting with the current epoch succeeds and is idempotent.
# NOTE(review): reconstructed from a line-mangled paste; lines absent from the
# paste were restored from context -- verify against upstream test.sh.
function test_mon_crush()
{
  f=$TEMP_DIR/map.$$
  epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
  [ -s $f ]
  [ "$epoch" -gt 1 ]
  nextepoch=$(( $epoch + 1 ))
  echo epoch $epoch nextepoch $nextepoch

  expect_false ceph osd setcrushmap $nextepoch -i $f
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  # should be idempotent
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo epoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  rm $f
}
# Exercise pool lifecycle commands: create/delete, snapshots, rename, and
# replicated-vs-erasure type handling including allow_ec_overwrites.
# NOTE(review): reconstructed from a line-mangled paste; if/else closers were
# restored from context -- verify against upstream test.sh.
function test_mon_osd_pool()
{
  #
  # osd pool
  #
  ceph osd pool create data 10
  ceph osd pool application enable data rados
  ceph osd pool mksnap data datasnap
  rados -p data lssnap | grep datasnap
  ceph osd pool rmsnap data datasnap
  expect_false ceph osd pool rmsnap pool_fake snapshot
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pool create data2 10
  ceph osd pool application enable data2 rados
  ceph osd pool rename data2 data3
  ceph osd lspools | grep data3
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it

  # re-creating with identical arguments is idempotent
  ceph osd pool create replicated 12 12 replicated
  ceph osd pool create replicated 12 12 replicated
  ceph osd pool create replicated 12 12 # default is replicated
  ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
  ceph osd pool application enable replicated rados
  # should fail because the type is not the same
  expect_false ceph osd pool create replicated 12 12 erasure
  ceph osd lspools | grep replicated
  ceph osd pool create ec_test 1 1 erasure
  ceph osd pool application enable ec_test rados
  ceph osd count-metadata osd_objectstore | grep 'bluestore'
  if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
    ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
    check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
  else
    ceph osd pool set ec_test allow_ec_overwrites true || return 1
    expect_false ceph osd pool set ec_test allow_ec_overwrites false
  fi
  ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
  ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
}
# Exercise pool quota set/get: invalid values are rejected, unit prefixes
# (K/Ki/M) are honored, and a quota of 0 reports as N/A.
# NOTE(review): reconstructed from a line-mangled paste; lines absent from the
# paste were restored from context -- verify against upstream test.sh.
function test_mon_osd_pool_quota()
{
  #
  # test osd pool set/get quota
  #

  # create tmp pool
  ceph osd pool create tmp-quota-pool 36
  ceph osd pool application enable tmp-quota-pool rados
  #
  # set erroneous quotas
  #
  expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
  expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
  expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
  #
  # set valid quotas
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10
  ceph osd pool set-quota tmp-quota-pool max_objects 10M
  #
  # get quotas in json-pretty format
  #
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_objects":.*10000000'
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_bytes":.*10'
  #
  # get quotas in plain format
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10M objects'
  #
  # set valid quotas with unit prefix
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10K
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10Ki'
  #
  # set valid quotas with unit prefix
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10Ki'
  #
  # reset pool quotas
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 0
  ceph osd pool set-quota tmp-quota-pool max_objects 0
  #
  # test N/A quotas
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'

  # cleanup tmp pool
  ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
}
# Exercise pg commands (dump/ls/map/scrub), full-ratio settings plus their
# health warnings, admin-socket injectfull states, reweight, primary-affinity
# and pg-temp argument validation.
# NOTE(review): reconstructed from a line-mangled paste; lines absent from the
# paste (wait_for_* calls, `fi`/`done`, some pg ls variants) were restored
# from context -- verify against upstream test.sh.
function test_mon_pg()
{
  # Make sure we start healthy.
  wait_for_clean

  ceph pg debug unfound_objects_exist
  ceph pg debug degraded_pgs_exist
  ceph pg deep-scrub 1.0
  ceph pg dump
  ceph pg dump pgs_brief --format=json
  ceph pg dump pgs --format=json
  ceph pg dump pools --format=json
  ceph pg dump osds --format=json
  ceph pg dump sum --format=json
  ceph pg dump all --format=json
  ceph pg dump pgs_brief osds --format=json
  ceph pg dump pools osds pgs_brief --format=json
  ceph pg dump_pools_json
  ceph pg dump_stuck inactive
  ceph pg dump_stuck unclean
  ceph pg dump_stuck stale
  ceph pg dump_stuck undersized
  ceph pg dump_stuck degraded
  ceph pg ls
  ceph pg ls 1
  ceph pg ls stale
  expect_false ceph pg ls scrubq
  ceph pg ls active stale repair recovering
  ceph pg ls 1 active
  ceph pg ls 1 active stale
  ceph pg ls-by-primary osd.0
  ceph pg ls-by-primary osd.0 1
  ceph pg ls-by-primary osd.0 active
  ceph pg ls-by-primary osd.0 active stale
  ceph pg ls-by-primary osd.0 1 active stale
  ceph pg ls-by-osd osd.0
  ceph pg ls-by-osd osd.0 1
  ceph pg ls-by-osd osd.0 active
  ceph pg ls-by-osd osd.0 active stale
  ceph pg ls-by-osd osd.0 1 active stale
  ceph pg ls-by-pool rbd
  ceph pg ls-by-pool rbd active stale
  # can't test this...
  # ceph pg force_create_pg
  ceph pg getmap -o $TEMP_DIR/map.$$
  [ -s $TEMP_DIR/map.$$ ]
  ceph pg map 1.0 | grep acting
  ceph pg repair 1.0
  ceph pg scrub 1.0

  ceph osd set-full-ratio .962
  ceph osd dump | grep '^full_ratio 0.962'
  ceph osd set-backfillfull-ratio .912
  ceph osd dump | grep '^backfillfull_ratio 0.912'
  ceph osd set-nearfull-ratio .892
  ceph osd dump | grep '^nearfull_ratio 0.892'

  # Check health status: ratios out of order raise OSD_OUT_OF_ORDER_FULL
  ceph osd set-nearfull-ratio .913
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-nearfull-ratio .892
  ceph osd set-backfillfull-ratio .963
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-backfillfull-ratio .912

  # Check injected full results
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
  wait_for_health "OSD_NEARFULL"
  ceph health detail | grep "osd.0 is near full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
  wait_for_health_ok

  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
  wait_for_health "OSD_BACKFILLFULL"
  ceph health detail | grep "osd.1 is backfill full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
  wait_for_health_ok

  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
  # failsafe and full are the same as far as the monitor is concerned
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.2 is full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
  wait_for_health_ok

  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.0 is full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
  wait_for_health_ok

  ceph pg stat | grep 'pgs:'
  ceph pg 1.0 query
  ceph tell 1.0 query
  ceph quorum_status
  ceph report | grep osd_stats
  ceph status
  ceph -s

  #
  # tell osd version
  #
  ceph tell osd.0 version
  expect_false ceph tell osd.9999 version
  expect_false ceph tell osd.foo version

  # back to pg stuff

  ceph tell osd.0 dump_pg_recovery_stats | grep Started

  ceph osd reweight 0 0.9
  expect_false ceph osd reweight 0 -1
  ceph osd reweight osd.0 1

  ceph osd primary-affinity osd.0 .9
  expect_false ceph osd primary-affinity osd.0 -2
  expect_false ceph osd primary-affinity osd.9999 .5
  ceph osd primary-affinity osd.0 1

  ceph osd pool set rbd size 2
  ceph osd pg-temp 1.0 0 1
  ceph osd pg-temp 1.0 osd.1 osd.0
  expect_false ceph osd pg-temp 1.0 0 1 2
  expect_false ceph osd pg-temp asdf qwer
  expect_false ceph osd pg-temp 1.0 asdf
  expect_false ceph osd pg-temp 1.0

  # don't test ceph osd primary-temp for now
}
1944 function test_mon_osd_pool_set
()
1946 TEST_POOL_GETSET
=pool_getset
1947 ceph osd pool create
$TEST_POOL_GETSET 1
1948 ceph osd pool application
enable $TEST_POOL_GETSET rados
1950 ceph osd pool get
$TEST_POOL_GETSET all
1952 for s
in pg_num pgp_num size min_size crush_rule
; do
1953 ceph osd pool get
$TEST_POOL_GETSET $s
1956 old_size
=$
(ceph osd pool get
$TEST_POOL_GETSET size |
sed -e 's/size: //')
1957 (( new_size
= old_size
+ 1 ))
1958 ceph osd pool
set $TEST_POOL_GETSET size
$new_size
1959 ceph osd pool get
$TEST_POOL_GETSET size |
grep "size: $new_size"
1960 ceph osd pool
set $TEST_POOL_GETSET size
$old_size
1962 ceph osd pool create pool_erasure
1 1 erasure
1963 ceph osd pool application
enable pool_erasure rados
1966 ceph osd pool
set pool_erasure size
4444 2>$TMPFILE
1967 check_response
'not change the size'
1969 ceph osd pool get pool_erasure erasure_code_profile
1972 ceph osd pool
set $TEST_POOL_GETSET auid
$auid
1973 ceph osd pool get
$TEST_POOL_GETSET auid |
grep $auid
1974 ceph
--format=xml osd pool get
$TEST_POOL_GETSET auid |
grep $auid
1975 ceph osd pool
set $TEST_POOL_GETSET auid
0
1977 for flag
in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub
; do
1978 ceph osd pool
set $TEST_POOL_GETSET $flag false
1979 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: false"
1980 ceph osd pool
set $TEST_POOL_GETSET $flag true
1981 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: true"
1982 ceph osd pool
set $TEST_POOL_GETSET $flag 1
1983 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: true"
1984 ceph osd pool
set $TEST_POOL_GETSET $flag 0
1985 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: false"
1986 expect_false ceph osd pool
set $TEST_POOL_GETSET $flag asdf
1987 expect_false ceph osd pool
set $TEST_POOL_GETSET $flag 2
1990 ceph osd pool get
$TEST_POOL_GETSET scrub_min_interval | expect_false
grep '.'
1991 ceph osd pool
set $TEST_POOL_GETSET scrub_min_interval
123456
1992 ceph osd pool get
$TEST_POOL_GETSET scrub_min_interval |
grep 'scrub_min_interval: 123456'
1993 ceph osd pool
set $TEST_POOL_GETSET scrub_min_interval
0
1994 ceph osd pool get
$TEST_POOL_GETSET scrub_min_interval | expect_false
grep '.'
1996 ceph osd pool get
$TEST_POOL_GETSET scrub_max_interval | expect_false
grep '.'
1997 ceph osd pool
set $TEST_POOL_GETSET scrub_max_interval
123456
1998 ceph osd pool get
$TEST_POOL_GETSET scrub_max_interval |
grep 'scrub_max_interval: 123456'
1999 ceph osd pool
set $TEST_POOL_GETSET scrub_max_interval
0
2000 ceph osd pool get
$TEST_POOL_GETSET scrub_max_interval | expect_false
grep '.'
2002 ceph osd pool get
$TEST_POOL_GETSET deep_scrub_interval | expect_false
grep '.'
2003 ceph osd pool
set $TEST_POOL_GETSET deep_scrub_interval
123456
2004 ceph osd pool get
$TEST_POOL_GETSET deep_scrub_interval |
grep 'deep_scrub_interval: 123456'
2005 ceph osd pool
set $TEST_POOL_GETSET deep_scrub_interval
0
2006 ceph osd pool get
$TEST_POOL_GETSET deep_scrub_interval | expect_false
grep '.'
2008 ceph osd pool get
$TEST_POOL_GETSET recovery_priority | expect_false
grep '.'
2009 ceph osd pool
set $TEST_POOL_GETSET recovery_priority
5
2010 ceph osd pool get
$TEST_POOL_GETSET recovery_priority |
grep 'recovery_priority: 5'
2011 ceph osd pool
set $TEST_POOL_GETSET recovery_priority
0
2012 ceph osd pool get
$TEST_POOL_GETSET recovery_priority | expect_false
grep '.'
2014 ceph osd pool get
$TEST_POOL_GETSET recovery_op_priority | expect_false
grep '.'
2015 ceph osd pool
set $TEST_POOL_GETSET recovery_op_priority
5
2016 ceph osd pool get
$TEST_POOL_GETSET recovery_op_priority |
grep 'recovery_op_priority: 5'
2017 ceph osd pool
set $TEST_POOL_GETSET recovery_op_priority
0
2018 ceph osd pool get
$TEST_POOL_GETSET recovery_op_priority | expect_false
grep '.'
2020 ceph osd pool get
$TEST_POOL_GETSET scrub_priority | expect_false
grep '.'
2021 ceph osd pool
set $TEST_POOL_GETSET scrub_priority
5
2022 ceph osd pool get
$TEST_POOL_GETSET scrub_priority |
grep 'scrub_priority: 5'
2023 ceph osd pool
set $TEST_POOL_GETSET scrub_priority
0
2024 ceph osd pool get
$TEST_POOL_GETSET scrub_priority | expect_false
grep '.'
2026 ceph osd pool
set $TEST_POOL_GETSET nopgchange
1
2027 expect_false ceph osd pool
set $TEST_POOL_GETSET pg_num
10
2028 expect_false ceph osd pool
set $TEST_POOL_GETSET pgp_num
10
2029 ceph osd pool
set $TEST_POOL_GETSET nopgchange
0
2030 ceph osd pool
set $TEST_POOL_GETSET pg_num
10
2032 ceph osd pool
set $TEST_POOL_GETSET pgp_num
10
2034 old_pgs
=$
(ceph osd pool get
$TEST_POOL_GETSET pg_num |
sed -e 's/pg_num: //')
2035 new_pgs
=$
(($old_pgs + $
(ceph osd stat
--format json | jq
'.num_osds') * 32))
2036 ceph osd pool
set $TEST_POOL_GETSET pg_num
$new_pgs
2037 ceph osd pool
set $TEST_POOL_GETSET pgp_num
$new_pgs
2039 old_pgs
=$
(ceph osd pool get
$TEST_POOL_GETSET pg_num |
sed -e 's/pg_num: //')
2040 new_pgs
=$
(($old_pgs + $
(ceph osd stat
--format json | jq
'.num_osds') * 32 + 1))
2041 expect_false ceph osd pool
set $TEST_POOL_GETSET pg_num
$new_pgs
2043 ceph osd pool
set $TEST_POOL_GETSET nosizechange
1
2044 expect_false ceph osd pool
set $TEST_POOL_GETSET size
2
2045 expect_false ceph osd pool
set $TEST_POOL_GETSET min_size
2
2046 ceph osd pool
set $TEST_POOL_GETSET nosizechange
0
2047 ceph osd pool
set $TEST_POOL_GETSET size
2
2049 ceph osd pool
set $TEST_POOL_GETSET min_size
2
2051 expect_false ceph osd pool
set $TEST_POOL_GETSET hashpspool
0
2052 ceph osd pool
set $TEST_POOL_GETSET hashpspool
0 --yes-i-really-mean-it
2054 expect_false ceph osd pool
set $TEST_POOL_GETSET hashpspool
1
2055 ceph osd pool
set $TEST_POOL_GETSET hashpspool
1 --yes-i-really-mean-it
2057 ceph osd pool get rbd crush_rule |
grep 'crush_rule: '
2059 ceph osd pool get
$TEST_POOL_GETSET compression_mode | expect_false
grep '.'
2060 ceph osd pool
set $TEST_POOL_GETSET compression_mode aggressive
2061 ceph osd pool get
$TEST_POOL_GETSET compression_mode |
grep 'aggressive'
2062 ceph osd pool
set $TEST_POOL_GETSET compression_mode
unset
2063 ceph osd pool get
$TEST_POOL_GETSET compression_mode | expect_false
grep '.'
2065 ceph osd pool get
$TEST_POOL_GETSET compression_algorithm | expect_false
grep '.'
2066 ceph osd pool
set $TEST_POOL_GETSET compression_algorithm zlib
2067 ceph osd pool get
$TEST_POOL_GETSET compression_algorithm |
grep 'zlib'
2068 ceph osd pool
set $TEST_POOL_GETSET compression_algorithm
unset
2069 ceph osd pool get
$TEST_POOL_GETSET compression_algorithm | expect_false
grep '.'
2071 ceph osd pool get
$TEST_POOL_GETSET compression_required_ratio | expect_false
grep '.'
2072 expect_false ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
1.1
2073 expect_false ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
-.2
2074 ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
.2
2075 ceph osd pool get
$TEST_POOL_GETSET compression_required_ratio |
grep '.2'
2076 ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
0
2077 ceph osd pool get
$TEST_POOL_GETSET compression_required_ratio | expect_false
grep '.'
2079 ceph osd pool get
$TEST_POOL_GETSET csum_type | expect_false
grep '.'
2080 ceph osd pool
set $TEST_POOL_GETSET csum_type crc32c
2081 ceph osd pool get
$TEST_POOL_GETSET csum_type |
grep 'crc32c'
2082 ceph osd pool
set $TEST_POOL_GETSET csum_type
unset
2083 ceph osd pool get
$TEST_POOL_GETSET csum_type | expect_false
grep '.'
2085 for size
in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block
; do
2086 ceph osd pool get
$TEST_POOL_GETSET $size | expect_false
grep '.'
2087 ceph osd pool
set $TEST_POOL_GETSET $size 100
2088 ceph osd pool get
$TEST_POOL_GETSET $size |
grep '100'
2089 ceph osd pool
set $TEST_POOL_GETSET $size 0
2090 ceph osd pool get
$TEST_POOL_GETSET $size | expect_false
grep '.'
2093 ceph osd pool
set $TEST_POOL_GETSET nodelete
1
2094 expect_false ceph osd pool delete
$TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2095 ceph osd pool
set $TEST_POOL_GETSET nodelete
0
2096 ceph osd pool delete
$TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
# Exercise 'ceph osd pool set/get' for cache-tier-only fields: they must work
# on a pool that is a tier (real-tier) and fail on one that is not (fake-tier).
# NOTE(review): this chunk is a line-numbered extraction; original lines
# 2101, 2105, 2119, 2149, 2153-2154, 2168, 2190 and 2194 are missing here
# (braces/blanks, and 2153-2154 presumably a wait_for_clean or comment --
# confirm against the pristine source before editing this function).
2100 function test_mon_osd_tiered_pool_set
()
2102 # this is really a tier pool
2103 ceph osd pool create real-tier
2
2104 ceph osd tier add rbd real-tier
# hit_set_* fields are only valid on tier pools
2106 ceph osd pool
set real-tier hit_set_type explicit_hash
2107 ceph osd pool get real-tier hit_set_type |
grep "hit_set_type: explicit_hash"
2108 ceph osd pool
set real-tier hit_set_type explicit_object
2109 ceph osd pool get real-tier hit_set_type |
grep "hit_set_type: explicit_object"
2110 ceph osd pool
set real-tier hit_set_type bloom
2111 ceph osd pool get real-tier hit_set_type |
grep "hit_set_type: bloom"
2112 expect_false ceph osd pool
set real-tier hit_set_type i_dont_exist
2113 ceph osd pool
set real-tier hit_set_period
123
2114 ceph osd pool get real-tier hit_set_period |
grep "hit_set_period: 123"
2115 ceph osd pool
set real-tier hit_set_count
12
2116 ceph osd pool get real-tier hit_set_count |
grep "hit_set_count: 12"
2117 ceph osd pool
set real-tier hit_set_fpp
.01
2118 ceph osd pool get real-tier hit_set_fpp |
grep "hit_set_fpp: 0.01"
# cache sizing / flush-evict tunables; ratios must lie in [0, 1]
2120 ceph osd pool
set real-tier target_max_objects
123
2121 ceph osd pool get real-tier target_max_objects | \
2122 grep 'target_max_objects:[ \t]\+123'
2123 ceph osd pool
set real-tier target_max_bytes
123456
2124 ceph osd pool get real-tier target_max_bytes | \
2125 grep 'target_max_bytes:[ \t]\+123456'
2126 ceph osd pool
set real-tier cache_target_dirty_ratio
.123
2127 ceph osd pool get real-tier cache_target_dirty_ratio | \
2128 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
2129 expect_false ceph osd pool
set real-tier cache_target_dirty_ratio
-.2
2130 expect_false ceph osd pool
set real-tier cache_target_dirty_ratio
1.1
2131 ceph osd pool
set real-tier cache_target_dirty_high_ratio
.123
2132 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
2133 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
2134 expect_false ceph osd pool
set real-tier cache_target_dirty_high_ratio
-.2
2135 expect_false ceph osd pool
set real-tier cache_target_dirty_high_ratio
1.1
2136 ceph osd pool
set real-tier cache_target_full_ratio
.123
2137 ceph osd pool get real-tier cache_target_full_ratio | \
2138 grep 'cache_target_full_ratio:[ \t]\+0.123'
# the ratio is stored internally in micro units (0.123 -> 123000)
2139 ceph osd dump
-f json-pretty |
grep '"cache_target_full_ratio_micro": 123000'
2140 ceph osd pool
set real-tier cache_target_full_ratio
1.0
2141 ceph osd pool
set real-tier cache_target_full_ratio
0
2142 expect_false ceph osd pool
set real-tier cache_target_full_ratio
1.1
2143 ceph osd pool
set real-tier cache_min_flush_age
123
2144 ceph osd pool get real-tier cache_min_flush_age | \
2145 grep 'cache_min_flush_age:[ \t]\+123'
2146 ceph osd pool
set real-tier cache_min_evict_age
234
2147 ceph osd pool get real-tier cache_min_evict_age | \
2148 grep 'cache_min_evict_age:[ \t]\+234'
# every tier-only field must be rejected on a plain (non-tier) pool
2150 # this is not a tier pool
2151 ceph osd pool create fake-tier
2
2152 ceph osd pool application
enable fake-tier rados
2155 expect_false ceph osd pool
set fake-tier hit_set_type explicit_hash
2156 expect_false ceph osd pool get fake-tier hit_set_type
2157 expect_false ceph osd pool
set fake-tier hit_set_type explicit_object
2158 expect_false ceph osd pool get fake-tier hit_set_type
2159 expect_false ceph osd pool
set fake-tier hit_set_type bloom
2160 expect_false ceph osd pool get fake-tier hit_set_type
2161 expect_false ceph osd pool
set fake-tier hit_set_type i_dont_exist
2162 expect_false ceph osd pool
set fake-tier hit_set_period
123
2163 expect_false ceph osd pool get fake-tier hit_set_period
2164 expect_false ceph osd pool
set fake-tier hit_set_count
12
2165 expect_false ceph osd pool get fake-tier hit_set_count
2166 expect_false ceph osd pool
set fake-tier hit_set_fpp
.01
2167 expect_false ceph osd pool get fake-tier hit_set_fpp
2169 expect_false ceph osd pool
set fake-tier target_max_objects
123
2170 expect_false ceph osd pool get fake-tier target_max_objects
2171 expect_false ceph osd pool
set fake-tier target_max_bytes
123456
2172 expect_false ceph osd pool get fake-tier target_max_bytes
2173 expect_false ceph osd pool
set fake-tier cache_target_dirty_ratio
.123
2174 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2175 expect_false ceph osd pool
set fake-tier cache_target_dirty_ratio
-.2
2176 expect_false ceph osd pool
set fake-tier cache_target_dirty_ratio
1.1
2177 expect_false ceph osd pool
set fake-tier cache_target_dirty_high_ratio
.123
2178 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2179 expect_false ceph osd pool
set fake-tier cache_target_dirty_high_ratio
-.2
2180 expect_false ceph osd pool
set fake-tier cache_target_dirty_high_ratio
1.1
2181 expect_false ceph osd pool
set fake-tier cache_target_full_ratio
.123
2182 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2183 expect_false ceph osd pool
set fake-tier cache_target_full_ratio
1.0
2184 expect_false ceph osd pool
set fake-tier cache_target_full_ratio
0
2185 expect_false ceph osd pool
set fake-tier cache_target_full_ratio
1.1
2186 expect_false ceph osd pool
set fake-tier cache_min_flush_age
123
2187 expect_false ceph osd pool get fake-tier cache_min_flush_age
2188 expect_false ceph osd pool
set fake-tier cache_min_evict_age
234
2189 expect_false ceph osd pool get fake-tier cache_min_evict_age
# teardown: detach the tier before deleting both pools
2191 ceph osd tier remove rbd real-tier
2192 ceph osd pool delete real-tier real-tier
--yes-i-really-really-mean-it
2193 ceph osd pool delete fake-tier fake-tier
--yes-i-really-really-mean-it
function test_mon_osd_erasure_code()
{
  # Setting a profile to the same value is idempotent ...
  ceph osd erasure-code-profile set fooprofile a=b c=d
  ceph osd erasure-code-profile set fooprofile a=b c=d
  # ... but changing an existing profile requires --force
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
  # ruleset-foo will work for luminous only
  ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
  ceph osd erasure-code-profile set barprofile crush-failure-domain=host
  # NOTE(review): one elided line (orig. 2208) presumed blank/comment -- confirm
  ceph osd erasure-code-profile rm fooprofile
  ceph osd erasure-code-profile rm barprofile
}
# Argument-validation checks for misc 'ceph osd' / 'ceph pg' commands, plus
# reweight-by-utilization / reweight-by-pg exercises.
# NOTE(review): line-numbered extraction; original lines 2214-2216 and
# 2228-2230 are missing -- they plausibly hold the `set +e` / `set -e`
# toggles around the check_response calls. Recover them from the pristine
# source before editing; omitting them would change errexit behavior.
2213 function test_mon_osd_misc
()
2217 # expect error about missing 'pool' argument
2218 ceph osd map
2>$TMPFILE; check_response
'pool' $?
22
2220 # expect error about unused argument foo
2221 ceph osd
ls foo
2>$TMPFILE; check_response
'unused' $?
22
2223 # expect "not in range" for invalid full ratio
2224 ceph pg set_full_ratio
95 2>$TMPFILE; check_response
'not in range' $?
22
2226 # expect "not in range" for invalid overload percentage
2227 ceph osd reweight-by-utilization
80 2>$TMPFILE; check_response
'higher than 100' $?
22
# valid overload (>100) forms must succeed; zero/negative max-change must not
2231 ceph osd reweight-by-utilization
110
2232 ceph osd reweight-by-utilization
110 .5
2233 expect_false ceph osd reweight-by-utilization
110 0
2234 expect_false ceph osd reweight-by-utilization
110 -0.1
2235 ceph osd test-reweight-by-utilization
110 .5 --no-increasing
2236 ceph osd test-reweight-by-utilization
110 .5 4 --no-increasing
2237 expect_false ceph osd test-reweight-by-utilization
110 .5 0 --no-increasing
2238 expect_false ceph osd test-reweight-by-utilization
110 .5 -10 --no-increasing
2239 ceph osd reweight-by-pg
110
2240 ceph osd test-reweight-by-pg
110 .5
2241 ceph osd reweight-by-pg
110 rbd
2242 ceph osd reweight-by-pg
110 .5 rbd
2243 expect_false ceph osd reweight-by-pg
110 boguspoolasdfasdfasdf
# Exercise the monitor's tcmalloc heap profiler via 'ceph heap ...'.
# NOTE(review): line-numbered extraction; original lines 2247-2249,
# 2254-2257, 2259 and 2261 are missing (the do_test initialisation, the
# do_test=0 / fi fallback, and any commands between start/stop_profiler).
# Recover them from the pristine source before editing.
2246 function test_mon_heap_profiler
()
2250 # expect 'heap' commands to be correctly parsed
2251 ceph heap stats
2>$TMPFILE
# exit code 22 (EINVAL) plus this message means the build lacks tcmalloc,
# so the profiler cannot be exercised at all
2252 if [[ $?
-eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2253 echo "tcmalloc not enabled; skip heap profiler test"
# $do_test is presumably cleared in the elided branch above -- TODO confirm
2258 [[ $do_test -eq 0 ]] && return 0
2260 ceph heap start_profiler
2262 ceph heap stop_profiler
# Same heap-profiler exercise as test_mon_heap_profiler, but driven through
# an OSD admin socket instead of the mon command path.
# NOTE(review): line-numbered extraction; original lines 2267-2269,
# 2274-2277, 2279, 2281 and 2286 are missing (do_test initialisation and the
# do_test=0 / fi fallback). Recover them from the pristine source.
2266 function test_admin_heap_profiler
()
2270 # expect 'heap' commands to be correctly parsed
2271 ceph heap stats
2>$TMPFILE
# EINVAL + this message means no tcmalloc; skip the test in that case
2272 if [[ $?
-eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2273 echo "tcmalloc not enabled; skip heap profiler test"
# $do_test is presumably cleared in the elided branch above -- TODO confirm
2278 [[ $do_test -eq 0 ]] && return 0
# resolve osd.0's admin socket path (helper defined at the top of the file)
2280 local admin_socket
=$
(get_admin_socket osd
.0)
# full profiler cycle over the admin socket: start, dump, stop, release
2282 $SUDO ceph
--admin-daemon $admin_socket heap start_profiler
2283 $SUDO ceph
--admin-daemon $admin_socket heap dump
2284 $SUDO ceph
--admin-daemon $admin_socket heap stop_profiler
2285 $SUDO ceph
--admin-daemon $admin_socket heap release
# Verify 'ceph tell osd.N bench' enforces its injected size/count limits.
# NOTE(review): line-numbered extraction; the opener of the args assignment
# (orig. 2298, presumably `local args="\`) and lines 2289, 2304, 2309, 2313,
# 2318, 2323, 2328-2329 are missing. Recover from the pristine source.
2288 function test_osd_bench
()
2290 # test osd bench limits
2291 # As we should not rely on defaults (as they may change over time),
2292 # lets inject some values and perform some simple tests
2293 # max iops: 10 # 100 IOPS
2294 # max throughput: 10485760 # 10MB/s
2295 # max block size: 2097152 # 2MB
2296 # duration: 10 # 10 seconds
2299 --osd-bench-duration 10 \
2300 --osd-bench-max-block-size 2097152 \
2301 --osd-bench-large-size-max-throughput 10485760 \
2302 --osd-bench-small-size-max-iops 10"
# ${args## } strips the leading space before handing the args to injectargs
2303 ceph tell osd
.0 injectargs
${args## }
2305 # anything with a bs larger than 2097152 must fail
2306 expect_false ceph tell osd
.0 bench
1 2097153
2307 # but using 'osd_bench_max_bs' must succeed
2308 ceph tell osd
.0 bench
1 2097152
2310 # we assume 1MB as a large bs; anything lower is a small bs
2311 # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
2312 # max count: 409600 (bytes)
2314 # more than max count must not be allowed
2315 expect_false ceph tell osd
.0 bench
409601 4096
2316 # but 409600 must be succeed
2317 ceph tell osd
.0 bench
409600 4096
2319 # for a large bs, we are limited by throughput.
2320 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
2321 # the max count will be (10MB * 10s) = 100MB
2322 # max count: 104857600 (bytes)
2324 # more than max count must not be allowed
2325 expect_false ceph tell osd
.0 bench
104857601 2097152
2326 # up to max count must be allowed
2327 ceph tell osd
.0 bench
104857600 2097152
function test_osd_negative_filestore_merge_threshold()
{
  # A negative merge threshold disables filestore directory merging; make
  # sure the value round-trips through the osd.0 admin socket unchanged.
  $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
  expect_config_value "osd.0" "filestore_merge_threshold" -1
}
# 'ceph tell mon.X ...' must reach the named monitor (and fail for an
# unknown one); the audit log must show the dispatched command.
# NOTE(review): line-numbered extraction; original lines 2337, 2341-2343,
# 2347 and 2351-2352 are missing (content of 2341-2343 unknown -- confirm
# against the pristine source before editing).
2336 function test_mon_tell
()
2338 ceph tell mon.a version
2339 ceph tell mon.b version
2340 expect_false ceph tell mon.foo version
# watch the cluster audit channel and verify each tell is logged
2344 ceph_watch_start debug audit
2345 ceph tell mon.a version
2346 ceph_watch_wait
'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
2348 ceph_watch_start debug audit
2349 ceph tell mon.b version
2350 ceph_watch_wait
'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
# 'ceph ping' against a nonexistent monitor must fail.
# NOTE(review): line-numbered extraction; original lines 2354-2356 and
# 2358-2361 are missing -- presumably the positive cases (ping mon.a/mon.b)
# and the closing brace. Recover from the pristine source before editing.
2353 function test_mon_ping
()
2357 expect_false ceph
ping mon.foo
function test_mon_deprecated_commands()
{
  # Deprecated mon commands (compact, scrub, sync force) must be rejected
  # as obsolete while 'mon_debug_deprecated_as_obsolete' is enabled; each
  # failure message is captured and matched below.
  ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
  expect_false ceph tell mon.a compact 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a scrub 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a sync force 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  # restore the default behaviour for the remaining tests
  ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
}
# Check that 'ceph df detail' reports RAW USED == bytes_used * replica count
# for a size-2 pool holding one small object.
# NOTE(review): line-numbered extraction; original lines 2387-2389, 2391,
# 2395, 2398-2399, 2402-2403, 2406, 2410, 2413 and 2415 are missing --
# 2402-2403 presumably the loop's sleep and `done`, and 2406 a mon/osd stat
# flush. Recover them from the pristine source before editing.
2386 function test_mon_cephdf_commands
()
2390 # RAW USED The near raw used per pool in raw total
2392 ceph osd pool create cephdf_for_test
32 32 replicated
2393 ceph osd pool application
enable cephdf_for_test rados
2394 ceph osd pool
set cephdf_for_test size
2
# create a 4 KiB object and store it in the pool under the same name
2396 dd if=/dev
/zero of
=.
/cephdf_for_test bs
=4k count
=1
2397 rados put cephdf_for_test cephdf_for_test
-p cephdf_for_test
# poll (up to 10 tries) until the object is visible to the OSDs
2400 for i
in `seq 1 10`; do
2401 rados
-p cephdf_for_test
ls - |
grep -q cephdf_for_test
&& break
2404 # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
2405 # to sync mon with osd
# extract this pool's stats object from the 'ceph df detail' JSON
2407 local jq_filter
='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
2408 cal_raw_used_size
=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
2409 raw_used_size
=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`
# teardown before the assertion so the pool is cleaned up either way
2411 ceph osd pool delete cephdf_for_test cephdf_for_test
--yes-i-really-really-mean-it
2412 rm .
/cephdf_for_test
# raw usage must equal logical usage times the replication factor (2)
2414 expect_false
test $cal_raw_used_size != $raw_used_size
function test_mon_pool_application()
{
  # Exercise 'ceph osd pool application' enable/set/get/rm/disable and
  # verify the pool's application metadata after every step.
  ceph osd pool create app_for_test 10

  # enabling a second application requires --yes-i-really-mean-it
  ceph osd pool application enable app_for_test rbd
  expect_false ceph osd pool application enable app_for_test rgw
  ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool ls detail | grep "application rbd,rgw"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'

  # keys may only be set under applications already enabled on the pool
  expect_false ceph osd pool application set app_for_test cephfs key value
  ceph osd pool application set app_for_test rbd key1 value1
  ceph osd pool application set app_for_test rbd key2 value2
  ceph osd pool application set app_for_test rgw key1 value1
  ceph osd pool application get app_for_test rbd key1 | grep 'value1'
  ceph osd pool application get app_for_test rbd key2 | grep 'value2'
  ceph osd pool application get app_for_test rgw key1 | grep 'value1'

  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'

  # remove keys one by one and watch the metadata shrink accordingly
  ceph osd pool application rm app_for_test rgw key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key2
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1
  # should be idempotent

  # disabling also requires --yes-i-really-mean-it
  expect_false ceph osd pool application disable app_for_test rgw
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
  ceph osd pool ls detail | grep "application rbd"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'

  # BUGFIX: the remaining application is 'rbd' ('rgw' was already disabled
  # above); the two checks below expect the pool to end up with NO
  # applications, so it is 'rbd' that must be disabled here, not 'rgw'.
  ceph osd pool application disable app_for_test rbd --yes-i-really-mean-it
  ceph osd pool ls detail | grep -v "application "
  ceph osd pool ls detail --format=json | grep '"application_metadata":{}'

  ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
}
function test_mon_tell_help_command()
{
  # 'help' over tell must work for an existing monitor ...
  ceph tell mon.a help

  # ... and fail for a monitor that does not exist.
  # NOTE(review): two elided lines (orig. 2461-2462) presumed blank/comment
  # -- confirm against the pristine source.
  expect_false ceph tell mon.zzz help
}
function test_mon_stdin_stdout()
{
  # '-i -' reads the config-key value from stdin; '-o -' writes it to
  # stdout. The value must round-trip exactly once through the monitor.
  echo foo | ceph config-key set test_key -i -
  ceph config-key get test_key -o - | grep -c foo | grep -q 1
}
function test_osd_tell_help_command()
{
  # 'help' over tell must succeed for a live OSD and fail for an id that
  # does not exist in the cluster.
  ceph tell osd.1 help
  expect_false ceph tell osd.100 help
}
function test_osd_compact()
{
  # trigger a compaction both via 'tell' and via the OSD's admin socket
  ceph tell osd.1 compact
  $SUDO ceph daemon osd.1 compact
}
# 'ceph tell mds.X help' must work for a live MDS and fail for an unknown
# one; a throwaway filesystem is created for the duration of the test.
# NOTE(review): line-numbered extraction; original lines 2485, 2489-2492,
# 2497-2498, 2501-2502 and 2505-2506 are missing -- 2489-2490 presumably the
# early `return` and `fi` of the skip branch, and 2501-2502 the filesystem
# teardown. Recover them from the pristine source before editing.
2484 function test_mds_tell_help_command
()
2486 local FS_NAME
=cephfs
# skip entirely when the cluster has no MDS daemon
2487 if ! mds_exists
; then
2488 echo "Skipping test, no MDS found"
# build a minimal filesystem and wait for the MDS to go active
2493 ceph osd pool create fs_data
10
2494 ceph osd pool create fs_metadata
10
2495 ceph fs new
$FS_NAME fs_metadata fs_data
2496 wait_mds_active
$FS_NAME
2499 ceph tell mds.a
help
2500 expect_false ceph tell mds.z
help
# teardown: drop the data/metadata pools created above
2503 ceph osd pool delete fs_data fs_data
--yes-i-really-really-mean-it
2504 ceph osd pool delete fs_metadata fs_metadata
--yes-i-really-really-mean-it
function test_mgr_tell()
{
  # 'ceph tell mgr ...' must dispatch commands to the active manager
  #ceph tell mgr fs status # see http://tracker.ceph.com/issues/20761
  ceph tell mgr osd status
}
2515 # New tests should be added to the TESTS array below
2517 # Individual tests may be run using the '-t <testname>' argument
2518 # The user can specify '-t <testname>' as many times as she wants
2520 # Tests will be run in order presented in the TESTS array, or in
2521 # the order specified by the '-t <testname>' options.
2523 # '-l' will list all the available test names
2524 # '-h' will show usage
2526 # The test maintains backward compatibility: not specifying arguments
2527 # will run all tests following the order they appear in the TESTS array.
# Test registry: each entry names a test_<name> function defined above;
# the driver below runs them in this order (or those picked via '-t').
# NOTE(review): line-numbered extraction; original lines 2535-2536 (the
# tiering for-loop's `done`), 2559, 2566, 2571 and 2573-2582 (presumably the
# TESTS+=$MON_TESTS/... aggregation) are missing. Recover from the pristine
# source before editing.
2531 MON_TESTS
+=" mon_injectargs"
2532 MON_TESTS
+=" mon_injectargs_SI"
2533 for i
in `seq 9`; do
2534 MON_TESTS
+=" tiering_$i";
2537 MON_TESTS
+=" auth_profiles"
2538 MON_TESTS
+=" mon_misc"
2539 MON_TESTS
+=" mon_mon"
2540 MON_TESTS
+=" mon_osd"
2541 MON_TESTS
+=" mon_config_key"
2542 MON_TESTS
+=" mon_crush"
2543 MON_TESTS
+=" mon_osd_create_destroy"
2544 MON_TESTS
+=" mon_osd_pool"
2545 MON_TESTS
+=" mon_osd_pool_quota"
2546 MON_TESTS
+=" mon_pg"
2547 MON_TESTS
+=" mon_osd_pool_set"
2548 MON_TESTS
+=" mon_osd_tiered_pool_set"
2549 MON_TESTS
+=" mon_osd_erasure_code"
2550 MON_TESTS
+=" mon_osd_misc"
2551 MON_TESTS
+=" mon_heap_profiler"
2552 MON_TESTS
+=" mon_tell"
2553 MON_TESTS
+=" mon_ping"
2554 MON_TESTS
+=" mon_deprecated_commands"
2555 MON_TESTS
+=" mon_caps"
2556 MON_TESTS
+=" mon_cephdf_commands"
2557 MON_TESTS
+=" mon_tell_help_command"
2558 MON_TESTS
+=" mon_stdin_stdout"
2560 OSD_TESTS
+=" osd_bench"
2561 OSD_TESTS
+=" osd_negative_filestore_merge_threshold"
2562 OSD_TESTS
+=" tiering_agent"
2563 OSD_TESTS
+=" admin_heap_profiler"
2564 OSD_TESTS
+=" osd_tell_help_command"
2565 OSD_TESTS
+=" osd_compact"
2567 MDS_TESTS
+=" mds_tell"
2568 MDS_TESTS
+=" mon_mds"
2569 MDS_TESTS
+=" mon_mds_metadata"
2570 MDS_TESTS
+=" mds_tell_help_command"
2572 MGR_TESTS
+=" mgr_tell"
2583 function list_tests
()
2585 echo "AVAILABLE TESTS"
2593 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2600 while [[ $# -gt 0 ]]; do
2607 "--asok-does-not-need-root" )
2610 "--no-sanity-check" )
2614 tests_to_run
+="$MON_TESTS"
2617 tests_to_run
+="$OSD_TESTS"
2620 tests_to_run
+="$MDS_TESTS"
2623 tests_to_run
+="$MGR_TESTS"
2627 if [[ -z "$1" ]]; then
2628 echo "missing argument to '-t'"
2642 if [[ $do_list -eq 1 ]]; then
2647 ceph osd pool create rbd
10
2649 if test -z "$tests_to_run" ; then
2650 tests_to_run
="$TESTS"
2653 if $sanity_check ; then
2656 for i
in $tests_to_run; do
2657 if $sanity_check ; then
2664 if $sanity_check ; then