# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab

source $(dirname $0)/../ceph-helpers.sh

PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

function get_admin_socket()
  if test -n "$CEPH_OUT_DIR"; then
    echo $CEPH_OUT_DIR/$client.asok
  local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
  echo "/var/run/ceph/$cluster-$client.asok"
function check_no_osd_down()
  ! ceph osd dump | grep ' down '

function wait_no_osd_down()
  for i in $(seq 1 $max_run) ; do
    if ! check_no_osd_down ; then
      echo "waiting for osd(s) to come back up ($i/$max_run)"
function expect_false()
  if "$@"; then return 1; else return 0; fi
TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0

TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)

# retry_eagain max cmd args ...
#
# retry cmd args ... if it exits on error and its output contains the
# string EAGAIN, at most $max times
function retry_eagain()
  local tmpfile=$TEMP_DIR/retry_eagain.$$
  for count in $(seq 1 $max) ; do
    "$@" > $tmpfile 2>&1 || status=$?
    if test $status = 0 || ! grep --quiet EAGAIN $tmpfile ; then
    if test $count = $max ; then
      echo retried with non-zero exit status, $max times: "$@" >&2
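# Usage sketch (argument values illustrative): retry a command up to 5 times
# while it keeps failing and printing EAGAIN, e.g. during a mon election:
#   retry_eagain 5 ceph tell osd.0 version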
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of cmd arg ... if the output contains ENXIO.

function map_enxio_to_eagain()
  local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
  "$@" > $tmpfile 2>&1 || status=$?
  if test $status != 0 && grep --quiet ENXIO $tmpfile ; then
    echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
function check_response()
  if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
    echo "return code invalid: got $retcode, expected $expected_retcode" >&2
  if ! grep --quiet -- "$expected_string" $TMPFILE ; then
    echo "Didn't find $expected_string in output" >&2

function get_config_value_or_die()
  local target config_opt raw val

  raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
  if [[ $? -ne 0 ]]; then
    echo "error obtaining config opt '$config_opt' from '$target': $raw"
  raw=`echo $raw | sed -e 's/[{} "]//g'`
  val=`echo $raw | cut -f2 -d:`

function expect_config_value()
  local target config_opt expected_val val

  val=$(get_config_value_or_die $target $config_opt)
  if [[ "$val" != "$expected_val" ]]; then
    echo "expected '$expected_val', got '$val'"
function ceph_watch_start()
  local whatch_opt=--watch
  whatch_opt=--watch-$1
  CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
  ceph $whatch_opt > $CEPH_WATCH_FILE &

  # wait until the "ceph" client is connected and receiving
  # log messages from monitor
  grep -q "cluster" $CEPH_WATCH_FILE && break

function ceph_watch_wait()
  for i in `seq ${timeout}`; do
    grep -q "$regexp" $CEPH_WATCH_FILE && break
  if ! grep "$regexp" $CEPH_WATCH_FILE; then
    echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
    cat $CEPH_WATCH_FILE >&2
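# Typical pairing (see test_mon_misc below; the optional channel argument
# maps onto the CLI's --watch-<channel> flags):
#   ceph_watch_start               # plain `ceph --watch` in the background
#   ceph log "some message"        # anything that reaches the cluster log
#   ceph_watch_wait "some message"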
function test_mon_injectargs()
  CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ! grep "the.dump" $TMPFILE || return 1
  ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true'"
  ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
  expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
      ceph tell osd.0 injectargs -- '--osd_op_history_duration'

  ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
  check_response "mon_lease = '6' (not observed, change may require restart)"

  # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
  expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1
function test_mon_injectargs_SI()
  # Test SI units during injectargs and 'config set'
  # We only aim at testing the units are parsed accordingly
  # and don't intend to test whether the options being set
  # actually expect SI units to be passed.
  # Keep in mind that all integer based options (i.e., INT,
  # LONG, U32, U64) will accept SI unit modifiers.
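  # Worked example of the arithmetic asserted below (the modifiers are
  # base-2): 10K = 10 * 1024 = 10240, and 1G = 1024^3 = 1073741824.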
  initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
  check_response "'10F': (22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
  expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
  expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
function test_tiering_agent()
  local slow=slow_eviction
  local fast=fast_eviction
  ceph osd pool create $slow 1 1
  ceph osd pool create $fast 1 1
  ceph osd tier add $slow $fast
  ceph osd tier cache-mode $fast writeback
  ceph osd tier set-overlay $slow $fast
  ceph osd pool set $fast hit_set_type bloom
  rados -p $slow put obj1 /etc/group
  ceph osd pool set $fast target_max_objects 1
  ceph osd pool set $fast hit_set_count 1
  ceph osd pool set $fast hit_set_period 5
  # wait for the object to be evicted from the cache
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
  # the object is proxy read and promoted to the cache
  rados -p $slow get obj1 - >/dev/null
  # wait for the promoted object to be evicted again
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
  ceph osd tier remove-overlay $slow
  ceph osd tier remove $slow $fast
  ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
  ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
function test_tiering_1()
  ceph osd pool create slow 2
  ceph osd pool create slow2 2
  ceph osd pool create cache 2
  ceph osd pool create cache2 2
  ceph osd tier add slow cache
  ceph osd tier add slow cache2
  expect_false ceph osd tier add slow2 cache
  # test some state transitions
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache readonly
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache writeback
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  # test with dirty objects in the tier pool
  # tier pool currently set to 'writeback'
  rados -p cache put /etc/passwd /etc/passwd
  # 1 dirty object in pool 'cache'
  ceph osd tier cache-mode cache proxy
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache writeback
  # remove object from tier pool
  rados -p cache rm /etc/passwd
  rados -p cache cache-flush-evict-all
  # no dirty objects in pool 'cache'
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it

  while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
    grep 'currently creating pgs' $TMPFILE
    TRIES=$(( $TRIES + 1 ))

  expect_false ceph osd pool set cache pg_num 4
  ceph osd tier cache-mode cache none
  ceph osd tier set-overlay slow cache
  expect_false ceph osd tier set-overlay slow cache2
  expect_false ceph osd tier remove slow cache
  ceph osd tier remove-overlay slow
  ceph osd tier set-overlay slow cache2
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache
  ceph osd tier add slow2 cache
  expect_false ceph osd tier set-overlay slow cache
  ceph osd tier set-overlay slow2 cache
  ceph osd tier remove-overlay slow2
  ceph osd tier remove slow2 cache
  ceph osd tier remove slow cache2

  # make sure a non-empty pool fails
  rados -p cache2 put /etc/passwd /etc/passwd
  while ! ceph df | grep cache2 | grep ' 1 ' ; do
    echo waiting for pg stats to flush
  expect_false ceph osd tier add slow cache2
  ceph osd tier add slow cache2 --force-nonempty
  ceph osd tier remove slow cache2

  ceph osd pool ls | grep cache2
  ceph osd pool ls -f json-pretty | grep cache2
  ceph osd pool ls detail | grep cache2
  ceph osd pool ls detail -f json-pretty | grep cache2

  ceph osd pool delete slow slow --yes-i-really-really-mean-it
  ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
  ceph osd pool delete cache cache --yes-i-really-really-mean-it
  ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
function test_tiering_2()
  # make sure we can't clobber snapshot state
  ceph osd pool create snap_base 2
  ceph osd pool create snap_cache 2
  ceph osd pool mksnap snap_cache snapname
  expect_false ceph osd tier add snap_base snap_cache
  ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
  ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
function test_tiering_3()
  # make sure we can't create snapshot on tier
  ceph osd pool create basex 2
  ceph osd pool create cachex 2
  ceph osd tier add basex cachex
  expect_false ceph osd pool mksnap cachex snapname
  ceph osd tier remove basex cachex
  ceph osd pool delete basex basex --yes-i-really-really-mean-it
  ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
function test_tiering_4()
  # make sure we can't create an ec pool tier
  ceph osd pool create eccache 2 2 erasure
  expect_false ceph osd set-require-min-compat-client bobtail
  ceph osd pool create repbase 2
  expect_false ceph osd tier add repbase eccache
  ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
  ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
function test_tiering_5()
  # convenient add-cache command
  ceph osd pool create slow 2
  ceph osd pool create cache3 2
  ceph osd tier add-cache slow cache3 1024000
  ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
  ceph osd tier remove slow cache3 2> $TMPFILE || true
  check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache3
  ceph osd pool ls | grep cache3
  ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
  ! ceph osd pool ls | grep cache3 || exit 1
  ceph osd pool delete slow slow --yes-i-really-really-mean-it
function test_tiering_6()
  # check that add-cache works
  ceph osd pool create datapool 2
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd tier cache-mode cachepool writeback
  rados -p datapool put object /etc/passwd
  rados -p cachepool stat object
  rados -p cachepool cache-flush object
  rados -p datapool stat object
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
function test_tiering_7()
  # protection against pool removal when used as tiers
  ceph osd pool create datapool 2
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'datapool' has tiers cachepool"
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
function test_tiering_8()
  ## check health check
  ceph osd set notieragent
  ceph osd pool create datapool 2
  ceph osd pool create cache4 2
  ceph osd tier add-cache datapool cache4 1024000
  ceph osd tier cache-mode cache4 writeback
  tmpfile=$(mktemp|grep tmp)
  dd if=/dev/zero of=$tmpfile bs=4K count=1
  ceph osd pool set cache4 target_max_objects 200
  ceph osd pool set cache4 target_max_bytes 1000000
  rados -p cache4 put foo1 $tmpfile
  rados -p cache4 put foo2 $tmpfile
  ceph df | grep datapool | grep ' 2 '
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cache4
  ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
  ceph osd unset notieragent
function test_tiering_9()
  # make sure 'tier remove' behaves as we expect
  # i.e., removing a tier from a pool that's not its base pool only
  # results in a 'pool foo is now (or already was) not a tier of bar'

  ceph osd pool create basepoolA 2
  ceph osd pool create basepoolB 2
  poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
  poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')

  ceph osd pool create cache5 2
  ceph osd pool create cache6 2
  ceph osd tier add basepoolA cache5
  ceph osd tier add basepoolB cache6
  ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
  ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"

  ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
  ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1

  ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
  ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1

  ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
  ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
  ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
  ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
  ceph auth add client.xx mon allow osd "allow *"
  ceph auth export client.xx >client.xx.keyring
  ceph auth add client.xx -i client.xx.keyring
  rm -f client.xx.keyring
  ceph auth list | grep client.xx
  ceph auth get client.xx | grep caps | grep mon
  ceph auth get client.xx | grep caps | grep osd
  ceph auth get-key client.xx
  ceph auth print-key client.xx
  ceph auth print_key client.xx
  ceph auth caps client.xx osd "allow rw"
  expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
  ceph auth get client.xx | grep osd | grep "allow rw"
  ceph auth export | grep client.xx
  ceph auth export -o authfile
  ceph auth import -i authfile
  ceph auth export -o authfile2
  diff authfile authfile2
  rm authfile authfile2
  ceph auth del client.xx
  expect_false ceph auth get client.xx

  # (almost) interactive mode
  echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
  ceph auth get client.xx
  echo 'auth del client.xx' | ceph
  expect_false ceph auth get client.xx

  ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
  expect_false ceph auth import --in-file TEST-keyring
  ceph-authtool --create-keyring --name client.TEST --gen-key --cap mon "allow r" --set-uid $auid TEST-keyring
  ceph auth import --in-file TEST-keyring
  ceph auth get client.TEST > $TMPFILE
  check_response "auid = $auid"
  ceph --format json-pretty auth get client.TEST > $TMPFILE
  check_response '"auid": '$auid
  ceph auth list > $TMPFILE
  check_response "auid: $auid"
  ceph --format json-pretty auth list > $TMPFILE
  check_response '"auid": '$auid
  ceph auth del client.TEST
function test_auth_profiles()
  ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
      mgr 'allow profile read-only'
  ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
      mgr 'allow profile read-write'
  ceph auth add client.xx-profile-rd mon 'allow profile role-definer'

  ceph auth export > client.xx.keyring

  # read-only is allowed all read-only commands (auth excluded)
  ceph -n client.xx-profile-ro -k client.xx.keyring status
  ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
  ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
  # read-only gets access denied for rw commands or auth commands
  ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring auth list >& $TMPFILE || true
  check_response "EACCES: access denied"

  # read-write is allowed for all read-write commands (except auth)
  ceph -n client.xx-profile-rw -k client.xx.keyring status
  ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
  ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
  ceph -n client.xx-profile-rw -k client.xx.keyring log foo
  ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
  ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
  # read-write gets access denied for auth commands
  ceph -n client.xx-profile-rw -k client.xx.keyring auth list >& $TMPFILE || true
  check_response "EACCES: access denied"

  # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
  ceph -n client.xx-profile-rd -k client.xx.keyring auth list
  ceph -n client.xx-profile-rd -k client.xx.keyring auth export
  ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
  ceph -n client.xx-profile-rd -k client.xx.keyring status
  ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  # read-only 'mon' subsystem commands are allowed
  ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
  # but read-write 'mon' commands are not
  ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"

  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw

  # add a new role-definer with the existing role-definer
  ceph -n client.xx-profile-rd -k client.xx.keyring \
      auth add client.xx-profile-rd2 mon 'allow profile role-definer'
  ceph -n client.xx-profile-rd -k client.xx.keyring \
      auth export > client.xx.keyring.2
  # remove old role-definer using the new role-definer
  ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
      auth del client.xx-profile-rd
  # remove the remaining role-definer with admin
  ceph auth del client.xx-profile-rd2
  rm -f client.xx.keyring client.xx.keyring.2
function test_mon_caps()
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring

  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"

  rm -rf $TEMP_DIR/ceph.client.bug.keyring
  ceph auth del client.bug
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"
function test_mon_misc()
  # with and without verbosity
  ceph osd dump | grep '^epoch'
  ceph --concise osd dump | grep '^epoch'

  ceph osd df | grep 'MIN/MAX VAR'

  grep -v DIRTY $TMPFILE
  ceph df detail > $TMPFILE

  ceph df --format json > $TMPFILE
  grep 'total_bytes' $TMPFILE
  grep -v 'dirty' $TMPFILE
  ceph df detail --format json > $TMPFILE
  grep 'rd_bytes' $TMPFILE
  grep 'dirty' $TMPFILE
  ceph df --format xml | grep '<total_bytes>'
  ceph df detail --format xml | grep '<rd_bytes>'

  ceph health --format json-pretty
  ceph health detail --format xml-pretty

  for t in mon osd mds ; do

  mymsg="this is a test log message $$.$(date)"

  ceph log last | grep "$mymsg"
  ceph log last 100 | grep "$mymsg"
  ceph_watch_wait "$mymsg"

  ceph mon count-metadata ceph_version
function check_mds_active()
  ceph fs get $fs_name | grep active

function wait_mds_active()
  for i in $(seq 1 $max_run) ; do
    if ! check_mds_active $fs_name ; then
      echo "waiting for an active MDS daemon ($i/$max_run)"
  check_mds_active $fs_name

function get_mds_gids()
  ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
function fail_all_mds()
  ceph fs set $fs_name cluster_down true
  mds_gids=$(get_mds_gids $fs_name)
  for mds_gid in $mds_gids ; do
    ceph mds fail $mds_gid
  if check_mds_active $fs_name ; then
    echo "An active MDS remains, something went wrong"

function remove_all_fs()
  existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
  for fs_name in $existing_fs ; do
    echo "Removing fs ${fs_name}..."
    fail_all_mds $fs_name
    echo "Removing existing filesystem '${fs_name}'..."
    ceph fs rm $fs_name --yes-i-really-mean-it
    echo "Removed '${fs_name}'."

# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
  ceph auth list | grep "^mds"
# some of the commands are just not idempotent.
function without_test_dup_command()
  if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
  local saved=${CEPH_CLI_TEST_DUP_COMMAND}
  unset CEPH_CLI_TEST_DUP_COMMAND
  CEPH_CLI_TEST_DUP_COMMAND=$saved
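# Usage (as in test_mds_tell below): run one non-idempotent command with the
# CLI duplicate-command knob temporarily unset, then restore it:
#   without_test_dup_command ceph tell mds.0 respawn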
function test_mds_tell()
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"

  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  # Test injectargs by GID
  old_mds_gids=$(get_mds_gids $FS_NAME)
  echo Old GIDs: $old_mds_gids

  for mds_gid in $old_mds_gids ; do
    ceph tell mds.$mds_gid injectargs "--debug-mds 20"
  expect_false ceph tell mds.a injectargs mds_max_file_recover -1

  # Test respawn by rank
  without_test_dup_command ceph tell mds.0 respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    new_mds_gids=$(get_mds_gids $FS_NAME)
  echo New GIDs: $new_mds_gids

  without_test_dup_command ceph tell mds.a respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    new_mds_gids=$(get_mds_gids $FS_NAME)
  echo New GIDs: $new_mds_gids

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
function test_mon_mds()
  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data

  ceph fs set $FS_NAME cluster_down true
  ceph fs set $FS_NAME cluster_down false

  # Legacy commands, act on default fs
  ceph mds cluster_down

  ceph mds compat rm_incompat 4
  ceph mds compat rm_incompat 4

  # We don't want any MDSs to be up, their activity can interfere with
  # the "current_epoch + 1" checking below if they're generating updates
  fail_all_mds $FS_NAME

  expect_false ceph mds deactivate 2

  for mds_gid in $(get_mds_gids $FS_NAME) ; do
    ceph mds metadata $mds_gid

  ceph mds count-metadata os
  # XXX mds fail, but how do you undo it?
  mdsmapfile=$TEMP_DIR/mdsmap.$$
  current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')

  ceph osd pool create data2 10
  ceph osd pool create data3 10
  data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
  data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
  ceph mds add_data_pool $data2_pool
  ceph mds add_data_pool $data3_pool
  ceph mds add_data_pool 100 >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph mds add_data_pool foobarbaz >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph mds remove_data_pool $data2_pool
  ceph mds remove_data_pool $data3_pool
  ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
  expect_false ceph mds set_max_mds 4
  ceph mds set allow_multimds true --yes-i-really-mean-it
  ceph mds set_max_mds 4
  ceph mds set_max_mds 3
  ceph mds set_max_mds 256
  expect_false ceph mds set_max_mds 257
  ceph mds set max_mds 4
  ceph mds set max_mds 256
  expect_false ceph mds set max_mds 257
  expect_false ceph mds set max_mds asdf
  expect_false ceph mds set inline_data true
  ceph mds set inline_data true --yes-i-really-mean-it
  ceph mds set inline_data yes --yes-i-really-mean-it
  ceph mds set inline_data 1 --yes-i-really-mean-it
  expect_false ceph mds set inline_data --yes-i-really-mean-it
  ceph mds set inline_data false
  ceph mds set inline_data no
  ceph mds set inline_data 0
  expect_false ceph mds set inline_data asdf
  ceph mds set max_file_size 1048576
  expect_false ceph mds set max_file_size 123asdf

  expect_false ceph mds set allow_new_snaps
  expect_false ceph mds set allow_new_snaps true
  ceph mds set allow_new_snaps true --yes-i-really-mean-it
  ceph mds set allow_new_snaps 0
  ceph mds set allow_new_snaps false
  ceph mds set allow_new_snaps no
  expect_false ceph mds set allow_new_snaps taco

  # we should never be able to add EC pools as data or metadata pools
  # create an ec-pool...
  ceph osd pool create mds-ec-pool 10 10 erasure
  ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22

  ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
  data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
  metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')

  fail_all_mds $FS_NAME

  # Check that rmfailed requires confirmation
  expect_false ceph mds rmfailed 0
  ceph mds rmfailed 0 --yes-i-really-mean-it

  # Check that `newfs` is no longer permitted
  expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE

  # Check that 'fs reset' runs
  ceph fs reset $FS_NAME --yes-i-really-mean-it

  # Check that creating a second FS fails by default
  ceph osd pool create fs_metadata2 10
  ceph osd pool create fs_data2 10
  expect_false ceph fs new cephfs2 fs_metadata2 fs_data2

  # Check that setting enable_multiple enables creation of second fs
  ceph fs flag set enable_multiple true --yes-i-really-mean-it
  ceph fs new cephfs2 fs_metadata2 fs_data2

  # Clean up multi-fs stuff
  ceph fs rm cephfs2 --yes-i-really-mean-it
  ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
  ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it

  fail_all_mds $FS_NAME

  # Clean up to enable subsequent fs new tests
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  # ... now create a cache tier in front of the EC pool...
  ceph osd pool create mds-tier 2
  ceph osd tier add mds-ec-pool mds-tier
  ceph osd tier set-overlay mds-ec-pool mds-tier
  tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')

  # Use of a readonly tier should be forbidden
  ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'has a write tier (mds-tier) that is configured to forward' $? 22

  # Use of a writeback tier should enable FS creation
  ceph osd tier cache-mode mds-tier writeback
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force

  # While a FS exists using the tiered pools, I should not be allowed
  # to remove the tier
  ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ... but we should be forbidden from using the cache pool in the FS directly.
  ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22

  # Clean up tier + EC pools
  ceph osd tier remove-overlay mds-ec-pool
  ceph osd tier remove mds-ec-pool mds-tier

  # Create a FS using the 'cache' pool now that it's no longer a tier
  ceph fs new $FS_NAME fs_metadata mds-tier --force

  # We should be forbidden from using this pool as a tier now that
  # it's in use for CephFS
  ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # We should be permitted to use an EC pool with overwrites enabled
  # as the data pool...
  ceph osd pool set mds-ec-pool allow_ec_overwrites true
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ...but not as the metadata pool
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22

  ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it

  # Create a FS and check that we can subsequently add a cache tier to it
  ceph fs new $FS_NAME fs_metadata fs_data --force

  # Adding overlay to FS pool should be permitted, RADOS clients handle this.
  ceph osd tier add fs_metadata mds-tier
  ceph osd tier cache-mode mds-tier writeback
  ceph osd tier set-overlay fs_metadata mds-tier

  # Removing tier should be permitted because the underlying pool is
  # replicated (#11504 case)
  ceph osd tier cache-mode mds-tier proxy
  ceph osd tier remove-overlay fs_metadata
  ceph osd tier remove fs_metadata mds-tier
  ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ceph mds tell mds.a getmap
  # ceph mds set_state

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
function test_mon_mds_metadata()
  local nmons=$(ceph tell 'mon.*' version | grep -c 'version')

  sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
  while read gid id rank; do
    ceph mds metadata ${gid} | grep '"hostname":'
    ceph mds metadata ${id} | grep '"hostname":'
    ceph mds metadata ${rank} | grep '"hostname":'

    local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
    test "$n" -eq "$nmons"

  expect_false ceph mds metadata UNKNOWN
function test_mon_mon()
  # print help message
  ceph mon getmap -o $TEMP_DIR/monmap.$$
  [ -s $TEMP_DIR/monmap.$$ ]

  ceph mon feature list
  ceph mon feature set kraken --yes-i-really-mean-it
  expect_false ceph mon feature set abcd
  expect_false ceph mon feature set abcd --yes-i-really-mean-it
function gen_secrets_file()
  # let's assume we can have the following types
  #  all - generates both cephx and lockbox, with mock dm-crypt key
  #  cephx - only cephx
  #  no_cephx - lockbox and dm-crypt, no cephx
  #  no_lockbox - dm-crypt and cephx, no lockbox
  #  empty - empty file
  #  empty_json - correct json, empty map
  #  bad_json - bad json :)

  if [[ -z "$t" ]]; then

  fn=$(mktemp $TEMP_DIR/secret.XXXXXX)

  if [[ "$t" == "empty" ]]; then

  if [[ "$t" == "bad_json" ]]; then
    echo "asd: ; }" >> $fn
  elif [[ "$t" == "empty_json" ]]; then

  cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
  lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
  dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""

  if [[ "$t" == "all" ]]; then
    echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "cephx" ]]; then
    echo "$cephx_secret" >> $fn
  elif [[ "$t" == "no_cephx" ]]; then
    echo "$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "no_lockbox" ]]; then
    echo "$cephx_secret,$dmcrypt_key" >> $fn
  echo "unknown gen_secrets_file() type '$t'"
function test_mon_osd_create_destroy()
  ceph osd new 2>&1 | grep 'EINVAL'
  ceph osd new '' -1 2>&1 | grep 'EINVAL'
  ceph osd new '' 10 2>&1 | grep 'EINVAL'

  old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

  old_osds=$(ceph osd ls)
  num_osds=$(ceph osd ls | wc -l)

  id=$(ceph osd new $uuid 2>/dev/null)

  for i in $old_osds; do

  id2=`ceph osd new $uuid 2>/dev/null`

  ceph osd new $uuid $id

  id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST

  id2=$(ceph osd new $uuid2)
  [[ "$id2" != "$id" ]]

  ceph osd new $uuid $id2 2>&1 | grep EEXIST
  ceph osd new $uuid2 $id2

  empty_secrets=$(gen_secrets_file "empty")
  empty_json=$(gen_secrets_file "empty_json")
  all_secrets=$(gen_secrets_file "all")
  cephx_only=$(gen_secrets_file "cephx")
  no_cephx=$(gen_secrets_file "no_cephx")
  no_lockbox=$(gen_secrets_file "no_lockbox")
  bad_json=$(gen_secrets_file "bad_json")

  # empty secrets should be idempotent
  new_id=$(ceph osd new $uuid $id -i $empty_secrets)
  [[ "$new_id" == "$id" ]]

  # empty json, thus empty secrets
  new_id=$(ceph osd new $uuid $id -i $empty_json)
  [[ "$new_id" == "$id" ]]
  ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'

  ceph osd setmaxosd $old_maxosd

  ceph osd new $uuid -i $bad_json 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'

  id=$(ceph osd new $uuid -i $all_secrets)

  # validate secrets and dm-crypt are set
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')

  k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
      jq '.key')
  s=$(cat $all_secrets | jq '.cephx_lockbox_secret')

  ceph config-key exists dm-crypt/osd/$uuid/luks

  id2=$(ceph osd new $uuid2 -i $cephx_only)
  [[ "$id" != "$id2" ]]
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')

  expect_false ceph auth get-key client.osd-lockbox.$uuid2
  expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks

  ceph osd destroy osd.$id2 --yes-i-really-mean-it
  ceph osd destroy $id2 --yes-i-really-mean-it

  expect_false ceph auth get-key osd.$id2
  ceph osd dump | grep osd.$id2 | grep destroyed

  ceph osd new $uuid3 $id3 -i $all_secrets
  ceph osd dump | grep osd.$id3 | expect_false grep destroyed
  ceph auth get-key client.osd-lockbox.$uuid3
  ceph auth get-key osd.$id3
  ceph config-key exists dm-crypt/osd/$uuid3/luks

  ceph osd purge osd.$id3 --yes-i-really-mean-it
  expect_false ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  expect_false ceph auth get-key client.osd-lockbox.$uuid3
  expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  ceph osd purge osd.$id3 --yes-i-really-mean-it

  ceph osd purge osd.$id --yes-i-really-mean-it
  expect_false ceph osd find $id
  expect_false ceph auth get-key osd.$id
  expect_false ceph auth get-key client.osd-lockbox.$uuid
  expect_false ceph config-key exists dm-crypt/osd/$uuid/luks

  rm $empty_secrets $empty_json $all_secrets $cephx_only \
      $no_cephx $no_lockbox $bad_json

  for i in $(ceph osd ls); do
    [[ "$i" != "$id2" ]]
    [[ "$i" != "$id3" ]]

  [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
  ceph osd setmaxosd $old_maxosd
function test_mon_osd()
  bl=192.168.0.1:0/1000
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
  ceph osd dump --format=json-pretty | grep $bl
  ceph osd dump | grep "^blacklist $bl"
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl

  # test without nonce, invalid nonce
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl
  expect_false "ceph osd blacklist $bl/-1"
  expect_false "ceph osd blacklist $bl/foo"

  # test with wrong address
  expect_false "ceph osd blacklist 1234.56.78.90/100"

  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist clear
  ceph osd blacklist ls | expect_false grep $bl

  ceph osd crush reweight-all
  ceph osd crush tunables legacy
  ceph osd crush show-tunables | grep argonaut
  ceph osd crush tunables bobtail
  ceph osd crush show-tunables | grep bobtail
  ceph osd crush tunables firefly
  ceph osd crush show-tunables | grep firefly

  ceph osd crush set-tunable straw_calc_version 0
  ceph osd crush get-tunable straw_calc_version | grep 0
  ceph osd crush set-tunable straw_calc_version 1
  ceph osd crush get-tunable straw_calc_version | grep 1

  # require-min-compat-client
  expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
  ceph osd set-require-min-compat-client luminous
  ceph osd dump | grep 'require_min_compat_client luminous'

  # how do I tell when these are done?
  ceph osd deep-scrub 0

  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full

  expect_false ceph osd unset sortbitwise # cannot be unset
  expect_false ceph osd set bogus
  expect_false ceph osd unset bogus
  ceph osd require-osd-release luminous
  # can't lower (or use new command for anything but jewel)
  expect_false ceph osd require-osd-release jewel
  # these are no-ops but should succeed.
  ceph osd set require_jewel_osds
  ceph osd set require_kraken_osds
  expect_false ceph osd unset require_jewel_osds

  ceph osd dump | grep 'osd.0 down'
  for ((i=0; i < $max_run; i++)); do
    if ! ceph osd dump | grep 'osd.0 up'; then
      echo "waiting for osd.0 to come back up ($i/$max_run)"
  ceph osd dump | grep 'osd.0 up'

  ceph osd dump | grep 'osd.0 up'
  # ceph osd find expects the OsdName, so both ints and osd.n should work.
  expect_false ceph osd find osd.xyz
  expect_false ceph osd find xyz
  expect_false ceph osd find 0.1
  ceph --format plain osd find 1 # falls back to json-pretty
  if [ `uname` == Linux ]; then
    ceph osd metadata 1 | grep 'distro'
    ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty

  ceph osd dump | grep 'osd.0.*out'
  ceph osd dump | grep 'osd.0.*in'

  ceph osd add-nodown 0 1
  ceph health detail | grep 'nodown osd(s).*0.*1'
  ceph osd rm-nodown 0 1
  ! ceph health detail | grep 'nodown osd(s).*0.*1'

  ceph osd out 0 # so we can mark it as noin later
  ceph health detail | grep 'noin osd(s).*0'
  ! ceph health detail | grep 'noin osd(s).*0'

  ceph osd add-noout 0
  ceph health detail | grep 'noout osd(s).*0'
  ! ceph health detail | grep 'noout osd(s).*0'
  expect_false ceph osd add-noup 797er
  expect_false ceph osd add-nodown u9uwer
  expect_false ceph osd add-noin 78~15
  expect_false ceph osd add-noout 0 all 1

  expect_false ceph osd rm-noup 1234567
  expect_false ceph osd rm-nodown fsadf7
  expect_false ceph osd rm-noin 0 1 any
  expect_false ceph osd rm-noout 790-fd

  ids=`ceph osd ls-tree default`
    ceph osd add-nodown $osd
    ceph osd add-noout $osd
  ceph -s | grep 'nodown osd(s)'
  ceph -s | grep 'noout osd(s)'
  ceph osd rm-nodown any
  ceph osd rm-noout all
  ! ceph -s | grep 'nodown osd(s)'
  ! ceph -s | grep 'noout osd(s)'

  # make sure mark out preserves weight
  ceph osd reweight osd.0 .5
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'

  ceph osd getmap -o $f

  save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
  ceph osd setmaxosd 10
  ceph osd getmaxosd | grep 'max_osd = 10'
  ceph osd setmaxosd $save
  ceph osd getmaxosd | grep "max_osd = $save"

  for id in `ceph osd ls` ; do
    retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version

  ceph osd rm 0 2>&1 | grep 'EBUSY'

  local old_osds=$(echo $(ceph osd ls))
  id=`ceph osd create`
  ceph osd lost $id --yes-i-really-mean-it
  expect_false ceph osd setmaxosd $id
  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do

  id=`ceph osd create $uuid`
  id2=`ceph osd create $uuid`

  ceph osd setmaxosd $id
  ceph osd getmaxosd | grep "max_osd = $save"

  ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
  ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'

  id=`ceph osd create $uuid $max_osd`
  [ "$id" = "$max_osd" ]

  max_osd=$((max_osd + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
  ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
  id2=`ceph osd create $uuid`
  id2=`ceph osd create $uuid $id`

  local gap_start=$max_osd
  id=`ceph osd create $uuid $((gap_start + 100))`
  [ "$id" = "$((gap_start + 100))" ]

  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
  # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
  # is repeated and consumes two osd ids, not just one.
1543 if test "$CEPH_CLI_TEST_DUP_COMMAND" ; then
1544 next_osd
=$
((gap_start
+ 1))
1548 id
=`ceph osd create`
1549 [ "$id" = "$next_osd" ]
1551 next_osd
=$
((id
+ 1))
1552 id
=`ceph osd create $(uuidgen)`
1553 [ "$id" = "$next_osd" ]
1555 next_osd
=$
((id
+ 1))
1556 id
=`ceph osd create $(uuidgen) $next_osd`
1557 [ "$id" = "$next_osd" ]
1559 local new_osds
=$
(echo $
(ceph osd
ls))
1560 for id
in $
(echo $new_osds |
sed -e "s/$old_osds//") ; do
1564 ceph osd setmaxosd
$save
1567 ceph osd pool create data
10
1568 ceph osd lspools |
grep data
1569 ceph osd map data foo |
grep 'pool.*data.*object.*foo.*pg.*up.*acting'
1570 ceph osd map data foo namespace|
grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
1571 ceph osd pool delete data data
--yes-i-really-really-mean-it
1574 ceph osd dump |
grep 'flags.*pauserd,pausewr'
1583 ceph osd tree up out
1584 ceph osd tree down
in
1585 ceph osd tree down out
1586 ceph osd tree out down
1587 expect_false ceph osd tree up down
1588 expect_false ceph osd tree
in out
1589 expect_false ceph osd tree up foo
1592 ceph osd count-metadata os
1598 ceph osd stat |
grep up
,
function test_mon_crush()
  epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
  nextepoch=$(( $epoch + 1 ))
  echo epoch $epoch nextepoch $nextepoch

  expect_false ceph osd setcrushmap $nextepoch -i $f
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  # should be idempotent
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo epoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
function test_mon_osd_pool()
  ceph osd pool create data 10
  ceph osd pool mksnap data datasnap
  rados -p data lssnap | grep datasnap
  ceph osd pool rmsnap data datasnap
  expect_false ceph osd pool rmsnap pool_fake snapshot
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pool create data2 10
  ceph osd pool rename data2 data3
  ceph osd lspools | grep data3
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it

  ceph osd pool create replicated 12 12 replicated
  ceph osd pool create replicated 12 12 replicated
  ceph osd pool create replicated 12 12 # default is replicated
  ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
  # should fail because the type is not the same
  expect_false ceph osd pool create replicated 12 12 erasure
  ceph osd lspools | grep replicated
  ceph osd pool create ec_test 1 1 erasure

  ceph osd metadata | grep osd_objectstore_type | grep -qc bluestore
  if [ $? -eq 0 ]; then
    ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
    check_response $? 22 "pool must only be stored on bluestore for scrubbing to work"

    ceph osd pool set ec_test allow_ec_overwrites true || return 1
  expect_false ceph osd pool set ec_test allow_ec_overwrites false

  ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
  ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
function test_mon_osd_pool_quota()
  # test osd pool set/get quota
  ceph osd pool create tmp-quota-pool 36

  # set erroneous quotas
  expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
  expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
  expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa

  ceph osd pool set-quota tmp-quota-pool max_bytes 10
  ceph osd pool set-quota tmp-quota-pool max_objects 10M

  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10240k objects'

  # get quotas in json-pretty format
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
      grep '"quota_max_objects":.*10485760'
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
      grep '"quota_max_bytes":.*10'

  ceph osd pool set-quota tmp-quota-pool max_bytes 0
  ceph osd pool set-quota tmp-quota-pool max_objects 0

  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'

  ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
function test_mon_pg()
  # Make sure we start healthy.

  ceph pg debug unfound_objects_exist
  ceph pg debug degraded_pgs_exist
  ceph pg deep-scrub 0.0

  ceph pg dump pgs_brief --format=json
  ceph pg dump pgs --format=json
  ceph pg dump pools --format=json
  ceph pg dump osds --format=json
  ceph pg dump sum --format=json
  ceph pg dump all --format=json
  ceph pg dump pgs_brief osds --format=json
  ceph pg dump pools osds pgs_brief --format=json

  ceph pg dump_pools_json
  ceph pg dump_stuck inactive
  ceph pg dump_stuck unclean
  ceph pg dump_stuck stale
  ceph pg dump_stuck undersized
  ceph pg dump_stuck degraded

  expect_false ceph pg ls scrubq
  ceph pg ls active stale repair recovering
  ceph pg ls 0 active stale
  ceph pg ls-by-primary osd.0
  ceph pg ls-by-primary osd.0 0
  ceph pg ls-by-primary osd.0 active
  ceph pg ls-by-primary osd.0 active stale
  ceph pg ls-by-primary osd.0 0 active stale
  ceph pg ls-by-osd osd.0
  ceph pg ls-by-osd osd.0 0
  ceph pg ls-by-osd osd.0 active
  ceph pg ls-by-osd osd.0 active stale
  ceph pg ls-by-osd osd.0 0 active stale
  ceph pg ls-by-pool rbd
  ceph pg ls-by-pool rbd active stale
  # can't test this...
  # ceph pg force_create_pg
  ceph pg getmap -o $TEMP_DIR/map.$$
  [ -s $TEMP_DIR/map.$$ ]
  ceph pg map 0.0 | grep acting

  ceph osd set-full-ratio .962
  ceph osd dump | grep '^full_ratio 0.962'
  ceph osd set-backfillfull-ratio .912
  ceph osd dump | grep '^backfillfull_ratio 0.912'
  ceph osd set-nearfull-ratio .892
  ceph osd dump | grep '^nearfull_ratio 0.892'

  # Check health status
  ceph osd set-nearfull-ratio .913
  ceph health | grep 'HEALTH_ERR.*Full ratio(s) out of order'
  ceph health detail | grep 'backfillfull_ratio (0.912) < nearfull_ratio (0.913), increased'
  ceph osd set-nearfull-ratio .892
  ceph osd set-backfillfull-ratio .963
  ceph health detail | grep 'full_ratio (0.962) < backfillfull_ratio (0.963), increased'
  ceph osd set-backfillfull-ratio .912

  # Check injected full results
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
  wait_for_health "HEALTH_WARN.*1 nearfull osd(s)"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
  wait_for_health "HEALTH_WARN.*1 backfillfull osd(s)"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
  # failsafe and full are the same as far as the monitor is concerned
  wait_for_health "HEALTH_ERR.*1 full osd(s)"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
  wait_for_health "HEALTH_ERR.*2 full osd(s)"
  ceph health detail | grep "osd.0 is full"
  ceph health detail | grep "osd.2 is full"
  ceph health detail | grep "osd.1 is backfill full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none

  ceph pg stat | grep 'pgs:'
  ceph report | grep osd_stats

  ceph tell osd.0 version
  expect_false ceph tell osd.9999 version
  expect_false ceph tell osd.foo version

  ceph tell osd.0 dump_pg_recovery_stats | grep Started

  ceph osd reweight 0 0.9
  expect_false ceph osd reweight 0 -1
  ceph osd reweight osd.0 1

  ceph osd primary-affinity osd.0 .9
  expect_false ceph osd primary-affinity osd.0 -2
  expect_false ceph osd primary-affinity osd.9999 .5
  ceph osd primary-affinity osd.0 1

  ceph osd pg-temp 0.0 0 1 2
  ceph osd pg-temp 0.0 osd.1 osd.0 osd.2
  expect_false ceph osd pg-temp asdf qwer
  expect_false ceph osd pg-temp 0.0 asdf
  expect_false ceph osd pg-temp 0.0

  # don't test ceph osd primary-temp for now
1829 function test_mon_osd_pool_set
()
1831 TEST_POOL_GETSET
=pool_getset
1832 ceph osd pool create
$TEST_POOL_GETSET 1
1834 ceph osd pool get
$TEST_POOL_GETSET all
1836 for s
in pg_num pgp_num size min_size crush_rule
; do
1837 ceph osd pool get
$TEST_POOL_GETSET $s
1840 old_size
=$
(ceph osd pool get
$TEST_POOL_GETSET size |
sed -e 's/size: //')
1841 (( new_size
= old_size
+ 1 ))
1842 ceph osd pool
set $TEST_POOL_GETSET size
$new_size
1843 ceph osd pool get
$TEST_POOL_GETSET size |
grep "size: $new_size"
1844 ceph osd pool
set $TEST_POOL_GETSET size
$old_size
1846 ceph osd pool create pool_erasure
1 1 erasure
1849 ceph osd pool
set pool_erasure size
4444 2>$TMPFILE
1850 check_response
'not change the size'
1852 ceph osd pool get pool_erasure erasure_code_profile
1855 ceph osd pool
set $TEST_POOL_GETSET auid
$auid
1856 ceph osd pool get
$TEST_POOL_GETSET auid |
grep $auid
1857 ceph
--format=xml osd pool get
$TEST_POOL_GETSET auid |
grep $auid
1858 ceph osd pool
set $TEST_POOL_GETSET auid
0
1860 for flag
in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub
; do
1861 ceph osd pool
set $TEST_POOL_GETSET $flag false
1862 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: false"
1863 ceph osd pool
set $TEST_POOL_GETSET $flag true
1864 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: true"
1865 ceph osd pool
set $TEST_POOL_GETSET $flag 1
1866 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: true"
1867 ceph osd pool
set $TEST_POOL_GETSET $flag 0
1868 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: false"
1869 expect_false ceph osd pool
set $TEST_POOL_GETSET $flag asdf
1870 expect_false ceph osd pool
set $TEST_POOL_GETSET $flag 2
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
  ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
  ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
  ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
  ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
  ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
  ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'

  ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
  ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
  ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
  ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
  ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'

  ceph osd pool set $TEST_POOL_GETSET nopgchange 1
  expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
  expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
  ceph osd pool set $TEST_POOL_GETSET nopgchange 0
  ceph osd pool set $TEST_POOL_GETSET pg_num 10
  wait_for_clean
  ceph osd pool set $TEST_POOL_GETSET pgp_num 10

  old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
  new_pgs=$(($old_pgs + $(ceph osd stat | grep osdmap | awk '{print $3}') * 32))
  ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
  ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
  wait_for_clean

  old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
  new_pgs=$(($old_pgs + $(ceph osd stat | grep osdmap | awk '{print $3}') * 32 + 1))
  expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
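  # The factor of 32 above is presumably mon_osd_max_split_count (the
  # number of new PGs each OSD may accept in one split): growing by
  # osds*32 succeeds, while one PG more is rejected.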
  ceph osd pool set $TEST_POOL_GETSET nosizechange 1
  expect_false ceph osd pool set $TEST_POOL_GETSET size 2
  expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
  ceph osd pool set $TEST_POOL_GETSET nosizechange 0
  ceph osd pool set $TEST_POOL_GETSET size 2
  wait_for_clean
  ceph osd pool set $TEST_POOL_GETSET min_size 2

  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
  ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it

  expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
  ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it

  ceph osd pool set $TEST_POOL_GETSET nodelete 1
  expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
  ceph osd pool set $TEST_POOL_GETSET nodelete 0
  ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it

  ceph osd pool get rbd crush_rule | grep 'crush_rule: '
}
function test_mon_osd_tiered_pool_set()
{
  # this is really a tier pool
  ceph osd pool create real-tier 2
  ceph osd tier add rbd real-tier

  ceph osd pool set real-tier hit_set_type explicit_hash
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
  ceph osd pool set real-tier hit_set_type explicit_object
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
  ceph osd pool set real-tier hit_set_type bloom
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
  expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
  ceph osd pool set real-tier hit_set_period 123
  ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
  ceph osd pool set real-tier hit_set_count 12
  ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
  ceph osd pool set real-tier hit_set_fpp .01
  ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"

  ceph osd pool set real-tier target_max_objects 123
  ceph osd pool get real-tier target_max_objects | \
    grep 'target_max_objects:[ \t]\+123'
  ceph osd pool set real-tier target_max_bytes 123456
  ceph osd pool get real-tier target_max_bytes | \
    grep 'target_max_bytes:[ \t]\+123456'
  ceph osd pool set real-tier cache_target_dirty_ratio .123
  ceph osd pool get real-tier cache_target_dirty_ratio | \
    grep 'cache_target_dirty_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
  ceph osd pool set real-tier cache_target_dirty_high_ratio .123
  ceph osd pool get real-tier cache_target_dirty_high_ratio | \
    grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
  ceph osd pool set real-tier cache_target_full_ratio .123
  ceph osd pool get real-tier cache_target_full_ratio | \
    grep 'cache_target_full_ratio:[ \t]\+0.123'
  ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
  ceph osd pool set real-tier cache_target_full_ratio 1.0
  ceph osd pool set real-tier cache_target_full_ratio 0
  expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
  ceph osd pool set real-tier cache_min_flush_age 123
  ceph osd pool get real-tier cache_min_flush_age | \
    grep 'cache_min_flush_age:[ \t]\+123'
  ceph osd pool set real-tier cache_min_evict_age 234
  ceph osd pool get real-tier cache_min_evict_age | \
    grep 'cache_min_evict_age:[ \t]\+234'

  # this is not a tier pool
  ceph osd pool create fake-tier 2
  wait_for_clean

  expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type explicit_object
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type bloom
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
  expect_false ceph osd pool set fake-tier hit_set_period 123
  expect_false ceph osd pool get fake-tier hit_set_period
  expect_false ceph osd pool set fake-tier hit_set_count 12
  expect_false ceph osd pool get fake-tier hit_set_count
  expect_false ceph osd pool set fake-tier hit_set_fpp .01
  expect_false ceph osd pool get fake-tier hit_set_fpp

  expect_false ceph osd pool set fake-tier target_max_objects 123
  expect_false ceph osd pool get fake-tier target_max_objects
  expect_false ceph osd pool set fake-tier target_max_bytes 123456
  expect_false ceph osd pool get fake-tier target_max_bytes
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_full_ratio
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_min_flush_age 123
  expect_false ceph osd pool get fake-tier cache_min_flush_age
  expect_false ceph osd pool set fake-tier cache_min_evict_age 234
  expect_false ceph osd pool get fake-tier cache_min_evict_age

  ceph osd tier remove rbd real-tier
  ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
  ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
}
function test_mon_osd_erasure_code()
{
  ceph osd erasure-code-profile set fooprofile a=b c=d
  ceph osd erasure-code-profile set fooprofile a=b c=d
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
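  # Setting a profile to its current values is an idempotent no-op;
  # changing an existing profile is refused unless --force is given,
  # as the sequence above exercises.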
  # cleanup by removing profile 'fooprofile'
  ceph osd erasure-code-profile rm fooprofile
}
function test_mon_osd_misc()
{
  set +e

  # expect error about missing 'pool' argument
  ceph osd map 2>$TMPFILE; check_response 'pool' $? 22

  # expect error about unused argument foo
  ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22

  # expect "not in range" for invalid full ratio
  ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22

  # expect "higher than 100" for an invalid overload percentage
  ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22

  set -e

  ceph osd reweight-by-utilization 110
  ceph osd reweight-by-utilization 110 .5
  expect_false ceph osd reweight-by-utilization 110 0
  expect_false ceph osd reweight-by-utilization 110 -0.1
  ceph osd test-reweight-by-utilization 110 .5 --no-increasing
  ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
  ceph osd reweight-by-pg 110
  ceph osd test-reweight-by-pg 110 .5
  ceph osd reweight-by-pg 110 rbd
  ceph osd reweight-by-pg 110 .5 rbd
  expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
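  # Summary of the argument checks above: the overload percentage must
  # exceed 100 (only OSDs above the cluster-average utilization get
  # reweighted), and the optional max_change fraction must be positive.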
}

function test_mon_heap_profiler()
{
  local do_test=1

  # expect 'heap' commands to be correctly parsed
  ceph heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi

  [[ $do_test -eq 0 ]] && return 0

  ceph heap start_profiler
  ceph heap dump
  ceph heap stop_profiler
  ceph heap release
}
function test_admin_heap_profiler()
{
  local do_test=1

  # expect 'heap' commands to be correctly parsed
  ceph heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi

  [[ $do_test -eq 0 ]] && return 0

  local admin_socket=$(get_admin_socket osd.0)

  $SUDO ceph --admin-daemon $admin_socket heap start_profiler
  $SUDO ceph --admin-daemon $admin_socket heap dump
  $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
  $SUDO ceph --admin-daemon $admin_socket heap release
}
function test_osd_bench()
{
  # test osd bench limits
  # As we should not rely on defaults (as they may change over time),
  # let's inject some values and perform some simple tests
  # max iops: 10              # 10 IOPS
  # max throughput: 10485760  # 10 MB/s
  # max block size: 2097152   # 2 MB
  # duration: 10              # 10 seconds

  local args="\
    --osd-bench-duration 10 \
    --osd-bench-max-block-size 2097152 \
    --osd-bench-large-size-max-throughput 10485760 \
    --osd-bench-small-size-max-iops 10"
  ceph tell osd.0 injectargs ${args## }

  # anything with a bs larger than 2097152 must fail
  expect_false ceph tell osd.0 bench 1 2097153
  # but using 'osd_bench_max_bs' must succeed
  ceph tell osd.0 bench 1 2097152
  # we assume 1MB as a large bs; anything lower is a small bs
  # for a 4096-byte bs, for 10 seconds, we are limited by IOPS
  # max count: 409600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 409601 4096
  # but 409600 must succeed
  ceph tell osd.0 bench 409600 4096
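  # Worked out: the small-bs cap is iops * duration * bs =
  # 10 * 10 * 4096 = 409600 bytes, which is why 409601 is rejected.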
  # for a large bs, we are limited by throughput.
  # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
  # the max count will be (10MB * 10s) = 100MB
  # max count: 104857600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 104857601 2097152
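  # Worked out: the large-bs cap is throughput * duration =
  # 10485760 B/s * 10 s = 104857600 bytes (100 MB).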
  # up to max count must be allowed
  ceph tell osd.0 bench 104857600 2097152
}
function test_osd_negative_filestore_merge_threshold()
{
  $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
  expect_config_value "osd.0" "filestore_merge_threshold" -1
}

function test_mon_tell()
{
  ceph tell mon.a version
  ceph tell mon.b version
  expect_false ceph tell mon.foo version

  ceph_watch_start debug
  ceph tell mon.a version
  ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'

  ceph_watch_start debug
  ceph tell mon.b version
  ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
}
function test_mon_crushmap_validation()
{
  local map=$TEMP_DIR/map
  ceph osd getcrushmap -o $map

  local crushtool_path="${TEMP_DIR}/crushtool"
  touch "${crushtool_path}"
  chmod +x "${crushtool_path}"
  local crushtool_path_old=`ceph-conf --show-config-value crushtool`
  ceph tell mon.\* injectargs --crushtool "${crushtool_path}"

  printf "%s\n" \
      "#!/bin/sh
       cat > /dev/null
       exit 0" > "${crushtool_path}"

  ceph osd setcrushmap -i $map

  printf "%s\n" \
      "#!/bin/sh
       cat > /dev/null
       exit 1" > "${crushtool_path}"

  expect_false ceph osd setcrushmap -i $map

  printf "%s\n" \
      "#!/bin/sh
       cat > /dev/null
       echo 'TEST FAIL' >&2
       exit 1" > "${crushtool_path}"

  expect_false ceph osd setcrushmap -i $map 2> $TMPFILE
  check_response "Error EINVAL: Failed crushmap test: TEST FAIL"

  local mon_lease=`ceph-conf --show-config-value mon_lease`

  test "${mon_lease}" -gt 0

  printf "%s\n" \
      "#!/bin/sh
       cat > /dev/null
       sleep $((mon_lease - 1))" > "${crushtool_path}"

  ceph osd setcrushmap -i $map

  printf "%s\n" \
      "#!/bin/sh
       cat > /dev/null
       sleep $((mon_lease + 1))" > "${crushtool_path}"

  expect_false ceph osd setcrushmap -i $map 2> $TMPFILE
  check_response "Error EINVAL: Failed crushmap test: ${crushtool_path}: timed out (${mon_lease} sec)"
  ceph tell mon.\* injectargs --crushtool "${crushtool_path_old}"

  rm -f "${crushtool_path}"
}
function test_mon_ping()
{
  ceph ping mon.a
  ceph ping mon.b
  expect_false ceph ping mon.foo

  ceph ping mon.\*
}
function test_mon_deprecated_commands()
{
  # current DEPRECATED commands are:
  #  ceph compact
  #  ceph scrub
  #  ceph sync force
  #
  # Testing should be accomplished by setting
  # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
  # each one of these commands.

  ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
  expect_false ceph tell mon.a compact 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a scrub 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a sync force 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
}

function test_mon_cephdf_commands()
{
  # ceph df detail:
  # RAW USED: the approximate raw space used per pool, out of the raw total

  ceph osd pool create cephdf_for_test 32 32 replicated
  ceph osd pool set cephdf_for_test size 2

  dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
  rados put cephdf_for_test cephdf_for_test -p cephdf_for_test

  # wait for the object to become visible before checking stats
  for i in `seq 1 10`; do
    rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
    sleep 1
  done

  # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
  # to sync mon with osd
  flush_pg_stats

  local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
  cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
  raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`

  ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
  rm ./cephdf_for_test

  expect_false test $cal_raw_used_size != $raw_used_size
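  # With a replicated pool of size 2 every logical byte is stored twice,
  # so raw_bytes_used should equal bytes_used * 2; the double negative
  # (expect_false test ... !=) asserts that the two values match.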
}

function test_mon_tell_help_command()
{
  ceph tell mon.a help

  # wrong target
  expect_false ceph tell mon.zzz help
}
function test_osd_tell_help_command()
{
  ceph tell osd.1 help
  expect_false ceph tell osd.100 help
}
function test_mds_tell_help_command()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  remove_all_fs
  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  ceph tell mds.a help
  expect_false ceph tell mds.z help

  remove_all_fs
  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mgr_tell_help_command()
{
  ceph tell mgr help
}

#
# New tests should be added to the TESTS array below
#
# Individual tests may be run using the '-t <testname>' argument
# The user can specify '-t <testname>' as many times as she wants
#
# Tests will be run in the order presented in the TESTS array, or in
# the order specified by the '-t <testname>' options.
#
# '-l' will list all the available test names
# '-h' will show usage
#
# The test maintains backward compatibility: not specifying arguments
# will run all tests in the order they appear in the TESTS array.
#
MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
for i in `seq 9`; do
    MON_TESTS+=" tiering_$i";
done
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
MON_TESTS+=" mon_crush"
MON_TESTS+=" mon_osd_create_destroy"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_pool_set"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
MON_TESTS+=" mon_heap_profiler"
MON_TESTS+=" mon_tell"
MON_TESTS+=" mon_crushmap_validation"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"
MON_TESTS+=" mon_tell_help_command"

OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"
OSD_TESTS+=" admin_heap_profiler"
OSD_TESTS+=" osd_tell_help_command"

MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"
MDS_TESTS+=" mds_tell_help_command"

MGR_TESTS+=" mgr_tell_help_command"

TESTS+=$MON_TESTS
TESTS+=$OSD_TESTS
TESTS+=$MDS_TESTS
TESTS+=$MGR_TESTS

#
# "main" follows
#
function list_tests()
{
  echo "AVAILABLE TESTS"
  for i in $TESTS; do
    echo "  $i"
  done
}

function usage()
{
  echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
}

tests_to_run=""
sanity_check=true
do_list=0
while [[ $# -gt 0 ]]; do
  opt=$1
  case "$opt" in
    "-l" )
      do_list=1
      ;;
    "--asok-does-not-need-root" )
      SUDO=""
      ;;
    "--no-sanity-check" )
      sanity_check=false
      ;;
    "--test-mon" )
      tests_to_run+="$MON_TESTS"
      ;;
    "--test-osd" )
      tests_to_run+="$OSD_TESTS"
      ;;
    "--test-mds" )
      tests_to_run+="$MDS_TESTS"
      ;;
    "--test-mgr" )
      tests_to_run+="$MGR_TESTS"
      ;;
    "-t" )
      shift
      if [[ -z "$1" ]]; then
        echo "missing argument to '-t'"
        usage
        exit 1
      fi
      tests_to_run+=" $1"
      ;;
    "-h" )
      usage
      exit 0
      ;;
  esac
  shift
done

if [[ $do_list -eq 1 ]]; then
  list_tests
  exit 0
fi

if test -z "$tests_to_run" ; then
  tests_to_run="$TESTS"
fi

if $sanity_check ; then
  wait_no_osd_down
fi
for i in $tests_to_run; do
  if $sanity_check ; then
    check_no_osd_down
  fi
  set -x
  test_${i}
  set +x
done
if $sanity_check ; then
  check_no_osd_down
fi

echo OK