2 # -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
3 # vim: ts=8 sw=8 ft=bash smarttab
# Pull in the shared standalone test helpers (expect_failure, get_asok_dir,
# flush_pg_stats, ...).  Path is relative to this script's own location.
# NOTE(review): quoted the expansions so the path survives spaces — TODO
# confirm the helper still lives at ../../standalone/ceph-helpers.sh.
source "$(dirname "$0")/../../standalone/ceph-helpers.sh"
# Prefix `set -x` trace output with file:line:function for easier debugging
# (single quotes keep the expansion deferred until trace time).
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
# get_admin_socket client
# Print the admin-socket path for the given client (e.g. "osd.0").
# If CEPH_ASOK_DIR is set the socket lives there; otherwise the cluster
# name is scraped from --cluster in CEPH_ARGS and /var/run/ceph is used.
# NOTE(review): a few interior lines were elided in this chunk; the
# `local client=$1` / else / fi scaffolding is reconstructed — verify.
function get_admin_socket()
{
  local client=$1

  if test -n "$CEPH_ASOK_DIR";
  then
    echo $(get_asok_dir)/$client.asok
  else
    # Extract the cluster name following --cluster (empty if not present).
    local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
    echo "/var/run/ceph/$cluster-$client.asok"
  fi
}
# Succeed only when `ceph osd dump` reports no OSD as " down ".
function check_no_osd_down()
{
    ! ceph osd dump | grep ' down '
}
# Poll (1s apart, up to $max_run times) until no OSD is reported down.
# The trailing check_no_osd_down propagates the final state to the caller.
# NOTE(review): the loop tail (sleep/break/done) was elided in this chunk;
# reconstructed — verify against upstream.
function wait_no_osd_down()
{
    max_run=300
    for i in $(seq 1 $max_run) ; do
        if ! check_no_osd_down ; then
            echo "waiting for osd(s) to come back up ($i/$max_run)"
            sleep 1
        else
            break
        fi
    done
    check_no_osd_down
}
# Invert a command's status: succeed iff "$@" fails.
# NOTE(review): upstream also enables xtrace here; elided line reconstructed.
function expect_false()
{
    set -x
    if "$@"; then return 1; else return 0; fi
}
# Per-run scratch directory, removed on exit by the trap below.
TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0

# Default capture file for command output inspected by check_response().
TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
58 # retry_eagain max cmd args ...
60 # retry cmd args ... if it exits on error and its output contains the
61 # string EAGAIN, at most $max times
# retry_eagain cmd args ...
# Run the command up to $max times, retrying (1s apart) while it fails AND
# its combined stdout+stderr contains EAGAIN.  Prints the last output and
# returns the last exit status.
# NOTE(review): the retry/cleanup tail was elided in this chunk;
# reconstructed — verify against upstream.
function retry_eagain()
{
    local max=5
    local status
    local tmpfile=$TEMP_DIR/retry_eagain.$$
    local count
    for count in $(seq 1 $max) ; do
        status=0
        "$@" > $tmpfile 2>&1 || status=$?
        if test $status = 0 ||
            ! grep --quiet EAGAIN $tmpfile ; then
            break
        fi
        sleep 1
    done
    if test $count = $max ; then
        echo retried with non zero exit status, $max times: "$@" >&2
    fi
    cat $tmpfile
    rm $tmpfile
    return $status
}
88 # map_enxio_to_eagain cmd arg ...
90 # add EAGAIN to the output of cmd arg ... if the command failed and its
# output contains the string ENXIO, so a retry_eagain wrapper retries it
# map_enxio_to_eagain cmd args ...
# Run the command capturing output; if it fails and the output contains
# ENXIO, append an EAGAIN marker so a retry_eagain wrapper will retry it.
# Prints the (possibly augmented) output and returns the command's status.
function map_enxio_to_eagain()
{
    local status=0
    local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$

    "$@" > $tmpfile 2>&1 || status=$?
    if test $status != 0 &&
        grep --quiet ENXIO $tmpfile ; then
        echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
    fi
    cat $tmpfile
    rm $tmpfile
    return $status
}
# check_response expected_string [retcode] [expected_retcode]
# Assert $TMPFILE contains expected_string and, when both retcodes are
# given, that they match.  Exits the whole script on mismatch.
# NOTE(review): argument assignments and exit paths were elided in this
# chunk; reconstructed — verify against upstream.
function check_response()
{
    expected_string=$1
    retcode=$2
    expected_retcode=$3
    if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
        echo "return code invalid: got $retcode, expected $expected_retcode" >&2
        exit 1
    fi

    if ! grep --quiet -- "$expected_string" $TMPFILE ; then
        echo "Didn't find $expected_string in output" >&2
        cat $TMPFILE >&2
        exit 1
    fi
}
# get_config_value_or_die target config_opt
# Query a daemon's config option over the admin socket and print its value;
# exits the script if the query fails or yields an empty value.
# NOTE(review): error paths and final echo were elided in this chunk;
# reconstructed — verify message wording against upstream.
function get_config_value_or_die()
{
    local target config_opt raw val

    target=$1
    config_opt=$2

    raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
    if [[ $? -ne 0 ]]; then
        echo "error obtaining config opt '$config_opt' from '$target': $raw"
        exit 1
    fi

    # Strip JSON punctuation ({}, quotes, spaces), then take the value
    # after the ':' separator.
    raw=`echo $raw | sed -e 's/[{} "]//g'`
    val=`echo $raw | cut -f2 -d:`

    if [[ -z $val ]]; then
        echo "failed to obtain config opt '$config_opt' from '$target'"
        exit 1
    fi

    echo "$val"
}
# expect_config_value target config_opt expected_val
# Die (exit 1) unless the daemon reports exactly the expected value.
function expect_config_value()
{
    local target config_opt expected_val val
    target=$1
    config_opt=$2
    expected_val=$3

    val=$(get_config_value_or_die $target $config_opt)

    if [[ "$val" != "$expected_val" ]]; then
        echo "expected '$expected_val', got '$val'"
        exit 1
    fi
}
# ceph_watch_start [level [channel]]
# Start `ceph --watch[-level] [--watch-channel ch]` in the background,
# logging to $CEPH_WATCH_FILE, then block until the client is connected
# (first "cluster" line observed).  Sets CEPH_WATCH_PID for ceph_watch_wait.
# NOTE(review): the argument conditionals and wait loop were elided in this
# chunk; reconstructed — verify against upstream.
function ceph_watch_start()
{
    local whatch_opt=--watch

    if [ -n "$1" ]; then
        whatch_opt=--watch-$1
        if [ -n "$2" ]; then
            whatch_opt+=" --watch-channel $2"
        fi
    fi

    CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
    ceph $whatch_opt > $CEPH_WATCH_FILE &
    CEPH_WATCH_PID=$!

    # wait until the "ceph" client is connected and receiving
    # log messages from monitor
    while true ; do
        grep -q "cluster" $CEPH_WATCH_FILE && break
        sleep 1
    done
}
# ceph_watch_wait regexp [timeout]
# Wait (default 30s, 1s polls) for regexp to appear in $CEPH_WATCH_FILE,
# then stop the background watcher.  Returns non-zero (after dumping the
# watch file to stderr) if the pattern never appeared.
# NOTE(review): timeout handling and kill were elided in this chunk;
# reconstructed — verify against upstream.
function ceph_watch_wait()
{
    local regexp=$1
    local timeout=30

    if [ -n "$2" ]; then
        timeout=$2
    fi

    for i in `seq ${timeout}`; do
        grep -q "$regexp" $CEPH_WATCH_FILE && break
        sleep 1
    done

    kill $CEPH_WATCH_PID

    if ! grep "$regexp" $CEPH_WATCH_FILE; then
        echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
        cat $CEPH_WATCH_FILE >&2
        return 1
    fi
}
# Exercise `ceph tell ... injectargs` parsing: boolean flags (--no-*),
# quoted multi-option strings, the `--` separator, float coercion of
# integer-valued options, and rejection of invalid values.
function test_mon_injectargs()
{
  CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  # the client-side debug dump location must not leak into the reply
  ! grep "the.dump" $TMPFILE || return 1
  ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true'"
  ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
  expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
      ceph tell osd.0 injectargs -- '--osd_op_history_duration'

  ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200' >& $TMPFILE || return 1
  check_response "osd_deep_scrub_interval = '2419200.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon_probe_timeout 2' >& $TMPFILE || return 1
  check_response "mon_probe_timeout = '2.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
  check_response "mon_lease = '6.000000' (not observed, change may require restart)"

  # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
  expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 >& $TMPFILE || return 1
  check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
}
# Verify SI unit suffixes (K, G, ...) are accepted by both the admin-socket
# `config set` path and `injectargs`, and that bogus suffixes are rejected.
function test_mon_injectargs_SI()
{
  # Test SI units during injectargs and 'config set'
  # We only aim at testing the units are parsed accordingly
  # and don't intend to test whether the options being set
  # actually expect SI units to be passed.
  # Keep in mind that all integer based options that are not based on bytes
  # (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed
  # accordingly (base 10).
  initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
  # 'F' is not a valid modifier; expect an EINVAL response
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
  check_response "'10F': (22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
  expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
  expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
  # restore the original value
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}
# Verify IEC unit suffixes (Gi, ...) — and SI suffixes for backwards
# compatibility — are parsed for byte-based options, via both the
# admin-socket `config set` path and `injectargs`.
function test_mon_injectargs_IEC()
{
  # Test IEC units during injectargs and 'config set'
  # We only aim at testing the units are parsed accordingly
  # and don't intend to test whether the options being set
  # actually expect IEC units to be passed.
  # Keep in mind that all integer based options that are based on bytes
  # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
  # unit modifiers (for backwards compatibility and convinience) and be parsed
  # accordingly.
  initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
  $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
  expect_config_value "mon.a" "mon_data_size_warn" 15000000000
  # SI suffix: 15G == 15 * 1024^3 here (byte-based option)
  $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
  expect_config_value "mon.a" "mon_data_size_warn" 16106127360
  $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
  expect_config_value "mon.a" "mon_data_size_warn" 17179869184
  # 'F' is not a valid modifier; expect an EINVAL response
  $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
  check_response "'10F': (22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
  expect_config_value "mon.a" "mon_data_size_warn" 15000000000
  ceph tell mon.a injectargs '--mon_data_size_warn 15G'
  expect_config_value "mon.a" "mon_data_size_warn" 16106127360
  ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
  expect_config_value "mon.a" "mon_data_size_warn" 17179869184
  expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
  # restore the original value
  $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
}
# Exercise the tiering agent: with target_max_objects=1 an object written
# to the base pool must be evicted from the cache tier, be promoted back on
# read, and then be evicted again.
# NOTE(review): both eviction-wait loop bodies were elided in this chunk;
# the evicted/break/sleep scaffolding is reconstructed — verify upstream.
function test_tiering_agent()
{
  local slow=slow_eviction
  local fast=fast_eviction
  ceph osd pool create $slow 1 1
  ceph osd pool application enable $slow rados
  ceph osd pool create $fast 1 1
  ceph osd tier add $slow $fast
  ceph osd tier cache-mode $fast writeback
  ceph osd tier set-overlay $slow $fast
  ceph osd pool set $fast hit_set_type bloom
  rados -p $slow put obj1 /etc/group
  ceph osd pool set $fast target_max_objects 1
  ceph osd pool set $fast hit_set_count 1
  ceph osd pool set $fast hit_set_period 5
  # wait for the object to be evicted from the cache
  local evicted
  evicted=false
  for i in `seq 1 300` ; do
      if ! rados -p $fast ls | grep obj1 ; then
          evicted=true
          break
      fi
      sleep 1
  done
  $evicted # assert the eviction happened
  # the object is proxy read and promoted to the cache
  rados -p $slow get obj1 - >/dev/null
  # wait for the promoted object to be evicted again
  evicted=false
  for i in `seq 1 300` ; do
      if ! rados -p $fast ls | grep obj1 ; then
          evicted=true
          break
      fi
      sleep 1
  done
  $evicted # assert the eviction happened
  ceph osd tier remove-overlay $slow
  ceph osd tier remove $slow $fast
  ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
  ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}
# Tiering state machine: cache-mode transitions (with/without dirty objects
# in the tier), overlay add/remove rules, pg_num growth on a cache pool,
# --force-nonempty, and `osd pool ls` listing of tier pools.
# NOTE(review): the interior of the pg_num retry loop and the stats-flush
# wait were partially elided in this chunk; reconstructed — verify upstream.
function test_tiering_1()
{
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create slow2 2
  ceph osd pool application enable slow2 rados
  ceph osd pool create cache 2
  ceph osd pool create cache2 2
  ceph osd tier add slow cache
  ceph osd tier add slow cache2
  expect_false ceph osd tier add slow2 cache
  # test some state transitions
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache readonly
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache writeback
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  # test with dirty objects in the tier pool
  # tier pool currently set to 'writeback'
  rados -p cache put /etc/passwd /etc/passwd
  # 1 dirty object in pool 'cache'
  ceph osd tier cache-mode cache proxy
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache writeback
  # remove object from tier pool
  rados -p cache rm /etc/passwd
  rados -p cache cache-flush-evict-all
  # no dirty objects in pool 'cache'
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  # growing a cache pool may be delayed while pgs are still being created
  TRIES=0
  while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
  do
    grep 'currently creating pgs' $TMPFILE
    TRIES=$(( $TRIES + 1 ))
    test $TRIES -ne 60
    sleep 3
  done
  expect_false ceph osd pool set cache pg_num 4
  ceph osd tier cache-mode cache none
  ceph osd tier set-overlay slow cache
  expect_false ceph osd tier set-overlay slow cache2
  expect_false ceph osd tier remove slow cache
  ceph osd tier remove-overlay slow
  ceph osd tier set-overlay slow cache2
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache
  ceph osd tier add slow2 cache
  expect_false ceph osd tier set-overlay slow cache
  ceph osd tier set-overlay slow2 cache
  ceph osd tier remove-overlay slow2
  ceph osd tier remove slow2 cache
  ceph osd tier remove slow cache2

  # make sure a non-empty pool fails
  rados -p cache2 put /etc/passwd /etc/passwd
  while ! ceph df | grep cache2 | grep ' 1 ' ; do
    echo waiting for pg stats to flush
    sleep 2
  done
  expect_false ceph osd tier add slow cache2
  ceph osd tier add slow cache2 --force-nonempty
  ceph osd tier remove slow cache2

  ceph osd pool ls | grep cache2
  ceph osd pool ls -f json-pretty | grep cache2
  ceph osd pool ls detail | grep cache2
  ceph osd pool ls detail -f json-pretty | grep cache2

  ceph osd pool delete slow slow --yes-i-really-really-mean-it
  ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
  ceph osd pool delete cache cache --yes-i-really-really-mean-it
  ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
}
# A pool with snapshot state must be rejected as a cache tier.
function test_tiering_2()
{
  # make sure we can't clobber snapshot state
  ceph osd pool create snap_base 2
  ceph osd pool application enable snap_base rados
  ceph osd pool create snap_cache 2
  ceph osd pool mksnap snap_cache snapname
  expect_false ceph osd tier add snap_base snap_cache
  ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
  ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}
# Taking a snapshot of a pool acting as a cache tier must be refused.
function test_tiering_3()
{
  # make sure we can't create snapshot on tier
  ceph osd pool create basex 2
  ceph osd pool application enable basex rados
  ceph osd pool create cachex 2
  ceph osd tier add basex cachex
  # BUG FIX: snapshot the tier pool that was actually created ("cachex");
  # the original said "cache", a pool that does not exist here, so
  # expect_false passed for the wrong reason (ENOENT, not the tier check).
  expect_false ceph osd pool mksnap cachex snapname
  ceph osd tier remove basex cachex
  ceph osd pool delete basex basex --yes-i-really-really-mean-it
  ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}
# An erasure-coded pool must be refused as a cache tier.
function test_tiering_4()
{
  # make sure we can't create an ec pool tier
  ceph osd pool create eccache 2 2 erasure
  expect_false ceph osd set-require-min-compat-client bobtail
  ceph osd pool create repbase 2
  ceph osd pool application enable repbase rados
  expect_false ceph osd tier add repbase eccache
  ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
  ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}
# `osd tier add-cache` convenience command: default bloom hit-set settings,
# EBUSY on removing an overlay tier, and clean teardown.
function test_tiering_5()
{
  # convenient add-cache command
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create cache3 2
  ceph osd tier add-cache slow cache3 1024000
  # verify the defaults add-cache applied to the new tier
  ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
  ceph osd tier remove slow cache3 2> $TMPFILE || true
  check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache3
  ceph osd pool ls | grep cache3
  ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
  ! ceph osd pool ls | grep cache3 || exit 1
  ceph osd pool delete slow slow --yes-i-really-really-mean-it
}
# Functional check of add-cache: a write lands in the cache tier, can be
# flushed down, and is then visible in the base pool.
function test_tiering_6()
{
  # check add-cache whether work
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd tier cache-mode cachepool writeback
  rados -p datapool put object /etc/passwd
  rados -p cachepool stat object
  rados -p cachepool cache-flush object
  rados -p datapool stat object
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
# Pools participating in a tiering relationship must refuse deletion
# (EBUSY) until the tier/overlay links are dismantled.
function test_tiering_7()
{
  # protection against pool removal when used as tiers
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'datapool' has tiers cachepool"
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
# Health-check scenario: with the tier agent disabled, objects written to
# the cache tier still show up in the base pool's df accounting.
# NOTE(review): two interior lines were elided in this chunk (between the
# puts and the `ceph df` check); reconstructed as tmpfile cleanup + stats
# flush — verify against upstream.
function test_tiering_8()
{
  ## check health check
  ceph osd set notieragent
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cache4 2
  ceph osd tier add-cache datapool cache4 1024000
  ceph osd tier cache-mode cache4 writeback
  tmpfile=$(mktemp|grep tmp)
  dd if=/dev/zero of=$tmpfile bs=4K count=1
  ceph osd pool set cache4 target_max_objects 200
  ceph osd pool set cache4 target_max_bytes 1000000
  rados -p cache4 put foo1 $tmpfile
  rados -p cache4 put foo2 $tmpfile
  rm -f $tmpfile
  flush_pg_stats
  ceph df | grep datapool | grep ' 2 '
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cache4
  ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
  ceph osd unset notieragent
}
# `tier remove` against the wrong base pool must be a no-op that only says
# "not a tier of", leaving the real tier_of relationships intact.
function test_tiering_9()
{
  # make sure 'tier remove' behaves as we expect
  # i.e., removing a tier from a pool that's not its base pool only
  # results in a 'pool foo is now (or already was) not a tier of bar'
  ceph osd pool create basepoolA 2
  ceph osd pool application enable basepoolA rados
  ceph osd pool create basepoolB 2
  ceph osd pool application enable basepoolB rados
  poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
  poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')

  ceph osd pool create cache5 2
  ceph osd pool create cache6 2
  ceph osd tier add basepoolA cache5
  ceph osd tier add basepoolB cache6
  # removing with the wrong base pool: message only, link survives
  ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
  ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"

  # removing with the right base pool: link is gone
  ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
  ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1

  ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
  ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1

  ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
  ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
  ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
  ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}
581 ceph auth add client.xx mon allow osd
"allow *"
582 ceph auth
export client.xx
>client.xx.keyring
583 ceph auth add client.xx
-i client.xx.keyring
584 rm -f client.xx.keyring
585 ceph auth list |
grep client.xx
586 ceph auth
ls |
grep client.xx
587 ceph auth get client.xx |
grep caps |
grep mon
588 ceph auth get client.xx |
grep caps |
grep osd
589 ceph auth get-key client.xx
590 ceph auth print-key client.xx
591 ceph auth print_key client.xx
592 ceph auth caps client.xx osd
"allow rw"
593 expect_false sh
<<< "ceph auth get client.xx | grep caps | grep mon"
594 ceph auth get client.xx |
grep osd |
grep "allow rw"
595 ceph auth
export |
grep client.xx
596 ceph auth
export -o authfile
597 ceph auth import
-i authfile
598 ceph auth
export -o authfile2
599 diff authfile authfile2
600 rm authfile authfile2
601 ceph auth del client.xx
602 expect_false ceph auth get client.xx
604 # (almost) interactive mode
605 echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
606 ceph auth get client.xx
608 echo 'auth del client.xx' | ceph
609 expect_false ceph auth get client.xx
615 ceph-authtool
--create-keyring --name client.TEST
--gen-key --set-uid $auid TEST-keyring
616 expect_false ceph auth import
--in-file TEST-keyring
618 ceph-authtool
--create-keyring --name client.TEST
--gen-key --cap mon
"allow r" --set-uid $auid TEST-keyring
619 ceph auth import
--in-file TEST-keyring
621 ceph auth get client.TEST
> $TMPFILE
622 check_response
"auid = $auid"
623 ceph
--format json-pretty auth get client.TEST
> $TMPFILE
624 check_response
'"auid": '$auid
625 ceph auth
ls > $TMPFILE
626 check_response
"auid: $auid"
627 ceph
--format json-pretty auth
ls > $TMPFILE
628 check_response
'"auid": '$auid
629 ceph auth del client.TEST
# Verify the built-in auth profiles: read-only, read-write and role-definer
# clients each get exactly the command set their profile allows (EACCES
# everywhere else), and a role-definer can mint/remove other role-definers.
function test_auth_profiles()
{
  ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
       mgr 'allow profile read-only'
  ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
       mgr 'allow profile read-write'
  ceph auth add client.xx-profile-rd mon 'allow profile role-definer'

  ceph auth export > client.xx.keyring

  # read-only is allowed all read-only commands (auth excluded)
  ceph -n client.xx-profile-ro -k client.xx.keyring status
  ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
  ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
  # read-only gets access denied for rw commands or auth commands
  ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # read-write is allowed for all read-write commands (except auth)
  ceph -n client.xx-profile-rw -k client.xx.keyring status
  ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
  ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
  ceph -n client.xx-profile-rw -k client.xx.keyring log foo
  ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
  ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
  # read-write gets access denied for auth commands
  ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
  ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
  ceph -n client.xx-profile-rd -k client.xx.keyring auth export
  ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
  ceph -n client.xx-profile-rd -k client.xx.keyring status
  ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  # read-only 'mon' subsystem commands are allowed
  ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
  # but read-write 'mon' commands are not
  ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"

  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw

  # add a new role-definer with the existing role-definer
  ceph -n client.xx-profile-rd -k client.xx.keyring \
    auth add client.xx-profile-rd2 mon 'allow profile role-definer'
  ceph -n client.xx-profile-rd -k client.xx.keyring \
    auth export > client.xx.keyring.2
  # remove old role-definer using the new role-definer
  ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
    auth del client.xx-profile-rd
  # remove the remaining role-definer with admin
  ceph auth del client.xx-profile-rd2
  rm -f client.xx.keyring client.xx.keyring.2
}
# A client keyring with no mon caps (absent or explicitly empty) must get
# "Permission denied" when talking to the cluster.
function test_mon_caps()
{
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring

  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"

  rm -rf $TEMP_DIR/ceph.client.bug.keyring
  ceph auth del client.bug
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  # explicitly empty mon cap this time
  ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"
}
# Miscellaneous monitor commands: osd dump/df, `ceph df` in plain/json/xml,
# health formats, time-sync-status, cluster logging, and mgr modules.
# NOTE(review): this chunk has many interior lines elided (loop bodies,
# redirections around original lines 732-737, 749-763, 769-791); the text
# below is preserved as-is — reconstruct against upstream before running.
727 function test_mon_misc
()
729 # with and without verbosity
730 ceph osd dump |
grep '^epoch'
731 ceph
--concise osd dump |
grep '^epoch'
733 ceph osd df |
grep 'MIN/MAX VAR'
# NOTE(review): the `ceph df > $TMPFILE` producing this capture is elided.
738 grep -v DIRTY
$TMPFILE
739 ceph df detail
> $TMPFILE
741 ceph df
--format json
> $TMPFILE
742 grep 'total_bytes' $TMPFILE
743 grep -v 'dirty' $TMPFILE
744 ceph df detail
--format json
> $TMPFILE
745 grep 'rd_bytes' $TMPFILE
746 grep 'dirty' $TMPFILE
747 ceph df
--format xml |
grep '<total_bytes>'
748 ceph df detail
--format xml |
grep '<rd_bytes>'
753 ceph health
--format json-pretty
754 ceph health detail
--format xml-pretty
756 ceph time-sync-status
# NOTE(review): body and `done` of this versions loop are elided.
759 for t
in mon osd mds
; do
764 mymsg
="this is a test log message $$.$(date)"
# NOTE(review): the `ceph log "$mymsg"` / watch-start lines are elided.
766 ceph log last |
grep "$mymsg"
767 ceph log last
100 |
grep "$mymsg"
768 ceph_watch_wait
"$mymsg"
772 ceph mgr module
enable restful
773 expect_false ceph mgr module
enable foodne
774 ceph mgr module
enable foodne
--force
775 ceph mgr module disable foodne
776 ceph mgr module disable foodnebizbangbash
780 ceph mon count-metadata ceph_version
785 ceph mgr count-metadata ceph_version
# check_mds_active fs_name
# Succeed iff `ceph fs get` for the filesystem reports an active MDS.
function check_mds_active()
{
    fs_name=$1
    ceph fs get $fs_name | grep active
}
# wait_mds_active fs_name
# Poll until an MDS is active for the filesystem; the trailing check
# propagates the final state to the caller.
# NOTE(review): the loop tail (sleep/break/done) was elided in this chunk;
# reconstructed — verify the sleep interval against upstream.
function wait_mds_active()
{
    fs_name=$1
    max_run=300
    for i in $(seq 1 $max_run) ; do
        if ! check_mds_active $fs_name ; then
            echo "waiting for an active MDS daemon ($i/$max_run)"
            sleep 5
        else
            break
        fi
    done
    check_mds_active $fs_name
}
# get_mds_gids fs_name
# Print the space-separated GIDs of all MDS daemons in the filesystem.
# NOTE(review): the inline script uses a python2 print statement — this
# requires `python` to be python2.
function get_mds_gids()
{
    fs_name=$1
    ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
}
# fail_all_mds fs_name
# Mark the filesystem down and fail every MDS in it; die if an active MDS
# somehow survives.
# NOTE(review): loop/if closing lines were elided in this chunk;
# reconstructed — verify against upstream.
function fail_all_mds()
{
    fs_name=$1
    ceph fs set $fs_name cluster_down true
    mds_gids=$(get_mds_gids $fs_name)
    for mds_gid in $mds_gids ; do
        ceph mds fail $mds_gid
    done
    if check_mds_active $fs_name ; then
        echo "An active MDS remains, something went wrong"
        exit 1
    fi
}
# Tear down every CephFS filesystem in the cluster: fail its MDS daemons,
# then `fs rm` it.  Pool deletion is left to the caller.
# NOTE(review): the inline python is python2-only (print statement).
function remove_all_fs()
{
  existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
  for fs_name in $existing_fs ; do
      echo "Removing fs ${fs_name}..."
      fail_all_mds $fs_name
      echo "Removing existing filesystem '${fs_name}'..."
      ceph fs rm $fs_name --yes-i-really-mean-it
      echo "Removed '${fs_name}'."
  done
}
847 # So that tests requiring MDS can skip if one is not configured
848 # in the cluster at all
# Succeed iff at least one MDS key is registered with the cluster.
function mds_exists()
{
    ceph auth ls | grep "^mds"
}
# some of the commands are just not idempotent.
# Run "$@" with CEPH_CLI_TEST_DUP_COMMAND temporarily unset, restoring the
# previous value afterwards (or leaving it unset if it wasn't set).
function without_test_dup_command()
{
    if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
        "$@"
    else
        local saved=${CEPH_CLI_TEST_DUP_COMMAND}
        unset CEPH_CLI_TEST_DUP_COMMAND
        "$@"
        # BUG FIX: restore the saved value — the original assigned the
        # literal string "saved" (missing '$'), clobbering the setting.
        CEPH_CLI_TEST_DUP_COMMAND=$saved
    fi
}
# `ceph tell mds.*`: injectargs by GID, rejection of malformed injectargs,
# and respawn by rank / by name (observed via GID changes).
# NOTE(review): this chunk has interior lines elided (the skip-return after
# the mds_exists check, remove_all_fs/fs creation scaffolding, and the
# sleep/done closing both respawn-wait loops); text preserved as-is —
# reconstruct against upstream before running.
867 function test_mds_tell
()
870 if ! mds_exists
; then
871 echo "Skipping test, no MDS found"
876 ceph osd pool create fs_data
10
877 ceph osd pool create fs_metadata
10
878 ceph fs new
$FS_NAME fs_metadata fs_data
879 wait_mds_active
$FS_NAME
881 # Test injectargs by GID
882 old_mds_gids
=$
(get_mds_gids
$FS_NAME)
883 echo Old GIDs
: $old_mds_gids
885 for mds_gid
in $old_mds_gids ; do
886 ceph tell mds.
$mds_gid injectargs
"--debug-mds 20"
888 expect_false ceph tell mds.a injectargs mds_max_file_recover
-1
890 # Test respawn by rank
891 without_test_dup_command ceph tell mds
.0 respawn
892 new_mds_gids
=$old_mds_gids
893 while [ $new_mds_gids -eq $old_mds_gids ] ; do
895 new_mds_gids
=$
(get_mds_gids
$FS_NAME)
897 echo New GIDs
: $new_mds_gids
# NOTE(review): respawn by name follows; same GID-change wait pattern.
900 without_test_dup_command ceph tell mds.a respawn
901 new_mds_gids
=$old_mds_gids
902 while [ $new_mds_gids -eq $old_mds_gids ] ; do
904 new_mds_gids
=$
(get_mds_gids
$FS_NAME)
906 echo New GIDs
: $new_mds_gids
909 ceph osd pool delete fs_data fs_data
--yes-i-really-really-mean-it
910 ceph osd pool delete fs_metadata fs_metadata
--yes-i-really-really-mean-it
913 function test_mon_mds
()
918 ceph osd pool create fs_data
10
919 ceph osd pool create fs_metadata
10
920 ceph fs new
$FS_NAME fs_metadata fs_data
922 ceph fs
set $FS_NAME cluster_down true
923 ceph fs
set $FS_NAME cluster_down false
925 # Legacy commands, act on default fs
926 ceph mds cluster_down
929 ceph mds compat rm_incompat
4
930 ceph mds compat rm_incompat
4
932 # We don't want any MDSs to be up, their activity can interfere with
933 # the "current_epoch + 1" checking below if they're generating updates
934 fail_all_mds
$FS_NAME
937 expect_false ceph mds deactivate
2
941 for mds_gid
in $
(get_mds_gids
$FS_NAME) ; do
942 ceph mds metadata
$mds_id
946 ceph mds count-metadata os
948 # XXX mds fail, but how do you undo it?
949 mdsmapfile
=$TEMP_DIR/mdsmap.$$
950 current_epoch
=$
(ceph mds getmap
-o $mdsmapfile --no-log-to-stderr 2>&1 |
grep epoch |
sed 's/.*epoch //')
954 ceph osd pool create data2
10
955 ceph osd pool create data3
10
956 data2_pool
=$
(ceph osd dump |
grep "pool.*'data2'" |
awk '{print $2;}')
957 data3_pool
=$
(ceph osd dump |
grep "pool.*'data3'" |
awk '{print $2;}')
958 ceph mds add_data_pool
$data2_pool
959 ceph mds add_data_pool
$data3_pool
960 ceph mds add_data_pool
100 >& $TMPFILE || true
961 check_response
"Error ENOENT"
962 ceph mds add_data_pool foobarbaz
>& $TMPFILE || true
963 check_response
"Error ENOENT"
964 ceph mds remove_data_pool
$data2_pool
965 ceph mds remove_data_pool
$data3_pool
966 ceph osd pool delete data2 data2
--yes-i-really-really-mean-it
967 ceph osd pool delete data3 data3
--yes-i-really-really-mean-it
968 ceph mds
set allow_multimds false
969 expect_false ceph mds set_max_mds
4
970 ceph mds
set allow_multimds true
971 ceph mds set_max_mds
4
972 ceph mds set_max_mds
3
973 ceph mds set_max_mds
256
974 expect_false ceph mds set_max_mds
257
975 ceph mds
set max_mds
4
976 ceph mds
set max_mds
256
977 expect_false ceph mds
set max_mds
257
978 expect_false ceph mds
set max_mds asdf
979 expect_false ceph mds
set inline_data true
980 ceph mds
set inline_data true
--yes-i-really-mean-it
981 ceph mds
set inline_data
yes --yes-i-really-mean-it
982 ceph mds
set inline_data
1 --yes-i-really-mean-it
983 expect_false ceph mds
set inline_data
--yes-i-really-mean-it
984 ceph mds
set inline_data false
985 ceph mds
set inline_data no
986 ceph mds
set inline_data
0
987 expect_false ceph mds
set inline_data asdf
988 ceph mds
set max_file_size
1048576
989 expect_false ceph mds
set max_file_size
123asdf
991 expect_false ceph mds
set allow_new_snaps
992 expect_false ceph mds
set allow_new_snaps true
993 ceph mds
set allow_new_snaps true
--yes-i-really-mean-it
994 ceph mds
set allow_new_snaps
0
995 ceph mds
set allow_new_snaps false
996 ceph mds
set allow_new_snaps no
997 expect_false ceph mds
set allow_new_snaps taco
999 # we should never be able to add EC pools as data or metadata pools
1000 # create an ec-pool...
1001 ceph osd pool create mds-ec-pool
10 10 erasure
1003 ceph mds add_data_pool mds-ec-pool
2>$TMPFILE
1004 check_response
'erasure-code' $?
22
1006 ec_poolnum
=$
(ceph osd dump |
grep "pool.* 'mds-ec-pool" |
awk '{print $2;}')
1007 data_poolnum
=$
(ceph osd dump |
grep "pool.* 'fs_data" |
awk '{print $2;}')
1008 metadata_poolnum
=$
(ceph osd dump |
grep "pool.* 'fs_metadata" |
awk '{print $2;}')
1010 fail_all_mds
$FS_NAME
1013 # Check that rmfailed requires confirmation
1014 expect_false ceph mds rmfailed
0
1015 ceph mds rmfailed
0 --yes-i-really-mean-it
1018 # Check that `newfs` is no longer permitted
1019 expect_false ceph mds newfs
$metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
1021 # Check that 'fs reset' runs
1022 ceph fs
reset $FS_NAME --yes-i-really-mean-it
1024 # Check that creating a second FS fails by default
1025 ceph osd pool create fs_metadata2
10
1026 ceph osd pool create fs_data2
10
1028 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
1031 # Check that setting enable_multiple enables creation of second fs
1032 ceph fs flag
set enable_multiple true
--yes-i-really-mean-it
1033 ceph fs new cephfs2 fs_metadata2 fs_data2
1035 # Clean up multi-fs stuff
1036 fail_all_mds cephfs2
1037 ceph fs
rm cephfs2
--yes-i-really-mean-it
1038 ceph osd pool delete fs_metadata2 fs_metadata2
--yes-i-really-really-mean-it
1039 ceph osd pool delete fs_data2 fs_data2
--yes-i-really-really-mean-it
1041 fail_all_mds
$FS_NAME
1043 # Clean up to enable subsequent fs new tests
1044 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1047 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
1048 check_response
'erasure-code' $?
22
1049 ceph fs new
$FS_NAME mds-ec-pool fs_data
2>$TMPFILE
1050 check_response
'erasure-code' $?
22
1051 ceph fs new
$FS_NAME mds-ec-pool mds-ec-pool
2>$TMPFILE
1052 check_response
'erasure-code' $?
22
1055 # ... new create a cache tier in front of the EC pool...
1056 ceph osd pool create mds-tier
2
1057 ceph osd tier add mds-ec-pool mds-tier
1058 ceph osd tier set-overlay mds-ec-pool mds-tier
1059 tier_poolnum
=$
(ceph osd dump |
grep "pool.* 'mds-tier" |
awk '{print $2;}')
1061 # Use of a readonly tier should be forbidden
1062 ceph osd tier cache-mode mds-tier
readonly --yes-i-really-mean-it
1064 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
1065 check_response
'has a write tier (mds-tier) that is configured to forward' $?
22
1068 # Use of a writeback tier should enable FS creation
1069 ceph osd tier cache-mode mds-tier writeback
1070 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force
1072 # While a FS exists using the tiered pools, I should not be allowed
1073 # to remove the tier
1075 ceph osd tier remove-overlay mds-ec-pool
2>$TMPFILE
1076 check_response
'in use by CephFS' $?
16
1077 ceph osd tier remove mds-ec-pool mds-tier
2>$TMPFILE
1078 check_response
'in use by CephFS' $?
16
1081 fail_all_mds
$FS_NAME
1082 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1084 # ... but we should be forbidden from using the cache pool in the FS directly.
1086 ceph fs new
$FS_NAME fs_metadata mds-tier
--force 2>$TMPFILE
1087 check_response
'in use as a cache tier' $?
22
1088 ceph fs new
$FS_NAME mds-tier fs_data
2>$TMPFILE
1089 check_response
'in use as a cache tier' $?
22
1090 ceph fs new
$FS_NAME mds-tier mds-tier
2>$TMPFILE
1091 check_response
'in use as a cache tier' $?
22
1094 # Clean up tier + EC pools
1095 ceph osd tier remove-overlay mds-ec-pool
1096 ceph osd tier remove mds-ec-pool mds-tier
1098 # Create a FS using the 'cache' pool now that it's no longer a tier
1099 ceph fs new
$FS_NAME fs_metadata mds-tier
--force
1101 # We should be forbidden from using this pool as a tier now that
1102 # it's in use for CephFS
1104 ceph osd tier add mds-ec-pool mds-tier
2>$TMPFILE
1105 check_response
'in use by CephFS' $?
16
1108 fail_all_mds
$FS_NAME
1109 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1111 # We should be permitted to use an EC pool with overwrites enabled
1112 # as the data pool...
1113 ceph osd pool
set mds-ec-pool allow_ec_overwrites true
1114 ceph fs new
$FS_NAME fs_metadata mds-ec-pool
--force 2>$TMPFILE
1115 fail_all_mds
$FS_NAME
1116 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1118 # ...but not as the metadata pool
1120 ceph fs new
$FS_NAME mds-ec-pool fs_data
2>$TMPFILE
1121 check_response
'erasure-code' $?
22
1124 ceph osd pool delete mds-ec-pool mds-ec-pool
--yes-i-really-really-mean-it
1126 # Create a FS and check that we can subsequently add a cache tier to it
1127 ceph fs new
$FS_NAME fs_metadata fs_data
--force
1129 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1130 ceph osd tier add fs_metadata mds-tier
1131 ceph osd tier cache-mode mds-tier writeback
1132 ceph osd tier set-overlay fs_metadata mds-tier
1134 # Removing tier should be permitted because the underlying pool is
1135 # replicated (#11504 case)
1136 ceph osd tier cache-mode mds-tier proxy
1137 ceph osd tier remove-overlay fs_metadata
1138 ceph osd tier remove fs_metadata mds-tier
1139 ceph osd pool delete mds-tier mds-tier
--yes-i-really-really-mean-it
1142 fail_all_mds
$FS_NAME
1143 ceph fs
rm $FS_NAME --yes-i-really-mean-it
1148 # ceph mds tell mds.a getmap
1151 # ceph mds set_state
1154 ceph osd pool delete fs_data fs_data
--yes-i-really-really-mean-it
1155 ceph osd pool delete fs_metadata fs_metadata
--yes-i-really-really-mean-it
function test_mon_mds_metadata()
{
  # Verify 'ceph mds metadata' works via gid, daemon name and rank, and that
  # every mon answers a broadcast metadata query identically.
  # Count the mons so we can check the fan-out below.
  local nmons=$(ceph tell 'mon.*' version | grep -c 'version')

  # NOTE(review): the producer feeding this pipeline was lost when the source
  # was mangled; upstream feeds the sed from 'ceph mds dump' — confirm vs VCS.
  ceph mds dump |
  sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
  while read gid id rank; do
    # Metadata must be fetchable by gid, by name and by rank alike.
    ceph mds metadata ${gid} | grep '"hostname":'
    ceph mds metadata ${id} | grep '"hostname":'
    ceph mds metadata ${rank} | grep '"hostname":'

    # Every mon must return the same metadata for this daemon.
    local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
    test "$n" -eq "$nmons"
  done

  # Unknown daemon names must be rejected.
  expect_false ceph mds metadata UNKNOWN
}
function test_mon_mon()
{
  # Exercise basic mon commands: map retrieval and mon feature management.
  # print help message
  # NOTE(review): the statement under the comment above (and a few other lines
  # of this function) were lost when the source was mangled; restore from VCS.
  ceph mon getmap -o $TEMP_DIR/monmap.$$
  [ -s $TEMP_DIR/monmap.$$ ]

  # Known feature names can be set (with confirmation); bogus ones are
  # rejected with and without the confirmation flag.
  ceph mon feature set kraken --yes-i-really-mean-it
  expect_false ceph mon feature set abcd
  expect_false ceph mon feature set abcd --yes-i-really-mean-it
}
function gen_secrets_file()
{
  # Generate a json secrets file for 'ceph osd new -i'; prints the file path.
  # NOTE(review): reconstructed from a line-mangled source; lines marked
  # 'restored' were lost and re-derived from context — verify against VCS.
  # lets assume we can have the following types
  # all - generates both cephx and lockbox, with mock dm-crypt key
  # cephx - only cephx
  # no_cephx - lockbox and dm-crypt, no cephx
  # no_lockbox - dm-crypt and cephx, no lockbox
  # empty - empty file
  # empty_json - correct json, empty map
  # bad_json - bad json :)
  local t=$1                  # restored
  if [[ -z "$t" ]]; then
    t="all"                   # restored — default to the full secret set
  fi

  fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
  echo $fn                    # restored — caller captures the path via $()
  if [[ "$t" == "empty" ]]; then
    return 0                  # restored — leave the file empty
  fi

  echo "{" > $fn              # restored — open the json map
  if [[ "$t" == "bad_json" ]]; then
    echo "asd: ; }" >> $fn
    return 0                  # restored
  elif [[ "$t" == "empty_json" ]]; then
    echo "}" >> $fn           # restored — close immediately: empty map
    return 0                  # restored
  fi

  cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
  lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
  dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""

  if [[ "$t" == "all" ]]; then
    echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "cephx" ]]; then
    echo "$cephx_secret" >> $fn
  elif [[ "$t" == "no_cephx" ]]; then
    echo "$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "no_lockbox" ]]; then
    echo "$cephx_secret,$dmcrypt_key" >> $fn
  else                        # restored
    echo "unknown gen_secrets_file() type \'$fn\'"
    return 1                  # restored
  fi

  echo "}" >> $fn             # restored — close the json map
  return 0                    # restored
}
function test_mon_osd_create_destroy()
{
  # Exercise the 'osd new' / 'osd destroy' / 'osd purge' life-cycle, including
  # the secrets-file (-i) path and the dm-crypt lockbox keys.
  # NOTE(review): reconstructed from a line-mangled source; lines marked
  # 'restored' were lost and re-derived from context — verify against VCS.

  # 'osd new' with no/invalid arguments must fail cleanly.
  ceph osd new 2>&1 | grep 'EINVAL'
  ceph osd new '' -1 2>&1 | grep 'EINVAL'
  ceph osd new '' 10 2>&1 | grep 'EINVAL'

  old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

  old_osds=$(ceph osd ls)
  num_osds=$(ceph osd ls | wc -l)

  uuid=$(uuidgen)             # restored — '$uuid' is used below
  id=$(ceph osd new $uuid 2>/dev/null)

  # the new id must not collide with any pre-existing osd
  for i in $old_osds; do
    [[ "$i" != "$id" ]]       # restored loop body
  done

  # 'osd new' with the same uuid is idempotent, with and without explicit id
  id2=`ceph osd new $uuid 2>/dev/null`
  ceph osd new $uuid $id

  id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST

  uuid2=$(uuidgen)            # restored
  id2=$(ceph osd new $uuid2)
  [[ "$id2" != "$id" ]]
  # id/uuid cross-matches must be rejected, matching pair accepted
  ceph osd new $uuid $id2 2>&1 | grep EEXIST
  ceph osd new $uuid2 $id2

  # secrets-file variants
  empty_secrets=$(gen_secrets_file "empty")
  empty_json=$(gen_secrets_file "empty_json")
  all_secrets=$(gen_secrets_file "all")
  cephx_only=$(gen_secrets_file "cephx")
  no_cephx=$(gen_secrets_file "no_cephx")
  no_lockbox=$(gen_secrets_file "no_lockbox")
  bad_json=$(gen_secrets_file "bad_json")

  # empty secrets should be idempotent
  new_id=$(ceph osd new $uuid $id -i $empty_secrets)
  [[ "$new_id" == "$id" ]]

  # empty json, thus empty secrets
  new_id=$(ceph osd new $uuid $id -i $empty_json)
  [[ "$new_id" == "$id" ]]

  ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'

  ceph osd setmaxosd $old_maxosd

  # lockbox without cephx (and vice versa) is invalid
  ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'

  id=$(ceph osd new $uuid -i $all_secrets)

  # validate secrets and dm-crypt are set
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]              # restored assert
  k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty \
    2>/dev/null | jq '.key')  # restored — continuation of this line was lost
  s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
  [[ $k == $s ]]              # restored assert
  ceph config-key exists dm-crypt/osd/$uuid/luks

  id2=$(ceph osd new $uuid2 -i $cephx_only)
  [[ "$i" != "$id2" ]]

  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]              # restored assert
  # cephx-only osd must have neither lockbox key nor dm-crypt key
  expect_false ceph auth get-key client.osd-lockbox.$uuid2
  expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks

  # destroy is accepted by name and by id, and marks the osd destroyed
  ceph osd destroy osd.$id2 --yes-i-really-mean-it
  ceph osd destroy $id2 --yes-i-really-mean-it
  expect_false ceph auth get-key osd.$id2
  ceph osd dump | grep osd.$id2 | grep destroyed

  # recreating a destroyed id with a new uuid clears the destroyed flag
  id3=$id2                    # restored
  uuid3=$(uuidgen)            # restored
  ceph osd new $uuid3 $id3 -i $all_secrets
  ceph osd dump | grep osd.$id3 | expect_false grep destroyed
  ceph auth get-key client.osd-lockbox.$uuid3
  ceph auth get-key osd.$id3
  ceph config-key exists dm-crypt/osd/$uuid3/luks

  # purge removes auth and config-key state
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  expect_false ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  expect_false ceph auth get-key client.osd-lockbox.$uuid3
  expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  ceph osd purge osd.$id3 --yes-i-really-mean-it # idempotent

  ceph osd purge osd.$id --yes-i-really-mean-it
  ceph osd purge 123456 --yes-i-really-mean-it
  expect_false ceph osd find $id
  expect_false ceph auth get-key osd.$id
  expect_false ceph auth get-key client.osd-lockbox.$uuid
  expect_false ceph config-key exists dm-crypt/osd/$uuid/luks

  rm $empty_secrets $empty_json $all_secrets $cephx_only \
    $no_cephx $no_lockbox $bad_json

  # none of the temporary osds may survive
  for i in $(ceph osd ls); do
    [[ "$i" != "$id" ]]       # restored
    [[ "$i" != "$id2" ]]
    [[ "$i" != "$id3" ]]
  done

  [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
  ceph osd setmaxosd $old_maxosd
}
function test_mon_config_key()
{
  # Exercise the config-key store: set/get/list/dump/rm round-trip.
  # Use a unique key so pre-existing store content cannot interfere.
  key=asdfasdfqwerqwreasdfuniquesa123df
  ceph config-key list | grep -c $key | grep 0
  ceph config-key get $key | grep -c bar | grep 0
  ceph config-key set $key bar
  ceph config-key get $key | grep bar
  ceph config-key list | grep -c $key | grep 1
  ceph config-key dump | grep $key | grep bar
  ceph config-key rm $key
  # after removal the key is gone from get, list and dump alike
  expect_false ceph config-key get $key
  ceph config-key list | grep -c $key | grep 0
  ceph config-key dump | grep -c $key | grep 0
}
function test_mon_osd()
{
  # Exercise mon-side osd commands: blacklist, crush tunables, cluster flags,
  # up/down/in/out, maxosd, osd create, pools, and 'osd tree' filters.
  # NOTE(review): reconstructed from a line-mangled source; lines marked
  # 'restored' were lost and re-derived from context — verify against VCS.

  #
  # osd blacklist
  #
  bl=192.168.0.1:0/1000
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
  ceph osd dump --format=json-pretty | grep $bl
  ceph osd dump | grep "^blacklist $bl"
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl

  # test without nonce, invalid nonce
  bl=192.168.0.1              # restored
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl
  expect_false "ceph osd blacklist $bl/-1"
  expect_false "ceph osd blacklist $bl/foo"

  # test with wrong address
  expect_false "ceph osd blacklist 1234.56.78.90/100"

  # 'blacklist clear' wipes all entries
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist clear
  ceph osd blacklist ls | expect_false grep $bl

  #
  # osd crush
  #
  ceph osd crush reweight-all
  ceph osd crush tunables legacy
  ceph osd crush show-tunables | grep argonaut
  ceph osd crush tunables bobtail
  ceph osd crush show-tunables | grep bobtail
  ceph osd crush tunables firefly
  ceph osd crush show-tunables | grep firefly

  ceph osd crush set-tunable straw_calc_version 0
  ceph osd crush get-tunable straw_calc_version | grep 0
  ceph osd crush set-tunable straw_calc_version 1
  ceph osd crush get-tunable straw_calc_version | grep 1

  # require-min-compat-client
  expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
  ceph osd set-require-min-compat-client luminous
  ceph osd dump | grep 'require_min_compat_client luminous'

  # how do I tell when these are done?
  ceph osd deep-scrub 0

  # cluster-wide flags can be set and unset
  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
  do
    ceph osd set $f           # restored loop body
    ceph osd unset $f         # restored loop body
  done
  expect_false ceph osd unset sortbitwise # cannot be unset
  expect_false ceph osd set bogus
  expect_false ceph osd unset bogus
  ceph osd require-osd-release luminous
  # can't lower (or use new command for anything but jewel)
  expect_false ceph osd require-osd-release jewel
  # these are no-ops but should succeed.
  ceph osd set require_jewel_osds
  ceph osd set require_kraken_osds
  expect_false ceph osd unset require_jewel_osds

  ceph osd down 0             # restored
  ceph osd dump | grep 'osd.0 down'

  local max_run=1000          # restored — TODO confirm upstream value
  for ((i=0; i < $max_run; i++)); do
    if ! ceph osd dump | grep 'osd.0 up'; then
      echo "waiting for osd.0 to come back up ($i/$max_run)"
      sleep 1                 # restored
    else
      break                   # restored
    fi
  done
  ceph osd dump | grep 'osd.0 up'

  ceph osd dump | grep 'osd.0 up'
  # ceph osd find expects the OsdName, so both ints and osd.n should work.
  expect_false ceph osd find osd.xyz
  expect_false ceph osd find xyz
  expect_false ceph osd find 0.1
  ceph --format plain osd find 1 # falls back to json-pretty
  if [ `uname` == Linux ]; then
    ceph osd metadata 1 | grep 'distro'
    ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
  fi
  ceph osd out 0              # restored
  ceph osd dump | grep 'osd.0.*out'
  ceph osd in 0               # restored
  ceph osd dump | grep 'osd.0.*in'

  # nodown/noin/noout on individual osds surface as health warnings
  ceph osd add-nodown 0 1
  ceph health detail | grep 'NODOWN'
  ceph osd rm-nodown 0 1
  ! ceph health detail | grep 'NODOWN'

  ceph osd out 0 # so we can mark it as noin later
  ceph osd add-noin 0         # restored
  ceph health detail | grep 'NOIN'
  ceph osd rm-noin 0          # restored
  ! ceph health detail | grep 'NOIN'
  ceph osd in 0               # restored

  ceph osd add-noout 0
  ceph health detail | grep 'NOOUT'
  ceph osd rm-noout 0         # restored
  ! ceph health detail | grep 'NOOUT'

  # malformed osd ids must be rejected
  expect_false ceph osd add-noup 797er
  expect_false ceph osd add-nodown u9uwer
  expect_false ceph osd add-noin 78~15
  expect_false ceph osd add-noout 0 all 1
  expect_false ceph osd rm-noup 1234567
  expect_false ceph osd rm-nodown fsadf7
  expect_false ceph osd rm-noin 0 1 any
  expect_false ceph osd rm-noout 790-fd

  ids=`ceph osd ls-tree default`
  for osd in $ids; do         # restored loop header
    ceph osd add-nodown $osd
    ceph osd add-noout $osd
  done
  ceph -s | grep 'NODOWN'
  ceph -s | grep 'NOOUT'
  # 'any'/'all' clear every per-osd flag at once
  ceph osd rm-nodown any
  ceph osd rm-noout all
  ! ceph -s | grep 'NODOWN'
  ! ceph -s | grep 'NOOUT'

  # make sure mark out preserves weight
  ceph osd reweight osd.0 .5
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
  ceph osd out 0              # restored
  ceph osd in 0               # restored
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'

  f=$TEMP_DIR/map.$$          # restored
  ceph osd getmap -o $f
  [ -s $f ]                   # restored
  rm $f                       # restored

  save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
  ceph osd setmaxosd 10
  ceph osd getmaxosd | grep 'max_osd = 10'
  ceph osd setmaxosd $save
  ceph osd getmaxosd | grep "max_osd = $save"

  for id in `ceph osd ls` ; do
    retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
  done

  ceph osd rm 0 2>&1 | grep 'EBUSY'

  local old_osds=$(echo $(ceph osd ls))
  id=`ceph osd create`
  ceph osd lost $id --yes-i-really-mean-it
  expect_false ceph osd setmaxosd $id
  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id           # restored loop body
  done

  uuid=`uuidgen`              # restored
  id=`ceph osd create $uuid`
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]          # restored — same uuid yields same id

  ceph osd setmaxosd $id
  ceph osd getmaxosd | grep "max_osd = $save"

  max_osd=$save               # restored — TODO confirm initialization vs VCS
  ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
  ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'

  id=`ceph osd create $uuid $max_osd`
  [ "$id" = "$max_osd" ]
  max_osd=$((max_osd + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
  ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]          # restored
  id2=`ceph osd create $uuid $id`
  [ "$id" = "$id2" ]          # restored

  # creating above a gap extends max_osd to just past the new id
  local gap_start=$max_osd
  id=`ceph osd create $uuid $((gap_start + 100))`
  [ "$id" = "$((gap_start + 100))" ]
  max_osd=$((id + 1))         # restored
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'

  # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
  # is repeated and consumes two osd id, not just one.
  local next_osd=$gap_start
  id=`ceph osd create $(uuidgen)`
  [ "$id" = "$next_osd" ]

  next_osd=$((id + 1))
  id=`ceph osd create $(uuidgen) $next_osd`
  [ "$id" = "$next_osd" ]

  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id           # restored loop body
  done
  ceph osd setmaxosd $save    # restored — TODO confirm vs VCS

  ceph osd pool create data 10
  ceph osd pool application enable data rados
  ceph osd lspools | grep data
  ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
  ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pause              # restored
  ceph osd dump | grep 'flags.*pauserd,pausewr'
  ceph osd unpause            # restored

  # 'osd tree' state filters: valid combinations succeed, contradictory fail
  ceph osd tree destroyed
  ceph osd tree up out
  ceph osd tree down in
  ceph osd tree down out
  ceph osd tree out down
  expect_false ceph osd tree up down
  expect_false ceph osd tree up destroyed
  expect_false ceph osd tree down destroyed
  expect_false ceph osd tree up down destroyed
  expect_false ceph osd tree in out
  expect_false ceph osd tree up foo

  ceph osd count-metadata os

  ceph osd stat | grep up,
}
function test_mon_crush()
{
  # Verify setcrushmap epoch handling: a future epoch is rejected, the
  # current epoch is accepted and bumps the map, and re-setting the same
  # map is idempotent.
  # NOTE(review): the definition of $f was lost when the source was mangled;
  # restored as a temp file path — confirm against VCS.
  local f=$TEMP_DIR/map.$$

  epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
  nextepoch=$(( $epoch + 1 ))
  echo epoch $epoch nextepoch $nextepoch

  # setcrushmap with the (not-yet-existing) next epoch must fail
  expect_false ceph osd setcrushmap $nextepoch -i $f

  # setcrushmap with the current epoch succeeds and reports the new epoch
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  # should be idempotent
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo epoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
}
function test_mon_osd_pool()
{
  # Exercise pool create/rename/snap/delete and erasure-pool constraints.
  ceph osd pool create data 10
  ceph osd pool application enable data rados
  ceph osd pool mksnap data datasnap
  rados -p data lssnap | grep datasnap
  ceph osd pool rmsnap data datasnap
  expect_false ceph osd pool rmsnap pool_fake snapshot
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pool create data2 10
  ceph osd pool application enable data2 rados
  ceph osd pool rename data2 data3
  ceph osd lspools | grep data3
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it

  # re-creating an existing pool with the same type is idempotent
  ceph osd pool create replicated 12 12 replicated
  ceph osd pool create replicated 12 12 replicated
  ceph osd pool create replicated 12 12 # default is replicated
  ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
  ceph osd pool application enable replicated rados
  # should fail because the type is not the same
  expect_false ceph osd pool create replicated 12 12 erasure
  ceph osd lspools | grep replicated
  ceph osd pool create ec_test 1 1 erasure
  ceph osd pool application enable ec_test rados
  ceph osd count-metadata osd_objectstore | grep 'bluestore'
  if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
    ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
    check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
  else                  # restored — 'else'/'fi' lost in the mangled source
    ceph osd pool set ec_test allow_ec_overwrites true || return 1
    # once enabled, ec overwrites cannot be turned back off
    expect_false ceph osd pool set ec_test allow_ec_overwrites false
  fi
  ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
  ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
}
function test_mon_osd_pool_quota()
{
  # test osd pool set/get quota
  ceph osd pool create tmp-quota-pool 36
  ceph osd pool application enable tmp-quota-pool rados

  # set erroneous quotas
  expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
  expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
  expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa

  # set valid quotas
  ceph osd pool set-quota tmp-quota-pool max_bytes 10
  ceph osd pool set-quota tmp-quota-pool max_objects 10M

  # get quotas in json-pretty format
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_objects":.*10000000'
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_bytes":.*10'

  # plain-format output uses human-readable units
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10M objects'

  # set valid quotas with unit prefix
  ceph osd pool set-quota tmp-quota-pool max_bytes 10K
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10Ki'

  # set valid quotas with unit prefix
  ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10Ki'

  # setting a quota to 0 disables it (shown as N/A)
  ceph osd pool set-quota tmp-quota-pool max_bytes 0
  ceph osd pool set-quota tmp-quota-pool max_objects 0
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'

  ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
}
function test_mon_pg()
{
  # Exercise pg dump/ls variants, full-ratio handling, injected full states,
  # reweight/primary-affinity validation, and pg-temp.
  # NOTE(review): reconstructed from a line-mangled source; a few original
  # statements were lost in extraction — verify against VCS.

  # Make sure we start healthy.
  ceph pg debug unfound_objects_exist
  ceph pg debug degraded_pgs_exist
  ceph pg deep-scrub 1.0
  ceph pg dump pgs_brief --format=json
  ceph pg dump pgs --format=json
  ceph pg dump pools --format=json
  ceph pg dump osds --format=json
  ceph pg dump sum --format=json
  ceph pg dump all --format=json
  ceph pg dump pgs_brief osds --format=json
  ceph pg dump pools osds pgs_brief --format=json
  ceph pg dump_pools_json
  ceph pg dump_stuck inactive
  ceph pg dump_stuck unclean
  ceph pg dump_stuck stale
  ceph pg dump_stuck undersized
  ceph pg dump_stuck degraded

  # pg ls accepts pool ids and state filters; bogus states are rejected
  expect_false ceph pg ls scrubq
  ceph pg ls active stale repair recovering
  ceph pg ls 1 active stale
  ceph pg ls-by-primary osd.0
  ceph pg ls-by-primary osd.0 1
  ceph pg ls-by-primary osd.0 active
  ceph pg ls-by-primary osd.0 active stale
  ceph pg ls-by-primary osd.0 1 active stale
  ceph pg ls-by-osd osd.0
  ceph pg ls-by-osd osd.0 1
  ceph pg ls-by-osd osd.0 active
  ceph pg ls-by-osd osd.0 active stale
  ceph pg ls-by-osd osd.0 1 active stale
  ceph pg ls-by-pool rbd
  ceph pg ls-by-pool rbd active stale
  # can't test this...
  # ceph pg force_create_pg
  ceph pg getmap -o $TEMP_DIR/map.$$
  [ -s $TEMP_DIR/map.$$ ]
  ceph pg map 1.0 | grep acting

  # full ratios must show up in the osdmap
  ceph osd set-full-ratio .962
  ceph osd dump | grep '^full_ratio 0.962'
  ceph osd set-backfillfull-ratio .912
  ceph osd dump | grep '^backfillfull_ratio 0.912'
  ceph osd set-nearfull-ratio .892
  ceph osd dump | grep '^nearfull_ratio 0.892'

  # Check health status
  ceph osd set-nearfull-ratio .913
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-nearfull-ratio .892
  ceph osd set-backfillfull-ratio .963
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-backfillfull-ratio .912

  # Check injected full results
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
  wait_for_health "OSD_NEARFULL"
  ceph health detail | grep "osd.0 is near full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none

  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
  wait_for_health "OSD_BACKFILLFULL"
  ceph health detail | grep "osd.1 is backfill full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none

  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
  # failsafe and full are the same as far as the monitor is concerned
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.2 is full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none

  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.0 is full"
  $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none

  ceph pg stat | grep 'pgs:'

  ceph report | grep osd_stats

  ceph tell osd.0 version
  expect_false ceph tell osd.9999 version
  expect_false ceph tell osd.foo version

  ceph tell osd.0 dump_pg_recovery_stats | grep Started

  # reweight / primary-affinity accept both int ids and osd.n names and
  # reject out-of-range values and unknown osds
  ceph osd reweight 0 0.9
  expect_false ceph osd reweight 0 -1
  ceph osd reweight osd.0 1

  ceph osd primary-affinity osd.0 .9
  expect_false ceph osd primary-affinity osd.0 -2
  expect_false ceph osd primary-affinity osd.9999 .5
  ceph osd primary-affinity osd.0 1

  ceph osd pool set rbd size 2
  ceph osd pg-temp 1.0 0 1
  ceph osd pg-temp 1.0 osd.1 osd.0
  expect_false ceph osd pg-temp 1.0 0 1 2
  expect_false ceph osd pg-temp asdf qwer
  expect_false ceph osd pg-temp 1.0 asdf
  expect_false ceph osd pg-temp 1.0

  # don't test ceph osd primary-temp for now
}
()
1944 TEST_POOL_GETSET
=pool_getset
1945 ceph osd pool create
$TEST_POOL_GETSET 1
1946 ceph osd pool application
enable $TEST_POOL_GETSET rados
1948 ceph osd pool get
$TEST_POOL_GETSET all
1950 for s
in pg_num pgp_num size min_size crush_rule
; do
1951 ceph osd pool get
$TEST_POOL_GETSET $s
1954 old_size
=$
(ceph osd pool get
$TEST_POOL_GETSET size |
sed -e 's/size: //')
1955 (( new_size
= old_size
+ 1 ))
1956 ceph osd pool
set $TEST_POOL_GETSET size
$new_size
1957 ceph osd pool get
$TEST_POOL_GETSET size |
grep "size: $new_size"
1958 ceph osd pool
set $TEST_POOL_GETSET size
$old_size
1960 ceph osd pool create pool_erasure
1 1 erasure
1961 ceph osd pool application
enable pool_erasure rados
1964 ceph osd pool
set pool_erasure size
4444 2>$TMPFILE
1965 check_response
'not change the size'
1967 ceph osd pool get pool_erasure erasure_code_profile
1970 ceph osd pool
set $TEST_POOL_GETSET auid
$auid
1971 ceph osd pool get
$TEST_POOL_GETSET auid |
grep $auid
1972 ceph
--format=xml osd pool get
$TEST_POOL_GETSET auid |
grep $auid
1973 ceph osd pool
set $TEST_POOL_GETSET auid
0
1975 for flag
in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub
; do
1976 ceph osd pool
set $TEST_POOL_GETSET $flag false
1977 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: false"
1978 ceph osd pool
set $TEST_POOL_GETSET $flag true
1979 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: true"
1980 ceph osd pool
set $TEST_POOL_GETSET $flag 1
1981 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: true"
1982 ceph osd pool
set $TEST_POOL_GETSET $flag 0
1983 ceph osd pool get
$TEST_POOL_GETSET $flag |
grep "$flag: false"
1984 expect_false ceph osd pool
set $TEST_POOL_GETSET $flag asdf
1985 expect_false ceph osd pool
set $TEST_POOL_GETSET $flag 2
1988 ceph osd pool get
$TEST_POOL_GETSET scrub_min_interval | expect_false
grep '.'
1989 ceph osd pool
set $TEST_POOL_GETSET scrub_min_interval
123456
1990 ceph osd pool get
$TEST_POOL_GETSET scrub_min_interval |
grep 'scrub_min_interval: 123456'
1991 ceph osd pool
set $TEST_POOL_GETSET scrub_min_interval
0
1992 ceph osd pool get
$TEST_POOL_GETSET scrub_min_interval | expect_false
grep '.'
1994 ceph osd pool get
$TEST_POOL_GETSET scrub_max_interval | expect_false
grep '.'
1995 ceph osd pool
set $TEST_POOL_GETSET scrub_max_interval
123456
1996 ceph osd pool get
$TEST_POOL_GETSET scrub_max_interval |
grep 'scrub_max_interval: 123456'
1997 ceph osd pool
set $TEST_POOL_GETSET scrub_max_interval
0
1998 ceph osd pool get
$TEST_POOL_GETSET scrub_max_interval | expect_false
grep '.'
2000 ceph osd pool get
$TEST_POOL_GETSET deep_scrub_interval | expect_false
grep '.'
2001 ceph osd pool
set $TEST_POOL_GETSET deep_scrub_interval
123456
2002 ceph osd pool get
$TEST_POOL_GETSET deep_scrub_interval |
grep 'deep_scrub_interval: 123456'
2003 ceph osd pool
set $TEST_POOL_GETSET deep_scrub_interval
0
2004 ceph osd pool get
$TEST_POOL_GETSET deep_scrub_interval | expect_false
grep '.'
2006 ceph osd pool get
$TEST_POOL_GETSET recovery_priority | expect_false
grep '.'
2007 ceph osd pool
set $TEST_POOL_GETSET recovery_priority
5
2008 ceph osd pool get
$TEST_POOL_GETSET recovery_priority |
grep 'recovery_priority: 5'
2009 ceph osd pool
set $TEST_POOL_GETSET recovery_priority
0
2010 ceph osd pool get
$TEST_POOL_GETSET recovery_priority | expect_false
grep '.'
2012 ceph osd pool get
$TEST_POOL_GETSET recovery_op_priority | expect_false
grep '.'
2013 ceph osd pool
set $TEST_POOL_GETSET recovery_op_priority
5
2014 ceph osd pool get
$TEST_POOL_GETSET recovery_op_priority |
grep 'recovery_op_priority: 5'
2015 ceph osd pool
set $TEST_POOL_GETSET recovery_op_priority
0
2016 ceph osd pool get
$TEST_POOL_GETSET recovery_op_priority | expect_false
grep '.'
2018 ceph osd pool get
$TEST_POOL_GETSET scrub_priority | expect_false
grep '.'
2019 ceph osd pool
set $TEST_POOL_GETSET scrub_priority
5
2020 ceph osd pool get
$TEST_POOL_GETSET scrub_priority |
grep 'scrub_priority: 5'
2021 ceph osd pool
set $TEST_POOL_GETSET scrub_priority
0
2022 ceph osd pool get
$TEST_POOL_GETSET scrub_priority | expect_false
grep '.'
2024 ceph osd pool
set $TEST_POOL_GETSET nopgchange
1
2025 expect_false ceph osd pool
set $TEST_POOL_GETSET pg_num
10
2026 expect_false ceph osd pool
set $TEST_POOL_GETSET pgp_num
10
2027 ceph osd pool
set $TEST_POOL_GETSET nopgchange
0
2028 ceph osd pool
set $TEST_POOL_GETSET pg_num
10
2030 ceph osd pool
set $TEST_POOL_GETSET pgp_num
10
2032 old_pgs
=$
(ceph osd pool get
$TEST_POOL_GETSET pg_num |
sed -e 's/pg_num: //')
2033 new_pgs
=$
(($old_pgs + $
(ceph osd stat
--format json | jq
'.num_osds') * 32))
2034 ceph osd pool
set $TEST_POOL_GETSET pg_num
$new_pgs
2035 ceph osd pool
set $TEST_POOL_GETSET pgp_num
$new_pgs
2037 old_pgs
=$
(ceph osd pool get
$TEST_POOL_GETSET pg_num |
sed -e 's/pg_num: //')
2038 new_pgs
=$
(($old_pgs + $
(ceph osd stat
--format json | jq
'.num_osds') * 32 + 1))
2039 expect_false ceph osd pool
set $TEST_POOL_GETSET pg_num
$new_pgs
2041 ceph osd pool
set $TEST_POOL_GETSET nosizechange
1
2042 expect_false ceph osd pool
set $TEST_POOL_GETSET size
2
2043 expect_false ceph osd pool
set $TEST_POOL_GETSET min_size
2
2044 ceph osd pool
set $TEST_POOL_GETSET nosizechange
0
2045 ceph osd pool
set $TEST_POOL_GETSET size
2
2047 ceph osd pool
set $TEST_POOL_GETSET min_size
2
2049 expect_false ceph osd pool
set $TEST_POOL_GETSET hashpspool
0
2050 ceph osd pool
set $TEST_POOL_GETSET hashpspool
0 --yes-i-really-mean-it
2052 expect_false ceph osd pool
set $TEST_POOL_GETSET hashpspool
1
2053 ceph osd pool
set $TEST_POOL_GETSET hashpspool
1 --yes-i-really-mean-it
2055 ceph osd pool get rbd crush_rule |
grep 'crush_rule: '
2057 ceph osd pool get
$TEST_POOL_GETSET compression_mode | expect_false
grep '.'
2058 ceph osd pool
set $TEST_POOL_GETSET compression_mode aggressive
2059 ceph osd pool get
$TEST_POOL_GETSET compression_mode |
grep 'aggressive'
2060 ceph osd pool
set $TEST_POOL_GETSET compression_mode
unset
2061 ceph osd pool get
$TEST_POOL_GETSET compression_mode | expect_false
grep '.'
2063 ceph osd pool get
$TEST_POOL_GETSET compression_algorithm | expect_false
grep '.'
2064 ceph osd pool
set $TEST_POOL_GETSET compression_algorithm zlib
2065 ceph osd pool get
$TEST_POOL_GETSET compression_algorithm |
grep 'zlib'
2066 ceph osd pool
set $TEST_POOL_GETSET compression_algorithm
unset
2067 ceph osd pool get
$TEST_POOL_GETSET compression_algorithm | expect_false
grep '.'
2069 ceph osd pool get
$TEST_POOL_GETSET compression_required_ratio | expect_false
grep '.'
2070 expect_false ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
1.1
2071 expect_false ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
-.2
2072 ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
.2
2073 ceph osd pool get
$TEST_POOL_GETSET compression_required_ratio |
grep '.2'
2074 ceph osd pool
set $TEST_POOL_GETSET compression_required_ratio
0
2075 ceph osd pool get
$TEST_POOL_GETSET compression_required_ratio | expect_false
grep '.'
2077 ceph osd pool get
$TEST_POOL_GETSET csum_type | expect_false
grep '.'
2078 ceph osd pool
set $TEST_POOL_GETSET csum_type crc32c
2079 ceph osd pool get
$TEST_POOL_GETSET csum_type |
grep 'crc32c'
2080 ceph osd pool
set $TEST_POOL_GETSET csum_type
unset
2081 ceph osd pool get
$TEST_POOL_GETSET csum_type | expect_false
grep '.'
2083 for size
in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block
; do
2084 ceph osd pool get
$TEST_POOL_GETSET $size | expect_false
grep '.'
2085 ceph osd pool
set $TEST_POOL_GETSET $size 100
2086 ceph osd pool get
$TEST_POOL_GETSET $size |
grep '100'
2087 ceph osd pool
set $TEST_POOL_GETSET $size 0
2088 ceph osd pool get
$TEST_POOL_GETSET $size | expect_false
grep '.'
2091 ceph osd pool
set $TEST_POOL_GETSET nodelete
1
2092 expect_false ceph osd pool delete
$TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2093 ceph osd pool
set $TEST_POOL_GETSET nodelete
0
2094 ceph osd pool delete
$TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
# Exercise pool options that are only valid on cache-tier pools: positive
# tests on a real tier pool, negative tests on a plain (non-tier) pool.
function test_mon_osd_tiered_pool_set()
{
  # this is really a tier pool
  ceph osd pool create real-tier 2
  ceph osd tier add rbd real-tier

  ceph osd pool set real-tier hit_set_type explicit_hash
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
  ceph osd pool set real-tier hit_set_type explicit_object
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
  ceph osd pool set real-tier hit_set_type bloom
  ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
  expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
  ceph osd pool set real-tier hit_set_period 123
  ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
  ceph osd pool set real-tier hit_set_count 12
  ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
  ceph osd pool set real-tier hit_set_fpp .01
  ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"

  ceph osd pool set real-tier target_max_objects 123
  ceph osd pool get real-tier target_max_objects | \
    grep 'target_max_objects:[ \t]\+123'
  ceph osd pool set real-tier target_max_bytes 123456
  ceph osd pool get real-tier target_max_bytes | \
    grep 'target_max_bytes:[ \t]\+123456'
  ceph osd pool set real-tier cache_target_dirty_ratio .123
  ceph osd pool get real-tier cache_target_dirty_ratio | \
    grep 'cache_target_dirty_ratio:[ \t]\+0.123'
  # ratios outside [0, 1] must be rejected
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
  ceph osd pool set real-tier cache_target_dirty_high_ratio .123
  ceph osd pool get real-tier cache_target_dirty_high_ratio | \
    grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
  ceph osd pool set real-tier cache_target_full_ratio .123
  ceph osd pool get real-tier cache_target_full_ratio | \
    grep 'cache_target_full_ratio:[ \t]\+0.123'
  # the osdmap stores the ratio in micro units (0.123 -> 123000)
  ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
  ceph osd pool set real-tier cache_target_full_ratio 1.0
  ceph osd pool set real-tier cache_target_full_ratio 0
  expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
  ceph osd pool set real-tier cache_min_flush_age 123
  ceph osd pool get real-tier cache_min_flush_age | \
    grep 'cache_min_flush_age:[ \t]\+123'
  ceph osd pool set real-tier cache_min_evict_age 234
  ceph osd pool get real-tier cache_min_evict_age | \
    grep 'cache_min_evict_age:[ \t]\+234'

  # this is not a tier pool
  ceph osd pool create fake-tier 2
  ceph osd pool application enable fake-tier rados

  # every tier-only option must fail on a non-tier pool
  expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type explicit_object
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type bloom
  expect_false ceph osd pool get fake-tier hit_set_type
  expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
  expect_false ceph osd pool set fake-tier hit_set_period 123
  expect_false ceph osd pool get fake-tier hit_set_period
  expect_false ceph osd pool set fake-tier hit_set_count 12
  expect_false ceph osd pool get fake-tier hit_set_count
  expect_false ceph osd pool set fake-tier hit_set_fpp .01
  expect_false ceph osd pool get fake-tier hit_set_fpp

  expect_false ceph osd pool set fake-tier target_max_objects 123
  expect_false ceph osd pool get fake-tier target_max_objects
  expect_false ceph osd pool set fake-tier target_max_bytes 123456
  expect_false ceph osd pool get fake-tier target_max_bytes
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
  expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
  expect_false ceph osd pool get fake-tier cache_target_full_ratio
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
  expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
  expect_false ceph osd pool set fake-tier cache_min_flush_age 123
  expect_false ceph osd pool get fake-tier cache_min_flush_age
  expect_false ceph osd pool set fake-tier cache_min_evict_age 234
  expect_false ceph osd pool get fake-tier cache_min_evict_age

  # cleanup
  ceph osd tier remove rbd real-tier
  ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
  ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
}
# Verify erasure-code-profile set/rm semantics: re-setting an identical
# profile is idempotent, changing an existing profile requires --force.
function test_mon_osd_erasure_code()
{
  ceph osd erasure-code-profile set fooprofile a=b c=d
  # setting the exact same profile again is allowed (idempotent)
  ceph osd erasure-code-profile set fooprofile a=b c=d
  # changing an existing profile without --force must fail
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
  ceph osd erasure-code-profile set fooprofile a=b c=d e=f
  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
  # ruleset-foo will work for luminous only
  ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
  ceph osd erasure-code-profile set barprofile crush-failure-domain=host
  # cleanup
  ceph osd erasure-code-profile rm fooprofile
  ceph osd erasure-code-profile rm barprofile
}
# Miscellaneous osd command argument validation: bad/missing arguments must
# produce the expected error text, and reweight-by-* bounds are enforced.
function test_mon_osd_misc()
{
  # NOTE(review): set +e/-e pair restored after lossy extraction — the
  # check_response calls require the script not to abort on failure.
  set +e

  # expect error about missing 'pool' argument
  ceph osd map 2>$TMPFILE; check_response 'pool' $? 22

  # expect error about unused argument foo
  ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22

  # expect "not in range" for invalid full ratio
  ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22

  # expect "not in range" for invalid overload percentage
  ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22

  set -e

  ceph osd reweight-by-utilization 110
  ceph osd reweight-by-utilization 110 .5
  # max_change of 0 or negative must be rejected
  expect_false ceph osd reweight-by-utilization 110 0
  expect_false ceph osd reweight-by-utilization 110 -0.1
  ceph osd test-reweight-by-utilization 110 .5 --no-increasing
  ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
  expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
  ceph osd reweight-by-pg 110
  ceph osd test-reweight-by-pg 110 .5
  ceph osd reweight-by-pg 110 rbd
  ceph osd reweight-by-pg 110 .5 rbd
  expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
}
# Drive the monitor's tcmalloc heap profiler commands; skipped entirely when
# the build has tcmalloc disabled.
# NOTE(review): interior lines (do_test init, fi, set -e, heap dump/release)
# were lost in extraction and restored from the visible control flow — verify.
function test_mon_heap_profiler()
{
  do_test=1
  set +e
  # expect 'heap' commands to be correctly parsed
  ceph heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi
  set -e

  [[ $do_test -eq 0 ]] && return 0

  ceph heap start_profiler
  ceph heap dump
  ceph heap stop_profiler
  ceph heap release
}
# Same heap-profiler exercise as test_mon_heap_profiler, but through the
# osd.0 admin socket instead of the mon command path.
# NOTE(review): interior lines (do_test init, fi, set -e) were lost in
# extraction and restored from the visible control flow — verify.
function test_admin_heap_profiler()
{
  do_test=1
  set +e
  # expect 'heap' commands to be correctly parsed
  ceph heap stats 2>$TMPFILE
  if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
    echo "tcmalloc not enabled; skip heap profiler test"
    do_test=0
  fi
  set -e

  [[ $do_test -eq 0 ]] && return 0

  local admin_socket=$(get_admin_socket osd.0)

  $SUDO ceph --admin-daemon $admin_socket heap start_profiler
  $SUDO ceph --admin-daemon $admin_socket heap dump
  $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
  $SUDO ceph --admin-daemon $admin_socket heap release
}
# Verify 'osd bench' enforces its configured block-size/IOPS/throughput
# limits by injecting known values and probing just above/below each bound.
function test_osd_bench()
{
  # test osd bench limits
  # As we should not rely on defaults (as they may change over time),
  # lets inject some values and perform some simple tests
  # max iops: 10              # 100 IOPS
  # max throughput: 10485760  # 10MB/s
  # max block size: 2097152   # 2MB
  # duration: 10              # 10 seconds

  # NOTE(review): the opening of this assignment was lost in extraction;
  # restored to match the ${args## } expansion below — verify.
  local args="\
    --osd-bench-duration 10 \
    --osd-bench-max-block-size 2097152 \
    --osd-bench-large-size-max-throughput 10485760 \
    --osd-bench-small-size-max-iops 10"
  ceph tell osd.0 injectargs ${args## }

  # anything with a bs larger than 2097152 must fail
  expect_false ceph tell osd.0 bench 1 2097153
  # but using 'osd_bench_max_bs' must succeed
  ceph tell osd.0 bench 1 2097152

  # we assume 1MB as a large bs; anything lower is a small bs
  # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
  # max count: 409600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 409601 4096
  # but 409600 must be succeed
  ceph tell osd.0 bench 409600 4096

  # for a large bs, we are limited by throughput.
  # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
  # the max count will be (10MB * 10s) = 100MB
  # max count: 104857600 (bytes)

  # more than max count must not be allowed
  expect_false ceph tell osd.0 bench 104857601 2097152
  # up to max count must be allowed
  ceph tell osd.0 bench 104857600 2097152
}
# A negative filestore_merge_threshold (disables merging) must be accepted
# via the daemon config interface and read back unchanged.
function test_osd_negative_filestore_merge_threshold()
{
  $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
  expect_config_value "osd.0" "filestore_merge_threshold" -1
}
# 'ceph tell mon.X' must reach the named monitor (and only a real one), and
# each dispatch must show up in the audit log.
function test_mon_tell()
{
  ceph tell mon.a version
  ceph tell mon.b version
  expect_false ceph tell mon.foo version

  ceph_watch_start debug audit
  ceph tell mon.a version
  ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'

  ceph_watch_start debug audit
  ceph tell mon.b version
  ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
}
# 'ceph ping' must succeed for existing monitors and fail for unknown ones.
# NOTE(review): the positive ping lines were lost in extraction and restored
# from context — verify against upstream.
function test_mon_ping()
{
  ceph ping mon.a
  ceph ping mon.b
  expect_false ceph ping mon.foo

  ceph ping mon '*'
}
# With mon_debug_deprecated_as_obsolete set, every deprecated mon command
# must fail with an "obsolete" error instead of executing.
function test_mon_deprecated_commands()
{
  # current DEPRECATED commands are:
  #  ceph compact
  #  ceph scrub
  #  ceph sync force
  #
  # Testing should be accomplished by setting
  # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
  # each one of these commands.

  ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
  expect_false ceph tell mon.a compact 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a scrub 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  expect_false ceph tell mon.a sync force 2> $TMPFILE
  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

  # restore the default so later tests can use deprecated commands
  ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
}
# Check 'ceph df detail' accounting: for a size-2 replicated pool, the pool's
# raw_bytes_used must equal bytes_used * 2.
# NOTE(review): the sleep/done of the wait loop and the flush_pg_stats call
# were lost in extraction and restored from context — verify.
function test_mon_cephdf_commands()
{
  # RAW USED The near raw used per pool in raw total

  ceph osd pool create cephdf_for_test 32 32 replicated
  ceph osd pool application enable cephdf_for_test rados
  ceph osd pool set cephdf_for_test size 2

  dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
  rados put cephdf_for_test cephdf_for_test -p cephdf_for_test

  # wait for the object to become visible
  for i in `seq 1 10`; do
    rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
    sleep 1
  done

  # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
  # to sync mon with osd
  flush_pg_stats

  local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
  cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
  raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`

  ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
  rm ./cephdf_for_test

  expect_false test $cal_raw_used_size != $raw_used_size
}
# Exercise the pool application enable/set/get/rm/disable lifecycle and the
# guards (--yes-i-really-mean-it) around multi-application pools.
function test_mon_pool_application()
{
  ceph osd pool create app_for_test 10

  ceph osd pool application enable app_for_test rbd
  # enabling a second application requires the override flag
  expect_false ceph osd pool application enable app_for_test rgw
  ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool ls detail | grep "application rbd,rgw"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'

  # keys may only be set on enabled applications
  expect_false ceph osd pool application set app_for_test cephfs key value
  ceph osd pool application set app_for_test rbd key1 value1
  ceph osd pool application set app_for_test rbd key2 value2
  ceph osd pool application set app_for_test rgw key1 value1
  ceph osd pool application get app_for_test rbd key1 | grep 'value1'
  ceph osd pool application get app_for_test rbd key2 | grep 'value2'
  ceph osd pool application get app_for_test rgw key1 | grep 'value1'

  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'

  ceph osd pool application rm app_for_test rgw key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key2
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
  ceph osd pool application rm app_for_test rbd key1 # should be idempotent

  expect_false ceph osd pool application disable app_for_test rgw
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
  ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
  ceph osd pool ls detail | grep "application rbd"
  ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'

  # BUGFIX: this disable must target rbd (rgw is already disabled above);
  # the two asserts below require that NO application remains on the pool.
  ceph osd pool application disable app_for_test rbd --yes-i-really-mean-it
  ceph osd pool ls detail | grep -v "application "
  ceph osd pool ls detail --format=json | grep '"application_metadata":{}'

  ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
}
# 'tell help' must work for a real monitor and fail for a non-existent one.
function test_mon_tell_help_command()
{
  ceph tell mon.a help

  expect_false ceph tell mon.zzz help
}
# config-key must accept its value from stdin (-i -) and emit it to stdout
# (-o -); round-trip a known value and verify it appears exactly once.
function test_mon_stdin_stdout()
{
  echo foo | ceph config-key set test_key -i -
  ceph config-key get test_key -o - | grep -c foo | grep -q 1
}
# 'tell help' must work for a live OSD and fail for a non-existent id.
function test_osd_tell_help_command()
{
  ceph tell osd.1 help
  expect_false ceph tell osd.100 help
}
# OSD compaction must be reachable both via 'tell' and the admin socket.
function test_osd_compact()
{
  ceph tell osd.1 compact
  $SUDO ceph daemon osd.1 compact
}
# Stand up a throwaway cephfs, verify 'tell mds.X help' works for a real MDS
# and fails for an unknown one, then tear the filesystem down again.
# NOTE(review): the early-return/fi and remove_all_fs lines were lost in
# extraction and restored from context — verify against upstream.
function test_mds_tell_help_command()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  remove_all_fs
  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  ceph tell mds.a help
  expect_false ceph tell mds.z help

  remove_all_fs
  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
# Basic 'ceph tell mgr' smoke test.
# NOTE(review): the 'help' line was lost in extraction and restored from
# context — verify against upstream.
function test_mgr_tell()
{
  ceph tell mgr help
  #ceph tell mgr fs status # see http://tracker.ceph.com/issues/20761
  ceph tell mgr osd status
}
2513 # New tests should be added to the TESTS array below
2515 # Individual tests may be run using the '-t <testname>' argument
2516 # The user can specify '-t <testname>' as many times as she wants
2518 # Tests will be run in order presented in the TESTS array, or in
2519 # the order specified by the '-t <testname>' options.
2521 # '-l' will list all the available test names
2522 # '-h' will show usage
2524 # The test maintains backward compatibility: not specifying arguments
2525 # will run all tests following the order they appear in the TESTS array.
# Test registry: the runner executes test_<name> for each entry, grouped by
# the daemon type it exercises (mon / osd / mds / mgr).
MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
for i in `seq 9`; do
  MON_TESTS+=" tiering_$i";
done
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
MON_TESTS+=" mon_config_key"
MON_TESTS+=" mon_crush"
MON_TESTS+=" mon_osd_create_destroy"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_pool_set"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
MON_TESTS+=" mon_heap_profiler"
MON_TESTS+=" mon_tell"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"
MON_TESTS+=" mon_tell_help_command"
MON_TESTS+=" mon_stdin_stdout"

OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"
OSD_TESTS+=" admin_heap_profiler"
OSD_TESTS+=" osd_tell_help_command"
OSD_TESTS+=" osd_compact"

MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"
MDS_TESTS+=" mds_tell_help_command"

MGR_TESTS+=" mgr_tell"
2581 function list_tests
()
2583 echo "AVAILABLE TESTS"
2591 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2598 while [[ $# -gt 0 ]]; do
2605 "--asok-does-not-need-root" )
2608 "--no-sanity-check" )
2612 tests_to_run
+="$MON_TESTS"
2615 tests_to_run
+="$OSD_TESTS"
2618 tests_to_run
+="$MDS_TESTS"
2621 tests_to_run
+="$MGR_TESTS"
2625 if [[ -z "$1" ]]; then
2626 echo "missing argument to '-t'"
2640 if [[ $do_list -eq 1 ]]; then
2645 ceph osd pool create rbd
10
2647 if test -z "$tests_to_run" ; then
2648 tests_to_run
="$TESTS"
2651 if $sanity_check ; then
2654 for i
in $tests_to_run; do
2655 if $sanity_check ; then
2662 if $sanity_check ; then