1 #!/bin/bash -x
2 # -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
3 # vim: ts=8 sw=8 ft=bash smarttab
4
5 source $(dirname $0)/../../standalone/ceph-helpers.sh
6
7 set -e
8 set -o functrace
9 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
10 SUDO=${SUDO:-sudo}
11 export CEPH_DEV=1
12
13 function get_admin_socket()
14 {
15 local client=$1
16
17 if test -n "$CEPH_ASOK_DIR";
18 then
19 echo $(get_asok_dir)/$client.asok
20 else
21 local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
22 echo "/var/run/ceph/$cluster-$client.asok"
23 fi
24 }
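# Illustrative result (not executed; assumes CEPH_ASOK_DIR is unset and the
# default cluster name "ceph" in CEPH_ARGS):
#   get_admin_socket osd.0   ->   /var/run/ceph/ceph-osd.0.asok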
25
26 function check_no_osd_down()
27 {
28 ! ceph osd dump | grep ' down '
29 }
30
31 function wait_no_osd_down()
32 {
33 max_run=300
34 for i in $(seq 1 $max_run) ; do
35 if ! check_no_osd_down ; then
36 echo "waiting for osd(s) to come back up ($i/$max_run)"
37 sleep 1
38 else
39 break
40 fi
41 done
42 check_no_osd_down
43 }
44
45 function expect_false()
46 {
47 set -x
48 if "$@"; then return 1; else return 0; fi
49 }
50
51
52 TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
53 trap "rm -fr $TEMP_DIR" 0
54
55 TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
56
57 #
58 # retry_eagain max cmd args ...
59 #
60 # retry cmd args ... if it exits on error and its output contains the
61 # string EAGAIN, at most $max times
62 #
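# Illustrative usage (hypothetical invocation, not part of the test flow):
#   retry_eagain 5 ceph osd dump
# re-runs "ceph osd dump" up to 5 times while it exits non-zero with EAGAIN
# in its output, then prints the captured output and returns the last status.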
63 function retry_eagain()
64 {
65 local max=$1
66 shift
67 local status
68 local tmpfile=$TEMP_DIR/retry_eagain.$$
69 local count
70 for count in $(seq 1 $max) ; do
71 status=0
72 "$@" > $tmpfile 2>&1 || status=$?
73 if test $status = 0 ||
74 ! grep --quiet EAGAIN $tmpfile ; then
75 break
76 fi
77 sleep 1
78 done
79 if test $count = $max ; then
80 echo retried with non-zero exit status, $max times: "$@" >&2
81 fi
82 cat $tmpfile
83 rm $tmpfile
84 return $status
85 }
86
87 #
88 # map_enxio_to_eagain cmd arg ...
89 #
90 # add EAGAIN to the output of cmd arg ... if the output contains
91 # ENXIO.
92 #
93 function map_enxio_to_eagain()
94 {
95 local status=0
96 local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
97
98 "$@" > $tmpfile 2>&1 || status=$?
99 if test $status != 0 &&
100 grep --quiet ENXIO $tmpfile ; then
101 echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
102 fi
103 cat $tmpfile
104 rm $tmpfile
105 return $status
106 }
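# Illustrative combination of both helpers, as used later in this file for
# "ceph tell osd.$id version": ENXIO output gets an EAGAIN line appended, so
# retry_eagain keeps retrying while the target daemon is still coming up:
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version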
107
108 function check_response()
109 {
110 expected_string=$1
111 retcode=$2
112 expected_retcode=$3
113 if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
114 echo "return code invalid: got $retcode, expected $expected_retcode" >&2
115 exit 1
116 fi
117
118 if ! grep --quiet -- "$expected_string" $TMPFILE ; then
119 echo "Didn't find $expected_string in output" >&2
120 cat $TMPFILE >&2
121 exit 1
122 fi
123 }
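# Typical pattern (taken from the EC-pool checks further below): capture
# stderr, then assert both the message and the expected errno, e.g.
#   set +e
#   ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
#   check_response 'erasure-code' $? 22
#   set -e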
124
125 function get_config_value_or_die()
126 {
127 local target config_opt raw val
128
129 target=$1
130 config_opt=$2
131
132 raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
133 if [[ $? -ne 0 ]]; then
134 echo "error obtaining config opt '$config_opt' from '$target': $raw"
135 exit 1
136 fi
137
138 raw=`echo $raw | sed -e 's/[{} "]//g'`
139 val=`echo $raw | cut -f2 -d:`
140
141 echo "$val"
142 return 0
143 }
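# Worked example (value is illustrative): "ceph daemon mon.a config get
# mon_pg_warn_min_objects" prints JSON like {"mon_pg_warn_min_objects":"10000"},
# which the sed/cut pipeline above strips down to the bare value 10000.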
144
145 function expect_config_value()
146 {
147 local target config_opt expected_val val
148 target=$1
149 config_opt=$2
150 expected_val=$3
151
152 val=$(get_config_value_or_die $target $config_opt)
153
154 if [[ "$val" != "$expected_val" ]]; then
155 echo "expected '$expected_val', got '$val'"
156 exit 1
157 fi
158 }
159
160 function ceph_watch_start()
161 {
162 local watch_opt=--watch
163 
164 if [ -n "$1" ]; then
165 watch_opt=--watch-$1
166 if [ -n "$2" ]; then
167 watch_opt+=" --watch-channel $2"
168 fi
169 fi
170 
171 CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
172 ceph $watch_opt > $CEPH_WATCH_FILE &
173 CEPH_WATCH_PID=$!
174
175 # wait until the "ceph" client is connected and receiving
176 # log messages from monitor
177 for i in `seq 3`; do
178 grep -q "cluster" $CEPH_WATCH_FILE && break
179 sleep 1
180 done
181 }
182
183 function ceph_watch_wait()
184 {
185 local regexp=$1
186 local timeout=30
187
188 if [ -n "$2" ]; then
189 timeout=$2
190 fi
191
192 for i in `seq ${timeout}`; do
193 grep -q "$regexp" $CEPH_WATCH_FILE && break
194 sleep 1
195 done
196
197 kill $CEPH_WATCH_PID
198
199 if ! grep "$regexp" $CEPH_WATCH_FILE; then
200 echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
201 cat $CEPH_WATCH_FILE >&2
202 return 1
203 fi
204 }
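# Typical pairing (illustrative; the same pattern is used in test_mon_misc):
#   ceph_watch_start              # start capturing the cluster log
#   ceph log "my marker"          # emit a log entry
#   ceph_watch_wait "my marker"   # block until it appears, then stop watching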
205
206 function test_mon_injectargs()
207 {
208 CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
209 check_response "osd_enable_op_tracker = 'false'"
210 ! grep "the.dump" $TMPFILE || return 1
211 ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
212 check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
213 ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
214 check_response "osd_enable_op_tracker = 'false'"
215 ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
216 check_response "osd_enable_op_tracker = 'true'"
217 ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
218 check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
219 expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
220 ceph tell osd.0 injectargs -- '--osd_op_history_duration'
221
222 ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200' >& $TMPFILE || return 1
223 check_response "osd_deep_scrub_interval = '2419200.000000' (not observed, change may require restart)"
224
225 ceph tell osd.0 injectargs -- '--mon_probe_timeout 2' >& $TMPFILE || return 1
226 check_response "mon_probe_timeout = '2.000000' (not observed, change may require restart)"
227
228 ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
229 check_response "mon_lease = '6.000000' (not observed, change may require restart)"
230
231 # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
232 expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 >& $TMPFILE || return 1
233 check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
234 }
235
236 function test_mon_injectargs_SI()
237 {
238 # Test SI units during injectargs and 'config set'
239 # We only aim at testing the units are parsed accordingly
240 # and don't intend to test whether the options being set
241 # actually expect SI units to be passed.
242 # Keep in mind that all integer based options (i.e., INT,
243 # LONG, U32, U64) will accept SI unit modifiers.
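# Worked numbers for the checks below: the SI suffixes are base-2
# multipliers, so 10K -> 10 * 1024 = 10240 and 1G -> 1024^3 = 1073741824,
# while an unknown suffix such as 10F is rejected with EINVAL.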
244 initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
245 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
246 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
247 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
248 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
249 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
250 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
251 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
252 check_response "'10F': (22) Invalid argument"
253 # now test with injectargs
254 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
255 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
256 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
257 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
258 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
259 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
260 expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
261 expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
262 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
263 }
264
265 function test_tiering_agent()
266 {
267 local slow=slow_eviction
268 local fast=fast_eviction
269 ceph osd pool create $slow 1 1
270 ceph osd pool application enable $slow rados
271 ceph osd pool create $fast 1 1
272 ceph osd tier add $slow $fast
273 ceph osd tier cache-mode $fast writeback
274 ceph osd tier set-overlay $slow $fast
275 ceph osd pool set $fast hit_set_type bloom
276 rados -p $slow put obj1 /etc/group
277 ceph osd pool set $fast target_max_objects 1
278 ceph osd pool set $fast hit_set_count 1
279 ceph osd pool set $fast hit_set_period 5
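# With target_max_objects=1, a single hit_set and a 5s period, the tiering
# agent should start flushing/evicting as soon as one object lands in the
# cache pool, so the loop below only needs to poll "rados ls" until obj1
# disappears (illustrative reading of the knobs set above).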
280 # wait for the object to be evicted from the cache
281 local evicted
282 evicted=false
283 for i in `seq 1 300` ; do
284 if ! rados -p $fast ls | grep obj1 ; then
285 evicted=true
286 break
287 fi
288 sleep 1
289 done
290 $evicted # assert
291 # the object is proxy-read and promoted to the cache
292 rados -p $slow get obj1 - >/dev/null
293 # wait for the promoted object to be evicted again
294 evicted=false
295 for i in `seq 1 300` ; do
296 if ! rados -p $fast ls | grep obj1 ; then
297 evicted=true
298 break
299 fi
300 sleep 1
301 done
302 $evicted # assert
303 ceph osd tier remove-overlay $slow
304 ceph osd tier remove $slow $fast
305 ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
306 ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
307 }
308
309 function test_tiering_1()
310 {
311 # tiering
312 ceph osd pool create slow 2
313 ceph osd pool application enable slow rados
314 ceph osd pool create slow2 2
315 ceph osd pool application enable slow2 rados
316 ceph osd pool create cache 2
317 ceph osd pool create cache2 2
318 ceph osd tier add slow cache
319 ceph osd tier add slow cache2
320 expect_false ceph osd tier add slow2 cache
321 # test some state transitions
322 ceph osd tier cache-mode cache writeback
323 expect_false ceph osd tier cache-mode cache forward
324 ceph osd tier cache-mode cache forward --yes-i-really-mean-it
325 expect_false ceph osd tier cache-mode cache readonly
326 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
327 expect_false ceph osd tier cache-mode cache forward
328 ceph osd tier cache-mode cache forward --yes-i-really-mean-it
329 ceph osd tier cache-mode cache none
330 ceph osd tier cache-mode cache writeback
331 ceph osd tier cache-mode cache proxy
332 ceph osd tier cache-mode cache writeback
333 expect_false ceph osd tier cache-mode cache none
334 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
335 # test with dirty objects in the tier pool
336 # tier pool currently set to 'writeback'
337 rados -p cache put /etc/passwd /etc/passwd
338 flush_pg_stats
339 # 1 dirty object in pool 'cache'
340 ceph osd tier cache-mode cache proxy
341 expect_false ceph osd tier cache-mode cache none
342 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
343 ceph osd tier cache-mode cache writeback
344 # remove object from tier pool
345 rados -p cache rm /etc/passwd
346 rados -p cache cache-flush-evict-all
347 flush_pg_stats
348 # no dirty objects in pool 'cache'
349 ceph osd tier cache-mode cache proxy
350 ceph osd tier cache-mode cache none
351 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
352 TRIES=0
353 while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
354 do
355 grep 'currently creating pgs' $TMPFILE
356 TRIES=$(( $TRIES + 1 ))
357 test $TRIES -ne 60
358 sleep 3
359 done
360 expect_false ceph osd pool set cache pg_num 4
361 ceph osd tier cache-mode cache none
362 ceph osd tier set-overlay slow cache
363 expect_false ceph osd tier set-overlay slow cache2
364 expect_false ceph osd tier remove slow cache
365 ceph osd tier remove-overlay slow
366 ceph osd tier set-overlay slow cache2
367 ceph osd tier remove-overlay slow
368 ceph osd tier remove slow cache
369 ceph osd tier add slow2 cache
370 expect_false ceph osd tier set-overlay slow cache
371 ceph osd tier set-overlay slow2 cache
372 ceph osd tier remove-overlay slow2
373 ceph osd tier remove slow2 cache
374 ceph osd tier remove slow cache2
375
376 # make sure a non-empty pool fails
377 rados -p cache2 put /etc/passwd /etc/passwd
378 while ! ceph df | grep cache2 | grep ' 1 ' ; do
379 echo waiting for pg stats to flush
380 sleep 2
381 done
382 expect_false ceph osd tier add slow cache2
383 ceph osd tier add slow cache2 --force-nonempty
384 ceph osd tier remove slow cache2
385
386 ceph osd pool ls | grep cache2
387 ceph osd pool ls -f json-pretty | grep cache2
388 ceph osd pool ls detail | grep cache2
389 ceph osd pool ls detail -f json-pretty | grep cache2
390
391 ceph osd pool delete slow slow --yes-i-really-really-mean-it
392 ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
393 ceph osd pool delete cache cache --yes-i-really-really-mean-it
394 ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
395 }
396
397 function test_tiering_2()
398 {
399 # make sure we can't clobber snapshot state
400 ceph osd pool create snap_base 2
401 ceph osd pool application enable snap_base rados
402 ceph osd pool create snap_cache 2
403 ceph osd pool mksnap snap_cache snapname
404 expect_false ceph osd tier add snap_base snap_cache
405 ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
406 ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
407 }
408
409 function test_tiering_3()
410 {
411 # make sure we can't create snapshot on tier
412 ceph osd pool create basex 2
413 ceph osd pool application enable basex rados
414 ceph osd pool create cachex 2
415 ceph osd tier add basex cachex
416 expect_false ceph osd pool mksnap cachex snapname
417 ceph osd tier remove basex cachex
418 ceph osd pool delete basex basex --yes-i-really-really-mean-it
419 ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
420 }
421
422 function test_tiering_4()
423 {
424 # make sure we can't create an ec pool tier
425 ceph osd pool create eccache 2 2 erasure
426 expect_false ceph osd set-require-min-compat-client bobtail
427 ceph osd pool create repbase 2
428 ceph osd pool application enable repbase rados
429 expect_false ceph osd tier add repbase eccache
430 ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
431 ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
432 }
433
434 function test_tiering_5()
435 {
436 # convenient add-cache command
437 ceph osd pool create slow 2
438 ceph osd pool application enable slow rados
439 ceph osd pool create cache3 2
440 ceph osd tier add-cache slow cache3 1024000
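# add-cache is expected to have applied the defaults that the next line
# greps for: a bloom hit_set ("1200s x4", i.e. 4 hit sets of 1200s each),
# false_positive_probability 0.05 and target_bytes equal to the 1024000
# passed above.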
441 ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
442 ceph osd tier remove slow cache3 2> $TMPFILE || true
443 check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
444 ceph osd tier remove-overlay slow
445 ceph osd tier remove slow cache3
446 ceph osd pool ls | grep cache3
447 ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
448 ! ceph osd pool ls | grep cache3 || exit 1
449 ceph osd pool delete slow slow --yes-i-really-really-mean-it
450 }
451
452 function test_tiering_6()
453 {
454 # check whether add-cache works
455 ceph osd pool create datapool 2
456 ceph osd pool application enable datapool rados
457 ceph osd pool create cachepool 2
458 ceph osd tier add-cache datapool cachepool 1024000
459 ceph osd tier cache-mode cachepool writeback
460 rados -p datapool put object /etc/passwd
461 rados -p cachepool stat object
462 rados -p cachepool cache-flush object
463 rados -p datapool stat object
464 ceph osd tier remove-overlay datapool
465 ceph osd tier remove datapool cachepool
466 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
467 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
468 }
469
470 function test_tiering_7()
471 {
472 # protection against pool removal when used as tiers
473 ceph osd pool create datapool 2
474 ceph osd pool application enable datapool rados
475 ceph osd pool create cachepool 2
476 ceph osd tier add-cache datapool cachepool 1024000
477 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
478 check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
479 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
480 check_response "EBUSY: pool 'datapool' has tiers cachepool"
481 ceph osd tier remove-overlay datapool
482 ceph osd tier remove datapool cachepool
483 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
484 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
485 }
486
487 function test_tiering_8()
488 {
489 ## check health check
490 ceph osd set notieragent
491 ceph osd pool create datapool 2
492 ceph osd pool application enable datapool rados
493 ceph osd pool create cache4 2
494 ceph osd tier add-cache datapool cache4 1024000
495 ceph osd tier cache-mode cache4 writeback
496 tmpfile=$(mktemp|grep tmp)
497 dd if=/dev/zero of=$tmpfile bs=4K count=1
498 ceph osd pool set cache4 target_max_objects 200
499 ceph osd pool set cache4 target_max_bytes 1000000
500 rados -p cache4 put foo1 $tmpfile
501 rados -p cache4 put foo2 $tmpfile
502 rm -f $tmpfile
503 flush_pg_stats
504 ceph df | grep datapool | grep ' 2 '
505 ceph osd tier remove-overlay datapool
506 ceph osd tier remove datapool cache4
507 ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
508 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
509 ceph osd unset notieragent
510 }
511
512 function test_tiering_9()
513 {
514 # make sure 'tier remove' behaves as we expect
515 # i.e., removing a tier from a pool that's not its base pool only
516 # results in a 'pool foo is now (or already was) not a tier of bar'
517 #
518 ceph osd pool create basepoolA 2
519 ceph osd pool application enable basepoolA rados
520 ceph osd pool create basepoolB 2
521 ceph osd pool application enable basepoolB rados
522 poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
523 poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
524
525 ceph osd pool create cache5 2
526 ceph osd pool create cache6 2
527 ceph osd tier add basepoolA cache5
528 ceph osd tier add basepoolB cache6
529 ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
530 ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
531 ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
532 ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
533
534 ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
535 ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
536 ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
537 ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
538
539 ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
540 ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
541
542 ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
543 ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
544 ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
545 ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
546 }
547
548 function test_auth()
549 {
550 ceph auth add client.xx mon allow osd "allow *"
551 ceph auth export client.xx >client.xx.keyring
552 ceph auth add client.xx -i client.xx.keyring
553 rm -f client.xx.keyring
554 ceph auth list | grep client.xx
555 ceph auth ls | grep client.xx
556 ceph auth get client.xx | grep caps | grep mon
557 ceph auth get client.xx | grep caps | grep osd
558 ceph auth get-key client.xx
559 ceph auth print-key client.xx
560 ceph auth print_key client.xx
561 ceph auth caps client.xx osd "allow rw"
562 expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
563 ceph auth get client.xx | grep osd | grep "allow rw"
564 ceph auth export | grep client.xx
565 ceph auth export -o authfile
566 ceph auth import -i authfile
567 ceph auth export -o authfile2
568 diff authfile authfile2
569 rm authfile authfile2
570 ceph auth del client.xx
571 expect_false ceph auth get client.xx
572
573 # (almost) interactive mode
574 echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
575 ceph auth get client.xx
576 # script mode
577 echo 'auth del client.xx' | ceph
578 expect_false ceph auth get client.xx
579
580 #
581 # get / set auid
582 #
583 local auid=444
584 ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
585 expect_false ceph auth import --in-file TEST-keyring
586 rm TEST-keyring
587 ceph-authtool --create-keyring --name client.TEST --gen-key --cap mon "allow r" --set-uid $auid TEST-keyring
588 ceph auth import --in-file TEST-keyring
589 rm TEST-keyring
590 ceph auth get client.TEST > $TMPFILE
591 check_response "auid = $auid"
592 ceph --format json-pretty auth get client.TEST > $TMPFILE
593 check_response '"auid": '$auid
594 ceph auth ls > $TMPFILE
595 check_response "auid: $auid"
596 ceph --format json-pretty auth ls > $TMPFILE
597 check_response '"auid": '$auid
598 ceph auth del client.TEST
599 }
600
601 function test_auth_profiles()
602 {
603 ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
604 mgr 'allow profile read-only'
605 ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
606 mgr 'allow profile read-write'
607 ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
608
609 ceph auth export > client.xx.keyring
610
611 # read-only is allowed all read-only commands (auth excluded)
612 ceph -n client.xx-profile-ro -k client.xx.keyring status
613 ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
614 ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
615 ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
616 ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
617 # read-only gets access denied for rw commands or auth commands
618 ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
619 check_response "EACCES: access denied"
620 ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
621 check_response "EACCES: access denied"
622 ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
623 check_response "EACCES: access denied"
624
625 # read-write is allowed for all read-write commands (except auth)
626 ceph -n client.xx-profile-rw -k client.xx.keyring status
627 ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
628 ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
629 ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
630 ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
631 ceph -n client.xx-profile-rw -k client.xx.keyring log foo
632 ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
633 ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
634 # read-write gets access denied for auth commands
635 ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
636 check_response "EACCES: access denied"
637
638 # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
639 ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
640 ceph -n client.xx-profile-rd -k client.xx.keyring auth export
641 ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
642 ceph -n client.xx-profile-rd -k client.xx.keyring status
643 ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
644 check_response "EACCES: access denied"
645 ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
646 check_response "EACCES: access denied"
647 # read-only 'mon' subsystem commands are allowed
648 ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
649 # but read-write 'mon' commands are not
650 ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
651 check_response "EACCES: access denied"
652 ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
653 check_response "EACCES: access denied"
654 ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
655 check_response "EACCES: access denied"
656 ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
657 check_response "EACCES: access denied"
658
659 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
660 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
661
662 # add a new role-definer with the existing role-definer
663 ceph -n client.xx-profile-rd -k client.xx.keyring \
664 auth add client.xx-profile-rd2 mon 'allow profile role-definer'
665 ceph -n client.xx-profile-rd -k client.xx.keyring \
666 auth export > client.xx.keyring.2
667 # remove old role-definer using the new role-definer
668 ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
669 auth del client.xx-profile-rd
670 # remove the remaining role-definer with admin
671 ceph auth del client.xx-profile-rd2
672 rm -f client.xx.keyring client.xx.keyring.2
673 }
674
675 function test_mon_caps()
676 {
677 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
678 chmod +r $TEMP_DIR/ceph.client.bug.keyring
679 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
680 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
681
682 rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
683 check_response "Permission denied"
684
685 rm -rf $TEMP_DIR/ceph.client.bug.keyring
686 ceph auth del client.bug
687 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
688 chmod +r $TEMP_DIR/ceph.client.bug.keyring
689 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
690 ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
691 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
692 rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
693 check_response "Permission denied"
694 }
695
696 function test_mon_misc()
697 {
698 # with and without verbosity
699 ceph osd dump | grep '^epoch'
700 ceph --concise osd dump | grep '^epoch'
701
702 ceph osd df | grep 'MIN/MAX VAR'
703
704 # df
705 ceph df > $TMPFILE
706 grep GLOBAL $TMPFILE
707 grep -v DIRTY $TMPFILE
708 ceph df detail > $TMPFILE
709 grep DIRTY $TMPFILE
710 ceph df --format json > $TMPFILE
711 grep 'total_bytes' $TMPFILE
712 grep -v 'dirty' $TMPFILE
713 ceph df detail --format json > $TMPFILE
714 grep 'rd_bytes' $TMPFILE
715 grep 'dirty' $TMPFILE
716 ceph df --format xml | grep '<total_bytes>'
717 ceph df detail --format xml | grep '<rd_bytes>'
718
719 ceph fsid
720 ceph health
721 ceph health detail
722 ceph health --format json-pretty
723 ceph health detail --format xml-pretty
724
725 ceph time-sync-status
726
727 ceph node ls
728 for t in mon osd mds ; do
729 ceph node ls $t
730 done
731
732 ceph_watch_start
733 mymsg="this is a test log message $$.$(date)"
734 ceph log "$mymsg"
735 ceph log last | grep "$mymsg"
736 ceph log last 100 | grep "$mymsg"
737 ceph_watch_wait "$mymsg"
738
739 ceph mgr dump
740 ceph mgr module ls
741 ceph mgr module enable restful
742 expect_false ceph mgr module enable foodne
743 ceph mgr module enable foodne --force
744 ceph mgr module disable foodne
745 ceph mgr module disable foodnebizbangbash
746
747 ceph mon metadata a
748 ceph mon metadata
749 ceph mon count-metadata ceph_version
750 ceph mon versions
751
752 ceph mgr metadata
753 ceph mgr versions
754 ceph mgr count-metadata ceph_version
755
756 ceph versions
757
758 ceph node ls
759 }
760
761 function check_mds_active()
762 {
763 fs_name=$1
764 ceph fs get $fs_name | grep active
765 }
766
767 function wait_mds_active()
768 {
769 fs_name=$1
770 max_run=300
771 for i in $(seq 1 $max_run) ; do
772 if ! check_mds_active $fs_name ; then
773 echo "waiting for an active MDS daemon ($i/$max_run)"
774 sleep 5
775 else
776 break
777 fi
778 done
779 check_mds_active $fs_name
780 }
781
782 function get_mds_gids()
783 {
784 fs_name=$1
785 ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
786 }
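# Illustrative output (GIDs are made up): for a filesystem with two MDS
# daemons this prints something like "4107 4112" -- one numeric GID per
# entry in the mdsmap "info" map.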
787
788 function fail_all_mds()
789 {
790 fs_name=$1
791 ceph fs set $fs_name cluster_down true
792 mds_gids=$(get_mds_gids $fs_name)
793 for mds_gid in $mds_gids ; do
794 ceph mds fail $mds_gid
795 done
796 if check_mds_active $fs_name ; then
797 echo "An active MDS remains, something went wrong"
798 ceph fs get $fs_name
799 exit -1
800 fi
801
802 }
803
804 function remove_all_fs()
805 {
806 existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
807 for fs_name in $existing_fs ; do
808 echo "Removing fs ${fs_name}..."
809 fail_all_mds $fs_name
810 echo "Removing existing filesystem '${fs_name}'..."
811 ceph fs rm $fs_name --yes-i-really-mean-it
812 echo "Removed '${fs_name}'."
813 done
814 }
815
816 # So that tests requiring MDS can skip if one is not configured
817 # in the cluster at all
818 function mds_exists()
819 {
820 ceph auth ls | grep "^mds"
821 }
822
823 # some of the commands are just not idempotent.
824 function without_test_dup_command()
825 {
826 if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
827 $@
828 else
829 local saved=${CEPH_CLI_TEST_DUP_COMMAND}
830 unset CEPH_CLI_TEST_DUP_COMMAND
831 $@
832 CEPH_CLI_TEST_DUP_COMMAND=$saved
833 fi
834 }
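# Example (hypothetical): "without_test_dup_command ceph tell mds.0 respawn"
# runs the respawn exactly once even when CEPH_CLI_TEST_DUP_COMMAND is set,
# since respawn is not idempotent; the variable is restored afterwards.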
835
836 function test_mds_tell()
837 {
838 local FS_NAME=cephfs
839 if ! mds_exists ; then
840 echo "Skipping test, no MDS found"
841 return
842 fi
843
844 remove_all_fs
845 ceph osd pool create fs_data 10
846 ceph osd pool create fs_metadata 10
847 ceph fs new $FS_NAME fs_metadata fs_data
848 wait_mds_active $FS_NAME
849
850 # Test injectargs by GID
851 old_mds_gids=$(get_mds_gids $FS_NAME)
852 echo Old GIDs: $old_mds_gids
853
854 for mds_gid in $old_mds_gids ; do
855 ceph tell mds.$mds_gid injectargs "--debug-mds 20"
856 done
857 expect_false ceph tell mds.a injectargs mds_max_file_recover -1
858
859 # Test respawn by rank
860 without_test_dup_command ceph tell mds.0 respawn
861 new_mds_gids=$old_mds_gids
862 while [ "$new_mds_gids" = "$old_mds_gids" ] ; do
863 sleep 5
864 new_mds_gids=$(get_mds_gids $FS_NAME)
865 done
866 echo New GIDs: $new_mds_gids
867
868 # Test respawn by ID
869 without_test_dup_command ceph tell mds.a respawn
870 new_mds_gids=$old_mds_gids
871 while [ "$new_mds_gids" = "$old_mds_gids" ] ; do
872 sleep 5
873 new_mds_gids=$(get_mds_gids $FS_NAME)
874 done
875 echo New GIDs: $new_mds_gids
876
877 remove_all_fs
878 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
879 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
880 }
881
882 function test_mon_mds()
883 {
884 local FS_NAME=cephfs
885 remove_all_fs
886
887 ceph osd pool create fs_data 10
888 ceph osd pool create fs_metadata 10
889 ceph fs new $FS_NAME fs_metadata fs_data
890
891 ceph fs set $FS_NAME cluster_down true
892 ceph fs set $FS_NAME cluster_down false
893
894 # Legacy commands, act on default fs
895 ceph mds cluster_down
896 ceph mds cluster_up
897
898 ceph mds compat rm_incompat 4
899 ceph mds compat rm_incompat 4
900
901 # We don't want any MDSs to be up; their activity can interfere with
902 # the "current_epoch + 1" checking below if they're generating updates
903 fail_all_mds $FS_NAME
904
905 ceph mds compat show
906 expect_false ceph mds deactivate 2
907 ceph mds dump
908 ceph fs dump
909 ceph fs get $FS_NAME
910 for mds_gid in $(get_mds_gids $FS_NAME) ; do
911 ceph mds metadata $mds_gid
912 done
913 ceph mds metadata
914 ceph mds versions
915 ceph mds count-metadata os
916
917 # XXX mds fail, but how do you undo it?
918 mdsmapfile=$TEMP_DIR/mdsmap.$$
919 current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
920 [ -s $mdsmapfile ]
921 rm $mdsmapfile
922
923 ceph osd pool create data2 10
924 ceph osd pool create data3 10
925 data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
926 data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
927 ceph mds add_data_pool $data2_pool
928 ceph mds add_data_pool $data3_pool
929 ceph mds add_data_pool 100 >& $TMPFILE || true
930 check_response "Error ENOENT"
931 ceph mds add_data_pool foobarbaz >& $TMPFILE || true
932 check_response "Error ENOENT"
933 ceph mds remove_data_pool $data2_pool
934 ceph mds remove_data_pool $data3_pool
935 ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
936 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
937 ceph mds set allow_multimds false
938 expect_false ceph mds set_max_mds 4
939 ceph mds set allow_multimds true
940 ceph mds set_max_mds 4
941 ceph mds set_max_mds 3
942 ceph mds set_max_mds 256
943 expect_false ceph mds set_max_mds 257
944 ceph mds set max_mds 4
945 ceph mds set max_mds 256
946 expect_false ceph mds set max_mds 257
947 expect_false ceph mds set max_mds asdf
948 expect_false ceph mds set inline_data true
949 ceph mds set inline_data true --yes-i-really-mean-it
950 ceph mds set inline_data yes --yes-i-really-mean-it
951 ceph mds set inline_data 1 --yes-i-really-mean-it
952 expect_false ceph mds set inline_data --yes-i-really-mean-it
953 ceph mds set inline_data false
954 ceph mds set inline_data no
955 ceph mds set inline_data 0
956 expect_false ceph mds set inline_data asdf
957 ceph mds set max_file_size 1048576
958 expect_false ceph mds set max_file_size 123asdf
959
960 expect_false ceph mds set allow_new_snaps
961 expect_false ceph mds set allow_new_snaps true
962 ceph mds set allow_new_snaps true --yes-i-really-mean-it
963 ceph mds set allow_new_snaps 0
964 ceph mds set allow_new_snaps false
965 ceph mds set allow_new_snaps no
966 expect_false ceph mds set allow_new_snaps taco
967
968 # we should never be able to add EC pools as data or metadata pools
969 # create an ec-pool...
970 ceph osd pool create mds-ec-pool 10 10 erasure
971 set +e
972 ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
973 check_response 'erasure-code' $? 22
974 set -e
975 ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
976 data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
977 metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
978
979 fail_all_mds $FS_NAME
980
981 set +e
982 # Check that rmfailed requires confirmation
983 expect_false ceph mds rmfailed 0
984 ceph mds rmfailed 0 --yes-i-really-mean-it
985 set -e
986
987 # Check that `newfs` is no longer permitted
988 expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
989
990 # Check that 'fs reset' runs
991 ceph fs reset $FS_NAME --yes-i-really-mean-it
992
993 # Check that creating a second FS fails by default
994 ceph osd pool create fs_metadata2 10
995 ceph osd pool create fs_data2 10
996 set +e
997 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
998 set -e
999
1000 # Check that setting enable_multiple enables creation of second fs
1001 ceph fs flag set enable_multiple true --yes-i-really-mean-it
1002 ceph fs new cephfs2 fs_metadata2 fs_data2
1003
1004 # Clean up multi-fs stuff
1005 fail_all_mds cephfs2
1006 ceph fs rm cephfs2 --yes-i-really-mean-it
1007 ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
1008 ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
1009
1010 fail_all_mds $FS_NAME
1011
1012 # Clean up to enable subsequent fs new tests
1013 ceph fs rm $FS_NAME --yes-i-really-mean-it
1014
1015 set +e
1016 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1017 check_response 'erasure-code' $? 22
1018 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1019 check_response 'erasure-code' $? 22
1020 ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
1021 check_response 'erasure-code' $? 22
1022 set -e
1023
1024 # ... now create a cache tier in front of the EC pool...
1025 ceph osd pool create mds-tier 2
1026 ceph osd tier add mds-ec-pool mds-tier
1027 ceph osd tier set-overlay mds-ec-pool mds-tier
1028 tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
1029
1030 # Use of a readonly tier should be forbidden
1031 ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
1032 set +e
1033 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1034 check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
1035 set -e
1036
1037 # Use of a writeback tier should enable FS creation
1038 ceph osd tier cache-mode mds-tier writeback
1039 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
1040
1041 # While a FS exists using the tiered pools, I should not be allowed
1042 # to remove the tier
1043 set +e
1044 ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
1045 check_response 'in use by CephFS' $? 16
1046 ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
1047 check_response 'in use by CephFS' $? 16
1048 set -e
1049
1050 fail_all_mds $FS_NAME
1051 ceph fs rm $FS_NAME --yes-i-really-mean-it
1052
1053 # ... but we should be forbidden from using the cache pool in the FS directly.
1054 set +e
1055 ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
1056 check_response 'in use as a cache tier' $? 22
1057 ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
1058 check_response 'in use as a cache tier' $? 22
1059 ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
1060 check_response 'in use as a cache tier' $? 22
1061 set -e
1062
1063 # Clean up tier + EC pools
1064 ceph osd tier remove-overlay mds-ec-pool
1065 ceph osd tier remove mds-ec-pool mds-tier
1066
1067 # Create a FS using the 'cache' pool now that it's no longer a tier
1068 ceph fs new $FS_NAME fs_metadata mds-tier --force
1069
1070 # We should be forbidden from using this pool as a tier now that
1071 # it's in use for CephFS
1072 set +e
1073 ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
1074 check_response 'in use by CephFS' $? 16
1075 set -e
1076
1077 fail_all_mds $FS_NAME
1078 ceph fs rm $FS_NAME --yes-i-really-mean-it
1079
1080 # We should be permitted to use an EC pool with overwrites enabled
1081 # as the data pool...
1082 ceph osd pool set mds-ec-pool allow_ec_overwrites true
1083 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1084 fail_all_mds $FS_NAME
1085 ceph fs rm $FS_NAME --yes-i-really-mean-it
1086
1087 # ...but not as the metadata pool
1088 set +e
1089 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1090 check_response 'erasure-code' $? 22
1091 set -e
1092
1093 ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
1094
1095 # Create a FS and check that we can subsequently add a cache tier to it
1096 ceph fs new $FS_NAME fs_metadata fs_data --force
1097
1098 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1099 ceph osd tier add fs_metadata mds-tier
1100 ceph osd tier cache-mode mds-tier writeback
1101 ceph osd tier set-overlay fs_metadata mds-tier
1102
1103 # Removing tier should be permitted because the underlying pool is
1104 # replicated (#11504 case)
1105 ceph osd tier cache-mode mds-tier proxy
1106 ceph osd tier remove-overlay fs_metadata
1107 ceph osd tier remove fs_metadata mds-tier
1108 ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
1109
1110 # Clean up FS
1111 fail_all_mds $FS_NAME
1112 ceph fs rm $FS_NAME --yes-i-really-mean-it
1113
1114
1115
1116 ceph mds stat
1117 # ceph mds tell mds.a getmap
1118 # ceph mds rm
1119 # ceph mds rmfailed
1120 # ceph mds set_state
1121 # ceph mds stop
1122
1123 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
1124 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
1125 }
1126
1127 function test_mon_mds_metadata()
1128 {
1129 local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
1130 test "$nmons" -gt 0
1131
1132 ceph mds dump |
1133 sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
1134 while read gid id rank; do
1135 ceph mds metadata ${gid} | grep '"hostname":'
1136 ceph mds metadata ${id} | grep '"hostname":'
1137 ceph mds metadata ${rank} | grep '"hostname":'
1138
1139 local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
1140 test "$n" -eq "$nmons"
1141 done
1142
1143 expect_false ceph mds metadata UNKNOWN
1144 }
1145
1146 function test_mon_mon()
1147 {
1148 # print help message
1149 ceph --help mon
1150 # no mon add/remove
1151 ceph mon dump
1152 ceph mon getmap -o $TEMP_DIR/monmap.$$
1153 [ -s $TEMP_DIR/monmap.$$ ]
1154 # ceph mon tell
1155 ceph mon_status
1156
1157 # test mon features
1158 ceph mon feature ls
1159 ceph mon feature set kraken --yes-i-really-mean-it
1160 expect_false ceph mon feature set abcd
1161 expect_false ceph mon feature set abcd --yes-i-really-mean-it
1162 }
1163
1164 function gen_secrets_file()
1165 {
1166 # let's assume we can have the following types
1167 # all - generates both cephx and lockbox, with mock dm-crypt key
1168 # cephx - only cephx
1169 # no_cephx - lockbox and dm-crypt, no cephx
1170 # no_lockbox - dm-crypt and cephx, no lockbox
1171 # empty - empty file
1172 # empty_json - correct json, empty map
1173 # bad_json - bad json :)
1174 #
1175 local t=$1
1176 if [[ -z "$t" ]]; then
1177 t="all"
1178 fi
1179
1180 fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
1181 echo $fn
1182 if [[ "$t" == "empty" ]]; then
1183 return 0
1184 fi
1185
1186 echo "{" > $fn
1187 if [[ "$t" == "bad_json" ]]; then
1188 echo "asd: ; }" >> $fn
1189 return 0
1190 elif [[ "$t" == "empty_json" ]]; then
1191 echo "}" >> $fn
1192 return 0
1193 fi
1194
1195 cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
1196 lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
1197 dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
1198
1199 if [[ "$t" == "all" ]]; then
1200 echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
1201 elif [[ "$t" == "cephx" ]]; then
1202 echo "$cephx_secret" >> $fn
1203 elif [[ "$t" == "no_cephx" ]]; then
1204 echo "$lb_secret,$dmcrypt_key" >> $fn
1205 elif [[ "$t" == "no_lockbox" ]]; then
1206 echo "$cephx_secret,$dmcrypt_key" >> $fn
1207 else
1208 echo "unknown gen_secrets_file() type \'$fn\'"
1209 return 1
1210 fi
1211 echo "}" >> $fn
1212 return 0
1213 }
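# Illustrative "all" output (keys shortened and fake): the generated file is
# a small JSON map such as
#   { "cephx_secret": "AQDE...", "cephx_lockbox_secret": "AQDF...", "dmcrypt_key": "AQDG..." }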
1214
1215 function test_mon_osd_create_destroy()
1216 {
1217 ceph osd new 2>&1 | grep 'EINVAL'
1218 ceph osd new '' -1 2>&1 | grep 'EINVAL'
1219 ceph osd new '' 10 2>&1 | grep 'EINVAL'
1220
1221 old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1222
1223 old_osds=$(ceph osd ls)
1224 num_osds=$(ceph osd ls | wc -l)
1225
1226 uuid=$(uuidgen)
1227 id=$(ceph osd new $uuid 2>/dev/null)
1228
1229 for i in $old_osds; do
1230 [[ "$i" != "$id" ]]
1231 done
1232
1233 ceph osd find $id
1234
1235 id2=`ceph osd new $uuid 2>/dev/null`
1236
1237 [[ $id2 == $id ]]
1238
1239 ceph osd new $uuid $id
1240
1241 id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1242 ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
1243
1244 uuid2=$(uuidgen)
1245 id2=$(ceph osd new $uuid2)
1246 ceph osd find $id2
1247 [[ "$id2" != "$id" ]]
1248
1249 ceph osd new $uuid $id2 2>&1 | grep EEXIST
1250 ceph osd new $uuid2 $id2
1251
1252 # test with secrets
1253 empty_secrets=$(gen_secrets_file "empty")
1254 empty_json=$(gen_secrets_file "empty_json")
1255 all_secrets=$(gen_secrets_file "all")
1256 cephx_only=$(gen_secrets_file "cephx")
1257 no_cephx=$(gen_secrets_file "no_cephx")
1258 no_lockbox=$(gen_secrets_file "no_lockbox")
1259 bad_json=$(gen_secrets_file "bad_json")
1260
1261 # empty secrets should be idempotent
1262 new_id=$(ceph osd new $uuid $id -i $empty_secrets)
1263 [[ "$new_id" == "$id" ]]
1264
1265 # empty json, thus empty secrets
1266 new_id=$(ceph osd new $uuid $id -i $empty_json)
1267 [[ "$new_id" == "$id" ]]
1268
1269 ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
1270
1271 ceph osd rm $id
1272 ceph osd rm $id2
1273 ceph osd setmaxosd $old_maxosd
1274
1275 ceph osd new $uuid -i $bad_json 2>&1 | grep 'EINVAL'
1276 ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
1277 ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
1278
1279 osds=$(ceph osd ls)
1280 id=$(ceph osd new $uuid -i $all_secrets)
1281 for i in $osds; do
1282 [[ "$i" != "$id" ]]
1283 done
1284
1285 ceph osd find $id
1286
1287 # validate secrets and dm-crypt are set
1288 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1289 s=$(cat $all_secrets | jq '.cephx_secret')
1290 [[ $k == $s ]]
1291 k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
1292 jq '.key')
1293 s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
1294 [[ $k == $s ]]
1295 ceph config-key exists dm-crypt/osd/$uuid/luks
1296
1297 osds=$(ceph osd ls)
1298 id2=$(ceph osd new $uuid2 -i $cephx_only)
1299 for i in $osds; do
1300 [[ "$i" != "$id2" ]]
1301 done
1302
1303 ceph osd find $id2
1304 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1305 s=$(cat $all_secrets | jq '.cephx_secret')
1306 [[ $k == $s ]]
1307 expect_false ceph auth get-key client.osd-lockbox.$uuid2
1308 expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
1309
1310 ceph osd destroy osd.$id2 --yes-i-really-mean-it
1311 ceph osd destroy $id2 --yes-i-really-mean-it
1312 ceph osd find $id2
1313 expect_false ceph auth get-key osd.$id2
1314 ceph osd dump | grep osd.$id2 | grep destroyed
1315
1316 id3=$id2
1317 uuid3=$(uuidgen)
1318 ceph osd new $uuid3 $id3 -i $all_secrets
1319 ceph osd dump | grep osd.$id3 | expect_false grep destroyed
1320 ceph auth get-key client.osd-lockbox.$uuid3
1321 ceph auth get-key osd.$id3
1322 ceph config-key exists dm-crypt/osd/$uuid3/luks
1323
1324 ceph osd purge osd.$id3 --yes-i-really-mean-it
1325 expect_false ceph osd find $id2
1326 expect_false ceph auth get-key osd.$id2
1327 expect_false ceph auth get-key client.osd-lockbox.$uuid3
1328 expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
1329 ceph osd purge osd.$id3 --yes-i-really-mean-it
1330 ceph osd purge osd.$id3 --yes-i-really-mean-it # idempotent
1331
1332 ceph osd purge osd.$id --yes-i-really-mean-it
1333 ceph osd purge 123456 --yes-i-really-mean-it
1334 expect_false ceph osd find $id
1335 expect_false ceph auth get-key osd.$id
1336 expect_false ceph auth get-key client.osd-lockbox.$uuid
1337 expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
1338
1339 rm $empty_secrets $empty_json $all_secrets $cephx_only \
1340 $no_cephx $no_lockbox $bad_json
1341
1342 for i in $(ceph osd ls); do
1343 [[ "$i" != "$id" ]]
1344 [[ "$i" != "$id2" ]]
1345 [[ "$i" != "$id3" ]]
1346 done
1347
1348 [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
1349 ceph osd setmaxosd $old_maxosd
1350
1351 }
1352
1353 function test_mon_config_key()
1354 {
1355 key=asdfasdfqwerqwreasdfuniquesa123df
1356 ceph config-key list | grep -c $key | grep 0
1357 ceph config-key get $key | grep -c bar | grep 0
1358 ceph config-key set $key bar
1359 ceph config-key get $key | grep bar
1360 ceph config-key list | grep -c $key | grep 1
1361 ceph config-key dump | grep $key | grep bar
1362 ceph config-key rm $key
1363 expect_false ceph config-key get $key
1364 ceph config-key list | grep -c $key | grep 0
1365 ceph config-key dump | grep -c $key | grep 0
1366 }
1367
1368 function test_mon_osd()
1369 {
1370 #
1371 # osd blacklist
1372 #
1373 bl=192.168.0.1:0/1000
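# blacklist entries use the addr:port/nonce form, so 192.168.0.1:0/1000 is
# IP 192.168.0.1, port 0 (any) and nonce 1000; the nonce-less variant is
# exercised further below.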
1374 ceph osd blacklist add $bl
1375 ceph osd blacklist ls | grep $bl
1376 ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
1377 ceph osd dump --format=json-pretty | grep $bl
1378 ceph osd dump | grep "^blacklist $bl"
1379 ceph osd blacklist rm $bl
1380 ceph osd blacklist ls | expect_false grep $bl
1381
1382 bl=192.168.0.1
1383 # test without nonce, invalid nonce
1384 ceph osd blacklist add $bl
1385 ceph osd blacklist ls | grep $bl
1386 ceph osd blacklist rm $bl
1387 ceph osd blacklist ls | expect_false grep $bl
1388 expect_false "ceph osd blacklist $bl/-1"
1389 expect_false "ceph osd blacklist $bl/foo"
1390
1391 # test with wrong address
1392 expect_false "ceph osd blacklist 1234.56.78.90/100"
1393
1394 # Test `clear`
1395 ceph osd blacklist add $bl
1396 ceph osd blacklist ls | grep $bl
1397 ceph osd blacklist clear
1398 ceph osd blacklist ls | expect_false grep $bl
1399
1400 #
1401 # osd crush
1402 #
1403 ceph osd crush reweight-all
1404 ceph osd crush tunables legacy
1405 ceph osd crush show-tunables | grep argonaut
1406 ceph osd crush tunables bobtail
1407 ceph osd crush show-tunables | grep bobtail
1408 ceph osd crush tunables firefly
1409 ceph osd crush show-tunables | grep firefly
1410
1411 ceph osd crush set-tunable straw_calc_version 0
1412 ceph osd crush get-tunable straw_calc_version | grep 0
1413 ceph osd crush set-tunable straw_calc_version 1
1414 ceph osd crush get-tunable straw_calc_version | grep 1
1415
1416 #
1417 # require-min-compat-client
1418 expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
1419 ceph osd set-require-min-compat-client luminous
1420 ceph osd dump | grep 'require_min_compat_client luminous'
1421
1422 #
1423 # osd scrub
1424 #
1425 # how do I tell when these are done?
1426 ceph osd scrub 0
1427 ceph osd deep-scrub 0
1428 ceph osd repair 0
1429
1430 for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
1431 do
1432 ceph osd set $f
1433 ceph osd unset $f
1434 done
1435 expect_false ceph osd unset sortbitwise # cannot be unset
1436 expect_false ceph osd set bogus
1437 expect_false ceph osd unset bogus
1438 ceph osd require-osd-release luminous
1439 # can't lower (or use new command for anything but jewel)
1440 expect_false ceph osd require-osd-release jewel
1441 # these are no-ops but should succeed.
1442 ceph osd set require_jewel_osds
1443 ceph osd set require_kraken_osds
1444 expect_false ceph osd unset require_jewel_osds
1445
1446 ceph osd set noup
1447 ceph osd down 0
1448 ceph osd dump | grep 'osd.0 down'
1449 ceph osd unset noup
1450 max_run=1000
1451 for ((i=0; i < $max_run; i++)); do
1452 if ! ceph osd dump | grep 'osd.0 up'; then
1453 echo "waiting for osd.0 to come back up ($i/$max_run)"
1454 sleep 1
1455 else
1456 break
1457 fi
1458 done
1459 ceph osd dump | grep 'osd.0 up'
1460
1461 ceph osd dump | grep 'osd.0 up'
1462 # ceph osd find expects the OsdName, so both ints and osd.n should work.
1463 ceph osd find 1
1464 ceph osd find osd.1
1465 expect_false ceph osd find osd.xyz
1466 expect_false ceph osd find xyz
1467 expect_false ceph osd find 0.1
1468 ceph --format plain osd find 1 # falls back to json-pretty
1469 if [ `uname` == Linux ]; then
1470 ceph osd metadata 1 | grep 'distro'
1471 ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
1472 fi
1473 ceph osd out 0
1474 ceph osd dump | grep 'osd.0.*out'
1475 ceph osd in 0
1476 ceph osd dump | grep 'osd.0.*in'
1477 ceph osd find 0
1478
1479 ceph osd add-nodown 0 1
1480 ceph health detail | grep 'NODOWN'
1481 ceph osd rm-nodown 0 1
1482 ! ceph health detail | grep 'NODOWN'
1483
1484 ceph osd out 0 # so we can mark it as noin later
1485 ceph osd add-noin 0
1486 ceph health detail | grep 'NOIN'
1487 ceph osd rm-noin 0
1488 ! ceph health detail | grep 'NOIN'
1489 ceph osd in 0
1490
1491 ceph osd add-noout 0
1492 ceph health detail | grep 'NOOUT'
1493 ceph osd rm-noout 0
1494 ! ceph health detail | grep 'NOOUT'
1495
1496 # test osd id parse
1497 expect_false ceph osd add-noup 797er
1498 expect_false ceph osd add-nodown u9uwer
1499 expect_false ceph osd add-noin 78~15
1500 expect_false ceph osd add-noout 0 all 1
1501
1502 expect_false ceph osd rm-noup 1234567
1503 expect_false ceph osd rm-nodown fsadf7
1504 expect_false ceph osd rm-noin 0 1 any
1505 expect_false ceph osd rm-noout 790-fd
1506
1507 ids=`ceph osd ls-tree default`
1508 for osd in $ids
1509 do
1510 ceph osd add-nodown $osd
1511 ceph osd add-noout $osd
1512 done
1513 ceph -s | grep 'NODOWN'
1514 ceph -s | grep 'NOOUT'
1515 ceph osd rm-nodown any
1516 ceph osd rm-noout all
1517 ! ceph -s | grep 'NODOWN'
1518 ! ceph -s | grep 'NOOUT'
1519
1520 # make sure mark out preserves weight
1521 ceph osd reweight osd.0 .5
1522 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1523 ceph osd out 0
1524 ceph osd in 0
1525 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1526
1527 ceph osd getmap -o ${f:=$TEMP_DIR/map.$$}   # default $f in case test_mon_crush has not set it
1528 [ -s $f ]
1529 rm $f
1530 save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1531 [ "$save" -gt 0 ]
1532 ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
1533 ceph osd setmaxosd 10
1534 ceph osd getmaxosd | grep 'max_osd = 10'
1535 ceph osd setmaxosd $save
1536 ceph osd getmaxosd | grep "max_osd = $save"
1537
1538 for id in `ceph osd ls` ; do
1539 retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
1540 done
1541
1542 ceph osd rm 0 2>&1 | grep 'EBUSY'
1543
1544 local old_osds=$(echo $(ceph osd ls))
1545 id=`ceph osd create`
1546 ceph osd find $id
1547 ceph osd lost $id --yes-i-really-mean-it
1548 expect_false ceph osd setmaxosd $id
1549 local new_osds=$(echo $(ceph osd ls))
1550 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1551 ceph osd rm $id
1552 done
1553
1554 uuid=`uuidgen`
1555 id=`ceph osd create $uuid`
1556 id2=`ceph osd create $uuid`
1557 [ "$id" = "$id2" ]
1558 ceph osd rm $id
1559
1560 ceph --help osd
1561
1562 # reset max_osd.
1563 ceph osd setmaxosd $id
1564 ceph osd getmaxosd | grep "max_osd = $save"
1565 local max_osd=$save
1566
1567 ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
1568 ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
1569
1570 id=`ceph osd create $uuid $max_osd`
1571 [ "$id" = "$max_osd" ]
1572 ceph osd find $id
1573 max_osd=$((max_osd + 1))
1574 ceph osd getmaxosd | grep "max_osd = $max_osd"
1575
1576 ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
1577 ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
1578 id2=`ceph osd create $uuid`
1579 [ "$id" = "$id2" ]
1580 id2=`ceph osd create $uuid $id`
1581 [ "$id" = "$id2" ]
1582
1583 uuid=`uuidgen`
1584 local gap_start=$max_osd
1585 id=`ceph osd create $uuid $((gap_start + 100))`
1586 [ "$id" = "$((gap_start + 100))" ]
1587 max_osd=$((id + 1))
1588 ceph osd getmaxosd | grep "max_osd = $max_osd"
1589
1590 ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
1591
1592 #
1593 # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
1594 # is repeated and consumes two osd ids, not just one.
1595 #
1596 local next_osd
1597 if test "$CEPH_CLI_TEST_DUP_COMMAND" ; then
1598 next_osd=$((gap_start + 1))
1599 else
1600 next_osd=$gap_start
1601 fi
1602 id=`ceph osd create`
1603 [ "$id" = "$next_osd" ]
1604
1605 next_osd=$((id + 1))
1606 id=`ceph osd create $(uuidgen)`
1607 [ "$id" = "$next_osd" ]
1608
1609 next_osd=$((id + 1))
1610 id=`ceph osd create $(uuidgen) $next_osd`
1611 [ "$id" = "$next_osd" ]
1612
1613 local new_osds=$(echo $(ceph osd ls))
1614 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1615 [ $id -ge $save ]
1616 ceph osd rm $id
1617 done
1618 ceph osd setmaxosd $save
1619
1620 ceph osd ls
1621 ceph osd pool create data 10
1622 ceph osd pool application enable data rados
1623 ceph osd lspools | grep data
1624 ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
1625 ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
1626 ceph osd pool delete data data --yes-i-really-really-mean-it
1627
1628 ceph osd pause
1629 ceph osd dump | grep 'flags.*pauserd,pausewr'
1630 ceph osd unpause
1631
1632 ceph osd tree
1633 ceph osd tree up
1634 ceph osd tree down
1635 ceph osd tree in
1636 ceph osd tree out
1637 ceph osd tree destroyed
1638 ceph osd tree up in
1639 ceph osd tree up out
1640 ceph osd tree down in
1641 ceph osd tree down out
1642 ceph osd tree out down
1643 expect_false ceph osd tree up down
1644 expect_false ceph osd tree up destroyed
1645 expect_false ceph osd tree down destroyed
1646 expect_false ceph osd tree up down destroyed
1647 expect_false ceph osd tree in out
1648 expect_false ceph osd tree up foo
1649
1650 ceph osd metadata
1651 ceph osd count-metadata os
1652 ceph osd versions
1653
1654 ceph osd perf
1655 ceph osd blocked-by
1656
1657 ceph osd stat | grep up,
1658 }
1659
1660 function test_mon_crush()
1661 {
1662 f=$TEMP_DIR/map.$$
1663 epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
1664 [ -s $f ]
1665 [ "$epoch" -gt 1 ]
1666 nextepoch=$(( $epoch + 1 ))
1667 echo epoch $epoch nextepoch $nextepoch
1668 rm -f $f.epoch
1669 expect_false ceph osd setcrushmap $nextepoch -i $f
1670 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1671 echo gotepoch $gotepoch
1672 [ "$gotepoch" -eq "$nextepoch" ]
1673 # should be idempotent
1674 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1675 echo epoch $gotepoch
1676 [ "$gotepoch" -eq "$nextepoch" ]
1677 rm $f
1678 }
1679
1680 function test_mon_osd_pool()
1681 {
1682 #
1683 # osd pool
1684 #
1685 ceph osd pool create data 10
1686 ceph osd pool application enable data rados
1687 ceph osd pool mksnap data datasnap
1688 rados -p data lssnap | grep datasnap
1689 ceph osd pool rmsnap data datasnap
1690 expect_false ceph osd pool rmsnap pool_fake snapshot
1691 ceph osd pool delete data data --yes-i-really-really-mean-it
1692
1693 ceph osd pool create data2 10
1694 ceph osd pool application enable data2 rados
1695 ceph osd pool rename data2 data3
1696 ceph osd lspools | grep data3
1697 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
1698
1699 ceph osd pool create replicated 12 12 replicated
1700 ceph osd pool create replicated 12 12 replicated
1701 ceph osd pool create replicated 12 12 # default is replicated
1702 ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
1703 ceph osd pool application enable replicated rados
1704 # should fail because the type is not the same
1705 expect_false ceph osd pool create replicated 12 12 erasure
1706 ceph osd lspools | grep replicated
1707 ceph osd pool create ec_test 1 1 erasure
1708 ceph osd pool application enable ec_test rados
1709 set +e
1710 ceph osd count-metadata osd_objectstore | grep 'bluestore'
1711 if [ $? -eq 1 ]; then # enabling ec_overwrites on a non-bluestore pool should fail
1712 ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
1713 check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
1714 else
1715 ceph osd pool set ec_test allow_ec_overwrites true || return 1
1716 expect_false ceph osd pool set ec_test allow_ec_overwrites false
1717 fi
1718 set -e
1719 ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
1720 ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
1721 }
1722
1723 function test_mon_osd_pool_quota()
1724 {
1725 #
1726 # test osd pool set/get quota
1727 #
1728
1729 # create tmp pool
1730 ceph osd pool create tmp-quota-pool 36
1731 ceph osd pool application enable tmp-quota-pool rados
1732 #
1733 # set erroneous quotas
1734 #
1735 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
1736 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
1737 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
1738 #
1739 # set valid quotas
1740 #
1741 ceph osd pool set-quota tmp-quota-pool max_bytes 10
1742 ceph osd pool set-quota tmp-quota-pool max_objects 10M
1743 #
1744 # get quotas
1745 #
1746 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
1747 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10240k objects'
1748 #
1749 # get quotas in json-pretty format
1750 #
1751 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1752 grep '"quota_max_objects":.*10485760'
1753 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1754 grep '"quota_max_bytes":.*10'
1755 #
1756 # reset pool quotas
1757 #
1758 ceph osd pool set-quota tmp-quota-pool max_bytes 0
1759 ceph osd pool set-quota tmp-quota-pool max_objects 0
1760 #
1761 # test N/A quotas
1762 #
1763 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
1764 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
1765 #
1766 # cleanup tmp pool
1767 ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
1768 }
1769
1770 function test_mon_pg()
1771 {
1772 # Make sure we start healthy.
1773 wait_for_health_ok
1774
1775 ceph pg debug unfound_objects_exist
1776 ceph pg debug degraded_pgs_exist
1777 ceph pg deep-scrub 1.0
1778 ceph pg dump
1779 ceph pg dump pgs_brief --format=json
1780 ceph pg dump pgs --format=json
1781 ceph pg dump pools --format=json
1782 ceph pg dump osds --format=json
1783 ceph pg dump sum --format=json
1784 ceph pg dump all --format=json
1785 ceph pg dump pgs_brief osds --format=json
1786 ceph pg dump pools osds pgs_brief --format=json
1787 ceph pg dump_json
1788 ceph pg dump_pools_json
1789 ceph pg dump_stuck inactive
1790 ceph pg dump_stuck unclean
1791 ceph pg dump_stuck stale
1792 ceph pg dump_stuck undersized
1793 ceph pg dump_stuck degraded
1794 ceph pg ls
1795 ceph pg ls 1
1796 ceph pg ls stale
1797 expect_false ceph pg ls scrubq
1798 ceph pg ls active stale repair recovering
1799 ceph pg ls 1 active
1800 ceph pg ls 1 active stale
1801 ceph pg ls-by-primary osd.0
1802 ceph pg ls-by-primary osd.0 1
1803 ceph pg ls-by-primary osd.0 active
1804 ceph pg ls-by-primary osd.0 active stale
1805 ceph pg ls-by-primary osd.0 1 active stale
1806 ceph pg ls-by-osd osd.0
1807 ceph pg ls-by-osd osd.0 1
1808 ceph pg ls-by-osd osd.0 active
1809 ceph pg ls-by-osd osd.0 active stale
1810 ceph pg ls-by-osd osd.0 1 active stale
1811 ceph pg ls-by-pool rbd
1812 ceph pg ls-by-pool rbd active stale
1813 # can't test this...
1814 # ceph pg force_create_pg
1815 ceph pg getmap -o $TEMP_DIR/map.$$
1816 [ -s $TEMP_DIR/map.$$ ]
1817 ceph pg map 1.0 | grep acting
1818 ceph pg repair 1.0
1819 ceph pg scrub 1.0
1820
1821 ceph osd set-full-ratio .962
1822 ceph osd dump | grep '^full_ratio 0.962'
1823 ceph osd set-backfillfull-ratio .912
1824 ceph osd dump | grep '^backfillfull_ratio 0.912'
1825 ceph osd set-nearfull-ratio .892
1826 ceph osd dump | grep '^nearfull_ratio 0.892'
1827
1828 # Check health status: out-of-order ratios (nearfull > backfillfull, or backfillfull > full) should raise OSD_OUT_OF_ORDER_FULL
1829 ceph osd set-nearfull-ratio .913
1830 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
1831 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
1832 ceph osd set-nearfull-ratio .892
1833 ceph osd set-backfillfull-ratio .963
1834 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
1835 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
1836 ceph osd set-backfillfull-ratio .912
1837
1838 # Check injected full results
1839 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
1840 wait_for_health "OSD_NEARFULL"
1841 ceph health detail | grep "osd.0 is near full"
1842 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
1843 wait_for_health_ok
1844
1845 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
1846 wait_for_health "OSD_BACKFILLFULL"
1847 ceph health detail | grep "osd.1 is backfill full"
1848 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
1849 wait_for_health_ok
1850
1851 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
1852 # failsafe and full are the same as far as the monitor is concerned
1853 wait_for_health "OSD_FULL"
1854 ceph health detail | grep "osd.2 is full"
1855 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
1856 wait_for_health_ok
1857
1858 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
1859 wait_for_health "OSD_FULL"
1860 ceph health detail | grep "osd.0 is full"
1861 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
1862 wait_for_health_ok
1863
1864 ceph pg stat | grep 'pgs:'
1865 ceph pg 1.0 query
1866 ceph tell 1.0 query
1867 ceph quorum enter
1868 ceph quorum_status
1869 ceph report | grep osd_stats
1870 ceph status
1871 ceph -s
1872
1873 #
1874 # tell osd version
1875 #
1876 ceph tell osd.0 version
1877 expect_false ceph tell osd.9999 version
1878 expect_false ceph tell osd.foo version
1879
1880 # back to pg stuff
1881
1882 ceph tell osd.0 dump_pg_recovery_stats | grep Started
1883
1884 ceph osd reweight 0 0.9
1885 expect_false ceph osd reweight 0 -1
1886 ceph osd reweight osd.0 1
1887
1888 ceph osd primary-affinity osd.0 .9
1889 expect_false ceph osd primary-affinity osd.0 -2
1890 expect_false ceph osd primary-affinity osd.9999 .5
1891 ceph osd primary-affinity osd.0 1
1892
1893 ceph osd pool set rbd size 2
1894 ceph osd pg-temp 1.0 0 1
1895 ceph osd pg-temp 1.0 osd.1 osd.0
1896 expect_false ceph osd pg-temp 1.0 0 1 2
1897 expect_false ceph osd pg-temp asdf qwer
1898 expect_false ceph osd pg-temp 1.0 asdf
1899 expect_false ceph osd pg-temp 1.0
1900
1901 # don't test ceph osd primary-temp for now
1902 }
1903
1904 function test_mon_osd_pool_set()
1905 {
1906 TEST_POOL_GETSET=pool_getset
1907 ceph osd pool create $TEST_POOL_GETSET 1
1908 ceph osd pool application enable $TEST_POOL_GETSET rados
1909 wait_for_clean
1910 ceph osd pool get $TEST_POOL_GETSET all
1911
1912 for s in pg_num pgp_num size min_size crush_rule; do
1913 ceph osd pool get $TEST_POOL_GETSET $s
1914 done
1915
1916 old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
1917 (( new_size = old_size + 1 ))
1918 ceph osd pool set $TEST_POOL_GETSET size $new_size
1919 ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
1920 ceph osd pool set $TEST_POOL_GETSET size $old_size
1921
1922 ceph osd pool create pool_erasure 1 1 erasure
1923 ceph osd pool application enable pool_erasure rados
1924 wait_for_clean
1925 set +e
1926 ceph osd pool set pool_erasure size 4444 2>$TMPFILE
1927 check_response 'not change the size'
1928 set -e
1929 ceph osd pool get pool_erasure erasure_code_profile
1930
1931 auid=5555
1932 ceph osd pool set $TEST_POOL_GETSET auid $auid
1933 ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
1934 ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
1935 ceph osd pool set $TEST_POOL_GETSET auid 0
1936
1937 for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
1938 ceph osd pool set $TEST_POOL_GETSET $flag false
1939 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1940 ceph osd pool set $TEST_POOL_GETSET $flag true
1941 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1942 ceph osd pool set $TEST_POOL_GETSET $flag 1
1943 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1944 ceph osd pool set $TEST_POOL_GETSET $flag 0
1945 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1946 expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
1947 expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
1948 done
1949
1950 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1951 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
1952 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
1953 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
1954 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1955
1956 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1957 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
1958 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
1959 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
1960 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1961
1962 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1963 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
1964 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
1965 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
1966 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1967
1968 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1969 ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
1970 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
1971 ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
1972 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1973
1974 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1975 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
1976 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
1977 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
1978 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1979
1980 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1981 ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
1982 ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
1983 ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
1984 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1985
1986 ceph osd pool set $TEST_POOL_GETSET nopgchange 1
1987 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
1988 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1989 ceph osd pool set $TEST_POOL_GETSET nopgchange 0
1990 ceph osd pool set $TEST_POOL_GETSET pg_num 10
1991 wait_for_clean
1992 ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1993
1994 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
1995 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
1996 ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
1997 ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
1998 wait_for_clean
1999 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
2000 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32 + 1))
2001 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
2002
2003 ceph osd pool set $TEST_POOL_GETSET nosizechange 1
2004 expect_false ceph osd pool set $TEST_POOL_GETSET size 2
2005 expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
2006 ceph osd pool set $TEST_POOL_GETSET nosizechange 0
2007 ceph osd pool set $TEST_POOL_GETSET size 2
2008 wait_for_clean
2009 ceph osd pool set $TEST_POOL_GETSET min_size 2
2010
2011 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
2012 ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
2013
2014 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
2015 ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
2016
2017 ceph osd pool get rbd crush_rule | grep 'crush_rule: '
2018
2019 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2020 ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
2021 ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
2022 ceph osd pool set $TEST_POOL_GETSET compression_mode unset
2023 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2024
2025 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2026 ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
2027 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
2028 ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
2029 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2030
2031 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2032 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
2033 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
2034 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
2035 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
2036 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
2037 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2038
2039 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2040 ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
2041 ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
2042 ceph osd pool set $TEST_POOL_GETSET csum_type unset
2043 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2044
2045 for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2046 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2047 ceph osd pool set $TEST_POOL_GETSET $size 100
2048 ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
2049 ceph osd pool set $TEST_POOL_GETSET $size 0
2050 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2051 done
2052
2053 ceph osd pool set $TEST_POOL_GETSET nodelete 1
2054 expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2055 ceph osd pool set $TEST_POOL_GETSET nodelete 0
2056 ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2057
2058 }
2059
2060 function test_mon_osd_tiered_pool_set()
2061 {
2062 # this is really a tier pool
2063 ceph osd pool create real-tier 2
2064 ceph osd tier add rbd real-tier
2065
2066 ceph osd pool set real-tier hit_set_type explicit_hash
2067 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
2068 ceph osd pool set real-tier hit_set_type explicit_object
2069 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
2070 ceph osd pool set real-tier hit_set_type bloom
2071 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
2072 expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
2073 ceph osd pool set real-tier hit_set_period 123
2074 ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
2075 ceph osd pool set real-tier hit_set_count 12
2076 ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
2077 ceph osd pool set real-tier hit_set_fpp .01
2078 ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
2079
2080 ceph osd pool set real-tier target_max_objects 123
2081 ceph osd pool get real-tier target_max_objects | \
2082 grep 'target_max_objects:[ \t]\+123'
2083 ceph osd pool set real-tier target_max_bytes 123456
2084 ceph osd pool get real-tier target_max_bytes | \
2085 grep 'target_max_bytes:[ \t]\+123456'
2086 ceph osd pool set real-tier cache_target_dirty_ratio .123
2087 ceph osd pool get real-tier cache_target_dirty_ratio | \
2088 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
2089 expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
2090 expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
2091 ceph osd pool set real-tier cache_target_dirty_high_ratio .123
2092 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
2093 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
2094 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
2095 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
2096 ceph osd pool set real-tier cache_target_full_ratio .123
2097 ceph osd pool get real-tier cache_target_full_ratio | \
2098 grep 'cache_target_full_ratio:[ \t]\+0.123'
2099 ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
2100 ceph osd pool set real-tier cache_target_full_ratio 1.0
2101 ceph osd pool set real-tier cache_target_full_ratio 0
2102 expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
2103 ceph osd pool set real-tier cache_min_flush_age 123
2104 ceph osd pool get real-tier cache_min_flush_age | \
2105 grep 'cache_min_flush_age:[ \t]\+123'
2106 ceph osd pool set real-tier cache_min_evict_age 234
2107 ceph osd pool get real-tier cache_min_evict_age | \
2108 grep 'cache_min_evict_age:[ \t]\+234'
2109
2110 # this is not a tier pool
2111 ceph osd pool create fake-tier 2
2112 ceph osd pool application enable fake-tier rados
2113 wait_for_clean
2114
2115 expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
2116 expect_false ceph osd pool get fake-tier hit_set_type
2117 expect_false ceph osd pool set fake-tier hit_set_type explicit_object
2118 expect_false ceph osd pool get fake-tier hit_set_type
2119 expect_false ceph osd pool set fake-tier hit_set_type bloom
2120 expect_false ceph osd pool get fake-tier hit_set_type
2121 expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
2122 expect_false ceph osd pool set fake-tier hit_set_period 123
2123 expect_false ceph osd pool get fake-tier hit_set_period
2124 expect_false ceph osd pool set fake-tier hit_set_count 12
2125 expect_false ceph osd pool get fake-tier hit_set_count
2126 expect_false ceph osd pool set fake-tier hit_set_fpp .01
2127 expect_false ceph osd pool get fake-tier hit_set_fpp
2128
2129 expect_false ceph osd pool set fake-tier target_max_objects 123
2130 expect_false ceph osd pool get fake-tier target_max_objects
2131 expect_false ceph osd pool set fake-tier target_max_bytes 123456
2132 expect_false ceph osd pool get fake-tier target_max_bytes
2133 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
2134 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2135 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
2136 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
2137 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
2138 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2139 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
2140 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
2141 expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
2142 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2143 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
2144 expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
2145 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
2146 expect_false ceph osd pool set fake-tier cache_min_flush_age 123
2147 expect_false ceph osd pool get fake-tier cache_min_flush_age
2148 expect_false ceph osd pool set fake-tier cache_min_evict_age 234
2149 expect_false ceph osd pool get fake-tier cache_min_evict_age
2150
2151 ceph osd tier remove rbd real-tier
2152 ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
2153 ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
2154 }
2155
2156 function test_mon_osd_erasure_code()
2157 {
2158
2159 ceph osd erasure-code-profile set fooprofile a=b c=d
2160 ceph osd erasure-code-profile set fooprofile a=b c=d
2161 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2162 ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
2163 ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2164 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
2165 #
2166 # cleanup by removing profile 'fooprofile'
2167 ceph osd erasure-code-profile rm fooprofile
2168 }
2169
2170 function test_mon_osd_misc()
2171 {
2172 set +e
2173
2174 # expect error about missing 'pool' argument
2175 ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
2176
2177 # expect error about unused argument foo
2178 ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
2179
2180 # expect "not in range" for invalid full ratio
2181 ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22
2182
2183 # expect "higher than 100" for an invalid overload percentage
2184 ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
2185
2186 set -e
2187
2188 ceph osd reweight-by-utilization 110
2189 ceph osd reweight-by-utilization 110 .5
2190 expect_false ceph osd reweight-by-utilization 110 0
2191 expect_false ceph osd reweight-by-utilization 110 -0.1
2192 ceph osd test-reweight-by-utilization 110 .5 --no-increasing
2193 ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
2194 expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
2195 expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
2196 ceph osd reweight-by-pg 110
2197 ceph osd test-reweight-by-pg 110 .5
2198 ceph osd reweight-by-pg 110 rbd
2199 ceph osd reweight-by-pg 110 .5 rbd
2200 expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
2201 }
2202
2203 function test_mon_heap_profiler()
2204 {
2205 do_test=1
2206 set +e
2207 # expect 'heap' commands to be correctly parsed
2208 ceph heap stats 2>$TMPFILE
2209 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2210 echo "tcmalloc not enabled; skip heap profiler test"
2211 do_test=0
2212 fi
2213 set -e
2214
2215 [[ $do_test -eq 0 ]] && return 0
2216
2217 ceph heap start_profiler
2218 ceph heap dump
2219 ceph heap stop_profiler
2220 ceph heap release
2221 }
2222
2223 function test_admin_heap_profiler()
2224 {
2225 do_test=1
2226 set +e
2227 # expect 'heap' commands to be correctly parsed
2228 ceph heap stats 2>$TMPFILE
2229 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2230 echo "tcmalloc not enabled; skip heap profiler test"
2231 do_test=0
2232 fi
2233 set -e
2234
2235 [[ $do_test -eq 0 ]] && return 0
2236
2237 local admin_socket=$(get_admin_socket osd.0)
2238
2239 $SUDO ceph --admin-daemon $admin_socket heap start_profiler
2240 $SUDO ceph --admin-daemon $admin_socket heap dump
2241 $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
2242 $SUDO ceph --admin-daemon $admin_socket heap release
2243 }
2244
2245 function test_osd_bench()
2246 {
2247 # test osd bench limits
2248 # As we should not rely on defaults (as they may change over time),
2249 # let's inject some values and perform some simple tests
2250 # max iops: 10 # 10 IOPS
2251 # max throughput: 10485760 # 10MB/s
2252 # max block size: 2097152 # 2MB
2253 # duration: 10 # 10 seconds
2254
2255 local args="\
2256 --osd-bench-duration 10 \
2257 --osd-bench-max-block-size 2097152 \
2258 --osd-bench-large-size-max-throughput 10485760 \
2259 --osd-bench-small-size-max-iops 10"
2260 ceph tell osd.0 injectargs ${args## }
2261
2262 # anything with a bs larger than 2097152 must fail
2263 expect_false ceph tell osd.0 bench 1 2097153
2264 # but a bs equal to the configured max block size (2097152) must succeed
2265 ceph tell osd.0 bench 1 2097152
2266
2267 # we assume 1MB as a large bs; anything lower is a small bs
2268 # for a 4096-byte bs, for 10 seconds, we are limited by IOPS
2269 # max count: 409600 (bytes)
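  # Worked arithmetic for the small-bs cap above (illustrative sketch; the
  # variable name is made up): iops limit * duration * block size.
  local small_bs_cap=$((10 * 10 * 4096))
  [ "$small_bs_cap" -eq 409600 ]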
2270
2271 # more than max count must not be allowed
2272 expect_false ceph tell osd.0 bench 409601 4096
2273 # but 409600 must succeed
2274 ceph tell osd.0 bench 409600 4096
2275
2276 # for a large bs, we are limited by throughput.
2277 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
2278 # the max count will be (10MB * 10s) = 100MB
2279 # max count: 104857600 (bytes)
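  # Worked arithmetic for the large-bs cap above (illustrative sketch; the
  # variable name is made up): throughput limit * duration.
  local large_bs_cap=$((10485760 * 10))
  [ "$large_bs_cap" -eq 104857600 ]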
2280
2281 # more than max count must not be allowed
2282 expect_false ceph tell osd.0 bench 104857601 2097152
2283 # up to max count must be allowed
2284 ceph tell osd.0 bench 104857600 2097152
2285 }
2286
2287 function test_osd_negative_filestore_merge_threshold()
2288 {
2289 $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
2290 expect_config_value "osd.0" "filestore_merge_threshold" -1
2291 }
2292
2293 function test_mon_tell()
2294 {
2295 ceph tell mon.a version
2296 ceph tell mon.b version
2297 expect_false ceph tell mon.foo version
2298
2299 sleep 1
2300
2301 ceph_watch_start debug audit
2302 ceph tell mon.a version
2303 ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
2304
2305 ceph_watch_start debug audit
2306 ceph tell mon.b version
2307 ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
2308 }
2309
2310 function test_mon_ping()
2311 {
2312 ceph ping mon.a
2313 ceph ping mon.b
2314 expect_false ceph ping mon.foo
2315
2316 ceph ping mon.\*
2317 }
2318
2319 function test_mon_deprecated_commands()
2320 {
2321 # current DEPRECATED commands are:
2322 # ceph compact
2323 # ceph scrub
2324 # ceph sync force
2325 #
2326 # Testing should be accomplished by setting
2327 # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
2328 # each one of these commands.
2329
2330 ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
2331 expect_false ceph tell mon.a compact 2> $TMPFILE
2332 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2333
2334 expect_false ceph tell mon.a scrub 2> $TMPFILE
2335 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2336
2337 expect_false ceph tell mon.a sync force 2> $TMPFILE
2338 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2339
2340 ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
2341 }
2342
2343 function test_mon_cephdf_commands()
2344 {
2345 # ceph df detail:
2346 # pool section:
2347 # RAW USED: the approximate raw space consumed per pool, i.e. USED times the replica count
2348
2349 ceph osd pool create cephdf_for_test 32 32 replicated
2350 ceph osd pool application enable cephdf_for_test rados
2351 ceph osd pool set cephdf_for_test size 2
2352
2353 dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
2354 rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
2355
2356 # wait for the object to be visible via "rados ls"
2357 for i in `seq 1 10`; do
2358 rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
2359 sleep 1
2360 done
2361 # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
2362 # to sync mon with osd
2363 flush_pg_stats
2364 local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
2365 cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
2366 raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`
2367
2368 ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
2369 rm ./cephdf_for_test
2370
2371 expect_false test $cal_raw_used_size != $raw_used_size
2372 }
2373
2374 function test_mon_pool_application()
2375 {
2376 ceph osd pool create app_for_test 10
2377
2378 ceph osd pool application enable app_for_test rbd
2379 expect_false ceph osd pool application enable app_for_test rgw
2380 ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
2381 ceph osd pool ls detail | grep "application rbd,rgw"
2382 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2383
2384 expect_false ceph osd pool application set app_for_test cephfs key value
2385 ceph osd pool application set app_for_test rbd key1 value1
2386 ceph osd pool application set app_for_test rbd key2 value2
2387 ceph osd pool application set app_for_test rgw key1 value1
2388
2389 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
2390
2391 ceph osd pool application rm app_for_test rgw key1
2392 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
2393 ceph osd pool application rm app_for_test rbd key2
2394 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
2395 ceph osd pool application rm app_for_test rbd key1
2396 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2397 ceph osd pool application rm app_for_test rbd key1 # should be idempotent
2398
2399 expect_false ceph osd pool application disable app_for_test rgw
2400 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2401 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
2402 ceph osd pool ls detail | grep "application rbd"
2403 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
2404
2405 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2406 ceph osd pool ls detail | grep -v "application "
2407 ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
2408
2409 ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
2410 }
2411
2412 function test_mon_tell_help_command()
2413 {
2414 ceph tell mon.a help
2415
2416 # wrong target
2417 expect_false ceph tell mon.zzz help
2418 }
2419
2420 function test_mon_stdin_stdout()
2421 {
2422 echo foo | ceph config-key set test_key -i -
2423 ceph config-key get test_key -o - | grep -c foo | grep -q 1
2424 }
2425
2426 function test_osd_tell_help_command()
2427 {
2428 ceph tell osd.1 help
2429 expect_false ceph tell osd.100 help
2430 }
2431
2432 function test_osd_compact()
2433 {
2434 ceph tell osd.1 compact
2435 $SUDO ceph daemon osd.1 compact
2436 }
2437
2438 function test_mds_tell_help_command()
2439 {
2440 local FS_NAME=cephfs
2441 if ! mds_exists ; then
2442 echo "Skipping test, no MDS found"
2443 return
2444 fi
2445
2446 remove_all_fs
2447 ceph osd pool create fs_data 10
2448 ceph osd pool create fs_metadata 10
2449 ceph fs new $FS_NAME fs_metadata fs_data
2450 wait_mds_active $FS_NAME
2451
2452
2453 ceph tell mds.a help
2454 expect_false ceph tell mds.z help
2455
2456 remove_all_fs
2457 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
2458 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
2459 }
2460
2461 function test_mgr_tell()
2462 {
2463 ceph tell mgr help
2464 #ceph tell mgr fs status # see http://tracker.ceph.com/issues/20761
2465 ceph tell mgr osd status
2466 }
2467
2468 #
2469 # New tests should be added to the TESTS array below
2470 #
2471 # Individual tests may be run using the '-t <testname>' argument
2472 # The user can specify '-t <testname>' as many times as she wants
2473 #
2474 # Tests will be run in the order presented in the TESTS array, or in
2475 # the order specified by the '-t <testname>' options.
2476 #
2477 # '-l' will list all the available test names
2478 # '-h' will show usage
2479 #
2480 # The test maintains backward compatibility: not specifying arguments
2481 # will run all tests in the order in which they appear in the TESTS array.
2482 #
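# Example invocations (illustrative; the test names are entries from the
# TESTS arrays defined below):
#
#   ./test.sh -l                          # list the available tests
#   ./test.sh -t mon_ping -t osd_bench    # run only the named tests
#   ./test.sh --test-mon                  # run all monitor tests
#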
2483
2484 set +x
2485 MON_TESTS+=" mon_injectargs"
2486 MON_TESTS+=" mon_injectargs_SI"
2487 for i in `seq 9`; do
2488 MON_TESTS+=" tiering_$i";
2489 done
2490 MON_TESTS+=" auth"
2491 MON_TESTS+=" auth_profiles"
2492 MON_TESTS+=" mon_misc"
2493 MON_TESTS+=" mon_mon"
2494 MON_TESTS+=" mon_osd"
2495 MON_TESTS+=" mon_config_key"
2496 MON_TESTS+=" mon_crush"
2497 MON_TESTS+=" mon_osd_create_destroy"
2498 MON_TESTS+=" mon_osd_pool"
2499 MON_TESTS+=" mon_osd_pool_quota"
2500 MON_TESTS+=" mon_pg"
2501 MON_TESTS+=" mon_osd_pool_set"
2502 MON_TESTS+=" mon_osd_tiered_pool_set"
2503 MON_TESTS+=" mon_osd_erasure_code"
2504 MON_TESTS+=" mon_osd_misc"
2505 MON_TESTS+=" mon_heap_profiler"
2506 MON_TESTS+=" mon_tell"
2507 MON_TESTS+=" mon_ping"
2508 MON_TESTS+=" mon_deprecated_commands"
2509 MON_TESTS+=" mon_caps"
2510 MON_TESTS+=" mon_cephdf_commands"
2511 MON_TESTS+=" mon_tell_help_command"
2512 MON_TESTS+=" mon_stdin_stdout"
2513
2514 OSD_TESTS+=" osd_bench"
2515 OSD_TESTS+=" osd_negative_filestore_merge_threshold"
2516 OSD_TESTS+=" tiering_agent"
2517 OSD_TESTS+=" admin_heap_profiler"
2518 OSD_TESTS+=" osd_tell_help_command"
2519 OSD_TESTS+=" osd_compact"
2520
2521 MDS_TESTS+=" mds_tell"
2522 MDS_TESTS+=" mon_mds"
2523 MDS_TESTS+=" mon_mds_metadata"
2524 MDS_TESTS+=" mds_tell_help_command"
2525
2526 MGR_TESTS+=" mgr_tell"
2527
2528 TESTS+=$MON_TESTS
2529 TESTS+=$OSD_TESTS
2530 TESTS+=$MDS_TESTS
2531 TESTS+=$MGR_TESTS
2532
2533 #
2534 # "main" follows
2535 #
2536
2537 function list_tests()
2538 {
2539 echo "AVAILABLE TESTS"
2540 for i in $TESTS; do
2541 echo " $i"
2542 done
2543 }
2544
2545 function usage()
2546 {
2547 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2548 }
2549
2550 tests_to_run=()
2551
2552 sanity_check=true
2553
2554 while [[ $# -gt 0 ]]; do
2555 opt=$1
2556
2557 case "$opt" in
2558 "-l" )
2559 do_list=1
2560 ;;
2561 "--asok-does-not-need-root" )
2562 SUDO=""
2563 ;;
2564 "--no-sanity-check" )
2565 sanity_check=false
2566 ;;
2567 "--test-mon" )
2568 tests_to_run+="$MON_TESTS"
2569 ;;
2570 "--test-osd" )
2571 tests_to_run+="$OSD_TESTS"
2572 ;;
2573 "--test-mds" )
2574 tests_to_run+="$MDS_TESTS"
2575 ;;
2576 "--test-mgr" )
2577 tests_to_run+="$MGR_TESTS"
2578 ;;
2579 "-t" )
2580 shift
2581 if [[ -z "$1" ]]; then
2582 echo "missing argument to '-t'"
2583 usage ;
2584 exit 1
2585 fi
2586 tests_to_run+=" $1"
2587 ;;
2588 "-h" )
2589 usage ;
2590 exit 0
2591 ;;
2592 esac
2593 shift
2594 done
2595
2596 if [[ $do_list -eq 1 ]]; then
2597 list_tests ;
2598 exit 0
2599 fi
2600
2601 ceph osd pool create rbd 10
2602
2603 if test -z "$tests_to_run" ; then
2604 tests_to_run="$TESTS"
2605 fi
2606
2607 if $sanity_check ; then
2608 wait_no_osd_down
2609 fi
2610 for i in $tests_to_run; do
2611 if $sanity_check ; then
2612 check_no_osd_down
2613 fi
2614 set -x
2615 test_${i}
2616 set +x
2617 done
2618 if $sanity_check ; then
2619 check_no_osd_down
2620 fi
2621
2622 set -x
2623
2624 echo OK