1 #!/bin/bash -x
2 # -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
3 # vim: ts=8 sw=8 ft=bash smarttab
4
5 source $(dirname $0)/../ceph-helpers.sh
6
7 set -e
8 set -o functrace
9 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
10 SUDO=${SUDO:-sudo}
11 export CEPH_DEV=1
12
13 function get_admin_socket()
14 {
15 local client=$1
16
17 if test -n "$CEPH_OUT_DIR";
18 then
19 echo $CEPH_OUT_DIR/$client.asok
20 else
21 local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
22 echo "/var/run/ceph/$cluster-$client.asok"
23 fi
24 }
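# Illustrative usage (a comment-only sketch): the returned path can be fed to
# ceph --admin-daemon, as the injectfull test near the end of this file does:
#   $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull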
25
26 function check_no_osd_down()
27 {
28 ! ceph osd dump | grep ' down '
29 }
30
31 function wait_no_osd_down()
32 {
33 max_run=300
34 for i in $(seq 1 $max_run) ; do
35 if ! check_no_osd_down ; then
36 echo "waiting for osd(s) to come back up ($i/$max_run)"
37 sleep 1
38 else
39 break
40 fi
41 done
42 check_no_osd_down
43 }
44
45 function expect_false()
46 {
47 set -x
48 if "$@"; then return 1; else return 0; fi
49 }
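# Illustrative usage (a comment-only sketch): expect_false succeeds only when
# the wrapped command fails, so expected failures don't trip `set -e`, e.g.
#   expect_false ceph osd pool create replicated 12 12 erasure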
50
51
52 TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
53 trap "rm -fr $TEMP_DIR" 0
54
55 TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
56
57 #
58 # retry_eagain max cmd args ...
59 #
60 # retry cmd args ... if it exits on error and its output contains the
61 # string EAGAIN, at most $max times
62 #
63 function retry_eagain()
64 {
65 local max=$1
66 shift
67 local status
68 local tmpfile=$TEMP_DIR/retry_eagain.$$
69 local count
70 for count in $(seq 1 $max) ; do
71 status=0
72 "$@" > $tmpfile 2>&1 || status=$?
73 if test $status = 0 ||
74 ! grep --quiet EAGAIN $tmpfile ; then
75 break
76 fi
77 sleep 1
78 done
79 if test $count = $max ; then
80 echo retried with non-zero exit status, $max times: "$@" >&2
81 fi
82 cat $tmpfile
83 rm $tmpfile
84 return $status
85 }
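# Illustrative usage (a comment-only sketch; the command and retry count are
# just examples):
#   retry_eagain 5 ceph osd dump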
86
87 #
88 # map_enxio_to_eagain cmd arg ...
89 #
90 # add EAGAIN to the output of cmd arg ... if the output contains
91 # ENXIO.
92 #
93 function map_enxio_to_eagain()
94 {
95 local status=0
96 local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
97
98 "$@" > $tmpfile 2>&1 || status=$?
99 if test $status != 0 &&
100 grep --quiet ENXIO $tmpfile ; then
101 echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
102 fi
103 cat $tmpfile
104 rm $tmpfile
105 return $status
106 }
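# Illustrative usage (a comment-only sketch): combined with retry_eagain above,
# an ENXIO from a briefly-unavailable OSD is retried instead of failing, e.g.
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version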
107
108 function check_response()
109 {
110 expected_string=$1
111 retcode=$2
112 expected_retcode=$3
113 if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
114 echo "return code invalid: got $retcode, expected $expected_retcode" >&2
115 exit 1
116 fi
117
118 if ! grep --quiet -- "$expected_string" $TMPFILE ; then
119 echo "Didn't find $expected_string in output" >&2
120 cat $TMPFILE >&2
121 exit 1
122 fi
123 }
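# Illustrative usage (a comment-only sketch; the pool names are hypothetical):
#   ceph osd pool delete foo foo --yes-i-really-really-mean-it 2> $TMPFILE || true
#   check_response "EBUSY: pool 'foo' is a tier of 'bar'"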
124
125 function get_config_value_or_die()
126 {
127 local target config_opt raw val
128
129 target=$1
130 config_opt=$2
131
132 raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
133 if [[ $? -ne 0 ]]; then
134 echo "error obtaining config opt '$config_opt' from '$target': $raw"
135 exit 1
136 fi
137
138 raw=`echo $raw | sed -e 's/[{} "]//g'`
139 val=`echo $raw | cut -f2 -d:`
140
141 echo "$val"
142 return 0
143 }
144
145 function expect_config_value()
146 {
147 local target config_opt expected_val val
148 target=$1
149 config_opt=$2
150 expected_val=$3
151
152 val=$(get_config_value_or_die $target $config_opt)
153
154 if [[ "$val" != "$expected_val" ]]; then
155 echo "expected '$expected_val', got '$val'"
156 exit 1
157 fi
158 }
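# Illustrative usage (a comment-only sketch, mirroring the SI tests below):
#   $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10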
159
160 function ceph_watch_start()
161 {
162 local watch_opt=--watch
163
164 if [ -n "$1" ]; then
165 watch_opt=--watch-$1
166 fi
167
168 CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
169 ceph $watch_opt > $CEPH_WATCH_FILE &
170 CEPH_WATCH_PID=$!
171
172 # wait until the "ceph" client is connected and receiving
173 # log messages from the monitor
174 for i in `seq 3`; do
175 grep -q "cluster" $CEPH_WATCH_FILE && break
176 sleep 1
177 done
178 }
179
180 function ceph_watch_wait()
181 {
182 local regexp=$1
183 local timeout=30
184
185 if [ -n "$2" ]; then
186 timeout=$2
187 fi
188
189 for i in `seq ${timeout}`; do
190 grep -q "$regexp" $CEPH_WATCH_FILE && break
191 sleep 1
192 done
193
194 kill $CEPH_WATCH_PID
195
196 if ! grep "$regexp" $CEPH_WATCH_FILE; then
197 echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
198 cat $CEPH_WATCH_FILE >&2
199 return 1
200 fi
201 }
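# Illustrative usage (a comment-only sketch; the message and timeout are examples):
#   ceph_watch_start
#   ceph log "some test message"
#   ceph_watch_wait "some test message" 60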
202
203 function test_mon_injectargs()
204 {
205 CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
206 check_response "osd_enable_op_tracker = 'false'"
207 ! grep "the.dump" $TMPFILE || return 1
208 ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
209 check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
210 ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
211 check_response "osd_enable_op_tracker = 'false'"
212 ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
213 check_response "osd_enable_op_tracker = 'true'"
214 ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
215 check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
216 expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
217 ceph tell osd.0 injectargs -- '--osd_op_history_duration'
218
219 ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
220 check_response "mon_lease = '6' (not observed, change may require restart)"
221
222 # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
223 expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1
224 }
225
226 function test_mon_injectargs_SI()
227 {
228 # Test SI units during injectargs and 'config set'
229 # We only aim to check that the units are parsed correctly;
230 # we don't intend to test whether the options being set
231 # actually expect SI units to be passed.
232 # Keep in mind that all integer based options (i.e., INT,
233 # LONG, U32, U64) will accept SI unit modifiers.
234 initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
235 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
236 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
237 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
238 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
239 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
240 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
241 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
242 check_response "'10F': (22) Invalid argument"
243 # now test with injectargs
244 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
245 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
246 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
247 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
248 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
249 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
250 expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
251 expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
252 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
253 }
254
255 function test_tiering_agent()
256 {
257 local slow=slow_eviction
258 local fast=fast_eviction
259 ceph osd pool create $slow 1 1
260 ceph osd pool create $fast 1 1
261 ceph osd tier add $slow $fast
262 ceph osd tier cache-mode $fast writeback
263 ceph osd tier set-overlay $slow $fast
264 ceph osd pool set $fast hit_set_type bloom
265 rados -p $slow put obj1 /etc/group
266 ceph osd pool set $fast target_max_objects 1
267 ceph osd pool set $fast hit_set_count 1
268 ceph osd pool set $fast hit_set_period 5
269 # wait for the object to be evicted from the cache
270 local evicted
271 evicted=false
272 for i in `seq 1 300` ; do
273 if ! rados -p $fast ls | grep obj1 ; then
274 evicted=true
275 break
276 fi
277 sleep 1
278 done
279 $evicted # assert
280 # the object is proxy-read and promoted to the cache
281 rados -p $slow get obj1 - >/dev/null
282 # wait for the promoted object to be evicted again
283 evicted=false
284 for i in `seq 1 300` ; do
285 if ! rados -p $fast ls | grep obj1 ; then
286 evicted=true
287 break
288 fi
289 sleep 1
290 done
291 $evicted # assert
292 ceph osd tier remove-overlay $slow
293 ceph osd tier remove $slow $fast
294 ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
295 ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
296 }
297
298 function test_tiering_1()
299 {
300 # tiering
301 ceph osd pool create slow 2
302 ceph osd pool create slow2 2
303 ceph osd pool create cache 2
304 ceph osd pool create cache2 2
305 ceph osd tier add slow cache
306 ceph osd tier add slow cache2
307 expect_false ceph osd tier add slow2 cache
308 # test some state transitions
309 ceph osd tier cache-mode cache writeback
310 expect_false ceph osd tier cache-mode cache forward
311 ceph osd tier cache-mode cache forward --yes-i-really-mean-it
312 expect_false ceph osd tier cache-mode cache readonly
313 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
314 expect_false ceph osd tier cache-mode cache forward
315 ceph osd tier cache-mode cache forward --yes-i-really-mean-it
316 ceph osd tier cache-mode cache none
317 ceph osd tier cache-mode cache writeback
318 ceph osd tier cache-mode cache proxy
319 ceph osd tier cache-mode cache writeback
320 expect_false ceph osd tier cache-mode cache none
321 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
322 # test with dirty objects in the tier pool
323 # tier pool currently set to 'writeback'
324 rados -p cache put /etc/passwd /etc/passwd
325 flush_pg_stats
326 # 1 dirty object in pool 'cache'
327 ceph osd tier cache-mode cache proxy
328 expect_false ceph osd tier cache-mode cache none
329 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
330 ceph osd tier cache-mode cache writeback
331 # remove object from tier pool
332 rados -p cache rm /etc/passwd
333 rados -p cache cache-flush-evict-all
334 flush_pg_stats
335 # no dirty objects in pool 'cache'
336 ceph osd tier cache-mode cache proxy
337 ceph osd tier cache-mode cache none
338 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
339 TRIES=0
340 while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
341 do
342 grep 'currently creating pgs' $TMPFILE
343 TRIES=$(( $TRIES + 1 ))
344 test $TRIES -ne 60
345 sleep 3
346 done
347 expect_false ceph osd pool set cache pg_num 4
348 ceph osd tier cache-mode cache none
349 ceph osd tier set-overlay slow cache
350 expect_false ceph osd tier set-overlay slow cache2
351 expect_false ceph osd tier remove slow cache
352 ceph osd tier remove-overlay slow
353 ceph osd tier set-overlay slow cache2
354 ceph osd tier remove-overlay slow
355 ceph osd tier remove slow cache
356 ceph osd tier add slow2 cache
357 expect_false ceph osd tier set-overlay slow cache
358 ceph osd tier set-overlay slow2 cache
359 ceph osd tier remove-overlay slow2
360 ceph osd tier remove slow2 cache
361 ceph osd tier remove slow cache2
362
363 # make sure a non-empty pool fails
364 rados -p cache2 put /etc/passwd /etc/passwd
365 while ! ceph df | grep cache2 | grep ' 1 ' ; do
366 echo waiting for pg stats to flush
367 sleep 2
368 done
369 expect_false ceph osd tier add slow cache2
370 ceph osd tier add slow cache2 --force-nonempty
371 ceph osd tier remove slow cache2
372
373 ceph osd pool ls | grep cache2
374 ceph osd pool ls -f json-pretty | grep cache2
375 ceph osd pool ls detail | grep cache2
376 ceph osd pool ls detail -f json-pretty | grep cache2
377
378 ceph osd pool delete slow slow --yes-i-really-really-mean-it
379 ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
380 ceph osd pool delete cache cache --yes-i-really-really-mean-it
381 ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
382 }
383
384 function test_tiering_2()
385 {
386 # make sure we can't clobber snapshot state
387 ceph osd pool create snap_base 2
388 ceph osd pool create snap_cache 2
389 ceph osd pool mksnap snap_cache snapname
390 expect_false ceph osd tier add snap_base snap_cache
391 ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
392 ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
393 }
394
395 function test_tiering_3()
396 {
397 # make sure we can't create snapshot on tier
398 ceph osd pool create basex 2
399 ceph osd pool create cachex 2
400 ceph osd tier add basex cachex
401 expect_false ceph osd pool mksnap cachex snapname
402 ceph osd tier remove basex cachex
403 ceph osd pool delete basex basex --yes-i-really-really-mean-it
404 ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
405 }
406
407 function test_tiering_4()
408 {
409 # make sure we can't create an ec pool tier
410 ceph osd pool create eccache 2 2 erasure
411 expect_false ceph osd set-require-min-compat-client bobtail
412 ceph osd pool create repbase 2
413 expect_false ceph osd tier add repbase eccache
414 ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
415 ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
416 }
417
418 function test_tiering_5()
419 {
420 # convenient add-cache command
421 ceph osd pool create slow 2
422 ceph osd pool create cache3 2
423 ceph osd tier add-cache slow cache3 1024000
424 ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
425 ceph osd tier remove slow cache3 2> $TMPFILE || true
426 check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
427 ceph osd tier remove-overlay slow
428 ceph osd tier remove slow cache3
429 ceph osd pool ls | grep cache3
430 ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
431 ! ceph osd pool ls | grep cache3 || exit 1
432 ceph osd pool delete slow slow --yes-i-really-really-mean-it
433 }
434
435 function test_tiering_6()
436 {
437 # check whether add-cache works
438 ceph osd pool create datapool 2
439 ceph osd pool create cachepool 2
440 ceph osd tier add-cache datapool cachepool 1024000
441 ceph osd tier cache-mode cachepool writeback
442 rados -p datapool put object /etc/passwd
443 rados -p cachepool stat object
444 rados -p cachepool cache-flush object
445 rados -p datapool stat object
446 ceph osd tier remove-overlay datapool
447 ceph osd tier remove datapool cachepool
448 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
449 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
450 }
451
452 function test_tiering_7()
453 {
454 # protection against pool removal when used as tiers
455 ceph osd pool create datapool 2
456 ceph osd pool create cachepool 2
457 ceph osd tier add-cache datapool cachepool 1024000
458 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
459 check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
460 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
461 check_response "EBUSY: pool 'datapool' has tiers cachepool"
462 ceph osd tier remove-overlay datapool
463 ceph osd tier remove datapool cachepool
464 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
465 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
466 }
467
468 function test_tiering_8()
469 {
470 ## check health check
471 ceph osd set notieragent
472 ceph osd pool create datapool 2
473 ceph osd pool create cache4 2
474 ceph osd tier add-cache datapool cache4 1024000
475 ceph osd tier cache-mode cache4 writeback
476 tmpfile=$(mktemp|grep tmp)
477 dd if=/dev/zero of=$tmpfile bs=4K count=1
478 ceph osd pool set cache4 target_max_objects 200
479 ceph osd pool set cache4 target_max_bytes 1000000
480 rados -p cache4 put foo1 $tmpfile
481 rados -p cache4 put foo2 $tmpfile
482 rm -f $tmpfile
483 flush_pg_stats
484 ceph df | grep datapool | grep ' 2 '
485 ceph osd tier remove-overlay datapool
486 ceph osd tier remove datapool cache4
487 ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
488 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
489 ceph osd unset notieragent
490 }
491
492 function test_tiering_9()
493 {
494 # make sure 'tier remove' behaves as we expect
495 # i.e., removing a tier from a pool that's not its base pool only
496 # results in a 'pool foo is now (or already was) not a tier of bar'
497 #
498 ceph osd pool create basepoolA 2
499 ceph osd pool create basepoolB 2
500 poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
501 poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
502
503 ceph osd pool create cache5 2
504 ceph osd pool create cache6 2
505 ceph osd tier add basepoolA cache5
506 ceph osd tier add basepoolB cache6
507 ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
508 ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
509 ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
510 ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
511
512 ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
513 ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
514 ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
515 ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
516
517 ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
518 ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
519
520 ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
521 ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
522 ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
523 ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
524 }
525
526 function test_auth()
527 {
528 ceph auth add client.xx mon allow osd "allow *"
529 ceph auth export client.xx >client.xx.keyring
530 ceph auth add client.xx -i client.xx.keyring
531 rm -f client.xx.keyring
532 ceph auth list | grep client.xx
533 ceph auth get client.xx | grep caps | grep mon
534 ceph auth get client.xx | grep caps | grep osd
535 ceph auth get-key client.xx
536 ceph auth print-key client.xx
537 ceph auth print_key client.xx
538 ceph auth caps client.xx osd "allow rw"
539 expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
540 ceph auth get client.xx | grep osd | grep "allow rw"
541 ceph auth export | grep client.xx
542 ceph auth export -o authfile
543 ceph auth import -i authfile
544 ceph auth export -o authfile2
545 diff authfile authfile2
546 rm authfile authfile2
547 ceph auth del client.xx
548 expect_false ceph auth get client.xx
549
550 # (almost) interactive mode
551 echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
552 ceph auth get client.xx
553 # script mode
554 echo 'auth del client.xx' | ceph
555 expect_false ceph auth get client.xx
556
557 #
558 # get / set auid
559 #
560 local auid=444
561 ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
562 expect_false ceph auth import --in-file TEST-keyring
563 rm TEST-keyring
564 ceph-authtool --create-keyring --name client.TEST --gen-key --cap mon "allow r" --set-uid $auid TEST-keyring
565 ceph auth import --in-file TEST-keyring
566 rm TEST-keyring
567 ceph auth get client.TEST > $TMPFILE
568 check_response "auid = $auid"
569 ceph --format json-pretty auth get client.TEST > $TMPFILE
570 check_response '"auid": '$auid
571 ceph auth list > $TMPFILE
572 check_response "auid: $auid"
573 ceph --format json-pretty auth list > $TMPFILE
574 check_response '"auid": '$auid
575 ceph auth del client.TEST
576 }
577
578 function test_auth_profiles()
579 {
580 ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
581 mgr 'allow profile read-only'
582 ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
583 mgr 'allow profile read-write'
584 ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
585
586 ceph auth export > client.xx.keyring
587
588 # read-only is allowed all read-only commands (auth excluded)
589 ceph -n client.xx-profile-ro -k client.xx.keyring status
590 ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
591 ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
592 ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
593 ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
594 # read-only gets access denied for rw commands or auth commands
595 ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
596 check_response "EACCES: access denied"
597 ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
598 check_response "EACCES: access denied"
599 ceph -n client.xx-profile-ro -k client.xx.keyring auth list >& $TMPFILE || true
600 check_response "EACCES: access denied"
601
602 # read-write is allowed for all read-write commands (except auth)
603 ceph -n client.xx-profile-rw -k client.xx.keyring status
604 ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
605 ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
606 ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
607 ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
608 ceph -n client.xx-profile-rw -k client.xx.keyring log foo
609 ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
610 ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
611 # read-write gets access denied for auth commands
612 ceph -n client.xx-profile-rw -k client.xx.keyring auth list >& $TMPFILE || true
613 check_response "EACCES: access denied"
614
615 # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
616 ceph -n client.xx-profile-rd -k client.xx.keyring auth list
617 ceph -n client.xx-profile-rd -k client.xx.keyring auth export
618 ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
619 ceph -n client.xx-profile-rd -k client.xx.keyring status
620 ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
621 check_response "EACCES: access denied"
622 ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
623 check_response "EACCES: access denied"
624 # read-only 'mon' subsystem commands are allowed
625 ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
626 # but read-write 'mon' commands are not
627 ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
628 check_response "EACCES: access denied"
629 ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
630 check_response "EACCES: access denied"
631 ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
632 check_response "EACCES: access denied"
633 ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
634 check_response "EACCES: access denied"
635
636 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
637 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
638
639 # add a new role-definer with the existing role-definer
640 ceph -n client.xx-profile-rd -k client.xx.keyring \
641 auth add client.xx-profile-rd2 mon 'allow profile role-definer'
642 ceph -n client.xx-profile-rd -k client.xx.keyring \
643 auth export > client.xx.keyring.2
644 # remove old role-definer using the new role-definer
645 ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
646 auth del client.xx-profile-rd
647 # remove the remaining role-definer with admin
648 ceph auth del client.xx-profile-rd2
649 rm -f client.xx.keyring client.xx.keyring.2
650 }
651
652 function test_mon_caps()
653 {
654 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
655 chmod +r $TEMP_DIR/ceph.client.bug.keyring
656 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
657 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
658
659 rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
660 check_response "Permission denied"
661
662 rm -rf $TEMP_DIR/ceph.client.bug.keyring
663 ceph auth del client.bug
664 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
665 chmod +r $TEMP_DIR/ceph.client.bug.keyring
666 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
667 ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
668 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
669 rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
670 check_response "Permission denied"
671 }
672
673 function test_mon_misc()
674 {
675 # with and without verbosity
676 ceph osd dump | grep '^epoch'
677 ceph --concise osd dump | grep '^epoch'
678
679 ceph osd df | grep 'MIN/MAX VAR'
680
681 # df
682 ceph df > $TMPFILE
683 grep GLOBAL $TMPFILE
684 grep -v DIRTY $TMPFILE
685 ceph df detail > $TMPFILE
686 grep DIRTY $TMPFILE
687 ceph df --format json > $TMPFILE
688 grep 'total_bytes' $TMPFILE
689 grep -v 'dirty' $TMPFILE
690 ceph df detail --format json > $TMPFILE
691 grep 'rd_bytes' $TMPFILE
692 grep 'dirty' $TMPFILE
693 ceph df --format xml | grep '<total_bytes>'
694 ceph df detail --format xml | grep '<rd_bytes>'
695
696 ceph fsid
697 ceph health
698 ceph health detail
699 ceph health --format json-pretty
700 ceph health detail --format xml-pretty
701
702 ceph node ls
703 for t in mon osd mds ; do
704 ceph node ls $t
705 done
706
707 ceph_watch_start
708 mymsg="this is a test log message $$.$(date)"
709 ceph log "$mymsg"
710 ceph log last | grep "$mymsg"
711 ceph log last 100 | grep "$mymsg"
712 ceph_watch_wait "$mymsg"
713
714 ceph mgr dump
715
716 ceph mon metadata a
717 ceph mon metadata
718 ceph mon count-metadata ceph_version
719 ceph mon versions
720
721 ceph node ls
722 }
723
724 function check_mds_active()
725 {
726 fs_name=$1
727 ceph fs get $fs_name | grep active
728 }
729
730 function wait_mds_active()
731 {
732 fs_name=$1
733 max_run=300
734 for i in $(seq 1 $max_run) ; do
735 if ! check_mds_active $fs_name ; then
736 echo "waiting for an active MDS daemon ($i/$max_run)"
737 sleep 5
738 else
739 break
740 fi
741 done
742 check_mds_active $fs_name
743 }
744
745 function get_mds_gids()
746 {
747 fs_name=$1
748 ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
749 }
750
751 function fail_all_mds()
752 {
753 fs_name=$1
754 ceph fs set $fs_name cluster_down true
755 mds_gids=$(get_mds_gids $fs_name)
756 for mds_gid in $mds_gids ; do
757 ceph mds fail $mds_gid
758 done
759 if check_mds_active $fs_name ; then
760 echo "An active MDS remains, something went wrong"
761 ceph fs get $fs_name
762 exit -1
763 fi
764
765 }
766
767 function remove_all_fs()
768 {
769 existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
770 for fs_name in $existing_fs ; do
771 echo "Removing fs ${fs_name}..."
772 fail_all_mds $fs_name
773 echo "Removing existing filesystem '${fs_name}'..."
774 ceph fs rm $fs_name --yes-i-really-mean-it
775 echo "Removed '${fs_name}'."
776 done
777 }
778
779 # So that tests requiring MDS can skip if one is not configured
780 # in the cluster at all
781 function mds_exists()
782 {
783 ceph auth list | grep "^mds"
784 }
785
786 # some of the commands are just not idempotent.
787 function without_test_dup_command()
788 {
789 if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
790 "$@"
791 else
792 local saved=${CEPH_CLI_TEST_DUP_COMMAND}
793 unset CEPH_CLI_TEST_DUP_COMMAND
794 "$@"
795 CEPH_CLI_TEST_DUP_COMMAND=${saved}
796 fi
797 }
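# Illustrative usage (a comment-only sketch): run a non-idempotent command once,
# even when CEPH_CLI_TEST_DUP_COMMAND would normally repeat it, e.g.
#   without_test_dup_command ceph tell mds.0 respawn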
798
799 function test_mds_tell()
800 {
801 local FS_NAME=cephfs
802 if ! mds_exists ; then
803 echo "Skipping test, no MDS found"
804 return
805 fi
806
807 remove_all_fs
808 ceph osd pool create fs_data 10
809 ceph osd pool create fs_metadata 10
810 ceph fs new $FS_NAME fs_metadata fs_data
811 wait_mds_active $FS_NAME
812
813 # Test injectargs by GID
814 old_mds_gids=$(get_mds_gids $FS_NAME)
815 echo Old GIDs: $old_mds_gids
816
817 for mds_gid in $old_mds_gids ; do
818 ceph tell mds.$mds_gid injectargs "--debug-mds 20"
819 done
820 expect_false ceph tell mds.a injectargs mds_max_file_recover -1
821
822 # Test respawn by rank
823 without_test_dup_command ceph tell mds.0 respawn
824 new_mds_gids=$old_mds_gids
825 while [ $new_mds_gids -eq $old_mds_gids ] ; do
826 sleep 5
827 new_mds_gids=$(get_mds_gids $FS_NAME)
828 done
829 echo New GIDs: $new_mds_gids
830
831 # Test respawn by ID
832 without_test_dup_command ceph tell mds.a respawn
833 new_mds_gids=$old_mds_gids
834 while [ $new_mds_gids -eq $old_mds_gids ] ; do
835 sleep 5
836 new_mds_gids=$(get_mds_gids $FS_NAME)
837 done
838 echo New GIDs: $new_mds_gids
839
840 remove_all_fs
841 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
842 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
843 }
844
845 function test_mon_mds()
846 {
847 local FS_NAME=cephfs
848 remove_all_fs
849
850 ceph osd pool create fs_data 10
851 ceph osd pool create fs_metadata 10
852 ceph fs new $FS_NAME fs_metadata fs_data
853
854 ceph fs set $FS_NAME cluster_down true
855 ceph fs set $FS_NAME cluster_down false
856
857 # Legacy commands, act on default fs
858 ceph mds cluster_down
859 ceph mds cluster_up
860
861 ceph mds compat rm_incompat 4
862 ceph mds compat rm_incompat 4
863
864 # We don't want any MDSs to be up, their activity can interfere with
865 # the "current_epoch + 1" checking below if they're generating updates
866 fail_all_mds $FS_NAME
867
868 ceph mds compat show
869 expect_false ceph mds deactivate 2
870 ceph mds dump
871 ceph fs dump
872 ceph fs get $FS_NAME
873 for mds_gid in $(get_mds_gids $FS_NAME) ; do
874 ceph mds metadata $mds_gid
875 done
876 ceph mds metadata
877 ceph mds versions
878 ceph mds count-metadata os
879
880 # XXX mds fail, but how do you undo it?
881 mdsmapfile=$TEMP_DIR/mdsmap.$$
882 current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
883 [ -s $mdsmapfile ]
884 rm $mdsmapfile
885
886 ceph osd pool create data2 10
887 ceph osd pool create data3 10
888 data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
889 data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
890 ceph mds add_data_pool $data2_pool
891 ceph mds add_data_pool $data3_pool
892 ceph mds add_data_pool 100 >& $TMPFILE || true
893 check_response "Error ENOENT"
894 ceph mds add_data_pool foobarbaz >& $TMPFILE || true
895 check_response "Error ENOENT"
896 ceph mds remove_data_pool $data2_pool
897 ceph mds remove_data_pool $data3_pool
898 ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
899 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
900 expect_false ceph mds set_max_mds 4
901 ceph mds set allow_multimds true --yes-i-really-mean-it
902 ceph mds set_max_mds 4
903 ceph mds set_max_mds 3
904 ceph mds set_max_mds 256
905 expect_false ceph mds set_max_mds 257
906 ceph mds set max_mds 4
907 ceph mds set max_mds 256
908 expect_false ceph mds set max_mds 257
909 expect_false ceph mds set max_mds asdf
910 expect_false ceph mds set inline_data true
911 ceph mds set inline_data true --yes-i-really-mean-it
912 ceph mds set inline_data yes --yes-i-really-mean-it
913 ceph mds set inline_data 1 --yes-i-really-mean-it
914 expect_false ceph mds set inline_data --yes-i-really-mean-it
915 ceph mds set inline_data false
916 ceph mds set inline_data no
917 ceph mds set inline_data 0
918 expect_false ceph mds set inline_data asdf
919 ceph mds set max_file_size 1048576
920 expect_false ceph mds set max_file_size 123asdf
921
922 expect_false ceph mds set allow_new_snaps
923 expect_false ceph mds set allow_new_snaps true
924 ceph mds set allow_new_snaps true --yes-i-really-mean-it
925 ceph mds set allow_new_snaps 0
926 ceph mds set allow_new_snaps false
927 ceph mds set allow_new_snaps no
928 expect_false ceph mds set allow_new_snaps taco
929
930 # we should never be able to add EC pools as data or metadata pools
931 # create an ec-pool...
932 ceph osd pool create mds-ec-pool 10 10 erasure
933 set +e
934 ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
935 check_response 'erasure-code' $? 22
936 set -e
937 ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
938 data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
939 metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
940
941 fail_all_mds $FS_NAME
942
943 set +e
944 # Check that rmfailed requires confirmation
945 expect_false ceph mds rmfailed 0
946 ceph mds rmfailed 0 --yes-i-really-mean-it
947 set -e
948
949 # Check that `newfs` is no longer permitted
950 expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
951
952 # Check that 'fs reset' runs
953 ceph fs reset $FS_NAME --yes-i-really-mean-it
954
955 # Check that creating a second FS fails by default
956 ceph osd pool create fs_metadata2 10
957 ceph osd pool create fs_data2 10
958 set +e
959 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
960 set -e
961
962 # Check that setting enable_multiple enables creation of second fs
963 ceph fs flag set enable_multiple true --yes-i-really-mean-it
964 ceph fs new cephfs2 fs_metadata2 fs_data2
965
966 # Clean up multi-fs stuff
967 fail_all_mds cephfs2
968 ceph fs rm cephfs2 --yes-i-really-mean-it
969 ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
970 ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
971
972 fail_all_mds $FS_NAME
973
974 # Clean up to enable subsequent fs new tests
975 ceph fs rm $FS_NAME --yes-i-really-mean-it
976
977 set +e
978 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
979 check_response 'erasure-code' $? 22
980 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
981 check_response 'erasure-code' $? 22
982 ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
983 check_response 'erasure-code' $? 22
984 set -e
985
986 # ... now create a cache tier in front of the EC pool...
987 ceph osd pool create mds-tier 2
988 ceph osd tier add mds-ec-pool mds-tier
989 ceph osd tier set-overlay mds-ec-pool mds-tier
990 tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
991
992 # Use of a readonly tier should be forbidden
993 ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
994 set +e
995 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
996 check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
997 set -e
998
999 # Use of a writeback tier should enable FS creation
1000 ceph osd tier cache-mode mds-tier writeback
1001 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
1002
1003 # While a FS exists using the tiered pools, I should not be allowed
1004 # to remove the tier
1005 set +e
1006 ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
1007 check_response 'in use by CephFS' $? 16
1008 ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
1009 check_response 'in use by CephFS' $? 16
1010 set -e
1011
1012 fail_all_mds $FS_NAME
1013 ceph fs rm $FS_NAME --yes-i-really-mean-it
1014
1015 # ... but we should be forbidden from using the cache pool in the FS directly.
1016 set +e
1017 ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
1018 check_response 'in use as a cache tier' $? 22
1019 ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
1020 check_response 'in use as a cache tier' $? 22
1021 ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
1022 check_response 'in use as a cache tier' $? 22
1023 set -e
1024
1025 # Clean up tier + EC pools
1026 ceph osd tier remove-overlay mds-ec-pool
1027 ceph osd tier remove mds-ec-pool mds-tier
1028
1029 # Create a FS using the 'cache' pool now that it's no longer a tier
1030 ceph fs new $FS_NAME fs_metadata mds-tier --force
1031
1032 # We should be forbidden from using this pool as a tier now that
1033 # it's in use for CephFS
1034 set +e
1035 ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
1036 check_response 'in use by CephFS' $? 16
1037 set -e
1038
1039 fail_all_mds $FS_NAME
1040 ceph fs rm $FS_NAME --yes-i-really-mean-it
1041
1042 # We should be permitted to use an EC pool with overwrites enabled
1043 # as the data pool...
1044 ceph osd pool set mds-ec-pool allow_ec_overwrites true
1045 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1046 fail_all_mds $FS_NAME
1047 ceph fs rm $FS_NAME --yes-i-really-mean-it
1048
1049 # ...but not as the metadata pool
1050 set +e
1051 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1052 check_response 'erasure-code' $? 22
1053 set -e
1054
1055 ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
1056
1057 # Create a FS and check that we can subsequently add a cache tier to it
1058 ceph fs new $FS_NAME fs_metadata fs_data --force
1059
1060 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1061 ceph osd tier add fs_metadata mds-tier
1062 ceph osd tier cache-mode mds-tier writeback
1063 ceph osd tier set-overlay fs_metadata mds-tier
1064
1065 # Removing tier should be permitted because the underlying pool is
1066 # replicated (#11504 case)
1067 ceph osd tier cache-mode mds-tier proxy
1068 ceph osd tier remove-overlay fs_metadata
1069 ceph osd tier remove fs_metadata mds-tier
1070 ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
1071
1072 # Clean up FS
1073 fail_all_mds $FS_NAME
1074 ceph fs rm $FS_NAME --yes-i-really-mean-it
1075
1076
1077
1078 ceph mds stat
1079 # ceph mds tell mds.a getmap
1080 # ceph mds rm
1081 # ceph mds rmfailed
1082 # ceph mds set_state
1083 # ceph mds stop
1084
1085 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
1086 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
1087 }
1088
1089 function test_mon_mds_metadata()
1090 {
1091 local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
1092 test "$nmons" -gt 0
1093
1094 ceph mds dump |
1095 sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
1096 while read gid id rank; do
1097 ceph mds metadata ${gid} | grep '"hostname":'
1098 ceph mds metadata ${id} | grep '"hostname":'
1099 ceph mds metadata ${rank} | grep '"hostname":'
1100
1101 local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
1102 test "$n" -eq "$nmons"
1103 done
1104
1105 expect_false ceph mds metadata UNKNOWN
1106 }
1107
1108 function test_mon_mon()
1109 {
1110 # print help message
1111 ceph --help mon
1112 # no mon add/remove
1113 ceph mon dump
1114 ceph mon getmap -o $TEMP_DIR/monmap.$$
1115 [ -s $TEMP_DIR/monmap.$$ ]
1116 # ceph mon tell
1117 ceph mon_status
1118
1119 # test mon features
1120 ceph mon feature list
1121 ceph mon feature set kraken --yes-i-really-mean-it
1122 expect_false ceph mon feature set abcd
1123 expect_false ceph mon feature set abcd --yes-i-really-mean-it
1124 }
1125
1126 function gen_secrets_file()
1127 {
1128 # let's assume we can have the following types
1129 # all - generates both cephx and lockbox, with mock dm-crypt key
1130 # cephx - only cephx
1131 # no_cephx - lockbox and dm-crypt, no cephx
1132 # no_lockbox - dm-crypt and cephx, no lockbox
1133 # empty - empty file
1134 # empty_json - correct json, empty map
1135 # bad_json - bad json :)
1136 #
1137 local t=$1
1138 if [[ -z "$t" ]]; then
1139 t="all"
1140 fi
1141
1142 fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
1143 echo $fn
1144 if [[ "$t" == "empty" ]]; then
1145 return 0
1146 fi
1147
1148 echo "{" > $fn
1149 if [[ "$t" == "bad_json" ]]; then
1150 echo "asd: ; }" >> $fn
1151 return 0
1152 elif [[ "$t" == "empty_json" ]]; then
1153 echo "}" >> $fn
1154 return 0
1155 fi
1156
1157 cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
1158 lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
1159 dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
1160
1161 if [[ "$t" == "all" ]]; then
1162 echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
1163 elif [[ "$t" == "cephx" ]]; then
1164 echo "$cephx_secret" >> $fn
1165 elif [[ "$t" == "no_cephx" ]]; then
1166 echo "$lb_secret,$dmcrypt_key" >> $fn
1167 elif [[ "$t" == "no_lockbox" ]]; then
1168 echo "$cephx_secret,$dmcrypt_key" >> $fn
1169 else
1170 echo "unknown gen_secrets_file() type \'$fn\'"
1171 return 1
1172 fi
1173 echo "}" >> $fn
1174 return 0
1175 }
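# Illustrative usage (a comment-only sketch, mirroring test_mon_osd_create_destroy below):
#   all_secrets=$(gen_secrets_file "all")
#   ceph osd new $(uuidgen) -i $all_secrets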
1176
1177 function test_mon_osd_create_destroy()
1178 {
1179 ceph osd new 2>&1 | grep 'EINVAL'
1180 ceph osd new '' -1 2>&1 | grep 'EINVAL'
1181 ceph osd new '' 10 2>&1 | grep 'EINVAL'
1182
1183 old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1184
1185 old_osds=$(ceph osd ls)
1186 num_osds=$(ceph osd ls | wc -l)
1187
1188 uuid=$(uuidgen)
1189 id=$(ceph osd new $uuid 2>/dev/null)
1190
1191 for i in $old_osds; do
1192 [[ "$i" != "$id" ]]
1193 done
1194
1195 ceph osd find $id
1196
1197 id2=`ceph osd new $uuid 2>/dev/null`
1198
1199 [[ $id2 == $id ]]
1200
1201 ceph osd new $uuid $id
1202
1203 id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1204 ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
1205
1206 uuid2=$(uuidgen)
1207 id2=$(ceph osd new $uuid2)
1208 ceph osd find $id2
1209 [[ "$id2" != "$id" ]]
1210
1211 ceph osd new $uuid $id2 2>&1 | grep EEXIST
1212 ceph osd new $uuid2 $id2
1213
1214 # test with secrets
1215 empty_secrets=$(gen_secrets_file "empty")
1216 empty_json=$(gen_secrets_file "empty_json")
1217 all_secrets=$(gen_secrets_file "all")
1218 cephx_only=$(gen_secrets_file "cephx")
1219 no_cephx=$(gen_secrets_file "no_cephx")
1220 no_lockbox=$(gen_secrets_file "no_lockbox")
1221 bad_json=$(gen_secrets_file "bad_json")
1222
1223 # empty secrets should be idempotent
1224 new_id=$(ceph osd new $uuid $id -i $empty_secrets)
1225 [[ "$new_id" == "$id" ]]
1226
1227 # empty json, thus empty secrets
1228 new_id=$(ceph osd new $uuid $id -i $empty_json)
1229 [[ "$new_id" == "$id" ]]
1230
1231 ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
1232
1233 ceph osd rm $id
1234 ceph osd rm $id2
1235 ceph osd setmaxosd $old_maxosd
1236
1237 ceph osd new $uuid -i $bad_json 2>&1 | grep 'EINVAL'
1238 ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
1239 ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
1240
1241 osds=$(ceph osd ls)
1242 id=$(ceph osd new $uuid -i $all_secrets)
1243 for i in $osds; do
1244 [[ "$i" != "$id" ]]
1245 done
1246
1247 ceph osd find $id
1248
1249 # validate secrets and dm-crypt are set
1250 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1251 s=$(cat $all_secrets | jq '.cephx_secret')
1252 [[ $k == $s ]]
1253 k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
1254 jq '.key')
1255 s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
1256 [[ $k == $s ]]
1257 ceph config-key exists dm-crypt/osd/$uuid/luks
1258
1259 osds=$(ceph osd ls)
1260 id2=$(ceph osd new $uuid2 -i $cephx_only)
1261 for i in $osds; do
1262 [[ "$i" != "$id2" ]]
1263 done
1264
1265 ceph osd find $id2
1266 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1267 s=$(cat $all_secrets | jq '.cephx_secret')
1268 [[ $k == $s ]]
1269 expect_false ceph auth get-key client.osd-lockbox.$uuid2
1270 expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
1271
1272 ceph osd destroy osd.$id2 --yes-i-really-mean-it
1273 ceph osd destroy $id2 --yes-i-really-mean-it
1274 ceph osd find $id2
1275 expect_false ceph auth get-key osd.$id2
1276 ceph osd dump | grep osd.$id2 | grep destroyed
1277
1278 id3=$id2
1279 uuid3=$(uuidgen)
1280 ceph osd new $uuid3 $id3 -i $all_secrets
1281 ceph osd dump | grep osd.$id3 | expect_false grep destroyed
1282 ceph auth get-key client.osd-lockbox.$uuid3
1283 ceph auth get-key osd.$id3
1284 ceph config-key exists dm-crypt/osd/$uuid3/luks
1285
1286 ceph osd purge osd.$id3 --yes-i-really-mean-it
1287 expect_false ceph osd find $id2
1288 expect_false ceph auth get-key osd.$id2
1289 expect_false ceph auth get-key client.osd-lockbox.$uuid3
1290 expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
1291 ceph osd purge osd.$id3 --yes-i-really-mean-it
1292 ceph osd purge osd.$id3 --yes-i-really-mean-it
1293
1294 ceph osd purge osd.$id --yes-i-really-mean-it
1295 expect_false ceph osd find $id
1296 expect_false ceph auth get-key osd.$id
1297 expect_false ceph auth get-key client.osd-lockbox.$uuid
1298 expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
1299
1300 rm $empty_secrets $empty_json $all_secrets $cephx_only \
1301 $no_cephx $no_lockbox $bad_json
1302
1303 for i in $(ceph osd ls); do
1304 [[ "$i" != "$id" ]]
1305 [[ "$i" != "$id2" ]]
1306 [[ "$i" != "$id3" ]]
1307 done
1308
1309 [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
1310 ceph osd setmaxosd $old_maxosd
1311
1312 }
1313
1314 function test_mon_osd()
1315 {
1316 #
1317 # osd blacklist
1318 #
1319 bl=192.168.0.1:0/1000
1320 ceph osd blacklist add $bl
1321 ceph osd blacklist ls | grep $bl
1322 ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
1323 ceph osd dump --format=json-pretty | grep $bl
1324 ceph osd dump | grep "^blacklist $bl"
1325 ceph osd blacklist rm $bl
1326 ceph osd blacklist ls | expect_false grep $bl
1327
1328 bl=192.168.0.1
1329 # test without nonce, invalid nonce
1330 ceph osd blacklist add $bl
1331 ceph osd blacklist ls | grep $bl
1332 ceph osd blacklist rm $bl
1333 ceph osd blacklist ls | expect_false grep $bl
1334 expect_false "ceph osd blacklist $bl/-1"
1335 expect_false "ceph osd blacklist $bl/foo"
1336
1337 # test with wrong address
1338 expect_false "ceph osd blacklist 1234.56.78.90/100"
1339
1340 # Test `clear`
1341 ceph osd blacklist add $bl
1342 ceph osd blacklist ls | grep $bl
1343 ceph osd blacklist clear
1344 ceph osd blacklist ls | expect_false grep $bl
1345
1346 #
1347 # osd crush
1348 #
1349 ceph osd crush reweight-all
1350 ceph osd crush tunables legacy
1351 ceph osd crush show-tunables | grep argonaut
1352 ceph osd crush tunables bobtail
1353 ceph osd crush show-tunables | grep bobtail
1354 ceph osd crush tunables firefly
1355 ceph osd crush show-tunables | grep firefly
1356
1357 ceph osd crush set-tunable straw_calc_version 0
1358 ceph osd crush get-tunable straw_calc_version | grep 0
1359 ceph osd crush set-tunable straw_calc_version 1
1360 ceph osd crush get-tunable straw_calc_version | grep 1
1361
1362 #
1363 # require-min-compat-client
1364 expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
1365 ceph osd set-require-min-compat-client luminous
1366 ceph osd dump | grep 'require_min_compat_client luminous'
1367
1368 #
1369 # osd scrub
1370 #
1371 # how do I tell when these are done?
1372 ceph osd scrub 0
1373 ceph osd deep-scrub 0
1374 ceph osd repair 0
1375
1376 for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
1377 do
1378 ceph osd set $f
1379 ceph osd unset $f
1380 done
1381 expect_false ceph osd unset sortbitwise # cannot be unset
1382 expect_false ceph osd set bogus
1383 expect_false ceph osd unset bogus
1384 ceph osd require-osd-release luminous
1385 # can't lower (or use new command for anything but jewel)
1386 expect_false ceph osd require-osd-release jewel
1387 # these are no-ops but should succeed.
1388 ceph osd set require_jewel_osds
1389 ceph osd set require_kraken_osds
1390 expect_false ceph osd unset require_jewel_osds
1391
1392 ceph osd set noup
1393 ceph osd down 0
1394 ceph osd dump | grep 'osd.0 down'
1395 ceph osd unset noup
1396 max_run=1000
1397 for ((i=0; i < $max_run; i++)); do
1398 if ! ceph osd dump | grep 'osd.0 up'; then
1399 echo "waiting for osd.0 to come back up ($i/$max_run)"
1400 sleep 1
1401 else
1402 break
1403 fi
1404 done
1405 ceph osd dump | grep 'osd.0 up'
1406
1407 ceph osd dump | grep 'osd.0 up'
1408 # ceph osd find expects the OsdName, so both ints and osd.n should work.
1409 ceph osd find 1
1410 ceph osd find osd.1
1411 expect_false ceph osd find osd.xyz
1412 expect_false ceph osd find xyz
1413 expect_false ceph osd find 0.1
1414 ceph --format plain osd find 1 # falls back to json-pretty
1415 if [ `uname` == Linux ]; then
1416 ceph osd metadata 1 | grep 'distro'
1417 ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
1418 fi
1419 ceph osd out 0
1420 ceph osd dump | grep 'osd.0.*out'
1421 ceph osd in 0
1422 ceph osd dump | grep 'osd.0.*in'
1423 ceph osd find 0
1424
1425 ceph osd add-nodown 0 1
1426 ceph health detail | grep 'nodown osd(s).*0.*1'
1427 ceph osd rm-nodown 0 1
1428 ! ceph health detail | grep 'nodown osd(s).*0.*1'
1429
1430 ceph osd out 0 # so we can mark it as noin later
1431 ceph osd add-noin 0
1432 ceph health detail | grep 'noin osd(s).*0'
1433 ceph osd rm-noin 0
1434 ! ceph health detail | grep 'noin osd(s).*0'
1435 ceph osd in 0
1436
1437 ceph osd add-noout 0
1438 ceph health detail | grep 'noout osd(s).*0'
1439 ceph osd rm-noout 0
1440 ! ceph health detail | grep 'noout osd(s).*0'
1441
1442 # test osd id parse
1443 expect_false ceph osd add-noup 797er
1444 expect_false ceph osd add-nodown u9uwer
1445 expect_false ceph osd add-noin 78~15
1446 expect_false ceph osd add-noout 0 all 1
1447
1448 expect_false ceph osd rm-noup 1234567
1449 expect_false ceph osd rm-nodown fsadf7
1450 expect_false ceph osd rm-noin 0 1 any
1451 expect_false ceph osd rm-noout 790-fd
1452
1453 ids=`ceph osd ls-tree default`
1454 for osd in $ids
1455 do
1456 ceph osd add-nodown $osd
1457 ceph osd add-noout $osd
1458 done
1459 ceph -s | grep 'nodown osd(s)'
1460 ceph -s | grep 'noout osd(s)'
1461 ceph osd rm-nodown any
1462 ceph osd rm-noout all
1463 ! ceph -s | grep 'nodown osd(s)'
1464 ! ceph -s | grep 'noout osd(s)'
1465
1466 # make sure mark out preserves weight
1467 ceph osd reweight osd.0 .5
1468 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1469 ceph osd out 0
1470 ceph osd in 0
1471 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1472
1473 f=$TEMP_DIR/map.$$; ceph osd getmap -o $f
1474 [ -s $f ]
1475 rm $f
1476 save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1477 [ "$save" -gt 0 ]
1478 ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
1479 ceph osd setmaxosd 10
1480 ceph osd getmaxosd | grep 'max_osd = 10'
1481 ceph osd setmaxosd $save
1482 ceph osd getmaxosd | grep "max_osd = $save"
1483
1484 for id in `ceph osd ls` ; do
1485 retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
1486 done
1487
1488 ceph osd rm 0 2>&1 | grep 'EBUSY'
1489
1490 local old_osds=$(echo $(ceph osd ls))
1491 id=`ceph osd create`
1492 ceph osd find $id
1493 ceph osd lost $id --yes-i-really-mean-it
1494 expect_false ceph osd setmaxosd $id
1495 local new_osds=$(echo $(ceph osd ls))
1496 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1497 ceph osd rm $id
1498 done
1499
1500 uuid=`uuidgen`
1501 id=`ceph osd create $uuid`
1502 id2=`ceph osd create $uuid`
1503 [ "$id" = "$id2" ]
1504 ceph osd rm $id
1505
1506 ceph --help osd
1507
1508 # reset max_osd.
1509 ceph osd setmaxosd $id
1510 ceph osd getmaxosd | grep "max_osd = $save"
1511 local max_osd=$save
1512
1513 ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
1514 ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
1515
1516 id=`ceph osd create $uuid $max_osd`
1517 [ "$id" = "$max_osd" ]
1518 ceph osd find $id
1519 max_osd=$((max_osd + 1))
1520 ceph osd getmaxosd | grep "max_osd = $max_osd"
1521
1522 ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
1523 ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
1524 id2=`ceph osd create $uuid`
1525 [ "$id" = "$id2" ]
1526 id2=`ceph osd create $uuid $id`
1527 [ "$id" = "$id2" ]
1528
1529 uuid=`uuidgen`
1530 local gap_start=$max_osd
1531 id=`ceph osd create $uuid $((gap_start + 100))`
1532 [ "$id" = "$((gap_start + 100))" ]
1533 max_osd=$((id + 1))
1534 ceph osd getmaxosd | grep "max_osd = $max_osd"
1535
1536 ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
1537
1538 #
1539 # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
1540 # is repeated and consumes two osd id, not just one.
1541 #
1542 local next_osd
1543 if test "$CEPH_CLI_TEST_DUP_COMMAND" ; then
1544 next_osd=$((gap_start + 1))
1545 else
1546 next_osd=$gap_start
1547 fi
1548 id=`ceph osd create`
1549 [ "$id" = "$next_osd" ]
1550
1551 next_osd=$((id + 1))
1552 id=`ceph osd create $(uuidgen)`
1553 [ "$id" = "$next_osd" ]
1554
1555 next_osd=$((id + 1))
1556 id=`ceph osd create $(uuidgen) $next_osd`
1557 [ "$id" = "$next_osd" ]
1558
1559 local new_osds=$(echo $(ceph osd ls))
1560 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1561 [ $id -ge $save ]
1562 ceph osd rm $id
1563 done
1564 ceph osd setmaxosd $save
1565
1566 ceph osd ls
1567 ceph osd pool create data 10
1568 ceph osd lspools | grep data
1569 ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
1570 ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
1571 ceph osd pool delete data data --yes-i-really-really-mean-it
1572
1573 ceph osd pause
1574 ceph osd dump | grep 'flags.*pauserd,pausewr'
1575 ceph osd unpause
1576
1577 ceph osd tree
1578 ceph osd tree up
1579 ceph osd tree down
1580 ceph osd tree in
1581 ceph osd tree out
1582 ceph osd tree up in
1583 ceph osd tree up out
1584 ceph osd tree down in
1585 ceph osd tree down out
1586 ceph osd tree out down
1587 expect_false ceph osd tree up down
1588 expect_false ceph osd tree in out
1589 expect_false ceph osd tree up foo
1590
1591 ceph osd metadata
1592 ceph osd count-metadata os
1593 ceph osd versions
1594
1595 ceph osd perf
1596 ceph osd blocked-by
1597
1598 ceph osd stat | grep up,
1599 }
1600
1601 function test_mon_crush()
1602 {
1603 f=$TEMP_DIR/map.$$
1604 epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
1605 [ -s $f ]
1606 [ "$epoch" -gt 1 ]
1607 nextepoch=$(( $epoch + 1 ))
1608 echo epoch $epoch nextepoch $nextepoch
1609 rm -f $f.epoch
1610 expect_false ceph osd setcrushmap $nextepoch -i $f
1611 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1612 echo gotepoch $gotepoch
1613 [ "$gotepoch" -eq "$nextepoch" ]
1614 # should be idempotent
1615 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1616 echo epoch $gotepoch
1617 [ "$gotepoch" -eq "$nextepoch" ]
1618 rm $f
1619 }
1620
1621 function test_mon_osd_pool()
1622 {
1623 #
1624 # osd pool
1625 #
1626 ceph osd pool create data 10
1627 ceph osd pool mksnap data datasnap
1628 rados -p data lssnap | grep datasnap
1629 ceph osd pool rmsnap data datasnap
1630 expect_false ceph osd pool rmsnap pool_fake snapshot
1631 ceph osd pool delete data data --yes-i-really-really-mean-it
1632
1633 ceph osd pool create data2 10
1634 ceph osd pool rename data2 data3
1635 ceph osd lspools | grep data3
1636 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
1637
1638 ceph osd pool create replicated 12 12 replicated
1639 ceph osd pool create replicated 12 12 replicated
1640 ceph osd pool create replicated 12 12 # default is replicated
1641 ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
1642 # should fail because the type is not the same
1643 expect_false ceph osd pool create replicated 12 12 erasure
1644 ceph osd lspools | grep replicated
1645 ceph osd pool create ec_test 1 1 erasure
1646 set +e
1647 ceph osd metadata | grep osd_objectstore_type | grep -qc bluestore
1648 if [ $? -eq 0 ]; then
1649 ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
1650 check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
1651 else
1652 ceph osd pool set ec_test allow_ec_overwrites true || return 1
1653 expect_false ceph osd pool set ec_test allow_ec_overwrites false
1654 fi
1655 set -e
1656 ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
1657 ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
1658 }
1659
1660 function test_mon_osd_pool_quota()
1661 {
1662 #
1663 # test osd pool set/get quota
1664 #
1665
1666 # create tmp pool
1667 ceph osd pool create tmp-quota-pool 36
1668 #
1669 # set erroneous quotas
1670 #
1671 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
1672 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
1673 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
1674 #
1675 # set valid quotas
1676 #
1677 ceph osd pool set-quota tmp-quota-pool max_bytes 10
1678 ceph osd pool set-quota tmp-quota-pool max_objects 10M
1679 #
1680 # get quotas
1681 #
1682 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
1683 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10240k objects'
1684 #
1685 # get quotas in json-pretty format
1686 #
1687 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1688 grep '"quota_max_objects":.*10485760'
1689 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1690 grep '"quota_max_bytes":.*10'
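# (illustrative sanity check, not part of the original assertions) '10M' objects
# is parsed with binary units, so the two representations checked above agree:
test $((10 * 1024 * 1024)) -eq 10485760
test $((10485760 / 1024)) -eq 10240 # rendered as '10240k objects'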
1691 #
1692 # reset pool quotas
1693 #
1694 ceph osd pool set-quota tmp-quota-pool max_bytes 0
1695 ceph osd pool set-quota tmp-quota-pool max_objects 0
1696 #
1697 # test N/A quotas
1698 #
1699 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
1700 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
1701 #
1702 # cleanup tmp pool
1703 ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
1704 }
1705
1706 function test_mon_pg()
1707 {
1708 # Make sure we start healthy.
1709 wait_for_health_ok
1710
1711 ceph pg debug unfound_objects_exist
1712 ceph pg debug degraded_pgs_exist
1713 ceph pg deep-scrub 0.0
1714 ceph pg dump
1715 ceph pg dump pgs_brief --format=json
1716 ceph pg dump pgs --format=json
1717 ceph pg dump pools --format=json
1718 ceph pg dump osds --format=json
1719 ceph pg dump sum --format=json
1720 ceph pg dump all --format=json
1721 ceph pg dump pgs_brief osds --format=json
1722 ceph pg dump pools osds pgs_brief --format=json
1723 ceph pg dump_json
1724 ceph pg dump_pools_json
1725 ceph pg dump_stuck inactive
1726 ceph pg dump_stuck unclean
1727 ceph pg dump_stuck stale
1728 ceph pg dump_stuck undersized
1729 ceph pg dump_stuck degraded
1730 ceph pg ls
1731 ceph pg ls 0
1732 ceph pg ls stale
1733 expect_false ceph pg ls scrubq
1734 ceph pg ls active stale repair recovering
1735 ceph pg ls 0 active
1736 ceph pg ls 0 active stale
1737 ceph pg ls-by-primary osd.0
1738 ceph pg ls-by-primary osd.0 0
1739 ceph pg ls-by-primary osd.0 active
1740 ceph pg ls-by-primary osd.0 active stale
1741 ceph pg ls-by-primary osd.0 0 active stale
1742 ceph pg ls-by-osd osd.0
1743 ceph pg ls-by-osd osd.0 0
1744 ceph pg ls-by-osd osd.0 active
1745 ceph pg ls-by-osd osd.0 active stale
1746 ceph pg ls-by-osd osd.0 0 active stale
1747 ceph pg ls-by-pool rbd
1748 ceph pg ls-by-pool rbd active stale
1749 # can't test this...
1750 # ceph pg force_create_pg
1751 ceph pg getmap -o $TEMP_DIR/map.$$
1752 [ -s $TEMP_DIR/map.$$ ]
1753 ceph pg map 0.0 | grep acting
1754 ceph pg repair 0.0
1755 ceph pg scrub 0.0
1756
1757 ceph osd set-full-ratio .962
1758 ceph osd dump | grep '^full_ratio 0.962'
1759 ceph osd set-backfillfull-ratio .912
1760 ceph osd dump | grep '^backfillfull_ratio 0.912'
1761 ceph osd set-nearfull-ratio .892
1762 ceph osd dump | grep '^nearfull_ratio 0.892'
1763
1764 # Check health status
1765 ceph osd set-nearfull-ratio .913
1766 ceph health | grep 'HEALTH_ERR.*Full ratio(s) out of order'
1767 ceph health detail | grep 'backfillfull_ratio (0.912) < nearfull_ratio (0.913), increased'
1768 ceph osd set-nearfull-ratio .892
1769 ceph osd set-backfillfull-ratio .963
1770 ceph health detail | grep 'full_ratio (0.962) < backfillfull_ratio (0.963), increased'
1771 ceph osd set-backfillfull-ratio .912
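# (sketch) after the resets above the ratios are back in the required order,
# nearfull (0.892) < backfillfull (0.912) < full (0.962):
awk 'BEGIN { exit !(0.892 < 0.912 && 0.912 < 0.962) }'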
1772
1773 # Check injected full results
1774 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
1775 wait_for_health "HEALTH_WARN.*1 nearfull osd(s)"
1776 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
1777 wait_for_health "HEALTH_WARN.*1 backfillfull osd(s)"
1778 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
1779 # failsafe and full are the same as far as the monitor is concerned
1780 wait_for_health "HEALTH_ERR.*1 full osd(s)"
1781 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
1782 wait_for_health "HEALTH_ERR.*2 full osd(s)"
1783 ceph health detail | grep "osd.0 is full"
1784 ceph health detail | grep "osd.2 is full"
1785 ceph health detail | grep "osd.1 is backfill full"
1786 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
1787 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
1788 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
1789 wait_for_health_ok
1790
1791 ceph pg stat | grep 'pgs:'
1792 ceph pg 0.0 query
1793 ceph tell 0.0 query
1794 ceph quorum enter
1795 ceph quorum_status
1796 ceph report | grep osd_stats
1797 ceph status
1798 ceph -s
1799
1800 #
1801 # tell osd version
1802 #
1803 ceph tell osd.0 version
1804 expect_false ceph tell osd.9999 version
1805 expect_false ceph tell osd.foo version
1806
1807 # back to pg stuff
1808
1809 ceph tell osd.0 dump_pg_recovery_stats | grep Started
1810
1811 ceph osd reweight 0 0.9
1812 expect_false ceph osd reweight 0 -1
1813 ceph osd reweight osd.0 1
1814
1815 ceph osd primary-affinity osd.0 .9
1816 expect_false ceph osd primary-affinity osd.0 -2
1817 expect_false ceph osd primary-affinity osd.9999 .5
1818 ceph osd primary-affinity osd.0 1
1819
1820 ceph osd pg-temp 0.0 0 1 2
1821 ceph osd pg-temp 0.0 osd.1 osd.0 osd.2
1822 expect_false ceph osd pg-temp asdf qwer
1823 expect_false ceph osd pg-temp 0.0 asdf
1824 expect_false ceph osd pg-temp 0.0
1825
1826 # don't test ceph osd primary-temp for now
1827 }
1828
1829 function test_mon_osd_pool_set()
1830 {
1831 TEST_POOL_GETSET=pool_getset
1832 ceph osd pool create $TEST_POOL_GETSET 1
1833 wait_for_clean
1834 ceph osd pool get $TEST_POOL_GETSET all
1835
1836 for s in pg_num pgp_num size min_size crush_rule; do
1837 ceph osd pool get $TEST_POOL_GETSET $s
1838 done
1839
1840 old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
1841 (( new_size = old_size + 1 ))
1842 ceph osd pool set $TEST_POOL_GETSET size $new_size
1843 ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
1844 ceph osd pool set $TEST_POOL_GETSET size $old_size
1845
1846 ceph osd pool create pool_erasure 1 1 erasure
1847 wait_for_clean
1848 set +e
1849 ceph osd pool set pool_erasure size 4444 2>$TMPFILE
1850 check_response 'not change the size'
1851 set -e
1852 ceph osd pool get pool_erasure erasure_code_profile
1853
1854 auid=5555
1855 ceph osd pool set $TEST_POOL_GETSET auid $auid
1856 ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
1857 ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
1858 ceph osd pool set $TEST_POOL_GETSET auid 0
1859
1860 for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
1861 ceph osd pool set $TEST_POOL_GETSET $flag false
1862 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1863 ceph osd pool set $TEST_POOL_GETSET $flag true
1864 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1865 ceph osd pool set $TEST_POOL_GETSET $flag 1
1866 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1867 ceph osd pool set $TEST_POOL_GETSET $flag 0
1868 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1869 expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
1870 expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
1871 done
1872
1873 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1874 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
1875 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
1876 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
1877 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1878
1879 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1880 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
1881 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
1882 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
1883 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1884
1885 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1886 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
1887 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
1888 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
1889 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1890
1891 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1892 ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
1893 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
1894 ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
1895 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1896
1897 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1898 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
1899 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
1900 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
1901 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1902
1903 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1904 ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
1905 ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
1906 ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
1907 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1908
1909 ceph osd pool set $TEST_POOL_GETSET nopgchange 1
1910 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
1911 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1912 ceph osd pool set $TEST_POOL_GETSET nopgchange 0
1913 ceph osd pool set $TEST_POOL_GETSET pg_num 10
1914 wait_for_clean
1915 ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1916
1917 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
1918 new_pgs=$(($old_pgs+$(ceph osd stat | grep osdmap | awk '{print $3}')*32))
1919 ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
1920 ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
1921 wait_for_clean
1922 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
1923 new_pgs=$(($old_pgs+$(ceph osd stat | grep osdmap | awk '{print $3}')*32+1))
1924 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
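# (note; assumes the default mon_osd_max_split_count of 32) a single pg_num
# increase may add at most 32 new PGs per OSD, so old_pgs + num_osds*32 is
# accepted above while one PG more is rejected.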
1925
1926 ceph osd pool set $TEST_POOL_GETSET nosizechange 1
1927 expect_false ceph osd pool set $TEST_POOL_GETSET size 2
1928 expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
1929 ceph osd pool set $TEST_POOL_GETSET nosizechange 0
1930 ceph osd pool set $TEST_POOL_GETSET size 2
1931 wait_for_clean
1932 ceph osd pool set $TEST_POOL_GETSET min_size 2
1933
1934 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
1935 ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
1936
1937 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
1938 ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
1939
1940 ceph osd pool set $TEST_POOL_GETSET nodelete 1
1941 expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
1942 ceph osd pool set $TEST_POOL_GETSET nodelete 0
1943 ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
1944
1945 ceph osd pool get rbd crush_rule | grep 'crush_rule: '
1946 }
1947
1948 function test_mon_osd_tiered_pool_set()
1949 {
1950 # this is really a tier pool
1951 ceph osd pool create real-tier 2
1952 ceph osd tier add rbd real-tier
1953
1954 ceph osd pool set real-tier hit_set_type explicit_hash
1955 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
1956 ceph osd pool set real-tier hit_set_type explicit_object
1957 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
1958 ceph osd pool set real-tier hit_set_type bloom
1959 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
1960 expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
1961 ceph osd pool set real-tier hit_set_period 123
1962 ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
1963 ceph osd pool set real-tier hit_set_count 12
1964 ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
1965 ceph osd pool set real-tier hit_set_fpp .01
1966 ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
1967
1968 ceph osd pool set real-tier target_max_objects 123
1969 ceph osd pool get real-tier target_max_objects | \
1970 grep 'target_max_objects:[ \t]\+123'
1971 ceph osd pool set real-tier target_max_bytes 123456
1972 ceph osd pool get real-tier target_max_bytes | \
1973 grep 'target_max_bytes:[ \t]\+123456'
1974 ceph osd pool set real-tier cache_target_dirty_ratio .123
1975 ceph osd pool get real-tier cache_target_dirty_ratio | \
1976 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
1977 expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
1978 expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
1979 ceph osd pool set real-tier cache_target_dirty_high_ratio .123
1980 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
1981 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
1982 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
1983 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
1984 ceph osd pool set real-tier cache_target_full_ratio .123
1985 ceph osd pool get real-tier cache_target_full_ratio | \
1986 grep 'cache_target_full_ratio:[ \t]\+0.123'
1987 ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
1988 ceph osd pool set real-tier cache_target_full_ratio 1.0
1989 ceph osd pool set real-tier cache_target_full_ratio 0
1990 expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
1991 ceph osd pool set real-tier cache_min_flush_age 123
1992 ceph osd pool get real-tier cache_min_flush_age | \
1993 grep 'cache_min_flush_age:[ \t]\+123'
1994 ceph osd pool set real-tier cache_min_evict_age 234
1995 ceph osd pool get real-tier cache_min_evict_age | \
1996 grep 'cache_min_evict_age:[ \t]\+234'
1997
1998 # this is not a tier pool
1999 ceph osd pool create fake-tier 2
2000 wait_for_clean
2001
2002 expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
2003 expect_false ceph osd pool get fake-tier hit_set_type
2004 expect_false ceph osd pool set fake-tier hit_set_type explicit_object
2005 expect_false ceph osd pool get fake-tier hit_set_type
2006 expect_false ceph osd pool set fake-tier hit_set_type bloom
2007 expect_false ceph osd pool get fake-tier hit_set_type
2008 expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
2009 expect_false ceph osd pool set fake-tier hit_set_period 123
2010 expect_false ceph osd pool get fake-tier hit_set_period
2011 expect_false ceph osd pool set fake-tier hit_set_count 12
2012 expect_false ceph osd pool get fake-tier hit_set_count
2013 expect_false ceph osd pool set fake-tier hit_set_fpp .01
2014 expect_false ceph osd pool get fake-tier hit_set_fpp
2015
2016 expect_false ceph osd pool set fake-tier target_max_objects 123
2017 expect_false ceph osd pool get fake-tier target_max_objects
2018 expect_false ceph osd pool set fake-tier target_max_bytes 123456
2019 expect_false ceph osd pool get fake-tier target_max_bytes
2020 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
2021 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2022 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
2023 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
2024 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
2025 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2026 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
2027 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
2028 expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
2029 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2030 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
2031 expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
2032 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
2033 expect_false ceph osd pool set fake-tier cache_min_flush_age 123
2034 expect_false ceph osd pool get fake-tier cache_min_flush_age
2035 expect_false ceph osd pool set fake-tier cache_min_evict_age 234
2036 expect_false ceph osd pool get fake-tier cache_min_evict_age
2037
2038 ceph osd tier remove rbd real-tier
2039 ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
2040 ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
2041 }
2042
2043 function test_mon_osd_erasure_code()
2044 {
2045
2046 ceph osd erasure-code-profile set fooprofile a=b c=d
2047 ceph osd erasure-code-profile set fooprofile a=b c=d
2048 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2049 ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
2050 ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2051 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
2052 #
2053 # cleanup by removing profile 'fooprofile'
2054 ceph osd erasure-code-profile rm fooprofile
2055 }
2056
2057 function test_mon_osd_misc()
2058 {
2059 set +e
2060
2061 # expect error about missing 'pool' argument
2062 ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
2063
2064 # expect error about unused argument foo
2065 ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
2066
2067 # expect "not in range" for invalid full ratio
2068 ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22
2069
2070 # expect an error when the overload percentage is not higher than 100
2071 ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
2072
2073 set -e
2074
2075 ceph osd reweight-by-utilization 110
2076 ceph osd reweight-by-utilization 110 .5
2077 expect_false ceph osd reweight-by-utilization 110 0
2078 expect_false ceph osd reweight-by-utilization 110 -0.1
2079 ceph osd test-reweight-by-utilization 110 .5 --no-increasing
2080 ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
2081 expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
2082 expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
2083 ceph osd reweight-by-pg 110
2084 ceph osd test-reweight-by-pg 110 .5
2085 ceph osd reweight-by-pg 110 rbd
2086 ceph osd reweight-by-pg 110 .5 rbd
2087 expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
2088 }
2089
2090 function test_mon_heap_profiler()
2091 {
2092 do_test=1
2093 set +e
2094 # expect 'heap' commands to be correctly parsed
2095 ceph heap stats 2>$TMPFILE
2096 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2097 echo "tcmalloc not enabled; skip heap profiler test"
2098 do_test=0
2099 fi
2100 set -e
2101
2102 [[ $do_test -eq 0 ]] && return 0
2103
2104 ceph heap start_profiler
2105 ceph heap dump
2106 ceph heap stop_profiler
2107 ceph heap release
2108 }
2109
2110 function test_admin_heap_profiler()
2111 {
2112 do_test=1
2113 set +e
2114 # expect 'heap' commands to be correctly parsed
2115 ceph heap stats 2>$TMPFILE
2116 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2117 echo "tcmalloc not enabled; skip heap profiler test"
2118 do_test=0
2119 fi
2120 set -e
2121
2122 [[ $do_test -eq 0 ]] && return 0
2123
2124 local admin_socket=$(get_admin_socket osd.0)
2125
2126 $SUDO ceph --admin-daemon $admin_socket heap start_profiler
2127 $SUDO ceph --admin-daemon $admin_socket heap dump
2128 $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
2129 $SUDO ceph --admin-daemon $admin_socket heap release
2130 }
2131
2132 function test_osd_bench()
2133 {
2134 # test osd bench limits
2135 # Since we should not rely on defaults (they may change over time),
2136 # let's inject some values and perform some simple tests
2137 # max iops: 10 # 10 IOPS
2138 # max throughput: 10485760 # 10MB/s
2139 # max block size: 2097152 # 2MB
2140 # duration: 10 # 10 seconds
2141
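# A quick sketch of the arithmetic behind the limits asserted below (these
# helper variables are illustrative only; the bench calls do not use them):
small_bs_max_count=$((10 * 10 * 4096)) # iops * duration * bs = 409600 bytes
large_bs_max_count=$((10485760 * 10)) # throughput * duration = 104857600 bytes
echo "expected per-call limits: small bs $small_bs_max_count, large bs $large_bs_max_count"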
2142 local args="\
2143 --osd-bench-duration 10 \
2144 --osd-bench-max-block-size 2097152 \
2145 --osd-bench-large-size-max-throughput 10485760 \
2146 --osd-bench-small-size-max-iops 10"
2147 ceph tell osd.0 injectargs ${args## }
2148
2149 # anything with a bs larger than 2097152 must fail
2150 expect_false ceph tell osd.0 bench 1 2097153
2151 # but using 'osd_bench_max_bs' must succeed
2152 ceph tell osd.0 bench 1 2097152
2153
2154 # we assume 1MB as a large bs; anything lower is a small bs
2155 # for a 4096-byte bs, over 10 seconds, we are limited by IOPS
2156 # max count: 409600 (bytes)
2157
2158 # more than max count must not be allowed
2159 expect_false ceph tell osd.0 bench 409601 4096
2160 # but 409600 must succeed
2161 ceph tell osd.0 bench 409600 4096
2162
2163 # for a large bs, we are limited by throughput.
2164 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
2165 # the max count will be (10MB * 10s) = 100MB
2166 # max count: 104857600 (bytes)
2167
2168 # more than max count must not be allowed
2169 expect_false ceph tell osd.0 bench 104857601 2097152
2170 # up to max count must be allowed
2171 ceph tell osd.0 bench 104857600 2097152
2172 }
2173
2174 function test_osd_negative_filestore_merge_threshold()
2175 {
2176 $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
2177 expect_config_value "osd.0" "filestore_merge_threshold" -1
2178 }
2179
2180 function test_mon_tell()
2181 {
2182 ceph tell mon.a version
2183 ceph tell mon.b version
2184 expect_false ceph tell mon.foo version
2185
2186 sleep 1
2187
2188 ceph_watch_start debug
2189 ceph tell mon.a version
2190 ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
2191
2192 ceph_watch_start debug
2193 ceph tell mon.b version
2194 ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
2195 }
2196
2197 function test_mon_crushmap_validation()
2198 {
2199 local map=$TEMP_DIR/map
2200 ceph osd getcrushmap -o $map
2201
2202 local crushtool_path="${TEMP_DIR}/crushtool"
2203 touch "${crushtool_path}"
2204 chmod +x "${crushtool_path}"
2205 local crushtool_path_old=`ceph-conf --show-config-value crushtool`
2206 ceph tell mon.\* injectargs --crushtool "${crushtool_path}"
2207
2208 printf "%s\n" \
2209 "#!/bin/sh
2210 cat > /dev/null
2211 exit 0" > "${crushtool_path}"
2212
2213 ceph osd setcrushmap -i $map
2214
2215 printf "%s\n" \
2216 "#!/bin/sh
2217 cat > /dev/null
2218 exit 1" > "${crushtool_path}"
2219
2220 expect_false ceph osd setcrushmap -i $map
2221
2222 printf "%s\n" \
2223 "#!/bin/sh
2224 cat > /dev/null
2225 echo 'TEST FAIL' >&2
2226 exit 1" > "${crushtool_path}"
2227
2228 expect_false ceph osd setcrushmap -i $map 2> $TMPFILE
2229 check_response "Error EINVAL: Failed crushmap test: TEST FAIL"
2230
2231 local mon_lease=`ceph-conf --show-config-value mon_lease`
2232
2233 test "${mon_lease}" -gt 0
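# (note) the stub crushtool below sleeps just under and just over mon_lease to
# exercise the monitor's crushmap validation timeout, which, as the error
# message checked further down shows, matches the mon_lease value.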
2234
2235 printf "%s\n" \
2236 "#!/bin/sh
2237 cat > /dev/null
2238 sleep $((mon_lease - 1))" > "${crushtool_path}"
2239
2240 ceph osd setcrushmap -i $map
2241
2242 printf "%s\n" \
2243 "#!/bin/sh
2244 cat > /dev/null
2245 sleep $((mon_lease + 1))" > "${crushtool_path}"
2246
2247 expect_false ceph osd setcrushmap -i $map 2> $TMPFILE
2248 check_response "Error EINVAL: Failed crushmap test: ${crushtool_path}: timed out (${mon_lease} sec)"
2249
2250 ceph tell mon.\* injectargs --crushtool "${crushtool_path_old}"
2251
2252 rm -f "${crushtool_path}"
2253 }
2254
2255 function test_mon_ping()
2256 {
2257 ceph ping mon.a
2258 ceph ping mon.b
2259 expect_false ceph ping mon.foo
2260
2261 ceph ping mon.\*
2262 }
2263
2264 function test_mon_deprecated_commands()
2265 {
2266 # current DEPRECATED commands are:
2267 # ceph compact
2268 # ceph scrub
2269 # ceph sync force
2270 #
2271 # Testing should be accomplished by setting
2272 # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
2273 # each one of these commands.
2274
2275 ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
2276 expect_false ceph tell mon.a compact 2> $TMPFILE
2277 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2278
2279 expect_false ceph tell mon.a scrub 2> $TMPFILE
2280 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2281
2282 expect_false ceph tell mon.a sync force 2> $TMPFILE
2283 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2284
2285 ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
2286 }
2287
2288 function test_mon_cephdf_commands()
2289 {
2290 # ceph df detail:
2291 # pool section:
2292 # RAW USED: the raw space consumed per pool (logical bytes used times the replica count)
2293
2294 ceph osd pool create cephdf_for_test 32 32 replicated
2295 ceph osd pool set cephdf_for_test size 2
2296
2297 dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
2298 rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
2299
2300 #wait for update
2301 for i in `seq 1 10`; do
2302 rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
2303 sleep 1
2304 done
2305 # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
2306 # to sync mon with osd
2307 flush_pg_stats
2308 local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
2309 cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
2310 raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`
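# (note) raw_bytes_used reports physical usage, so with size=2 set above it
# should be twice the logical bytes_used, which is what the final comparison
# below asserts.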
2311
2312 ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
2313 rm ./cephdf_for_test
2314
2315 expect_false test $cal_raw_used_size != $raw_used_size
2316 }
2317
2318 function test_mon_tell_help_command()
2319 {
2320 ceph tell mon.a help
2321
2322 # wrong target
2323 expect_false ceph tell mon.zzz help
2324 }
2325
2326 function test_osd_tell_help_command()
2327 {
2328 ceph tell osd.1 help
2329 expect_false ceph tell osd.100 help
2330 }
2331
2332 function test_mds_tell_help_command()
2333 {
2334 local FS_NAME=cephfs
2335 if ! mds_exists ; then
2336 echo "Skipping test, no MDS found"
2337 return
2338 fi
2339
2340 remove_all_fs
2341 ceph osd pool create fs_data 10
2342 ceph osd pool create fs_metadata 10
2343 ceph fs new $FS_NAME fs_metadata fs_data
2344 wait_mds_active $FS_NAME
2345
2346
2347 ceph tell mds.a help
2348 expect_false ceph tell mds.z help
2349
2350 remove_all_fs
2351 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
2352 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
2353 }
2354
2355 function test_mgr_tell_help_command()
2356 {
2357 ceph tell mgr help
2358 }
2359
2360 #
2361 # New tests should be added to the TESTS array below
2362 #
2363 # Individual tests may be run using the '-t <testname>' argument
2364 # The user can specify '-t <testname>' as many times as she wants
2365 #
2366 # Tests will be run in order presented in the TESTS array, or in
2367 # the order specified by the '-t <testname>' options.
2368 #
2369 # '-l' will list all the available test names
2370 # '-h' will show usage
2371 #
2372 # The test maintains backward compatibility: not specifying arguments
2373 # will run all tests following the order they appear in the TESTS array.
2374 #
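# Example invocations (illustrative):
#   ./test.sh -l # list all test names
#   ./test.sh -t mon_osd_pool -t osd_bench # run selected tests, in this order
#   ./test.sh --test-mon --no-sanity-check # run the monitor suite, skip the OSD sanity checks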
2375
2376 set +x
2377 MON_TESTS+=" mon_injectargs"
2378 MON_TESTS+=" mon_injectargs_SI"
2379 for i in `seq 9`; do
2380 MON_TESTS+=" tiering_$i";
2381 done
2382 MON_TESTS+=" auth"
2383 MON_TESTS+=" auth_profiles"
2384 MON_TESTS+=" mon_misc"
2385 MON_TESTS+=" mon_mon"
2386 MON_TESTS+=" mon_osd"
2387 MON_TESTS+=" mon_crush"
2388 MON_TESTS+=" mon_osd_create_destroy"
2389 MON_TESTS+=" mon_osd_pool"
2390 MON_TESTS+=" mon_osd_pool_quota"
2391 MON_TESTS+=" mon_pg"
2392 MON_TESTS+=" mon_osd_pool_set"
2393 MON_TESTS+=" mon_osd_tiered_pool_set"
2394 MON_TESTS+=" mon_osd_erasure_code"
2395 MON_TESTS+=" mon_osd_misc"
2396 MON_TESTS+=" mon_heap_profiler"
2397 MON_TESTS+=" mon_tell"
2398 MON_TESTS+=" mon_crushmap_validation"
2399 MON_TESTS+=" mon_ping"
2400 MON_TESTS+=" mon_deprecated_commands"
2401 MON_TESTS+=" mon_caps"
2402 MON_TESTS+=" mon_cephdf_commands"
2403 MON_TESTS+=" mon_tell_help_command"
2404
2405 OSD_TESTS+=" osd_bench"
2406 OSD_TESTS+=" osd_negative_filestore_merge_threshold"
2407 OSD_TESTS+=" tiering_agent"
2408 OSD_TESTS+=" admin_heap_profiler"
2409 OSD_TESTS+=" osd_tell_help_command"
2410
2411 MDS_TESTS+=" mds_tell"
2412 MDS_TESTS+=" mon_mds"
2413 MDS_TESTS+=" mon_mds_metadata"
2414 MDS_TESTS+=" mds_tell_help_command"
2415
2416 MGR_TESTS+=" mgr_tell_help_command"
2417
2418 TESTS+=$MON_TESTS
2419 TESTS+=$OSD_TESTS
2420 TESTS+=$MDS_TESTS
2421 TESTS+=$MGR_TESTS
2422
2423 #
2424 # "main" follows
2425 #
2426
2427 function list_tests()
2428 {
2429 echo "AVAILABLE TESTS"
2430 for i in $TESTS; do
2431 echo " $i"
2432 done
2433 }
2434
2435 function usage()
2436 {
2437 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2438 }
2439
2440 tests_to_run=()
2441
2442 sanity_check=true
2443
2444 while [[ $# -gt 0 ]]; do
2445 opt=$1
2446
2447 case "$opt" in
2448 "-l" )
2449 do_list=1
2450 ;;
2451 "--asok-does-not-need-root" )
2452 SUDO=""
2453 ;;
2454 "--no-sanity-check" )
2455 sanity_check=false
2456 ;;
2457 "--test-mon" )
2458 tests_to_run+="$MON_TESTS"
2459 ;;
2460 "--test-osd" )
2461 tests_to_run+="$OSD_TESTS"
2462 ;;
2463 "--test-mds" )
2464 tests_to_run+="$MDS_TESTS"
2465 ;;
2466 "--test-mgr" )
2467 tests_to_run+="$MGR_TESTS"
2468 ;;
2469 "-t" )
2470 shift
2471 if [[ -z "$1" ]]; then
2472 echo "missing argument to '-t'"
2473 usage ;
2474 exit 1
2475 fi
2476 tests_to_run+=" $1"
2477 ;;
2478 "-h" )
2479 usage ;
2480 exit 0
2481 ;;
2482 esac
2483 shift
2484 done
2485
2486 if [[ $do_list -eq 1 ]]; then
2487 list_tests ;
2488 exit 0
2489 fi
2490
2491 if test -z "$tests_to_run" ; then
2492 tests_to_run="$TESTS"
2493 fi
2494
2495 if $sanity_check ; then
2496 wait_no_osd_down
2497 fi
2498 for i in $tests_to_run; do
2499 if $sanity_check ; then
2500 check_no_osd_down
2501 fi
2502 set -x
2503 test_${i}
2504 set +x
2505 done
2506 if $sanity_check ; then
2507 check_no_osd_down
2508 fi
2509
2510 set -x
2511
2512 echo OK