#!/bin/bash -x

source $(dirname $0)/../ceph-helpers.sh

set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}
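# Note: ceph-helpers.sh (sourced above) is expected to provide helpers used
# throughout this script, such as wait_for_clean, wait_for_health,
# wait_for_health_ok and expect_failure.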
10function get_admin_socket()
11{
12 local client=$1
13
14 if test -n "$CEPH_OUT_DIR";
15 then
16 echo $CEPH_OUT_DIR/$client.asok
17 else
18 local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
19 echo "/var/run/ceph/$cluster-$client.asok"
20 fi
21}
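# Example (as used by the injectfull checks in test_mon_pg below):
#   $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull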
22
23function check_no_osd_down()
24{
25 ! ceph osd dump | grep ' down '
26}
27
28function wait_no_osd_down()
29{
30 max_run=300
31 for i in $(seq 1 $max_run) ; do
32 if ! check_no_osd_down ; then
33 echo "waiting for osd(s) to come back up ($i/$max_run)"
34 sleep 1
35 else
36 break
37 fi
38 done
39 check_no_osd_down
40}
41
42function expect_false()
43{
44 set -x
45 if "$@"; then return 1; else return 0; fi
46}
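# expect_false inverts the exit status of the given command, so a test line
# like the following only passes when the command fails:
#   expect_false ceph auth get client.xx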
47
48
49TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
50trap "rm -fr $TEMP_DIR" 0
51
52TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
53
54#
55# retry_eagain max cmd args ...
56#
57# retry cmd args ... if it exits on error and its output contains the
58# string EAGAIN, at most $max times
59#
60function retry_eagain()
61{
62 local max=$1
63 shift
64 local status
65 local tmpfile=$TEMP_DIR/retry_eagain.$$
66 local count
67 for count in $(seq 1 $max) ; do
68 status=0
69 "$@" > $tmpfile 2>&1 || status=$?
70 if test $status = 0 ||
71 ! grep --quiet EAGAIN $tmpfile ; then
72 break
73 fi
74 sleep 1
75 done
76 if test $count = $max ; then
    echo retried with non-zero exit status, $max times: "$@" >&2
78 fi
79 cat $tmpfile
80 rm $tmpfile
81 return $status
82}
83
84#
85# map_enxio_to_eagain cmd arg ...
86#
87# add EAGAIN to the output of cmd arg ... if the output contains
88# ENXIO.
89#
90function map_enxio_to_eagain()
91{
92 local status=0
93 local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
94
95 "$@" > $tmpfile 2>&1 || status=$?
96 if test $status != 0 &&
97 grep --quiet ENXIO $tmpfile ; then
98 echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
99 fi
100 cat $tmpfile
101 rm $tmpfile
102 return $status
103}
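# retry_eagain and map_enxio_to_eagain are typically combined so that a
# transient ENXIO from an OSD is retried like an EAGAIN, e.g. (from
# test_mon_osd below):
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version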
104
105function check_response()
106{
107 expected_string=$1
108 retcode=$2
109 expected_retcode=$3
110 if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
111 echo "return code invalid: got $retcode, expected $expected_retcode" >&2
112 exit 1
113 fi
114
115 if ! grep --quiet -- "$expected_string" $TMPFILE ; then
116 echo "Didn't find $expected_string in output" >&2
117 cat $TMPFILE >&2
118 exit 1
119 fi
120}
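# check_response expects the command's output to already be in $TMPFILE and
# takes the expected string first, then (optionally) the actual and expected
# return codes, e.g.:
#   ceph osd tier remove slow cache3 2> $TMPFILE || true
#   check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"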
121
122function get_config_value_or_die()
123{
124 local target config_opt raw val
125
126 target=$1
127 config_opt=$2
128
129 raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
130 if [[ $? -ne 0 ]]; then
131 echo "error obtaining config opt '$config_opt' from '$target': $raw"
132 exit 1
133 fi
134
135 raw=`echo $raw | sed -e 's/[{} "]//g'`
136 val=`echo $raw | cut -f2 -d:`
137
138 echo "$val"
139 return 0
140}
141
142function expect_config_value()
143{
144 local target config_opt expected_val val
145 target=$1
146 config_opt=$2
147 expected_val=$3
148
149 val=$(get_config_value_or_die $target $config_opt)
150
151 if [[ "$val" != "$expected_val" ]]; then
152 echo "expected '$expected_val', got '$val'"
153 exit 1
154 fi
155}
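# Example (these two helpers are typically used together, as in
# test_mon_injectargs_SI below):
#   initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10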
156
157function ceph_watch_start()
158{
  local watch_opt=--watch

  if [ -n "$1" ]; then
    watch_opt=--watch-$1
  fi

  CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
  ceph $watch_opt > $CEPH_WATCH_FILE &
167 CEPH_WATCH_PID=$!
168
169 # wait until the "ceph" client is connected and receiving
170 # log messages from monitor
171 for i in `seq 3`; do
172 grep -q "cluster" $CEPH_WATCH_FILE && break
173 sleep 1
174 done
175}
176
177function ceph_watch_wait()
178{
179 local regexp=$1
180 local timeout=30
181
182 if [ -n "$2" ]; then
183 timeout=$2
184 fi
185
186 for i in `seq ${timeout}`; do
187 grep -q "$regexp" $CEPH_WATCH_FILE && break
188 sleep 1
189 done
190
191 kill $CEPH_WATCH_PID
192
193 if ! grep "$regexp" $CEPH_WATCH_FILE; then
194 echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
195 cat $CEPH_WATCH_FILE >&2
196 return 1
197 fi
198}
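# ceph_watch_start/ceph_watch_wait bracket a test so it can assert that a
# given message shows up in the cluster log, e.g. (from test_mon_misc):
#   ceph_watch_start
#   ceph log "$mymsg"
#   ceph_watch_wait "$mymsg"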
199
200function test_mon_injectargs()
201{
202 CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
203 check_response "osd_enable_op_tracker = 'false'"
204 ! grep "the.dump" $TMPFILE || return 1
205 ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
206 check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
207 ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
208 check_response "osd_enable_op_tracker = 'false'"
209 ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
210 check_response "osd_enable_op_tracker = 'true'"
211 ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
212 check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
213 expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
214 ceph tell osd.0 injectargs -- '--osd_op_history_duration'
215
216 ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
217 check_response "mon_lease = '6' (not observed, change may require restart)"
218
219 # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
220 expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1
221}
222
223function test_mon_injectargs_SI()
224{
225 # Test SI units during injectargs and 'config set'
226 # We only aim at testing the units are parsed accordingly
227 # and don't intend to test whether the options being set
228 # actually expect SI units to be passed.
229 # Keep in mind that all integer based options (i.e., INT,
230 # LONG, U32, U64) will accept SI unit modifiers.
231 initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
232 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
233 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
234 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
235 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
236 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
237 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
238 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
239 check_response "'10F': (22) Invalid argument"
240 # now test with injectargs
241 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
242 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
243 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
244 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
245 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
246 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
247 expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
248 expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
249 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
250}
251
252function test_tiering_agent()
253{
254 local slow=slow_eviction
255 local fast=fast_eviction
256 ceph osd pool create $slow 1 1
257 ceph osd pool create $fast 1 1
258 ceph osd tier add $slow $fast
259 ceph osd tier cache-mode $fast writeback
260 ceph osd tier set-overlay $slow $fast
261 ceph osd pool set $fast hit_set_type bloom
262 rados -p $slow put obj1 /etc/group
263 ceph osd pool set $fast target_max_objects 1
264 ceph osd pool set $fast hit_set_count 1
265 ceph osd pool set $fast hit_set_period 5
266 # wait for the object to be evicted from the cache
267 local evicted
268 evicted=false
269 for i in `seq 1 300` ; do
270 if ! rados -p $fast ls | grep obj1 ; then
271 evicted=true
272 break
273 fi
274 sleep 1
275 done
276 $evicted # assert
277 # the object is proxy read and promoted to the cache
278 rados -p $slow get obj1 - >/dev/null
279 # wait for the promoted object to be evicted again
280 evicted=false
281 for i in `seq 1 300` ; do
282 if ! rados -p $fast ls | grep obj1 ; then
283 evicted=true
284 break
285 fi
286 sleep 1
287 done
288 $evicted # assert
289 ceph osd tier remove-overlay $slow
290 ceph osd tier remove $slow $fast
291 ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
292 ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
293}
294
295function test_tiering()
296{
297 # tiering
298 ceph osd pool create slow 2
299 ceph osd pool create slow2 2
300 ceph osd pool create cache 2
301 ceph osd pool create cache2 2
302 ceph osd tier add slow cache
303 ceph osd tier add slow cache2
304 expect_false ceph osd tier add slow2 cache
305 # test some state transitions
306 ceph osd tier cache-mode cache writeback
307 expect_false ceph osd tier cache-mode cache forward
308 ceph osd tier cache-mode cache forward --yes-i-really-mean-it
309 expect_false ceph osd tier cache-mode cache readonly
310 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
311 expect_false ceph osd tier cache-mode cache forward
312 ceph osd tier cache-mode cache forward --yes-i-really-mean-it
313 ceph osd tier cache-mode cache none
314 ceph osd tier cache-mode cache writeback
315 ceph osd tier cache-mode cache proxy
316 ceph osd tier cache-mode cache writeback
317 expect_false ceph osd tier cache-mode cache none
318 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
319 # test with dirty objects in the tier pool
320 # tier pool currently set to 'writeback'
321 rados -p cache put /etc/passwd /etc/passwd
322 ceph tell osd.\* flush_pg_stats || true
323 # 1 dirty object in pool 'cache'
324 ceph osd tier cache-mode cache proxy
325 expect_false ceph osd tier cache-mode cache none
326 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
327 ceph osd tier cache-mode cache writeback
328 # remove object from tier pool
329 rados -p cache rm /etc/passwd
330 rados -p cache cache-flush-evict-all
331 ceph tell osd.\* flush_pg_stats || true
332 # no dirty objects in pool 'cache'
333 ceph osd tier cache-mode cache proxy
334 ceph osd tier cache-mode cache none
335 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
336 TRIES=0
337 while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
338 do
339 grep 'currently creating pgs' $TMPFILE
340 TRIES=$(( $TRIES + 1 ))
341 test $TRIES -ne 60
342 sleep 3
343 done
344 expect_false ceph osd pool set cache pg_num 4
345 ceph osd tier cache-mode cache none
346 ceph osd tier set-overlay slow cache
347 expect_false ceph osd tier set-overlay slow cache2
348 expect_false ceph osd tier remove slow cache
349 ceph osd tier remove-overlay slow
350 ceph osd tier set-overlay slow cache2
351 ceph osd tier remove-overlay slow
352 ceph osd tier remove slow cache
353 ceph osd tier add slow2 cache
354 expect_false ceph osd tier set-overlay slow cache
355 ceph osd tier set-overlay slow2 cache
356 ceph osd tier remove-overlay slow2
357 ceph osd tier remove slow2 cache
358 ceph osd tier remove slow cache2
359
  # make sure adding a non-empty pool as a tier fails without --force-nonempty
361 rados -p cache2 put /etc/passwd /etc/passwd
362 while ! ceph df | grep cache2 | grep ' 1 ' ; do
363 echo waiting for pg stats to flush
364 sleep 2
365 done
366 expect_false ceph osd tier add slow cache2
367 ceph osd tier add slow cache2 --force-nonempty
368 ceph osd tier remove slow cache2
369
370 ceph osd pool ls | grep cache2
371 ceph osd pool ls -f json-pretty | grep cache2
372 ceph osd pool ls detail | grep cache2
373 ceph osd pool ls detail -f json-pretty | grep cache2
374
375 ceph osd pool delete cache cache --yes-i-really-really-mean-it
376 ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
377
378 # make sure we can't clobber snapshot state
379 ceph osd pool create snap_base 2
380 ceph osd pool create snap_cache 2
381 ceph osd pool mksnap snap_cache snapname
382 expect_false ceph osd tier add snap_base snap_cache
383 ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
384 ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
385
386 # make sure we can't create snapshot on tier
387 ceph osd pool create basex 2
388 ceph osd pool create cachex 2
389 ceph osd tier add basex cachex
  expect_false ceph osd pool mksnap cachex snapname
391 ceph osd tier remove basex cachex
392 ceph osd pool delete basex basex --yes-i-really-really-mean-it
393 ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
394
395 # make sure we can't create an ec pool tier
396 ceph osd pool create eccache 2 2 erasure
397 expect_false ceph osd set-require-min-compat-client bobtail
398 ceph osd pool create repbase 2
399 expect_false ceph osd tier add repbase eccache
400 ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
401 ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
402
403 # convenient add-cache command
404 ceph osd pool create cache3 2
405 ceph osd tier add-cache slow cache3 1024000
406 ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
407 ceph osd tier remove slow cache3 2> $TMPFILE || true
408 check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
409 ceph osd tier remove-overlay slow
410 ceph osd tier remove slow cache3
411 ceph osd pool ls | grep cache3
412 ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
413 ! ceph osd pool ls | grep cache3 || exit 1
414
415 ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
416 ceph osd pool delete slow slow --yes-i-really-really-mean-it
417
  # check whether add-cache works
419 ceph osd pool create datapool 2
420 ceph osd pool create cachepool 2
421 ceph osd tier add-cache datapool cachepool 1024000
422 ceph osd tier cache-mode cachepool writeback
423 rados -p datapool put object /etc/passwd
424 rados -p cachepool stat object
425 rados -p cachepool cache-flush object
426 rados -p datapool stat object
427 ceph osd tier remove-overlay datapool
428 ceph osd tier remove datapool cachepool
429 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
430 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
431
432 # protection against pool removal when used as tiers
433 ceph osd pool create datapool 2
434 ceph osd pool create cachepool 2
435 ceph osd tier add-cache datapool cachepool 1024000
436 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
437 check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
438 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
439 check_response "EBUSY: pool 'datapool' has tiers cachepool"
440 ceph osd tier remove-overlay datapool
441 ceph osd tier remove datapool cachepool
442 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
443 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
444
445 ## check health check
446 ceph osd set notieragent
447 ceph osd pool create datapool 2
448 ceph osd pool create cache4 2
449 ceph osd tier add-cache datapool cache4 1024000
450 ceph osd tier cache-mode cache4 writeback
451 tmpfile=$(mktemp|grep tmp)
452 dd if=/dev/zero of=$tmpfile bs=4K count=1
453 ceph osd pool set cache4 target_max_objects 200
454 ceph osd pool set cache4 target_max_bytes 1000000
455 rados -p cache4 put foo1 $tmpfile
456 rados -p cache4 put foo2 $tmpfile
457 rm -f $tmpfile
458 ceph tell osd.\* flush_pg_stats || true
459 ceph df | grep datapool | grep ' 2 '
460 ceph osd tier remove-overlay datapool
461 ceph osd tier remove datapool cache4
462 ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
463 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
464 ceph osd unset notieragent
465
466
467 # make sure 'tier remove' behaves as we expect
468 # i.e., removing a tier from a pool that's not its base pool only
469 # results in a 'pool foo is now (or already was) not a tier of bar'
470 #
471 ceph osd pool create basepoolA 2
472 ceph osd pool create basepoolB 2
473 poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
474 poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
475
476 ceph osd pool create cache5 2
477 ceph osd pool create cache6 2
478 ceph osd tier add basepoolA cache5
479 ceph osd tier add basepoolB cache6
480 ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
481 ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
482 ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
483 ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
484
485 ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
486 ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
487 ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
488 ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
489
490 ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
491 ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
492
493 ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
494 ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
495 ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
496 ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
497}
498
499function test_auth()
500{
501 ceph auth add client.xx mon allow osd "allow *"
502 ceph auth export client.xx >client.xx.keyring
503 ceph auth add client.xx -i client.xx.keyring
504 rm -f client.xx.keyring
505 ceph auth list | grep client.xx
506 ceph auth get client.xx | grep caps | grep mon
507 ceph auth get client.xx | grep caps | grep osd
508 ceph auth get-key client.xx
509 ceph auth print-key client.xx
510 ceph auth print_key client.xx
511 ceph auth caps client.xx osd "allow rw"
512 expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
513 ceph auth get client.xx | grep osd | grep "allow rw"
514 ceph auth export | grep client.xx
515 ceph auth export -o authfile
516 ceph auth import -i authfile
517 ceph auth export -o authfile2
518 diff authfile authfile2
519 rm authfile authfile2
520 ceph auth del client.xx
521 expect_false ceph auth get client.xx
522
523 # (almost) interactive mode
524 echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
525 ceph auth get client.xx
526 # script mode
527 echo 'auth del client.xx' | ceph
528 expect_false ceph auth get client.xx
529
530 #
531 # get / set auid
532 #
533 local auid=444
534 ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
535 expect_false ceph auth import --in-file TEST-keyring
536 rm TEST-keyring
537 ceph-authtool --create-keyring --name client.TEST --gen-key --cap mon "allow r" --set-uid $auid TEST-keyring
538 ceph auth import --in-file TEST-keyring
539 rm TEST-keyring
540 ceph auth get client.TEST > $TMPFILE
541 check_response "auid = $auid"
542 ceph --format json-pretty auth get client.TEST > $TMPFILE
543 check_response '"auid": '$auid
544 ceph auth list > $TMPFILE
545 check_response "auid: $auid"
546 ceph --format json-pretty auth list > $TMPFILE
547 check_response '"auid": '$auid
548 ceph auth del client.TEST
549}
550
551function test_auth_profiles()
552{
553 ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
554 mgr 'allow profile read-only'
555 ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
556 mgr 'allow profile read-write'
557 ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
558
559 ceph auth export > client.xx.keyring
560
561 # read-only is allowed all read-only commands (auth excluded)
562 ceph -n client.xx-profile-ro -k client.xx.keyring status
563 ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
564 ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
565 ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
566 ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
567 # read-only gets access denied for rw commands or auth commands
568 ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
569 check_response "EACCES: access denied"
570 ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
571 check_response "EACCES: access denied"
572 ceph -n client.xx-profile-ro -k client.xx.keyring auth list >& $TMPFILE || true
573 check_response "EACCES: access denied"
574
575 # read-write is allowed for all read-write commands (except auth)
576 ceph -n client.xx-profile-rw -k client.xx.keyring status
577 ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
578 ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
579 ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
580 ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
581 ceph -n client.xx-profile-rw -k client.xx.keyring log foo
582 ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
583 ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
584 # read-write gets access denied for auth commands
585 ceph -n client.xx-profile-rw -k client.xx.keyring auth list >& $TMPFILE || true
586 check_response "EACCES: access denied"
587
588 # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
589 ceph -n client.xx-profile-rd -k client.xx.keyring auth list
590 ceph -n client.xx-profile-rd -k client.xx.keyring auth export
591 ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
592 ceph -n client.xx-profile-rd -k client.xx.keyring status
593 ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
594 check_response "EACCES: access denied"
595 ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
596 check_response "EACCES: access denied"
597 # read-only 'mon' subsystem commands are allowed
598 ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
599 # but read-write 'mon' commands are not
600 ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
601 check_response "EACCES: access denied"
602 ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
603 check_response "EACCES: access denied"
604 ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
605 check_response "EACCES: access denied"
606 ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
607 check_response "EACCES: access denied"
608
609 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
610 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
611
612 # add a new role-definer with the existing role-definer
613 ceph -n client.xx-profile-rd -k client.xx.keyring \
614 auth add client.xx-profile-rd2 mon 'allow profile role-definer'
615 ceph -n client.xx-profile-rd -k client.xx.keyring \
616 auth export > client.xx.keyring.2
617 # remove old role-definer using the new role-definer
618 ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
619 auth del client.xx-profile-rd
620 # remove the remaining role-definer with admin
621 ceph auth del client.xx-profile-rd2
622 rm -f client.xx.keyring client.xx.keyring.2
623}
624
625function test_mon_caps()
626{
627 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
628 chmod +r $TEMP_DIR/ceph.client.bug.keyring
629 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
630 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
631
632 rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
633 check_response "Permission denied"
634
635 rm -rf $TEMP_DIR/ceph.client.bug.keyring
636 ceph auth del client.bug
637 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
638 chmod +r $TEMP_DIR/ceph.client.bug.keyring
639 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
640 ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
641 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
642 rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
643 check_response "Permission denied"
644}
645
646function test_mon_misc()
647{
648 # with and without verbosity
649 ceph osd dump | grep '^epoch'
650 ceph --concise osd dump | grep '^epoch'
651
652 # df
653 ceph df > $TMPFILE
654 grep GLOBAL $TMPFILE
655 grep -v DIRTY $TMPFILE
656 ceph df detail > $TMPFILE
657 grep DIRTY $TMPFILE
658 ceph df --format json > $TMPFILE
659 grep 'total_bytes' $TMPFILE
660 grep -v 'dirty' $TMPFILE
661 ceph df detail --format json > $TMPFILE
662 grep 'rd_bytes' $TMPFILE
663 grep 'dirty' $TMPFILE
664 ceph df --format xml | grep '<total_bytes>'
665 ceph df detail --format xml | grep '<rd_bytes>'
666
667 ceph fsid
668 ceph health
669 ceph health detail
670 ceph health --format json-pretty
671 ceph health detail --format xml-pretty
672
673 ceph node ls
674 for t in mon osd mds ; do
675 ceph node ls $t
676 done
677
678 ceph_watch_start
679 mymsg="this is a test log message $$.$(date)"
680 ceph log "$mymsg"
681 ceph_watch_wait "$mymsg"
682
683 ceph mon metadata a
684 ceph mon metadata
685 ceph node ls
686}
687
688function check_mds_active()
689{
690 fs_name=$1
691 ceph fs get $fs_name | grep active
692}
693
694function wait_mds_active()
695{
696 fs_name=$1
697 max_run=300
698 for i in $(seq 1 $max_run) ; do
699 if ! check_mds_active $fs_name ; then
700 echo "waiting for an active MDS daemon ($i/$max_run)"
701 sleep 5
702 else
703 break
704 fi
705 done
706 check_mds_active $fs_name
707}
708
709function get_mds_gids()
710{
711 fs_name=$1
712 ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
713}
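# get_mds_gids prints the numeric GIDs of every MDS in the given filesystem,
# space separated; fail_all_mds and test_mds_tell iterate over that list.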
714
715function fail_all_mds()
716{
717 fs_name=$1
718 ceph fs set $fs_name cluster_down true
719 mds_gids=$(get_mds_gids $fs_name)
720 for mds_gid in $mds_gids ; do
721 ceph mds fail $mds_gid
722 done
723 if check_mds_active $fs_name ; then
724 echo "An active MDS remains, something went wrong"
725 ceph fs get $fs_name
726 exit -1
727 fi
728
729}
730
731function remove_all_fs()
732{
733 existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
734 for fs_name in $existing_fs ; do
735 echo "Removing fs ${fs_name}..."
736 fail_all_mds $fs_name
737 echo "Removing existing filesystem '${fs_name}'..."
738 ceph fs rm $fs_name --yes-i-really-mean-it
739 echo "Removed '${fs_name}'."
740 done
741}
742
743# So that tests requiring MDS can skip if one is not configured
744# in the cluster at all
745function mds_exists()
746{
747 ceph auth list | grep "^mds"
748}
749
750# some of the commands are just not idempotent.
751function without_test_dup_command()
752{
  if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
    "$@"
  else
    local saved=${CEPH_CLI_TEST_DUP_COMMAND}
    unset CEPH_CLI_TEST_DUP_COMMAND
    "$@"
    CEPH_CLI_TEST_DUP_COMMAND=$saved
  fi
761}
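# Example: 'respawn' is not idempotent, so test_mds_tell runs it with the
# CLI duplicate-command wrapper temporarily disabled:
#   without_test_dup_command ceph tell mds.0 respawn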
762
763function test_mds_tell()
764{
765 FS_NAME=cephfs
766 if ! mds_exists ; then
767 echo "Skipping test, no MDS found"
768 return
769 fi
770
771 remove_all_fs
772 ceph osd pool create fs_data 10
773 ceph osd pool create fs_metadata 10
774 ceph fs new $FS_NAME fs_metadata fs_data
775 wait_mds_active $FS_NAME
776
777 # Test injectargs by GID
778 old_mds_gids=$(get_mds_gids $FS_NAME)
779 echo Old GIDs: $old_mds_gids
780
781 for mds_gid in $old_mds_gids ; do
782 ceph tell mds.$mds_gid injectargs "--debug-mds 20"
783 done
784 expect_false ceph tell mds.a injectargs mds_max_file_recover -1
785
786 # Test respawn by rank
787 without_test_dup_command ceph tell mds.0 respawn
788 new_mds_gids=$old_mds_gids
789 while [ $new_mds_gids -eq $old_mds_gids ] ; do
790 sleep 5
791 new_mds_gids=$(get_mds_gids $FS_NAME)
792 done
793 echo New GIDs: $new_mds_gids
794
795 # Test respawn by ID
796 without_test_dup_command ceph tell mds.a respawn
797 new_mds_gids=$old_mds_gids
798 while [ $new_mds_gids -eq $old_mds_gids ] ; do
799 sleep 5
800 new_mds_gids=$(get_mds_gids $FS_NAME)
801 done
802 echo New GIDs: $new_mds_gids
803
804 remove_all_fs
805 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
806 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
807}
808
809function test_mon_mds()
810{
811 FS_NAME=cephfs
812 remove_all_fs
813
814 ceph osd pool create fs_data 10
815 ceph osd pool create fs_metadata 10
816 ceph fs new $FS_NAME fs_metadata fs_data
817
818 ceph fs set $FS_NAME cluster_down true
819 ceph fs set $FS_NAME cluster_down false
820
821 # Legacy commands, act on default fs
822 ceph mds cluster_down
823 ceph mds cluster_up
824
825 ceph mds compat rm_incompat 4
826 ceph mds compat rm_incompat 4
827
828 # We don't want any MDSs to be up, their activity can interfere with
829 # the "current_epoch + 1" checking below if they're generating updates
830 fail_all_mds $FS_NAME
831
832 ceph mds compat show
833 expect_false ceph mds deactivate 2
834 ceph mds dump
835 ceph fs dump
836 ceph fs get $FS_NAME
837 for mds_gid in $(get_mds_gids $FS_NAME) ; do
    ceph mds metadata $mds_gid
839 done
840 ceph mds metadata
841
842 # XXX mds fail, but how do you undo it?
843 mdsmapfile=$TEMP_DIR/mdsmap.$$
844 current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
845 [ -s $mdsmapfile ]
846 rm $mdsmapfile
847
848 ceph osd pool create data2 10
849 ceph osd pool create data3 10
850 data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
851 data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
852 ceph mds add_data_pool $data2_pool
853 ceph mds add_data_pool $data3_pool
854 ceph mds add_data_pool 100 >& $TMPFILE || true
855 check_response "Error ENOENT"
856 ceph mds add_data_pool foobarbaz >& $TMPFILE || true
857 check_response "Error ENOENT"
858 ceph mds remove_data_pool $data2_pool
859 ceph mds remove_data_pool $data3_pool
860 ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
861 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
862 expect_false ceph mds set_max_mds 4
863 ceph mds set allow_multimds true --yes-i-really-mean-it
864 ceph mds set_max_mds 4
865 ceph mds set_max_mds 3
866 ceph mds set_max_mds 256
867 expect_false ceph mds set_max_mds 257
868 ceph mds set max_mds 4
869 ceph mds set max_mds 256
870 expect_false ceph mds set max_mds 257
871 expect_false ceph mds set max_mds asdf
872 expect_false ceph mds set inline_data true
873 ceph mds set inline_data true --yes-i-really-mean-it
874 ceph mds set inline_data yes --yes-i-really-mean-it
875 ceph mds set inline_data 1 --yes-i-really-mean-it
876 expect_false ceph mds set inline_data --yes-i-really-mean-it
877 ceph mds set inline_data false
878 ceph mds set inline_data no
879 ceph mds set inline_data 0
880 expect_false ceph mds set inline_data asdf
881 ceph mds set max_file_size 1048576
882 expect_false ceph mds set max_file_size 123asdf
883
884 expect_false ceph mds set allow_new_snaps
885 expect_false ceph mds set allow_new_snaps true
886 ceph mds set allow_new_snaps true --yes-i-really-mean-it
887 ceph mds set allow_new_snaps 0
888 ceph mds set allow_new_snaps false
889 ceph mds set allow_new_snaps no
890 expect_false ceph mds set allow_new_snaps taco
891
892 # we should never be able to add EC pools as data or metadata pools
893 # create an ec-pool...
894 ceph osd pool create mds-ec-pool 10 10 erasure
895 set +e
896 ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
897 check_response 'erasure-code' $? 22
898 set -e
899 ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
900 data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
901 metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
902
903 fail_all_mds $FS_NAME
904
905 set +e
906 # Check that rmfailed requires confirmation
907 expect_false ceph mds rmfailed 0
908 ceph mds rmfailed 0 --yes-i-really-mean-it
909 set -e
910
911 # Check that `newfs` is no longer permitted
912 expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
913
914 # Check that 'fs reset' runs
915 ceph fs reset $FS_NAME --yes-i-really-mean-it
916
917 # Check that creating a second FS fails by default
918 ceph osd pool create fs_metadata2 10
919 ceph osd pool create fs_data2 10
920 set +e
921 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
922 set -e
923
924 # Check that setting enable_multiple enables creation of second fs
925 ceph fs flag set enable_multiple true --yes-i-really-mean-it
926 ceph fs new cephfs2 fs_metadata2 fs_data2
927
928 # Clean up multi-fs stuff
929 fail_all_mds cephfs2
930 ceph fs rm cephfs2 --yes-i-really-mean-it
931 ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
932 ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
933
934 fail_all_mds $FS_NAME
935
936 # Clean up to enable subsequent fs new tests
937 ceph fs rm $FS_NAME --yes-i-really-mean-it
938
939 set +e
940 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
941 check_response 'erasure-code' $? 22
942 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
943 check_response 'erasure-code' $? 22
944 ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
945 check_response 'erasure-code' $? 22
946 set -e
947
  # ... now create a cache tier in front of the EC pool...
949 ceph osd pool create mds-tier 2
950 ceph osd tier add mds-ec-pool mds-tier
951 ceph osd tier set-overlay mds-ec-pool mds-tier
952 tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
953
954 # Use of a readonly tier should be forbidden
955 ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
956 set +e
957 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
958 check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
959 set -e
960
961 # Use of a writeback tier should enable FS creation
962 ceph osd tier cache-mode mds-tier writeback
963 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
964
965 # While a FS exists using the tiered pools, I should not be allowed
966 # to remove the tier
967 set +e
968 ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
969 check_response 'in use by CephFS' $? 16
970 ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
971 check_response 'in use by CephFS' $? 16
972 set -e
973
974 fail_all_mds $FS_NAME
975 ceph fs rm $FS_NAME --yes-i-really-mean-it
976
977 # ... but we should be forbidden from using the cache pool in the FS directly.
978 set +e
979 ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
980 check_response 'in use as a cache tier' $? 22
981 ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
982 check_response 'in use as a cache tier' $? 22
983 ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
984 check_response 'in use as a cache tier' $? 22
985 set -e
986
987 # Clean up tier + EC pools
988 ceph osd tier remove-overlay mds-ec-pool
989 ceph osd tier remove mds-ec-pool mds-tier
990
991 # Create a FS using the 'cache' pool now that it's no longer a tier
992 ceph fs new $FS_NAME fs_metadata mds-tier --force
993
994 # We should be forbidden from using this pool as a tier now that
995 # it's in use for CephFS
996 set +e
997 ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
998 check_response 'in use by CephFS' $? 16
999 set -e
1000
1001 fail_all_mds $FS_NAME
1002 ceph fs rm $FS_NAME --yes-i-really-mean-it
1003
1004 # We should be permitted to use an EC pool with overwrites enabled
1005 # as the data pool...
1006 ceph osd pool set mds-ec-pool allow_ec_overwrites true
1007 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1008 fail_all_mds $FS_NAME
1009 ceph fs rm $FS_NAME --yes-i-really-mean-it
1010
1011 # ...but not as the metadata pool
1012 set +e
1013 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1014 check_response 'erasure-code' $? 22
1015 set -e
1016
1017 ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
1018
1019 # Create a FS and check that we can subsequently add a cache tier to it
1020 ceph fs new $FS_NAME fs_metadata fs_data --force
1021
1022 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1023 ceph osd tier add fs_metadata mds-tier
1024 ceph osd tier cache-mode mds-tier writeback
1025 ceph osd tier set-overlay fs_metadata mds-tier
1026
1027 # Removing tier should be permitted because the underlying pool is
1028 # replicated (#11504 case)
1029 ceph osd tier cache-mode mds-tier proxy
1030 ceph osd tier remove-overlay fs_metadata
1031 ceph osd tier remove fs_metadata mds-tier
1032 ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
1033
1034 # Clean up FS
1035 fail_all_mds $FS_NAME
1036 ceph fs rm $FS_NAME --yes-i-really-mean-it
1037
1038
1039
1040 ceph mds stat
1041 # ceph mds tell mds.a getmap
1042 # ceph mds rm
1043 # ceph mds rmfailed
1044 # ceph mds set_state
1045 # ceph mds stop
1046
1047 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
1048 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
1049}
1050
1051function test_mon_mds_metadata()
1052{
1053 local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
1054 test "$nmons" -gt 0
1055
1056 ceph mds dump |
1057 sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
1058 while read gid id rank; do
1059 ceph mds metadata ${gid} | grep '"hostname":'
1060 ceph mds metadata ${id} | grep '"hostname":'
1061 ceph mds metadata ${rank} | grep '"hostname":'
1062
1063 local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
1064 test "$n" -eq "$nmons"
1065 done
1066
1067 expect_false ceph mds metadata UNKNOWN
1068}
1069
1070function test_mon_mon()
1071{
1072 # print help message
1073 ceph --help mon
1074 # no mon add/remove
1075 ceph mon dump
1076 ceph mon getmap -o $TEMP_DIR/monmap.$$
1077 [ -s $TEMP_DIR/monmap.$$ ]
1078 # ceph mon tell
1079 ceph mon_status
1080
1081 # test mon features
1082 ceph mon feature list
1083 ceph mon feature set kraken --yes-i-really-mean-it
1084 expect_false ceph mon feature set abcd
1085 expect_false ceph mon feature set abcd --yes-i-really-mean-it
1086}
1087
1088function test_mon_osd()
1089{
1090 #
1091 # osd blacklist
1092 #
1093 bl=192.168.0.1:0/1000
1094 ceph osd blacklist add $bl
1095 ceph osd blacklist ls | grep $bl
1096 ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
1097 ceph osd dump --format=json-pretty | grep $bl
1098 ceph osd dump | grep "^blacklist $bl"
1099 ceph osd blacklist rm $bl
1100 ceph osd blacklist ls | expect_false grep $bl
1101
1102 bl=192.168.0.1
1103 # test without nonce, invalid nonce
1104 ceph osd blacklist add $bl
1105 ceph osd blacklist ls | grep $bl
1106 ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl
1108 expect_false "ceph osd blacklist $bl/-1"
1109 expect_false "ceph osd blacklist $bl/foo"
1110
1111 # test with wrong address
1112 expect_false "ceph osd blacklist 1234.56.78.90/100"
1113
1114 # Test `clear`
1115 ceph osd blacklist add $bl
1116 ceph osd blacklist ls | grep $bl
1117 ceph osd blacklist clear
1118 ceph osd blacklist ls | expect_false grep $bl
1119
1120 #
1121 # osd crush
1122 #
1123 ceph osd crush reweight-all
1124 ceph osd crush tunables legacy
1125 ceph osd crush show-tunables | grep argonaut
1126 ceph osd crush tunables bobtail
1127 ceph osd crush show-tunables | grep bobtail
1128 ceph osd crush tunables firefly
1129 ceph osd crush show-tunables | grep firefly
1130
1131 ceph osd crush set-tunable straw_calc_version 0
1132 ceph osd crush get-tunable straw_calc_version | grep 0
1133 ceph osd crush set-tunable straw_calc_version 1
1134 ceph osd crush get-tunable straw_calc_version | grep 1
1135
1136 #
1137 # require-min-compat-client
1138 expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
1139 ceph osd set-require-min-compat-client luminous
1140 ceph osd dump | grep 'require_min_compat_client luminous'
1141
1142 #
1143 # osd scrub
1144 #
1145 # how do I tell when these are done?
1146 ceph osd scrub 0
1147 ceph osd deep-scrub 0
1148 ceph osd repair 0
1149
1150 for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
1151 do
1152 ceph osd set $f
1153 ceph osd unset $f
1154 done
1155 expect_false ceph osd unset sortbitwise # cannot be unset
1156 expect_false ceph osd set bogus
1157 expect_false ceph osd unset bogus
1158 ceph osd set require_jewel_osds
1159 expect_false ceph osd unset require_jewel_osds
1160 ceph osd set require_kraken_osds
1161 expect_false ceph osd unset require_kraken_osds
1162
1163 ceph osd set noup
1164 ceph osd down 0
1165 ceph osd dump | grep 'osd.0 down'
1166 ceph osd unset noup
1167 max_run=1000
1168 for ((i=0; i < $max_run; i++)); do
1169 if ! ceph osd dump | grep 'osd.0 up'; then
1170 echo "waiting for osd.0 to come back up ($i/$max_run)"
1171 sleep 1
1172 else
1173 break
1174 fi
1175 done
1176 ceph osd dump | grep 'osd.0 up'
1177
1178 ceph osd dump | grep 'osd.0 up'
1179 # ceph osd find expects the OsdName, so both ints and osd.n should work.
1180 ceph osd find 1
1181 ceph osd find osd.1
1182 expect_false ceph osd find osd.xyz
1183 expect_false ceph osd find xyz
1184 expect_false ceph osd find 0.1
1185 ceph --format plain osd find 1 # falls back to json-pretty
1186 if [ `uname` == Linux ]; then
1187 ceph osd metadata 1 | grep 'distro'
1188 ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
1189 fi
1190 ceph osd out 0
1191 ceph osd dump | grep 'osd.0.*out'
1192 ceph osd in 0
1193 ceph osd dump | grep 'osd.0.*in'
1194 ceph osd find 0
1195
1196 # make sure mark out preserves weight
1197 ceph osd reweight osd.0 .5
1198 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1199 ceph osd out 0
1200 ceph osd in 0
1201 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1202
1203 f=$TEMP_DIR/map.$$
1204 ceph osd getcrushmap -o $f
1205 [ -s $f ]
1206 ceph osd setcrushmap -i $f
1207 rm $f
1208 ceph osd getmap -o $f
1209 [ -s $f ]
1210 rm $f
1211 save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1212 [ "$save" -gt 0 ]
1213 ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
1214 ceph osd setmaxosd 10
1215 ceph osd getmaxosd | grep 'max_osd = 10'
1216 ceph osd setmaxosd $save
1217 ceph osd getmaxosd | grep "max_osd = $save"
1218
1219 for id in `ceph osd ls` ; do
1220 retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
1221 done
1222
1223 ceph osd rm 0 2>&1 | grep 'EBUSY'
1224
1225 local old_osds=$(echo $(ceph osd ls))
1226 id=`ceph osd create`
1227 ceph osd find $id
1228 ceph osd lost $id --yes-i-really-mean-it
1229 expect_false ceph osd setmaxosd $id
1230 local new_osds=$(echo $(ceph osd ls))
1231 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1232 ceph osd rm $id
1233 done
1234
1235 uuid=`uuidgen`
1236 id=`ceph osd create $uuid`
1237 id2=`ceph osd create $uuid`
1238 [ "$id" = "$id2" ]
1239 ceph osd rm $id
1240
1241 ceph --help osd
1242
1243 # reset max_osd.
1244 ceph osd setmaxosd $id
1245 ceph osd getmaxosd | grep "max_osd = $save"
1246 local max_osd=$save
1247
1248 ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
1249 ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
1250
1251 id=`ceph osd create $uuid $max_osd`
1252 [ "$id" = "$max_osd" ]
1253 ceph osd find $id
1254 max_osd=$((max_osd + 1))
1255 ceph osd getmaxosd | grep "max_osd = $max_osd"
1256
1257 ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EINVAL'
1258 ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EINVAL'
1259 id2=`ceph osd create $uuid`
1260 [ "$id" = "$id2" ]
1261 id2=`ceph osd create $uuid $id`
1262 [ "$id" = "$id2" ]
1263
1264 uuid=`uuidgen`
1265 local gap_start=$max_osd
1266 id=`ceph osd create $uuid $((gap_start + 100))`
1267 [ "$id" = "$((gap_start + 100))" ]
1268 max_osd=$((id + 1))
1269 ceph osd getmaxosd | grep "max_osd = $max_osd"
1270
1271 ceph osd create $uuid $gap_start 2>&1 | grep 'EINVAL'
1272
1273 #
1274 # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
  # is repeated and consumes two osd ids, not just one.
1276 #
1277 local next_osd
1278 if test "$CEPH_CLI_TEST_DUP_COMMAND" ; then
1279 next_osd=$((gap_start + 1))
1280 else
1281 next_osd=$gap_start
1282 fi
1283 id=`ceph osd create`
1284 [ "$id" = "$next_osd" ]
1285
1286 next_osd=$((id + 1))
1287 id=`ceph osd create $(uuidgen)`
1288 [ "$id" = "$next_osd" ]
1289
1290 next_osd=$((id + 1))
1291 id=`ceph osd create $(uuidgen) $next_osd`
1292 [ "$id" = "$next_osd" ]
1293
1294 local new_osds=$(echo $(ceph osd ls))
1295 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1296 [ $id -ge $save ]
1297 ceph osd rm $id
1298 done
1299 ceph osd setmaxosd $save
1300
1301 ceph osd ls
1302 ceph osd pool create data 10
1303 ceph osd lspools | grep data
1304 ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
1305 ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
1306 ceph osd pool delete data data --yes-i-really-really-mean-it
1307
1308 ceph osd pause
1309 ceph osd dump | grep 'flags.*pauserd,pausewr'
1310 ceph osd unpause
1311
1312 ceph osd tree
1313 ceph osd perf
1314 ceph osd blocked-by
1315
1316 ceph osd stat | grep up,
1317}
1318
1319function test_mon_osd_pool()
1320{
1321 #
1322 # osd pool
1323 #
1324 ceph osd pool create data 10
1325 ceph osd pool mksnap data datasnap
1326 rados -p data lssnap | grep datasnap
1327 ceph osd pool rmsnap data datasnap
1328 expect_false ceph osd pool rmsnap pool_fake snapshot
1329 ceph osd pool delete data data --yes-i-really-really-mean-it
1330
1331 ceph osd pool create data2 10
1332 ceph osd pool rename data2 data3
1333 ceph osd lspools | grep data3
1334 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
1335
1336 ceph osd pool create replicated 12 12 replicated
1337 ceph osd pool create replicated 12 12 replicated
1338 ceph osd pool create replicated 12 12 # default is replicated
1339 ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
1340 # should fail because the type is not the same
1341 expect_false ceph osd pool create replicated 12 12 erasure
1342 ceph osd lspools | grep replicated
1343 ceph osd pool create ec_test 1 1 erasure
1344 set +e
1345 ceph osd metadata | grep osd_objectstore_type | grep -qc bluestore
1346 if [ $? -eq 0 ]; then
1347 ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
    check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
1349 else
1350 ceph osd pool set ec_test allow_ec_overwrites true || return 1
1351 expect_false ceph osd pool set ec_test allow_ec_overwrites false
1352 fi
1353 set -e
1354 ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
1355 ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
1356}
1357
1358function test_mon_osd_pool_quota()
1359{
1360 #
1361 # test osd pool set/get quota
1362 #
1363
1364 # create tmp pool
1365 ceph osd pool create tmp-quota-pool 36
1366 #
1367 # set erroneous quotas
1368 #
1369 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
1370 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
1371 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
1372 #
1373 # set valid quotas
1374 #
1375 ceph osd pool set-quota tmp-quota-pool max_bytes 10
1376 ceph osd pool set-quota tmp-quota-pool max_objects 10M
1377 #
1378 # get quotas
1379 #
1380 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
1381 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10240k objects'
1382 #
1383 # get quotas in json-pretty format
1384 #
1385 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1386 grep '"quota_max_objects":.*10485760'
1387 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1388 grep '"quota_max_bytes":.*10'
1389 #
1390 # reset pool quotas
1391 #
1392 ceph osd pool set-quota tmp-quota-pool max_bytes 0
1393 ceph osd pool set-quota tmp-quota-pool max_objects 0
1394 #
1395 # test N/A quotas
1396 #
1397 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
1398 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
1399 #
1400 # cleanup tmp pool
1401 ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
1402}
1403
1404function test_mon_pg()
1405{
1406 # Make sure we start healthy.
1407 wait_for_health_ok
1408
1409 ceph pg debug unfound_objects_exist
1410 ceph pg debug degraded_pgs_exist
1411 ceph pg deep-scrub 0.0
1412 ceph pg dump
1413 ceph pg dump pgs_brief --format=json
1414 ceph pg dump pgs --format=json
1415 ceph pg dump pools --format=json
1416 ceph pg dump osds --format=json
1417 ceph pg dump sum --format=json
1418 ceph pg dump all --format=json
1419 ceph pg dump pgs_brief osds --format=json
1420 ceph pg dump pools osds pgs_brief --format=json
1421 ceph pg dump_json
1422 ceph pg dump_pools_json
1423 ceph pg dump_stuck inactive
1424 ceph pg dump_stuck unclean
1425 ceph pg dump_stuck stale
1426 ceph pg dump_stuck undersized
1427 ceph pg dump_stuck degraded
1428 ceph pg ls
1429 ceph pg ls 0
1430 ceph pg ls stale
1431 expect_false ceph pg ls scrubq
1432 ceph pg ls active stale repair recovering
1433 ceph pg ls 0 active
1434 ceph pg ls 0 active stale
1435 ceph pg ls-by-primary osd.0
1436 ceph pg ls-by-primary osd.0 0
1437 ceph pg ls-by-primary osd.0 active
1438 ceph pg ls-by-primary osd.0 active stale
1439 ceph pg ls-by-primary osd.0 0 active stale
1440 ceph pg ls-by-osd osd.0
1441 ceph pg ls-by-osd osd.0 0
1442 ceph pg ls-by-osd osd.0 active
1443 ceph pg ls-by-osd osd.0 active stale
1444 ceph pg ls-by-osd osd.0 0 active stale
1445 ceph pg ls-by-pool rbd
1446 ceph pg ls-by-pool rbd active stale
1447 # can't test this...
1448 # ceph pg force_create_pg
1449 ceph pg getmap -o $TEMP_DIR/map.$$
1450 [ -s $TEMP_DIR/map.$$ ]
1451 ceph pg map 0.0 | grep acting
1452 ceph pg repair 0.0
1453 ceph pg scrub 0.0
1454
1455 ceph osd set-full-ratio .962
1456 ceph osd dump | grep '^full_ratio 0.962'
1457 ceph osd set-backfillfull-ratio .912
1458 ceph osd dump | grep '^backfillfull_ratio 0.912'
1459 ceph osd set-nearfull-ratio .892
1460 ceph osd dump | grep '^nearfull_ratio 0.892'
1461
1462 # Check health status
1463 ceph osd set-nearfull-ratio .913
1464 ceph health | grep 'HEALTH_ERR.*Full ratio(s) out of order'
1465 ceph health detail | grep 'backfillfull_ratio (0.912) < nearfull_ratio (0.913), increased'
1466 ceph osd set-nearfull-ratio .892
1467 ceph osd set-backfillfull-ratio .963
1468 ceph health detail | grep 'full_ratio (0.962) < backfillfull_ratio (0.963), increased'
1469 ceph osd set-backfillfull-ratio .912
1470
1471 # Check injected full results
1472 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
1473 wait_for_health "HEALTH_WARN.*1 nearfull osd(s)"
1474 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
1475 wait_for_health "HEALTH_WARN.*1 backfillfull osd(s)"
1476 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
1477 # failsafe and full are the same as far as the monitor is concerned
1478 wait_for_health "HEALTH_ERR.*1 full osd(s)"
1479 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
1480 wait_for_health "HEALTH_ERR.*2 full osd(s)"
1481 ceph health detail | grep "osd.0 is full at.*%"
1482 ceph health detail | grep "osd.2 is full at.*%"
1483 ceph health detail | grep "osd.1 is backfill full at.*%"
1484 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
1485 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
1486 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
1487 wait_for_health_ok
1488
1489 ceph pg stat | grep 'pgs:'
1490 ceph pg 0.0 query
1491 ceph tell 0.0 query
1492 ceph quorum enter
1493 ceph quorum_status
1494 ceph report | grep osd_stats
1495 ceph status
1496 ceph -s
1497
1498 #
1499 # tell osd version
1500 #
1501 ceph tell osd.0 version
1502 expect_false ceph tell osd.9999 version
1503 expect_false ceph tell osd.foo version
1504
1505 # back to pg stuff
1506
1507 ceph tell osd.0 dump_pg_recovery_stats | grep Started
1508
1509 ceph osd reweight 0 0.9
1510 expect_false ceph osd reweight 0 -1
1511 ceph osd reweight osd.0 1
1512
1513 ceph osd primary-affinity osd.0 .9
1514 expect_false ceph osd primary-affinity osd.0 -2
1515 expect_false ceph osd primary-affinity osd.9999 .5
1516 ceph osd primary-affinity osd.0 1
1517
1518 ceph osd pg-temp 0.0 0 1 2
1519 ceph osd pg-temp 0.0 osd.1 osd.0 osd.2
1520 expect_false ceph osd pg-temp asdf qwer
1521 expect_false ceph osd pg-temp 0.0 asdf
1522 expect_false ceph osd pg-temp 0.0
1523
1524 # don't test ceph osd primary-temp for now
1525}
1526
1527function test_mon_osd_pool_set()
1528{
1529 TEST_POOL_GETSET=pool_getset
1530 ceph osd pool create $TEST_POOL_GETSET 1
1531 wait_for_clean
1532 ceph osd pool get $TEST_POOL_GETSET all
1533
1534 for s in pg_num pgp_num size min_size crush_rule crush_ruleset; do
1535 ceph osd pool get $TEST_POOL_GETSET $s
1536 done
1537
1538 old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
1539 (( new_size = old_size + 1 ))
1540 ceph osd pool set $TEST_POOL_GETSET size $new_size
1541 ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
1542 ceph osd pool set $TEST_POOL_GETSET size $old_size
1543
1544 ceph osd pool create pool_erasure 1 1 erasure
1545 wait_for_clean
1546 set +e
1547 ceph osd pool set pool_erasure size 4444 2>$TMPFILE
1548 check_response 'not change the size'
1549 set -e
1550 ceph osd pool get pool_erasure erasure_code_profile
1551
1552 auid=5555
1553 ceph osd pool set $TEST_POOL_GETSET auid $auid
1554 ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
1555 ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
1556 ceph osd pool set $TEST_POOL_GETSET auid 0
1557
1558 for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
1559 ceph osd pool set $TEST_POOL_GETSET $flag false
1560 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1561 ceph osd pool set $TEST_POOL_GETSET $flag true
1562 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1563 ceph osd pool set $TEST_POOL_GETSET $flag 1
1564 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1565 ceph osd pool set $TEST_POOL_GETSET $flag 0
1566 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1567 expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
1568 expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
1569 done
1570
1571 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1572 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
1573 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
1574 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
1575 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1576
1577 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1578 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
1579 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
1580 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
1581 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1582
1583 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1584 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
1585 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
1586 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
1587 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1588
1589 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1590 ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
1591 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
1592 ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
1593 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1594
1595 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1596 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
1597 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
1598 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
1599 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1600
1601 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1602 ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
1603 ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
1604 ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
1605 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1606
1607 ceph osd pool set $TEST_POOL_GETSET nopgchange 1
1608 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
1609 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1610 ceph osd pool set $TEST_POOL_GETSET nopgchange 0
1611 ceph osd pool set $TEST_POOL_GETSET pg_num 10
1612 wait_for_clean
1613 ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1614
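# The monitor caps how far pg_num may be raised in a single step: roughly the
# number of OSDs times mon_osd_max_split_count (assumed here to be at its
# default of 32, which is what the *32 factor below relies on). The first
# increase sits exactly at that cap and must succeed; one PG more must fail.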
1615 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
1616 new_pgs=$(($old_pgs+$(ceph osd stat | grep osdmap | awk '{print $3}')*32))
1617 ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
1618 ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
1619 wait_for_clean
1620 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
1621 new_pgs=$(($old_pgs+$(ceph osd stat | grep osdmap | awk '{print $3}')*32+1))
1622 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
1623
1624 ceph osd pool set $TEST_POOL_GETSET nosizechange 1
1625 expect_false ceph osd pool set $TEST_POOL_GETSET size 2
1626 expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
1627 ceph osd pool set $TEST_POOL_GETSET nosizechange 0
1628 ceph osd pool set $TEST_POOL_GETSET size 2
1629 wait_for_clean
1630 ceph osd pool set $TEST_POOL_GETSET min_size 2
1631
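# Toggling hashpspool changes how every PG in the pool is mapped and therefore
# triggers data movement, which is why the monitor refuses it unless
# --yes-i-really-mean-it is supplied.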
1632 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
1633 ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
1634
1635 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
1636 ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
1637
1638 ceph osd pool set $TEST_POOL_GETSET nodelete 1
1639 expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
1640 ceph osd pool set $TEST_POOL_GETSET nodelete 0
1641 ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
1642
1643 ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 0'
1644 ceph osd pool get rbd crush_rule | grep 'crush_rule: '
1645}
1646
1647function test_mon_osd_tiered_pool_set()
1648{
1649 # this is really a tier pool
1650 ceph osd pool create real-tier 2
1651 ceph osd tier add rbd real-tier
1652
1653 ceph osd pool set real-tier hit_set_type explicit_hash
1654 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
1655 ceph osd pool set real-tier hit_set_type explicit_object
1656 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
1657 ceph osd pool set real-tier hit_set_type bloom
1658 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
1659 expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
1660 ceph osd pool set real-tier hit_set_period 123
1661 ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
1662 ceph osd pool set real-tier hit_set_count 12
1663 ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
1664 ceph osd pool set real-tier hit_set_fpp .01
1665 ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
1666
1667 ceph osd pool set real-tier target_max_objects 123
1668 ceph osd pool get real-tier target_max_objects | \
1669 grep 'target_max_objects:[ \t]\+123'
1670 ceph osd pool set real-tier target_max_bytes 123456
1671 ceph osd pool get real-tier target_max_bytes | \
1672 grep 'target_max_bytes:[ \t]\+123456'
1673 ceph osd pool set real-tier cache_target_dirty_ratio .123
1674 ceph osd pool get real-tier cache_target_dirty_ratio | \
1675 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
1676 expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
1677 expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
1678 ceph osd pool set real-tier cache_target_dirty_high_ratio .123
1679 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
1680 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
1681 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
1682 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
1683 ceph osd pool set real-tier cache_target_full_ratio .123
1684 ceph osd pool get real-tier cache_target_full_ratio | \
1685 grep 'cache_target_full_ratio:[ \t]\+0.123'
1686 ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
1687 ceph osd pool set real-tier cache_target_full_ratio 1.0
1688 ceph osd pool set real-tier cache_target_full_ratio 0
1689 expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
1690 ceph osd pool set real-tier cache_min_flush_age 123
1691 ceph osd pool get real-tier cache_min_flush_age | \
1692 grep 'cache_min_flush_age:[ \t]\+123'
1693 ceph osd pool set real-tier cache_min_evict_age 234
1694 ceph osd pool get real-tier cache_min_evict_age | \
1695 grep 'cache_min_evict_age:[ \t]\+234'
1696
1697 # this is not a tier pool
1698 ceph osd pool create fake-tier 2
1699 wait_for_clean
1700
1701 expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
1702 expect_false ceph osd pool get fake-tier hit_set_type
1703 expect_false ceph osd pool set fake-tier hit_set_type explicit_object
1704 expect_false ceph osd pool get fake-tier hit_set_type
1705 expect_false ceph osd pool set fake-tier hit_set_type bloom
1706 expect_false ceph osd pool get fake-tier hit_set_type
1707 expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
1708 expect_false ceph osd pool set fake-tier hit_set_period 123
1709 expect_false ceph osd pool get fake-tier hit_set_period
1710 expect_false ceph osd pool set fake-tier hit_set_count 12
1711 expect_false ceph osd pool get fake-tier hit_set_count
1712 expect_false ceph osd pool set fake-tier hit_set_fpp .01
1713 expect_false ceph osd pool get fake-tier hit_set_fpp
1714
1715 expect_false ceph osd pool set fake-tier target_max_objects 123
1716 expect_false ceph osd pool get fake-tier target_max_objects
1717 expect_false ceph osd pool set fake-tier target_max_bytes 123456
1718 expect_false ceph osd pool get fake-tier target_max_bytes
1719 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
1720 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
1721 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
1722 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
1723 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
1724 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
1725 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
1726 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
1727 expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
1728 expect_false ceph osd pool get fake-tier cache_target_full_ratio
1729 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
1730 expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
1731 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
1732 expect_false ceph osd pool set fake-tier cache_min_flush_age 123
1733 expect_false ceph osd pool get fake-tier cache_min_flush_age
1734 expect_false ceph osd pool set fake-tier cache_min_evict_age 234
1735 expect_false ceph osd pool get fake-tier cache_min_evict_age
1736
1737 ceph osd tier remove rbd real-tier
1738 ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
1739 ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
1740}
1741
1742function test_mon_osd_erasure_code()
1743{
1744
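# Re-setting an existing profile with identical key/value pairs is a no-op and
# succeeds; changing an existing profile requires --force, as exercised below.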
1745 ceph osd erasure-code-profile set fooprofile a=b c=d
1746 ceph osd erasure-code-profile set fooprofile a=b c=d
1747 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
1748 ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
1749 ceph osd erasure-code-profile set fooprofile a=b c=d e=f
1750 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
1751 #
1752 # cleanup by removing profile 'fooprofile'
1753 ceph osd erasure-code-profile rm fooprofile
1754}
1755
1756function test_mon_osd_misc()
1757{
1758 set +e
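# errexit is disabled here so the commands that are expected to fail do not
# abort the script; check_response then verifies both the error text captured
# in $TMPFILE and the exit status (22 == EINVAL).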
1759
1760 # expect error about missing 'pool' argument
1761 ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
1762
1763 # expect error about unused argument foo
1764 ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
1765
1766 # expect "not in range" for invalid full ratio
1767 ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22
1768
1769 # expect "higher than 100" for an overload percentage at or below 100
1770 ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
1771
1772 set -e
1773
1774 ceph osd reweight-by-utilization 110
1775 ceph osd reweight-by-utilization 110 .5
1776 expect_false ceph osd reweight-by-utilization 110 0
1777 expect_false ceph osd reweight-by-utilization 110 -0.1
1778 ceph osd test-reweight-by-utilization 110 .5 --no-increasing
1779 ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
1780 expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
1781 expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
1782 ceph osd reweight-by-pg 110
1783 ceph osd test-reweight-by-pg 110 .5
1784 ceph osd reweight-by-pg 110 rbd
1785 ceph osd reweight-by-pg 110 .5 rbd
1786 expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
1787}
1788
1789function test_mon_heap_profiler()
1790{
1791 do_test=1
1792 set +e
1793 # expect 'heap' commands to be correctly parsed
1794 ceph heap stats 2>$TMPFILE
1795 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
1796 echo "tcmalloc not enabled; skip heap profiler test"
1797 do_test=0
1798 fi
1799 set -e
1800
1801 [[ $do_test -eq 0 ]] && return 0
1802
1803 ceph heap start_profiler
1804 ceph heap dump
1805 ceph heap stop_profiler
1806 ceph heap release
1807}
1808
1809function test_admin_heap_profiler()
1810{
1811 do_test=1
1812 set +e
1813 # expect 'heap' commands to be correctly parsed
1814 ceph heap stats 2>$TMPFILE
1815 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
1816 echo "tcmalloc not enabled; skip heap profiler test"
1817 do_test=0
1818 fi
1819 set -e
1820
1821 [[ $do_test -eq 0 ]] && return 0
1822
1823 local admin_socket=$(get_admin_socket osd.0)
1824
1825 $SUDO ceph --admin-daemon $admin_socket heap start_profiler
1826 $SUDO ceph --admin-daemon $admin_socket heap dump
1827 $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
1828 $SUDO ceph --admin-daemon $admin_socket heap release
1829}
1830
1831function test_osd_bench()
1832{
1833 # test osd bench limits
1834 # Since we should not rely on defaults (they may change over time),
1835 # let's inject some values and perform some simple tests
1836 # max iops: 10 # 10 IOPS
1837 # max throughput: 10485760 # 10MB/s
1838 # max block size: 2097152 # 2MB
1839 # duration: 10 # 10 seconds
1840
1841 local args="\
1842 --osd-bench-duration 10 \
1843 --osd-bench-max-block-size 2097152 \
1844 --osd-bench-large-size-max-throughput 10485760 \
1845 --osd-bench-small-size-max-iops 10"
1846 ceph tell osd.0 injectargs ${args## }
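# With the values injected above, the byte limits that the checks below rely
# on work out as follows (a sketch of the arithmetic, per 10-second run):
#   small bs (IOPS-bound)       : 10 IOPS * 10 s * 4096 B =     409600 B
#   large bs (throughput-bound) : 10485760 B/s * 10 s     =  104857600 B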
1847
1848 # anything with a bs larger than 2097152 must fail
1849 expect_false ceph tell osd.0 bench 1 2097153
1850 # but a bs equal to the osd_bench_max_block_size cap (2097152) must succeed
1851 ceph tell osd.0 bench 1 2097152
1852
1853 # we assume 1MB as a large bs; anything lower is a small bs
1854 # for a 4096-byte bs, over 10 seconds, we are limited by IOPS
1855 # max count: 409600 (bytes)
1856
1857 # more than max count must not be allowed
1858 expect_false ceph tell osd.0 bench 409601 4096
1859 # but exactly 409600 must succeed
1860 ceph tell osd.0 bench 409600 4096
1861
1862 # for a large bs, we are limited by throughput.
1863 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
1864 # the max count will be (10MB * 10s) = 100MB
1865 # max count: 104857600 (bytes)
1866
1867 # more than max count must not be allowed
1868 expect_false ceph tell osd.0 bench 104857601 2097152
1869 # up to max count must be allowed
1870 ceph tell osd.0 bench 104857600 2097152
1871}
1872
1873function test_osd_negative_filestore_merge_threshold()
1874{
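# A negative filestore_merge_threshold disables filestore directory merging;
# here we only check that the negative value is accepted and reads back
# unchanged through the admin socket.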
1875 $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
1876 expect_config_value "osd.0" "filestore_merge_threshold" -1
1877}
1878
1879function test_mon_tell()
1880{
1881 ceph tell mon.a version
1882 ceph tell mon.b version
1883 expect_false ceph tell mon.foo version
1884
1885 sleep 1
1886
1887 ceph_watch_start debug
1888 ceph tell mon.a version
1889 ceph_watch_wait 'mon.0 \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
1890
1891 ceph_watch_start debug
1892 ceph tell mon.b version
1893 ceph_watch_wait 'mon.1 \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
1894}
1895
1896function test_mon_crushmap_validation()
1897{
1898 local map=$TEMP_DIR/map
1899 ceph osd getcrushmap -o $map
1900
1901 local crushtool_path="${TEMP_DIR}/crushtool"
1902 touch "${crushtool_path}"
1903 chmod +x "${crushtool_path}"
1904 local crushtool_path_old=`ceph-conf --show-config-value crushtool`
1905 ceph tell mon.\* injectargs --crushtool "${crushtool_path}"
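# The monitor runs the binary configured as 'crushtool' to validate a new
# crushmap before committing it. Pointing it at the stub scripts written
# below lets us control the validator's exit status, stderr output and
# runtime, and check how 'osd setcrushmap' reacts to each case.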
1906
1907 printf "%s\n" \
1908 "#!/bin/sh
1909 cat > /dev/null
1910 exit 0" > "${crushtool_path}"
1911
1912 ceph osd setcrushmap -i $map
1913
1914 printf "%s\n" \
1915 "#!/bin/sh
1916 cat > /dev/null
1917 exit 1" > "${crushtool_path}"
1918
1919 expect_false ceph osd setcrushmap -i $map
1920
1921 printf "%s\n" \
1922 "#!/bin/sh
1923 cat > /dev/null
1924 echo 'TEST FAIL' >&2
1925 exit 1" > "${crushtool_path}"
1926
1927 expect_false ceph osd setcrushmap -i $map 2> $TMPFILE
1928 check_response "Error EINVAL: Failed crushmap test: TEST FAIL"
1929
1930 local mon_lease=`ceph-conf --show-config-value mon_lease`
1931
1932 test "${mon_lease}" -gt 0
1933
1934 printf "%s\n" \
1935 "#!/bin/sh
1936 cat > /dev/null
1937 sleep $((mon_lease - 1))" > "${crushtool_path}"
1938
1939 ceph osd setcrushmap -i $map
1940
1941 printf "%s\n" \
1942 "#!/bin/sh
1943 cat > /dev/null
1944 sleep $((mon_lease + 1))" > "${crushtool_path}"
1945
1946 expect_false ceph osd setcrushmap -i $map 2> $TMPFILE
1947 check_response "Error EINVAL: Failed crushmap test: ${crushtool_path}: timed out (${mon_lease} sec)"
1948
1949 ceph tell mon.\* injectargs --crushtool "${crushtool_path_old}"
1950
1951 rm -f "${crushtool_path}"
1952}
1953
1954function test_mon_ping()
1955{
1956 ceph ping mon.a
1957 ceph ping mon.b
1958 expect_false ceph ping mon.foo
1959
1960 ceph ping mon.\*
1961}
1962
1963function test_mon_deprecated_commands()
1964{
1965 # current DEPRECATED commands are:
1966 # ceph compact
1967 # ceph scrub
1968 # ceph sync force
1969 #
1970 # Testing should be accomplished by setting
1971 # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
1972 # each one of these commands.
1973
1974 ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
1975 expect_false ceph tell mon.a compact 2> $TMPFILE
1976 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
1977
1978 expect_false ceph tell mon.a scrub 2> $TMPFILE
1979 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
1980
1981 expect_false ceph tell mon.a sync force 2> $TMPFILE
1982 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
1983
1984 ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
1985}
1986
1987function test_mon_cephdf_commands()
1988{
1989 # ceph df detail:
1990 # pool section:
1991 # RAW USED: approximately the per-pool USED multiplied by the pool's replica count (size)
1992
1993 ceph osd pool create cephdf_for_test 32 32 replicated
1994 ceph osd pool set cephdf_for_test size 2
1995
1996 dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
1997 rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
1998
1999 # wait until the object is visible so the df stats have had a chance to update
2000 for i in `seq 1 10`; do
2001 rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
2002 sleep 1
2003 done
2004
2005 cal_raw_used_size=`ceph df detail | grep cephdf_for_test | awk -F ' ' '{printf "%d\n", 2 * $3}'`
2006 raw_used_size=`ceph df detail | grep cephdf_for_test | awk -F ' ' '{print $10}'`
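# With size=2 and a single 4k object, RAW USED should be roughly twice the
# USED column. The awk field numbers above assume the 'ceph df detail' column
# layout of this release (field 3 = USED, field 10 = RAW USED).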
2007
2008 ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
2009 rm ./cephdf_for_test
2010
2011 expect_false test $cal_raw_used_size != $raw_used_size
2012}
2013
2014#
2015# New tests should be added to the TESTS array below
2016#
2017# Individual tests may be run using the '-t <testname>' argument
2018# The user can specify '-t <testname>' as many times as she wants
2019#
2020 # Tests will be run in the order presented in the TESTS array, or in
2021# the order specified by the '-t <testname>' options.
2022#
2023# '-l' will list all the available test names
2024# '-h' will show usage
2025#
2026# The test maintains backward compatibility: not specifying arguments
2027 # will run all tests in the order they appear in the TESTS array.
2028#
2029
2030set +x
2031MON_TESTS+=" mon_injectargs"
2032MON_TESTS+=" mon_injectargs_SI"
2033MON_TESTS+=" tiering"
2034MON_TESTS+=" auth"
2035MON_TESTS+=" auth_profiles"
2036MON_TESTS+=" mon_misc"
2037MON_TESTS+=" mon_mon"
2038MON_TESTS+=" mon_osd"
2039MON_TESTS+=" mon_osd_pool"
2040MON_TESTS+=" mon_osd_pool_quota"
2041MON_TESTS+=" mon_pg"
2042MON_TESTS+=" mon_osd_pool_set"
2043MON_TESTS+=" mon_osd_tiered_pool_set"
2044MON_TESTS+=" mon_osd_erasure_code"
2045MON_TESTS+=" mon_osd_misc"
2046MON_TESTS+=" mon_heap_profiler"
2047MON_TESTS+=" mon_tell"
2048MON_TESTS+=" mon_crushmap_validation"
2049MON_TESTS+=" mon_ping"
2050MON_TESTS+=" mon_deprecated_commands"
2051MON_TESTS+=" mon_caps"
2052MON_TESTS+=" mon_cephdf_commands"
2053OSD_TESTS+=" osd_bench"
2054OSD_TESTS+=" osd_negative_filestore_merge_threshold"
2055OSD_TESTS+=" tiering_agent"
2056OSD_TESTS+=" admin_heap_profiler"
2057
2058MDS_TESTS+=" mds_tell"
2059MDS_TESTS+=" mon_mds"
2060MDS_TESTS+=" mon_mds_metadata"
2061
2062TESTS+=$MON_TESTS
2063TESTS+=$OSD_TESTS
2064TESTS+=$MDS_TESTS
2065
2066#
2067# "main" follows
2068#
2069
2070function list_tests()
2071{
2072 echo "AVAILABLE TESTS"
2073 for i in $TESTS; do
2074 echo " $i"
2075 done
2076}
2077
2078function usage()
2079{
2080 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2081}
2082
2083tests_to_run=()
2084
2085sanity_check=true
2086
2087while [[ $# -gt 0 ]]; do
2088 opt=$1
2089
2090 case "$opt" in
2091 "-l" )
2092 do_list=1
2093 ;;
2094 "--asok-does-not-need-root" )
2095 SUDO=""
2096 ;;
2097 "--no-sanity-check" )
2098 sanity_check=false
2099 ;;
2100 "--test-mon" )
2101 tests_to_run+="$MON_TESTS"
2102 ;;
2103 "--test-osd" )
2104 tests_to_run+="$OSD_TESTS"
2105 ;;
2106 "--test-mds" )
2107 tests_to_run+="$MDS_TESTS"
2108 ;;
2109 "-t" )
2110 shift
2111 if [[ -z "$1" ]]; then
2112 echo "missing argument to '-t'"
2113 usage ;
2114 exit 1
2115 fi
2116 tests_to_run+=" $1"
2117 ;;
2118 "-h" )
2119 usage ;
2120 exit 0
2121 ;;
2122 esac
2123 shift
2124done
2125
2126if [[ $do_list -eq 1 ]]; then
2127 list_tests ;
2128 exit 0
2129fi
2130
2131if test -z "$tests_to_run" ; then
2132 tests_to_run="$TESTS"
2133fi
2134
2135if $sanity_check ; then
2136 wait_no_osd_down
2137fi
2138for i in $tests_to_run; do
2139 if $sanity_check ; then
2140 check_no_osd_down
2141 fi
2142 set -x
2143 test_${i}
2144 set +x
2145done
2146if $sanity_check ; then
2147 check_no_osd_down
2148fi
2149
2150set -x
2151
2152echo OK