#!/bin/bash -x
# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab

source $(dirname $0)/../ceph-helpers.sh

set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}
export CEPH_DEV=1

function get_admin_socket()
{
  local client=$1

  if test -n "$CEPH_OUT_DIR";
  then
    echo $CEPH_OUT_DIR/$client.asok
  else
    local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
    echo "/var/run/ceph/$cluster-$client.asok"
  fi
}
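
# Example (illustrative only): with CEPH_OUT_DIR=/tmp/ceph-out this prints
# /tmp/ceph-out/osd.0.asok for "osd.0"; otherwise the path is derived from the
# --cluster name in CEPH_ARGS, e.g. /var/run/ceph/ceph-osd.0.asok.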

function check_no_osd_down()
{
  ! ceph osd dump | grep ' down '
}

function wait_no_osd_down()
{
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_no_osd_down ; then
      echo "waiting for osd(s) to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  check_no_osd_down
}

function expect_false()
{
  set -x
  if "$@"; then return 1; else return 0; fi
}
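
# Example (illustrative only): "expect_false false" succeeds, while
# "expect_false true" makes the test fail under "set -e".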

TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0

TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)

#
# retry_eagain max cmd args ...
#
# retry cmd args ... if it exits on error and its output contains the
# string EAGAIN, at most $max times
#
function retry_eagain()
{
  local max=$1
  shift
  local status
  local tmpfile=$TEMP_DIR/retry_eagain.$$
  local count
  for count in $(seq 1 $max) ; do
    status=0
    "$@" > $tmpfile 2>&1 || status=$?
    if test $status = 0 ||
      ! grep --quiet EAGAIN $tmpfile ; then
      break
    fi
    sleep 1
  done
  if test $count = $max ; then
    echo retried with non zero exit status, $max times: "$@" >&2
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}
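
# Example (illustrative only): retry a command up to 5 times while it keeps
# failing with EAGAIN in its output:
#   retry_eagain 5 ceph tell osd.0 version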

#
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of cmd arg ... if the output contains
# ENXIO.
#
function map_enxio_to_eagain()
{
  local status=0
  local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$

  "$@" > $tmpfile 2>&1 || status=$?
  if test $status != 0 &&
    grep --quiet ENXIO $tmpfile ; then
    echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}
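
# The two helpers above are typically combined (as done further down in this
# file), e.g.:
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
# so a transient ENXIO from a restarting OSD is retried like an EAGAIN.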

function check_response()
{
  expected_string=$1
  retcode=$2
  expected_retcode=$3
  if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
    echo "return code invalid: got $retcode, expected $expected_retcode" >&2
    exit 1
  fi

  if ! grep --quiet -- "$expected_string" $TMPFILE ; then
    echo "Didn't find $expected_string in output" >&2
    cat $TMPFILE >&2
    exit 1
  fi
}
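
# check_response expects the command output to already be in $TMPFILE; the
# return-code arguments are optional. Example taken from the tiering tests
# below (illustrative only):
#   ceph osd tier remove slow cache3 2> $TMPFILE || true
#   check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"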

function get_config_value_or_die()
{
  local target config_opt raw val

  target=$1
  config_opt=$2

  raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
  if [[ $? -ne 0 ]]; then
    echo "error obtaining config opt '$config_opt' from '$target': $raw"
    exit 1
  fi

  raw=`echo $raw | sed -e 's/[{} "]//g'`
  val=`echo $raw | cut -f2 -d:`

  echo "$val"
  return 0
}

function expect_config_value()
{
  local target config_opt expected_val val
  target=$1
  config_opt=$2
  expected_val=$3

  val=$(get_config_value_or_die $target $config_opt)

  if [[ "$val" != "$expected_val" ]]; then
    echo "expected '$expected_val', got '$val'"
    exit 1
  fi
}

function ceph_watch_start()
{
  local watch_opt=--watch

  if [ -n "$1" ]; then
    watch_opt=--watch-$1
  fi

  CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
  ceph $watch_opt > $CEPH_WATCH_FILE &
  CEPH_WATCH_PID=$!

  # wait until the "ceph" client is connected and receiving
  # log messages from monitor
  for i in `seq 3`; do
    grep -q "cluster" $CEPH_WATCH_FILE && break
    sleep 1
  done
}

function ceph_watch_wait()
{
  local regexp=$1
  local timeout=30

  if [ -n "$2" ]; then
    timeout=$2
  fi

  for i in `seq ${timeout}`; do
    grep -q "$regexp" $CEPH_WATCH_FILE && break
    sleep 1
  done

  kill $CEPH_WATCH_PID

  if ! grep "$regexp" $CEPH_WATCH_FILE; then
    echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
    cat $CEPH_WATCH_FILE >&2
    return 1
  fi
}
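
# ceph_watch_start and ceph_watch_wait are used as a pair, as in
# test_mon_misc() below (illustrative only):
#   ceph_watch_start
#   ceph log "some message"
#   ceph_watch_wait "some message"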

function test_mon_injectargs()
{
  CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ! grep "the.dump" $TMPFILE || return 1
  ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true'"
  ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
  expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
      ceph tell osd.0 injectargs -- '--osd_op_history_duration'

  ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200' >& $TMPFILE || return 1
  check_response "osd_deep_scrub_interval = '2419200.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon_probe_timeout 2' >& $TMPFILE || return 1
  check_response "mon_probe_timeout = '2.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
  check_response "mon_lease = '6.000000' (not observed, change may require restart)"

  # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
  expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 >& $TMPFILE || return 1
  check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
}

function test_mon_injectargs_SI()
{
  # Test SI units during injectargs and 'config set'
  # We only aim at testing the units are parsed accordingly
  # and don't intend to test whether the options being set
  # actually expect SI units to be passed.
  # Keep in mind that all integer based options (i.e., INT,
  # LONG, U32, U64) will accept SI unit modifiers.
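  # For example, under these rules "10K" parses to 10240 and "1G" to
  # 1073741824 (binary multiples), which is what the checks below assert.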
  initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
  check_response "'10F': (22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
  expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
  expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}

function test_tiering_agent()
{
  local slow=slow_eviction
  local fast=fast_eviction
  ceph osd pool create $slow 1 1
  ceph osd pool create $fast 1 1
  ceph osd tier add $slow $fast
  ceph osd tier cache-mode $fast writeback
  ceph osd tier set-overlay $slow $fast
  ceph osd pool set $fast hit_set_type bloom
  rados -p $slow put obj1 /etc/group
  ceph osd pool set $fast target_max_objects 1
  ceph osd pool set $fast hit_set_count 1
  ceph osd pool set $fast hit_set_period 5
  # wait for the object to be evicted from the cache
  local evicted
  evicted=false
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      evicted=true
      break
    fi
    sleep 1
  done
  $evicted # assert
  # the object is proxy read and promoted to the cache
  rados -p $slow get obj1 - >/dev/null
  # wait for the promoted object to be evicted again
  evicted=false
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      evicted=true
      break
    fi
    sleep 1
  done
  $evicted # assert
  ceph osd tier remove-overlay $slow
  ceph osd tier remove $slow $fast
  ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
  ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}

function test_tiering_1()
{
  # tiering
  ceph osd pool create slow 2
  ceph osd pool create slow2 2
  ceph osd pool create cache 2
  ceph osd pool create cache2 2
  ceph osd tier add slow cache
  ceph osd tier add slow cache2
  expect_false ceph osd tier add slow2 cache
  # test some state transitions
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache readonly
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache writeback
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  # test with dirty objects in the tier pool
  # tier pool currently set to 'writeback'
  rados -p cache put /etc/passwd /etc/passwd
  flush_pg_stats
  # 1 dirty object in pool 'cache'
  ceph osd tier cache-mode cache proxy
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache writeback
  # remove object from tier pool
  rados -p cache rm /etc/passwd
  rados -p cache cache-flush-evict-all
  flush_pg_stats
  # no dirty objects in pool 'cache'
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  TRIES=0
  while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
  do
    grep 'currently creating pgs' $TMPFILE
    TRIES=$(( $TRIES + 1 ))
    test $TRIES -ne 60
    sleep 3
  done
  expect_false ceph osd pool set cache pg_num 4
  ceph osd tier cache-mode cache none
  ceph osd tier set-overlay slow cache
  expect_false ceph osd tier set-overlay slow cache2
  expect_false ceph osd tier remove slow cache
  ceph osd tier remove-overlay slow
  ceph osd tier set-overlay slow cache2
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache
  ceph osd tier add slow2 cache
  expect_false ceph osd tier set-overlay slow cache
  ceph osd tier set-overlay slow2 cache
  ceph osd tier remove-overlay slow2
  ceph osd tier remove slow2 cache
  ceph osd tier remove slow cache2

  # make sure a non-empty pool fails
  rados -p cache2 put /etc/passwd /etc/passwd
  while ! ceph df | grep cache2 | grep ' 1 ' ; do
    echo waiting for pg stats to flush
    sleep 2
  done
  expect_false ceph osd tier add slow cache2
  ceph osd tier add slow cache2 --force-nonempty
  ceph osd tier remove slow cache2

  ceph osd pool ls | grep cache2
  ceph osd pool ls -f json-pretty | grep cache2
  ceph osd pool ls detail | grep cache2
  ceph osd pool ls detail -f json-pretty | grep cache2

  ceph osd pool delete slow slow --yes-i-really-really-mean-it
  ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
  ceph osd pool delete cache cache --yes-i-really-really-mean-it
  ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
}

function test_tiering_2()
{
  # make sure we can't clobber snapshot state
  ceph osd pool create snap_base 2
  ceph osd pool create snap_cache 2
  ceph osd pool mksnap snap_cache snapname
  expect_false ceph osd tier add snap_base snap_cache
  ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
  ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}

function test_tiering_3()
{
  # make sure we can't create snapshot on tier
  ceph osd pool create basex 2
  ceph osd pool create cachex 2
  ceph osd tier add basex cachex
  expect_false ceph osd pool mksnap cachex snapname
  ceph osd tier remove basex cachex
  ceph osd pool delete basex basex --yes-i-really-really-mean-it
  ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}

function test_tiering_4()
{
  # make sure we can't create an ec pool tier
  ceph osd pool create eccache 2 2 erasure
  expect_false ceph osd set-require-min-compat-client bobtail
  ceph osd pool create repbase 2
  expect_false ceph osd tier add repbase eccache
  ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
  ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}

function test_tiering_5()
{
  # convenient add-cache command
  ceph osd pool create slow 2
  ceph osd pool create cache3 2
  ceph osd tier add-cache slow cache3 1024000
  ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
  ceph osd tier remove slow cache3 2> $TMPFILE || true
  check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache3
  ceph osd pool ls | grep cache3
  ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
  ! ceph osd pool ls | grep cache3 || exit 1
  ceph osd pool delete slow slow --yes-i-really-really-mean-it
}

function test_tiering_6()
{
  # check that add-cache works
  ceph osd pool create datapool 2
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd tier cache-mode cachepool writeback
  rados -p datapool put object /etc/passwd
  rados -p cachepool stat object
  rados -p cachepool cache-flush object
  rados -p datapool stat object
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}

function test_tiering_7()
{
  # protection against pool removal when used as tiers
  ceph osd pool create datapool 2
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'datapool' has tiers cachepool"
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}

function test_tiering_8()
{
  ## check health check
  ceph osd set notieragent
  ceph osd pool create datapool 2
  ceph osd pool create cache4 2
  ceph osd tier add-cache datapool cache4 1024000
  ceph osd tier cache-mode cache4 writeback
  tmpfile=$(mktemp|grep tmp)
  dd if=/dev/zero of=$tmpfile bs=4K count=1
  ceph osd pool set cache4 target_max_objects 200
  ceph osd pool set cache4 target_max_bytes 1000000
  rados -p cache4 put foo1 $tmpfile
  rados -p cache4 put foo2 $tmpfile
  rm -f $tmpfile
  flush_pg_stats
  ceph df | grep datapool | grep ' 2 '
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cache4
  ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
  ceph osd unset notieragent
}

function test_tiering_9()
{
  # make sure 'tier remove' behaves as we expect
  # i.e., removing a tier from a pool that's not its base pool only
  # results in a 'pool foo is now (or already was) not a tier of bar'
  #
  ceph osd pool create basepoolA 2
  ceph osd pool create basepoolB 2
  poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
  poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')

  ceph osd pool create cache5 2
  ceph osd pool create cache6 2
  ceph osd tier add basepoolA cache5
  ceph osd tier add basepoolB cache6
  ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
  ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"

  ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
  ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1

  ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
  ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1

  ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
  ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
  ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
  ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}

function test_auth()
{
  ceph auth add client.xx mon allow osd "allow *"
  ceph auth export client.xx >client.xx.keyring
  ceph auth add client.xx -i client.xx.keyring
  rm -f client.xx.keyring
  ceph auth list | grep client.xx
  ceph auth get client.xx | grep caps | grep mon
  ceph auth get client.xx | grep caps | grep osd
  ceph auth get-key client.xx
  ceph auth print-key client.xx
  ceph auth print_key client.xx
  ceph auth caps client.xx osd "allow rw"
  expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
  ceph auth get client.xx | grep osd | grep "allow rw"
  ceph auth export | grep client.xx
  ceph auth export -o authfile
  ceph auth import -i authfile
  ceph auth export -o authfile2
  diff authfile authfile2
  rm authfile authfile2
  ceph auth del client.xx
  expect_false ceph auth get client.xx

  # (almost) interactive mode
  echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
  ceph auth get client.xx
  # script mode
  echo 'auth del client.xx' | ceph
  expect_false ceph auth get client.xx

  #
  # get / set auid
  #
  local auid=444
  ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
  expect_false ceph auth import --in-file TEST-keyring
  rm TEST-keyring
  ceph-authtool --create-keyring --name client.TEST --gen-key --cap mon "allow r" --set-uid $auid TEST-keyring
  ceph auth import --in-file TEST-keyring
  rm TEST-keyring
  ceph auth get client.TEST > $TMPFILE
  check_response "auid = $auid"
  ceph --format json-pretty auth get client.TEST > $TMPFILE
  check_response '"auid": '$auid
  ceph auth list > $TMPFILE
  check_response "auid: $auid"
  ceph --format json-pretty auth list > $TMPFILE
  check_response '"auid": '$auid
  ceph auth del client.TEST
}

function test_auth_profiles()
{
  ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
       mgr 'allow profile read-only'
  ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
       mgr 'allow profile read-write'
  ceph auth add client.xx-profile-rd mon 'allow profile role-definer'

  ceph auth export > client.xx.keyring

  # read-only is allowed all read-only commands (auth excluded)
  ceph -n client.xx-profile-ro -k client.xx.keyring status
  ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
  ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
  # read-only gets access denied for rw commands or auth commands
  ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring auth list >& $TMPFILE || true
  check_response "EACCES: access denied"

  # read-write is allowed for all read-write commands (except auth)
  ceph -n client.xx-profile-rw -k client.xx.keyring status
  ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
  ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
  ceph -n client.xx-profile-rw -k client.xx.keyring log foo
  ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
  ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
  # read-write gets access denied for auth commands
  ceph -n client.xx-profile-rw -k client.xx.keyring auth list >& $TMPFILE || true
  check_response "EACCES: access denied"

  # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
  ceph -n client.xx-profile-rd -k client.xx.keyring auth list
  ceph -n client.xx-profile-rd -k client.xx.keyring auth export
  ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
  ceph -n client.xx-profile-rd -k client.xx.keyring status
  ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  # read-only 'mon' subsystem commands are allowed
  ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
  # but read-write 'mon' commands are not
  ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"

  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw

  # add a new role-definer with the existing role-definer
  ceph -n client.xx-profile-rd -k client.xx.keyring \
       auth add client.xx-profile-rd2 mon 'allow profile role-definer'
  ceph -n client.xx-profile-rd -k client.xx.keyring \
       auth export > client.xx.keyring.2
  # remove old role-definer using the new role-definer
  ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
       auth del client.xx-profile-rd
  # remove the remaining role-definer with admin
  ceph auth del client.xx-profile-rd2
  rm -f client.xx.keyring client.xx.keyring.2
}

function test_mon_caps()
{
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring

  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"

  rm -rf $TEMP_DIR/ceph.client.bug.keyring
  ceph auth del client.bug
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"
}

function test_mon_misc()
{
  # with and without verbosity
  ceph osd dump | grep '^epoch'
  ceph --concise osd dump | grep '^epoch'

  ceph osd df | grep 'MIN/MAX VAR'

  # df
  ceph df > $TMPFILE
  grep GLOBAL $TMPFILE
  grep -v DIRTY $TMPFILE
  ceph df detail > $TMPFILE
  grep DIRTY $TMPFILE
  ceph df --format json > $TMPFILE
  grep 'total_bytes' $TMPFILE
  grep -v 'dirty' $TMPFILE
  ceph df detail --format json > $TMPFILE
  grep 'rd_bytes' $TMPFILE
  grep 'dirty' $TMPFILE
  ceph df --format xml | grep '<total_bytes>'
  ceph df detail --format xml | grep '<rd_bytes>'

  ceph fsid
  ceph health
  ceph health detail
  ceph health --format json-pretty
  ceph health detail --format xml-pretty

  ceph time-sync-status

  ceph node ls
  for t in mon osd mds ; do
    ceph node ls $t
  done

  ceph_watch_start
  mymsg="this is a test log message $$.$(date)"
  ceph log "$mymsg"
  ceph log last | grep "$mymsg"
  ceph log last 100 | grep "$mymsg"
  ceph_watch_wait "$mymsg"

  ceph mgr dump
  ceph mgr module ls
  ceph mgr module enable restful
  expect_false ceph mgr module enable foodne
  ceph mgr module enable foodne --force
  ceph mgr module disable foodne
  ceph mgr module disable foodnebizbangbash

  ceph mon metadata a
  ceph mon metadata
  ceph mon count-metadata ceph_version
  ceph mon versions

  ceph node ls
}

function check_mds_active()
{
  fs_name=$1
  ceph fs get $fs_name | grep active
}

function wait_mds_active()
{
  fs_name=$1
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_mds_active $fs_name ; then
      echo "waiting for an active MDS daemon ($i/$max_run)"
      sleep 5
    else
      break
    fi
  done
  check_mds_active $fs_name
}

function get_mds_gids()
{
  fs_name=$1
  ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
}
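
# Example output (illustrative only): for a filesystem with a single active
# MDS this prints one numeric GID such as "4107"; with several MDS daemons
# the GIDs come back space-separated.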

function fail_all_mds()
{
  fs_name=$1
  ceph fs set $fs_name cluster_down true
  mds_gids=$(get_mds_gids $fs_name)
  for mds_gid in $mds_gids ; do
    ceph mds fail $mds_gid
  done
  if check_mds_active $fs_name ; then
    echo "An active MDS remains, something went wrong"
    ceph fs get $fs_name
    exit -1
  fi

}

function remove_all_fs()
{
  existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
  for fs_name in $existing_fs ; do
    echo "Removing fs ${fs_name}..."
    fail_all_mds $fs_name
    echo "Removing existing filesystem '${fs_name}'..."
    ceph fs rm $fs_name --yes-i-really-mean-it
    echo "Removed '${fs_name}'."
  done
}

# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
  ceph auth list | grep "^mds"
}

# some of the commands are just not idempotent.
function without_test_dup_command()
{
  if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
    $@
  else
    local saved=${CEPH_CLI_TEST_DUP_COMMAND}
    unset CEPH_CLI_TEST_DUP_COMMAND
    $@
    CEPH_CLI_TEST_DUP_COMMAND=$saved
  fi
}
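
# Example (illustrative only): "respawn" is not idempotent, so further down it
# is run as
#   without_test_dup_command ceph tell mds.0 respawn
# to avoid the duplicate command replay that CEPH_CLI_TEST_DUP_COMMAND enables.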

function test_mds_tell()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  remove_all_fs
  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  # Test injectargs by GID
  old_mds_gids=$(get_mds_gids $FS_NAME)
  echo Old GIDs: $old_mds_gids

  for mds_gid in $old_mds_gids ; do
    ceph tell mds.$mds_gid injectargs "--debug-mds 20"
  done
  expect_false ceph tell mds.a injectargs mds_max_file_recover -1

  # Test respawn by rank
  without_test_dup_command ceph tell mds.0 respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  # Test respawn by ID
  without_test_dup_command ceph tell mds.a respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  remove_all_fs
  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mon_mds()
{
  local FS_NAME=cephfs
  remove_all_fs

  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data

  ceph fs set $FS_NAME cluster_down true
  ceph fs set $FS_NAME cluster_down false

  # Legacy commands, act on default fs
  ceph mds cluster_down
  ceph mds cluster_up

  ceph mds compat rm_incompat 4
  ceph mds compat rm_incompat 4

  # We don't want any MDSs to be up, their activity can interfere with
  # the "current_epoch + 1" checking below if they're generating updates
  fail_all_mds $FS_NAME

  ceph mds compat show
  expect_false ceph mds deactivate 2
  ceph mds dump
  ceph fs dump
  ceph fs get $FS_NAME
  for mds_gid in $(get_mds_gids $FS_NAME) ; do
    ceph mds metadata $mds_gid
  done
  ceph mds metadata
  ceph mds versions
  ceph mds count-metadata os

  # XXX mds fail, but how do you undo it?
  mdsmapfile=$TEMP_DIR/mdsmap.$$
  current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
  [ -s $mdsmapfile ]
  rm $mdsmapfile

  ceph osd pool create data2 10
  ceph osd pool create data3 10
  data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
  data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
  ceph mds add_data_pool $data2_pool
  ceph mds add_data_pool $data3_pool
  ceph mds add_data_pool 100 >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph mds add_data_pool foobarbaz >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph mds remove_data_pool $data2_pool
  ceph mds remove_data_pool $data3_pool
  ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
  ceph mds set allow_multimds false
  expect_false ceph mds set_max_mds 4
  ceph mds set allow_multimds true
  ceph mds set_max_mds 4
  ceph mds set_max_mds 3
  ceph mds set_max_mds 256
  expect_false ceph mds set_max_mds 257
  ceph mds set max_mds 4
  ceph mds set max_mds 256
  expect_false ceph mds set max_mds 257
  expect_false ceph mds set max_mds asdf
  expect_false ceph mds set inline_data true
  ceph mds set inline_data true --yes-i-really-mean-it
  ceph mds set inline_data yes --yes-i-really-mean-it
  ceph mds set inline_data 1 --yes-i-really-mean-it
  expect_false ceph mds set inline_data --yes-i-really-mean-it
  ceph mds set inline_data false
  ceph mds set inline_data no
  ceph mds set inline_data 0
  expect_false ceph mds set inline_data asdf
  ceph mds set max_file_size 1048576
  expect_false ceph mds set max_file_size 123asdf

  expect_false ceph mds set allow_new_snaps
  expect_false ceph mds set allow_new_snaps true
  ceph mds set allow_new_snaps true --yes-i-really-mean-it
  ceph mds set allow_new_snaps 0
  ceph mds set allow_new_snaps false
  ceph mds set allow_new_snaps no
  expect_false ceph mds set allow_new_snaps taco

  # we should never be able to add EC pools as data or metadata pools
  # create an ec-pool...
  ceph osd pool create mds-ec-pool 10 10 erasure
  set +e
  ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e
  ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
  data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
  metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')

  fail_all_mds $FS_NAME

  set +e
  # Check that rmfailed requires confirmation
  expect_false ceph mds rmfailed 0
  ceph mds rmfailed 0 --yes-i-really-mean-it
  set -e

  # Check that `newfs` is no longer permitted
  expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE

  # Check that 'fs reset' runs
  ceph fs reset $FS_NAME --yes-i-really-mean-it

  # Check that creating a second FS fails by default
  ceph osd pool create fs_metadata2 10
  ceph osd pool create fs_data2 10
  set +e
  expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
  set -e

  # Check that setting enable_multiple enables creation of second fs
  ceph fs flag set enable_multiple true --yes-i-really-mean-it
  ceph fs new cephfs2 fs_metadata2 fs_data2

  # Clean up multi-fs stuff
  fail_all_mds cephfs2
  ceph fs rm cephfs2 --yes-i-really-mean-it
  ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
  ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it

  fail_all_mds $FS_NAME

  # Clean up to enable subsequent fs new tests
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  set +e
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e

  # ... now create a cache tier in front of the EC pool...
  ceph osd pool create mds-tier 2
  ceph osd tier add mds-ec-pool mds-tier
  ceph osd tier set-overlay mds-ec-pool mds-tier
  tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')

  # Use of a readonly tier should be forbidden
  ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
  set +e
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
  set -e

  # Use of a writeback tier should enable FS creation
  ceph osd tier cache-mode mds-tier writeback
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force

  # While a FS exists using the tiered pools, I should not be allowed
  # to remove the tier
  set +e
  ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  set -e

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ... but we should be forbidden from using the cache pool in the FS directly.
  set +e
  ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  set -e

  # Clean up tier + EC pools
  ceph osd tier remove-overlay mds-ec-pool
  ceph osd tier remove mds-ec-pool mds-tier

  # Create a FS using the 'cache' pool now that it's no longer a tier
  ceph fs new $FS_NAME fs_metadata mds-tier --force

  # We should be forbidden from using this pool as a tier now that
  # it's in use for CephFS
  set +e
  ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  set -e

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # We should be permitted to use an EC pool with overwrites enabled
  # as the data pool...
  ceph osd pool set mds-ec-pool allow_ec_overwrites true
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ...but not as the metadata pool
  set +e
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e

  ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it

  # Create a FS and check that we can subsequently add a cache tier to it
  ceph fs new $FS_NAME fs_metadata fs_data --force

  # Adding overlay to FS pool should be permitted, RADOS clients handle this.
  ceph osd tier add fs_metadata mds-tier
  ceph osd tier cache-mode mds-tier writeback
  ceph osd tier set-overlay fs_metadata mds-tier

  # Removing tier should be permitted because the underlying pool is
  # replicated (#11504 case)
  ceph osd tier cache-mode mds-tier proxy
  ceph osd tier remove-overlay fs_metadata
  ceph osd tier remove fs_metadata mds-tier
  ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it

  # Clean up FS
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it


  ceph mds stat
  # ceph mds tell mds.a getmap
  # ceph mds rm
  # ceph mds rmfailed
  # ceph mds set_state
  # ceph mds stop

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mon_mds_metadata()
{
  local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
  test "$nmons" -gt 0

  ceph mds dump |
    sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
    while read gid id rank; do
      ceph mds metadata ${gid} | grep '"hostname":'
      ceph mds metadata ${id} | grep '"hostname":'
      ceph mds metadata ${rank} | grep '"hostname":'

      local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
      test "$n" -eq "$nmons"
    done

  expect_false ceph mds metadata UNKNOWN
}

function test_mon_mon()
{
  # print help message
  ceph --help mon
  # no mon add/remove
  ceph mon dump
  ceph mon getmap -o $TEMP_DIR/monmap.$$
  [ -s $TEMP_DIR/monmap.$$ ]
  # ceph mon tell
  ceph mon_status

  # test mon features
  ceph mon feature ls
  ceph mon feature set kraken --yes-i-really-mean-it
  expect_false ceph mon feature set abcd
  expect_false ceph mon feature set abcd --yes-i-really-mean-it
}

function gen_secrets_file()
{
  # let's assume we can have the following types
  # all - generates both cephx and lockbox, with mock dm-crypt key
  # cephx - only cephx
  # no_cephx - lockbox and dm-crypt, no cephx
  # no_lockbox - dm-crypt and cephx, no lockbox
  # empty - empty file
  # empty_json - correct json, empty map
  # bad_json - bad json :)
  #
  local t=$1
  if [[ -z "$t" ]]; then
    t="all"
  fi

  fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
  echo $fn
  if [[ "$t" == "empty" ]]; then
    return 0
  fi

  echo "{" > $fn
  if [[ "$t" == "bad_json" ]]; then
    echo "asd: ; }" >> $fn
    return 0
  elif [[ "$t" == "empty_json" ]]; then
    echo "}" >> $fn
    return 0
  fi

  cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
  lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
  dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""

  if [[ "$t" == "all" ]]; then
    echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "cephx" ]]; then
    echo "$cephx_secret" >> $fn
  elif [[ "$t" == "no_cephx" ]]; then
    echo "$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "no_lockbox" ]]; then
    echo "$cephx_secret,$dmcrypt_key" >> $fn
  else
    echo "unknown gen_secrets_file() type \'$t\'"
    return 1
  fi
  echo "}" >> $fn
  return 0
}
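
# Example (illustrative only): create a secrets file holding only a cephx key
# and feed it to "ceph osd new", mirroring what the test below does:
#   cephx_only=$(gen_secrets_file "cephx")
#   ceph osd new $(uuidgen) -i $cephx_only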

function test_mon_osd_create_destroy()
{
  ceph osd new 2>&1 | grep 'EINVAL'
  ceph osd new '' -1 2>&1 | grep 'EINVAL'
  ceph osd new '' 10 2>&1 | grep 'EINVAL'

  old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

  old_osds=$(ceph osd ls)
  num_osds=$(ceph osd ls | wc -l)

  uuid=$(uuidgen)
  id=$(ceph osd new $uuid 2>/dev/null)

  for i in $old_osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd find $id

  id2=`ceph osd new $uuid 2>/dev/null`

  [[ $id2 == $id ]]

  ceph osd new $uuid $id

  id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST

  uuid2=$(uuidgen)
  id2=$(ceph osd new $uuid2)
  ceph osd find $id2
  [[ "$id2" != "$id" ]]

  ceph osd new $uuid $id2 2>&1 | grep EEXIST
  ceph osd new $uuid2 $id2

  # test with secrets
  empty_secrets=$(gen_secrets_file "empty")
  empty_json=$(gen_secrets_file "empty_json")
  all_secrets=$(gen_secrets_file "all")
  cephx_only=$(gen_secrets_file "cephx")
  no_cephx=$(gen_secrets_file "no_cephx")
  no_lockbox=$(gen_secrets_file "no_lockbox")
  bad_json=$(gen_secrets_file "bad_json")

  # empty secrets should be idempotent
  new_id=$(ceph osd new $uuid $id -i $empty_secrets)
  [[ "$new_id" == "$id" ]]

  # empty json, thus empty secrets
  new_id=$(ceph osd new $uuid $id -i $empty_json)
  [[ "$new_id" == "$id" ]]

  ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'

  ceph osd rm $id
  ceph osd rm $id2
  ceph osd setmaxosd $old_maxosd

  ceph osd new $uuid -i $bad_json 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'

  osds=$(ceph osd ls)
  id=$(ceph osd new $uuid -i $all_secrets)
  for i in $osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd find $id

  # validate secrets and dm-crypt are set
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
      jq '.key')
  s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
  [[ $k == $s ]]
  ceph config-key exists dm-crypt/osd/$uuid/luks

  osds=$(ceph osd ls)
  id2=$(ceph osd new $uuid2 -i $cephx_only)
  for i in $osds; do
    [[ "$i" != "$id2" ]]
  done

  ceph osd find $id2
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  expect_false ceph auth get-key client.osd-lockbox.$uuid2
  expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks

  ceph osd destroy osd.$id2 --yes-i-really-mean-it
  ceph osd destroy $id2 --yes-i-really-mean-it
  ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  ceph osd dump | grep osd.$id2 | grep destroyed

  id3=$id2
  uuid3=$(uuidgen)
  ceph osd new $uuid3 $id3 -i $all_secrets
  ceph osd dump | grep osd.$id3 | expect_false grep destroyed
  ceph auth get-key client.osd-lockbox.$uuid3
  ceph auth get-key osd.$id3
  ceph config-key exists dm-crypt/osd/$uuid3/luks

  ceph osd purge osd.$id3 --yes-i-really-mean-it
  expect_false ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  expect_false ceph auth get-key client.osd-lockbox.$uuid3
  expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  ceph osd purge osd.$id3 --yes-i-really-mean-it

  ceph osd purge osd.$id --yes-i-really-mean-it
  expect_false ceph osd find $id
  expect_false ceph auth get-key osd.$id
  expect_false ceph auth get-key client.osd-lockbox.$uuid
  expect_false ceph config-key exists dm-crypt/osd/$uuid/luks

  rm $empty_secrets $empty_json $all_secrets $cephx_only \
     $no_cephx $no_lockbox $bad_json

  for i in $(ceph osd ls); do
    [[ "$i" != "$id" ]]
    [[ "$i" != "$id2" ]]
    [[ "$i" != "$id3" ]]
  done

  [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
  ceph osd setmaxosd $old_maxosd

}

function test_mon_osd()
{
  #
  # osd blacklist
  #
  bl=192.168.0.1:0/1000
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
  ceph osd dump --format=json-pretty | grep $bl
  ceph osd dump | grep "^blacklist $bl"
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl

  bl=192.168.0.1
  # test without nonce, invalid nonce
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl
  expect_false "ceph osd blacklist $bl/-1"
  expect_false "ceph osd blacklist $bl/foo"

  # test with wrong address
  expect_false "ceph osd blacklist 1234.56.78.90/100"

  # Test `clear`
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist clear
  ceph osd blacklist ls | expect_false grep $bl

  #
  # osd crush
  #
  ceph osd crush reweight-all
  ceph osd crush tunables legacy
  ceph osd crush show-tunables | grep argonaut
  ceph osd crush tunables bobtail
  ceph osd crush show-tunables | grep bobtail
  ceph osd crush tunables firefly
  ceph osd crush show-tunables | grep firefly

  ceph osd crush set-tunable straw_calc_version 0
  ceph osd crush get-tunable straw_calc_version | grep 0
  ceph osd crush set-tunable straw_calc_version 1
  ceph osd crush get-tunable straw_calc_version | grep 1

  #
  # require-min-compat-client
  expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
  ceph osd set-require-min-compat-client luminous
  ceph osd dump | grep 'require_min_compat_client luminous'

  #
  # osd scrub
  #
  # how do I tell when these are done?
  ceph osd scrub 0
  ceph osd deep-scrub 0
  ceph osd repair 0

  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
  do
    ceph osd set $f
    ceph osd unset $f
  done
  expect_false ceph osd unset sortbitwise # cannot be unset
  expect_false ceph osd set bogus
  expect_false ceph osd unset bogus
  ceph osd require-osd-release luminous
  # can't lower (or use new command for anything but jewel)
  expect_false ceph osd require-osd-release jewel
  # these are no-ops but should succeed.
  ceph osd set require_jewel_osds
  ceph osd set require_kraken_osds
  expect_false ceph osd unset require_jewel_osds

  ceph osd set noup
  ceph osd down 0
  ceph osd dump | grep 'osd.0 down'
  ceph osd unset noup
  max_run=1000
  for ((i=0; i < $max_run; i++)); do
    if ! ceph osd dump | grep 'osd.0 up'; then
      echo "waiting for osd.0 to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  ceph osd dump | grep 'osd.0 up'

  ceph osd dump | grep 'osd.0 up'
  # ceph osd find expects the OsdName, so both ints and osd.n should work.
  ceph osd find 1
  ceph osd find osd.1
  expect_false ceph osd find osd.xyz
  expect_false ceph osd find xyz
  expect_false ceph osd find 0.1
  ceph --format plain osd find 1 # falls back to json-pretty
  if [ `uname` == Linux ]; then
    ceph osd metadata 1 | grep 'distro'
    ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
  fi
  ceph osd out 0
  ceph osd dump | grep 'osd.0.*out'
  ceph osd in 0
  ceph osd dump | grep 'osd.0.*in'
  ceph osd find 0

  ceph osd add-nodown 0 1
  ceph health detail | grep 'NODOWN'
  ceph osd rm-nodown 0 1
  ! ceph health detail | grep 'NODOWN'

  ceph osd out 0 # so we can mark it as noin later
  ceph osd add-noin 0
  ceph health detail | grep 'NOIN'
  ceph osd rm-noin 0
  ! ceph health detail | grep 'NOIN'
  ceph osd in 0

  ceph osd add-noout 0
  ceph health detail | grep 'NOOUT'
  ceph osd rm-noout 0
  ! ceph health detail | grep 'NOOUT'

  # test osd id parse
  expect_false ceph osd add-noup 797er
  expect_false ceph osd add-nodown u9uwer
  expect_false ceph osd add-noin 78~15
  expect_false ceph osd add-noout 0 all 1

  expect_false ceph osd rm-noup 1234567
  expect_false ceph osd rm-nodown fsadf7
  expect_false ceph osd rm-noin 0 1 any
  expect_false ceph osd rm-noout 790-fd

  ids=`ceph osd ls-tree default`
  for osd in $ids
  do
    ceph osd add-nodown $osd
    ceph osd add-noout $osd
  done
  ceph -s | grep 'NODOWN'
  ceph -s | grep 'NOOUT'
  ceph osd rm-nodown any
  ceph osd rm-noout all
  ! ceph -s | grep 'NODOWN'
  ! ceph -s | grep 'NOOUT'

  # make sure mark out preserves weight
  ceph osd reweight osd.0 .5
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
  ceph osd out 0
  ceph osd in 0
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'

  ceph osd getmap -o $f
  [ -s $f ]
  rm $f
  save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  [ "$save" -gt 0 ]
  ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
  ceph osd setmaxosd 10
  ceph osd getmaxosd | grep 'max_osd = 10'
  ceph osd setmaxosd $save
  ceph osd getmaxosd | grep "max_osd = $save"

  for id in `ceph osd ls` ; do
    retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
  done

  ceph osd rm 0 2>&1 | grep 'EBUSY'

  local old_osds=$(echo $(ceph osd ls))
  id=`ceph osd create`
  ceph osd find $id
  ceph osd lost $id --yes-i-really-mean-it
  expect_false ceph osd setmaxosd $id
  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id
  done

  uuid=`uuidgen`
  id=`ceph osd create $uuid`
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  ceph osd rm $id

  ceph --help osd

  # reset max_osd.
  ceph osd setmaxosd $id
  ceph osd getmaxosd | grep "max_osd = $save"
  local max_osd=$save

  ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
  ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'

  id=`ceph osd create $uuid $max_osd`
  [ "$id" = "$max_osd" ]
  ceph osd find $id
  max_osd=$((max_osd + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
  ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  id2=`ceph osd create $uuid $id`
  [ "$id" = "$id2" ]

  uuid=`uuidgen`
  local gap_start=$max_osd
  id=`ceph osd create $uuid $((gap_start + 100))`
  [ "$id" = "$((gap_start + 100))" ]
  max_osd=$((id + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'

  #
  # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
  # is repeated and consumes two osd id, not just one.
  #
  local next_osd
  if test "$CEPH_CLI_TEST_DUP_COMMAND" ; then
    next_osd=$((gap_start + 1))
  else
    next_osd=$gap_start
  fi
  id=`ceph osd create`
  [ "$id" = "$next_osd" ]

  next_osd=$((id + 1))
  id=`ceph osd create $(uuidgen)`
  [ "$id" = "$next_osd" ]

  next_osd=$((id + 1))
  id=`ceph osd create $(uuidgen) $next_osd`
  [ "$id" = "$next_osd" ]

  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    [ $id -ge $save ]
    ceph osd rm $id
  done
  ceph osd setmaxosd $save

  ceph osd ls
  ceph osd pool create data 10
  ceph osd lspools | grep data
  ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
  ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pause
  ceph osd dump | grep 'flags.*pauserd,pausewr'
  ceph osd unpause

  ceph osd tree
  ceph osd tree up
  ceph osd tree down
  ceph osd tree in
  ceph osd tree out
  ceph osd tree up in
  ceph osd tree up out
  ceph osd tree down in
  ceph osd tree down out
  ceph osd tree out down
  expect_false ceph osd tree up down
  expect_false ceph osd tree in out
  expect_false ceph osd tree up foo

  ceph osd metadata
  ceph osd count-metadata os
  ceph osd versions

  ceph osd perf
  ceph osd blocked-by

  ceph osd stat | grep up,
}

function test_mon_crush()
{
  f=$TEMP_DIR/map.$$
  epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
  [ -s $f ]
  [ "$epoch" -gt 1 ]
  nextepoch=$(( $epoch + 1 ))
  echo epoch $epoch nextepoch $nextepoch
  rm -f $f.epoch
  expect_false ceph osd setcrushmap $nextepoch -i $f
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  # should be idempotent
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo epoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  rm $f
}
1636
7c673cae
FG
1637function test_mon_osd_pool()
1638{
1639 #
1640 # osd pool
1641 #
1642 ceph osd pool create data 10
1643 ceph osd pool mksnap data datasnap
1644 rados -p data lssnap | grep datasnap
1645 ceph osd pool rmsnap data datasnap
1646 expect_false ceph osd pool rmsnap pool_fake snapshot
1647 ceph osd pool delete data data --yes-i-really-really-mean-it
1648
1649 ceph osd pool create data2 10
1650 ceph osd pool rename data2 data3
1651 ceph osd lspools | grep data3
1652 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
1653
1654 ceph osd pool create replicated 12 12 replicated
1655 ceph osd pool create replicated 12 12 replicated
1656 ceph osd pool create replicated 12 12 # default is replicated
1657 ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
1658 # should fail because the type is not the same
1659 expect_false ceph osd pool create replicated 12 12 erasure
1660 ceph osd lspools | grep replicated
1661 ceph osd pool create ec_test 1 1 erasure
1662 set +e
 1663 ceph osd metadata | grep osd_objectstore_type | grep -qc bluestore
 1664 if [ $? -ne 0 ]; then  # no bluestore OSDs: enabling ec overwrites must fail
 1665 ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
 1666 check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
 1667 else
 1668 ceph osd pool set ec_test allow_ec_overwrites true || return 1
 1669 expect_false ceph osd pool set ec_test allow_ec_overwrites false
 1670 fi
1671 set -e
1672 ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
1673 ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
1674}
1675
1676function test_mon_osd_pool_quota()
1677{
1678 #
1679 # test osd pool set/get quota
1680 #
1681
1682 # create tmp pool
1683 ceph osd pool create tmp-quota-pool 36
1684 #
1685 # set erroneous quotas
1686 #
1687 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
1688 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
1689 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
1690 #
1691 # set valid quotas
1692 #
1693 ceph osd pool set-quota tmp-quota-pool max_bytes 10
1694 ceph osd pool set-quota tmp-quota-pool max_objects 10M
1695 #
1696 # get quotas
1697 #
1698 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
1699 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10240k objects'
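 # note: "10M" above is parsed with binary units, hence the "10240k objects"
 # check and the 10485760 figure in the json checks below; an illustrative
 # sanity check of that arithmetic:
 [ $((10 * 1024 * 1024)) -eq 10485760 ]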
1700 #
1701 # get quotas in json-pretty format
1702 #
1703 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1704 grep '"quota_max_objects":.*10485760'
1705 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1706 grep '"quota_max_bytes":.*10'
1707 #
1708 # reset pool quotas
1709 #
1710 ceph osd pool set-quota tmp-quota-pool max_bytes 0
1711 ceph osd pool set-quota tmp-quota-pool max_objects 0
1712 #
1713 # test N/A quotas
1714 #
1715 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
1716 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
1717 #
1718 # cleanup tmp pool
1719 ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
1720}
1721
1722function test_mon_pg()
1723{
1724 # Make sure we start healthy.
1725 wait_for_health_ok
1726
1727 ceph pg debug unfound_objects_exist
1728 ceph pg debug degraded_pgs_exist
224ce89b 1729 ceph pg deep-scrub 1.0
1730 ceph pg dump
1731 ceph pg dump pgs_brief --format=json
1732 ceph pg dump pgs --format=json
1733 ceph pg dump pools --format=json
1734 ceph pg dump osds --format=json
1735 ceph pg dump sum --format=json
1736 ceph pg dump all --format=json
1737 ceph pg dump pgs_brief osds --format=json
1738 ceph pg dump pools osds pgs_brief --format=json
1739 ceph pg dump_json
1740 ceph pg dump_pools_json
1741 ceph pg dump_stuck inactive
1742 ceph pg dump_stuck unclean
1743 ceph pg dump_stuck stale
1744 ceph pg dump_stuck undersized
1745 ceph pg dump_stuck degraded
1746 ceph pg ls
224ce89b 1747 ceph pg ls 1
1748 ceph pg ls stale
1749 expect_false ceph pg ls scrubq
1750 ceph pg ls active stale repair recovering
1751 ceph pg ls 1 active
1752 ceph pg ls 1 active stale
7c673cae 1753 ceph pg ls-by-primary osd.0
224ce89b 1754 ceph pg ls-by-primary osd.0 1
1755 ceph pg ls-by-primary osd.0 active
1756 ceph pg ls-by-primary osd.0 active stale
224ce89b 1757 ceph pg ls-by-primary osd.0 1 active stale
7c673cae 1758 ceph pg ls-by-osd osd.0
224ce89b 1759 ceph pg ls-by-osd osd.0 1
1760 ceph pg ls-by-osd osd.0 active
1761 ceph pg ls-by-osd osd.0 active stale
224ce89b 1762 ceph pg ls-by-osd osd.0 1 active stale
1763 ceph pg ls-by-pool rbd
1764 ceph pg ls-by-pool rbd active stale
1765 # can't test this...
1766 # ceph pg force_create_pg
1767 ceph pg getmap -o $TEMP_DIR/map.$$
1768 [ -s $TEMP_DIR/map.$$ ]
1769 ceph pg map 1.0 | grep acting
1770 ceph pg repair 1.0
1771 ceph pg scrub 1.0
1772
1773 ceph osd set-full-ratio .962
1774 ceph osd dump | grep '^full_ratio 0.962'
1775 ceph osd set-backfillfull-ratio .912
1776 ceph osd dump | grep '^backfillfull_ratio 0.912'
1777 ceph osd set-nearfull-ratio .892
1778 ceph osd dump | grep '^nearfull_ratio 0.892'
1779
1780 # Check health status
1781 ceph osd set-nearfull-ratio .913
1782 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
1783 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
1784 ceph osd set-nearfull-ratio .892
1785 ceph osd set-backfillfull-ratio .963
1786 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
1787 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
1788 ceph osd set-backfillfull-ratio .912
1789
1790 # Check injected full results
1791 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
1792 wait_for_health "OSD_NEARFULL"
1793 ceph health detail | grep "osd.0 is near full"
1794 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
1795 wait_for_health_ok
1796
7c673cae 1797 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
1798 wait_for_health "OSD_BACKFILLFULL"
1799 ceph health detail | grep "osd.1 is backfill full"
1800 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
1801 wait_for_health_ok
1802
1803 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
1804 # failsafe and full are the same as far as the monitor is concerned
1805 wait_for_health "OSD_FULL"
1806 ceph health detail | grep "osd.2 is full"
1807 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
1808 wait_for_health_ok
1809
7c673cae 1810 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
224ce89b 1811 wait_for_health "OSD_FULL"
31f18b77 1812 ceph health detail | grep "osd.0 is full"
7c673cae 1813 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
1814 wait_for_health_ok
1815
1816 ceph pg stat | grep 'pgs:'
1817 ceph pg 1.0 query
1818 ceph tell 1.0 query
1819 ceph quorum enter
1820 ceph quorum_status
1821 ceph report | grep osd_stats
1822 ceph status
1823 ceph -s
1824
1825 #
1826 # tell osd version
1827 #
1828 ceph tell osd.0 version
1829 expect_false ceph tell osd.9999 version
1830 expect_false ceph tell osd.foo version
1831
1832 # back to pg stuff
1833
1834 ceph tell osd.0 dump_pg_recovery_stats | grep Started
1835
1836 ceph osd reweight 0 0.9
1837 expect_false ceph osd reweight 0 -1
1838 ceph osd reweight osd.0 1
1839
1840 ceph osd primary-affinity osd.0 .9
1841 expect_false ceph osd primary-affinity osd.0 -2
1842 expect_false ceph osd primary-affinity osd.9999 .5
1843 ceph osd primary-affinity osd.0 1
1844
1845 ceph osd pool set rbd size 2
1846 ceph osd pg-temp 1.0 0 1
1847 ceph osd pg-temp 1.0 osd.1 osd.0
1848 expect_false ceph osd pg-temp 1.0 0 1 2
7c673cae 1849 expect_false ceph osd pg-temp asdf qwer
1850 expect_false ceph osd pg-temp 1.0 asdf
1851 expect_false ceph osd pg-temp 1.0
1852
1853 # don't test ceph osd primary-temp for now
1854}
1855
1856function test_mon_osd_pool_set()
1857{
1858 TEST_POOL_GETSET=pool_getset
1859 ceph osd pool create $TEST_POOL_GETSET 1
1860 wait_for_clean
1861 ceph osd pool get $TEST_POOL_GETSET all
1862
31f18b77 1863 for s in pg_num pgp_num size min_size crush_rule; do
1864 ceph osd pool get $TEST_POOL_GETSET $s
1865 done
1866
1867 old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
1868 (( new_size = old_size + 1 ))
1869 ceph osd pool set $TEST_POOL_GETSET size $new_size
1870 ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
1871 ceph osd pool set $TEST_POOL_GETSET size $old_size
1872
1873 ceph osd pool create pool_erasure 1 1 erasure
1874 wait_for_clean
1875 set +e
1876 ceph osd pool set pool_erasure size 4444 2>$TMPFILE
1877 check_response 'not change the size'
1878 set -e
1879 ceph osd pool get pool_erasure erasure_code_profile
1880
1881 auid=5555
1882 ceph osd pool set $TEST_POOL_GETSET auid $auid
1883 ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
1884 ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
1885 ceph osd pool set $TEST_POOL_GETSET auid 0
1886
1887 for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
1888 ceph osd pool set $TEST_POOL_GETSET $flag false
1889 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1890 ceph osd pool set $TEST_POOL_GETSET $flag true
1891 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1892 ceph osd pool set $TEST_POOL_GETSET $flag 1
1893 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1894 ceph osd pool set $TEST_POOL_GETSET $flag 0
1895 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1896 expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
1897 expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
1898 done
1899
1900 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1901 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
1902 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
1903 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
1904 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1905
1906 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1907 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
1908 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
1909 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
1910 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1911
1912 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1913 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
1914 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
1915 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
1916 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1917
1918 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1919 ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
1920 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
1921 ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
1922 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1923
1924 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1925 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
1926 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
1927 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
1928 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1929
1930 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1931 ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
1932 ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
1933 ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
1934 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1935
1936 ceph osd pool set $TEST_POOL_GETSET nopgchange 1
1937 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
1938 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1939 ceph osd pool set $TEST_POOL_GETSET nopgchange 0
1940 ceph osd pool set $TEST_POOL_GETSET pg_num 10
1941 wait_for_clean
1942 ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1943
1944 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
1945 new_pgs=$(($old_pgs+$(ceph osd stat | grep osdmap | awk '{print $3}')*32))
1946 ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
1947 ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
1948 wait_for_clean
1949 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
1950 new_pgs=$(($old_pgs+$(ceph osd stat | grep osdmap | awk '{print $3}')*32+1))
1951 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
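 # (assumption: the factor of 32 above mirrors the default value of
 #  mon_osd_max_split_count, so growing pg_num by up to 32 per OSD is accepted
 #  and one PG more is rejected)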
1952
1953 ceph osd pool set $TEST_POOL_GETSET nosizechange 1
1954 expect_false ceph osd pool set $TEST_POOL_GETSET size 2
1955 expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
1956 ceph osd pool set $TEST_POOL_GETSET nosizechange 0
1957 ceph osd pool set $TEST_POOL_GETSET size 2
1958 wait_for_clean
1959 ceph osd pool set $TEST_POOL_GETSET min_size 2
1960
1961 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
1962 ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
1963
1964 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
1965 ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
1966
1967 ceph osd pool set $TEST_POOL_GETSET nodelete 1
1968 expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
1969 ceph osd pool set $TEST_POOL_GETSET nodelete 0
1970 ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
1971
7c673cae 1972 ceph osd pool get rbd crush_rule | grep 'crush_rule: '
1973
1974 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
1975 ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
1976 ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
1977 ceph osd pool set $TEST_POOL_GETSET compression_mode unset
1978 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
1979
1980 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
1981 ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
1982 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
1983 ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
1984 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
1985
1986 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
1987 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
1988 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
1989 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
1990 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
1991 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
1992 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
1993
1994 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
1995 ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
1996 ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
1997 ceph osd pool set $TEST_POOL_GETSET csum_type unset
1998 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
1999
2000 for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2001 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2002 ceph osd pool set $TEST_POOL_GETSET $size 100
2003 ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
2004 ceph osd pool set $TEST_POOL_GETSET $size 0
2005 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2006 done
2007}
2008
2009function test_mon_osd_tiered_pool_set()
2010{
2011 # this is really a tier pool
2012 ceph osd pool create real-tier 2
2013 ceph osd tier add rbd real-tier
2014
2015 ceph osd pool set real-tier hit_set_type explicit_hash
2016 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
2017 ceph osd pool set real-tier hit_set_type explicit_object
2018 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
2019 ceph osd pool set real-tier hit_set_type bloom
2020 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
2021 expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
2022 ceph osd pool set real-tier hit_set_period 123
2023 ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
2024 ceph osd pool set real-tier hit_set_count 12
2025 ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
2026 ceph osd pool set real-tier hit_set_fpp .01
2027 ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
2028
2029 ceph osd pool set real-tier target_max_objects 123
2030 ceph osd pool get real-tier target_max_objects | \
2031 grep 'target_max_objects:[ \t]\+123'
2032 ceph osd pool set real-tier target_max_bytes 123456
2033 ceph osd pool get real-tier target_max_bytes | \
2034 grep 'target_max_bytes:[ \t]\+123456'
2035 ceph osd pool set real-tier cache_target_dirty_ratio .123
2036 ceph osd pool get real-tier cache_target_dirty_ratio | \
2037 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
2038 expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
2039 expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
2040 ceph osd pool set real-tier cache_target_dirty_high_ratio .123
2041 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
2042 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
2043 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
2044 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
2045 ceph osd pool set real-tier cache_target_full_ratio .123
2046 ceph osd pool get real-tier cache_target_full_ratio | \
2047 grep 'cache_target_full_ratio:[ \t]\+0.123'
2048 ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
2049 ceph osd pool set real-tier cache_target_full_ratio 1.0
2050 ceph osd pool set real-tier cache_target_full_ratio 0
2051 expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
2052 ceph osd pool set real-tier cache_min_flush_age 123
2053 ceph osd pool get real-tier cache_min_flush_age | \
2054 grep 'cache_min_flush_age:[ \t]\+123'
2055 ceph osd pool set real-tier cache_min_evict_age 234
2056 ceph osd pool get real-tier cache_min_evict_age | \
2057 grep 'cache_min_evict_age:[ \t]\+234'
2058
2059 # this is not a tier pool
2060 ceph osd pool create fake-tier 2
2061 wait_for_clean
2062
2063 expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
2064 expect_false ceph osd pool get fake-tier hit_set_type
2065 expect_false ceph osd pool set fake-tier hit_set_type explicit_object
2066 expect_false ceph osd pool get fake-tier hit_set_type
2067 expect_false ceph osd pool set fake-tier hit_set_type bloom
2068 expect_false ceph osd pool get fake-tier hit_set_type
2069 expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
2070 expect_false ceph osd pool set fake-tier hit_set_period 123
2071 expect_false ceph osd pool get fake-tier hit_set_period
2072 expect_false ceph osd pool set fake-tier hit_set_count 12
2073 expect_false ceph osd pool get fake-tier hit_set_count
2074 expect_false ceph osd pool set fake-tier hit_set_fpp .01
2075 expect_false ceph osd pool get fake-tier hit_set_fpp
2076
2077 expect_false ceph osd pool set fake-tier target_max_objects 123
2078 expect_false ceph osd pool get fake-tier target_max_objects
2079 expect_false ceph osd pool set fake-tier target_max_bytes 123456
2080 expect_false ceph osd pool get fake-tier target_max_bytes
2081 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
2082 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2083 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
2084 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
2085 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
2086 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2087 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
2088 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
2089 expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
2090 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2091 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
2092 expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
2093 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
2094 expect_false ceph osd pool set fake-tier cache_min_flush_age 123
2095 expect_false ceph osd pool get fake-tier cache_min_flush_age
2096 expect_false ceph osd pool set fake-tier cache_min_evict_age 234
2097 expect_false ceph osd pool get fake-tier cache_min_evict_age
2098
2099 ceph osd tier remove rbd real-tier
2100 ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
2101 ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
2102}
2103
2104function test_mon_osd_erasure_code()
2105{
2106
2107 ceph osd erasure-code-profile set fooprofile a=b c=d
2108 ceph osd erasure-code-profile set fooprofile a=b c=d
2109 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2110 ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
2111 ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2112 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
2113 #
2114 # cleanup by removing profile 'fooprofile'
2115 ceph osd erasure-code-profile rm fooprofile
2116}
2117
2118function test_mon_osd_misc()
2119{
2120 set +e
2121
2122 # expect error about missing 'pool' argument
2123 ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
2124
2125 # expect error about unused argument foo
2126 ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
2127
2128 # expect "not in range" for invalid full ratio
2129 ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22
2130
 2131 # expect "higher than 100" for an invalid overload percentage
2132 ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
2133
2134 set -e
2135
2136 ceph osd reweight-by-utilization 110
2137 ceph osd reweight-by-utilization 110 .5
2138 expect_false ceph osd reweight-by-utilization 110 0
2139 expect_false ceph osd reweight-by-utilization 110 -0.1
2140 ceph osd test-reweight-by-utilization 110 .5 --no-increasing
2141 ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
2142 expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
2143 expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
2144 ceph osd reweight-by-pg 110
2145 ceph osd test-reweight-by-pg 110 .5
2146 ceph osd reweight-by-pg 110 rbd
2147 ceph osd reweight-by-pg 110 .5 rbd
2148 expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
2149}
2150
2151function test_mon_heap_profiler()
2152{
2153 do_test=1
2154 set +e
2155 # expect 'heap' commands to be correctly parsed
2156 ceph heap stats 2>$TMPFILE
2157 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2158 echo "tcmalloc not enabled; skip heap profiler test"
2159 do_test=0
2160 fi
2161 set -e
2162
2163 [[ $do_test -eq 0 ]] && return 0
2164
2165 ceph heap start_profiler
2166 ceph heap dump
2167 ceph heap stop_profiler
2168 ceph heap release
2169}
2170
2171function test_admin_heap_profiler()
2172{
2173 do_test=1
2174 set +e
2175 # expect 'heap' commands to be correctly parsed
2176 ceph heap stats 2>$TMPFILE
2177 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2178 echo "tcmalloc not enabled; skip heap profiler test"
2179 do_test=0
2180 fi
2181 set -e
2182
2183 [[ $do_test -eq 0 ]] && return 0
2184
2185 local admin_socket=$(get_admin_socket osd.0)
2186
2187 $SUDO ceph --admin-daemon $admin_socket heap start_profiler
2188 $SUDO ceph --admin-daemon $admin_socket heap dump
2189 $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
2190 $SUDO ceph --admin-daemon $admin_socket heap release
2191}
2192
2193function test_osd_bench()
2194{
2195 # test osd bench limits
2196 # As we should not rely on defaults (as they may change over time),
 2197 # let's inject some values and perform some simple tests
 2198 # max iops: 10 # 10 IOPS
2199 # max throughput: 10485760 # 10MB/s
2200 # max block size: 2097152 # 2MB
2201 # duration: 10 # 10 seconds
2202
2203 local args="\
2204 --osd-bench-duration 10 \
2205 --osd-bench-max-block-size 2097152 \
2206 --osd-bench-large-size-max-throughput 10485760 \
2207 --osd-bench-small-size-max-iops 10"
2208 ceph tell osd.0 injectargs ${args## }
2209
2210 # anything with a bs larger than 2097152 must fail
2211 expect_false ceph tell osd.0 bench 1 2097153
2212 # but using 'osd_bench_max_bs' must succeed
2213 ceph tell osd.0 bench 1 2097152
2214
2215 # we assume 1MB as a large bs; anything lower is a small bs
2216 # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
2217 # max count: 409600 (bytes)
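 # an illustrative sanity check of that arithmetic (max_iops * duration * bs,
 # using the values injected above):
 [ $((10 * 10 * 4096)) -eq 409600 ]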
2218
2219 # more than max count must not be allowed
2220 expect_false ceph tell osd.0 bench 409601 4096
 2221 # but 409600 must succeed
2222 ceph tell osd.0 bench 409600 4096
2223
2224 # for a large bs, we are limited by throughput.
2225 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
2226 # the max count will be (10MB * 10s) = 100MB
2227 # max count: 104857600 (bytes)
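 # an illustrative sanity check of that arithmetic (max_throughput * duration,
 # using the values injected above):
 [ $((10485760 * 10)) -eq 104857600 ]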
2228
2229 # more than max count must not be allowed
2230 expect_false ceph tell osd.0 bench 104857601 2097152
2231 # up to max count must be allowed
2232 ceph tell osd.0 bench 104857600 2097152
2233}
2234
2235function test_osd_negative_filestore_merge_threshold()
2236{
2237 $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
2238 expect_config_value "osd.0" "filestore_merge_threshold" -1
2239}
2240
2241function test_mon_tell()
2242{
2243 ceph tell mon.a version
2244 ceph tell mon.b version
2245 expect_false ceph tell mon.foo version
2246
2247 sleep 1
2248
2249 ceph_watch_start debug
2250 ceph tell mon.a version
31f18b77 2251 ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
2252
2253 ceph_watch_start debug
2254 ceph tell mon.b version
31f18b77 2255 ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
2256}
2257
2258function test_mon_ping()
2259{
2260 ceph ping mon.a
2261 ceph ping mon.b
2262 expect_false ceph ping mon.foo
2263
2264 ceph ping mon.\*
2265}
2266
2267function test_mon_deprecated_commands()
2268{
2269 # current DEPRECATED commands are:
2270 # ceph compact
2271 # ceph scrub
2272 # ceph sync force
2273 #
2274 # Testing should be accomplished by setting
2275 # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
2276 # each one of these commands.
2277
2278 ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
2279 expect_false ceph tell mon.a compact 2> $TMPFILE
2280 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2281
2282 expect_false ceph tell mon.a scrub 2> $TMPFILE
2283 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2284
2285 expect_false ceph tell mon.a sync force 2> $TMPFILE
2286 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2287
2288 ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
2289}
2290
2291function test_mon_cephdf_commands()
2292{
2293 # ceph df detail:
2294 # pool section:
 2295 # RAW USED: the raw space used by each pool, counted against the raw total
2296
2297 ceph osd pool create cephdf_for_test 32 32 replicated
2298 ceph osd pool set cephdf_for_test size 2
2299
2300 dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
2301 rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
2302
 2303 # wait for the object to become visible
2304 for i in `seq 1 10`; do
2305 rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
2306 sleep 1
2307 done
2308 # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
2309 # to sync mon with osd
2310 flush_pg_stats
2311 local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
2312 cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
2313 raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`
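 # the pool was created with size 2 above, so its raw usage should be twice its
 # logical usage; that is what the two jq expressions compute and what the final
 # comparison below asserts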
2314
2315 ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
2316 rm ./cephdf_for_test
2317
2318 expect_false test $cal_raw_used_size != $raw_used_size
2319}
2320
2321function test_mon_tell_help_command()
2322{
2323 ceph tell mon.a help
2324
2325 # wrong target
2326 expect_false ceph tell mon.zzz help
2327}
2328
2329function test_osd_tell_help_command()
2330{
2331 ceph tell osd.1 help
2332 expect_false ceph tell osd.100 help
2333}
2334
2335function test_osd_compact()
2336{
2337 ceph tell osd.1 compact
2338 ceph daemon osd.1 compact
2339}
2340
2341function test_mds_tell_help_command()
2342{
2343 local FS_NAME=cephfs
2344 if ! mds_exists ; then
2345 echo "Skipping test, no MDS found"
2346 return
2347 fi
2348
2349 remove_all_fs
2350 ceph osd pool create fs_data 10
2351 ceph osd pool create fs_metadata 10
2352 ceph fs new $FS_NAME fs_metadata fs_data
2353 wait_mds_active $FS_NAME
2354
2355
2356 ceph tell mds.a help
2357 expect_false ceph tell mds.z help
2358
2359 remove_all_fs
2360 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
2361 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
2362}
2363
224ce89b 2364function test_mgr_tell()
2365{
2366 ceph tell mgr help
2367 ceph tell mgr fs status
2368 ceph tell mgr osd status
2369}
2370
2371#
2372# New tests should be added to the TESTS array below
2373#
2374# Individual tests may be run using the '-t <testname>' argument
2375# The user can specify '-t <testname>' as many times as she wants
2376#
2377# Tests will be run in order presented in the TESTS array, or in
2378# the order specified by the '-t <testname>' options.
2379#
2380# '-l' will list all the available test names
2381# '-h' will show usage
2382#
2383# The test maintains backward compatibility: not specifying arguments
2384# will run all tests following the order they appear in the TESTS array.
2385#
2386
2387set +x
2388MON_TESTS+=" mon_injectargs"
2389MON_TESTS+=" mon_injectargs_SI"
2390for i in `seq 9`; do
2391 MON_TESTS+=" tiering_$i";
2392done
2393MON_TESTS+=" auth"
2394MON_TESTS+=" auth_profiles"
2395MON_TESTS+=" mon_misc"
2396MON_TESTS+=" mon_mon"
2397MON_TESTS+=" mon_osd"
2398MON_TESTS+=" mon_crush"
2399MON_TESTS+=" mon_osd_create_destroy"
2400MON_TESTS+=" mon_osd_pool"
2401MON_TESTS+=" mon_osd_pool_quota"
2402MON_TESTS+=" mon_pg"
2403MON_TESTS+=" mon_osd_pool_set"
2404MON_TESTS+=" mon_osd_tiered_pool_set"
2405MON_TESTS+=" mon_osd_erasure_code"
2406MON_TESTS+=" mon_osd_misc"
2407MON_TESTS+=" mon_heap_profiler"
2408MON_TESTS+=" mon_tell"
2409MON_TESTS+=" mon_ping"
2410MON_TESTS+=" mon_deprecated_commands"
2411MON_TESTS+=" mon_caps"
2412MON_TESTS+=" mon_cephdf_commands"
2413MON_TESTS+=" mon_tell_help_command"
2414
2415OSD_TESTS+=" osd_bench"
2416OSD_TESTS+=" osd_negative_filestore_merge_threshold"
2417OSD_TESTS+=" tiering_agent"
2418OSD_TESTS+=" admin_heap_profiler"
31f18b77 2419OSD_TESTS+=" osd_tell_help_command"
224ce89b 2420OSD_TESTS+=" osd_compact"
2421
2422MDS_TESTS+=" mds_tell"
2423MDS_TESTS+=" mon_mds"
2424MDS_TESTS+=" mon_mds_metadata"
2425MDS_TESTS+=" mds_tell_help_command"
2426
224ce89b 2427MGR_TESTS+=" mgr_tell"
2428
2429TESTS+=$MON_TESTS
2430TESTS+=$OSD_TESTS
2431TESTS+=$MDS_TESTS
31f18b77 2432TESTS+=$MGR_TESTS
2433
2434#
2435# "main" follows
2436#
2437
2438function list_tests()
2439{
2440 echo "AVAILABLE TESTS"
2441 for i in $TESTS; do
2442 echo " $i"
2443 done
2444}
2445
2446function usage()
2447{
2448 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2449}
2450
2451tests_to_run=()
2452
2453sanity_check=true
2454
2455while [[ $# -gt 0 ]]; do
2456 opt=$1
2457
2458 case "$opt" in
2459 "-l" )
2460 do_list=1
2461 ;;
2462 "--asok-does-not-need-root" )
2463 SUDO=""
2464 ;;
2465 "--no-sanity-check" )
2466 sanity_check=false
2467 ;;
2468 "--test-mon" )
2469 tests_to_run+="$MON_TESTS"
2470 ;;
2471 "--test-osd" )
2472 tests_to_run+="$OSD_TESTS"
2473 ;;
2474 "--test-mds" )
2475 tests_to_run+="$MDS_TESTS"
2476 ;;
2477 "--test-mgr" )
2478 tests_to_run+="$MGR_TESTS"
2479 ;;
2480 "-t" )
2481 shift
2482 if [[ -z "$1" ]]; then
2483 echo "missing argument to '-t'"
2484 usage ;
2485 exit 1
2486 fi
2487 tests_to_run+=" $1"
2488 ;;
2489 "-h" )
2490 usage ;
2491 exit 0
2492 ;;
2493 esac
2494 shift
2495done
2496
2497if [[ $do_list -eq 1 ]]; then
2498 list_tests ;
2499 exit 0
2500fi
2501
2502ceph osd pool create rbd 10
2503
2504if test -z "$tests_to_run" ; then
2505 tests_to_run="$TESTS"
2506fi
2507
2508if $sanity_check ; then
2509 wait_no_osd_down
2510fi
2511for i in $tests_to_run; do
2512 if $sanity_check ; then
2513 check_no_osd_down
2514 fi
2515 set -x
2516 test_${i}
2517 set +x
2518done
2519if $sanity_check ; then
2520 check_no_osd_down
2521fi
2522
2523set -x
2524
2525echo OK