1 #!/usr/bin/env bash
2 # -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
3 # vim: ts=8 sw=8 ft=bash smarttab
4 set -x
5
6 source $(dirname $0)/../../standalone/ceph-helpers.sh
7
8 set -e
9 set -o functrace
10 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
11 SUDO=${SUDO:-sudo}
12 export CEPH_DEV=1
13
14 function check_no_osd_down()
15 {
16 ! ceph osd dump | grep ' down '
17 }
18
19 function wait_no_osd_down()
20 {
21 max_run=300
22 for i in $(seq 1 $max_run) ; do
23 if ! check_no_osd_down ; then
24 echo "waiting for osd(s) to come back up ($i/$max_run)"
25 sleep 1
26 else
27 break
28 fi
29 done
30 check_no_osd_down
31 }
32
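# expect_false cmd args...: succeed only if the command fails;
# expect_true cmd args...: succeed only if the command succeeds.
# Used throughout to assert error and success paths, e.g.:
#   expect_false ceph osd pool set cache pg_num 4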
33 function expect_false()
34 {
35 set -x
36 if "$@"; then return 1; else return 0; fi
37 }
38
39 function expect_true()
40 {
41 set -x
42 if ! "$@"; then return 1; else return 0; fi
43 }
44
45 TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
46 trap "rm -fr $TEMP_DIR" 0
47
48 TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
49
50 #
51 # retry_eagain max cmd args ...
52 #
53 # retry "cmd args ..." up to $max times, for as long as it exits with a
54 # non-zero status and its output contains the string EAGAIN
55 #
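# Example (illustrative): retry "ceph health" up to 5 times while it keeps
# failing with EAGAIN:
#   retry_eagain 5 ceph health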
56 function retry_eagain()
57 {
58 local max=$1
59 shift
60 local status
61 local tmpfile=$TEMP_DIR/retry_eagain.$$
62 local count
63 for count in $(seq 1 $max) ; do
64 status=0
65 "$@" > $tmpfile 2>&1 || status=$?
66 if test $status = 0 ||
67 ! grep --quiet EAGAIN $tmpfile ; then
68 break
69 fi
70 sleep 1
71 done
72 if test $count = $max ; then
73 echo "retried with non-zero exit status, $max times:" "$@" >&2
74 fi
75 cat $tmpfile
76 rm $tmpfile
77 return $status
78 }
79
80 #
81 # map_enxio_to_eagain cmd arg ...
82 #
83 # add EAGAIN to the output of cmd arg ... if the output contains
84 # ENXIO.
85 #
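# Example (illustrative): combined with retry_eagain above, this lets a
# command that intermittently fails with ENXIO be retried as well:
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version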
86 function map_enxio_to_eagain()
87 {
88 local status=0
89 local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
90
91 "$@" > $tmpfile 2>&1 || status=$?
92 if test $status != 0 &&
93 grep --quiet ENXIO $tmpfile ; then
94 echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
95 fi
96 cat $tmpfile
97 rm $tmpfile
98 return $status
99 }
100
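# check_response expected_string [retcode [expected_retcode]]
#
# Assert that $TMPFILE (the captured output of the previous command)
# contains expected_string and, if expected_retcode is given, that retcode
# matches it.  Typical usage:
#   ceph auth import -i authfile 2>$TMPFILE
#   check_response "imported keyring"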
101 function check_response()
102 {
103 expected_string=$1
104 retcode=$2
105 expected_retcode=$3
106 if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
107 echo "return code invalid: got $retcode, expected $expected_retcode" >&2
108 exit 1
109 fi
110
111 if ! grep --quiet -- "$expected_string" $TMPFILE ; then
112 echo "Didn't find $expected_string in output" >&2
113 cat $TMPFILE >&2
114 exit 1
115 fi
116 }
117
118 function get_config_value_or_die()
119 {
120 local target config_opt raw val
121
122 target=$1
123 config_opt=$2
124
125 raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
126 if [[ $? -ne 0 ]]; then
127 echo "error obtaining config opt '$config_opt' from '$target': $raw"
128 exit 1
129 fi
130
131 raw=`echo $raw | sed -e 's/[{} "]//g'`
132 val=`echo $raw | cut -f2 -d:`
133
134 echo "$val"
135 return 0
136 }
137
138 function expect_config_value()
139 {
140 local target config_opt expected_val val
141 target=$1
142 config_opt=$2
143 expected_val=$3
144
145 val=$(get_config_value_or_die $target $config_opt)
146
147 if [[ "$val" != "$expected_val" ]]; then
148 echo "expected '$expected_val', got '$val'"
149 exit 1
150 fi
151 }
152
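# ceph_watch_start [what [channel]] / ceph_watch_wait regexp [timeout]
#
# Start a background "ceph --watch[-what]" that writes the cluster log to a
# temp file, then block until regexp appears in it (failing after timeout
# seconds, 30 by default).  See the log test in test_mon_misc for usage.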
153 function ceph_watch_start()
154 {
155 local watch_opt=--watch
156
157 if [ -n "$1" ]; then
158 watch_opt=--watch-$1
159 if [ -n "$2" ]; then
160 watch_opt+=" --watch-channel $2"
161 fi
162 fi
163
164 CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
165 ceph $watch_opt > $CEPH_WATCH_FILE &
166 CEPH_WATCH_PID=$!
167
168 # wait until the "ceph" client is connected and receiving
169 # log messages from the monitor
170 for i in `seq 3`; do
171 grep -q "cluster" $CEPH_WATCH_FILE && break
172 sleep 1
173 done
174 }
175
176 function ceph_watch_wait()
177 {
178 local regexp=$1
179 local timeout=30
180
181 if [ -n "$2" ]; then
182 timeout=$2
183 fi
184
185 for i in `seq ${timeout}`; do
186 grep -q "$regexp" $CEPH_WATCH_FILE && break
187 sleep 1
188 done
189
190 kill $CEPH_WATCH_PID
191
192 if ! grep "$regexp" $CEPH_WATCH_FILE; then
193 echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
194 cat $CEPH_WATCH_FILE >&2
195 return 1
196 fi
197 }
198
199 function test_mon_injectargs()
200 {
201 ceph tell osd.0 injectargs --no-osd_enable_op_tracker
202 ceph tell osd.0 config get osd_enable_op_tracker | grep false
203 ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500'
204 ceph tell osd.0 config get osd_enable_op_tracker | grep true
205 ceph tell osd.0 config get osd_op_history_duration | grep 500
206 ceph tell osd.0 injectargs --no-osd_enable_op_tracker
207 ceph tell osd.0 config get osd_enable_op_tracker | grep false
208 ceph tell osd.0 injectargs -- --osd_enable_op_tracker
209 ceph tell osd.0 config get osd_enable_op_tracker | grep true
210 ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600'
211 ceph tell osd.0 config get osd_enable_op_tracker | grep true
212 ceph tell osd.0 config get osd_op_history_duration | grep 600
213
214 ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200'
215 ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200
216
217 ceph tell osd.0 injectargs -- '--mon_probe_timeout 2'
218 ceph tell osd.0 config get mon_probe_timeout | grep 2
219
220 ceph tell osd.0 injectargs -- '--mon-lease 6'
221 ceph tell osd.0 config get mon_lease | grep 6
222
223 # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
224 expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 2> $TMPFILE || return 1
225 check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
226
227 expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
228 ceph tell osd.0 injectargs -- '--osd_op_history_duration'
229
230 }
231
232 function test_mon_injectargs_SI()
233 {
234 # Test SI units during injectargs and 'config set'.
235 # We only aim at testing that the units are parsed correctly; we don't
236 # intend to test whether the options being set actually expect SI units
237 # to be passed.
238 # Keep in mind that all integer-based options that are not byte-based
239 # (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed
240 # in base 10.
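# For example, under these rules 10K is parsed as 10 * 1000 = 10000 and
# 1G as 10^9 = 1000000000, which is what the checks below expect.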
241 initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
242 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
243 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
244 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
245 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
246 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
247 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
248 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
249 check_response "(22) Invalid argument"
250 # now test with injectargs
251 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
252 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
253 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
254 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
255 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
256 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
257 expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
258 expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
259 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
260 }
261
262 function test_mon_injectargs_IEC()
263 {
264 # Test IEC units during injectargs and 'config set'
265 # We only aim at testing that the units are parsed correctly; we don't
266 # intend to test whether the options being set actually expect IEC units
267 # to be passed.
268 # Keep in mind that all integer-based options that are byte-based
269 # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
270 # unit modifiers (for backwards compatibility and convenience), and be
271 # parsed in base 2.
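# For example, both the SI-style 15G and the IEC-style 16Gi are parsed in
# base 2 below:
#   15G  = 15 * 2^30 = 16106127360 bytes
#   16Gi = 16 * 2^30 = 17179869184 bytes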
272 initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
273 $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
274 expect_config_value "mon.a" "mon_data_size_warn" 15000000000
275 $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
276 expect_config_value "mon.a" "mon_data_size_warn" 16106127360
277 $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
278 expect_config_value "mon.a" "mon_data_size_warn" 17179869184
279 $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
280 check_response "(22) Invalid argument"
281 # now test with injectargs
282 ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
283 expect_config_value "mon.a" "mon_data_size_warn" 15000000000
284 ceph tell mon.a injectargs '--mon_data_size_warn 15G'
285 expect_config_value "mon.a" "mon_data_size_warn" 16106127360
286 ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
287 expect_config_value "mon.a" "mon_data_size_warn" 17179869184
288 expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
289 $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
290 }
291
292 function test_tiering_agent()
293 {
294 local slow=slow_eviction
295 local fast=fast_eviction
296 ceph osd pool create $slow 1 1
297 ceph osd pool application enable $slow rados
298 ceph osd pool create $fast 1 1
299 ceph osd tier add $slow $fast
300 ceph osd tier cache-mode $fast writeback
301 ceph osd tier set-overlay $slow $fast
302 ceph osd pool set $fast hit_set_type bloom
303 rados -p $slow put obj1 /etc/group
304 ceph osd pool set $fast target_max_objects 1
305 ceph osd pool set $fast hit_set_count 1
306 ceph osd pool set $fast hit_set_period 5
307 # wait for the object to be evicted from the cache
308 local evicted
309 evicted=false
310 for i in `seq 1 300` ; do
311 if ! rados -p $fast ls | grep obj1 ; then
312 evicted=true
313 break
314 fi
315 sleep 1
316 done
317 $evicted # assert
318 # the object is proxy read and promoted to the cache
319 rados -p $slow get obj1 - >/dev/null
320 # wait for the promoted object to be evicted again
321 evicted=false
322 for i in `seq 1 300` ; do
323 if ! rados -p $fast ls | grep obj1 ; then
324 evicted=true
325 break
326 fi
327 sleep 1
328 done
329 $evicted # assert
330 ceph osd tier remove-overlay $slow
331 ceph osd tier remove $slow $fast
332 ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
333 ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
334 }
335
336 function test_tiering_1()
337 {
338 # tiering
339 ceph osd pool create slow 2
340 ceph osd pool application enable slow rados
341 ceph osd pool create slow2 2
342 ceph osd pool application enable slow2 rados
343 ceph osd pool create cache 2
344 ceph osd pool create cache2 2
345 ceph osd tier add slow cache
346 ceph osd tier add slow cache2
347 expect_false ceph osd tier add slow2 cache
348 # test some state transitions
349 ceph osd tier cache-mode cache writeback
350 # forward is removed/deprecated
351 expect_false ceph osd tier cache-mode cache forward
352 expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
353 expect_false ceph osd tier cache-mode cache readonly
354 ceph osd tier cache-mode cache proxy
355 ceph osd tier cache-mode cache none
356 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
357 expect_false ceph osd tier cache-mode cache forward
358 expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
359 ceph osd tier cache-mode cache none
360 ceph osd tier cache-mode cache writeback
361 ceph osd tier cache-mode cache proxy
362 ceph osd tier cache-mode cache writeback
363 expect_false ceph osd tier cache-mode cache none
364 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
365 # test with dirty objects in the tier pool
366 # tier pool currently set to 'writeback'
367 rados -p cache put /etc/passwd /etc/passwd
368 flush_pg_stats
369 # 1 dirty object in pool 'cache'
370 ceph osd tier cache-mode cache proxy
371 expect_false ceph osd tier cache-mode cache none
372 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
373 ceph osd tier cache-mode cache writeback
374 # remove object from tier pool
375 rados -p cache rm /etc/passwd
376 rados -p cache cache-flush-evict-all
377 flush_pg_stats
378 # no dirty objects in pool 'cache'
379 ceph osd tier cache-mode cache proxy
380 ceph osd tier cache-mode cache none
381 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
382 TRIES=0
383 while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
384 do
385 grep 'currently creating pgs' $TMPFILE
386 TRIES=$(( $TRIES + 1 ))
387 test $TRIES -ne 60
388 sleep 3
389 done
390 expect_false ceph osd pool set cache pg_num 4
391 ceph osd tier cache-mode cache none
392 ceph osd tier set-overlay slow cache
393 expect_false ceph osd tier set-overlay slow cache2
394 expect_false ceph osd tier remove slow cache
395 ceph osd tier remove-overlay slow
396 ceph osd tier set-overlay slow cache2
397 ceph osd tier remove-overlay slow
398 ceph osd tier remove slow cache
399 ceph osd tier add slow2 cache
400 expect_false ceph osd tier set-overlay slow cache
401 ceph osd tier set-overlay slow2 cache
402 ceph osd tier remove-overlay slow2
403 ceph osd tier remove slow2 cache
404 ceph osd tier remove slow cache2
405
406 # make sure a non-empty pool fails
407 rados -p cache2 put /etc/passwd /etc/passwd
408 while ! ceph df | grep cache2 | grep ' 1 ' ; do
409 echo waiting for pg stats to flush
410 sleep 2
411 done
412 expect_false ceph osd tier add slow cache2
413 ceph osd tier add slow cache2 --force-nonempty
414 ceph osd tier remove slow cache2
415
416 ceph osd pool ls | grep cache2
417 ceph osd pool ls -f json-pretty | grep cache2
418 ceph osd pool ls detail | grep cache2
419 ceph osd pool ls detail -f json-pretty | grep cache2
420
421 ceph osd pool delete slow slow --yes-i-really-really-mean-it
422 ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
423 ceph osd pool delete cache cache --yes-i-really-really-mean-it
424 ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
425 }
426
427 function test_tiering_2()
428 {
429 # make sure we can't clobber snapshot state
430 ceph osd pool create snap_base 2
431 ceph osd pool application enable snap_base rados
432 ceph osd pool create snap_cache 2
433 ceph osd pool mksnap snap_cache snapname
434 expect_false ceph osd tier add snap_base snap_cache
435 ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
436 ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
437 }
438
439 function test_tiering_3()
440 {
441 # make sure we can't create snapshot on tier
442 ceph osd pool create basex 2
443 ceph osd pool application enable basex rados
444 ceph osd pool create cachex 2
445 ceph osd tier add basex cachex
446 expect_false ceph osd pool mksnap cachex snapname
447 ceph osd tier remove basex cachex
448 ceph osd pool delete basex basex --yes-i-really-really-mean-it
449 ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
450 }
451
452 function test_tiering_4()
453 {
454 # make sure we can't create an ec pool tier
455 ceph osd pool create eccache 2 2 erasure
456 expect_false ceph osd set-require-min-compat-client bobtail
457 ceph osd pool create repbase 2
458 ceph osd pool application enable repbase rados
459 expect_false ceph osd tier add repbase eccache
460 ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
461 ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
462 }
463
464 function test_tiering_5()
465 {
466 # convenient add-cache command
467 ceph osd pool create slow 2
468 ceph osd pool application enable slow rados
469 ceph osd pool create cache3 2
470 ceph osd tier add-cache slow cache3 1024000
471 ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
472 ceph osd tier remove slow cache3 2> $TMPFILE || true
473 check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
474 ceph osd tier remove-overlay slow
475 ceph osd tier remove slow cache3
476 ceph osd pool ls | grep cache3
477 ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
478 ! ceph osd pool ls | grep cache3 || exit 1
479 ceph osd pool delete slow slow --yes-i-really-really-mean-it
480 }
481
482 function test_tiering_6()
483 {
484 # check whether add-cache works
485 ceph osd pool create datapool 2
486 ceph osd pool application enable datapool rados
487 ceph osd pool create cachepool 2
488 ceph osd tier add-cache datapool cachepool 1024000
489 ceph osd tier cache-mode cachepool writeback
490 rados -p datapool put object /etc/passwd
491 rados -p cachepool stat object
492 rados -p cachepool cache-flush object
493 rados -p datapool stat object
494 ceph osd tier remove-overlay datapool
495 ceph osd tier remove datapool cachepool
496 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
497 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
498 }
499
500 function test_tiering_7()
501 {
502 # protection against pool removal when used as tiers
503 ceph osd pool create datapool 2
504 ceph osd pool application enable datapool rados
505 ceph osd pool create cachepool 2
506 ceph osd tier add-cache datapool cachepool 1024000
507 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
508 check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
509 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
510 check_response "EBUSY: pool 'datapool' has tiers cachepool"
511 ceph osd tier remove-overlay datapool
512 ceph osd tier remove datapool cachepool
513 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
514 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
515 }
516
517 function test_tiering_8()
518 {
519 ## check health check
520 ceph osd set notieragent
521 ceph osd pool create datapool 2
522 ceph osd pool application enable datapool rados
523 ceph osd pool create cache4 2
524 ceph osd tier add-cache datapool cache4 1024000
525 ceph osd tier cache-mode cache4 writeback
526 tmpfile=$(mktemp|grep tmp)
527 dd if=/dev/zero of=$tmpfile bs=4K count=1
528 ceph osd pool set cache4 target_max_objects 200
529 ceph osd pool set cache4 target_max_bytes 1000000
530 rados -p cache4 put foo1 $tmpfile
531 rados -p cache4 put foo2 $tmpfile
532 rm -f $tmpfile
533 flush_pg_stats
534 ceph df | grep datapool | grep ' 2 '
535 ceph osd tier remove-overlay datapool
536 ceph osd tier remove datapool cache4
537 ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
538 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
539 ceph osd unset notieragent
540 }
541
542 function test_tiering_9()
543 {
544 # make sure 'tier remove' behaves as we expect, i.e., removing a tier
545 # from a pool that is not its base pool only results in a
546 # 'pool foo is now (or already was) not a tier of bar' message
547 #
548 ceph osd pool create basepoolA 2
549 ceph osd pool application enable basepoolA rados
550 ceph osd pool create basepoolB 2
551 ceph osd pool application enable basepoolB rados
552 poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
553 poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
554
555 ceph osd pool create cache5 2
556 ceph osd pool create cache6 2
557 ceph osd tier add basepoolA cache5
558 ceph osd tier add basepoolB cache6
559 ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
560 ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
561 ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
562 ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
563
564 ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
565 ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
566 ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
567 ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
568
569 ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
570 ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
571
572 ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
573 ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
574 ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
575 ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
576 }
577
578 function test_auth()
579 {
580 expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
581 expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
582 ceph auth add client.xx mon 'allow *' osd "allow *"
583 ceph auth export client.xx >client.xx.keyring
584 ceph auth add client.xx -i client.xx.keyring
585 rm -f client.xx.keyring
586 ceph auth list | grep client.xx
587 ceph auth ls | grep client.xx
588 ceph auth get client.xx | grep caps | grep mon
589 ceph auth get client.xx | grep caps | grep osd
590 ceph auth get-key client.xx
591 ceph auth print-key client.xx
592 ceph auth print_key client.xx
593 ceph auth caps client.xx osd "allow rw"
594 expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
595 ceph auth get client.xx | grep osd | grep "allow rw"
596 ceph auth caps client.xx mon 'allow command "osd tree"'
597 ceph auth export | grep client.xx
598 ceph auth export -o authfile
599 ceph auth import -i authfile 2>$TMPFILE
600 check_response "imported keyring"
601
602 ceph auth export -o authfile2
603 diff authfile authfile2
604 rm authfile authfile2
605 ceph auth del client.xx
606 expect_false ceph auth get client.xx
607
608 # (almost) interactive mode
609 echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
610 ceph auth get client.xx
611 # script mode
612 echo 'auth del client.xx' | ceph
613 expect_false ceph auth get client.xx
614 }
615
616 function test_auth_profiles()
617 {
618 ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
619 mgr 'allow profile read-only'
620 ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
621 mgr 'allow profile read-write'
622 ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
623
624 ceph auth export > client.xx.keyring
625
626 # read-only is allowed to run all read-only commands (auth excluded)
627 ceph -n client.xx-profile-ro -k client.xx.keyring status
628 ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
629 ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
630 ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
631 # read-only gets access denied for rw commands or auth commands
632 ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
633 check_response "EACCES: access denied"
634 ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
635 check_response "EACCES: access denied"
636 ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
637 check_response "EACCES: access denied"
638
639 # read-write is allowed for all read-write commands (except auth)
640 ceph -n client.xx-profile-rw -k client.xx.keyring status
641 ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
642 ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
643 ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
644 ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
645 ceph -n client.xx-profile-rw -k client.xx.keyring log foo
646 ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
647 ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
648 # read-write gets access denied for auth commands
649 ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
650 check_response "EACCES: access denied"
651
652 # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
653 ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
654 ceph -n client.xx-profile-rd -k client.xx.keyring auth export
655 ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
656 ceph -n client.xx-profile-rd -k client.xx.keyring status
657 ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
658 check_response "EACCES: access denied"
659 ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
660 check_response "EACCES: access denied"
661 # read-only 'mon' subsystem commands are allowed
662 ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
663 # but read-write 'mon' commands are not
664 ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
665 check_response "EACCES: access denied"
666 ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
667 check_response "EACCES: access denied"
668 ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
669 check_response "EACCES: access denied"
670 ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
671 check_response "EACCES: access denied"
672
673 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
674 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
675
676 # add a new role-definer with the existing role-definer
677 ceph -n client.xx-profile-rd -k client.xx.keyring \
678 auth add client.xx-profile-rd2 mon 'allow profile role-definer'
679 ceph -n client.xx-profile-rd -k client.xx.keyring \
680 auth export > client.xx.keyring.2
681 # remove old role-definer using the new role-definer
682 ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
683 auth del client.xx-profile-rd
684 # remove the remaining role-definer with admin
685 ceph auth del client.xx-profile-rd2
686 rm -f client.xx.keyring client.xx.keyring.2
687 }
688
689 function test_mon_caps()
690 {
691 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
692 chmod +r $TEMP_DIR/ceph.client.bug.keyring
693 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
694 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
695
696 # pass --no-mon-config since we are looking for the permission denied error
697 rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
698 cat $TMPFILE
699 check_response "Permission denied"
700
701 rm -rf $TEMP_DIR/ceph.client.bug.keyring
702 ceph auth del client.bug
703 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
704 chmod +r $TEMP_DIR/ceph.client.bug.keyring
705 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
706 ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
707 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
708 rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
709 check_response "Permission denied"
710 }
711
712 function test_mon_misc()
713 {
714 # with and without verbosity
715 ceph osd dump | grep '^epoch'
716 ceph --concise osd dump | grep '^epoch'
717
718 ceph osd df | grep 'MIN/MAX VAR'
719
720 # df
721 ceph df > $TMPFILE
722 grep RAW $TMPFILE
723 grep -v DIRTY $TMPFILE
724 ceph df detail > $TMPFILE
725 grep DIRTY $TMPFILE
726 ceph df --format json > $TMPFILE
727 grep 'total_bytes' $TMPFILE
728 grep -v 'dirty' $TMPFILE
729 ceph df detail --format json > $TMPFILE
730 grep 'rd_bytes' $TMPFILE
731 grep 'dirty' $TMPFILE
732 ceph df --format xml | grep '<total_bytes>'
733 ceph df detail --format xml | grep '<rd_bytes>'
734
735 ceph fsid
736 ceph health
737 ceph health detail
738 ceph health --format json-pretty
739 ceph health detail --format xml-pretty
740
741 ceph time-sync-status
742
743 ceph node ls
744 for t in mon osd mds mgr ; do
745 ceph node ls $t
746 done
747
748 ceph_watch_start
749 mymsg="this is a test log message $$.$(date)"
750 ceph log "$mymsg"
751 ceph log last | grep "$mymsg"
752 ceph log last 100 | grep "$mymsg"
753 ceph_watch_wait "$mymsg"
754
755 ceph mgr dump
756 ceph mgr module ls
757 ceph mgr module enable restful
758 expect_false ceph mgr module enable foodne
759 ceph mgr module enable foodne --force
760 ceph mgr module disable foodne
761 ceph mgr module disable foodnebizbangbash
762
763 ceph mon metadata a
764 ceph mon metadata
765 ceph mon count-metadata ceph_version
766 ceph mon versions
767
768 ceph mgr metadata
769 ceph mgr versions
770 ceph mgr count-metadata ceph_version
771
772 ceph versions
773
774 ceph node ls
775 }
776
777 function check_mds_active()
778 {
779 fs_name=$1
780 ceph fs get $fs_name | grep active
781 }
782
783 function wait_mds_active()
784 {
785 fs_name=$1
786 max_run=300
787 for i in $(seq 1 $max_run) ; do
788 if ! check_mds_active $fs_name ; then
789 echo "waiting for an active MDS daemon ($i/$max_run)"
790 sleep 5
791 else
792 break
793 fi
794 done
795 check_mds_active $fs_name
796 }
797
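# Print the GIDs of all MDS daemons in the named filesystem, space-separated.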
798 function get_mds_gids()
799 {
800 fs_name=$1
801 ceph fs get $fs_name --format=json | python3 -c "import json; import sys; print(' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()]))"
802 }
803
804 function fail_all_mds()
805 {
806 fs_name=$1
807 ceph fs set $fs_name cluster_down true
808 mds_gids=$(get_mds_gids $fs_name)
809 for mds_gid in $mds_gids ; do
810 ceph mds fail $mds_gid
811 done
812 if check_mds_active $fs_name ; then
813 echo "An active MDS remains, something went wrong"
814 ceph fs get $fs_name
815 exit 1
816 fi
817
818 }
819
820 function remove_all_fs()
821 {
822 existing_fs=$(ceph fs ls --format=json | python3 -c "import json; import sys; print(' '.join([fs['name'] for fs in json.load(sys.stdin)]))")
823 for fs_name in $existing_fs ; do
824 echo "Removing fs ${fs_name}..."
825 fail_all_mds $fs_name
826 echo "Removing existing filesystem '${fs_name}'..."
827 ceph fs rm $fs_name --yes-i-really-mean-it
828 echo "Removed '${fs_name}'."
829 done
830 }
831
832 # So that tests requiring MDS can skip if one is not configured
833 # in the cluster at all
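# e.g.:
#   if ! mds_exists ; then echo "Skipping test, no MDS found" ; return ; fi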
834 function mds_exists()
835 {
836 ceph auth ls | grep "^mds"
837 }
838
839 # some of the commands are just not idempotent.
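# Run a command with CEPH_CLI_TEST_DUP_COMMAND temporarily unset (when that
# variable is set, the CLI test harness re-issues each command, which breaks
# non-idempotent commands), e.g.:
#   without_test_dup_command ceph tell mds.0 respawn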
840 function without_test_dup_command()
841 {
842 if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
843 $@
844 else
845 local saved=${CEPH_CLI_TEST_DUP_COMMAND}
846 unset CEPH_CLI_TEST_DUP_COMMAND
847 $@
848 CEPH_CLI_TEST_DUP_COMMAND=$saved
849 fi
850 }
851
852 function test_mds_tell()
853 {
854 local FS_NAME=cephfs
855 if ! mds_exists ; then
856 echo "Skipping test, no MDS found"
857 return
858 fi
859
860 remove_all_fs
861 ceph osd pool create fs_data 16
862 ceph osd pool create fs_metadata 16
863 ceph fs new $FS_NAME fs_metadata fs_data
864 wait_mds_active $FS_NAME
865
866 # Test injectargs by GID
867 old_mds_gids=$(get_mds_gids $FS_NAME)
868 echo Old GIDs: $old_mds_gids
869
870 for mds_gid in $old_mds_gids ; do
871 ceph tell mds.$mds_gid injectargs "--debug-mds 20"
872 done
873 expect_false ceph tell mds.a injectargs mds_max_file_recover -1
874
875 # Test respawn by rank
876 without_test_dup_command ceph tell mds.0 respawn
877 new_mds_gids=$old_mds_gids
878 while [ $new_mds_gids -eq $old_mds_gids ] ; do
879 sleep 5
880 new_mds_gids=$(get_mds_gids $FS_NAME)
881 done
882 echo New GIDs: $new_mds_gids
883
884 # Test respawn by ID
885 without_test_dup_command ceph tell mds.a respawn
886 new_mds_gids=$old_mds_gids
887 while [ $new_mds_gids -eq $old_mds_gids ] ; do
888 sleep 5
889 new_mds_gids=$(get_mds_gids $FS_NAME)
890 done
891 echo New GIDs: $new_mds_gids
892
893 remove_all_fs
894 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
895 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
896 }
897
898 function test_mon_mds()
899 {
900 local FS_NAME=cephfs
901 remove_all_fs
902
903 ceph osd pool create fs_data 16
904 ceph osd pool create fs_metadata 16
905 ceph fs new $FS_NAME fs_metadata fs_data
906
907 ceph fs set $FS_NAME cluster_down true
908 ceph fs set $FS_NAME cluster_down false
909
910 ceph mds compat rm_incompat 4
911 ceph mds compat rm_incompat 4
912
913 # We don't want any MDSs to be up, their activity can interfere with
914 # the "current_epoch + 1" checking below if they're generating updates
915 fail_all_mds $FS_NAME
916
917 ceph mds compat show
918 ceph fs dump
919 ceph fs get $FS_NAME
920 for mds_gid in $(get_mds_gids $FS_NAME) ; do
921 ceph mds metadata $mds_gid
922 done
923 ceph mds metadata
924 ceph mds versions
925 ceph mds count-metadata os
926
927 # XXX mds fail, but how do you undo it?
928 mdsmapfile=$TEMP_DIR/mdsmap.$$
929 current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
930 [ -s $mdsmapfile ]
931 rm $mdsmapfile
932
933 ceph osd pool create data2 16
934 ceph osd pool create data3 16
935 data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
936 data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
937 ceph fs add_data_pool cephfs $data2_pool
938 ceph fs add_data_pool cephfs $data3_pool
939 ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
940 check_response "Error ENOENT"
941 ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
942 check_response "Error ENOENT"
943 ceph fs rm_data_pool cephfs $data2_pool
944 ceph fs rm_data_pool cephfs $data3_pool
945 ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
946 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
947 ceph fs set cephfs max_mds 4
948 ceph fs set cephfs max_mds 3
949 ceph fs set cephfs max_mds 256
950 expect_false ceph fs set cephfs max_mds 257
951 ceph fs set cephfs max_mds 4
952 ceph fs set cephfs max_mds 256
953 expect_false ceph fs set cephfs max_mds 257
954 expect_false ceph fs set cephfs max_mds asdf
955 expect_false ceph fs set cephfs inline_data true
956 ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
957 ceph fs set cephfs inline_data yes --yes-i-really-really-mean-it
958 ceph fs set cephfs inline_data 1 --yes-i-really-really-mean-it
959 expect_false ceph fs set cephfs inline_data --yes-i-really-really-mean-it
960 ceph fs set cephfs inline_data false
961 ceph fs set cephfs inline_data no
962 ceph fs set cephfs inline_data 0
963 expect_false ceph fs set cephfs inline_data asdf
964 ceph fs set cephfs max_file_size 1048576
965 expect_false ceph fs set cephfs max_file_size 123asdf
966
967 expect_false ceph fs set cephfs allow_new_snaps
968 ceph fs set cephfs allow_new_snaps true
969 ceph fs set cephfs allow_new_snaps 0
970 ceph fs set cephfs allow_new_snaps false
971 ceph fs set cephfs allow_new_snaps no
972 expect_false ceph fs set cephfs allow_new_snaps taco
973
974 # we should never be able to add EC pools as data or metadata pools
975 # create an ec-pool...
976 ceph osd pool create mds-ec-pool 16 16 erasure
977 set +e
978 ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
979 check_response 'erasure-code' $? 22
980 set -e
981 ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
982 data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
983 metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
984
985 fail_all_mds $FS_NAME
986
987 set +e
988 # Check that rmfailed requires confirmation
989 expect_false ceph mds rmfailed 0
990 ceph mds rmfailed 0 --yes-i-really-mean-it
991 set -e
992
993 # Check that `fs new` is no longer permitted
994 expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
995
996 # Check that 'fs reset' runs
997 ceph fs reset $FS_NAME --yes-i-really-mean-it
998
999 # Check that creating a second FS fails by default
1000 ceph osd pool create fs_metadata2 16
1001 ceph osd pool create fs_data2 16
1002 set +e
1003 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
1004 set -e
1005
1006 # Check that setting enable_multiple enables creation of second fs
1007 ceph fs flag set enable_multiple true --yes-i-really-mean-it
1008 ceph fs new cephfs2 fs_metadata2 fs_data2
1009
1010 # Clean up multi-fs stuff
1011 fail_all_mds cephfs2
1012 ceph fs rm cephfs2 --yes-i-really-mean-it
1013 ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
1014 ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
1015
1016 fail_all_mds $FS_NAME
1017
1018 # Clean up to enable subsequent fs new tests
1019 ceph fs rm $FS_NAME --yes-i-really-mean-it
1020
1021 set +e
1022 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1023 check_response 'erasure-code' $? 22
1024 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1025 check_response 'erasure-code' $? 22
1026 ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
1027 check_response 'erasure-code' $? 22
1028 set -e
1029
1030 # ... now create a cache tier in front of the EC pool...
1031 ceph osd pool create mds-tier 2
1032 ceph osd tier add mds-ec-pool mds-tier
1033 ceph osd tier set-overlay mds-ec-pool mds-tier
1034 tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
1035
1036 # Use of a readonly tier should be forbidden
1037 ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
1038 set +e
1039 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1040 check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
1041 set -e
1042
1043 # Use of a writeback tier should enable FS creation
1044 ceph osd tier cache-mode mds-tier writeback
1045 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
1046
1047 # While a FS exists using the tiered pools, I should not be allowed
1048 # to remove the tier
1049 set +e
1050 ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
1051 check_response 'in use by CephFS' $? 16
1052 ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
1053 check_response 'in use by CephFS' $? 16
1054 set -e
1055
1056 fail_all_mds $FS_NAME
1057 ceph fs rm $FS_NAME --yes-i-really-mean-it
1058
1059 # ... but we should be forbidden from using the cache pool in the FS directly.
1060 set +e
1061 ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
1062 check_response 'in use as a cache tier' $? 22
1063 ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
1064 check_response 'in use as a cache tier' $? 22
1065 ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
1066 check_response 'in use as a cache tier' $? 22
1067 set -e
1068
1069 # Clean up tier + EC pools
1070 ceph osd tier remove-overlay mds-ec-pool
1071 ceph osd tier remove mds-ec-pool mds-tier
1072
1073 # Create a FS using the 'cache' pool now that it's no longer a tier
1074 ceph fs new $FS_NAME fs_metadata mds-tier --force
1075
1076 # We should be forbidden from using this pool as a tier now that
1077 # it's in use for CephFS
1078 set +e
1079 ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
1080 check_response 'in use by CephFS' $? 16
1081 set -e
1082
1083 fail_all_mds $FS_NAME
1084 ceph fs rm $FS_NAME --yes-i-really-mean-it
1085
1086 # We should be permitted to use an EC pool with overwrites enabled
1087 # as the data pool...
1088 ceph osd pool set mds-ec-pool allow_ec_overwrites true
1089 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1090 fail_all_mds $FS_NAME
1091 ceph fs rm $FS_NAME --yes-i-really-mean-it
1092
1093 # ...but not as the metadata pool
1094 set +e
1095 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1096 check_response 'erasure-code' $? 22
1097 set -e
1098
1099 ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
1100
1101 # Create a FS and check that we can subsequently add a cache tier to it
1102 ceph fs new $FS_NAME fs_metadata fs_data --force
1103
1104 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1105 ceph osd tier add fs_metadata mds-tier
1106 ceph osd tier cache-mode mds-tier writeback
1107 ceph osd tier set-overlay fs_metadata mds-tier
1108
1109 # Removing tier should be permitted because the underlying pool is
1110 # replicated (#11504 case)
1111 ceph osd tier cache-mode mds-tier proxy
1112 ceph osd tier remove-overlay fs_metadata
1113 ceph osd tier remove fs_metadata mds-tier
1114 ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
1115
1116 # Clean up FS
1117 fail_all_mds $FS_NAME
1118 ceph fs rm $FS_NAME --yes-i-really-mean-it
1119
1120
1121
1122 ceph mds stat
1123 # ceph mds tell mds.a getmap
1124 # ceph mds rm
1125 # ceph mds rmfailed
1126 # ceph mds set_state
1127
1128 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
1129 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
1130 }
1131
1132 function test_mon_mds_metadata()
1133 {
1134 local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
1135 test "$nmons" -gt 0
1136
1137 ceph fs dump |
1138 sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
1139 while read gid id rank; do
1140 ceph mds metadata ${gid} | grep '"hostname":'
1141 ceph mds metadata ${id} | grep '"hostname":'
1142 ceph mds metadata ${rank} | grep '"hostname":'
1143
1144 local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
1145 test "$n" -eq "$nmons"
1146 done
1147
1148 expect_false ceph mds metadata UNKNOWN
1149 }
1150
1151 function test_mon_mon()
1152 {
1153 # print help message
1154 ceph --help mon
1155 # no mon add/remove
1156 ceph mon dump
1157 ceph mon getmap -o $TEMP_DIR/monmap.$$
1158 [ -s $TEMP_DIR/monmap.$$ ]
1159
1160 # ceph mon tell
1161 first=$(ceph mon dump -f json | jq -r '.mons[0].name')
1162 ceph tell mon.$first mon_status
1163
1164 # test mon features
1165 ceph mon feature ls
1166 ceph mon feature set kraken --yes-i-really-mean-it
1167 expect_false ceph mon feature set abcd
1168 expect_false ceph mon feature set abcd --yes-i-really-mean-it
1169 }
1170
1171 function test_mon_priority_and_weight()
1172 {
1173 for i in 0 1 65535; do
1174 ceph mon set-weight a $i
1175 w=$(ceph mon dump --format=json-pretty 2>/dev/null | jq '.mons[0].weight')
1176 [[ "$w" == "$i" ]]
1177 done
1178
1179 for i in -1 65536; do
1180 expect_false ceph mon set-weight a $i
1181 done
1182 }
1183
1184 function gen_secrets_file()
1185 {
1186 # let's assume we can have the following types
1187 # all - generates both cephx and lockbox, with mock dm-crypt key
1188 # cephx - only cephx
1189 # no_cephx - lockbox and dm-crypt, no cephx
1190 # no_lockbox - dm-crypt and cephx, no lockbox
1191 # empty - empty file
1192 # empty_json - correct json, empty map
1193 # bad_json - bad json :)
1194 #
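# Each call prints the path of a freshly generated secrets file, e.g.:
#   all_secrets=$(gen_secrets_file "all")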
1195 local t=$1
1196 if [[ -z "$t" ]]; then
1197 t="all"
1198 fi
1199
1200 fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
1201 echo $fn
1202 if [[ "$t" == "empty" ]]; then
1203 return 0
1204 fi
1205
1206 echo "{" > $fn
1207 if [[ "$t" == "bad_json" ]]; then
1208 echo "asd: ; }" >> $fn
1209 return 0
1210 elif [[ "$t" == "empty_json" ]]; then
1211 echo "}" >> $fn
1212 return 0
1213 fi
1214
1215 cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
1216 lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
1217 dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
1218
1219 if [[ "$t" == "all" ]]; then
1220 echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
1221 elif [[ "$t" == "cephx" ]]; then
1222 echo "$cephx_secret" >> $fn
1223 elif [[ "$t" == "no_cephx" ]]; then
1224 echo "$lb_secret,$dmcrypt_key" >> $fn
1225 elif [[ "$t" == "no_lockbox" ]]; then
1226 echo "$cephx_secret,$dmcrypt_key" >> $fn
1227 else
1228 echo "unknown gen_secrets_file() type \'$fn\'"
1229 return 1
1230 fi
1231 echo "}" >> $fn
1232 return 0
1233 }
1234
1235 function test_mon_osd_create_destroy()
1236 {
1237 ceph osd new 2>&1 | grep 'EINVAL'
1238 ceph osd new '' -1 2>&1 | grep 'EINVAL'
1239 ceph osd new '' 10 2>&1 | grep 'EINVAL'
1240
1241 old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1242
1243 old_osds=$(ceph osd ls)
1244 num_osds=$(ceph osd ls | wc -l)
1245
1246 uuid=$(uuidgen)
1247 id=$(ceph osd new $uuid 2>/dev/null)
1248
1249 for i in $old_osds; do
1250 [[ "$i" != "$id" ]]
1251 done
1252
1253 ceph osd find $id
1254
1255 id2=`ceph osd new $uuid 2>/dev/null`
1256
1257 [[ $id2 == $id ]]
1258
1259 ceph osd new $uuid $id
1260
1261 id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1262 ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
1263
1264 uuid2=$(uuidgen)
1265 id2=$(ceph osd new $uuid2)
1266 ceph osd find $id2
1267 [[ "$id2" != "$id" ]]
1268
1269 ceph osd new $uuid $id2 2>&1 | grep EEXIST
1270 ceph osd new $uuid2 $id2
1271
1272 # test with secrets
1273 empty_secrets=$(gen_secrets_file "empty")
1274 empty_json=$(gen_secrets_file "empty_json")
1275 all_secrets=$(gen_secrets_file "all")
1276 cephx_only=$(gen_secrets_file "cephx")
1277 no_cephx=$(gen_secrets_file "no_cephx")
1278 no_lockbox=$(gen_secrets_file "no_lockbox")
1279 bad_json=$(gen_secrets_file "bad_json")
1280
1281 # empty secrets should be idempotent
1282 new_id=$(ceph osd new $uuid $id -i $empty_secrets)
1283 [[ "$new_id" == "$id" ]]
1284
1285 # empty json, thus empty secrets
1286 new_id=$(ceph osd new $uuid $id -i $empty_json)
1287 [[ "$new_id" == "$id" ]]
1288
1289 ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
1290
1291 ceph osd rm $id
1292 ceph osd rm $id2
1293 ceph osd setmaxosd $old_maxosd
1294
1295 ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
1296 ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
1297
1298 osds=$(ceph osd ls)
1299 id=$(ceph osd new $uuid -i $all_secrets)
1300 for i in $osds; do
1301 [[ "$i" != "$id" ]]
1302 done
1303
1304 ceph osd find $id
1305
1306 # validate secrets and dm-crypt are set
1307 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1308 s=$(cat $all_secrets | jq '.cephx_secret')
1309 [[ $k == $s ]]
1310 k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
1311 jq '.key')
1312 s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
1313 [[ $k == $s ]]
1314 ceph config-key exists dm-crypt/osd/$uuid/luks
1315
1316 osds=$(ceph osd ls)
1317 id2=$(ceph osd new $uuid2 -i $cephx_only)
1318 for i in $osds; do
1319 [[ "$i" != "$id2" ]]
1320 done
1321
1322 ceph osd find $id2
1323 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1324 s=$(cat $all_secrets | jq '.cephx_secret')
1325 [[ $k == $s ]]
1326 expect_false ceph auth get-key client.osd-lockbox.$uuid2
1327 expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
1328
1329 ceph osd destroy osd.$id2 --yes-i-really-mean-it
1330 ceph osd destroy $id2 --yes-i-really-mean-it
1331 ceph osd find $id2
1332 expect_false ceph auth get-key osd.$id2
1333 ceph osd dump | grep osd.$id2 | grep destroyed
1334
1335 id3=$id2
1336 uuid3=$(uuidgen)
1337 ceph osd new $uuid3 $id3 -i $all_secrets
1338 ceph osd dump | grep osd.$id3 | expect_false grep destroyed
1339 ceph auth get-key client.osd-lockbox.$uuid3
1340 ceph auth get-key osd.$id3
1341 ceph config-key exists dm-crypt/osd/$uuid3/luks
1342
1343 ceph osd purge-new osd.$id3 --yes-i-really-mean-it
1344 expect_false ceph osd find $id2
1345 expect_false ceph auth get-key osd.$id2
1346 expect_false ceph auth get-key client.osd-lockbox.$uuid3
1347 expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
1348 ceph osd purge osd.$id3 --yes-i-really-mean-it
1349 ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent
1350
1351 ceph osd purge osd.$id --yes-i-really-mean-it
1352 ceph osd purge 123456 --yes-i-really-mean-it
1353 expect_false ceph osd find $id
1354 expect_false ceph auth get-key osd.$id
1355 expect_false ceph auth get-key client.osd-lockbox.$uuid
1356 expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
1357
1358 rm $empty_secrets $empty_json $all_secrets $cephx_only \
1359 $no_cephx $no_lockbox $bad_json
1360
1361 for i in $(ceph osd ls); do
1362 [[ "$i" != "$id" ]]
1363 [[ "$i" != "$id2" ]]
1364 [[ "$i" != "$id3" ]]
1365 done
1366
1367 [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
1368 ceph osd setmaxosd $old_maxosd
1369
1370 }
1371
1372 function test_mon_config_key()
1373 {
1374 key=asdfasdfqwerqwreasdfuniquesa123df
1375 ceph config-key list | grep -c $key | grep 0
1376 ceph config-key get $key | grep -c bar | grep 0
1377 ceph config-key set $key bar
1378 ceph config-key get $key | grep bar
1379 ceph config-key list | grep -c $key | grep 1
1380 ceph config-key dump | grep $key | grep bar
1381 ceph config-key rm $key
1382 expect_false ceph config-key get $key
1383 ceph config-key list | grep -c $key | grep 0
1384 ceph config-key dump | grep -c $key | grep 0
1385 }
1386
1387 function test_mon_osd()
1388 {
1389 #
1390 # osd blacklist
1391 #
1392 bl=192.168.0.1:0/1000
1393 ceph osd blacklist add $bl
1394 ceph osd blacklist ls | grep $bl
1395 ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
1396 ceph osd dump --format=json-pretty | grep $bl
1397 ceph osd dump | grep $bl
1398 ceph osd blacklist rm $bl
1399 ceph osd blacklist ls | expect_false grep $bl
1400
1401 bl=192.168.0.1
1402 # test without nonce, invalid nonce
1403 ceph osd blacklist add $bl
1404 ceph osd blacklist ls | grep $bl
1405 ceph osd blacklist rm $bl
1406 ceph osd blacklist ls | expect_false grep $bl
1407 expect_false "ceph osd blacklist $bl/-1"
1408 expect_false "ceph osd blacklist $bl/foo"
1409
1410 # test with wrong address
1411 expect_false "ceph osd blacklist 1234.56.78.90/100"
1412
1413 # Test `clear`
1414 ceph osd blacklist add $bl
1415 ceph osd blacklist ls | grep $bl
1416 ceph osd blacklist clear
1417 ceph osd blacklist ls | expect_false grep $bl
1418
1419 #
1420 # osd crush
1421 #
1422 ceph osd crush reweight-all
1423 ceph osd crush tunables legacy
1424 ceph osd crush show-tunables | grep argonaut
1425 ceph osd crush tunables bobtail
1426 ceph osd crush show-tunables | grep bobtail
1427 ceph osd crush tunables firefly
1428 ceph osd crush show-tunables | grep firefly
1429
1430 ceph osd crush set-tunable straw_calc_version 0
1431 ceph osd crush get-tunable straw_calc_version | grep 0
1432 ceph osd crush set-tunable straw_calc_version 1
1433 ceph osd crush get-tunable straw_calc_version | grep 1
1434
1435 #
1436 # require-min-compat-client
1437 expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
1438 ceph osd set-require-min-compat-client luminous
1439 ceph osd get-require-min-compat-client | grep luminous
1440 ceph osd dump | grep 'require_min_compat_client luminous'
1441
1442 #
1443 # osd scrub
1444 #
1445
1446 # blocking
1447 ceph osd scrub 0 --block
1448 ceph osd deep-scrub 0 --block
1449
1450 # how do I tell when these are done?
1451 ceph osd scrub 0
1452 ceph osd deep-scrub 0
1453 ceph osd repair 0
1454
1455 # pool scrub, force-recovery/backfill
1456 pool_names=`rados lspools`
1457 for pool_name in $pool_names
1458 do
1459 ceph osd pool scrub $pool_name
1460 ceph osd pool deep-scrub $pool_name
1461 ceph osd pool repair $pool_name
1462 ceph osd pool force-recovery $pool_name
1463 ceph osd pool cancel-force-recovery $pool_name
1464 ceph osd pool force-backfill $pool_name
1465 ceph osd pool cancel-force-backfill $pool_name
1466 done
1467
1468 for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \
1469 norebalance norecover notieragent
1470 do
1471 ceph osd set $f
1472 ceph osd unset $f
1473 done
1474 expect_false ceph osd set bogus
1475 expect_false ceph osd unset bogus
1476 for f in sortbitwise recover_deletes require_jewel_osds \
1477 require_kraken_osds
1478 do
1479 expect_false ceph osd set $f
1480 expect_false ceph osd unset $f
1481 done
1482 ceph osd require-osd-release octopus
1483 # can't lower
1484 expect_false ceph osd require-osd-release nautilus
1485 expect_false ceph osd require-osd-release mimic
1486 expect_false ceph osd require-osd-release luminous
1487 # these are no-ops but should succeed.
1488
1489 ceph osd set noup
1490 ceph osd down 0
1491 ceph osd dump | grep 'osd.0 down'
1492 ceph osd unset noup
1493 max_run=1000
1494 for ((i=0; i < $max_run; i++)); do
1495 if ! ceph osd dump | grep 'osd.0 up'; then
1496 echo "waiting for osd.0 to come back up ($i/$max_run)"
1497 sleep 1
1498 else
1499 break
1500 fi
1501 done
1502 ceph osd dump | grep 'osd.0 up'
1503
1504 ceph osd dump | grep 'osd.0 up'
1505 # ceph osd find expects the OsdName, so both ints and osd.n should work.
1506 ceph osd find 1
1507 ceph osd find osd.1
1508 expect_false ceph osd find osd.xyz
1509 expect_false ceph osd find xyz
1510 expect_false ceph osd find 0.1
1511 ceph --format plain osd find 1 # falls back to json-pretty
1512 if [ `uname` == Linux ]; then
1513 ceph osd metadata 1 | grep 'distro'
1514 ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
1515 fi
1516 ceph osd out 0
1517 ceph osd dump | grep 'osd.0.*out'
1518 ceph osd in 0
1519 ceph osd dump | grep 'osd.0.*in'
1520 ceph osd find 0
1521
1522 ceph osd info 0
1523 ceph osd info osd.0
1524 expect_false ceph osd info osd.xyz
1525 expect_false ceph osd info xyz
1526 expect_false ceph osd info 42
1527 expect_false ceph osd info osd.42
1528
1529 ceph osd info
1530 info_json=$(ceph osd info --format=json | jq -cM '.')
1531 dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
1532 [[ "${info_json}" == "${dump_json}" ]]
1533
1534 info_json=$(ceph osd info 0 --format=json | jq -cM '.')
1535 dump_json=$(ceph osd dump --format=json | \
1536 jq -cM '.osds[] | select(.osd == 0)')
1537 [[ "${info_json}" == "${dump_json}" ]]
1538
1539 info_plain="$(ceph osd info)"
1540 dump_plain="$(ceph osd dump | grep '^osd')"
1541 [[ "${info_plain}" == "${dump_plain}" ]]
1542
1543 info_plain="$(ceph osd info 0)"
1544 dump_plain="$(ceph osd dump | grep '^osd.0')"
1545 [[ "${info_plain}" == "${dump_plain}" ]]
1546
1547 ceph osd add-nodown 0 1
1548 ceph health detail | grep 'NODOWN'
1549 ceph osd rm-nodown 0 1
1550 ! ceph health detail | grep 'NODOWN'
1551
1552 ceph osd out 0 # so we can mark it as noin later
1553 ceph osd add-noin 0
1554 ceph health detail | grep 'NOIN'
1555 ceph osd rm-noin 0
1556 ! ceph health detail | grep 'NOIN'
1557 ceph osd in 0
1558
1559 ceph osd add-noout 0
1560 ceph health detail | grep 'NOOUT'
1561 ceph osd rm-noout 0
1562 ! ceph health detail | grep 'NOOUT'
1563
1564 # test osd id parse
1565 expect_false ceph osd add-noup 797er
1566 expect_false ceph osd add-nodown u9uwer
1567 expect_false ceph osd add-noin 78~15
1568
1569 expect_false ceph osd rm-noup 1234567
1570 expect_false ceph osd rm-nodown fsadf7
1571 expect_false ceph osd rm-noout 790-fd
1572
1573 ids=`ceph osd ls-tree default`
1574 for osd in $ids
1575 do
1576 ceph osd add-nodown $osd
1577 ceph osd add-noout $osd
1578 done
1579 ceph -s | grep 'NODOWN'
1580 ceph -s | grep 'NOOUT'
1581 ceph osd rm-nodown any
1582 ceph osd rm-noout all
1583 ! ceph -s | grep 'NODOWN'
1584 ! ceph -s | grep 'NOOUT'
1585
1586 # test crush node flags
1587 ceph osd add-noup osd.0
1588 ceph osd add-nodown osd.0
1589 ceph osd add-noin osd.0
1590 ceph osd add-noout osd.0
1591 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
1592 ceph osd rm-noup osd.0
1593 ceph osd rm-nodown osd.0
1594 ceph osd rm-noin osd.0
1595 ceph osd rm-noout osd.0
1596 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
1597
1598 ceph osd crush add-bucket foo host root=default
1599 ceph osd add-noup foo
1600 ceph osd add-nodown foo
1601 ceph osd add-noin foo
1602 ceph osd add-noout foo
1603 ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
1604 ceph osd rm-noup foo
1605 ceph osd rm-nodown foo
1606 ceph osd rm-noin foo
1607 ceph osd rm-noout foo
1608 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
1609 ceph osd add-noup foo
1610 ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
1611 ceph osd crush rm foo
1612 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
1613
1614 ceph osd set-group noup osd.0
1615 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1616 ceph osd set-group noup,nodown osd.0
1617 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1618 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1619 ceph osd set-group noup,nodown,noin osd.0
1620 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1621 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1622 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1623 ceph osd set-group noup,nodown,noin,noout osd.0
1624 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1625 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1626 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1627 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1628 ceph osd unset-group noup osd.0
1629 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
1630 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1631 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1632 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1633 ceph osd unset-group noup,nodown osd.0
1634 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown'
1635 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1636 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1637 ceph osd unset-group noup,nodown,noin osd.0
1638 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin'
1639 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1640 ceph osd unset-group noup,nodown,noin,noout osd.0
1641 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1642
1643 ceph osd set-group noup,nodown,noin,noout osd.0 osd.1
1644 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1645 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1646 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1647 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1648 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup'
1649 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown'
1650 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin'
1651 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout'
1652 ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1
1653 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1654 ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1655
1656 ceph osd set-group noup all
1657 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1658 ceph osd unset-group noup all
1659 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
1660
1661 # crush node flags
1662 ceph osd crush add-bucket foo host root=default
1663 ceph osd set-group noup foo
1664 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1665 ceph osd set-group noup,nodown foo
1666 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1667 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1668 ceph osd set-group noup,nodown,noin foo
1669 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1670 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1671 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1672 ceph osd set-group noup,nodown,noin,noout foo
1673 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1674 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1675 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1676 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1677
1678 ceph osd unset-group noup foo
1679 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup'
1680 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1681 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1682 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1683 ceph osd unset-group noup,nodown foo
1684 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown'
1685 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1686 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1687 ceph osd unset-group noup,nodown,noin foo
1688 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin'
1689 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1690 ceph osd unset-group noup,nodown,noin,noout foo
1691 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout'
1692
1693 ceph osd set-group noin,noout foo
1694 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1695 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1696 ceph osd unset-group noin,noout foo
1697 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
1698
1699 ceph osd set-group noup,nodown,noin,noout foo
1700 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1701 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1702 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1703 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1704 ceph osd crush rm foo
1705 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
1706
1707 # test device class flags
1708 osd_0_device_class=$(ceph osd crush get-device-class osd.0)
1709 ceph osd set-group noup $osd_0_device_class
1710 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1711 ceph osd set-group noup,nodown $osd_0_device_class
1712 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1713 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1714 ceph osd set-group noup,nodown,noin $osd_0_device_class
1715 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1716 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1717 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1718 ceph osd set-group noup,nodown,noin,noout $osd_0_device_class
1719 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1720 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1721 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1722 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1723
1724 ceph osd unset-group noup $osd_0_device_class
1725 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup'
1726 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1727 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1728 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1729 ceph osd unset-group noup,nodown $osd_0_device_class
1730 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown'
1731 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1732 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1733 ceph osd unset-group noup,nodown,noin $osd_0_device_class
1734 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin'
1735 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1736 ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class
1737 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout'
1738
1739 ceph osd set-group noin,noout $osd_0_device_class
1740 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1741 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1742 ceph osd unset-group noin,noout $osd_0_device_class
1743 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep $osd_0_device_class
1744
1745 # make sure mark out preserves weight
1746 ceph osd reweight osd.0 .5
1747 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1748 ceph osd out 0
1749 ceph osd in 0
1750 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1751
1752 ceph osd getmap -o $f
1753 [ -s $f ]
1754 rm $f
1755 save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1756 [ "$save" -gt 0 ]
1757 ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
1758 ceph osd setmaxosd 10
1759 ceph osd getmaxosd | grep 'max_osd = 10'
1760 ceph osd setmaxosd $save
1761 ceph osd getmaxosd | grep "max_osd = $save"
1762
1763 for id in `ceph osd ls` ; do
1764 retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
1765 done
1766
1767 ceph osd rm 0 2>&1 | grep 'EBUSY'
1768
1769 local old_osds=$(echo $(ceph osd ls))
1770 id=`ceph osd create`
1771 ceph osd find $id
1772 ceph osd lost $id --yes-i-really-mean-it
1773 expect_false ceph osd setmaxosd $id
1774 local new_osds=$(echo $(ceph osd ls))
1775 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1776 ceph osd rm $id
1777 done
1778
1779 uuid=`uuidgen`
1780 id=`ceph osd create $uuid`
1781 id2=`ceph osd create $uuid`
1782 [ "$id" = "$id2" ]
1783 ceph osd rm $id
1784
1785 ceph --help osd
1786
1787 # reset max_osd.
1788 ceph osd setmaxosd $id
1789 ceph osd getmaxosd | grep "max_osd = $save"
1790 local max_osd=$save
1791
1792 ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
1793 ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
1794
1795 id=`ceph osd create $uuid $max_osd`
1796 [ "$id" = "$max_osd" ]
1797 ceph osd find $id
1798 max_osd=$((max_osd + 1))
1799 ceph osd getmaxosd | grep "max_osd = $max_osd"
1800
1801 ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
1802 ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
1803 id2=`ceph osd create $uuid`
1804 [ "$id" = "$id2" ]
1805 id2=`ceph osd create $uuid $id`
1806 [ "$id" = "$id2" ]
1807
1808 uuid=`uuidgen`
1809 local gap_start=$max_osd
1810 id=`ceph osd create $uuid $((gap_start + 100))`
1811 [ "$id" = "$((gap_start + 100))" ]
1812 max_osd=$((id + 1))
1813 ceph osd getmaxosd | grep "max_osd = $max_osd"
1814
1815 ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
1816
1817 #
1818 # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
1819 # is repeated and consumes two osd ids, not just one.
1820 #
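# Illustrative note (hypothetical invocation): the duplication is driven by the
# environment, e.g.
#   CEPH_CLI_TEST_DUP_COMMAND=1 ceph osd create $(uuidgen)
# which is why the id accounting below is done explicitly instead of assuming
# exactly one new id per call.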
1821 local next_osd=$gap_start
1822 id=`ceph osd create $(uuidgen)`
1823 [ "$id" = "$next_osd" ]
1824
1825 next_osd=$((id + 1))
1826 id=`ceph osd create $(uuidgen) $next_osd`
1827 [ "$id" = "$next_osd" ]
1828
1829 local new_osds=$(echo $(ceph osd ls))
1830 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1831 [ $id -ge $save ]
1832 ceph osd rm $id
1833 done
1834 ceph osd setmaxosd $save
1835
1836 ceph osd ls
1837 ceph osd pool create data 16
1838 ceph osd pool application enable data rados
1839 ceph osd lspools | grep data
1840 ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
1841 ceph osd map data foo namespace | grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
1842 ceph osd pool delete data data --yes-i-really-really-mean-it
1843
1844 ceph osd pause
1845 ceph osd dump | grep 'flags.*pauserd,pausewr'
1846 ceph osd unpause
1847
1848 ceph osd tree
1849 ceph osd tree up
1850 ceph osd tree down
1851 ceph osd tree in
1852 ceph osd tree out
1853 ceph osd tree destroyed
1854 ceph osd tree up in
1855 ceph osd tree up out
1856 ceph osd tree down in
1857 ceph osd tree down out
1858 ceph osd tree out down
1859 expect_false ceph osd tree up down
1860 expect_false ceph osd tree up destroyed
1861 expect_false ceph osd tree down destroyed
1862 expect_false ceph osd tree up down destroyed
1863 expect_false ceph osd tree in out
1864 expect_false ceph osd tree up foo
1865
1866 ceph osd metadata
1867 ceph osd count-metadata os
1868 ceph osd versions
1869
1870 ceph osd perf
1871 ceph osd blocked-by
1872
1873 ceph osd stat | grep up
1874 }
1875
1876 function test_mon_crush()
1877 {
1878 f=$TEMP_DIR/map.$$
1879 epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
1880 [ -s $f ]
1881 [ "$epoch" -gt 1 ]
1882 nextepoch=$(( $epoch + 1 ))
1883 echo epoch $epoch nextepoch $nextepoch
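# A successful setcrushmap commits a new osdmap, so the epoch it reports back
# should be the epoch read above plus one (nextepoch); re-running the same
# command is expected to report that same, already-committed epoch.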
1884 rm -f $f.epoch
1885 expect_false ceph osd setcrushmap $nextepoch -i $f
1886 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1887 echo gotepoch $gotepoch
1888 [ "$gotepoch" -eq "$nextepoch" ]
1889 # should be idempotent
1890 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1891 echo epoch $gotepoch
1892 [ "$gotepoch" -eq "$nextepoch" ]
1893 rm $f
1894 }
1895
1896 function test_mon_osd_pool()
1897 {
1898 #
1899 # osd pool
1900 #
1901 ceph osd pool create data 16
1902 ceph osd pool application enable data rados
1903 ceph osd pool mksnap data datasnap
1904 rados -p data lssnap | grep datasnap
1905 ceph osd pool rmsnap data datasnap
1906 expect_false ceph osd pool rmsnap pool_fake snapshot
1907 ceph osd pool delete data data --yes-i-really-really-mean-it
1908
1909 ceph osd pool create data2 16
1910 ceph osd pool application enable data2 rados
1911 ceph osd pool rename data2 data3
1912 ceph osd lspools | grep data3
1913 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
1914
1915 ceph osd pool create replicated 16 16 replicated
1916 ceph osd pool create replicated 1 16 replicated
1917 ceph osd pool create replicated 16 16 # default is replicated
1918 ceph osd pool create replicated 16 # default is replicated, pgp_num = pg_num
1919 ceph osd pool application enable replicated rados
1920 # should fail because the type is not the same
1921 expect_false ceph osd pool create replicated 16 16 erasure
1922 ceph osd lspools | grep replicated
1923 ceph osd pool create ec_test 1 1 erasure
1924 ceph osd pool application enable ec_test rados
1925 set +e
1926 ceph osd count-metadata osd_objectstore | grep 'bluestore'
1927 if [ $? -eq 1 ]; then # enabling ec_overwrites on non-bluestore pools should fail
1928 ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
1929 check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
1930 else
1931 ceph osd pool set ec_test allow_ec_overwrites true || return 1
1932 expect_false ceph osd pool set ec_test allow_ec_overwrites false
1933 fi
1934 set -e
1935 ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
1936 ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
1937
1938 # test create pool with rule
1939 ceph osd erasure-code-profile set foo foo
1940 ceph osd erasure-code-profile ls | grep foo
1941 ceph osd crush rule create-erasure foo foo
1942 ceph osd pool create erasure 16 16 erasure foo
1943 expect_false ceph osd erasure-code-profile rm foo
1944 ceph osd pool delete erasure erasure --yes-i-really-really-mean-it
1945 ceph osd crush rule rm foo
1946 ceph osd erasure-code-profile rm foo
1947
1948 # autoscale mode
1949 ceph osd pool create modeon --autoscale-mode=on
1950 ceph osd dump | grep modeon | grep 'autoscale_mode on'
1951 ceph osd pool create modewarn --autoscale-mode=warn
1952 ceph osd dump | grep modewarn | grep 'autoscale_mode warn'
1953 ceph osd pool create modeoff --autoscale-mode=off
1954 ceph osd dump | grep modeoff | grep 'autoscale_mode off'
1955 ceph osd pool delete modeon modeon --yes-i-really-really-mean-it
1956 ceph osd pool delete modewarn modewarn --yes-i-really-really-mean-it
1957 ceph osd pool delete modeoff modeoff --yes-i-really-really-mean-it
1958 }
1959
1960 function test_mon_osd_pool_quota()
1961 {
1962 #
1963 # test osd pool set/get quota
1964 #
1965
1966 # create tmp pool
1967 ceph osd pool create tmp-quota-pool 32
1968 ceph osd pool application enable tmp-quota-pool rados
1969 #
1970 # set erroneous quotas
1971 #
1972 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
1973 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
1974 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
1975 #
1976 # set valid quotas
1977 #
1978 ceph osd pool set-quota tmp-quota-pool max_bytes 10
1979 ceph osd pool set-quota tmp-quota-pool max_objects 10M
1980 #
1981 # get quotas in json-pretty format
1982 #
1983 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1984 grep '"quota_max_objects":.*10000000'
1985 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1986 grep '"quota_max_bytes":.*10'
1987 #
1988 # get quotas
1989 #
1990 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B'
1991 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects'
1992 #
1993 # set valid quotas with unit prefix
1994 #
1995 ceph osd pool set-quota tmp-quota-pool max_bytes 10K
1996 #
1997 # get quotas
1998 #
1999 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
2000 #
2001 # set valid quotas with unit prefix
2002 #
2003 ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
2004 #
2005 # get quotas
2006 #
2007 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
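# Note on suffix parsing, as exercised above: max_objects treats "10M" as an
# SI count (10000000 objects), while max_bytes treats both "10K" and "10Ki"
# as 10240 bytes, rendered as "10 Ki" in the plain-text output.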
2008 #
2009 #
2010 # reset pool quotas
2011 #
2012 ceph osd pool set-quota tmp-quota-pool max_bytes 0
2013 ceph osd pool set-quota tmp-quota-pool max_objects 0
2014 #
2015 # test N/A quotas
2016 #
2017 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
2018 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
2019 #
2020 # cleanup tmp pool
2021 ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
2022 }
2023
2024 function test_mon_pg()
2025 {
2026 # Make sure we start healthy.
2027 wait_for_health_ok
2028
2029 ceph pg debug unfound_objects_exist
2030 ceph pg debug degraded_pgs_exist
2031 ceph pg deep-scrub 1.0
2032 ceph pg dump
2033 ceph pg dump pgs_brief --format=json
2034 ceph pg dump pgs --format=json
2035 ceph pg dump pools --format=json
2036 ceph pg dump osds --format=json
2037 ceph pg dump sum --format=json
2038 ceph pg dump all --format=json
2039 ceph pg dump pgs_brief osds --format=json
2040 ceph pg dump pools osds pgs_brief --format=json
2041 ceph pg dump_json
2042 ceph pg dump_pools_json
2043 ceph pg dump_stuck inactive
2044 ceph pg dump_stuck unclean
2045 ceph pg dump_stuck stale
2046 ceph pg dump_stuck undersized
2047 ceph pg dump_stuck degraded
2048 ceph pg ls
2049 ceph pg ls 1
2050 ceph pg ls stale
2051 expect_false ceph pg ls scrubq
2052 ceph pg ls active stale repair recovering
2053 ceph pg ls 1 active
2054 ceph pg ls 1 active stale
2055 ceph pg ls-by-primary osd.0
2056 ceph pg ls-by-primary osd.0 1
2057 ceph pg ls-by-primary osd.0 active
2058 ceph pg ls-by-primary osd.0 active stale
2059 ceph pg ls-by-primary osd.0 1 active stale
2060 ceph pg ls-by-osd osd.0
2061 ceph pg ls-by-osd osd.0 1
2062 ceph pg ls-by-osd osd.0 active
2063 ceph pg ls-by-osd osd.0 active stale
2064 ceph pg ls-by-osd osd.0 1 active stale
2065 ceph pg ls-by-pool rbd
2066 ceph pg ls-by-pool rbd active stale
2067 # can't test this...
2068 # ceph pg force_create_pg
2069 ceph pg getmap -o $TEMP_DIR/map.$$
2070 [ -s $TEMP_DIR/map.$$ ]
2071 ceph pg map 1.0 | grep acting
2072 ceph pg repair 1.0
2073 ceph pg scrub 1.0
2074
2075 ceph osd set-full-ratio .962
2076 ceph osd dump | grep '^full_ratio 0.962'
2077 ceph osd set-backfillfull-ratio .912
2078 ceph osd dump | grep '^backfillfull_ratio 0.912'
2079 ceph osd set-nearfull-ratio .892
2080 ceph osd dump | grep '^nearfull_ratio 0.892'
2081
2082 # Check health status
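# The ratios must satisfy nearfull < backfillfull < full; pushing nearfull
# above backfillfull (or backfillfull above full) should raise the
# OSD_OUT_OF_ORDER_FULL warning checked below, and restoring the original
# values should clear it.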
2083 ceph osd set-nearfull-ratio .913
2084 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
2085 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
2086 ceph osd set-nearfull-ratio .892
2087 ceph osd set-backfillfull-ratio .963
2088 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
2089 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
2090 ceph osd set-backfillfull-ratio .912
2091
2092 # Check injected full results
2093 $SUDO ceph tell osd.0 injectfull nearfull
2094 wait_for_health "OSD_NEARFULL"
2095 ceph health detail | grep "osd.0 is near full"
2096 $SUDO ceph tell osd.0 injectfull none
2097 wait_for_health_ok
2098
2099 $SUDO ceph tell osd.1 injectfull backfillfull
2100 wait_for_health "OSD_BACKFILLFULL"
2101 ceph health detail | grep "osd.1 is backfill full"
2102 $SUDO ceph tell osd.1 injectfull none
2103 wait_for_health_ok
2104
2105 $SUDO ceph tell osd.2 injectfull failsafe
2106 # failsafe and full are the same as far as the monitor is concerned
2107 wait_for_health "OSD_FULL"
2108 ceph health detail | grep "osd.2 is full"
2109 $SUDO ceph tell osd.2 injectfull none
2110 wait_for_health_ok
2111
2112 $SUDO ceph tell osd.0 injectfull full
2113 wait_for_health "OSD_FULL"
2114 ceph health detail | grep "osd.0 is full"
2115 $SUDO ceph tell osd.0 injectfull none
2116 wait_for_health_ok
2117
2118 ceph pg stat | grep 'pgs:'
2119 ceph pg 1.0 query
2120 ceph tell 1.0 query
2121 first=$(ceph mon dump -f json | jq -r '.mons[0].name')
2122 ceph tell mon.$first quorum enter
2123 ceph quorum_status
2124 ceph report | grep osd_stats
2125 ceph status
2126 ceph -s
2127
2128 #
2129 # tell osd version
2130 #
2131 ceph tell osd.0 version
2132 expect_false ceph tell osd.9999 version
2133 expect_false ceph tell osd.foo version
2134
2135 # back to pg stuff
2136
2137 ceph tell osd.0 dump_pg_recovery_stats | grep Started
2138
2139 ceph osd reweight 0 0.9
2140 expect_false ceph osd reweight 0 -1
2141 ceph osd reweight osd.0 1
2142
2143 ceph osd primary-affinity osd.0 .9
2144 expect_false ceph osd primary-affinity osd.0 -2
2145 expect_false ceph osd primary-affinity osd.9999 .5
2146 ceph osd primary-affinity osd.0 1
2147
2148 ceph osd pool set rbd size 2
2149 ceph osd pg-temp 1.0 0 1
2150 ceph osd pg-temp 1.0 osd.1 osd.0
2151 expect_false ceph osd pg-temp 1.0 0 1 2
2152 expect_false ceph osd pg-temp asdf qwer
2153 expect_false ceph osd pg-temp 1.0 asdf
2154 ceph osd pg-temp 1.0 # cleanup pg-temp
2155
2156 ceph pg repeer 1.0
2157 expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore
2158
2159 # don't test ceph osd primary-temp for now
2160 }
2161
2162 function test_mon_osd_pool_set()
2163 {
2164 TEST_POOL_GETSET=pool_getset
2165 ceph osd pool create $TEST_POOL_GETSET 1
2166 ceph osd pool application enable $TEST_POOL_GETSET rados
2167 ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off
2168 wait_for_clean
2169 ceph osd pool get $TEST_POOL_GETSET all
2170
2171 for s in pg_num pgp_num size min_size crush_rule; do
2172 ceph osd pool get $TEST_POOL_GETSET $s
2173 done
2174
2175 old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
2176 (( new_size = old_size + 1 ))
2177 ceph osd pool set $TEST_POOL_GETSET size $new_size
2178 ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
2179 ceph osd pool set $TEST_POOL_GETSET size $old_size
2180
2181 ceph osd pool create pool_erasure 1 1 erasure
2182 ceph osd pool application enable pool_erasure rados
2183 wait_for_clean
2184 set +e
2185 ceph osd pool set pool_erasure size 4444 2>$TMPFILE
2186 check_response 'not change the size'
2187 set -e
2188 ceph osd pool get pool_erasure erasure_code_profile
2189 ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it
2190
2191 for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
2192 ceph osd pool set $TEST_POOL_GETSET $flag false
2193 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
2194 ceph osd pool set $TEST_POOL_GETSET $flag true
2195 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
2196 ceph osd pool set $TEST_POOL_GETSET $flag 1
2197 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
2198 ceph osd pool set $TEST_POOL_GETSET $flag 0
2199 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
2200 expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
2201 expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
2202 done
2203
2204 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
2205 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
2206 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
2207 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
2208 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
2209
2210 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
2211 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
2212 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
2213 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
2214 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
2215
2216 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
2217 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
2218 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
2219 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
2220 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
2221
2222 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
2223 ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
2224 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
2225 ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
2226 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'
2227 ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
2228 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
2229 expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11
2230 expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11
2231
2232 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
2233 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
2234 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
2235 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
2236 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
2237
2238 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
2239 ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
2240 ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
2241 ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
2242 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
2243
2244 ceph osd pool set $TEST_POOL_GETSET nopgchange 1
2245 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
2246 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
2247 ceph osd pool set $TEST_POOL_GETSET nopgchange 0
2248 ceph osd pool set $TEST_POOL_GETSET pg_num 10
2249 wait_for_clean
2250 ceph osd pool set $TEST_POOL_GETSET pgp_num 10
2251 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0
2252 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0
2253
2254 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
2255 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
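# (Assumption about intent: capping the growth at 32 new PGs per OSD keeps the
# increase within the monitor's per-OSD PG limits, so the pg_num/pgp_num bumps
# below are not rejected.)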
2256 ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
2257 ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
2258 wait_for_clean
2259
2260 ceph osd pool set $TEST_POOL_GETSET nosizechange 1
2261 expect_false ceph osd pool set $TEST_POOL_GETSET size 2
2262 expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
2263 ceph osd pool set $TEST_POOL_GETSET nosizechange 0
2264 ceph osd pool set $TEST_POOL_GETSET size 2
2265 wait_for_clean
2266 ceph osd pool set $TEST_POOL_GETSET min_size 2
2267
2268 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
2269 ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
2270
2271 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
2272 ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
2273
2274 ceph osd pool get rbd crush_rule | grep 'crush_rule: '
2275
2276 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2277 ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
2278 ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
2279 ceph osd pool set $TEST_POOL_GETSET compression_mode unset
2280 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2281
2282 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2283 ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
2284 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
2285 ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
2286 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2287
2288 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2289 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
2290 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
2291 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
2292 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
2293 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
2294 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2295
2296 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2297 ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
2298 ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
2299 ceph osd pool set $TEST_POOL_GETSET csum_type unset
2300 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2301
2302 for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2303 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2304 ceph osd pool set $TEST_POOL_GETSET $size 100
2305 ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
2306 ceph osd pool set $TEST_POOL_GETSET $size 0
2307 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2308 done
2309
2310 ceph osd pool set $TEST_POOL_GETSET nodelete 1
2311 expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2312 ceph osd pool set $TEST_POOL_GETSET nodelete 0
2313 ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2314
2315 }
2316
2317 function test_mon_osd_tiered_pool_set()
2318 {
2319 # this is really a tier pool
2320 ceph osd pool create real-tier 2
2321 ceph osd tier add rbd real-tier
2322
2323 # expect us to be unable to set negative values for hit_set_*
2324 for o in hit_set_period hit_set_count hit_set_fpp; do
2325 expect_false ceph osd pool set real-tier $o -1
2326 done
2327
2328 # and hit_set_fpp should be in range 0..1
2329 expect_false ceph osd pool set real-tier hit_set_fpp 2
2330
2331 ceph osd pool set real-tier hit_set_type explicit_hash
2332 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
2333 ceph osd pool set real-tier hit_set_type explicit_object
2334 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
2335 ceph osd pool set real-tier hit_set_type bloom
2336 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
2337 expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
2338 ceph osd pool set real-tier hit_set_period 123
2339 ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
2340 ceph osd pool set real-tier hit_set_count 12
2341 ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
2342 ceph osd pool set real-tier hit_set_fpp .01
2343 ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
2344
2345 ceph osd pool set real-tier target_max_objects 123
2346 ceph osd pool get real-tier target_max_objects | \
2347 grep 'target_max_objects:[ \t]\+123'
2348 ceph osd pool set real-tier target_max_bytes 123456
2349 ceph osd pool get real-tier target_max_bytes | \
2350 grep 'target_max_bytes:[ \t]\+123456'
2351 ceph osd pool set real-tier cache_target_dirty_ratio .123
2352 ceph osd pool get real-tier cache_target_dirty_ratio | \
2353 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
2354 expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
2355 expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
2356 ceph osd pool set real-tier cache_target_dirty_high_ratio .123
2357 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
2358 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
2359 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
2360 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
2361 ceph osd pool set real-tier cache_target_full_ratio .123
2362 ceph osd pool get real-tier cache_target_full_ratio | \
2363 grep 'cache_target_full_ratio:[ \t]\+0.123'
2364 ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
2365 ceph osd pool set real-tier cache_target_full_ratio 1.0
2366 ceph osd pool set real-tier cache_target_full_ratio 0
2367 expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
2368 ceph osd pool set real-tier cache_min_flush_age 123
2369 ceph osd pool get real-tier cache_min_flush_age | \
2370 grep 'cache_min_flush_age:[ \t]\+123'
2371 ceph osd pool set real-tier cache_min_evict_age 234
2372 ceph osd pool get real-tier cache_min_evict_age | \
2373 grep 'cache_min_evict_age:[ \t]\+234'
2374
2375 # iec vs si units
2376 ceph osd pool set real-tier target_max_objects 1K
2377 ceph osd pool get real-tier target_max_objects | grep 1000
2378 for o in target_max_bytes target_size_bytes compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2379 ceph osd pool set real-tier $o 1Ki # with i suffix
2380 val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
2381 [[ $val == 1024 ]]
2382 ceph osd pool set real-tier $o 1M # no i suffix; byte options still parse this as IEC (1048576)
2383 val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
2384 [[ $val == 1048576 ]]
2385 done
2386
2387 # this is not a tier pool
2388 ceph osd pool create fake-tier 2
2389 ceph osd pool application enable fake-tier rados
2390 wait_for_clean
2391
2392 expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
2393 expect_false ceph osd pool get fake-tier hit_set_type
2394 expect_false ceph osd pool set fake-tier hit_set_type explicit_object
2395 expect_false ceph osd pool get fake-tier hit_set_type
2396 expect_false ceph osd pool set fake-tier hit_set_type bloom
2397 expect_false ceph osd pool get fake-tier hit_set_type
2398 expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
2399 expect_false ceph osd pool set fake-tier hit_set_period 123
2400 expect_false ceph osd pool get fake-tier hit_set_period
2401 expect_false ceph osd pool set fake-tier hit_set_count 12
2402 expect_false ceph osd pool get fake-tier hit_set_count
2403 expect_false ceph osd pool set fake-tier hit_set_fpp .01
2404 expect_false ceph osd pool get fake-tier hit_set_fpp
2405
2406 expect_false ceph osd pool set fake-tier target_max_objects 123
2407 expect_false ceph osd pool get fake-tier target_max_objects
2408 expect_false ceph osd pool set fake-tier target_max_bytes 123456
2409 expect_false ceph osd pool get fake-tier target_max_bytes
2410 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
2411 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2412 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
2413 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
2414 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
2415 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2416 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
2417 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
2418 expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
2419 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2420 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
2421 expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
2422 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
2423 expect_false ceph osd pool set fake-tier cache_min_flush_age 123
2424 expect_false ceph osd pool get fake-tier cache_min_flush_age
2425 expect_false ceph osd pool set fake-tier cache_min_evict_age 234
2426 expect_false ceph osd pool get fake-tier cache_min_evict_age
2427
2428 ceph osd tier remove rbd real-tier
2429 ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
2430 ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
2431 }
2432
2433 function test_mon_osd_erasure_code()
2434 {
2435
2436 ceph osd erasure-code-profile set fooprofile a=b c=d
2437 ceph osd erasure-code-profile set fooprofile a=b c=d
2438 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2439 ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
2440 ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2441 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
2442 # make sure ruleset-foo doesn't work anymore
2443 expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
2444 ceph osd erasure-code-profile set barprofile crush-failure-domain=host
2445 # clean up
2446 ceph osd erasure-code-profile rm fooprofile
2447 ceph osd erasure-code-profile rm barprofile
2448
2449 # try weird k and m values
2450 expect_false ceph osd erasure-code-profile set badk k=1 m=1
2451 expect_false ceph osd erasure-code-profile set badk k=1 m=2
2452 expect_false ceph osd erasure-code-profile set badk k=0 m=2
2453 expect_false ceph osd erasure-code-profile set badk k=-1 m=2
2454 expect_false ceph osd erasure-code-profile set badm k=2 m=0
2455 expect_false ceph osd erasure-code-profile set badm k=2 m=-1
2456 ceph osd erasure-code-profile set good k=2 m=1
2457 ceph osd erasure-code-profile rm good
2458 }
2459
2460 function test_mon_osd_misc()
2461 {
2462 set +e
2463
2464 # expect error about missing 'pool' argument
2465 ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
2466
2467 # expect error about unused argument foo
2468 ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
2469
2470 # expect "not in range" for invalid overload percentage
2471 ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
2472
2473 set -e
2474
2475 local old_bytes_per_osd=$(ceph config get mgr mon_reweight_min_bytes_per_osd)
2476 local old_pgs_per_osd=$(ceph config get mgr mon_reweight_min_pgs_per_osd)
2477 # otherwise ceph-mgr complains like:
2478 # Error EDOM: Refusing to reweight: we only have 5372 kb used across all osds!
2479 # Error EDOM: Refusing to reweight: we only have 20 PGs across 3 osds!
2480 ceph config set mgr mon_reweight_min_bytes_per_osd 0
2481 ceph config set mgr mon_reweight_min_pgs_per_osd 0
2482 ceph osd reweight-by-utilization 110
2483 ceph osd reweight-by-utilization 110 .5
2484 expect_false ceph osd reweight-by-utilization 110 0
2485 expect_false ceph osd reweight-by-utilization 110 -0.1
2486 ceph osd test-reweight-by-utilization 110 .5 --no-increasing
2487 ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
2488 expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
2489 expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
2490 ceph osd reweight-by-pg 110
2491 ceph osd test-reweight-by-pg 110 .5
2492 ceph osd reweight-by-pg 110 rbd
2493 ceph osd reweight-by-pg 110 .5 rbd
2494 expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
2495 # restore the setting
2496 ceph config set mgr mon_reweight_min_bytes_per_osd $old_bytes_per_osd
2497 ceph config set mgr mon_reweight_min_pgs_per_osd $old_pgs_per_osd
2498 }
2499
2500 function test_admin_heap_profiler()
2501 {
2502 do_test=1
2503 set +e
2504 # expect 'heap' commands to be correctly parsed
2505 ceph tell osd.0 heap stats 2>$TMPFILE
2506 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2507 echo "tcmalloc not enabled; skip heap profiler test"
2508 do_test=0
2509 fi
2510 set -e
2511
2512 [[ $do_test -eq 0 ]] && return 0
2513
2514 $SUDO ceph tell osd.0 heap start_profiler
2515 $SUDO ceph tell osd.0 heap dump
2516 $SUDO ceph tell osd.0 heap stop_profiler
2517 $SUDO ceph tell osd.0 heap release
2518 }
2519
2520 function test_osd_bench()
2521 {
2522 # test osd bench limits
2523 # As we should not rely on defaults (as they may change over time),
2524 # let's inject some values and perform some simple tests
2525 # max iops: 10 # i.e. at most 100 IOs over the 10-second duration
2526 # max throughput: 10485760 # 10MB/s
2527 # max block size: 2097152 # 2MB
2528 # duration: 10 # 10 seconds
2529
2530 local args="\
2531 --osd-bench-duration 10 \
2532 --osd-bench-max-block-size 2097152 \
2533 --osd-bench-large-size-max-throughput 10485760 \
2534 --osd-bench-small-size-max-iops 10"
2535 ceph tell osd.0 injectargs ${args## }
2536
2537 # anything with a bs larger than 2097152 must fail
2538 expect_false ceph tell osd.0 bench 1 2097153
2539 # but a block size equal to the configured maximum (2097152) must succeed
2540 ceph tell osd.0 bench 1 2097152
2541
2542 # we assume 1MB as a large bs; anything lower is a small bs
2543 # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
2544 # max count: 409600 (bytes)
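# worked example of the small-bs cap, using the limits injected above:
#   max_count = osd-bench-small-size-max-iops * osd-bench-duration * bs
#             = 10 * 10 * 4096 = 409600 bytes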
2545
2546 # more than max count must not be allowed
2547 expect_false ceph tell osd.0 bench 409601 4096
2548 # but 409600 must succeed
2549 ceph tell osd.0 bench 409600 4096
2550
2551 # for a large bs, we are limited by throughput.
2552 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
2553 # the max count will be (10MB * 10s) = 100MB
2554 # max count: 104857600 (bytes)
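# worked example of the large-bs cap, using the limits injected above:
#   max_count = osd-bench-large-size-max-throughput * osd-bench-duration
#             = 10485760 * 10 = 104857600 bytes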
2555
2556 # more than max count must not be allowed
2557 expect_false ceph tell osd.0 bench 104857601 2097152
2558 # up to max count must be allowed
2559 ceph tell osd.0 bench 104857600 2097152
2560 }
2561
2562 function test_osd_negative_filestore_merge_threshold()
2563 {
2564 $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
2565 expect_config_value "osd.0" "filestore_merge_threshold" -1
2566 }
2567
2568 function test_mon_tell()
2569 {
2570 for m in mon.a mon.b; do
2571 ceph tell $m sessions
2572 ceph_watch_start debug audit
2573 ceph tell $m sessions
2574 ceph_watch_wait "${m} \[DBG\] from.*cmd='sessions' args=\[\]: dispatch"
2575 done
2576 expect_false ceph tell mon.foo version
2577 }
2578
2579 function test_mon_ping()
2580 {
2581 ceph ping mon.a
2582 ceph ping mon.b
2583 expect_false ceph ping mon.foo
2584
2585 ceph ping mon.\*
2586 }
2587
2588 function test_mon_deprecated_commands()
2589 {
2590 # current DEPRECATED commands are marked with FLAG(DEPRECATED)
2591 #
2592 # Testing should be accomplished by setting
2593 # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
2594 # each one of these commands.
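# For example, `ceph config-key list` is flagged DEPRECATED, so with the flag
# injected below it should be rejected as obsolete (ENOTSUP/EOPNOTSUPP).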
2595
2596 ceph tell mon.* injectargs '--mon-debug-deprecated-as-obsolete'
2597 expect_false ceph config-key list 2> $TMPFILE
2598 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2599
2600 ceph tell mon.* injectargs '--no-mon-debug-deprecated-as-obsolete'
2601 }
2602
2603 function test_mon_cephdf_commands()
2604 {
2605 # ceph df detail:
2606 # pool section:
2607 # RAW USED: the approximate raw space consumed by each pool, out of the raw total
2608
2609 ceph osd pool create cephdf_for_test 1 1 replicated
2610 ceph osd pool application enable cephdf_for_test rados
2611 ceph osd pool set cephdf_for_test size 2
2612
2613 dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
2614 rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
2615
2616 # wait for update
2617 for i in `seq 1 10`; do
2618 rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
2619 sleep 1
2620 done
2621 # "rados ls" goes straight to the osd, but "ceph df" is served by the mon, so we
2622 # need to sync the mon with the osd
2623 flush_pg_stats
2624 local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
2625 stored=`ceph df detail --format=json | jq "$jq_filter.stored * 2"`
2626 stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
2627
2628 ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
2629 rm ./cephdf_for_test
2630
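# With a size-2 replicated pool, raw usage should be twice the logical STORED
# value; stored was already multiplied by 2 above, so it should equal stored_raw.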
2631 expect_false test $stored != $stored_raw
2632 }
2633
2634 function test_mon_pool_application()
2635 {
2636 ceph osd pool create app_for_test 16
2637
2638 ceph osd pool application enable app_for_test rbd
2639 expect_false ceph osd pool application enable app_for_test rgw
2640 ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
2641 ceph osd pool ls detail | grep "application rbd,rgw"
2642 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2643
2644 expect_false ceph osd pool application set app_for_test cephfs key value
2645 ceph osd pool application set app_for_test rbd key1 value1
2646 ceph osd pool application set app_for_test rbd key2 value2
2647 ceph osd pool application set app_for_test rgw key1 value1
2648 ceph osd pool application get app_for_test rbd key1 | grep 'value1'
2649 ceph osd pool application get app_for_test rbd key2 | grep 'value2'
2650 ceph osd pool application get app_for_test rgw key1 | grep 'value1'
2651
2652 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
2653
2654 ceph osd pool application rm app_for_test rgw key1
2655 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
2656 ceph osd pool application rm app_for_test rbd key2
2657 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
2658 ceph osd pool application rm app_for_test rbd key1
2659 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2660 ceph osd pool application rm app_for_test rbd key1 # should be idempotent
2661
2662 expect_false ceph osd pool application disable app_for_test rgw
2663 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2664 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
2665 ceph osd pool ls detail | grep "application rbd"
2666 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
2667
2668 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2669 ceph osd pool ls detail | grep -v "application "
2670 ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
2671
2672 ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
2673 }
2674
2675 function test_mon_tell_help_command()
2676 {
2677 ceph tell mon.a help | grep sync_force
2678 ceph tell mon.a -h | grep sync_force
2679 ceph tell mon.a config -h | grep 'config diff get'
2680
2681 # wrong target
2682 expect_false ceph tell mon.zzz help
2683 }
2684
2685 function test_mon_stdin_stdout()
2686 {
2687 echo foo | ceph config-key set test_key -i -
2688 ceph config-key get test_key -o - | grep -c foo | grep -q 1
2689 }
2690
2691 function test_osd_tell_help_command()
2692 {
2693 ceph tell osd.1 help
2694 expect_false ceph tell osd.100 help
2695 }
2696
2697 function test_osd_compact()
2698 {
2699 ceph tell osd.1 compact
2700 $SUDO ceph daemon osd.1 compact
2701 }
2702
2703 function test_mds_tell_help_command()
2704 {
2705 local FS_NAME=cephfs
2706 if ! mds_exists ; then
2707 echo "Skipping test, no MDS found"
2708 return
2709 fi
2710
2711 remove_all_fs
2712 ceph osd pool create fs_data 16
2713 ceph osd pool create fs_metadata 16
2714 ceph fs new $FS_NAME fs_metadata fs_data
2715 wait_mds_active $FS_NAME
2716
2717
2718 ceph tell mds.a help
2719 expect_false ceph tell mds.z help
2720
2721 remove_all_fs
2722 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
2723 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
2724 }
2725
2726 function test_mgr_tell()
2727 {
2728 ceph tell mgr version
2729 }
2730
2731 function test_mgr_devices()
2732 {
2733 ceph device ls
2734 expect_false ceph device info doesnotexist
2735 expect_false ceph device get-health-metrics doesnotexist
2736 }
2737
2738 function test_per_pool_scrub_status()
2739 {
2740 ceph osd pool create noscrub_pool 16
2741 ceph osd pool create noscrub_pool2 16
2742 ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
2743 ceph -s --format json | \
2744 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2745 expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
2746 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2747 expect_false grep -q "Pool .* has .*scrub.* flag"
2748 ceph health detail | \
2749 expect_false grep -q "Pool .* has .*scrub.* flag"
2750
2751 ceph osd pool set noscrub_pool noscrub 1
2752 ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
2753 ceph -s --format json | \
2754 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2755 expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
2756 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2757 expect_true grep -q "Pool noscrub_pool has noscrub flag"
2758 ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
2759
2760 ceph osd pool set noscrub_pool nodeep-scrub 1
2761 ceph osd pool set noscrub_pool2 nodeep-scrub 1
2762 ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
2763 ceph -s --format json | \
2764 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2765 expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
2766 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2767 expect_true grep -q "Pool noscrub_pool has noscrub flag"
2768 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2769 expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
2770 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2771 expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
2772 ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
2773 ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
2774 ceph health detail | expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
2775
2776 ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it
2777 ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it
2778 }
2779
2780 #
2781 # New tests should be added to the TESTS array below
2782 #
2783 # Individual tests may be run using the '-t <testname>' argument
2784 # The user can specify '-t <testname>' as many times as she wants
2785 #
2786 # Tests will be run in order presented in the TESTS array, or in
2787 # the order specified by the '-t <testname>' options.
2788 #
2789 # '-l' will list all the available test names
2790 # '-h' will show usage
2791 #
2792 # The test maintains backward compatibility: not specifying arguments
2793 # will run all tests following the order they appear in the TESTS array.
2794 #
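# Example invocations (paths illustrative):
#   ./test.sh                          # run every test, in TESTS order
#   ./test.sh -l                       # list the available test names
#   ./test.sh -t mon_osd -t osd_bench  # run only the named tests, in order
#   ./test.sh --test-osd --no-sanity-check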
2795
2796 set +x
2797 MON_TESTS+=" mon_injectargs"
2798 MON_TESTS+=" mon_injectargs_SI"
2799 for i in `seq 9`; do
2800 MON_TESTS+=" tiering_$i";
2801 done
2802 MON_TESTS+=" auth"
2803 MON_TESTS+=" auth_profiles"
2804 MON_TESTS+=" mon_misc"
2805 MON_TESTS+=" mon_mon"
2806 MON_TESTS+=" mon_osd"
2807 MON_TESTS+=" mon_config_key"
2808 MON_TESTS+=" mon_crush"
2809 MON_TESTS+=" mon_osd_create_destroy"
2810 MON_TESTS+=" mon_osd_pool"
2811 MON_TESTS+=" mon_osd_pool_quota"
2812 MON_TESTS+=" mon_pg"
2813 MON_TESTS+=" mon_osd_pool_set"
2814 MON_TESTS+=" mon_osd_tiered_pool_set"
2815 MON_TESTS+=" mon_osd_erasure_code"
2816 MON_TESTS+=" mon_osd_misc"
2817 MON_TESTS+=" mon_tell"
2818 MON_TESTS+=" mon_ping"
2819 MON_TESTS+=" mon_deprecated_commands"
2820 MON_TESTS+=" mon_caps"
2821 MON_TESTS+=" mon_cephdf_commands"
2822 MON_TESTS+=" mon_tell_help_command"
2823 MON_TESTS+=" mon_stdin_stdout"
2824
2825 OSD_TESTS+=" osd_bench"
2826 OSD_TESTS+=" osd_negative_filestore_merge_threshold"
2827 OSD_TESTS+=" tiering_agent"
2828 OSD_TESTS+=" admin_heap_profiler"
2829 OSD_TESTS+=" osd_tell_help_command"
2830 OSD_TESTS+=" osd_compact"
2831 OSD_TESTS+=" per_pool_scrub_status"
2832
2833 MDS_TESTS+=" mds_tell"
2834 MDS_TESTS+=" mon_mds"
2835 MDS_TESTS+=" mon_mds_metadata"
2836 MDS_TESTS+=" mds_tell_help_command"
2837
2838 MGR_TESTS+=" mgr_tell"
2839 MGR_TESTS+=" mgr_devices"
2840
2841 TESTS+=$MON_TESTS
2842 TESTS+=$OSD_TESTS
2843 TESTS+=$MDS_TESTS
2844 TESTS+=$MGR_TESTS
2845
2846 #
2847 # "main" follows
2848 #
2849
2850 function list_tests()
2851 {
2852 echo "AVAILABLE TESTS"
2853 for i in $TESTS; do
2854 echo " $i"
2855 done
2856 }
2857
2858 function usage()
2859 {
2860 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2861 }
2862
2863 tests_to_run=()
2864
2865 sanity_check=true
2866
2867 while [[ $# -gt 0 ]]; do
2868 opt=$1
2869
2870 case "$opt" in
2871 "-l" )
2872 do_list=1
2873 ;;
2874 "--asok-does-not-need-root" )
2875 SUDO=""
2876 ;;
2877 "--no-sanity-check" )
2878 sanity_check=false
2879 ;;
2880 "--test-mon" )
2881 tests_to_run+="$MON_TESTS"
2882 ;;
2883 "--test-osd" )
2884 tests_to_run+="$OSD_TESTS"
2885 ;;
2886 "--test-mds" )
2887 tests_to_run+="$MDS_TESTS"
2888 ;;
2889 "--test-mgr" )
2890 tests_to_run+="$MGR_TESTS"
2891 ;;
2892 "-t" )
2893 shift
2894 if [[ -z "$1" ]]; then
2895 echo "missing argument to '-t'"
2896 usage ;
2897 exit 1
2898 fi
2899 tests_to_run+=" $1"
2900 ;;
2901 "-h" )
2902 usage ;
2903 exit 0
2904 ;;
2905 esac
2906 shift
2907 done
2908
2909 if [[ $do_list -eq 1 ]]; then
2910 list_tests ;
2911 exit 0
2912 fi
2913
2914 ceph osd pool create rbd 16
2915
2916 if test -z "$tests_to_run" ; then
2917 tests_to_run="$TESTS"
2918 fi
2919
2920 if $sanity_check ; then
2921 wait_no_osd_down
2922 fi
2923 for i in $tests_to_run; do
2924 if $sanity_check ; then
2925 check_no_osd_down
2926 fi
2927 set -x
2928 test_${i}
2929 set +x
2930 done
2931 if $sanity_check ; then
2932 check_no_osd_down
2933 fi
2934
2935 set -x
2936
2937 echo OK