1 #!/usr/bin/env bash
2 # -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
3 # vim: ts=8 sw=8 ft=bash smarttab
4 set -x
5
6 source $(dirname $0)/../../standalone/ceph-helpers.sh
7
8 set -e
9 set -o functrace
10 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
11 SUDO=${SUDO:-sudo}
12 export CEPH_DEV=1
13
14 function check_no_osd_down()
15 {
16 ! ceph osd dump | grep ' down '
17 }
18
19 function wait_no_osd_down()
20 {
21 max_run=300
22 for i in $(seq 1 $max_run) ; do
23 if ! check_no_osd_down ; then
24 echo "waiting for osd(s) to come back up ($i/$max_run)"
25 sleep 1
26 else
27 break
28 fi
29 done
30 check_no_osd_down
31 }
32
33 function expect_false()
34 {
35 set -x
36 if "$@"; then return 1; else return 0; fi
37 }
38
39 function expect_true()
40 {
41 set -x
42 if ! "$@"; then return 1; else return 0; fi
43 }
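#
# Illustrative usage of the helpers above (not executed here): expect_true
# asserts that the wrapped command succeeds, expect_false that it fails.
#
#   expect_true ceph health
#   expect_false ceph osd find xyz
#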
44
45 TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
46 trap "rm -fr $TEMP_DIR" 0
47
48 TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
49
50 #
51 # retry_eagain max cmd args ...
52 #
53 # retry cmd args ... if it exits on error and its output contains the
54 # string EAGAIN, at most $max times
55 #
56 function retry_eagain()
57 {
58 local max=$1
59 shift
60 local status
61 local tmpfile=$TEMP_DIR/retry_eagain.$$
62 local count
63 for count in $(seq 1 $max) ; do
64 status=0
65 "$@" > $tmpfile 2>&1 || status=$?
66 if test $status = 0 ||
67 ! grep --quiet EAGAIN $tmpfile ; then
68 break
69 fi
70 sleep 1
71 done
72 if test $count = $max ; then
73     echo retried with non-zero exit status, $max times: "$@" >&2
74 fi
75 cat $tmpfile
76 rm $tmpfile
77 return $status
78 }
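#
# Illustrative usage (not executed here): retry a command up to 5 times
# while its output keeps reporting EAGAIN.
#
#   retry_eagain 5 ceph tell osd.0 version
#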
79
80 #
81 # map_enxio_to_eagain cmd arg ...
82 #
83 # add EAGAIN to the output of cmd arg ... if the output contains
84 # ENXIO.
85 #
86 function map_enxio_to_eagain()
87 {
88 local status=0
89 local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
90
91 "$@" > $tmpfile 2>&1 || status=$?
92 if test $status != 0 &&
93 grep --quiet ENXIO $tmpfile ; then
94 echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
95 fi
96 cat $tmpfile
97 rm $tmpfile
98 return $status
99 }
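#
# Illustrative usage (not executed here): combined with retry_eagain above,
# an ENXIO from a briefly unavailable OSD is retried like a transient EAGAIN.
#
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version
#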
100
101 function check_response()
102 {
103 expected_string=$1
104 retcode=$2
105 expected_retcode=$3
106 if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
107 echo "return code invalid: got $retcode, expected $expected_retcode" >&2
108 exit 1
109 fi
110
111 if ! grep --quiet -- "$expected_string" $TMPFILE ; then
112 echo "Didn't find $expected_string in output" >&2
113 cat $TMPFILE >&2
114 exit 1
115 fi
116 }
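#
# Illustrative usage (not executed here): capture the command's error output
# in $TMPFILE, then assert on the error text (and optionally the return code).
#
#   $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
#   check_response "(22) Invalid argument"
#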
117
118 function get_config_value_or_die()
119 {
120 local target config_opt raw val
121
122 target=$1
123 config_opt=$2
124
125 raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
126 if [[ $? -ne 0 ]]; then
127 echo "error obtaining config opt '$config_opt' from '$target': $raw"
128 exit 1
129 fi
130
131 raw=`echo $raw | sed -e 's/[{} "]//g'`
132 val=`echo $raw | cut -f2 -d:`
133
134 echo "$val"
135 return 0
136 }
137
138 function expect_config_value()
139 {
140 local target config_opt expected_val val
141 target=$1
142 config_opt=$2
143 expected_val=$3
144
145 val=$(get_config_value_or_die $target $config_opt)
146
147 if [[ "$val" != "$expected_val" ]]; then
148 echo "expected '$expected_val', got '$val'"
149 exit 1
150 fi
151 }
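#
# Illustrative usage (not executed here): read an option through the daemon's
# admin socket and assert its current value.
#
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
#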
152
153 function ceph_watch_start()
154 {
155   local watch_opt=--watch
156
157 if [ -n "$1" ]; then
158     watch_opt=--watch-$1
159 if [ -n "$2" ]; then
160       watch_opt+=" --watch-channel $2"
161 fi
162 fi
163
164 CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
165   ceph $watch_opt > $CEPH_WATCH_FILE &
166 CEPH_WATCH_PID=$!
167
168 # wait until the "ceph" client is connected and receiving
169 # log messages from monitor
170 for i in `seq 3`; do
171 grep -q "cluster" $CEPH_WATCH_FILE && break
172 sleep 1
173 done
174 }
175
176 function ceph_watch_wait()
177 {
178 local regexp=$1
179 local timeout=30
180
181 if [ -n "$2" ]; then
182 timeout=$2
183 fi
184
185 for i in `seq ${timeout}`; do
186 grep -q "$regexp" $CEPH_WATCH_FILE && break
187 sleep 1
188 done
189
190 kill $CEPH_WATCH_PID
191
192 if ! grep "$regexp" $CEPH_WATCH_FILE; then
193 echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
194 cat $CEPH_WATCH_FILE >&2
195 return 1
196 fi
197 }
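#
# Illustrative usage (not executed here): start a background 'ceph --watch'
# reader, generate some cluster log activity, then wait for it to show up.
#
#   ceph_watch_start
#   ceph log "hello world"
#   ceph_watch_wait "hello world"
#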
198
199 function test_mon_injectargs()
200 {
201 ceph tell osd.0 injectargs --no-osd_enable_op_tracker
202 ceph tell osd.0 config get osd_enable_op_tracker | grep false
203 ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500'
204 ceph tell osd.0 config get osd_enable_op_tracker | grep true
205 ceph tell osd.0 config get osd_op_history_duration | grep 500
206 ceph tell osd.0 injectargs --no-osd_enable_op_tracker
207 ceph tell osd.0 config get osd_enable_op_tracker | grep false
208 ceph tell osd.0 injectargs -- --osd_enable_op_tracker
209 ceph tell osd.0 config get osd_enable_op_tracker | grep true
210 ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600'
211 ceph tell osd.0 config get osd_enable_op_tracker | grep true
212 ceph tell osd.0 config get osd_op_history_duration | grep 600
213
214 ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200'
215 ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200
216
217 ceph tell osd.0 injectargs -- '--mon_probe_timeout 2'
218 ceph tell osd.0 config get mon_probe_timeout | grep 2
219
220 ceph tell osd.0 injectargs -- '--mon-lease 6'
221 ceph tell osd.0 config get mon_lease | grep 6
222
223 # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
224 expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 2> $TMPFILE || return 1
225 check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
226
227 expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
228 ceph tell osd.0 injectargs -- '--osd_op_history_duration'
229
230 }
231
232 function test_mon_injectargs_SI()
233 {
234 # Test SI units during injectargs and 'config set'
235   # We only aim to verify that the units are parsed correctly; we do not
236   # intend to test whether the options being set actually expect SI units
237   # to be passed.
238   # Keep in mind that all integer-based options that are not byte-based
239   # (i.e., INT, LONG, U32, U64) accept SI unit modifiers and are parsed in
240   # base 10.
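  # For example (illustrative, mirroring the assertions below): for a non-byte
  # option such as mon_pg_warn_min_objects,
  #   10K -> 10000 and 1G -> 1000000000,
  # while a bogus suffix such as 10F is rejected with EINVAL.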
241 initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
242 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
243 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
244 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
245 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
246 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
247 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
248 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
249 check_response "(22) Invalid argument"
250 # now test with injectargs
251 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
252 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
253 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
254 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
255 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
256 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
257 expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
258 expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
259 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
260 }
261
262 function test_mon_injectargs_IEC()
263 {
264 # Test IEC units during injectargs and 'config set'
265   # We only aim to verify that the units are parsed correctly; we do not
266   # intend to test whether the options being set actually expect IEC units
267   # to be passed.
268   # Keep in mind that all byte-based integer options (i.e., INT, LONG,
269   # U32, U64) accept IEC unit modifiers, as well as SI unit modifiers
270   # (for backwards compatibility and convenience), and are parsed in
271   # base 2.
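  # For example (illustrative, mirroring the assertions below): for a byte-based
  # option such as mon_data_size_warn,
  #   15G -> 16106127360 (15 * 2^30) and 16Gi -> 17179869184 (16 * 2^30),
  # while a bogus suffix such as 10F is rejected with EINVAL.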
272 initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
273 $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
274 expect_config_value "mon.a" "mon_data_size_warn" 15000000000
275 $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
276 expect_config_value "mon.a" "mon_data_size_warn" 16106127360
277 $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
278 expect_config_value "mon.a" "mon_data_size_warn" 17179869184
279 $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
280 check_response "(22) Invalid argument"
281 # now test with injectargs
282 ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
283 expect_config_value "mon.a" "mon_data_size_warn" 15000000000
284 ceph tell mon.a injectargs '--mon_data_size_warn 15G'
285 expect_config_value "mon.a" "mon_data_size_warn" 16106127360
286 ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
287 expect_config_value "mon.a" "mon_data_size_warn" 17179869184
288 expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
289 $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
290 }
291
292 function test_tiering_agent()
293 {
294 local slow=slow_eviction
295 local fast=fast_eviction
296 ceph osd pool create $slow 1 1
297 ceph osd pool application enable $slow rados
298 ceph osd pool create $fast 1 1
299 ceph osd tier add $slow $fast
300 ceph osd tier cache-mode $fast writeback
301 ceph osd tier set-overlay $slow $fast
302 ceph osd pool set $fast hit_set_type bloom
303 rados -p $slow put obj1 /etc/group
304 ceph osd pool set $fast target_max_objects 1
305 ceph osd pool set $fast hit_set_count 1
306 ceph osd pool set $fast hit_set_period 5
307 # wait for the object to be evicted from the cache
308 local evicted
309 evicted=false
310 for i in `seq 1 300` ; do
311 if ! rados -p $fast ls | grep obj1 ; then
312 evicted=true
313 break
314 fi
315 sleep 1
316 done
317 $evicted # assert
318 # the object is proxy read and promoted to the cache
319 rados -p $slow get obj1 - >/dev/null
320 # wait for the promoted object to be evicted again
321 evicted=false
322 for i in `seq 1 300` ; do
323 if ! rados -p $fast ls | grep obj1 ; then
324 evicted=true
325 break
326 fi
327 sleep 1
328 done
329 $evicted # assert
330 ceph osd tier remove-overlay $slow
331 ceph osd tier remove $slow $fast
332 ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
333 ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
334 }
335
336 function test_tiering_1()
337 {
338 # tiering
339 ceph osd pool create slow 2
340 ceph osd pool application enable slow rados
341 ceph osd pool create slow2 2
342 ceph osd pool application enable slow2 rados
343 ceph osd pool create cache 2
344 ceph osd pool create cache2 2
345 ceph osd tier add slow cache
346 ceph osd tier add slow cache2
347 expect_false ceph osd tier add slow2 cache
348 # forward and proxy are removed/deprecated
349 expect_false ceph osd tier cache-mode cache forward
350 expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
351 expect_false ceph osd tier cache-mode cache proxy
352 expect_false ceph osd tier cache-mode cache proxy --yes-i-really-mean-it
353 # test some state transitions
354 ceph osd tier cache-mode cache writeback
355 expect_false ceph osd tier cache-mode cache readonly
356 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
357 ceph osd tier cache-mode cache readproxy
358 ceph osd tier cache-mode cache none
359 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
360 ceph osd tier cache-mode cache none
361 ceph osd tier cache-mode cache writeback
362 expect_false ceph osd tier cache-mode cache none
363 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
364 # test with dirty objects in the tier pool
365 # tier pool currently set to 'writeback'
366 rados -p cache put /etc/passwd /etc/passwd
367 flush_pg_stats
368 # 1 dirty object in pool 'cache'
369 ceph osd tier cache-mode cache readproxy
370 expect_false ceph osd tier cache-mode cache none
371 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
372 ceph osd tier cache-mode cache writeback
373 # remove object from tier pool
374 rados -p cache rm /etc/passwd
375 rados -p cache cache-flush-evict-all
376 flush_pg_stats
377 # no dirty objects in pool 'cache'
378 ceph osd tier cache-mode cache readproxy
379 ceph osd tier cache-mode cache none
380 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
381 TRIES=0
382 while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
383 do
384 grep 'currently creating pgs' $TMPFILE
385 TRIES=$(( $TRIES + 1 ))
386 test $TRIES -ne 60
387 sleep 3
388 done
389 expect_false ceph osd pool set cache pg_num 4
390 ceph osd tier cache-mode cache none
391 ceph osd tier set-overlay slow cache
392 expect_false ceph osd tier set-overlay slow cache2
393 expect_false ceph osd tier remove slow cache
394 ceph osd tier remove-overlay slow
395 ceph osd tier set-overlay slow cache2
396 ceph osd tier remove-overlay slow
397 ceph osd tier remove slow cache
398 ceph osd tier add slow2 cache
399 expect_false ceph osd tier set-overlay slow cache
400 ceph osd tier set-overlay slow2 cache
401 ceph osd tier remove-overlay slow2
402 ceph osd tier remove slow2 cache
403 ceph osd tier remove slow cache2
404
405 # make sure a non-empty pool fails
406 rados -p cache2 put /etc/passwd /etc/passwd
407 while ! ceph df | grep cache2 | grep ' 1 ' ; do
408 echo waiting for pg stats to flush
409 sleep 2
410 done
411 expect_false ceph osd tier add slow cache2
412 ceph osd tier add slow cache2 --force-nonempty
413 ceph osd tier remove slow cache2
414
415 ceph osd pool ls | grep cache2
416 ceph osd pool ls -f json-pretty | grep cache2
417 ceph osd pool ls detail | grep cache2
418 ceph osd pool ls detail -f json-pretty | grep cache2
419
420 ceph osd pool delete slow slow --yes-i-really-really-mean-it
421 ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
422 ceph osd pool delete cache cache --yes-i-really-really-mean-it
423 ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
424 }
425
426 function test_tiering_2()
427 {
428 # make sure we can't clobber snapshot state
429 ceph osd pool create snap_base 2
430 ceph osd pool application enable snap_base rados
431 ceph osd pool create snap_cache 2
432 ceph osd pool mksnap snap_cache snapname
433 expect_false ceph osd tier add snap_base snap_cache
434 ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
435 ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
436 }
437
438 function test_tiering_3()
439 {
440 # make sure we can't create snapshot on tier
441 ceph osd pool create basex 2
442 ceph osd pool application enable basex rados
443 ceph osd pool create cachex 2
444 ceph osd tier add basex cachex
445   expect_false ceph osd pool mksnap cachex snapname
446 ceph osd tier remove basex cachex
447 ceph osd pool delete basex basex --yes-i-really-really-mean-it
448 ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
449 }
450
451 function test_tiering_4()
452 {
453 # make sure we can't create an ec pool tier
454 ceph osd pool create eccache 2 2 erasure
455 expect_false ceph osd set-require-min-compat-client bobtail
456 ceph osd pool create repbase 2
457 ceph osd pool application enable repbase rados
458 expect_false ceph osd tier add repbase eccache
459 ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
460 ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
461 }
462
463 function test_tiering_5()
464 {
465 # convenient add-cache command
466 ceph osd pool create slow 2
467 ceph osd pool application enable slow rados
468 ceph osd pool create cache3 2
469 ceph osd tier add-cache slow cache3 1024000
470 ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
471 ceph osd tier remove slow cache3 2> $TMPFILE || true
472 check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
473 ceph osd tier remove-overlay slow
474 ceph osd tier remove slow cache3
475 ceph osd pool ls | grep cache3
476 ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
477 ! ceph osd pool ls | grep cache3 || exit 1
478 ceph osd pool delete slow slow --yes-i-really-really-mean-it
479 }
480
481 function test_tiering_6()
482 {
483   # check whether add-cache works
484 ceph osd pool create datapool 2
485 ceph osd pool application enable datapool rados
486 ceph osd pool create cachepool 2
487 ceph osd tier add-cache datapool cachepool 1024000
488 ceph osd tier cache-mode cachepool writeback
489 rados -p datapool put object /etc/passwd
490 rados -p cachepool stat object
491 rados -p cachepool cache-flush object
492 rados -p datapool stat object
493 ceph osd tier remove-overlay datapool
494 ceph osd tier remove datapool cachepool
495 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
496 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
497 }
498
499 function test_tiering_7()
500 {
501 # protection against pool removal when used as tiers
502 ceph osd pool create datapool 2
503 ceph osd pool application enable datapool rados
504 ceph osd pool create cachepool 2
505 ceph osd tier add-cache datapool cachepool 1024000
506 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
507 check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
508 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
509 check_response "EBUSY: pool 'datapool' has tiers cachepool"
510 ceph osd tier remove-overlay datapool
511 ceph osd tier remove datapool cachepool
512 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
513 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
514 }
515
516 function test_tiering_8()
517 {
518 ## check health check
519 ceph osd set notieragent
520 ceph osd pool create datapool 2
521 ceph osd pool application enable datapool rados
522 ceph osd pool create cache4 2
523 ceph osd tier add-cache datapool cache4 1024000
524 ceph osd tier cache-mode cache4 writeback
525 tmpfile=$(mktemp|grep tmp)
526 dd if=/dev/zero of=$tmpfile bs=4K count=1
527 ceph osd pool set cache4 target_max_objects 200
528 ceph osd pool set cache4 target_max_bytes 1000000
529 rados -p cache4 put foo1 $tmpfile
530 rados -p cache4 put foo2 $tmpfile
531 rm -f $tmpfile
532 flush_pg_stats
533 ceph df | grep datapool | grep ' 2 '
534 ceph osd tier remove-overlay datapool
535 ceph osd tier remove datapool cache4
536 ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
537 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
538 ceph osd unset notieragent
539 }
540
541 function test_tiering_9()
542 {
543 # make sure 'tier remove' behaves as we expect
544 # i.e., removing a tier from a pool that's not its base pool only
545 # results in a 'pool foo is now (or already was) not a tier of bar'
546 #
547 ceph osd pool create basepoolA 2
548 ceph osd pool application enable basepoolA rados
549 ceph osd pool create basepoolB 2
550 ceph osd pool application enable basepoolB rados
551 poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
552 poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
553
554 ceph osd pool create cache5 2
555 ceph osd pool create cache6 2
556 ceph osd tier add basepoolA cache5
557 ceph osd tier add basepoolB cache6
558 ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
559 ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
560 ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
561 ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
562
563 ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
564 ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
565 ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
566 ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
567
568 ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
569 ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
570
571 ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
572 ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
573 ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
574 ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
575 }
576
577 function test_auth()
578 {
579 expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
580 expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
581 ceph auth add client.xx mon 'allow *' osd "allow *"
582 ceph auth export client.xx >client.xx.keyring
583 ceph auth add client.xx -i client.xx.keyring
584 rm -f client.xx.keyring
585 ceph auth list | grep client.xx
586 ceph auth ls | grep client.xx
587 ceph auth get client.xx | grep caps | grep mon
588 ceph auth get client.xx | grep caps | grep osd
589 ceph auth get-key client.xx
590 ceph auth print-key client.xx
591 ceph auth print_key client.xx
592 ceph auth caps client.xx osd "allow rw"
593 expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
594 ceph auth get client.xx | grep osd | grep "allow rw"
595 ceph auth caps client.xx mon 'allow command "osd tree"'
596 ceph auth export | grep client.xx
597 ceph auth export -o authfile
598 ceph auth import -i authfile 2>$TMPFILE
599 check_response "imported keyring"
600
601 ceph auth export -o authfile2
602 diff authfile authfile2
603 rm authfile authfile2
604 ceph auth del client.xx
605 expect_false ceph auth get client.xx
606
607 # (almost) interactive mode
608 echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
609 ceph auth get client.xx
610 # script mode
611 echo 'auth del client.xx' | ceph
612 expect_false ceph auth get client.xx
613 }
614
615 function test_auth_profiles()
616 {
617 ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
618 mgr 'allow profile read-only'
619 ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
620 mgr 'allow profile read-write'
621 ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
622
623 ceph auth export > client.xx.keyring
624
625 # read-only is allowed all read-only commands (auth excluded)
626 ceph -n client.xx-profile-ro -k client.xx.keyring status
627 ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
628 ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
629 ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
630 # read-only gets access denied for rw commands or auth commands
631 ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
632 check_response "EACCES: access denied"
633 ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
634 check_response "EACCES: access denied"
635 ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
636 check_response "EACCES: access denied"
637
638 # read-write is allowed for all read-write commands (except auth)
639 ceph -n client.xx-profile-rw -k client.xx.keyring status
640 ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
641 ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
642 ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
643 ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
644 ceph -n client.xx-profile-rw -k client.xx.keyring log foo
645 ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
646 ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
647 # read-write gets access denied for auth commands
648 ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
649 check_response "EACCES: access denied"
650
651 # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
652 ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
653 ceph -n client.xx-profile-rd -k client.xx.keyring auth export
654 ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
655 ceph -n client.xx-profile-rd -k client.xx.keyring status
656 ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
657 check_response "EACCES: access denied"
658 ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
659 check_response "EACCES: access denied"
660 # read-only 'mon' subsystem commands are allowed
661 ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
662 # but read-write 'mon' commands are not
663 ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
664 check_response "EACCES: access denied"
665 ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
666 check_response "EACCES: access denied"
667 ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
668 check_response "EACCES: access denied"
669 ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
670 check_response "EACCES: access denied"
671
672 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
673 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
674
675 # add a new role-definer with the existing role-definer
676 ceph -n client.xx-profile-rd -k client.xx.keyring \
677 auth add client.xx-profile-rd2 mon 'allow profile role-definer'
678 ceph -n client.xx-profile-rd -k client.xx.keyring \
679 auth export > client.xx.keyring.2
680 # remove old role-definer using the new role-definer
681 ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
682 auth del client.xx-profile-rd
683 # remove the remaining role-definer with admin
684 ceph auth del client.xx-profile-rd2
685 rm -f client.xx.keyring client.xx.keyring.2
686 }
687
688 function test_mon_caps()
689 {
690 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
691 chmod +r $TEMP_DIR/ceph.client.bug.keyring
692 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
693 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
694
695 # pass --no-mon-config since we are looking for the permission denied error
696 rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
697 cat $TMPFILE
698 check_response "Permission denied"
699
700 rm -rf $TEMP_DIR/ceph.client.bug.keyring
701 ceph auth del client.bug
702 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
703 chmod +r $TEMP_DIR/ceph.client.bug.keyring
704 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
705 ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
706 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
707 rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
708 check_response "Permission denied"
709 }
710
711 function test_mon_misc()
712 {
713 # with and without verbosity
714 ceph osd dump | grep '^epoch'
715 ceph --concise osd dump | grep '^epoch'
716
717 ceph osd df | grep 'MIN/MAX VAR'
718
719 # df
720 ceph df > $TMPFILE
721 grep RAW $TMPFILE
722 grep -v DIRTY $TMPFILE
723 ceph df detail > $TMPFILE
724 grep DIRTY $TMPFILE
725 ceph df --format json > $TMPFILE
726 grep 'total_bytes' $TMPFILE
727 grep -v 'dirty' $TMPFILE
728 ceph df detail --format json > $TMPFILE
729 grep 'rd_bytes' $TMPFILE
730 grep 'dirty' $TMPFILE
731 ceph df --format xml | grep '<total_bytes>'
732 ceph df detail --format xml | grep '<rd_bytes>'
733
734 ceph fsid
735 ceph health
736 ceph health detail
737 ceph health --format json-pretty
738 ceph health detail --format xml-pretty
739
740 ceph time-sync-status
741
742 ceph node ls
743 for t in mon osd mds mgr ; do
744 ceph node ls $t
745 done
746
747 ceph_watch_start
748 mymsg="this is a test log message $$.$(date)"
749 ceph log "$mymsg"
750 ceph log last | grep "$mymsg"
751 ceph log last 100 | grep "$mymsg"
752 ceph_watch_wait "$mymsg"
753
754 ceph mgr dump
755 ceph mgr module ls
756 ceph mgr module enable restful
757 expect_false ceph mgr module enable foodne
758 ceph mgr module enable foodne --force
759 ceph mgr module disable foodne
760 ceph mgr module disable foodnebizbangbash
761
762 ceph mon metadata a
763 ceph mon metadata
764 ceph mon count-metadata ceph_version
765 ceph mon versions
766
767 ceph mgr metadata
768 ceph mgr versions
769 ceph mgr count-metadata ceph_version
770
771 ceph versions
772
773 ceph node ls
774 }
775
776 function check_mds_active()
777 {
778 fs_name=$1
779 ceph fs get $fs_name | grep active
780 }
781
782 function wait_mds_active()
783 {
784 fs_name=$1
785 max_run=300
786 for i in $(seq 1 $max_run) ; do
787 if ! check_mds_active $fs_name ; then
788 echo "waiting for an active MDS daemon ($i/$max_run)"
789 sleep 5
790 else
791 break
792 fi
793 done
794 check_mds_active $fs_name
795 }
796
797 function get_mds_gids()
798 {
799 fs_name=$1
800 ceph fs get $fs_name --format=json | python3 -c "import json; import sys; print(' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()]))"
801 }
802
803 function fail_all_mds()
804 {
805 fs_name=$1
806 ceph fs set $fs_name cluster_down true
807 mds_gids=$(get_mds_gids $fs_name)
808 for mds_gid in $mds_gids ; do
809 ceph mds fail $mds_gid
810 done
811 if check_mds_active $fs_name ; then
812 echo "An active MDS remains, something went wrong"
813 ceph fs get $fs_name
814 exit -1
815 fi
816
817 }
818
819 function remove_all_fs()
820 {
821 existing_fs=$(ceph fs ls --format=json | python3 -c "import json; import sys; print(' '.join([fs['name'] for fs in json.load(sys.stdin)]))")
822 for fs_name in $existing_fs ; do
823 echo "Removing fs ${fs_name}..."
824 fail_all_mds $fs_name
825 echo "Removing existing filesystem '${fs_name}'..."
826 ceph fs rm $fs_name --yes-i-really-mean-it
827 echo "Removed '${fs_name}'."
828 done
829 }
830
831 # So that tests requiring MDS can skip if one is not configured
832 # in the cluster at all
833 function mds_exists()
834 {
835 ceph auth ls | grep "^mds"
836 }
837
838 # some of the commands are just not idempotent.
839 function without_test_dup_command()
840 {
841 if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
842 $@
843 else
844 local saved=${CEPH_CLI_TEST_DUP_COMMAND}
845 unset CEPH_CLI_TEST_DUP_COMMAND
846 $@
847     CEPH_CLI_TEST_DUP_COMMAND=$saved
848 fi
849 }
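#
# Illustrative usage (not executed here): run a non-idempotent command with
# CEPH_CLI_TEST_DUP_COMMAND temporarily unset so it is not issued twice.
#
#   without_test_dup_command ceph tell mds.0 respawn
#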
850
851 function test_mds_tell()
852 {
853 local FS_NAME=cephfs
854 if ! mds_exists ; then
855 echo "Skipping test, no MDS found"
856 return
857 fi
858
859 remove_all_fs
860 ceph osd pool create fs_data 16
861 ceph osd pool create fs_metadata 16
862 ceph fs new $FS_NAME fs_metadata fs_data
863 wait_mds_active $FS_NAME
864
865 # Test injectargs by GID
866 old_mds_gids=$(get_mds_gids $FS_NAME)
867 echo Old GIDs: $old_mds_gids
868
869 for mds_gid in $old_mds_gids ; do
870 ceph tell mds.$mds_gid injectargs "--debug-mds 20"
871 done
872 expect_false ceph tell mds.a injectargs mds_max_file_recover -1
873
874 # Test respawn by rank
875 without_test_dup_command ceph tell mds.0 respawn
876 new_mds_gids=$old_mds_gids
877   while [ "$new_mds_gids" == "$old_mds_gids" ] ; do
878 sleep 5
879 new_mds_gids=$(get_mds_gids $FS_NAME)
880 done
881 echo New GIDs: $new_mds_gids
882
883 # Test respawn by ID
884 without_test_dup_command ceph tell mds.a respawn
885 new_mds_gids=$old_mds_gids
886   while [ "$new_mds_gids" == "$old_mds_gids" ] ; do
887 sleep 5
888 new_mds_gids=$(get_mds_gids $FS_NAME)
889 done
890 echo New GIDs: $new_mds_gids
891
892 remove_all_fs
893 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
894 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
895 }
896
897 function test_mon_mds()
898 {
899 local FS_NAME=cephfs
900 remove_all_fs
901
902 ceph osd pool create fs_data 16
903 ceph osd pool create fs_metadata 16
904 ceph fs new $FS_NAME fs_metadata fs_data
905
906 ceph fs set $FS_NAME cluster_down true
907 ceph fs set $FS_NAME cluster_down false
908
909 ceph mds compat rm_incompat 4
910 ceph mds compat rm_incompat 4
911
912 # We don't want any MDSs to be up, their activity can interfere with
913 # the "current_epoch + 1" checking below if they're generating updates
914 fail_all_mds $FS_NAME
915
916 ceph mds compat show
917 ceph fs dump
918 ceph fs get $FS_NAME
919 for mds_gid in $(get_mds_gids $FS_NAME) ; do
920       ceph mds metadata $mds_gid
921 done
922 ceph mds metadata
923 ceph mds versions
924 ceph mds count-metadata os
925
926 # XXX mds fail, but how do you undo it?
927 mdsmapfile=$TEMP_DIR/mdsmap.$$
928 current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
929 [ -s $mdsmapfile ]
930 rm $mdsmapfile
931
932 ceph osd pool create data2 16
933 ceph osd pool create data3 16
934 data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
935 data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
936 ceph fs add_data_pool cephfs $data2_pool
937 ceph fs add_data_pool cephfs $data3_pool
938 ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
939 check_response "Error ENOENT"
940 ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
941 check_response "Error ENOENT"
942 ceph fs rm_data_pool cephfs $data2_pool
943 ceph fs rm_data_pool cephfs $data3_pool
944 ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
945 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
946 ceph fs set cephfs max_mds 4
947 ceph fs set cephfs max_mds 3
948 ceph fs set cephfs max_mds 256
949 expect_false ceph fs set cephfs max_mds 257
950 ceph fs set cephfs max_mds 4
951 ceph fs set cephfs max_mds 256
952 expect_false ceph fs set cephfs max_mds 257
953 expect_false ceph fs set cephfs max_mds asdf
954 expect_false ceph fs set cephfs inline_data true
955 ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
956 ceph fs set cephfs inline_data yes --yes-i-really-really-mean-it
957 ceph fs set cephfs inline_data 1 --yes-i-really-really-mean-it
958 expect_false ceph fs set cephfs inline_data --yes-i-really-really-mean-it
959 ceph fs set cephfs inline_data false
960 ceph fs set cephfs inline_data no
961 ceph fs set cephfs inline_data 0
962 expect_false ceph fs set cephfs inline_data asdf
963 ceph fs set cephfs max_file_size 1048576
964 expect_false ceph fs set cephfs max_file_size 123asdf
965
966 expect_false ceph fs set cephfs allow_new_snaps
967 ceph fs set cephfs allow_new_snaps true
968 ceph fs set cephfs allow_new_snaps 0
969 ceph fs set cephfs allow_new_snaps false
970 ceph fs set cephfs allow_new_snaps no
971 expect_false ceph fs set cephfs allow_new_snaps taco
972
973 # we should never be able to add EC pools as data or metadata pools
974 # create an ec-pool...
975 ceph osd pool create mds-ec-pool 16 16 erasure
976 set +e
977 ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
978 check_response 'erasure-code' $? 22
979 set -e
980 ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
981 data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
982 metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
983
984 fail_all_mds $FS_NAME
985
986 set +e
987 # Check that rmfailed requires confirmation
988 expect_false ceph mds rmfailed 0
989 ceph mds rmfailed 0 --yes-i-really-mean-it
990 set -e
991
992 # Check that `fs new` is no longer permitted
993 expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
994
995 # Check that 'fs reset' runs
996 ceph fs reset $FS_NAME --yes-i-really-mean-it
997
998 # Check that creating a second FS fails by default
999 ceph osd pool create fs_metadata2 16
1000 ceph osd pool create fs_data2 16
1001 set +e
1002 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
1003 set -e
1004
1005 # Check that setting enable_multiple enables creation of second fs
1006 ceph fs flag set enable_multiple true --yes-i-really-mean-it
1007 ceph fs new cephfs2 fs_metadata2 fs_data2
1008
1009 # Clean up multi-fs stuff
1010 fail_all_mds cephfs2
1011 ceph fs rm cephfs2 --yes-i-really-mean-it
1012 ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
1013 ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
1014
1015 fail_all_mds $FS_NAME
1016
1017 # Clean up to enable subsequent fs new tests
1018 ceph fs rm $FS_NAME --yes-i-really-mean-it
1019
1020 set +e
1021 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1022 check_response 'erasure-code' $? 22
1023 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1024 check_response 'erasure-code' $? 22
1025 ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
1026 check_response 'erasure-code' $? 22
1027 set -e
1028
1029   # ... now create a cache tier in front of the EC pool...
1030 ceph osd pool create mds-tier 2
1031 ceph osd tier add mds-ec-pool mds-tier
1032 ceph osd tier set-overlay mds-ec-pool mds-tier
1033 tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
1034
1035 # Use of a readonly tier should be forbidden
1036 ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
1037 set +e
1038 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1039 check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
1040 set -e
1041
1042 # Use of a writeback tier should enable FS creation
1043 ceph osd tier cache-mode mds-tier writeback
1044 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
1045
1046 # While a FS exists using the tiered pools, I should not be allowed
1047 # to remove the tier
1048 set +e
1049 ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
1050 check_response 'in use by CephFS' $? 16
1051 ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
1052 check_response 'in use by CephFS' $? 16
1053 set -e
1054
1055 fail_all_mds $FS_NAME
1056 ceph fs rm $FS_NAME --yes-i-really-mean-it
1057
1058 # ... but we should be forbidden from using the cache pool in the FS directly.
1059 set +e
1060 ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
1061 check_response 'in use as a cache tier' $? 22
1062 ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
1063 check_response 'in use as a cache tier' $? 22
1064 ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
1065 check_response 'in use as a cache tier' $? 22
1066 set -e
1067
1068 # Clean up tier + EC pools
1069 ceph osd tier remove-overlay mds-ec-pool
1070 ceph osd tier remove mds-ec-pool mds-tier
1071
1072 # Create a FS using the 'cache' pool now that it's no longer a tier
1073 ceph fs new $FS_NAME fs_metadata mds-tier --force
1074
1075 # We should be forbidden from using this pool as a tier now that
1076 # it's in use for CephFS
1077 set +e
1078 ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
1079 check_response 'in use by CephFS' $? 16
1080 set -e
1081
1082 fail_all_mds $FS_NAME
1083 ceph fs rm $FS_NAME --yes-i-really-mean-it
1084
1085 # We should be permitted to use an EC pool with overwrites enabled
1086 # as the data pool...
1087 ceph osd pool set mds-ec-pool allow_ec_overwrites true
1088 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1089 fail_all_mds $FS_NAME
1090 ceph fs rm $FS_NAME --yes-i-really-mean-it
1091
1092 # ...but not as the metadata pool
1093 set +e
1094 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1095 check_response 'erasure-code' $? 22
1096 set -e
1097
1098 ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
1099
1100 # Create a FS and check that we can subsequently add a cache tier to it
1101 ceph fs new $FS_NAME fs_metadata fs_data --force
1102
1103 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1104 ceph osd tier add fs_metadata mds-tier
1105 ceph osd tier cache-mode mds-tier writeback
1106 ceph osd tier set-overlay fs_metadata mds-tier
1107
1108 # Removing tier should be permitted because the underlying pool is
1109 # replicated (#11504 case)
1110 ceph osd tier cache-mode mds-tier readproxy
1111 ceph osd tier remove-overlay fs_metadata
1112 ceph osd tier remove fs_metadata mds-tier
1113 ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
1114
1115 # Clean up FS
1116 fail_all_mds $FS_NAME
1117 ceph fs rm $FS_NAME --yes-i-really-mean-it
1118
1119
1120
1121 ceph mds stat
1122 # ceph mds tell mds.a getmap
1123 # ceph mds rm
1124 # ceph mds rmfailed
1125 # ceph mds set_state
1126
1127 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
1128 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
1129 }
1130
1131 function test_mon_mds_metadata()
1132 {
1133 local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
1134 test "$nmons" -gt 0
1135
1136 ceph fs dump |
1137 sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
1138 while read gid id rank; do
1139 ceph mds metadata ${gid} | grep '"hostname":'
1140 ceph mds metadata ${id} | grep '"hostname":'
1141 ceph mds metadata ${rank} | grep '"hostname":'
1142
1143 local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
1144 test "$n" -eq "$nmons"
1145 done
1146
1147 expect_false ceph mds metadata UNKNOWN
1148 }
1149
1150 function test_mon_mon()
1151 {
1152 # print help message
1153 ceph --help mon
1154 # no mon add/remove
1155 ceph mon dump
1156 ceph mon getmap -o $TEMP_DIR/monmap.$$
1157 [ -s $TEMP_DIR/monmap.$$ ]
1158
1159 # ceph mon tell
1160 first=$(ceph mon dump -f json | jq -r '.mons[0].name')
1161 ceph tell mon.$first mon_status
1162
1163 # test mon features
1164 ceph mon feature ls
1165 ceph mon feature set kraken --yes-i-really-mean-it
1166 expect_false ceph mon feature set abcd
1167 expect_false ceph mon feature set abcd --yes-i-really-mean-it
1168 }
1169
1170 function test_mon_priority_and_weight()
1171 {
1172 for i in 0 1 65535; do
1173 ceph mon set-weight a $i
1174 w=$(ceph mon dump --format=json-pretty 2>/dev/null | jq '.mons[0].weight')
1175 [[ "$w" == "$i" ]]
1176 done
1177
1178 for i in -1 65536; do
1179 expect_false ceph mon set-weight a $i
1180 done
1181 }
1182
1183 function gen_secrets_file()
1184 {
1185   # let's assume we can have the following types
1186 # all - generates both cephx and lockbox, with mock dm-crypt key
1187 # cephx - only cephx
1188 # no_cephx - lockbox and dm-crypt, no cephx
1189 # no_lockbox - dm-crypt and cephx, no lockbox
1190 # empty - empty file
1191 # empty_json - correct json, empty map
1192 # bad_json - bad json :)
1193 #
1194 local t=$1
1195 if [[ -z "$t" ]]; then
1196 t="all"
1197 fi
1198
1199 fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
1200 echo $fn
1201 if [[ "$t" == "empty" ]]; then
1202 return 0
1203 fi
1204
1205 echo "{" > $fn
1206 if [[ "$t" == "bad_json" ]]; then
1207 echo "asd: ; }" >> $fn
1208 return 0
1209 elif [[ "$t" == "empty_json" ]]; then
1210 echo "}" >> $fn
1211 return 0
1212 fi
1213
1214 cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
1215 lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
1216 dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
1217
1218 if [[ "$t" == "all" ]]; then
1219 echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
1220 elif [[ "$t" == "cephx" ]]; then
1221 echo "$cephx_secret" >> $fn
1222 elif [[ "$t" == "no_cephx" ]]; then
1223 echo "$lb_secret,$dmcrypt_key" >> $fn
1224 elif [[ "$t" == "no_lockbox" ]]; then
1225 echo "$cephx_secret,$dmcrypt_key" >> $fn
1226 else
1227 echo "unknown gen_secrets_file() type \'$fn\'"
1228 return 1
1229 fi
1230 echo "}" >> $fn
1231 return 0
1232 }
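#
# Illustrative usage (not executed here): generate a cephx-only secrets file
# and feed it to 'ceph osd new'.
#
#   secrets=$(gen_secrets_file "cephx")
#   ceph osd new $(uuidgen) -i $secrets
#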
1233
1234 function test_mon_osd_create_destroy()
1235 {
1236 ceph osd new 2>&1 | grep 'EINVAL'
1237 ceph osd new '' -1 2>&1 | grep 'EINVAL'
1238 ceph osd new '' 10 2>&1 | grep 'EINVAL'
1239
1240 old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1241
1242 old_osds=$(ceph osd ls)
1243 num_osds=$(ceph osd ls | wc -l)
1244
1245 uuid=$(uuidgen)
1246 id=$(ceph osd new $uuid 2>/dev/null)
1247
1248 for i in $old_osds; do
1249 [[ "$i" != "$id" ]]
1250 done
1251
1252 ceph osd find $id
1253
1254 id2=`ceph osd new $uuid 2>/dev/null`
1255
1256 [[ $id2 == $id ]]
1257
1258 ceph osd new $uuid $id
1259
1260 id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1261 ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
1262
1263 uuid2=$(uuidgen)
1264 id2=$(ceph osd new $uuid2)
1265 ceph osd find $id2
1266 [[ "$id2" != "$id" ]]
1267
1268 ceph osd new $uuid $id2 2>&1 | grep EEXIST
1269 ceph osd new $uuid2 $id2
1270
1271 # test with secrets
1272 empty_secrets=$(gen_secrets_file "empty")
1273 empty_json=$(gen_secrets_file "empty_json")
1274 all_secrets=$(gen_secrets_file "all")
1275 cephx_only=$(gen_secrets_file "cephx")
1276 no_cephx=$(gen_secrets_file "no_cephx")
1277 no_lockbox=$(gen_secrets_file "no_lockbox")
1278 bad_json=$(gen_secrets_file "bad_json")
1279
1280 # empty secrets should be idempotent
1281 new_id=$(ceph osd new $uuid $id -i $empty_secrets)
1282 [[ "$new_id" == "$id" ]]
1283
1284 # empty json, thus empty secrets
1285 new_id=$(ceph osd new $uuid $id -i $empty_json)
1286 [[ "$new_id" == "$id" ]]
1287
1288 ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
1289
1290 ceph osd rm $id
1291 ceph osd rm $id2
1292 ceph osd setmaxosd $old_maxosd
1293
1294 ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
1295 ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
1296
1297 osds=$(ceph osd ls)
1298 id=$(ceph osd new $uuid -i $all_secrets)
1299 for i in $osds; do
1300 [[ "$i" != "$id" ]]
1301 done
1302
1303 ceph osd find $id
1304
1305 # validate secrets and dm-crypt are set
1306 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1307 s=$(cat $all_secrets | jq '.cephx_secret')
1308 [[ $k == $s ]]
1309 k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
1310 jq '.key')
1311 s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
1312 [[ $k == $s ]]
1313 ceph config-key exists dm-crypt/osd/$uuid/luks
1314
1315 osds=$(ceph osd ls)
1316 id2=$(ceph osd new $uuid2 -i $cephx_only)
1317 for i in $osds; do
1318 [[ "$i" != "$id2" ]]
1319 done
1320
1321 ceph osd find $id2
1322 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1323 s=$(cat $all_secrets | jq '.cephx_secret')
1324 [[ $k == $s ]]
1325 expect_false ceph auth get-key client.osd-lockbox.$uuid2
1326 expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
1327
1328 ceph osd destroy osd.$id2 --yes-i-really-mean-it
1329 ceph osd destroy $id2 --yes-i-really-mean-it
1330 ceph osd find $id2
1331 expect_false ceph auth get-key osd.$id2
1332 ceph osd dump | grep osd.$id2 | grep destroyed
1333
1334 id3=$id2
1335 uuid3=$(uuidgen)
1336 ceph osd new $uuid3 $id3 -i $all_secrets
1337 ceph osd dump | grep osd.$id3 | expect_false grep destroyed
1338 ceph auth get-key client.osd-lockbox.$uuid3
1339 ceph auth get-key osd.$id3
1340 ceph config-key exists dm-crypt/osd/$uuid3/luks
1341
1342 ceph osd purge-new osd.$id3 --yes-i-really-mean-it
1343 expect_false ceph osd find $id2
1344 expect_false ceph auth get-key osd.$id2
1345 expect_false ceph auth get-key client.osd-lockbox.$uuid3
1346 expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
1347 ceph osd purge osd.$id3 --yes-i-really-mean-it
1348 ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent
1349
1350 ceph osd purge osd.$id --yes-i-really-mean-it
1351 ceph osd purge 123456 --yes-i-really-mean-it
1352 expect_false ceph osd find $id
1353 expect_false ceph auth get-key osd.$id
1354 expect_false ceph auth get-key client.osd-lockbox.$uuid
1355 expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
1356
1357 rm $empty_secrets $empty_json $all_secrets $cephx_only \
1358 $no_cephx $no_lockbox $bad_json
1359
1360 for i in $(ceph osd ls); do
1361 [[ "$i" != "$id" ]]
1362 [[ "$i" != "$id2" ]]
1363 [[ "$i" != "$id3" ]]
1364 done
1365
1366 [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
1367 ceph osd setmaxosd $old_maxosd
1368
1369 }
1370
1371 function test_mon_config_key()
1372 {
1373 key=asdfasdfqwerqwreasdfuniquesa123df
1374 ceph config-key list | grep -c $key | grep 0
1375 ceph config-key get $key | grep -c bar | grep 0
1376 ceph config-key set $key bar
1377 ceph config-key get $key | grep bar
1378 ceph config-key list | grep -c $key | grep 1
1379 ceph config-key dump | grep $key | grep bar
1380 ceph config-key rm $key
1381 expect_false ceph config-key get $key
1382 ceph config-key list | grep -c $key | grep 0
1383 ceph config-key dump | grep -c $key | grep 0
1384 }
1385
1386 function test_mon_osd()
1387 {
1388 #
1389 # osd blacklist
1390 #
1391 bl=192.168.0.1:0/1000
1392 ceph osd blacklist add $bl
1393 ceph osd blacklist ls | grep $bl
1394 ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
1395 ceph osd dump --format=json-pretty | grep $bl
1396 ceph osd dump | grep $bl
1397 ceph osd blacklist rm $bl
1398 ceph osd blacklist ls | expect_false grep $bl
1399
1400 bl=192.168.0.1
1401 # test without nonce, invalid nonce
1402 ceph osd blacklist add $bl
1403 ceph osd blacklist ls | grep $bl
1404 ceph osd blacklist rm $bl
1405 ceph osd blacklist ls | expect_false grep $bl
1406 expect_false "ceph osd blacklist $bl/-1"
1407 expect_false "ceph osd blacklist $bl/foo"
1408
1409 # test with wrong address
1410 expect_false "ceph osd blacklist 1234.56.78.90/100"
1411
1412 # Test `clear`
1413 ceph osd blacklist add $bl
1414 ceph osd blacklist ls | grep $bl
1415 ceph osd blacklist clear
1416 ceph osd blacklist ls | expect_false grep $bl
1417
1418 #
1419 # osd crush
1420 #
1421 ceph osd crush reweight-all
1422 ceph osd crush tunables legacy
1423 ceph osd crush show-tunables | grep argonaut
1424 ceph osd crush tunables bobtail
1425 ceph osd crush show-tunables | grep bobtail
1426 ceph osd crush tunables firefly
1427 ceph osd crush show-tunables | grep firefly
1428
1429 ceph osd crush set-tunable straw_calc_version 0
1430 ceph osd crush get-tunable straw_calc_version | grep 0
1431 ceph osd crush set-tunable straw_calc_version 1
1432 ceph osd crush get-tunable straw_calc_version | grep 1
1433
1434 #
1435 # require-min-compat-client
1436 expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
1437 ceph osd set-require-min-compat-client luminous
1438 ceph osd get-require-min-compat-client | grep luminous
1439 ceph osd dump | grep 'require_min_compat_client luminous'
1440
1441 #
1442 # osd scrub
1443 #
1444
1445 # blocking
1446 ceph osd scrub 0 --block
1447 ceph osd deep-scrub 0 --block
1448
1449 # how do I tell when these are done?
1450 ceph osd scrub 0
1451 ceph osd deep-scrub 0
1452 ceph osd repair 0
1453
1454 # pool scrub, force-recovery/backfill
1455 pool_names=`rados lspools`
1456 for pool_name in $pool_names
1457 do
1458 ceph osd pool scrub $pool_name
1459 ceph osd pool deep-scrub $pool_name
1460 ceph osd pool repair $pool_name
1461 ceph osd pool force-recovery $pool_name
1462 ceph osd pool cancel-force-recovery $pool_name
1463 ceph osd pool force-backfill $pool_name
1464 ceph osd pool cancel-force-backfill $pool_name
1465 done
1466
1467 for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \
1468 norebalance norecover notieragent
1469 do
1470 ceph osd set $f
1471 ceph osd unset $f
1472 done
1473 expect_false ceph osd set bogus
1474 expect_false ceph osd unset bogus
1475 for f in sortbitwise recover_deletes require_jewel_osds \
1476 require_kraken_osds
1477 do
1478 expect_false ceph osd set $f
1479 expect_false ceph osd unset $f
1480 done
1481 ceph osd require-osd-release octopus
1482 # can't lower
1483 expect_false ceph osd require-osd-release nautilus
1484 expect_false ceph osd require-osd-release mimic
1485 expect_false ceph osd require-osd-release luminous
1486 # these are no-ops but should succeed.
1487
1488 ceph osd set noup
1489 ceph osd down 0
1490 ceph osd dump | grep 'osd.0 down'
1491 ceph osd unset noup
1492 max_run=1000
1493 for ((i=0; i < $max_run; i++)); do
1494 if ! ceph osd dump | grep 'osd.0 up'; then
1495 echo "waiting for osd.0 to come back up ($i/$max_run)"
1496 sleep 1
1497 else
1498 break
1499 fi
1500 done
1501 ceph osd dump | grep 'osd.0 up'
1502
1503 ceph osd dump | grep 'osd.0 up'
1504 # ceph osd find expects the OsdName, so both ints and osd.n should work.
1505 ceph osd find 1
1506 ceph osd find osd.1
1507 expect_false ceph osd find osd.xyz
1508 expect_false ceph osd find xyz
1509 expect_false ceph osd find 0.1
1510 ceph --format plain osd find 1 # falls back to json-pretty
1511 if [ `uname` == Linux ]; then
1512 ceph osd metadata 1 | grep 'distro'
1513 ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
1514 fi
1515 ceph osd out 0
1516 ceph osd dump | grep 'osd.0.*out'
1517 ceph osd in 0
1518 ceph osd dump | grep 'osd.0.*in'
1519 ceph osd find 0
1520
1521 ceph osd info 0
1522 ceph osd info osd.0
1523 expect_false ceph osd info osd.xyz
1524 expect_false ceph osd info xyz
1525 expect_false ceph osd info 42
1526 expect_false ceph osd info osd.42
1527
1528 ceph osd info
1529 info_json=$(ceph osd info --format=json | jq -cM '.')
1530 dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
1531 [[ "${info_json}" == "${dump_json}" ]]
1532
1533 info_json=$(ceph osd info 0 --format=json | jq -cM '.')
1534 dump_json=$(ceph osd dump --format=json | \
1535 jq -cM '.osds[] | select(.osd == 0)')
1536 [[ "${info_json}" == "${dump_json}" ]]
1537
1538 info_plain="$(ceph osd info)"
1539 dump_plain="$(ceph osd dump | grep '^osd')"
1540 [[ "${info_plain}" == "${dump_plain}" ]]
1541
1542 info_plain="$(ceph osd info 0)"
1543 dump_plain="$(ceph osd dump | grep '^osd.0')"
1544 [[ "${info_plain}" == "${dump_plain}" ]]
1545
1546 ceph osd add-nodown 0 1
1547 ceph health detail | grep 'NODOWN'
1548 ceph osd rm-nodown 0 1
1549 ! ceph health detail | grep 'NODOWN'
1550
1551 ceph osd out 0 # so we can mark it as noin later
1552 ceph osd add-noin 0
1553 ceph health detail | grep 'NOIN'
1554 ceph osd rm-noin 0
1555 ! ceph health detail | grep 'NOIN'
1556 ceph osd in 0
1557
1558 ceph osd add-noout 0
1559 ceph health detail | grep 'NOOUT'
1560 ceph osd rm-noout 0
1561 ! ceph health detail | grep 'NOOUT'
1562
1563 # test osd id parse
1564 expect_false ceph osd add-noup 797er
1565 expect_false ceph osd add-nodown u9uwer
1566 expect_false ceph osd add-noin 78~15
1567
1568 expect_false ceph osd rm-noup 1234567
1569 expect_false ceph osd rm-nodown fsadf7
1570 expect_false ceph osd rm-noout 790-fd
1571
1572 ids=`ceph osd ls-tree default`
1573 for osd in $ids
1574 do
1575 ceph osd add-nodown $osd
1576 ceph osd add-noout $osd
1577 done
1578 ceph -s | grep 'NODOWN'
1579 ceph -s | grep 'NOOUT'
1580 ceph osd rm-nodown any
1581 ceph osd rm-noout all
1582 ! ceph -s | grep 'NODOWN'
1583 ! ceph -s | grep 'NOOUT'
1584
1585 # test crush node flags
1586 ceph osd add-noup osd.0
1587 ceph osd add-nodown osd.0
1588 ceph osd add-noin osd.0
1589 ceph osd add-noout osd.0
1590 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
1591 ceph osd rm-noup osd.0
1592 ceph osd rm-nodown osd.0
1593 ceph osd rm-noin osd.0
1594 ceph osd rm-noout osd.0
1595 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
1596
1597 ceph osd crush add-bucket foo host root=default
1598 ceph osd add-noup foo
1599 ceph osd add-nodown foo
1600 ceph osd add-noin foo
1601 ceph osd add-noout foo
1602 ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
1603 ceph osd rm-noup foo
1604 ceph osd rm-nodown foo
1605 ceph osd rm-noin foo
1606 ceph osd rm-noout foo
1607 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
1608 ceph osd add-noup foo
1609 ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
1610 ceph osd crush rm foo
1611 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
1612
1613 ceph osd set-group noup osd.0
1614 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1615 ceph osd set-group noup,nodown osd.0
1616 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1617 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1618 ceph osd set-group noup,nodown,noin osd.0
1619 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1620 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1621 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1622 ceph osd set-group noup,nodown,noin,noout osd.0
1623 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1624 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1625 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1626 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1627 ceph osd unset-group noup osd.0
1628 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
1629 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1630 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1631 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1632 ceph osd unset-group noup,nodown osd.0
1633 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown'
1634 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1635 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1636 ceph osd unset-group noup,nodown,noin osd.0
1637 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin'
1638 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1639 ceph osd unset-group noup,nodown,noin,noout osd.0
1640 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1641
1642 ceph osd set-group noup,nodown,noin,noout osd.0 osd.1
1643 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1644 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1645 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1646 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1647 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup'
1648 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown'
1649 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin'
1650 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout'
1651 ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1
1652 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1653 ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1654
1655 ceph osd set-group noup all
1656 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1657 ceph osd unset-group noup all
1658 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
1659
1660 # crush node flags via set-group/unset-group
1661 ceph osd crush add-bucket foo host root=default
1662 ceph osd set-group noup foo
1663 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1664 ceph osd set-group noup,nodown foo
1665 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1666 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1667 ceph osd set-group noup,nodown,noin foo
1668 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1669 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1670 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1671 ceph osd set-group noup,nodown,noin,noout foo
1672 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1673 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1674 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1675 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1676
1677 ceph osd unset-group noup foo
1678 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup'
1679 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1680 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1681 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1682 ceph osd unset-group noup,nodown foo
1683 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown'
1684 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1685 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1686 ceph osd unset-group noup,nodown,noin foo
1687 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin'
1688 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1689 ceph osd unset-group noup,nodown,noin,noout foo
1690 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout'
1691
1692 ceph osd set-group noin,noout foo
1693 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1694 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1695 ceph osd unset-group noin,noout foo
1696 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
1697
1698 ceph osd set-group noup,nodown,noin,noout foo
1699 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1700 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1701 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1702 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1703 ceph osd crush rm foo
1704 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
1705
1706 # test device class flags
1707 osd_0_device_class=$(ceph osd crush get-device-class osd.0)
1708 ceph osd set-group noup $osd_0_device_class
1709 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1710 ceph osd set-group noup,nodown $osd_0_device_class
1711 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1712 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1713 ceph osd set-group noup,nodown,noin $osd_0_device_class
1714 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1715 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1716 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1717 ceph osd set-group noup,nodown,noin,noout $osd_0_device_class
1718 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1719 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1720 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1721 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1722
1723 ceph osd unset-group noup $osd_0_device_class
1724 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup'
1725 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1726 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1727 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1728 ceph osd unset-group noup,nodown $osd_0_device_class
1729 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown'
1730 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1731 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1732 ceph osd unset-group noup,nodown,noin $osd_0_device_class
1733 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin'
1734 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1735 ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class
1736 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout'
1737
1738 ceph osd set-group noin,noout $osd_0_device_class
1739 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1740 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1741 ceph osd unset-group noin,noout $osd_0_device_class
1742 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep $osd_0_device_class
1743
1744 # make sure mark out preserves weight
1745 ceph osd reweight osd.0 .5
1746 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1747 ceph osd out 0
1748 ceph osd in 0
1749 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1750
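# fetch the osdmap, then exercise getmaxosd/setmaxosd; shrinking max_osd below an existing osd id must fail with EBUSY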
1751 ceph osd getmap -o $f
1752 [ -s $f ]
1753 rm $f
1754 save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1755 [ "$save" -gt 0 ]
1756 ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
1757 ceph osd setmaxosd 10
1758 ceph osd getmaxosd | grep 'max_osd = 10'
1759 ceph osd setmaxosd $save
1760 ceph osd getmaxosd | grep "max_osd = $save"
1761
1762 for id in `ceph osd ls` ; do
1763 retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
1764 done
1765
1766 ceph osd rm 0 2>&1 | grep 'EBUSY'
1767
1768 local old_osds=$(echo $(ceph osd ls))
1769 id=`ceph osd create`
1770 ceph osd find $id
1771 ceph osd lost $id --yes-i-really-mean-it
1772 expect_false ceph osd setmaxosd $id
1773 local new_osds=$(echo $(ceph osd ls))
1774 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1775 ceph osd rm $id
1776 done
1777
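# creating an osd twice with the same uuid must return the same id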
1778 uuid=`uuidgen`
1779 id=`ceph osd create $uuid`
1780 id2=`ceph osd create $uuid`
1781 [ "$id" = "$id2" ]
1782 ceph osd rm $id
1783
1784 ceph --help osd
1785
1786 # reset max_osd ($id has cycled back to the original $save here, since the temporary osds were removed).
1787 ceph osd setmaxosd $id
1788 ceph osd getmaxosd | grep "max_osd = $save"
1789 local max_osd=$save
1790
1791 ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
1792 ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
1793
1794 id=`ceph osd create $uuid $max_osd`
1795 [ "$id" = "$max_osd" ]
1796 ceph osd find $id
1797 max_osd=$((max_osd + 1))
1798 ceph osd getmaxosd | grep "max_osd = $max_osd"
1799
1800 ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
1801 ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
1802 id2=`ceph osd create $uuid`
1803 [ "$id" = "$id2" ]
1804 id2=`ceph osd create $uuid $id`
1805 [ "$id" = "$id2" ]
1806
1807 uuid=`uuidgen`
1808 local gap_start=$max_osd
1809 id=`ceph osd create $uuid $((gap_start + 100))`
1810 [ "$id" = "$((gap_start + 100))" ]
1811 max_osd=$((id + 1))
1812 ceph osd getmaxosd | grep "max_osd = $max_osd"
1813
1814 ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
1815
1816 #
1817 # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
1818 # is repeated and consumes two osd ids, not just one.
1819 #
1820 local next_osd=$gap_start
1821 id=`ceph osd create $(uuidgen)`
1822 [ "$id" = "$next_osd" ]
1823
1824 next_osd=$((id + 1))
1825 id=`ceph osd create $(uuidgen) $next_osd`
1826 [ "$id" = "$next_osd" ]
1827
1828 local new_osds=$(echo $(ceph osd ls))
1829 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1830 [ $id -ge $save ]
1831 ceph osd rm $id
1832 done
1833 ceph osd setmaxosd $save
1834
1835 ceph osd ls
1836 ceph osd pool create data 16
1837 ceph osd pool application enable data rados
1838 ceph osd lspools | grep data
1839 ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
1840 ceph osd map data foo namespace | grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
1841 ceph osd pool delete data data --yes-i-really-really-mean-it
1842
1843 ceph osd pause
1844 ceph osd dump | grep 'flags.*pauserd,pausewr'
1845 ceph osd unpause
1846
1847 ceph osd tree
1848 ceph osd tree up
1849 ceph osd tree down
1850 ceph osd tree in
1851 ceph osd tree out
1852 ceph osd tree destroyed
1853 ceph osd tree up in
1854 ceph osd tree up out
1855 ceph osd tree down in
1856 ceph osd tree down out
1857 ceph osd tree out down
1858 expect_false ceph osd tree up down
1859 expect_false ceph osd tree up destroyed
1860 expect_false ceph osd tree down destroyed
1861 expect_false ceph osd tree up down destroyed
1862 expect_false ceph osd tree in out
1863 expect_false ceph osd tree up foo
1864
1865 ceph osd metadata
1866 ceph osd count-metadata os
1867 ceph osd versions
1868
1869 ceph osd perf
1870 ceph osd blocked-by
1871
1872 ceph osd stat | grep up
1873 }
1874
1875 function test_mon_crush()
1876 {
1877 f=$TEMP_DIR/map.$$
1878 epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
1879 [ -s $f ]
1880 [ "$epoch" -gt 1 ]
1881 nextepoch=$(( $epoch + 1 ))
1882 echo epoch $epoch nextepoch $nextepoch
1883 rm -f $f.epoch
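# setcrushmap must reject an epoch ahead of the current one; passing the current epoch commits the map and reports the next epoch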
1884 expect_false ceph osd setcrushmap $nextepoch -i $f
1885 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1886 echo gotepoch $gotepoch
1887 [ "$gotepoch" -eq "$nextepoch" ]
1888 # should be idempotent
1889 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1890 echo epoch $gotepoch
1891 [ "$gotepoch" -eq "$nextepoch" ]
1892 rm $f
1893 }
1894
1895 function test_mon_osd_pool()
1896 {
1897 #
1898 # osd pool
1899 #
1900 ceph osd pool create data 16
1901 ceph osd pool application enable data rados
1902 ceph osd pool mksnap data datasnap
1903 rados -p data lssnap | grep datasnap
1904 ceph osd pool rmsnap data datasnap
1905 expect_false ceph osd pool rmsnap pool_fake snapshot
1906 ceph osd pool delete data data --yes-i-really-really-mean-it
1907
1908 ceph osd pool create data2 16
1909 ceph osd pool application enable data2 rados
1910 ceph osd pool rename data2 data3
1911 ceph osd lspools | grep data3
1912 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
1913
1914 ceph osd pool create replicated 16 16 replicated
1915 ceph osd pool create replicated 1 16 replicated
1916 ceph osd pool create replicated 16 16 # default is replicated
1917 ceph osd pool create replicated 16 # default is replicated, pgp_num = pg_num
1918 ceph osd pool application enable replicated rados
1919 # should fail because the type is not the same
1920 expect_false ceph osd pool create replicated 16 16 erasure
1921 ceph osd lspools | grep replicated
1922 ceph osd pool create ec_test 1 1 erasure
1923 ceph osd pool application enable ec_test rados
1924 set +e
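# allow_ec_overwrites can only be enabled on bluestore-backed pools, and cannot be disabled once set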
1925 ceph osd count-metadata osd_objectstore | grep 'bluestore'
1926 if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
1927 ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
1928 check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
1929 else
1930 ceph osd pool set ec_test allow_ec_overwrites true || return 1
1931 expect_false ceph osd pool set ec_test allow_ec_overwrites false
1932 fi
1933 set -e
1934 ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
1935 ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
1936
1937 # test create pool with rule
1938 ceph osd erasure-code-profile set foo foo
1939 ceph osd erasure-code-profile ls | grep foo
1940 ceph osd crush rule create-erasure foo foo
1941 ceph osd pool create erasure 16 16 erasure foo
1942 expect_false ceph osd erasure-code-profile rm foo
1943 ceph osd pool delete erasure erasure --yes-i-really-really-mean-it
1944 ceph osd crush rule rm foo
1945 ceph osd erasure-code-profile rm foo
1946
1947 # autoscale mode
1948 ceph osd pool create modeon --autoscale-mode=on
1949 ceph osd dump | grep modeon | grep 'autoscale_mode on'
1950 ceph osd pool create modewarn --autoscale-mode=warn
1951 ceph osd dump | grep modewarn | grep 'autoscale_mode warn'
1952 ceph osd pool create modeoff --autoscale-mode=off
1953 ceph osd dump | grep modeoff | grep 'autoscale_mode off'
1954 ceph osd pool delete modeon modeon --yes-i-really-really-mean-it
1955 ceph osd pool delete modewarn modewarn --yes-i-really-really-mean-it
1956 ceph osd pool delete modeoff modeoff --yes-i-really-really-mean-it
1957 }
1958
1959 function test_mon_osd_pool_quota()
1960 {
1961 #
1962 # test osd pool set/get quota
1963 #
1964
1965 # create tmp pool
1966 ceph osd pool create tmp-quota-pool 32
1967 ceph osd pool application enable tmp-quota-pool rados
1968 #
1969 # set erroneous quotas
1970 #
1971 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
1972 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
1973 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
1974 #
1975 # set valid quotas
1976 #
1977 ceph osd pool set-quota tmp-quota-pool max_bytes 10
1978 ceph osd pool set-quota tmp-quota-pool max_objects 10M
1979 #
1980 # get quotas in json-pretty format
1981 #
1982 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1983 grep '"quota_max_objects":.*10000000'
1984 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1985 grep '"quota_max_bytes":.*10'
1986 #
1987 # get quotas
1988 #
1989 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B'
1990 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects'
1991 #
1992 # set valid quotas with unit prefix
1993 #
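# for byte quotas the K and Ki suffixes are both treated as binary, so 10K and 10Ki both mean 10240 bytes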
1994 ceph osd pool set-quota tmp-quota-pool max_bytes 10K
1995 #
1996 # get quotas
1997 #
1998 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
1999 #
2000 # set valid quotas with unit prefix
2001 #
2002 ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
2003 #
2004 # get quotas
2005 #
2006 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
2007 #
2008 #
2009 # reset pool quotas
2010 #
2011 ceph osd pool set-quota tmp-quota-pool max_bytes 0
2012 ceph osd pool set-quota tmp-quota-pool max_objects 0
2013 #
2014 # test N/A quotas
2015 #
2016 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
2017 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
2018 #
2019 # cleanup tmp pool
2020 ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
2021 }
2022
2023 function test_mon_pg()
2024 {
2025 # Make sure we start healthy.
2026 wait_for_health_ok
2027
2028 ceph pg debug unfound_objects_exist
2029 ceph pg debug degraded_pgs_exist
2030 ceph pg deep-scrub 1.0
2031 ceph pg dump
2032 ceph pg dump pgs_brief --format=json
2033 ceph pg dump pgs --format=json
2034 ceph pg dump pools --format=json
2035 ceph pg dump osds --format=json
2036 ceph pg dump sum --format=json
2037 ceph pg dump all --format=json
2038 ceph pg dump pgs_brief osds --format=json
2039 ceph pg dump pools osds pgs_brief --format=json
2040 ceph pg dump_json
2041 ceph pg dump_pools_json
2042 ceph pg dump_stuck inactive
2043 ceph pg dump_stuck unclean
2044 ceph pg dump_stuck stale
2045 ceph pg dump_stuck undersized
2046 ceph pg dump_stuck degraded
2047 ceph pg ls
2048 ceph pg ls 1
2049 ceph pg ls stale
2050 expect_false ceph pg ls scrubq
2051 ceph pg ls active stale repair recovering
2052 ceph pg ls 1 active
2053 ceph pg ls 1 active stale
2054 ceph pg ls-by-primary osd.0
2055 ceph pg ls-by-primary osd.0 1
2056 ceph pg ls-by-primary osd.0 active
2057 ceph pg ls-by-primary osd.0 active stale
2058 ceph pg ls-by-primary osd.0 1 active stale
2059 ceph pg ls-by-osd osd.0
2060 ceph pg ls-by-osd osd.0 1
2061 ceph pg ls-by-osd osd.0 active
2062 ceph pg ls-by-osd osd.0 active stale
2063 ceph pg ls-by-osd osd.0 1 active stale
2064 ceph pg ls-by-pool rbd
2065 ceph pg ls-by-pool rbd active stale
2066 # can't test this...
2067 # ceph pg force_create_pg
2068 ceph pg getmap -o $TEMP_DIR/map.$$
2069 [ -s $TEMP_DIR/map.$$ ]
2070 ceph pg map 1.0 | grep acting
2071 ceph pg repair 1.0
2072 ceph pg scrub 1.0
2073
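# full/backfillfull/nearfull ratios must stay ordered (nearfull < backfillfull < full);
# setting them out of order raises OSD_OUT_OF_ORDER_FULL, checked below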
2074 ceph osd set-full-ratio .962
2075 ceph osd dump | grep '^full_ratio 0.962'
2076 ceph osd set-backfillfull-ratio .912
2077 ceph osd dump | grep '^backfillfull_ratio 0.912'
2078 ceph osd set-nearfull-ratio .892
2079 ceph osd dump | grep '^nearfull_ratio 0.892'
2080
2081 # Check health status
2082 ceph osd set-nearfull-ratio .913
2083 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
2084 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
2085 ceph osd set-nearfull-ratio .892
2086 ceph osd set-backfillfull-ratio .963
2087 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
2088 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
2089 ceph osd set-backfillfull-ratio .912
2090
2091 # Check injected full results
2092 $SUDO ceph tell osd.0 injectfull nearfull
2093 wait_for_health "OSD_NEARFULL"
2094 ceph health detail | grep "osd.0 is near full"
2095 $SUDO ceph tell osd.0 injectfull none
2096 wait_for_health_ok
2097
2098 $SUDO ceph tell osd.1 injectfull backfillfull
2099 wait_for_health "OSD_BACKFILLFULL"
2100 ceph health detail | grep "osd.1 is backfill full"
2101 $SUDO ceph tell osd.1 injectfull none
2102 wait_for_health_ok
2103
2104 $SUDO ceph tell osd.2 injectfull failsafe
2105 # failsafe and full are the same as far as the monitor is concerned
2106 wait_for_health "OSD_FULL"
2107 ceph health detail | grep "osd.2 is full"
2108 $SUDO ceph tell osd.2 injectfull none
2109 wait_for_health_ok
2110
2111 $SUDO ceph tell osd.0 injectfull full
2112 wait_for_health "OSD_FULL"
2113 ceph health detail | grep "osd.0 is full"
2114 $SUDO ceph tell osd.0 injectfull none
2115 wait_for_health_ok
2116
2117 ceph pg stat | grep 'pgs:'
2118 ceph pg 1.0 query
2119 ceph tell 1.0 query
2120 first=$(ceph mon dump -f json | jq -r '.mons[0].name')
2121 ceph tell mon.$first quorum enter
2122 ceph quorum_status
2123 ceph report | grep osd_stats
2124 ceph status
2125 ceph -s
2126
2127 #
2128 # tell osd version
2129 #
2130 ceph tell osd.0 version
2131 expect_false ceph tell osd.9999 version
2132 expect_false ceph tell osd.foo version
2133
2134 # back to pg stuff
2135
2136 ceph tell osd.0 dump_pg_recovery_stats | grep Started
2137
2138 ceph osd reweight 0 0.9
2139 expect_false ceph osd reweight 0 -1
2140 ceph osd reweight osd.0 1
2141
2142 ceph osd primary-affinity osd.0 .9
2143 expect_false ceph osd primary-affinity osd.0 -2
2144 expect_false ceph osd primary-affinity osd.9999 .5
2145 ceph osd primary-affinity osd.0 1
2146
2147 ceph osd pool set rbd size 2
2148 ceph osd pg-temp 1.0 0 1
2149 ceph osd pg-temp 1.0 osd.1 osd.0
2150 expect_false ceph osd pg-temp 1.0 0 1 2
2151 expect_false ceph osd pg-temp asdf qwer
2152 expect_false ceph osd pg-temp 1.0 asdf
2153 ceph osd pg-temp 1.0 # cleanup pg-temp
2154
2155 ceph pg repeer 1.0
2156 expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore
2157
2158 # don't test ceph osd primary-temp for now
2159 }
2160
2161 function test_mon_osd_pool_set()
2162 {
2163 TEST_POOL_GETSET=pool_getset
2164 ceph osd pool create $TEST_POOL_GETSET 1
2165 ceph osd pool application enable $TEST_POOL_GETSET rados
2166 ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off
2167 wait_for_clean
2168 ceph osd pool get $TEST_POOL_GETSET all
2169
2170 for s in pg_num pgp_num size min_size crush_rule; do
2171 ceph osd pool get $TEST_POOL_GETSET $s
2172 done
2173
2174 old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
2175 (( new_size = old_size + 1 ))
2176 ceph osd pool set $TEST_POOL_GETSET size $new_size
2177 ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
2178 ceph osd pool set $TEST_POOL_GETSET size $old_size
2179
2180 ceph osd pool create pool_erasure 1 1 erasure
2181 ceph osd pool application enable pool_erasure rados
2182 wait_for_clean
2183 set +e
2184 ceph osd pool set pool_erasure size 4444 2>$TMPFILE
2185 check_response 'not change the size'
2186 set -e
2187 ceph osd pool get pool_erasure erasure_code_profile
2188 ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it
2189
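# boolean per-pool flags accept true/false as well as 1/0 and reject anything else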
2190 for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
2191 ceph osd pool set $TEST_POOL_GETSET $flag false
2192 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
2193 ceph osd pool set $TEST_POOL_GETSET $flag true
2194 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
2195 ceph osd pool set $TEST_POOL_GETSET $flag 1
2196 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
2197 ceph osd pool set $TEST_POOL_GETSET $flag 0
2198 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
2199 expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
2200 expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
2201 done
2202
2203 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
2204 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
2205 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
2206 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
2207 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
2208
2209 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
2210 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
2211 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
2212 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
2213 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
2214
2215 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
2216 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
2217 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
2218 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
2219 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
2220
2221 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
2222 ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
2223 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
2224 ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
2225 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'
2226 ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
2227 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
2228 expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11
2229 expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11
2230
2231 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
2232 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
2233 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
2234 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
2235 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
2236
2237 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
2238 ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
2239 ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
2240 ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
2241 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
2242
2243 ceph osd pool set $TEST_POOL_GETSET nopgchange 1
2244 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
2245 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
2246 ceph osd pool set $TEST_POOL_GETSET nopgchange 0
2247 ceph osd pool set $TEST_POOL_GETSET pg_num 10
2248 wait_for_clean
2249 ceph osd pool set $TEST_POOL_GETSET pgp_num 10
2250 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0
2251 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0
2252
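# grow pg_num/pgp_num by 32 PGs per OSD to exercise a larger split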
2253 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
2254 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
2255 ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
2256 ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
2257 wait_for_clean
2258
2259 ceph osd pool set $TEST_POOL_GETSET nosizechange 1
2260 expect_false ceph osd pool set $TEST_POOL_GETSET size 2
2261 expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
2262 ceph osd pool set $TEST_POOL_GETSET nosizechange 0
2263 ceph osd pool set $TEST_POOL_GETSET size 2
2264 wait_for_clean
2265 ceph osd pool set $TEST_POOL_GETSET min_size 2
2266
2267 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
2268 ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
2269
2270 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
2271 ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
2272
2273 ceph osd pool get rbd crush_rule | grep 'crush_rule: '
2274
2275 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2276 ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
2277 ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
2278 ceph osd pool set $TEST_POOL_GETSET compression_mode unset
2279 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2280
2281 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2282 ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
2283 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
2284 ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
2285 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2286
2287 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2288 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
2289 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
2290 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
2291 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
2292 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
2293 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2294
2295 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2296 ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
2297 ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
2298 ceph osd pool set $TEST_POOL_GETSET csum_type unset
2299 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2300
2301 for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2302 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2303 ceph osd pool set $TEST_POOL_GETSET $size 100
2304 ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
2305 ceph osd pool set $TEST_POOL_GETSET $size 0
2306 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2307 done
2308
2309 ceph osd pool set $TEST_POOL_GETSET nodelete 1
2310 expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2311 ceph osd pool set $TEST_POOL_GETSET nodelete 0
2312 ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2313
2314 }
2315
2316 function test_mon_osd_tiered_pool_set()
2317 {
2318 # this is really a tier pool
2319 ceph osd pool create real-tier 2
2320 ceph osd tier add rbd real-tier
2321
2322 # expect us to be unable to set negative values for hit_set_*
2323 for o in hit_set_period hit_set_count hit_set_fpp; do
2324 expect_false ceph osd pool set real-tier $o -1
2325 done
2326
2327 # and hit_set_fpp should be in range 0..1
2328 expect_false ceph osd pool set real-tier hit_set_fpp 2
2329
2330 ceph osd pool set real-tier hit_set_type explicit_hash
2331 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
2332 ceph osd pool set real-tier hit_set_type explicit_object
2333 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
2334 ceph osd pool set real-tier hit_set_type bloom
2335 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
2336 expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
2337 ceph osd pool set real-tier hit_set_period 123
2338 ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
2339 ceph osd pool set real-tier hit_set_count 12
2340 ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
2341 ceph osd pool set real-tier hit_set_fpp .01
2342 ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
2343
2344 ceph osd pool set real-tier target_max_objects 123
2345 ceph osd pool get real-tier target_max_objects | \
2346 grep 'target_max_objects:[ \t]\+123'
2347 ceph osd pool set real-tier target_max_bytes 123456
2348 ceph osd pool get real-tier target_max_bytes | \
2349 grep 'target_max_bytes:[ \t]\+123456'
2350 ceph osd pool set real-tier cache_target_dirty_ratio .123
2351 ceph osd pool get real-tier cache_target_dirty_ratio | \
2352 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
2353 expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
2354 expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
2355 ceph osd pool set real-tier cache_target_dirty_high_ratio .123
2356 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
2357 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
2358 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
2359 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
2360 ceph osd pool set real-tier cache_target_full_ratio .123
2361 ceph osd pool get real-tier cache_target_full_ratio | \
2362 grep 'cache_target_full_ratio:[ \t]\+0.123'
2363 ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
2364 ceph osd pool set real-tier cache_target_full_ratio 1.0
2365 ceph osd pool set real-tier cache_target_full_ratio 0
2366 expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
2367 ceph osd pool set real-tier cache_min_flush_age 123
2368 ceph osd pool get real-tier cache_min_flush_age | \
2369 grep 'cache_min_flush_age:[ \t]\+123'
2370 ceph osd pool set real-tier cache_min_evict_age 234
2371 ceph osd pool get real-tier cache_min_evict_age | \
2372 grep 'cache_min_evict_age:[ \t]\+234'
2373
2374 # iec vs si units
2375 ceph osd pool set real-tier target_max_objects 1K
2376 ceph osd pool get real-tier target_max_objects | grep 1000
2377 for o in target_max_bytes target_size_bytes compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2378 ceph osd pool set real-tier $o 1Ki # with i suffix
2379 val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
2380 [[ $val == 1024 ]]
2381 ceph osd pool set real-tier $o 1M # no i suffix
2382 val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
2383 [[ $val == 1048576 ]]
2384 done
2385
2386 # this is not a tier pool
2387 ceph osd pool create fake-tier 2
2388 ceph osd pool application enable fake-tier rados
2389 wait_for_clean
2390
2391 expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
2392 expect_false ceph osd pool get fake-tier hit_set_type
2393 expect_false ceph osd pool set fake-tier hit_set_type explicit_object
2394 expect_false ceph osd pool get fake-tier hit_set_type
2395 expect_false ceph osd pool set fake-tier hit_set_type bloom
2396 expect_false ceph osd pool get fake-tier hit_set_type
2397 expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
2398 expect_false ceph osd pool set fake-tier hit_set_period 123
2399 expect_false ceph osd pool get fake-tier hit_set_period
2400 expect_false ceph osd pool set fake-tier hit_set_count 12
2401 expect_false ceph osd pool get fake-tier hit_set_count
2402 expect_false ceph osd pool set fake-tier hit_set_fpp .01
2403 expect_false ceph osd pool get fake-tier hit_set_fpp
2404
2405 expect_false ceph osd pool set fake-tier target_max_objects 123
2406 expect_false ceph osd pool get fake-tier target_max_objects
2407 expect_false ceph osd pool set fake-tier target_max_bytes 123456
2408 expect_false ceph osd pool get fake-tier target_max_bytes
2409 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
2410 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2411 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
2412 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
2413 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
2414 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2415 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
2416 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
2417 expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
2418 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2419 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
2420 expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
2421 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
2422 expect_false ceph osd pool set fake-tier cache_min_flush_age 123
2423 expect_false ceph osd pool get fake-tier cache_min_flush_age
2424 expect_false ceph osd pool set fake-tier cache_min_evict_age 234
2425 expect_false ceph osd pool get fake-tier cache_min_evict_age
2426
2427 ceph osd tier remove rbd real-tier
2428 ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
2429 ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
2430 }
2431
2432 function test_mon_osd_erasure_code()
2433 {
2434
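# re-setting a profile with identical values is allowed; changing an existing profile requires --force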
2435 ceph osd erasure-code-profile set fooprofile a=b c=d
2436 ceph osd erasure-code-profile set fooprofile a=b c=d
2437 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2438 ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
2439 ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2440 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
2441 # make sure ruleset-foo doesn't work anymore
2442 expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
2443 ceph osd erasure-code-profile set barprofile crush-failure-domain=host
2444 # clean up
2445 ceph osd erasure-code-profile rm fooprofile
2446 ceph osd erasure-code-profile rm barprofile
2447
2448 # try weird k and m values
2449 expect_false ceph osd erasure-code-profile set badk k=1 m=1
2450 expect_false ceph osd erasure-code-profile set badk k=1 m=2
2451 expect_false ceph osd erasure-code-profile set badk k=0 m=2
2452 expect_false ceph osd erasure-code-profile set badk k=-1 m=2
2453 expect_false ceph osd erasure-code-profile set badm k=2 m=0
2454 expect_false ceph osd erasure-code-profile set badm k=2 m=-1
2455 ceph osd erasure-code-profile set good k=2 m=1
2456 ceph osd erasure-code-profile rm good
2457 }
2458
2459 function test_mon_osd_misc()
2460 {
2461 set +e
2462
2463 # expect error about missing 'pool' argument
2464 ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
2465
2466 # expect error about unused argument foo
2467 ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
2468
2469 # expect "not in range" for invalid overload percentage
2470 ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
2471
2472 set -e
2473
2474 local old_bytes_per_osd=$(ceph config get mgr mon_reweight_min_bytes_per_osd)
2475 local old_pgs_per_osd=$(ceph config get mgr mon_reweight_min_pgs_per_osd)
2476 # otherwise ceph-mgr complains like:
2477 # Error EDOM: Refusing to reweight: we only have 5372 kb used across all osds!
2478 # Error EDOM: Refusing to reweight: we only have 20 PGs across 3 osds!
2479 ceph config set mgr mon_reweight_min_bytes_per_osd 0
2480 ceph config set mgr mon_reweight_min_pgs_per_osd 0
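# arguments are the overload percentage (must be > 100), then optional max weight change and max number of osds to adjust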
2481 ceph osd reweight-by-utilization 110
2482 ceph osd reweight-by-utilization 110 .5
2483 expect_false ceph osd reweight-by-utilization 110 0
2484 expect_false ceph osd reweight-by-utilization 110 -0.1
2485 ceph osd test-reweight-by-utilization 110 .5 --no-increasing
2486 ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
2487 expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
2488 expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
2489 ceph osd reweight-by-pg 110
2490 ceph osd test-reweight-by-pg 110 .5
2491 ceph osd reweight-by-pg 110 rbd
2492 ceph osd reweight-by-pg 110 .5 rbd
2493 expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
2494 # restore the settings
2495 ceph config set mgr mon_reweight_min_bytes_per_osd $old_bytes_per_osd
2496 ceph config set mgr mon_reweight_min_pgs_per_osd $old_pgs_per_osd
2497 }
2498
2499 function test_admin_heap_profiler()
2500 {
2501 do_test=1
2502 set +e
2503 # expect 'heap' commands to be correctly parsed
2504 ceph tell osd.0 heap stats 2>$TMPFILE
2505 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2506 echo "tcmalloc not enabled; skip heap profiler test"
2507 do_test=0
2508 fi
2509 set -e
2510
2511 [[ $do_test -eq 0 ]] && return 0
2512
2513 $SUDO ceph tell osd.0 heap start_profiler
2514 $SUDO ceph tell osd.0 heap dump
2515 $SUDO ceph tell osd.0 heap stop_profiler
2516 $SUDO ceph tell osd.0 heap release
2517 }
2518
2519 function test_osd_bench()
2520 {
2521 # test osd bench limits
2522 # As we should not rely on defaults (as they may change over time),
2523 # let's inject some values and perform some simple tests
2524 # max iops: 10 # 10 IOPS
2525 # max throughput: 10485760 # 10MB/s
2526 # max block size: 2097152 # 2MB
2527 # duration: 10 # 10 seconds
2528
2529 local args="\
2530 --osd-bench-duration 10 \
2531 --osd-bench-max-block-size 2097152 \
2532 --osd-bench-large-size-max-throughput 10485760 \
2533 --osd-bench-small-size-max-iops 10"
2534 ceph tell osd.0 injectargs ${args## }
2535
2536 # anything with a bs larger than 2097152 must fail
2537 expect_false ceph tell osd.0 bench 1 2097153
2538 # but a bs equal to the configured max block size (2097152) must succeed
2539 ceph tell osd.0 bench 1 2097152
2540
2541 # we assume 1MB as a large bs; anything lower is a small bs
2542 # for a 4096-byte bs over 10 seconds we are limited by IOPS
2543 # max count: 409600 bytes (4096 bytes * 10 IOPS * 10 s)
2544
2545 # more than max count must not be allowed
2546 expect_false ceph tell osd.0 bench 409601 4096
2547 # but 409600 must succeed
2548 ceph tell osd.0 bench 409600 4096
2549
2550 # for a large bs, we are limited by throughput.
2551 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
2552 # the max count will be (10MB * 10s) = 100MB
2553 # max count: 104857600 (bytes)
2554
2555 # more than max count must not be allowed
2556 expect_false ceph tell osd.0 bench 104857601 2097152
2557 # up to max count must be allowed
2558 ceph tell osd.0 bench 104857600 2097152
2559 }
2560
2561 function test_osd_negative_filestore_merge_threshold()
2562 {
2563 $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
2564 expect_config_value "osd.0" "filestore_merge_threshold" -1
2565 }
2566
2567 function test_mon_tell()
2568 {
2569 for m in mon.a mon.b; do
2570 ceph tell $m sessions
2571 ceph_watch_start debug audit
2572 ceph tell $m sessions
2573 ceph_watch_wait "${m} \[DBG\] from.*cmd='sessions' args=\[\]: dispatch"
2574 done
2575 expect_false ceph tell mon.foo version
2576 }
2577
2578 function test_mon_ping()
2579 {
2580 ceph ping mon.a
2581 ceph ping mon.b
2582 expect_false ceph ping mon.foo
2583
2584 ceph ping mon.\*
2585 }
2586
2587 function test_mon_deprecated_commands()
2588 {
2589 # current DEPRECATED commands are marked with FLAG(DEPRECATED)
2590 #
2591 # Testing should be accomplished by setting
2592 # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
2593 # each one of these commands.
2594
2595 ceph tell mon.* injectargs '--mon-debug-deprecated-as-obsolete'
2596 expect_false ceph config-key list 2> $TMPFILE
2597 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2598
2599 ceph tell mon.* injectargs '--no-mon-debug-deprecated-as-obsolete'
2600 }
2601
2602 function test_mon_cephdf_commands()
2603 {
2604 # ceph df detail:
2605 # pool section:
2606 # RAW USED: the approximate raw space consumed by each pool, out of the raw total
2607
2608 ceph osd pool create cephdf_for_test 1 1 replicated
2609 ceph osd pool application enable cephdf_for_test rados
2610 ceph osd pool set cephdf_for_test size 2
2611
2612 dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
2613 rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
2614
2615 # wait for update
2616 for i in `seq 1 10`; do
2617 rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
2618 sleep 1
2619 done
2620 # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
2621 # to sync mon with osd
2622 flush_pg_stats
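# with a size 2 pool, raw usage should be twice the logical stored bytes, so stored*2 must equal stored_raw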
2623 local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
2624 stored=`ceph df detail --format=json | jq "$jq_filter.stored * 2"`
2625 stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
2626
2627 ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
2628 rm ./cephdf_for_test
2629
2630 expect_false test $stored != $stored_raw
2631 }
2632
2633 function test_mon_pool_application()
2634 {
2635 ceph osd pool create app_for_test 16
2636
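# enabling a second application on a pool requires --yes-i-really-mean-it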
2637 ceph osd pool application enable app_for_test rbd
2638 expect_false ceph osd pool application enable app_for_test rgw
2639 ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
2640 ceph osd pool ls detail | grep "application rbd,rgw"
2641 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2642
2643 expect_false ceph osd pool application set app_for_test cephfs key value
2644 ceph osd pool application set app_for_test rbd key1 value1
2645 ceph osd pool application set app_for_test rbd key2 value2
2646 ceph osd pool application set app_for_test rgw key1 value1
2647 ceph osd pool application get app_for_test rbd key1 | grep 'value1'
2648 ceph osd pool application get app_for_test rbd key2 | grep 'value2'
2649 ceph osd pool application get app_for_test rgw key1 | grep 'value1'
2650
2651 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
2652
2653 ceph osd pool application rm app_for_test rgw key1
2654 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
2655 ceph osd pool application rm app_for_test rbd key2
2656 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
2657 ceph osd pool application rm app_for_test rbd key1
2658 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2659 ceph osd pool application rm app_for_test rbd key1 # should be idempotent
2660
2661 expect_false ceph osd pool application disable app_for_test rgw
2662 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2663 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
2664 ceph osd pool ls detail | grep "application rbd"
2665 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
2666
2667 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2668 ceph osd pool ls detail | grep -v "application "
2669 ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
2670
2671 ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
2672 }
2673
2674 function test_mon_tell_help_command()
2675 {
2676 ceph tell mon.a help | grep sync_force
2677 ceph tell mon.a -h | grep sync_force
2678 ceph tell mon.a config -h | grep 'config diff get'
2679
2680 # wrong target
2681 expect_false ceph tell mon.zzz help
2682 }
2683
2684 function test_mon_stdin_stdout()
2685 {
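# '-i -' reads the value from stdin and '-o -' writes it to stdout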
2686 echo foo | ceph config-key set test_key -i -
2687 ceph config-key get test_key -o - | grep -c foo | grep -q 1
2688 }
2689
2690 function test_osd_tell_help_command()
2691 {
2692 ceph tell osd.1 help
2693 expect_false ceph tell osd.100 help
2694 }
2695
2696 function test_osd_compact()
2697 {
2698 ceph tell osd.1 compact
2699 $SUDO ceph daemon osd.1 compact
2700 }
2701
2702 function test_mds_tell_help_command()
2703 {
2704 local FS_NAME=cephfs
2705 if ! mds_exists ; then
2706 echo "Skipping test, no MDS found"
2707 return
2708 fi
2709
2710 remove_all_fs
2711 ceph osd pool create fs_data 16
2712 ceph osd pool create fs_metadata 16
2713 ceph fs new $FS_NAME fs_metadata fs_data
2714 wait_mds_active $FS_NAME
2715
2716
2717 ceph tell mds.a help
2718 expect_false ceph tell mds.z help
2719
2720 remove_all_fs
2721 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
2722 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
2723 }
2724
2725 function test_mgr_tell()
2726 {
2727 ceph tell mgr version
2728 }
2729
2730 function test_mgr_devices()
2731 {
2732 ceph device ls
2733 expect_false ceph device info doesnotexist
2734 expect_false ceph device get-health-metrics doesnotexist
2735 }
2736
2737 function test_per_pool_scrub_status()
2738 {
2739 ceph osd pool create noscrub_pool 16
2740 ceph osd pool create noscrub_pool2 16
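# with no scrub flags set, the POOL_SCRUB_FLAGS health check must not fire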
2741 ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
2742 ceph -s --format json | \
2743 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2744 expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
2745 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2746 expect_false grep -q "Pool .* has .*scrub.* flag"
2747 ceph health detail | \
2748 expect_false grep -q "Pool .* has .*scrub.* flag"
2749
2750 ceph osd pool set noscrub_pool noscrub 1
2751 ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
2752 ceph -s --format json | \
2753 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2754 expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
2755 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2756 expect_true grep -q "Pool noscrub_pool has noscrub flag"
2757 ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
2758
2759 ceph osd pool set noscrub_pool nodeep-scrub 1
2760 ceph osd pool set noscrub_pool2 nodeep-scrub 1
2761 ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
2762 ceph -s --format json | \
2763 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2764 expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
2765 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2766 expect_true grep -q "Pool noscrub_pool has noscrub flag"
2767 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2768 expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
2769 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2770 expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
2771 ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
2772 ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
2773 ceph health detail | expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
2774
2775 ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it
2776 ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it
2777 }
2778
2779 #
2780 # New tests should be added to the appropriate *_TESTS list below (they are merged into TESTS)
2781 #
2782 # Individual tests may be run using the '-t <testname>' argument
2783 # The user can specify '-t <testname>' as many times as she wants
2784 #
2785 # Tests will be run in the order presented in the TESTS array, or in
2786 # the order specified by the '-t <testname>' options.
2787 #
2788 # '-l' will list all the available test names
2789 # '-h' will show usage
2790 #
2791 # The test maintains backward compatibility: not specifying arguments
2792 # will run all tests in the order they appear in the TESTS array.
2793 #
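# Example invocations (assuming a reachable test cluster, e.g. a vstart or teuthology environment):
#   ./test.sh -l                        # list available test names
#   ./test.sh -t mon_osd -t osd_bench   # run just these two tests, in this order
#   ./test.sh --test-mon                # run all monitor tests
#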
2794
2795 set +x
2796 MON_TESTS+=" mon_injectargs"
2797 MON_TESTS+=" mon_injectargs_SI"
2798 for i in `seq 9`; do
2799 MON_TESTS+=" tiering_$i";
2800 done
2801 MON_TESTS+=" auth"
2802 MON_TESTS+=" auth_profiles"
2803 MON_TESTS+=" mon_misc"
2804 MON_TESTS+=" mon_mon"
2805 MON_TESTS+=" mon_osd"
2806 MON_TESTS+=" mon_config_key"
2807 MON_TESTS+=" mon_crush"
2808 MON_TESTS+=" mon_osd_create_destroy"
2809 MON_TESTS+=" mon_osd_pool"
2810 MON_TESTS+=" mon_osd_pool_quota"
2811 MON_TESTS+=" mon_pg"
2812 MON_TESTS+=" mon_osd_pool_set"
2813 MON_TESTS+=" mon_osd_tiered_pool_set"
2814 MON_TESTS+=" mon_osd_erasure_code"
2815 MON_TESTS+=" mon_osd_misc"
2816 MON_TESTS+=" mon_tell"
2817 MON_TESTS+=" mon_ping"
2818 MON_TESTS+=" mon_deprecated_commands"
2819 MON_TESTS+=" mon_caps"
2820 MON_TESTS+=" mon_cephdf_commands"
2821 MON_TESTS+=" mon_tell_help_command"
2822 MON_TESTS+=" mon_stdin_stdout"
2823
2824 OSD_TESTS+=" osd_bench"
2825 OSD_TESTS+=" osd_negative_filestore_merge_threshold"
2826 OSD_TESTS+=" tiering_agent"
2827 OSD_TESTS+=" admin_heap_profiler"
2828 OSD_TESTS+=" osd_tell_help_command"
2829 OSD_TESTS+=" osd_compact"
2830 OSD_TESTS+=" per_pool_scrub_status"
2831
2832 MDS_TESTS+=" mds_tell"
2833 MDS_TESTS+=" mon_mds"
2834 MDS_TESTS+=" mon_mds_metadata"
2835 MDS_TESTS+=" mds_tell_help_command"
2836
2837 MGR_TESTS+=" mgr_tell"
2838 MGR_TESTS+=" mgr_devices"
2839
2840 TESTS+=$MON_TESTS
2841 TESTS+=$OSD_TESTS
2842 TESTS+=$MDS_TESTS
2843 TESTS+=$MGR_TESTS
2844
2845 #
2846 # "main" follows
2847 #
2848
2849 function list_tests()
2850 {
2851 echo "AVAILABLE TESTS"
2852 for i in $TESTS; do
2853 echo " $i"
2854 done
2855 }
2856
2857 function usage()
2858 {
2859 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2860 }
2861
2862 tests_to_run=()
2863
2864 sanity_check=true
2865
2866 while [[ $# -gt 0 ]]; do
2867 opt=$1
2868
2869 case "$opt" in
2870 "-l" )
2871 do_list=1
2872 ;;
2873 "--asok-does-not-need-root" )
2874 SUDO=""
2875 ;;
2876 "--no-sanity-check" )
2877 sanity_check=false
2878 ;;
2879 "--test-mon" )
2880 tests_to_run+="$MON_TESTS"
2881 ;;
2882 "--test-osd" )
2883 tests_to_run+="$OSD_TESTS"
2884 ;;
2885 "--test-mds" )
2886 tests_to_run+="$MDS_TESTS"
2887 ;;
2888 "--test-mgr" )
2889 tests_to_run+="$MGR_TESTS"
2890 ;;
2891 "-t" )
2892 shift
2893 if [[ -z "$1" ]]; then
2894 echo "missing argument to '-t'"
2895 usage ;
2896 exit 1
2897 fi
2898 tests_to_run+=" $1"
2899 ;;
2900 "-h" )
2901 usage ;
2902 exit 0
2903 ;;
2904 esac
2905 shift
2906 done
2907
2908 if [[ $do_list -eq 1 ]]; then
2909 list_tests ;
2910 exit 0
2911 fi
2912
2913 ceph osd pool create rbd 16
2914
2915 if test -z "$tests_to_run" ; then
2916 tests_to_run="$TESTS"
2917 fi
2918
2919 if $sanity_check ; then
2920 wait_no_osd_down
2921 fi
2922 for i in $tests_to_run; do
2923 if $sanity_check ; then
2924 check_no_osd_down
2925 fi
2926 set -x
2927 test_${i}
2928 set +x
2929 done
2930 if $sanity_check ; then
2931 check_no_osd_down
2932 fi
2933
2934 set -x
2935
2936 echo OK