1 #!/usr/bin/env bash
2 # -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
3 # vim: ts=8 sw=8 ft=bash smarttab
4 set -x
5
6 source $(dirname $0)/../../standalone/ceph-helpers.sh
7
8 set -e
9 set -o functrace
10 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
11 SUDO=${SUDO:-sudo}
12 export CEPH_DEV=1
13
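#
# check_no_osd_down: succeed only if "ceph osd dump" reports no OSD as down.
# wait_no_osd_down: poll the above for up to $max_run seconds, e.g. (a usage
# sketch) after restarting an OSD, to block until it reports up again.
#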
14 function check_no_osd_down()
15 {
16 ! ceph osd dump | grep ' down '
17 }
18
19 function wait_no_osd_down()
20 {
21 max_run=300
22 for i in $(seq 1 $max_run) ; do
23 if ! check_no_osd_down ; then
24 echo "waiting for osd(s) to come back up ($i/$max_run)"
25 sleep 1
26 else
27 break
28 fi
29 done
30 check_no_osd_down
31 }
32
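#
# expect_false cmd ... / expect_true cmd ...
#
# assertion helpers; since the script runs under "set -e", a wrong result
# aborts the test. a usage sketch:
#   expect_false ceph osd pool delete does-not-exist   # must fail
#   expect_true ceph osd dump                          # must succeed
#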
33 function expect_false()
34 {
35 set -x
36 if "$@"; then return 1; else return 0; fi
37 }
38
39 function expect_true()
40 {
41 set -x
42 if ! "$@"; then return 1; else return 0; fi
43 }
44
45 TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
46 trap "rm -fr $TEMP_DIR" 0
47
48 TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
49
50 #
51 # retry_eagain max cmd args ...
52 #
53 # retry cmd args ... if it exits on error and its output contains the
54 # string EAGAIN, at most $max times
55 #
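# a hypothetical example: retry a command up to 5 times while the cluster
# still answers with EAGAIN:
#   retry_eagain 5 ceph tell mon.a version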
56 function retry_eagain()
57 {
58 local max=$1
59 shift
60 local status
61 local tmpfile=$TEMP_DIR/retry_eagain.$$
62 local count
63 for count in $(seq 1 $max) ; do
64 status=0
65 "$@" > $tmpfile 2>&1 || status=$?
66 if test $status = 0 ||
67 ! grep --quiet EAGAIN $tmpfile ; then
68 break
69 fi
70 sleep 1
71 done
72 if test $status != 0 && test $count = $max ; then
73 echo "retried with non-zero exit status, $max times:" "$@" >&2
74 fi
75 cat $tmpfile
76 rm $tmpfile
77 return $status
78 }
79
80 #
81 # map_enxio_to_eagain cmd arg ...
82 #
83 # add EAGAIN to the output of cmd arg ... if the output contains
84 # ENXIO.
85 #
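# intended to be combined with retry_eagain so that transient ENXIO errors
# are retried as if they were EAGAIN; a sketch (hypothetical command):
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version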
86 function map_enxio_to_eagain()
87 {
88 local status=0
89 local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
90
91 "$@" > $tmpfile 2>&1 || status=$?
92 if test $status != 0 &&
93 grep --quiet ENXIO $tmpfile ; then
94 echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
95 fi
96 cat $tmpfile
97 rm $tmpfile
98 return $status
99 }
100
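#
# check_response expected_string [retcode expected_retcode]
#
# verify that the captured output in $TMPFILE contains expected_string and,
# if given, that the command's return code matched expected_retcode
#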
101 function check_response()
102 {
103 expected_string=$1
104 retcode=$2
105 expected_retcode=$3
106 if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
107 echo "return code invalid: got $retcode, expected $expected_retcode" >&2
108 exit 1
109 fi
110
111 if ! grep --quiet -- "$expected_string" $TMPFILE ; then
112 echo "Didn't find $expected_string in output" >&2
113 cat $TMPFILE >&2
114 exit 1
115 fi
116 }
117
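#
# get_config_value_or_die target config_opt
#
# read a config option over the daemon's admin socket ("ceph daemon ...
# config get") and print just its value, exiting on failure, e.g.:
#   val=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
#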
118 function get_config_value_or_die()
119 {
120 local target config_opt raw val
121
122 target=$1
123 config_opt=$2
124
125 raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
126 if [[ $? -ne 0 ]]; then
127 echo "error obtaining config opt '$config_opt' from '$target': $raw"
128 exit 1
129 fi
130
131 raw=`echo $raw | sed -e 's/[{} "]//g'`
132 val=`echo $raw | cut -f2 -d:`
133
134 echo "$val"
135 return 0
136 }
137
138 function expect_config_value()
139 {
140 local target config_opt expected_val val
141 target=$1
142 config_opt=$2
143 expected_val=$3
144
145 val=$(get_config_value_or_die $target $config_opt)
146
147 if [[ "$val" != "$expected_val" ]]; then
148 echo "expected '$expected_val', got '$val'"
149 exit 1
150 fi
151 }
152
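#
# ceph_watch_start [level [channel]] / ceph_watch_wait regexp [timeout]
#
# start "ceph --watch[-level]" in the background, capturing its output, then
# block until a line matching regexp appears (default timeout 30s), e.g.:
#   ceph_watch_start
#   ceph log "some message"
#   ceph_watch_wait "some message"
#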
153 function ceph_watch_start()
154 {
155 local watch_opt=--watch
156
157 if [ -n "$1" ]; then
158 watch_opt=--watch-$1
159 if [ -n "$2" ]; then
160 watch_opt+=" --watch-channel $2"
161 fi
162 fi
163
164 CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
165 ceph $watch_opt > $CEPH_WATCH_FILE &
166 CEPH_WATCH_PID=$!
167
168 # wait until the "ceph" client is connected and receiving
169 # log messages from monitor
170 for i in `seq 3`; do
171 grep -q "cluster" $CEPH_WATCH_FILE && break
172 sleep 1
173 done
174 }
175
176 function ceph_watch_wait()
177 {
178 local regexp=$1
179 local timeout=30
180
181 if [ -n "$2" ]; then
182 timeout=$2
183 fi
184
185 for i in `seq ${timeout}`; do
186 grep -q "$regexp" $CEPH_WATCH_FILE && break
187 sleep 1
188 done
189
190 kill $CEPH_WATCH_PID
191
192 if ! grep "$regexp" $CEPH_WATCH_FILE; then
193 echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
194 cat $CEPH_WATCH_FILE >&2
195 return 1
196 fi
197 }
198
199 function test_mon_injectargs()
200 {
201 ceph tell osd.0 injectargs --no-osd_enable_op_tracker
202 ceph tell osd.0 config get osd_enable_op_tracker | grep false
203 ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500'
204 ceph tell osd.0 config get osd_enable_op_tracker | grep true
205 ceph tell osd.0 config get osd_op_history_duration | grep 500
206 ceph tell osd.0 injectargs --no-osd_enable_op_tracker
207 ceph tell osd.0 config get osd_enable_op_tracker | grep false
208 ceph tell osd.0 injectargs -- --osd_enable_op_tracker
209 ceph tell osd.0 config get osd_enable_op_tracker | grep true
210 ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600'
211 ceph tell osd.0 config get osd_enable_op_tracker | grep true
212 ceph tell osd.0 config get osd_op_history_duration | grep 600
213
214 ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200'
215 ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200
216
217 ceph tell osd.0 injectargs -- '--mon_probe_timeout 2'
218 ceph tell osd.0 config get mon_probe_timeout | grep 2
219
220 ceph tell osd.0 injectargs -- '--mon-lease 6'
221 ceph tell osd.0 config get mon_lease | grep 6
222
223 # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
224 expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 2> $TMPFILE || return 1
225 check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
226
227 expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
228 ceph tell osd.0 injectargs -- '--osd_op_history_duration'
229
230 }
231
232 function test_mon_injectargs_SI()
233 {
234 # Test SI units during injectargs and 'config set'
235 # We only aim at testing the units are parsed accordingly
236 # and don't intend to test whether the options being set
237 # actually expect SI units to be passed.
238 # Keep in mind that all integer based options that are not based on bytes
239 # (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed to
240 # base 10.
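# For example, with the non-byte option below, 10K parses to 10000 and
# 1G to 1000000000.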
241 initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
242 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
243 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
244 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
245 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
246 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
247 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
248 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
249 check_response "(22) Invalid argument"
250 # now test with injectargs
251 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
252 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
253 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
254 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
255 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
256 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
257 expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
258 expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
259 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
260 }
261
262 function test_mon_injectargs_IEC()
263 {
264 # Test IEC units during injectargs and 'config set'
265 # We only aim at testing the units are parsed accordingly
266 # and don't intend to test whether the options being set
267 # actually expect IEC units to be passed.
268 # Keep in mind that all integer based options that are based on bytes
269 # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
270 # unit modifiers (for backwards compatibility and convenience) and be parsed
271 # to base 2.
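# For example, with the byte-based option below, 15G parses to
# 15 * 2^30 = 16106127360 and 16Gi to 16 * 2^30 = 17179869184.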
272 initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
273 $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
274 expect_config_value "mon.a" "mon_data_size_warn" 15000000000
275 $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
276 expect_config_value "mon.a" "mon_data_size_warn" 16106127360
277 $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
278 expect_config_value "mon.a" "mon_data_size_warn" 17179869184
279 $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
280 check_response "(22) Invalid argument"
281 # now test with injectargs
282 ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
283 expect_config_value "mon.a" "mon_data_size_warn" 15000000000
284 ceph tell mon.a injectargs '--mon_data_size_warn 15G'
285 expect_config_value "mon.a" "mon_data_size_warn" 16106127360
286 ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
287 expect_config_value "mon.a" "mon_data_size_warn" 17179869184
288 expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
289 $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
290 }
291
292 function test_tiering_agent()
293 {
294 local slow=slow_eviction
295 local fast=fast_eviction
296 ceph osd pool create $slow 1 1
297 ceph osd pool application enable $slow rados
298 ceph osd pool create $fast 1 1
299 ceph osd tier add $slow $fast
300 ceph osd tier cache-mode $fast writeback
301 ceph osd tier set-overlay $slow $fast
302 ceph osd pool set $fast hit_set_type bloom
303 rados -p $slow put obj1 /etc/group
304 ceph osd pool set $fast target_max_objects 1
305 ceph osd pool set $fast hit_set_count 1
306 ceph osd pool set $fast hit_set_period 5
307 # wait for the object to be evicted from the cache
308 local evicted
309 evicted=false
310 for i in `seq 1 300` ; do
311 if ! rados -p $fast ls | grep obj1 ; then
312 evicted=true
313 break
314 fi
315 sleep 1
316 done
317 $evicted # assert
318 # the object is proxy read and promoted to the cache
319 rados -p $slow get obj1 - >/dev/null
320 # wait for the promoted object to be evicted again
321 evicted=false
322 for i in `seq 1 300` ; do
323 if ! rados -p $fast ls | grep obj1 ; then
324 evicted=true
325 break
326 fi
327 sleep 1
328 done
329 $evicted # assert
330 ceph osd tier remove-overlay $slow
331 ceph osd tier remove $slow $fast
332 ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
333 ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
334 }
335
336 function test_tiering_1()
337 {
338 # tiering
339 ceph osd pool create slow 2
340 ceph osd pool application enable slow rados
341 ceph osd pool create slow2 2
342 ceph osd pool application enable slow2 rados
343 ceph osd pool create cache 2
344 ceph osd pool create cache2 2
345 ceph osd tier add slow cache
346 ceph osd tier add slow cache2
347 expect_false ceph osd tier add slow2 cache
348 # application metadata should propagate to the tiers
349 ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow") | .application_metadata["rados"]' | grep '{}'
350 ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow2") | .application_metadata["rados"]' | grep '{}'
351 ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache") | .application_metadata["rados"]' | grep '{}'
352 ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache2") | .application_metadata["rados"]' | grep '{}'
353 # forward and proxy are removed/deprecated
354 expect_false ceph osd tier cache-mode cache forward
355 expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
356 expect_false ceph osd tier cache-mode cache proxy
357 expect_false ceph osd tier cache-mode cache proxy --yes-i-really-mean-it
358 # test some state transitions
359 ceph osd tier cache-mode cache writeback
360 expect_false ceph osd tier cache-mode cache readonly
361 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
362 ceph osd tier cache-mode cache readproxy
363 ceph osd tier cache-mode cache none
364 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
365 ceph osd tier cache-mode cache none
366 ceph osd tier cache-mode cache writeback
367 expect_false ceph osd tier cache-mode cache none
368 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
369 # test with dirty objects in the tier pool
370 # tier pool currently set to 'writeback'
371 rados -p cache put /etc/passwd /etc/passwd
372 flush_pg_stats
373 # 1 dirty object in pool 'cache'
374 ceph osd tier cache-mode cache readproxy
375 expect_false ceph osd tier cache-mode cache none
376 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
377 ceph osd tier cache-mode cache writeback
378 # remove object from tier pool
379 rados -p cache rm /etc/passwd
380 rados -p cache cache-flush-evict-all
381 flush_pg_stats
382 # no dirty objects in pool 'cache'
383 ceph osd tier cache-mode cache readproxy
384 ceph osd tier cache-mode cache none
385 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
386 TRIES=0
387 while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
388 do
389 grep 'currently creating pgs' $TMPFILE
390 TRIES=$(( $TRIES + 1 ))
391 test $TRIES -ne 60
392 sleep 3
393 done
394 expect_false ceph osd pool set cache pg_num 4
395 ceph osd tier cache-mode cache none
396 ceph osd tier set-overlay slow cache
397 expect_false ceph osd tier set-overlay slow cache2
398 expect_false ceph osd tier remove slow cache
399 ceph osd tier remove-overlay slow
400 ceph osd tier set-overlay slow cache2
401 ceph osd tier remove-overlay slow
402 ceph osd tier remove slow cache
403 ceph osd tier add slow2 cache
404 expect_false ceph osd tier set-overlay slow cache
405 ceph osd tier set-overlay slow2 cache
406 ceph osd tier remove-overlay slow2
407 ceph osd tier remove slow2 cache
408 ceph osd tier remove slow cache2
409
410 # make sure adding a non-empty pool as a tier fails (without --force-nonempty)
411 rados -p cache2 put /etc/passwd /etc/passwd
412 while ! ceph df | grep cache2 | grep ' 1 ' ; do
413 echo waiting for pg stats to flush
414 sleep 2
415 done
416 expect_false ceph osd tier add slow cache2
417 ceph osd tier add slow cache2 --force-nonempty
418 ceph osd tier remove slow cache2
419
420 ceph osd pool ls | grep cache2
421 ceph osd pool ls -f json-pretty | grep cache2
422 ceph osd pool ls detail | grep cache2
423 ceph osd pool ls detail -f json-pretty | grep cache2
424
425 ceph osd pool delete slow slow --yes-i-really-really-mean-it
426 ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
427 ceph osd pool delete cache cache --yes-i-really-really-mean-it
428 ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
429 }
430
431 function test_tiering_2()
432 {
433 # make sure we can't clobber snapshot state
434 ceph osd pool create snap_base 2
435 ceph osd pool application enable snap_base rados
436 ceph osd pool create snap_cache 2
437 ceph osd pool mksnap snap_cache snapname
438 expect_false ceph osd tier add snap_base snap_cache
439 ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
440 ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
441 }
442
443 function test_tiering_3()
444 {
445 # make sure we can't create snapshot on tier
446 ceph osd pool create basex 2
447 ceph osd pool application enable basex rados
448 ceph osd pool create cachex 2
449 ceph osd tier add basex cachex
450 expect_false ceph osd pool mksnap cachex snapname
451 ceph osd tier remove basex cachex
452 ceph osd pool delete basex basex --yes-i-really-really-mean-it
453 ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
454 }
455
456 function test_tiering_4()
457 {
458 # make sure we can't create an ec pool tier
459 ceph osd pool create eccache 2 2 erasure
460 expect_false ceph osd set-require-min-compat-client bobtail
461 ceph osd pool create repbase 2
462 ceph osd pool application enable repbase rados
463 expect_false ceph osd tier add repbase eccache
464 ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
465 ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
466 }
467
468 function test_tiering_5()
469 {
470 # convenient add-cache command
471 ceph osd pool create slow 2
472 ceph osd pool application enable slow rados
473 ceph osd pool create cache3 2
474 ceph osd tier add-cache slow cache3 1024000
475 ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
476 ceph osd tier remove slow cache3 2> $TMPFILE || true
477 check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
478 ceph osd tier remove-overlay slow
479 ceph osd tier remove slow cache3
480 ceph osd pool ls | grep cache3
481 ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
482 ! ceph osd pool ls | grep cache3 || exit 1
483 ceph osd pool delete slow slow --yes-i-really-really-mean-it
484 }
485
486 function test_tiering_6()
487 {
488 # check whether add-cache works
489 ceph osd pool create datapool 2
490 ceph osd pool application enable datapool rados
491 ceph osd pool create cachepool 2
492 ceph osd tier add-cache datapool cachepool 1024000
493 ceph osd tier cache-mode cachepool writeback
494 rados -p datapool put object /etc/passwd
495 rados -p cachepool stat object
496 rados -p cachepool cache-flush object
497 rados -p datapool stat object
498 ceph osd tier remove-overlay datapool
499 ceph osd tier remove datapool cachepool
500 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
501 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
502 }
503
504 function test_tiering_7()
505 {
506 # protection against pool removal when used as tiers
507 ceph osd pool create datapool 2
508 ceph osd pool application enable datapool rados
509 ceph osd pool create cachepool 2
510 ceph osd tier add-cache datapool cachepool 1024000
511 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
512 check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
513 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
514 check_response "EBUSY: pool 'datapool' has tiers cachepool"
515 ceph osd tier remove-overlay datapool
516 ceph osd tier remove datapool cachepool
517 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
518 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
519 }
520
521 function test_tiering_8()
522 {
523 ## check health check
524 ceph osd set notieragent
525 ceph osd pool create datapool 2
526 ceph osd pool application enable datapool rados
527 ceph osd pool create cache4 2
528 ceph osd tier add-cache datapool cache4 1024000
529 ceph osd tier cache-mode cache4 writeback
530 tmpfile=$(mktemp|grep tmp)
531 dd if=/dev/zero of=$tmpfile bs=4K count=1
532 ceph osd pool set cache4 target_max_objects 200
533 ceph osd pool set cache4 target_max_bytes 1000000
534 rados -p cache4 put foo1 $tmpfile
535 rados -p cache4 put foo2 $tmpfile
536 rm -f $tmpfile
537 flush_pg_stats
538 ceph df | grep datapool | grep ' 2 '
539 ceph osd tier remove-overlay datapool
540 ceph osd tier remove datapool cache4
541 ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
542 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
543 ceph osd unset notieragent
544 }
545
546 function test_tiering_9()
547 {
548 # make sure 'tier remove' behaves as we expect
549 # i.e., removing a tier from a pool that's not its base pool only
550 # results in a 'pool foo is now (or already was) not a tier of bar'
551 #
552 ceph osd pool create basepoolA 2
553 ceph osd pool application enable basepoolA rados
554 ceph osd pool create basepoolB 2
555 ceph osd pool application enable basepoolB rados
556 poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
557 poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
558
559 ceph osd pool create cache5 2
560 ceph osd pool create cache6 2
561 ceph osd tier add basepoolA cache5
562 ceph osd tier add basepoolB cache6
563 ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
564 ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
565 ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
566 ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
567
568 ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
569 ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
570 ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
571 ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
572
573 ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
574 ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
575
576 ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
577 ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
578 ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
579 ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
580 }
581
582 function test_auth()
583 {
584 expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
585 expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
586 ceph auth add client.xx mon 'allow *' osd "allow *"
587 ceph auth export client.xx >client.xx.keyring
588 ceph auth add client.xx -i client.xx.keyring
589 rm -f client.xx.keyring
590 ceph auth list | grep client.xx
591 ceph auth ls | grep client.xx
592 ceph auth get client.xx | grep caps | grep mon
593 ceph auth get client.xx | grep caps | grep osd
594 ceph auth get-key client.xx
595 ceph auth print-key client.xx
596 ceph auth print_key client.xx
597 ceph auth caps client.xx osd "allow rw"
598 expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
599 ceph auth get client.xx | grep osd | grep "allow rw"
600 ceph auth caps client.xx mon 'allow command "osd tree"'
601 ceph auth export | grep client.xx
602 ceph auth export -o authfile
603 ceph auth import -i authfile 2>$TMPFILE
604 check_response "imported keyring"
605
606 ceph auth export -o authfile2
607 diff authfile authfile2
608 rm authfile authfile2
609 ceph auth del client.xx
610 expect_false ceph auth get client.xx
611
612 # (almost) interactive mode
613 echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
614 ceph auth get client.xx
615 # script mode
616 echo 'auth del client.xx' | ceph
617 expect_false ceph auth get client.xx
618 }
619
620 function test_auth_profiles()
621 {
622 ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
623 mgr 'allow profile read-only'
624 ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
625 mgr 'allow profile read-write'
626 ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
627
628 ceph auth export > client.xx.keyring
629
630 # read-only is allowed all read-only commands (auth excluded)
631 ceph -n client.xx-profile-ro -k client.xx.keyring status
632 ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
633 ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
634 ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
635 # read-only gets access denied for rw commands or auth commands
636 ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
637 check_response "EACCES: access denied"
638 ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
639 check_response "EACCES: access denied"
640 ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
641 check_response "EACCES: access denied"
642
643 # read-write is allowed for all read-write commands (except auth)
644 ceph -n client.xx-profile-rw -k client.xx.keyring status
645 ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
646 ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
647 ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
648 ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
649 ceph -n client.xx-profile-rw -k client.xx.keyring log foo
650 ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
651 ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
652 # read-write gets access denied for auth commands
653 ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
654 check_response "EACCES: access denied"
655
656 # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
657 ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
658 ceph -n client.xx-profile-rd -k client.xx.keyring auth export
659 ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
660 ceph -n client.xx-profile-rd -k client.xx.keyring status
661 ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
662 check_response "EACCES: access denied"
663 ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
664 check_response "EACCES: access denied"
665 # read-only 'mon' subsystem commands are allowed
666 ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
667 # but read-write 'mon' commands are not
668 ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
669 check_response "EACCES: access denied"
670 ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
671 check_response "EACCES: access denied"
672 ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
673 check_response "EACCES: access denied"
674 ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
675 check_response "EACCES: access denied"
676
677 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
678 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
679
680 # add a new role-definer with the existing role-definer
681 ceph -n client.xx-profile-rd -k client.xx.keyring \
682 auth add client.xx-profile-rd2 mon 'allow profile role-definer'
683 ceph -n client.xx-profile-rd -k client.xx.keyring \
684 auth export > client.xx.keyring.2
685 # remove old role-definer using the new role-definer
686 ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
687 auth del client.xx-profile-rd
688 # remove the remaining role-definer with admin
689 ceph auth del client.xx-profile-rd2
690 rm -f client.xx.keyring client.xx.keyring.2
691 }
692
693 function test_mon_caps()
694 {
695 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
696 chmod +r $TEMP_DIR/ceph.client.bug.keyring
697 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
698 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
699
700 # pass --no-mon-config since we are looking for the permission denied error
701 rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
702 cat $TMPFILE
703 check_response "Permission denied"
704
705 rm -rf $TEMP_DIR/ceph.client.bug.keyring
706 ceph auth del client.bug
707 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
708 chmod +r $TEMP_DIR/ceph.client.bug.keyring
709 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
710 ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
711 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
712 rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
713 check_response "Permission denied"
714 }
715
716 function test_mon_misc()
717 {
718 # with and without verbosity
719 ceph osd dump | grep '^epoch'
720 ceph --concise osd dump | grep '^epoch'
721
722 ceph osd df | grep 'MIN/MAX VAR'
723
724 # df
725 ceph df > $TMPFILE
726 grep RAW $TMPFILE
727 grep -v DIRTY $TMPFILE
728 ceph df detail > $TMPFILE
729 grep DIRTY $TMPFILE
730 ceph df --format json > $TMPFILE
731 grep 'total_bytes' $TMPFILE
732 grep -v 'dirty' $TMPFILE
733 ceph df detail --format json > $TMPFILE
734 grep 'rd_bytes' $TMPFILE
735 grep 'dirty' $TMPFILE
736 ceph df --format xml | grep '<total_bytes>'
737 ceph df detail --format xml | grep '<rd_bytes>'
738
739 ceph fsid
740 ceph health
741 ceph health detail
742 ceph health --format json-pretty
743 ceph health detail --format xml-pretty
744
745 ceph time-sync-status
746
747 ceph node ls
748 for t in mon osd mds mgr ; do
749 ceph node ls $t
750 done
751
752 ceph_watch_start
753 mymsg="this is a test log message $$.$(date)"
754 ceph log "$mymsg"
755 ceph log last | grep "$mymsg"
756 ceph log last 100 | grep "$mymsg"
757 ceph_watch_wait "$mymsg"
758
759 ceph mgr stat
760 ceph mgr dump
761 ceph mgr module ls
762 ceph mgr module enable restful
763 expect_false ceph mgr module enable foodne
764 ceph mgr module enable foodne --force
765 ceph mgr module disable foodne
766 ceph mgr module disable foodnebizbangbash
767
768 ceph mon metadata a
769 ceph mon metadata
770 ceph mon count-metadata ceph_version
771 ceph mon versions
772
773 ceph mgr metadata
774 ceph mgr versions
775 ceph mgr count-metadata ceph_version
776
777 ceph versions
778
779 ceph node ls
780 }
781
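# check_mds_active fs_name: succeed if the named filesystem reports an
# active MDS; wait_mds_active polls this for up to $max_run iterations.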
782 function check_mds_active()
783 {
784 fs_name=$1
785 ceph fs get $fs_name | grep active
786 }
787
788 function wait_mds_active()
789 {
790 fs_name=$1
791 max_run=300
792 for i in $(seq 1 $max_run) ; do
793 if ! check_mds_active $fs_name ; then
794 echo "waiting for an active MDS daemon ($i/$max_run)"
795 sleep 5
796 else
797 break
798 fi
799 done
800 check_mds_active $fs_name
801 }
802
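# get_mds_gids fs_name: print the GIDs of all MDS daemons in the
# filesystem's MDS map (used below with "ceph mds fail <gid>").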
803 function get_mds_gids()
804 {
805 fs_name=$1
806 ceph fs get $fs_name --format=json | python3 -c "import json; import sys; print(' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()]))"
807 }
808
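# fail_all_mds fs_name: mark the filesystem cluster_down, fail every MDS by
# GID, and assert that no active MDS remains.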
809 function fail_all_mds()
810 {
811 fs_name=$1
812 ceph fs set $fs_name cluster_down true
813 mds_gids=$(get_mds_gids $fs_name)
814 for mds_gid in $mds_gids ; do
815 ceph mds fail $mds_gid
816 done
817 if check_mds_active $fs_name ; then
818 echo "An active MDS remains, something went wrong"
819 ceph fs get $fs_name
820 exit 1
821 fi
822
823 }
824
825 function remove_all_fs()
826 {
827 existing_fs=$(ceph fs ls --format=json | python3 -c "import json; import sys; print(' '.join([fs['name'] for fs in json.load(sys.stdin)]))")
828 for fs_name in $existing_fs ; do
829 echo "Removing fs ${fs_name}..."
830 fail_all_mds $fs_name
831 echo "Removing existing filesystem '${fs_name}'..."
832 ceph fs rm $fs_name --yes-i-really-mean-it
833 echo "Removed '${fs_name}'."
834 done
835 }
836
837 # So that tests requiring MDS can skip if one is not configured
838 # in the cluster at all
839 function mds_exists()
840 {
841 ceph auth ls | grep "^mds"
842 }
843
844 # some of the commands are just not idempotent.
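# without_test_dup_command cmd args ...: run cmd with CEPH_CLI_TEST_DUP_COMMAND
# temporarily unset; when it is set the ceph CLI is expected to re-issue each
# command to exercise idempotency, which breaks non-idempotent commands like
# "mds respawn" below.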
845 function without_test_dup_command()
846 {
847 if [ -z "${CEPH_CLI_TEST_DUP_COMMAND+x}" ]; then
848 "$@"
849 else
850 local saved=${CEPH_CLI_TEST_DUP_COMMAND}
851 unset CEPH_CLI_TEST_DUP_COMMAND
852 "$@"
853 CEPH_CLI_TEST_DUP_COMMAND=$saved
854 fi
855 }
856
857 function test_mds_tell()
858 {
859 local FS_NAME=cephfs
860 if ! mds_exists ; then
861 echo "Skipping test, no MDS found"
862 return
863 fi
864
865 remove_all_fs
866 ceph osd pool create fs_data 16
867 ceph osd pool create fs_metadata 16
868 ceph fs new $FS_NAME fs_metadata fs_data
869 wait_mds_active $FS_NAME
870
871 # Test injectargs by GID
872 old_mds_gids=$(get_mds_gids $FS_NAME)
873 echo Old GIDs: $old_mds_gids
874
875 for mds_gid in $old_mds_gids ; do
876 ceph tell mds.$mds_gid injectargs "--debug-mds 20"
877 done
878 expect_false ceph tell mds.a injectargs mds_max_file_recover -1
879
880 # Test respawn by rank
881 without_test_dup_command ceph tell mds.0 respawn
882 new_mds_gids=$old_mds_gids
883 while [ $new_mds_gids -eq $old_mds_gids ] ; do
884 sleep 5
885 new_mds_gids=$(get_mds_gids $FS_NAME)
886 done
887 echo New GIDs: $new_mds_gids
888
889 # Test respawn by ID
890 without_test_dup_command ceph tell mds.a respawn
891 new_mds_gids=$old_mds_gids
892 while [ $new_mds_gids -eq $old_mds_gids ] ; do
893 sleep 5
894 new_mds_gids=$(get_mds_gids $FS_NAME)
895 done
896 echo New GIDs: $new_mds_gids
897
898 remove_all_fs
899 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
900 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
901 }
902
903 function test_mon_mds()
904 {
905 local FS_NAME=cephfs
906 remove_all_fs
907
908 ceph osd pool create fs_data 16
909 ceph osd pool create fs_metadata 16
910 ceph fs new $FS_NAME fs_metadata fs_data
911
912 ceph fs set $FS_NAME cluster_down true
913 ceph fs set $FS_NAME cluster_down false
914
915 ceph mds compat rm_incompat 4
916 ceph mds compat rm_incompat 4
917
918 # We don't want any MDSs to be up, their activity can interfere with
919 # the "current_epoch + 1" checking below if they're generating updates
920 fail_all_mds $FS_NAME
921
922 ceph mds compat show
923 ceph fs dump
924 ceph fs get $FS_NAME
925 for mds_gid in $(get_mds_gids $FS_NAME) ; do
926 ceph mds metadata $mds_gid
927 done
928 ceph mds metadata
929 ceph mds versions
930 ceph mds count-metadata os
931
932 # XXX mds fail, but how do you undo it?
933 mdsmapfile=$TEMP_DIR/mdsmap.$$
934 current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
935 [ -s $mdsmapfile ]
936 rm $mdsmapfile
937
938 ceph osd pool create data2 16
939 ceph osd pool create data3 16
940 data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
941 data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
942 ceph fs add_data_pool cephfs $data2_pool
943 ceph fs add_data_pool cephfs $data3_pool
944 ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
945 check_response "Error ENOENT"
946 ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
947 check_response "Error ENOENT"
948 ceph fs rm_data_pool cephfs $data2_pool
949 ceph fs rm_data_pool cephfs $data3_pool
950 ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
951 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
952 ceph fs set cephfs max_mds 4
953 ceph fs set cephfs max_mds 3
954 ceph fs set cephfs max_mds 256
955 expect_false ceph fs set cephfs max_mds 257
956 ceph fs set cephfs max_mds 4
957 ceph fs set cephfs max_mds 256
958 expect_false ceph fs set cephfs max_mds 257
959 expect_false ceph fs set cephfs max_mds asdf
960 expect_false ceph fs set cephfs inline_data true
961 ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
962 ceph fs set cephfs inline_data yes --yes-i-really-really-mean-it
963 ceph fs set cephfs inline_data 1 --yes-i-really-really-mean-it
964 expect_false ceph fs set cephfs inline_data --yes-i-really-really-mean-it
965 ceph fs set cephfs inline_data false
966 ceph fs set cephfs inline_data no
967 ceph fs set cephfs inline_data 0
968 expect_false ceph fs set cephfs inline_data asdf
969 ceph fs set cephfs max_file_size 1048576
970 expect_false ceph fs set cephfs max_file_size 123asdf
971
972 expect_false ceph fs set cephfs allow_new_snaps
973 ceph fs set cephfs allow_new_snaps true
974 ceph fs set cephfs allow_new_snaps 0
975 ceph fs set cephfs allow_new_snaps false
976 ceph fs set cephfs allow_new_snaps no
977 expect_false ceph fs set cephfs allow_new_snaps taco
978
979 # we should never be able to add EC pools as data or metadata pools
980 # create an ec-pool...
981 ceph osd pool create mds-ec-pool 16 16 erasure
982 set +e
983 ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
984 check_response 'erasure-code' $? 22
985 set -e
986 ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
987 data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
988 metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
989
990 fail_all_mds $FS_NAME
991
992 set +e
993 # Check that rmfailed requires confirmation
994 expect_false ceph mds rmfailed 0
995 ceph mds rmfailed 0 --yes-i-really-mean-it
996 set -e
997
998 # Check that `fs new` is no longer permitted
999 expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
1000
1001 # Check that 'fs reset' runs
1002 ceph fs reset $FS_NAME --yes-i-really-mean-it
1003
1004 # Check that creating a second FS fails by default
1005 ceph osd pool create fs_metadata2 16
1006 ceph osd pool create fs_data2 16
1007 set +e
1008 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
1009 set -e
1010
1011 # Check that setting enable_multiple enables creation of second fs
1012 ceph fs flag set enable_multiple true --yes-i-really-mean-it
1013 ceph fs new cephfs2 fs_metadata2 fs_data2
1014
1015 # Clean up multi-fs stuff
1016 fail_all_mds cephfs2
1017 ceph fs rm cephfs2 --yes-i-really-mean-it
1018 ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
1019 ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
1020
1021 fail_all_mds $FS_NAME
1022
1023 # Clean up to enable subsequent fs new tests
1024 ceph fs rm $FS_NAME --yes-i-really-mean-it
1025
1026 set +e
1027 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1028 check_response 'erasure-code' $? 22
1029 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1030 check_response 'erasure-code' $? 22
1031 ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
1032 check_response 'erasure-code' $? 22
1033 set -e
1034
1035 # ... now create a cache tier in front of the EC pool...
1036 ceph osd pool create mds-tier 2
1037 ceph osd tier add mds-ec-pool mds-tier
1038 ceph osd tier set-overlay mds-ec-pool mds-tier
1039 tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
1040
1041 # Use of a readonly tier should be forbidden
1042 ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
1043 set +e
1044 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1045 check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
1046 set -e
1047
1048 # Use of a writeback tier should enable FS creation
1049 ceph osd tier cache-mode mds-tier writeback
1050 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
1051
1052 # While a FS exists using the tiered pools, I should not be allowed
1053 # to remove the tier
1054 set +e
1055 ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
1056 check_response 'in use by CephFS' $? 16
1057 ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
1058 check_response 'in use by CephFS' $? 16
1059 set -e
1060
1061 fail_all_mds $FS_NAME
1062 ceph fs rm $FS_NAME --yes-i-really-mean-it
1063
1064 # ... but we should be forbidden from using the cache pool in the FS directly.
1065 set +e
1066 ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
1067 check_response 'in use as a cache tier' $? 22
1068 ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
1069 check_response 'in use as a cache tier' $? 22
1070 ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
1071 check_response 'in use as a cache tier' $? 22
1072 set -e
1073
1074 # Clean up tier + EC pools
1075 ceph osd tier remove-overlay mds-ec-pool
1076 ceph osd tier remove mds-ec-pool mds-tier
1077
1078 # Create a FS using the 'cache' pool now that it's no longer a tier
1079 ceph fs new $FS_NAME fs_metadata mds-tier --force
1080
1081 # We should be forbidden from using this pool as a tier now that
1082 # it's in use for CephFS
1083 set +e
1084 ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
1085 check_response 'in use by CephFS' $? 16
1086 set -e
1087
1088 fail_all_mds $FS_NAME
1089 ceph fs rm $FS_NAME --yes-i-really-mean-it
1090
1091 # We should be permitted to use an EC pool with overwrites enabled
1092 # as the data pool...
1093 ceph osd pool set mds-ec-pool allow_ec_overwrites true
1094 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1095 fail_all_mds $FS_NAME
1096 ceph fs rm $FS_NAME --yes-i-really-mean-it
1097
1098 # ...but not as the metadata pool
1099 set +e
1100 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1101 check_response 'erasure-code' $? 22
1102 set -e
1103
1104 ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
1105
1106 # Create a FS and check that we can subsequently add a cache tier to it
1107 ceph fs new $FS_NAME fs_metadata fs_data --force
1108
1109 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1110 ceph osd tier add fs_metadata mds-tier
1111 ceph osd tier cache-mode mds-tier writeback
1112 ceph osd tier set-overlay fs_metadata mds-tier
1113
1114 # Removing tier should be permitted because the underlying pool is
1115 # replicated (#11504 case)
1116 ceph osd tier cache-mode mds-tier readproxy
1117 ceph osd tier remove-overlay fs_metadata
1118 ceph osd tier remove fs_metadata mds-tier
1119 ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
1120
1121 # Clean up FS
1122 fail_all_mds $FS_NAME
1123 ceph fs rm $FS_NAME --yes-i-really-mean-it
1124
1125
1126
1127 ceph mds stat
1128 # ceph mds tell mds.a getmap
1129 # ceph mds rm
1130 # ceph mds rmfailed
1131 # ceph mds set_state
1132
1133 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
1134 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
1135 }
1136
1137 function test_mon_mds_metadata()
1138 {
1139 local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
1140 test "$nmons" -gt 0
1141
1142 ceph fs dump |
1143 sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
1144 while read gid id rank; do
1145 ceph mds metadata ${gid} | grep '"hostname":'
1146 ceph mds metadata ${id} | grep '"hostname":'
1147 ceph mds metadata ${rank} | grep '"hostname":'
1148
1149 local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
1150 test "$n" -eq "$nmons"
1151 done
1152
1153 expect_false ceph mds metadata UNKNOWN
1154 }
1155
1156 function test_mon_mon()
1157 {
1158 # print help message
1159 ceph --help mon
1160 # -h works even when some arguments are passed
1161 ceph osd dump -h | grep 'osd dump'
1162 ceph osd dump 123 -h | grep 'osd dump'
1163 # no mon add/remove
1164 ceph mon dump
1165 ceph mon getmap -o $TEMP_DIR/monmap.$$
1166 [ -s $TEMP_DIR/monmap.$$ ]
1167
1168 # ceph mon tell
1169 first=$(ceph mon dump -f json | jq -r '.mons[0].name')
1170 ceph tell mon.$first mon_status
1171
1172 # test mon features
1173 ceph mon feature ls
1174 ceph mon feature set kraken --yes-i-really-mean-it
1175 expect_false ceph mon feature set abcd
1176 expect_false ceph mon feature set abcd --yes-i-really-mean-it
1177
1178 # test elector
1179 expect_failure $TEMP_DIR ceph mon add disallowed_leader $first
1180 ceph mon set election_strategy disallow
1181 ceph mon add disallowed_leader $first
1182 ceph mon set election_strategy connectivity
1183 ceph mon rm disallowed_leader $first
1184 ceph mon set election_strategy classic
1185 expect_failure $TEMP_DIR ceph mon rm disallowed_leader $first
1186
1187 # test mon stat
1188 # don't check output, just ensure it does not fail.
1189 ceph mon stat
1190 ceph mon stat -f json | jq '.'
1191 }
1192
1193 function test_mon_priority_and_weight()
1194 {
1195 for i in 0 1 65535; do
1196 ceph mon set-weight a $i
1197 w=$(ceph mon dump --format=json-pretty 2>/dev/null | jq '.mons[0].weight')
1198 [[ "$w" == "$i" ]]
1199 done
1200
1201 for i in -1 65536; do
1202 expect_false ceph mon set-weight a $i
1203 done
1204 }
1205
1206 function gen_secrets_file()
1207 {
1208 # let's assume we can have the following types
1209 # all - generates both cephx and lockbox, with mock dm-crypt key
1210 # cephx - only cephx
1211 # no_cephx - lockbox and dm-crypt, no cephx
1212 # no_lockbox - dm-crypt and cephx, no lockbox
1213 # empty - empty file
1214 # empty_json - correct json, empty map
1215 # bad_json - bad json :)
1216 #
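# a hypothetical example of how the callers below use this:
#   all_secrets=$(gen_secrets_file "all")
#   ceph osd new $(uuidgen) -i $all_secrets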
1217 local t=$1
1218 if [[ -z "$t" ]]; then
1219 t="all"
1220 fi
1221
1222 fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
1223 echo $fn
1224 if [[ "$t" == "empty" ]]; then
1225 return 0
1226 fi
1227
1228 echo "{" > $fn
1229 if [[ "$t" == "bad_json" ]]; then
1230 echo "asd: ; }" >> $fn
1231 return 0
1232 elif [[ "$t" == "empty_json" ]]; then
1233 echo "}" >> $fn
1234 return 0
1235 fi
1236
1237 cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
1238 lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
1239 dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
1240
1241 if [[ "$t" == "all" ]]; then
1242 echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
1243 elif [[ "$t" == "cephx" ]]; then
1244 echo "$cephx_secret" >> $fn
1245 elif [[ "$t" == "no_cephx" ]]; then
1246 echo "$lb_secret,$dmcrypt_key" >> $fn
1247 elif [[ "$t" == "no_lockbox" ]]; then
1248 echo "$cephx_secret,$dmcrypt_key" >> $fn
1249 else
1250 echo "unknown gen_secrets_file() type \'$fn\'"
1251 return 1
1252 fi
1253 echo "}" >> $fn
1254 return 0
1255 }
1256
1257 function test_mon_osd_create_destroy()
1258 {
1259 ceph osd new 2>&1 | grep 'EINVAL'
1260 ceph osd new '' -1 2>&1 | grep 'EINVAL'
1261 ceph osd new '' 10 2>&1 | grep 'EINVAL'
1262
1263 old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1264
1265 old_osds=$(ceph osd ls)
1266 num_osds=$(ceph osd ls | wc -l)
1267
1268 uuid=$(uuidgen)
1269 id=$(ceph osd new $uuid 2>/dev/null)
1270
1271 for i in $old_osds; do
1272 [[ "$i" != "$id" ]]
1273 done
1274
1275 ceph osd find $id
1276
1277 id2=`ceph osd new $uuid 2>/dev/null`
1278
1279 [[ $id2 == $id ]]
1280
1281 ceph osd new $uuid $id
1282
1283 id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1284 ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
1285
1286 uuid2=$(uuidgen)
1287 id2=$(ceph osd new $uuid2)
1288 ceph osd find $id2
1289 [[ "$id2" != "$id" ]]
1290
1291 ceph osd new $uuid $id2 2>&1 | grep EEXIST
1292 ceph osd new $uuid2 $id2
1293
1294 # test with secrets
1295 empty_secrets=$(gen_secrets_file "empty")
1296 empty_json=$(gen_secrets_file "empty_json")
1297 all_secrets=$(gen_secrets_file "all")
1298 cephx_only=$(gen_secrets_file "cephx")
1299 no_cephx=$(gen_secrets_file "no_cephx")
1300 no_lockbox=$(gen_secrets_file "no_lockbox")
1301 bad_json=$(gen_secrets_file "bad_json")
1302
1303 # empty secrets should be idempotent
1304 new_id=$(ceph osd new $uuid $id -i $empty_secrets)
1305 [[ "$new_id" == "$id" ]]
1306
1307 # empty json, thus empty secrets
1308 new_id=$(ceph osd new $uuid $id -i $empty_json)
1309 [[ "$new_id" == "$id" ]]
1310
1311 ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
1312
1313 ceph osd rm $id
1314 ceph osd rm $id2
1315 ceph osd setmaxosd $old_maxosd
1316
1317 ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
1318 ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
1319
1320 osds=$(ceph osd ls)
1321 id=$(ceph osd new $uuid -i $all_secrets)
1322 for i in $osds; do
1323 [[ "$i" != "$id" ]]
1324 done
1325
1326 ceph osd find $id
1327
1328 # validate secrets and dm-crypt are set
1329 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1330 s=$(cat $all_secrets | jq '.cephx_secret')
1331 [[ $k == $s ]]
1332 k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
1333 jq '.key')
1334 s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
1335 [[ $k == $s ]]
1336 ceph config-key exists dm-crypt/osd/$uuid/luks
1337
1338 osds=$(ceph osd ls)
1339 id2=$(ceph osd new $uuid2 -i $cephx_only)
1340 for i in $osds; do
1341 [[ "$i" != "$id2" ]]
1342 done
1343
1344 ceph osd find $id2
1345 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1346 s=$(cat $all_secrets | jq '.cephx_secret')
1347 [[ $k == $s ]]
1348 expect_false ceph auth get-key client.osd-lockbox.$uuid2
1349 expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
1350
1351 ceph osd destroy osd.$id2 --yes-i-really-mean-it
1352 ceph osd destroy $id2 --yes-i-really-mean-it
1353 ceph osd find $id2
1354 expect_false ceph auth get-key osd.$id2
1355 ceph osd dump | grep osd.$id2 | grep destroyed
1356
1357 id3=$id2
1358 uuid3=$(uuidgen)
1359 ceph osd new $uuid3 $id3 -i $all_secrets
1360 ceph osd dump | grep osd.$id3 | expect_false grep destroyed
1361 ceph auth get-key client.osd-lockbox.$uuid3
1362 ceph auth get-key osd.$id3
1363 ceph config-key exists dm-crypt/osd/$uuid3/luks
1364
1365 ceph osd purge-new osd.$id3 --yes-i-really-mean-it
1366 expect_false ceph osd find $id2
1367 expect_false ceph auth get-key osd.$id2
1368 expect_false ceph auth get-key client.osd-lockbox.$uuid3
1369 expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
1370 ceph osd purge osd.$id3 --yes-i-really-mean-it
1371 ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent
1372
1373 ceph osd purge osd.$id --yes-i-really-mean-it
1374 ceph osd purge 123456 --yes-i-really-mean-it
1375 expect_false ceph osd find $id
1376 expect_false ceph auth get-key osd.$id
1377 expect_false ceph auth get-key client.osd-lockbox.$uuid
1378 expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
1379
1380 rm $empty_secrets $empty_json $all_secrets $cephx_only \
1381 $no_cephx $no_lockbox $bad_json
1382
1383 for i in $(ceph osd ls); do
1384 [[ "$i" != "$id" ]]
1385 [[ "$i" != "$id2" ]]
1386 [[ "$i" != "$id3" ]]
1387 done
1388
1389 [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
1390 ceph osd setmaxosd $old_maxosd
1391
1392 }
1393
1394 function test_mon_config_key()
1395 {
1396 key=asdfasdfqwerqwreasdfuniquesa123df
1397 ceph config-key list | grep -c $key | grep 0
1398 ceph config-key get $key | grep -c bar | grep 0
1399 ceph config-key set $key bar
1400 ceph config-key get $key | grep bar
1401 ceph config-key list | grep -c $key | grep 1
1402 ceph config-key dump | grep $key | grep bar
1403 ceph config-key rm $key
1404 expect_false ceph config-key get $key
1405 ceph config-key list | grep -c $key | grep 0
1406 ceph config-key dump | grep -c $key | grep 0
1407 }
1408
1409 function test_mon_osd()
1410 {
1411 #
1412 # osd blocklist
1413 #
1414 bl=192.168.0.1:0/1000
1415 ceph osd blocklist add $bl
1416 ceph osd blocklist ls | grep $bl
1417 ceph osd blocklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
1418 ceph osd dump --format=json-pretty | grep $bl
1419 ceph osd dump | grep $bl
1420 ceph osd blocklist rm $bl
1421 ceph osd blocklist ls | expect_false grep $bl
1422
1423 bl=192.168.0.1
1424 # test without nonce, invalid nonce
1425 ceph osd blocklist add $bl
1426 ceph osd blocklist ls | grep $bl
1427 ceph osd blocklist rm $bl
1428 ceph osd blocklist ls | expect_false grep $bl
1429 expect_false "ceph osd blocklist add $bl/-1"
1430 expect_false "ceph osd blocklist add $bl/foo"
1431
1432 # test with invalid address
1433 expect_false "ceph osd blocklist add 1234.56.78.90/100"
1434
1435 # test range blocklisting
1436 bl=192.168.0.1:0/24
1437 ceph osd blocklist range add $bl
1438 ceph osd blocklist ls | grep $bl
1439 ceph osd blocklist range rm $bl
1440 ceph osd blocklist ls | expect_false grep $bl
1441 bad_bl=192.168.0.1/33
1442 expect_false ceph osd blocklist range add $bad_bl
1443
1444 # Test `clear`
1445 ceph osd blocklist add $bl
1446 ceph osd blocklist ls | grep $bl
1447 ceph osd blocklist clear
1448 ceph osd blocklist ls | expect_false grep $bl
1449
1450 # deprecated syntax?
1451 ceph osd blacklist ls
1452
1453 #
1454 # osd crush
1455 #
1456 ceph osd crush reweight-all
1457 ceph osd crush tunables legacy
1458 ceph osd crush show-tunables | grep argonaut
1459 ceph osd crush tunables bobtail
1460 ceph osd crush show-tunables | grep bobtail
1461 ceph osd crush tunables firefly
1462 ceph osd crush show-tunables | grep firefly
1463
1464 ceph osd crush set-tunable straw_calc_version 0
1465 ceph osd crush get-tunable straw_calc_version | grep 0
1466 ceph osd crush set-tunable straw_calc_version 1
1467 ceph osd crush get-tunable straw_calc_version | grep 1
1468
1469 #
1470 # require-min-compat-client
1471 expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
1472 ceph osd get-require-min-compat-client | grep luminous
1473 ceph osd dump | grep 'require_min_compat_client luminous'
1474
1475 #
1476 # osd scrub
1477 #
1478
1479 # blocking
1480 ceph osd scrub 0 --block
1481 ceph osd deep-scrub 0 --block
1482
1483 # how do I tell when these are done?
1484 ceph osd scrub 0
1485 ceph osd deep-scrub 0
1486 ceph osd repair 0
1487
1488 # pool scrub, force-recovery/backfill
1489 pool_names=`rados lspools`
1490 for pool_name in $pool_names
1491 do
1492 ceph osd pool scrub $pool_name
1493 ceph osd pool deep-scrub $pool_name
1494 ceph osd pool repair $pool_name
1495 ceph osd pool force-recovery $pool_name
1496 ceph osd pool cancel-force-recovery $pool_name
1497 ceph osd pool force-backfill $pool_name
1498 ceph osd pool cancel-force-backfill $pool_name
1499 done
1500
1501 for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \
1502 norebalance norecover notieragent
1503 do
1504 ceph osd set $f
1505 ceph osd unset $f
1506 done
1507 expect_false ceph osd set bogus
1508 expect_false ceph osd unset bogus
1509 for f in sortbitwise recover_deletes require_jewel_osds \
1510 require_kraken_osds
1511 do
1512 expect_false ceph osd set $f
1513 expect_false ceph osd unset $f
1514 done
1515 ceph osd require-osd-release quincy
1516 # can't lower
1517 expect_false ceph osd require-osd-release pacific
1518 expect_false ceph osd require-osd-release octopus
1519 # these are no-ops but should succeed.
1520
1521 ceph osd set noup
1522 ceph osd down 0
1523 ceph osd dump | grep 'osd.0 down'
1524 ceph osd unset noup
1525 max_run=1000
1526 for ((i=0; i < $max_run; i++)); do
1527 if ! ceph osd dump | grep 'osd.0 up'; then
1528 echo "waiting for osd.0 to come back up ($i/$max_run)"
1529 sleep 1
1530 else
1531 break
1532 fi
1533 done
1534 ceph osd dump | grep 'osd.0 up'
1535
1536 ceph osd dump | grep 'osd.0 up'
1537 # ceph osd find expects the OsdName, so both ints and osd.n should work.
1538 ceph osd find 1
1539 ceph osd find osd.1
1540 expect_false ceph osd find osd.xyz
1541 expect_false ceph osd find xyz
1542 expect_false ceph osd find 0.1
1543 ceph --format plain osd find 1 # falls back to json-pretty
1544 if [ `uname` == Linux ]; then
1545 ceph osd metadata 1 | grep 'distro'
1546 ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
1547 fi
1548 ceph osd out 0
1549 ceph osd dump | grep 'osd.0.*out'
1550 ceph osd in 0
1551 ceph osd dump | grep 'osd.0.*in'
1552 ceph osd find 0
1553
1554 ceph osd info 0
1555 ceph osd info osd.0
1556 expect_false ceph osd info osd.xyz
1557 expect_false ceph osd info xyz
1558 expect_false ceph osd info 42
1559 expect_false ceph osd info osd.42
1560
1561 ceph osd info
1562 info_json=$(ceph osd info --format=json | jq -cM '.')
1563 dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
1564 if [[ "${info_json}" != "${dump_json}" ]]; then
1565 echo "waiting for OSDs to settle"
1566 sleep 10
1567 info_json=$(ceph osd info --format=json | jq -cM '.')
1568 dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
1569 [[ "${info_json}" == "${dump_json}" ]]
1570 fi
1571
1572 info_json=$(ceph osd info 0 --format=json | jq -cM '.')
1573 dump_json=$(ceph osd dump --format=json | \
1574 jq -cM '.osds[] | select(.osd == 0)')
1575 [[ "${info_json}" == "${dump_json}" ]]
1576
1577 info_plain="$(ceph osd info)"
1578 dump_plain="$(ceph osd dump | grep '^osd')"
1579 [[ "${info_plain}" == "${dump_plain}" ]]
1580
1581 info_plain="$(ceph osd info 0)"
1582 dump_plain="$(ceph osd dump | grep '^osd.0')"
1583 [[ "${info_plain}" == "${dump_plain}" ]]
1584
1585 ceph osd add-nodown 0 1
1586 ceph health detail | grep 'NODOWN'
1587 ceph osd rm-nodown 0 1
1588 ! ceph health detail | grep 'NODOWN'
1589
1590 ceph osd out 0 # so we can mark it as noin later
1591 ceph osd add-noin 0
1592 ceph health detail | grep 'NOIN'
1593 ceph osd rm-noin 0
1594 ! ceph health detail | grep 'NOIN'
1595 ceph osd in 0
1596
1597 ceph osd add-noout 0
1598 ceph health detail | grep 'NOOUT'
1599 ceph osd rm-noout 0
1600 ! ceph health detail | grep 'NOOUT'
1601
1602 # test osd id parse
1603 expect_false ceph osd add-noup 797er
1604 expect_false ceph osd add-nodown u9uwer
1605 expect_false ceph osd add-noin 78~15
1606
1607 expect_false ceph osd rm-noup 1234567
1608 expect_false ceph osd rm-nodown fsadf7
1609 expect_false ceph osd rm-noout 790-fd
1610
1611 ids=`ceph osd ls-tree default`
1612 for osd in $ids
1613 do
1614 ceph osd add-nodown $osd
1615 ceph osd add-noout $osd
1616 done
1617 ceph -s | grep 'NODOWN'
1618 ceph -s | grep 'NOOUT'
1619 ceph osd rm-nodown any
1620 ceph osd rm-noout all
1621 ! ceph -s | grep 'NODOWN'
1622 ! ceph -s | grep 'NOOUT'
1623
1624 # test crush node flags
1625 ceph osd add-noup osd.0
1626 ceph osd add-nodown osd.0
1627 ceph osd add-noin osd.0
1628 ceph osd add-noout osd.0
1629 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
1630 ceph osd rm-noup osd.0
1631 ceph osd rm-nodown osd.0
1632 ceph osd rm-noin osd.0
1633 ceph osd rm-noout osd.0
1634 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
1635
1636 ceph osd crush add-bucket foo host root=default
1637 ceph osd add-noup foo
1638 ceph osd add-nodown foo
1639 ceph osd add-noin foo
1640 ceph osd add-noout foo
1641 ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
1642 ceph osd rm-noup foo
1643 ceph osd rm-nodown foo
1644 ceph osd rm-noin foo
1645 ceph osd rm-noout foo
1646 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
1647 ceph osd add-noup foo
1648 ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
1649 ceph osd crush rm foo
1650 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
1651
1652 ceph osd set-group noup osd.0
1653 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1654 ceph osd set-group noup,nodown osd.0
1655 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1656 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1657 ceph osd set-group noup,nodown,noin osd.0
1658 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1659 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1660 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1661 ceph osd set-group noup,nodown,noin,noout osd.0
1662 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1663 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1664 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1665 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1666 ceph osd unset-group noup osd.0
1667 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
1668 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1669 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1670 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1671 ceph osd unset-group noup,nodown osd.0
1672 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown'
1673 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1674 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1675 ceph osd unset-group noup,nodown,noin osd.0
1676 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin'
1677 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1678 ceph osd unset-group noup,nodown,noin,noout osd.0
1679 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1680
1681 ceph osd set-group noup,nodown,noin,noout osd.0 osd.1
1682 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1683 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1684 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1685 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1686 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup'
1687 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown'
1688 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin'
1689 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout'
1690 ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1
1691 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1692 ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1693
1694 ceph osd set-group noup all
1695 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1696 ceph osd unset-group noup all
1697 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
1698
1699 # crush node flags
1700 ceph osd crush add-bucket foo host root=default
1701 ceph osd set-group noup foo
1702 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1703 ceph osd set-group noup,nodown foo
1704 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1705 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1706 ceph osd set-group noup,nodown,noin foo
1707 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1708 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1709 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1710 ceph osd set-group noup,nodown,noin,noout foo
1711 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1712 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1713 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1714 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1715
1716 ceph osd unset-group noup foo
1717 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup'
1718 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1719 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1720 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1721 ceph osd unset-group noup,nodown foo
1722 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown'
1723 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1724 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1725 ceph osd unset-group noup,nodown,noin foo
1726 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin'
1727 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1728 ceph osd unset-group noup,nodown,noin,noout foo
1729 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout'
1730
1731 ceph osd set-group noin,noout foo
1732 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1733 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1734 ceph osd unset-group noin,noout foo
1735 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
1736
1737 ceph osd set-group noup,nodown,noin,noout foo
1738 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1739 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1740 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1741 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1742 ceph osd crush rm foo
1743 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
1744
1745 # test device class flags
1746 osd_0_device_class=$(ceph osd crush get-device-class osd.0)
1747 ceph osd set-group noup $osd_0_device_class
1748 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1749 ceph osd set-group noup,nodown $osd_0_device_class
1750 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1751 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1752 ceph osd set-group noup,nodown,noin $osd_0_device_class
1753 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1754 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1755 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1756 ceph osd set-group noup,nodown,noin,noout $osd_0_device_class
1757 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1758 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1759 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1760 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1761
1762 ceph osd unset-group noup $osd_0_device_class
1763 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup'
1764 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1765 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1766 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1767 ceph osd unset-group noup,nodown $osd_0_device_class
1768 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown'
1769 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1770 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1771 ceph osd unset-group noup,nodown,noin $osd_0_device_class
1772 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin'
1773 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1774 ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class
1775 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout'
1776
1777 ceph osd set-group noin,noout $osd_0_device_class
1778 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1779 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1780 ceph osd unset-group noin,noout $osd_0_device_class
1781 ceph osd dump -f json-pretty | jq ".device_class_flags" | expect_false grep $osd_0_device_class
1782
1783 # make sure mark out preserves weight
1784 ceph osd reweight osd.0 .5
1785 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1786 ceph osd out 0
1787 ceph osd in 0
1788 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1789
1790 ceph osd getmap -o $f
1791 [ -s $f ]
1792 rm $f
1793 save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1794 [ "$save" -gt 0 ]
1795 ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
1796 ceph osd setmaxosd 10
1797 ceph osd getmaxosd | grep 'max_osd = 10'
1798 ceph osd setmaxosd $save
1799 ceph osd getmaxosd | grep "max_osd = $save"
1800
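  # an OSD that is still restarting may answer 'ceph tell' with ENXIO; treat
  # that like EAGAIN and retry a handful of times instead of failing the test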
1801 for id in `ceph osd ls` ; do
1802 retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
1803 done
1804
1805 ceph osd rm 0 2>&1 | grep 'EBUSY'
1806
1807 local old_osds=$(echo $(ceph osd ls))
1808 id=`ceph osd create`
1809 ceph osd find $id
1810 ceph osd lost $id --yes-i-really-mean-it
1811 expect_false ceph osd setmaxosd $id
1812 local new_osds=$(echo $(ceph osd ls))
1813 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1814 ceph osd rm $id
1815 done
1816
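  # creating an OSD with a uuid that is already registered should be
  # idempotent and return the existing id, as the checks below assert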
1817 uuid=`uuidgen`
1818 id=`ceph osd create $uuid`
1819 id2=`ceph osd create $uuid`
1820 [ "$id" = "$id2" ]
1821 ceph osd rm $id
1822
1823 ceph --help osd
1824
1825 # reset max_osd.
1826 ceph osd setmaxosd $id
1827 ceph osd getmaxosd | grep "max_osd = $save"
1828 local max_osd=$save
1829
1830 ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
1831 ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
1832
1833 id=`ceph osd create $uuid $max_osd`
1834 [ "$id" = "$max_osd" ]
1835 ceph osd find $id
1836 max_osd=$((max_osd + 1))
1837 ceph osd getmaxosd | grep "max_osd = $max_osd"
1838
1839 ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
1840 ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
1841 id2=`ceph osd create $uuid`
1842 [ "$id" = "$id2" ]
1843 id2=`ceph osd create $uuid $id`
1844 [ "$id" = "$id2" ]
1845
1846 uuid=`uuidgen`
1847 local gap_start=$max_osd
1848 id=`ceph osd create $uuid $((gap_start + 100))`
1849 [ "$id" = "$((gap_start + 100))" ]
1850 max_osd=$((id + 1))
1851 ceph osd getmaxosd | grep "max_osd = $max_osd"
1852
1853 ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
1854
1855 #
1856 # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
1857 # is repeated and consumes two osd ids, not just one.
1858 #
1859 local next_osd=$gap_start
1860 id=`ceph osd create $(uuidgen)`
1861 [ "$id" = "$next_osd" ]
1862
1863 next_osd=$((id + 1))
1864 id=`ceph osd create $(uuidgen) $next_osd`
1865 [ "$id" = "$next_osd" ]
1866
1867 local new_osds=$(echo $(ceph osd ls))
1868 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1869 [ $id -ge $save ]
1870 ceph osd rm $id
1871 done
1872 ceph osd setmaxosd $save
1873
1874 ceph osd ls
1875 ceph osd pool create data 16
1876 ceph osd pool application enable data rados
1877 ceph osd lspools | grep data
1878 ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
1879 ceph osd map data foo namespace | grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
1880 ceph osd pool delete data data --yes-i-really-really-mean-it
1881
1882 ceph osd pause
1883 ceph osd dump | grep 'flags.*pauserd,pausewr'
1884 ceph osd unpause
1885
1886 ceph osd tree
1887 ceph osd tree up
1888 ceph osd tree down
1889 ceph osd tree in
1890 ceph osd tree out
1891 ceph osd tree destroyed
1892 ceph osd tree up in
1893 ceph osd tree up out
1894 ceph osd tree down in
1895 ceph osd tree down out
1896 ceph osd tree out down
1897 expect_false ceph osd tree up down
1898 expect_false ceph osd tree up destroyed
1899 expect_false ceph osd tree down destroyed
1900 expect_false ceph osd tree up down destroyed
1901 expect_false ceph osd tree in out
1902 expect_false ceph osd tree up foo
1903
1904 ceph osd metadata
1905 ceph osd count-metadata os
1906 ceph osd versions
1907
1908 ceph osd perf
1909 ceph osd blocked-by
1910
1911 ceph osd stat | grep up
1912 }
1913
1914 function test_mon_crush()
1915 {
1916 f=$TEMP_DIR/map.$$
1917 epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
1918 [ -s $f ]
1919 [ "$epoch" -gt 1 ]
1920 nextepoch=$(( $epoch + 1 ))
1921 echo epoch $epoch nextepoch $nextepoch
1922 rm -f $f.epoch
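  # setcrushmap takes the expected current epoch as a guard: a wrong (future)
  # epoch must be rejected, while the correct one applies the map and prints
  # the resulting epoch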
1923 expect_false ceph osd setcrushmap $nextepoch -i $f
1924 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1925 echo gotepoch $gotepoch
1926 [ "$gotepoch" -eq "$nextepoch" ]
1927 # should be idempotent
1928 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1929 echo epoch $gotepoch
1930 [ "$gotepoch" -eq "$nextepoch" ]
1931 rm $f
1932 }
1933
1934 function test_mon_osd_pool()
1935 {
1936 #
1937 # osd pool
1938 #
1939 ceph osd pool create data 16
1940 ceph osd pool application enable data rados
1941 ceph osd pool mksnap data datasnap
1942 rados -p data lssnap | grep datasnap
1943 ceph osd pool rmsnap data datasnap
1944 expect_false ceph osd pool rmsnap pool_fake snapshot
1945 ceph osd pool delete data data --yes-i-really-really-mean-it
1946
1947 ceph osd pool create data2 16
1948 ceph osd pool application enable data2 rados
1949 ceph osd pool rename data2 data3
1950 ceph osd lspools | grep data3
1951 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
1952
1953 ceph osd pool create replicated 16 16 replicated
1954 ceph osd pool create replicated 1 16 replicated
1955 ceph osd pool create replicated 16 16 # default is replicated
1956 ceph osd pool create replicated 16 # default is replicated, pgp_num = pg_num
1957 ceph osd pool application enable replicated rados
1958 # should fail because the type is not the same
1959 expect_false ceph osd pool create replicated 16 16 erasure
1960 ceph osd lspools | grep replicated
1961 ceph osd pool create ec_test 1 1 erasure
1962 ceph osd pool application enable ec_test rados
1963 set +e
1964 ceph osd count-metadata osd_objectstore | grep 'bluestore'
1965 if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
1966 ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
1967 check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
1968 else
1969 ceph osd pool set ec_test allow_ec_overwrites true || return 1
1970 expect_false ceph osd pool set ec_test allow_ec_overwrites false
1971 fi
1972 set -e
1973 ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
1974 ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
1975
1976 # test create pool with rule
1977 ceph osd erasure-code-profile set foo foo
1978 ceph osd erasure-code-profile ls | grep foo
1979 ceph osd crush rule create-erasure foo foo
1980 ceph osd pool create erasure 16 16 erasure foo
1981 expect_false ceph osd erasure-code-profile rm foo
1982 ceph osd pool delete erasure erasure --yes-i-really-really-mean-it
1983 ceph osd crush rule rm foo
1984 ceph osd erasure-code-profile rm foo
1985
1986 # autoscale mode
1987 ceph osd pool create modeon --autoscale-mode=on
1988 ceph osd dump | grep modeon | grep 'autoscale_mode on'
1989 ceph osd pool create modewarn --autoscale-mode=warn
1990 ceph osd dump | grep modewarn | grep 'autoscale_mode warn'
1991 ceph osd pool create modeoff --autoscale-mode=off
1992 ceph osd dump | grep modeoff | grep 'autoscale_mode off'
1993 ceph osd pool delete modeon modeon --yes-i-really-really-mean-it
1994 ceph osd pool delete modewarn modewarn --yes-i-really-really-mean-it
1995 ceph osd pool delete modeoff modeoff --yes-i-really-really-mean-it
1996 }
1997
1998 function test_mon_osd_pool_quota()
1999 {
2000 #
2001 # test osd pool set/get quota
2002 #
2003
2004 # create tmp pool
2005 ceph osd pool create tmp-quota-pool 32
2006 ceph osd pool application enable tmp-quota-pool rados
2007 #
2008 # set erroneous quotas
2009 #
2010 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
2011 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
2012 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
2013 #
2014 # set valid quotas
2015 #
2016 ceph osd pool set-quota tmp-quota-pool max_bytes 10
2017 ceph osd pool set-quota tmp-quota-pool max_objects 10M
2018 #
2019 # get quotas in json-pretty format
2020 #
2021 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
2022 grep '"quota_max_objects":.*10000000'
2023 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
2024 grep '"quota_max_bytes":.*10'
2025 #
2026 # get quotas
2027 #
2028 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B'
2029 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects'
2030 #
2031 # set valid quotas with unit prefix
2032 #
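  # note: the K suffix on byte quotas is parsed with binary semantics, so 10K
  # and 10Ki both end up as 10240 bytes, which is what the '10 Ki' greps below
  # expect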
2033 ceph osd pool set-quota tmp-quota-pool max_bytes 10K
2034 #
2035 # get quotas
2036 #
2037 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
2038 #
2039 # set valid quotas with unit prefix
2040 #
2041 ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
2042 #
2043 # get quotas
2044 #
2045 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
2046 #
2047 #
2048 # reset pool quotas
2049 #
2050 ceph osd pool set-quota tmp-quota-pool max_bytes 0
2051 ceph osd pool set-quota tmp-quota-pool max_objects 0
2052 #
2053 # test N/A quotas
2054 #
2055 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
2056 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
2057 #
2058 # cleanup tmp pool
2059 ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
2060 }
2061
2062 function test_mon_pg()
2063 {
2064 # Make sure we start healthy.
2065 wait_for_health_ok
2066
2067 ceph pg debug unfound_objects_exist
2068 ceph pg debug degraded_pgs_exist
2069 ceph pg deep-scrub 1.0
2070 ceph pg dump
2071 ceph pg dump pgs_brief --format=json
2072 ceph pg dump pgs --format=json
2073 ceph pg dump pools --format=json
2074 ceph pg dump osds --format=json
2075 ceph pg dump sum --format=json
2076 ceph pg dump all --format=json
2077 ceph pg dump pgs_brief osds --format=json
2078 ceph pg dump pools osds pgs_brief --format=json
2079 ceph pg dump_json
2080 ceph pg dump_pools_json
2081 ceph pg dump_stuck inactive
2082 ceph pg dump_stuck unclean
2083 ceph pg dump_stuck stale
2084 ceph pg dump_stuck undersized
2085 ceph pg dump_stuck degraded
2086 ceph pg ls
2087 ceph pg ls 1
2088 ceph pg ls stale
2089 expect_false ceph pg ls scrubq
2090 ceph pg ls active stale repair recovering
2091 ceph pg ls 1 active
2092 ceph pg ls 1 active stale
2093 ceph pg ls-by-primary osd.0
2094 ceph pg ls-by-primary osd.0 1
2095 ceph pg ls-by-primary osd.0 active
2096 ceph pg ls-by-primary osd.0 active stale
2097 ceph pg ls-by-primary osd.0 1 active stale
2098 ceph pg ls-by-osd osd.0
2099 ceph pg ls-by-osd osd.0 1
2100 ceph pg ls-by-osd osd.0 active
2101 ceph pg ls-by-osd osd.0 active stale
2102 ceph pg ls-by-osd osd.0 1 active stale
2103 ceph pg ls-by-pool rbd
2104 ceph pg ls-by-pool rbd active stale
2105 # can't test this...
2106 # ceph pg force_create_pg
2107 ceph pg getmap -o $TEMP_DIR/map.$$
2108 [ -s $TEMP_DIR/map.$$ ]
2109 ceph pg map 1.0 | grep acting
2110 ceph pg repair 1.0
2111 ceph pg scrub 1.0
2112
2113 ceph osd set-full-ratio .962
2114 ceph osd dump | grep '^full_ratio 0.962'
2115 ceph osd set-backfillfull-ratio .912
2116 ceph osd dump | grep '^backfillfull_ratio 0.912'
2117 ceph osd set-nearfull-ratio .892
2118 ceph osd dump | grep '^nearfull_ratio 0.892'
2119
2120 # Check health status
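  # the ratios are expected to satisfy nearfull <= backfillfull <= full;
  # violating that ordering should raise the OSD_OUT_OF_ORDER_FULL warning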
2121 ceph osd set-nearfull-ratio .913
2122 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
2123 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
2124 ceph osd set-nearfull-ratio .892
2125 ceph osd set-backfillfull-ratio .963
2126 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
2127 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
2128 ceph osd set-backfillfull-ratio .912
2129
2130 # Check injected full results
2131 $SUDO ceph tell osd.0 injectfull nearfull
2132 wait_for_health "OSD_NEARFULL"
2133 ceph health detail | grep "osd.0 is near full"
2134 $SUDO ceph tell osd.0 injectfull none
2135 wait_for_health_ok
2136
2137 $SUDO ceph tell osd.1 injectfull backfillfull
2138 wait_for_health "OSD_BACKFILLFULL"
2139 ceph health detail | grep "osd.1 is backfill full"
2140 $SUDO ceph tell osd.1 injectfull none
2141 wait_for_health_ok
2142
2143 $SUDO ceph tell osd.2 injectfull failsafe
2144 # failsafe and full are the same as far as the monitor is concerned
2145 wait_for_health "OSD_FULL"
2146 ceph health detail | grep "osd.2 is full"
2147 $SUDO ceph tell osd.2 injectfull none
2148 wait_for_health_ok
2149
2150 $SUDO ceph tell osd.0 injectfull full
2151 wait_for_health "OSD_FULL"
2152 ceph health detail | grep "osd.0 is full"
2153 $SUDO ceph tell osd.0 injectfull none
2154 wait_for_health_ok
2155
2156 ceph pg stat | grep 'pgs:'
2157 ceph pg 1.0 query
2158 ceph tell 1.0 query
2159 first=$(ceph mon dump -f json | jq -r '.mons[0].name')
2160 ceph tell mon.$first quorum enter
2161 ceph quorum_status
2162 ceph report | grep osd_stats
2163 ceph status
2164 ceph -s
2165
2166 #
2167 # tell osd version
2168 #
2169 ceph tell osd.0 version
2170 expect_false ceph tell osd.9999 version
2171 expect_false ceph tell osd.foo version
2172
2173 # back to pg stuff
2174
2175 ceph tell osd.0 dump_pg_recovery_stats | grep Started
2176
2177 ceph osd reweight 0 0.9
2178 expect_false ceph osd reweight 0 -1
2179 ceph osd reweight osd.0 1
2180
2181 ceph osd primary-affinity osd.0 .9
2182 expect_false ceph osd primary-affinity osd.0 -2
2183 expect_false ceph osd primary-affinity osd.9999 .5
2184 ceph osd primary-affinity osd.0 1
2185
2186 ceph osd pool set rbd size 2
2187 ceph osd pg-temp 1.0 0 1
2188 ceph osd pg-temp 1.0 osd.1 osd.0
2189 expect_false ceph osd pg-temp 1.0 0 1 2
2190 expect_false ceph osd pg-temp asdf qwer
2191 expect_false ceph osd pg-temp 1.0 asdf
2192 ceph osd pg-temp 1.0 # cleanup pg-temp
2193
2194 ceph pg repeer 1.0
2195 expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore
2196
2197 # don't test ceph osd primary-temp for now
2198 }
2199
2200 function test_mon_osd_pool_set()
2201 {
2202 TEST_POOL_GETSET=pool_getset
2203 expect_false ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio -0.3
2204 expect_true ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio 1
2205 ceph osd pool application enable $TEST_POOL_GETSET rados
2206 ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off
2207 wait_for_clean
2208 ceph osd pool get $TEST_POOL_GETSET all
2209
2210 for s in pg_num pgp_num size min_size crush_rule target_size_ratio; do
2211 ceph osd pool get $TEST_POOL_GETSET $s
2212 done
2213
2214 old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
2215 (( new_size = old_size + 1 ))
2216 ceph osd pool set $TEST_POOL_GETSET size $new_size --yes-i-really-mean-it
2217 ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
2218 ceph osd pool set $TEST_POOL_GETSET size $old_size --yes-i-really-mean-it
2219
2220 ceph osd pool create pool_erasure 1 1 erasure
2221 ceph osd pool application enable pool_erasure rados
2222 wait_for_clean
2223 set +e
2224 ceph osd pool set pool_erasure size 4444 2>$TMPFILE
2225 check_response 'not change the size'
2226 set -e
2227 ceph osd pool get pool_erasure erasure_code_profile
2228 ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it
2229
2230 for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub bulk; do
2231 ceph osd pool set $TEST_POOL_GETSET $flag false
2232 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
2233 ceph osd pool set $TEST_POOL_GETSET $flag true
2234 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
2235 ceph osd pool set $TEST_POOL_GETSET $flag 1
2236 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
2237 ceph osd pool set $TEST_POOL_GETSET $flag 0
2238 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
2239 expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
2240 expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
2241 done
2242
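  # per-pool scrub interval overrides: an unset option prints nothing (so
  # 'get | grep .' must fail), and setting it back to 0 clears the override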
2243 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
2244 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
2245 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
2246 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
2247 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
2248
2249 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
2250 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
2251 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
2252 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
2253 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
2254
2255 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
2256 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
2257 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
2258 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
2259 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
2260
2261 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
2262 ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
2263 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
2264 ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
2265 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'
2266 ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
2267 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
2268 expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11
2269 expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11
2270
2271 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
2272 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
2273 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
2274 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
2275 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
2276
2277 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
2278 ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
2279 ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
2280 ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
2281 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
2282
2283 expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio -3
2284 expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio abc
2285 expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 0.1
2286 expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 1
2287 ceph osd pool get $TEST_POOL_GETSET target_size_ratio | grep 'target_size_ratio: 1'
2288
2289 ceph osd pool set $TEST_POOL_GETSET nopgchange 1
2290 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
2291 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
2292 ceph osd pool set $TEST_POOL_GETSET nopgchange 0
2293 ceph osd pool set $TEST_POOL_GETSET pg_num 10
2294 wait_for_clean
2295 ceph osd pool set $TEST_POOL_GETSET pgp_num 10
2296 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0
2297 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0
2298
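  # grow pg_num by 32 PGs per OSD so the bump scales with however many OSDs
  # the test cluster happens to have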
2299 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
2300 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
2301 ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
2302 ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
2303 wait_for_clean
2304
2305 ceph osd pool set $TEST_POOL_GETSET nosizechange 1
2306 expect_false ceph osd pool set $TEST_POOL_GETSET size 2
2307 expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
2308 ceph osd pool set $TEST_POOL_GETSET nosizechange 0
2309 ceph osd pool set $TEST_POOL_GETSET size 2
2310 wait_for_clean
2311 ceph osd pool set $TEST_POOL_GETSET min_size 2
2312
2313 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
2314 ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
2315
2316 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
2317 ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
2318
2319 ceph osd pool get rbd crush_rule | grep 'crush_rule: '
2320
2321 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2322 ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
2323 ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
2324 ceph osd pool set $TEST_POOL_GETSET compression_mode unset
2325 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2326
2327 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2328 ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
2329 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
2330 ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
2331 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2332
2333 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2334 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
2335 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
2336 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
2337 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
2338 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
2339 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2340
2341 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2342 ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
2343 ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
2344 ceph osd pool set $TEST_POOL_GETSET csum_type unset
2345 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2346
2347 for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2348 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2349 ceph osd pool set $TEST_POOL_GETSET $size 100
2350 ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
2351 ceph osd pool set $TEST_POOL_GETSET $size 0
2352 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2353 done
2354
2355 ceph osd pool set $TEST_POOL_GETSET nodelete 1
2356 expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2357 ceph osd pool set $TEST_POOL_GETSET nodelete 0
2358 ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2359
2360 }
2361
2362 function test_mon_osd_tiered_pool_set()
2363 {
2364 # this is really a tier pool
2365 ceph osd pool create real-tier 2
2366 ceph osd tier add rbd real-tier
2367
2368 # expect us to be unable to set negative values for hit_set_*
2369 for o in hit_set_period hit_set_count hit_set_fpp; do
2370 expect_false ceph osd pool set real-tier $o -1
2371 done
2372
2373 # and hit_set_fpp should be in range 0..1
2374 expect_false ceph osd pool set real-tier hit_set_fpp 2
2375
2376 ceph osd pool set real-tier hit_set_type explicit_hash
2377 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
2378 ceph osd pool set real-tier hit_set_type explicit_object
2379 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
2380 ceph osd pool set real-tier hit_set_type bloom
2381 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
2382 expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
2383 ceph osd pool set real-tier hit_set_period 123
2384 ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
2385 ceph osd pool set real-tier hit_set_count 12
2386 ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
2387 ceph osd pool set real-tier hit_set_fpp .01
2388 ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
2389
2390 ceph osd pool set real-tier target_max_objects 123
2391 ceph osd pool get real-tier target_max_objects | \
2392 grep 'target_max_objects:[ \t]\+123'
2393 ceph osd pool set real-tier target_max_bytes 123456
2394 ceph osd pool get real-tier target_max_bytes | \
2395 grep 'target_max_bytes:[ \t]\+123456'
2396 ceph osd pool set real-tier cache_target_dirty_ratio .123
2397 ceph osd pool get real-tier cache_target_dirty_ratio | \
2398 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
2399 expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
2400 expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
2401 ceph osd pool set real-tier cache_target_dirty_high_ratio .123
2402 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
2403 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
2404 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
2405 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
2406 ceph osd pool set real-tier cache_target_full_ratio .123
2407 ceph osd pool get real-tier cache_target_full_ratio | \
2408 grep 'cache_target_full_ratio:[ \t]\+0.123'
2409 ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
2410 ceph osd pool set real-tier cache_target_full_ratio 1.0
2411 ceph osd pool set real-tier cache_target_full_ratio 0
2412 expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
2413 ceph osd pool set real-tier cache_min_flush_age 123
2414 ceph osd pool get real-tier cache_min_flush_age | \
2415 grep 'cache_min_flush_age:[ \t]\+123'
2416 ceph osd pool set real-tier cache_min_evict_age 234
2417 ceph osd pool get real-tier cache_min_evict_age | \
2418 grep 'cache_min_evict_age:[ \t]\+234'
2419
2420 # iec vs si units
2421 ceph osd pool set real-tier target_max_objects 1K
2422 ceph osd pool get real-tier target_max_objects | grep 1000
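  # target_max_objects counts objects, so 1K is parsed as SI 1000; the
  # byte-valued options below are parsed with binary (IEC) semantics, which is
  # what the 1024/1048576 checks in the loop assert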
2423 for o in target_max_bytes target_size_bytes compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2424 ceph osd pool set real-tier $o 1Ki # with i suffix
2425 val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
2426 [[ $val == 1024 ]]
2427 ceph osd pool set real-tier $o 1M # no i suffix
2428 val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
2429 [[ $val == 1048576 ]]
2430 done
2431
2432 # this is not a tier pool
2433 ceph osd pool create fake-tier 2
2434 ceph osd pool application enable fake-tier rados
2435 wait_for_clean
2436
2437 expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
2438 expect_false ceph osd pool get fake-tier hit_set_type
2439 expect_false ceph osd pool set fake-tier hit_set_type explicit_object
2440 expect_false ceph osd pool get fake-tier hit_set_type
2441 expect_false ceph osd pool set fake-tier hit_set_type bloom
2442 expect_false ceph osd pool get fake-tier hit_set_type
2443 expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
2444 expect_false ceph osd pool set fake-tier hit_set_period 123
2445 expect_false ceph osd pool get fake-tier hit_set_period
2446 expect_false ceph osd pool set fake-tier hit_set_count 12
2447 expect_false ceph osd pool get fake-tier hit_set_count
2448 expect_false ceph osd pool set fake-tier hit_set_fpp .01
2449 expect_false ceph osd pool get fake-tier hit_set_fpp
2450
2451 expect_false ceph osd pool set fake-tier target_max_objects 123
2452 expect_false ceph osd pool get fake-tier target_max_objects
2453 expect_false ceph osd pool set fake-tier target_max_bytes 123456
2454 expect_false ceph osd pool get fake-tier target_max_bytes
2455 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
2456 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2457 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
2458 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
2459 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
2460 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2461 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
2462 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
2463 expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
2464 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2465 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
2466 expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
2467 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
2468 expect_false ceph osd pool set fake-tier cache_min_flush_age 123
2469 expect_false ceph osd pool get fake-tier cache_min_flush_age
2470 expect_false ceph osd pool set fake-tier cache_min_evict_age 234
2471 expect_false ceph osd pool get fake-tier cache_min_evict_age
2472
2473 ceph osd tier remove rbd real-tier
2474 ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
2475 ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
2476 }
2477
2478 function test_mon_osd_erasure_code()
2479 {
2480
2481 ceph osd erasure-code-profile set fooprofile a=b c=d
2482 ceph osd erasure-code-profile set fooprofile a=b c=d
2483 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2484 ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
2485 ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2486 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
2487 # make sure the legacy ruleset-* options don't work anymore
2488 expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
2489 ceph osd erasure-code-profile set barprofile crush-failure-domain=host
2490 # clean up
2491 ceph osd erasure-code-profile rm fooprofile
2492 ceph osd erasure-code-profile rm barprofile
2493
2494 # try weird k and m values
2495 expect_false ceph osd erasure-code-profile set badk k=1 m=1
2496 expect_false ceph osd erasure-code-profile set badk k=1 m=2
2497 expect_false ceph osd erasure-code-profile set badk k=0 m=2
2498 expect_false ceph osd erasure-code-profile set badk k=-1 m=2
2499 expect_false ceph osd erasure-code-profile set badm k=2 m=0
2500 expect_false ceph osd erasure-code-profile set badm k=2 m=-1
2501 ceph osd erasure-code-profile set good k=2 m=1
2502 ceph osd erasure-code-profile rm good
2503 }
2504
2505 function test_mon_osd_misc()
2506 {
2507 set +e
2508
2509 # expect error about missing 'pool' argument
2510 ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
2511
2512 # expect error about unused argument foo
2513 ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
2514
2515 # expect an error (overload must be higher than 100) for an invalid overload percentage
2516 ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
2517
2518 set -e
2519
2520 local old_bytes_per_osd=$(ceph config get mgr mon_reweight_min_bytes_per_osd)
2521 local old_pgs_per_osd=$(ceph config get mgr mon_reweight_min_pgs_per_osd)
2522 # otherwise ceph-mgr complains like:
2523 # Error EDOM: Refusing to reweight: we only have 5372 kb used across all osds!
2524 # Error EDOM: Refusing to reweight: we only have 20 PGs across 3 osds!
2525 ceph config set mgr mon_reweight_min_bytes_per_osd 0
2526 ceph config set mgr mon_reweight_min_pgs_per_osd 0
2527 ceph osd reweight-by-utilization 110
2528 ceph osd reweight-by-utilization 110 .5
2529 expect_false ceph osd reweight-by-utilization 110 0
2530 expect_false ceph osd reweight-by-utilization 110 -0.1
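  # test-reweight-by-utilization takes <overload> <max_change> <max_osds>;
  # the osd count must be a positive number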
2531 ceph osd test-reweight-by-utilization 110 .5 --no-increasing
2532 ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
2533 expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
2534 expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
2535 ceph osd reweight-by-pg 110
2536 ceph osd test-reweight-by-pg 110 .5
2537 ceph osd reweight-by-pg 110 rbd
2538 ceph osd reweight-by-pg 110 .5 rbd
2539 expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
2540 # restore the settings
2541 ceph config set mgr mon_reweight_min_bytes_per_osd $old_bytes_per_osd
2542 ceph config set mgr mon_reweight_min_pgs_per_osd $old_pgs_per_osd
2543 }
2544
2545 function test_admin_heap_profiler()
2546 {
2547 do_test=1
2548 set +e
2549 # expect 'heap' commands to be correctly parsed
2550 ceph tell osd.0 heap stats 2>$TMPFILE
2551 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2552 echo "tcmalloc not enabled; skip heap profiler test"
2553 do_test=0
2554 fi
2555 set -e
2556
2557 [[ $do_test -eq 0 ]] && return 0
2558
2559 $SUDO ceph tell osd.0 heap start_profiler
2560 $SUDO ceph tell osd.0 heap dump
2561 $SUDO ceph tell osd.0 heap stop_profiler
2562 $SUDO ceph tell osd.0 heap release
2563 }
2564
2565 function test_osd_bench()
2566 {
2567 # test osd bench limits
2568 # As we should not rely on defaults (as they may change over time),
2569 # let's inject some values and perform some simple tests
2570 # max iops: 10 # 10 IOPS
2571 # max throughput: 10485760 # 10MB/s
2572 # max block size: 2097152 # 2MB
2573 # duration: 10 # 10 seconds
2574
2575 local args="\
2576 --osd-bench-duration 10 \
2577 --osd-bench-max-block-size 2097152 \
2578 --osd-bench-large-size-max-throughput 10485760 \
2579 --osd-bench-small-size-max-iops 10"
2580 ceph tell osd.0 injectargs ${args## }
2581
2582 # anything with a bs larger than 2097152 must fail
2583 expect_false ceph tell osd.0 bench 1 2097153
2584 # but a block size equal to the configured maximum (2097152) must succeed
2585 ceph tell osd.0 bench 1 2097152
2586
2587 # we assume 1MB as a large bs; anything lower is a small bs
2588 # for a 4096-byte bs, for 10 seconds, we are limited by IOPS
2589 # max count: 409600 (bytes)
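  # (derivation: 10 IOPS * 10 s * 4096 B = 409600 B)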
2590
2591 # more than max count must not be allowed
2592 expect_false ceph tell osd.0 bench 409601 4096
2593 # but 409600 must succeed
2594 ceph tell osd.0 bench 409600 4096
2595
2596 # for a large bs, we are limited by throughput.
2597 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
2598 # the max count will be (10MB * 10s) = 100MB
2599 # max count: 104857600 (bytes)
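  # (derivation: 10485760 B/s * 10 s = 104857600 B)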
2600
2601 # more than max count must not be allowed
2602 expect_false ceph tell osd.0 bench 104857601 2097152
2603 # up to max count must be allowed
2604 ceph tell osd.0 bench 104857600 2097152
2605 }
2606
2607 function test_osd_negative_filestore_merge_threshold()
2608 {
2609 $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
2610 expect_config_value "osd.0" "filestore_merge_threshold" -1
2611 }
2612
2613 function test_mon_tell()
2614 {
2615 for m in mon.a mon.b; do
2616 ceph tell $m sessions
2617 ceph_watch_start debug audit
2618 ceph tell mon.a sessions
2619 ceph_watch_wait "${m} \[DBG\] from.*cmd='sessions' args=\[\]: dispatch"
2620 done
2621 expect_false ceph tell mon.foo version
2622 }
2623
2624 function test_mon_ping()
2625 {
2626 ceph ping mon.a
2627 ceph ping mon.b
2628 expect_false ceph ping mon.foo
2629
2630 ceph ping mon.\*
2631 }
2632
2633 function test_mon_deprecated_commands()
2634 {
2635 # current DEPRECATED commands are marked with FLAG(DEPRECATED)
2636 #
2637 # Testing should be accomplished by setting
2638 # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
2639 # each one of these commands.
2640
2641 ceph tell mon.* injectargs '--mon-debug-deprecated-as-obsolete'
2642 expect_false ceph config-key list 2> $TMPFILE
2643 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2644
2645 ceph tell mon.* injectargs '--no-mon-debug-deprecated-as-obsolete'
2646 }
2647
2648 function test_mon_cephdf_commands()
2649 {
2650 # ceph df detail:
2651 # pool section:
2652 # RAW USED: the approximate raw space used by each pool, out of the raw total
2653
2654 ceph osd pool create cephdf_for_test 1 1 replicated
2655 ceph osd pool application enable cephdf_for_test rados
2656 ceph osd pool set cephdf_for_test size 2
2657
2658 dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
2659 rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
2660
2661 # wait for update
2662 for i in `seq 1 10`; do
2663 rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
2664 sleep 1
2665 done
2666 # "rados ls" goes straight to the osd, but "ceph df" is served by the mon, so we need
2667 # to sync the mon with the osd stats
2668 flush_pg_stats
2669 local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
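  # the pool was created with size 2, so raw usage should be roughly twice the
  # stored bytes; compare stored*2 against stored_raw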
2670 stored=`ceph df detail --format=json | jq "$jq_filter.stored * 2"`
2671 stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
2672
2673 ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
2674 rm ./cephdf_for_test
2675
2676 expect_false test $stored != $stored_raw
2677 }
2678
2679 function test_mon_pool_application()
2680 {
2681 ceph osd pool create app_for_test 16
2682
2683 ceph osd pool application enable app_for_test rbd
2684 expect_false ceph osd pool application enable app_for_test rgw
2685 ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
2686 ceph osd pool ls detail | grep "application rbd,rgw"
2687 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2688
2689 expect_false ceph osd pool application set app_for_test cephfs key value
2690 ceph osd pool application set app_for_test rbd key1 value1
2691 ceph osd pool application set app_for_test rbd key2 value2
2692 ceph osd pool application set app_for_test rgw key1 value1
2693 ceph osd pool application get app_for_test rbd key1 | grep 'value1'
2694 ceph osd pool application get app_for_test rbd key2 | grep 'value2'
2695 ceph osd pool application get app_for_test rgw key1 | grep 'value1'
2696
2697 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
2698
2699 ceph osd pool application rm app_for_test rgw key1
2700 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
2701 ceph osd pool application rm app_for_test rbd key2
2702 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
2703 ceph osd pool application rm app_for_test rbd key1
2704 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2705 ceph osd pool application rm app_for_test rbd key1 # should be idempotent
2706
2707 expect_false ceph osd pool application disable app_for_test rgw
2708 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2709 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
2710 ceph osd pool ls detail | grep "application rbd"
2711 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
2712
2713 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2714 ceph osd pool ls detail | grep -v "application "
2715 ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
2716
2717 ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
2718 }
2719
2720 function test_mon_tell_help_command()
2721 {
2722 ceph tell mon.a help | grep sync_force
2723 ceph tell mon.a -h | grep sync_force
2724 ceph tell mon.a config -h | grep 'config diff get'
2725
2726 # wrong target
2727 expect_false ceph tell mon.zzz help
2728 }
2729
2730 function test_mon_stdin_stdout()
2731 {
2732 echo foo | ceph config-key set test_key -i -
2733 ceph config-key get test_key -o - | grep -c foo | grep -q 1
2734 }
2735
2736 function test_osd_tell_help_command()
2737 {
2738 ceph tell osd.1 help
2739 expect_false ceph tell osd.100 help
2740 }
2741
2742 function test_osd_compact()
2743 {
2744 ceph tell osd.1 compact
2745 $SUDO ceph daemon osd.1 compact
2746 }
2747
2748 function test_mds_tell_help_command()
2749 {
2750 local FS_NAME=cephfs
2751 if ! mds_exists ; then
2752 echo "Skipping test, no MDS found"
2753 return
2754 fi
2755
2756 remove_all_fs
2757 ceph osd pool create fs_data 16
2758 ceph osd pool create fs_metadata 16
2759 ceph fs new $FS_NAME fs_metadata fs_data
2760 wait_mds_active $FS_NAME
2761
2762
2763 ceph tell mds.a help
2764 expect_false ceph tell mds.z help
2765
2766 remove_all_fs
2767 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
2768 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
2769 }
2770
2771 function test_mgr_tell()
2772 {
2773 ceph tell mgr version
2774 }
2775
2776 function test_mgr_devices()
2777 {
2778 ceph device ls
2779 expect_false ceph device info doesnotexist
2780 expect_false ceph device get-health-metrics doesnotexist
2781 }
2782
2783 function test_per_pool_scrub_status()
2784 {
2785 ceph osd pool create noscrub_pool 16
2786 ceph osd pool create noscrub_pool2 16
2787 ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
2788 ceph -s --format json | \
2789 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2790 expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
2791 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail |
2792 expect_false grep -q "Pool .* has .*scrub.* flag"
2793 ceph health detail | \
2794 expect_false grep -q "Pool .* has .*scrub.* flag"
2795
2796 ceph osd pool set noscrub_pool noscrub 1
2797 ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
2798 ceph -s --format json | \
2799 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2800 expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
2801 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2802 expect_true grep -q "Pool noscrub_pool has noscrub flag"
2803 ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
2804
2805 ceph osd pool set noscrub_pool nodeep-scrub 1
2806 ceph osd pool set noscrub_pool2 nodeep-scrub 1
2807 ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
2808 ceph -s --format json | \
2809 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2810 expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
2811 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2812 expect_true grep -q "Pool noscrub_pool has noscrub flag"
2813 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2814 expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
2815 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2816 expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
2817 ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
2818 ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
2819 ceph health detail | expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
2820
2821 ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it
2822 ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it
2823 }
2824
2825 #
2826 # New tests should be added to the TESTS array below
2827 #
2828 # Individual tests may be run using the '-t <testname>' argument
2829 # The user can specify '-t <testname>' as many times as she wants
2830 #
2831 # Tests will be run in order presented in the TESTS array, or in
2832 # the order specified by the '-t <testname>' options.
2833 #
2834 # '-l' will list all the available test names
2835 # '-h' will show usage
2836 #
2837 # The test maintains backward compatibility: not specifying arguments
2838 # will run all tests following the order they appear in the TESTS array.
2839 #
2840
2841 set +x
2842 MON_TESTS+=" mon_injectargs"
2843 MON_TESTS+=" mon_injectargs_SI"
2844 for i in `seq 9`; do
2845 MON_TESTS+=" tiering_$i";
2846 done
2847 MON_TESTS+=" auth"
2848 MON_TESTS+=" auth_profiles"
2849 MON_TESTS+=" mon_misc"
2850 MON_TESTS+=" mon_mon"
2851 MON_TESTS+=" mon_osd"
2852 MON_TESTS+=" mon_config_key"
2853 MON_TESTS+=" mon_crush"
2854 MON_TESTS+=" mon_osd_create_destroy"
2855 MON_TESTS+=" mon_osd_pool"
2856 MON_TESTS+=" mon_osd_pool_quota"
2857 MON_TESTS+=" mon_pg"
2858 MON_TESTS+=" mon_osd_pool_set"
2859 MON_TESTS+=" mon_osd_tiered_pool_set"
2860 MON_TESTS+=" mon_osd_erasure_code"
2861 MON_TESTS+=" mon_osd_misc"
2862 MON_TESTS+=" mon_tell"
2863 MON_TESTS+=" mon_ping"
2864 MON_TESTS+=" mon_deprecated_commands"
2865 MON_TESTS+=" mon_caps"
2866 MON_TESTS+=" mon_cephdf_commands"
2867 MON_TESTS+=" mon_tell_help_command"
2868 MON_TESTS+=" mon_stdin_stdout"
2869
2870 OSD_TESTS+=" osd_bench"
2871 OSD_TESTS+=" osd_negative_filestore_merge_threshold"
2872 OSD_TESTS+=" tiering_agent"
2873 OSD_TESTS+=" admin_heap_profiler"
2874 OSD_TESTS+=" osd_tell_help_command"
2875 OSD_TESTS+=" osd_compact"
2876 OSD_TESTS+=" per_pool_scrub_status"
2877
2878 MDS_TESTS+=" mds_tell"
2879 MDS_TESTS+=" mon_mds"
2880 MDS_TESTS+=" mon_mds_metadata"
2881 MDS_TESTS+=" mds_tell_help_command"
2882
2883 MGR_TESTS+=" mgr_tell"
2884 MGR_TESTS+=" mgr_devices"
2885
2886 TESTS+=$MON_TESTS
2887 TESTS+=$OSD_TESTS
2888 TESTS+=$MDS_TESTS
2889 TESTS+=$MGR_TESTS
2890
2891 #
2892 # "main" follows
2893 #
2894
2895 function list_tests()
2896 {
2897 echo "AVAILABLE TESTS"
2898 for i in $TESTS; do
2899 echo " $i"
2900 done
2901 }
2902
2903 function usage()
2904 {
2905 echo "usage: $0 [-h|-l] [--no-sanity-check] [--asok-does-not-need-root] [--test-mon] [--test-osd] [--test-mds] [--test-mgr] [-t <testname> [-t <testname>...]]"
2906 }
2907
2908 tests_to_run=()
2909
2910 sanity_check=true
2911
2912 while [[ $# -gt 0 ]]; do
2913 opt=$1
2914
2915 case "$opt" in
2916 "-l" )
2917 do_list=1
2918 ;;
2919 "--asok-does-not-need-root" )
2920 SUDO=""
2921 ;;
2922 "--no-sanity-check" )
2923 sanity_check=false
2924 ;;
2925 "--test-mon" )
2926 tests_to_run+="$MON_TESTS"
2927 ;;
2928 "--test-osd" )
2929 tests_to_run+="$OSD_TESTS"
2930 ;;
2931 "--test-mds" )
2932 tests_to_run+="$MDS_TESTS"
2933 ;;
2934 "--test-mgr" )
2935 tests_to_run+="$MGR_TESTS"
2936 ;;
2937 "-t" )
2938 shift
2939 if [[ -z "$1" ]]; then
2940 echo "missing argument to '-t'"
2941 usage ;
2942 exit 1
2943 fi
2944 tests_to_run+=" $1"
2945 ;;
2946 "-h" )
2947 usage ;
2948 exit 0
2949 ;;
2950 esac
2951 shift
2952 done
2953
2954 if [[ $do_list -eq 1 ]]; then
2955 list_tests ;
2956 exit 0
2957 fi
2958
2959 ceph osd pool create rbd 16
2960
2961 if test -z "$tests_to_run" ; then
2962 tests_to_run="$TESTS"
2963 fi
2964
2965 if $sanity_check ; then
2966 wait_no_osd_down
2967 fi
2968 for i in $tests_to_run; do
2969 if $sanity_check ; then
2970 check_no_osd_down
2971 fi
2972 set -x
2973 test_${i}
2974 set +x
2975 done
2976 if $sanity_check ; then
2977 check_no_osd_down
2978 fi
2979
2980 set -x
2981
2982 echo OK