1 #!/usr/bin/env bash
2 # -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
3 # vim: ts=8 sw=8 ft=bash smarttab
4 set -x
5
6 source $(dirname $0)/../../standalone/ceph-helpers.sh
7
8 set -e
9 set -o functrace
10 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
11 SUDO=${SUDO:-sudo}
12 export CEPH_DEV=1
13
14 function check_no_osd_down()
15 {
16 ! ceph osd dump | grep ' down '
17 }
18
19 function wait_no_osd_down()
20 {
21 max_run=300
22 for i in $(seq 1 $max_run) ; do
23 if ! check_no_osd_down ; then
24 echo "waiting for osd(s) to come back up ($i/$max_run)"
25 sleep 1
26 else
27 break
28 fi
29 done
30 check_no_osd_down
31 }
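# Usage sketch (not executed here): wait_no_osd_down is meant to be called
# after reviving OSDs so later assertions do not race the osdmap update, e.g.:
#
#   ceph osd unset noup
#   wait_no_osd_down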
32
33 function expect_false()
34 {
35 set -x
36 if "$@"; then return 1; else return 0; fi
37 }
38
39 function expect_true()
40 {
41 set -x
42 if ! "$@"; then return 1; else return 0; fi
43 }
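# Usage sketch (not executed here): expect_true passes only if the wrapped
# command exits 0, expect_false only if it exits non-zero, e.g.:
#
#   expect_true  ceph osd dump
#   expect_false ceph osd set bogus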
44
45 TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
46 trap "rm -fr $TEMP_DIR" 0
47
48 TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
49
50 #
51 # retry_eagain max cmd args ...
52 #
53 # retry cmd args ... up to $max times, as long as it exits with an error
54 # and its output contains the string EAGAIN
55 #
56 function retry_eagain()
57 {
58 local max=$1
59 shift
60 local status
61 local tmpfile=$TEMP_DIR/retry_eagain.$$
62 local count
63 for count in $(seq 1 $max) ; do
64 status=0
65 "$@" > $tmpfile 2>&1 || status=$?
66 if test $status = 0 ||
67 ! grep --quiet EAGAIN $tmpfile ; then
68 break
69 fi
70 sleep 1
71 done
72 if test $count = $max ; then
73 echo retried with non-zero exit status, $max times: "$@" >&2
74 fi
75 cat $tmpfile
76 rm $tmpfile
77 return $status
78 }
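# Usage sketch (not executed here): retry a possibly transient mon command,
# giving up after 5 attempts if the output keeps containing EAGAIN:
#
#   retry_eagain 5 ceph osd dump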
79
80 #
81 # map_enxio_to_eagain cmd arg ...
82 #
83 # add EAGAIN to the output of cmd arg ... if the output contains
84 # ENXIO.
85 #
86 function map_enxio_to_eagain()
87 {
88 local status=0
89 local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
90
91 "$@" > $tmpfile 2>&1 || status=$?
92 if test $status != 0 &&
93 grep --quiet ENXIO $tmpfile ; then
94 echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
95 fi
96 cat $tmpfile
97 rm $tmpfile
98 return $status
99 }
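# Usage sketch (not executed here): the two helpers compose, so a command
# whose failure output contains ENXIO is retried as if it had said EAGAIN:
#
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version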
100
101 function check_response()
102 {
103 expected_string=$1
104 retcode=$2
105 expected_retcode=$3
106 if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
107 echo "return code invalid: got $retcode, expected $expected_retcode" >&2
108 exit 1
109 fi
110
111 if ! grep --quiet -- "$expected_string" $TMPFILE ; then
112 echo "Didn't find $expected_string in output" >&2
113 cat $TMPFILE >&2
114 exit 1
115 fi
116 }
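# Usage sketch (not executed here), mirroring how the tests below use it:
# capture the output of a command expected to fail into $TMPFILE, then
# assert on the error text (and optionally the exit code):
#
#   $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
#   check_response "(22) Invalid argument"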
117
118 function get_config_value_or_die()
119 {
120 local target config_opt raw val
121
122 target=$1
123 config_opt=$2
124
125 raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
126 if [[ $? -ne 0 ]]; then
127 echo "error obtaining config opt '$config_opt' from '$target': $raw"
128 exit 1
129 fi
130
131 raw=`echo $raw | sed -e 's/[{} "]//g'`
132 val=`echo $raw | cut -f2 -d:`
133
134 echo "$val"
135 return 0
136 }
137
138 function expect_config_value()
139 {
140 local target config_opt expected_val val
141 target=$1
142 config_opt=$2
143 expected_val=$3
144
145 val=$(get_config_value_or_die $target $config_opt)
146
147 if [[ "$val" != "$expected_val" ]]; then
148 echo "expected '$expected_val', got '$val'"
149 exit 1
150 fi
151 }
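# Usage sketch (not executed here): read an option via the daemon's admin
# socket and assert its current value, as the SI/IEC tests below do:
#
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10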
152
153 function ceph_watch_start()
154 {
155 local watch_opt=--watch
156
157 if [ -n "$1" ]; then
158 watch_opt=--watch-$1
159 if [ -n "$2" ]; then
160 watch_opt+=" --watch-channel $2"
161 fi
162 fi
163
164 CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
165 ceph $watch_opt > $CEPH_WATCH_FILE &
166 CEPH_WATCH_PID=$!
167
168 # wait until the "ceph" client is connected and receiving
169 # log messages from monitor
170 for i in `seq 3`; do
171 grep -q "cluster" $CEPH_WATCH_FILE && break
172 sleep 1
173 done
174 }
175
176 function ceph_watch_wait()
177 {
178 local regexp=$1
179 local timeout=30
180
181 if [ -n "$2" ]; then
182 timeout=$2
183 fi
184
185 for i in `seq ${timeout}`; do
186 grep -q "$regexp" $CEPH_WATCH_FILE && break
187 sleep 1
188 done
189
190 kill $CEPH_WATCH_PID
191
192 if ! grep "$regexp" $CEPH_WATCH_FILE; then
193 echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
194 cat $CEPH_WATCH_FILE >&2
195 return 1
196 fi
197 }
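# Usage sketch (not executed here), as used by test_mon_misc below: start a
# background watcher, emit a cluster log entry, then block until the pattern
# shows up in the watch file:
#
#   ceph_watch_start
#   ceph log "my marker $$"
#   ceph_watch_wait "my marker $$"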
198
199 function test_mon_injectargs()
200 {
201 ceph tell osd.0 injectargs --no-osd_enable_op_tracker
202 ceph tell osd.0 config get osd_enable_op_tracker | grep false
203 ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500'
204 ceph tell osd.0 config get osd_enable_op_tracker | grep true
205 ceph tell osd.0 config get osd_op_history_duration | grep 500
206 ceph tell osd.0 injectargs --no-osd_enable_op_tracker
207 ceph tell osd.0 config get osd_enable_op_tracker | grep false
208 ceph tell osd.0 injectargs -- --osd_enable_op_tracker
209 ceph tell osd.0 config get osd_enable_op_tracker | grep true
210 ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600'
211 ceph tell osd.0 config get osd_enable_op_tracker | grep true
212 ceph tell osd.0 config get osd_op_history_duration | grep 600
213
214 ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200'
215 ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200
216
217 ceph tell osd.0 injectargs -- '--mon_probe_timeout 2'
218 ceph tell osd.0 config get mon_probe_timeout | grep 2
219
220 ceph tell osd.0 injectargs -- '--mon-lease 6'
221 ceph tell osd.0 config get mon_lease | grep 6
222
223 # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
224 expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 2> $TMPFILE || return 1
225 check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
226
227 expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
228 ceph tell osd.0 injectargs -- '--osd_op_history_duration'
229
230 }
231
232 function test_mon_injectargs_SI()
233 {
234 # Test SI units during injectargs and 'config set'
235 # We only aim at testing the units are parsed accordingly
236 # and don't intend to test whether the options being set
237 # actually expect SI units to be passed.
238 # Keep in mind that all integer based options that are not based on bytes
239 # (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed to
240 # base 10.
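# For example, with base-10 (SI) parsing:
#   10K -> 10 * 1000   = 10000
#   1G  -> 1 * 1000^3  = 1000000000
# which is what the expect_config_value checks below assert.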
241 initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
242 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
243 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
244 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
245 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
246 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
247 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
248 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
249 check_response "(22) Invalid argument"
250 # now test with injectargs
251 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
252 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
253 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
254 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
255 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
256 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
257 expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
258 expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
259 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
260 }
261
262 function test_mon_injectargs_IEC()
263 {
264 # Test IEC units during injectargs and 'config set'
265 # We only aim at testing the units are parsed accordingly
266 # and don't intend to test whether the options being set
267 # actually expect IEC units to be passed.
268 # Keep in mind that all integer based options that are based on bytes
269 # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
270 # unit modifiers (for backwards compatibility and convenience) and be parsed
271 # to base 2.
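# For example, with base-2 (IEC) parsing:
#   15G  -> 15 * 1024^3 = 16106127360
#   16Gi -> 16 * 1024^3 = 17179869184
# which is what the expect_config_value checks below assert.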
272 initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
273 $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
274 expect_config_value "mon.a" "mon_data_size_warn" 15000000000
275 $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
276 expect_config_value "mon.a" "mon_data_size_warn" 16106127360
277 $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
278 expect_config_value "mon.a" "mon_data_size_warn" 17179869184
279 $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
280 check_response "(22) Invalid argument"
281 # now test with injectargs
282 ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
283 expect_config_value "mon.a" "mon_data_size_warn" 15000000000
284 ceph tell mon.a injectargs '--mon_data_size_warn 15G'
285 expect_config_value "mon.a" "mon_data_size_warn" 16106127360
286 ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
287 expect_config_value "mon.a" "mon_data_size_warn" 17179869184
288 expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
289 $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
290 }
291
292 function test_tiering_agent()
293 {
294 local slow=slow_eviction
295 local fast=fast_eviction
296 ceph osd pool create $slow 1 1
297 ceph osd pool application enable $slow rados
298 ceph osd pool create $fast 1 1
299 ceph osd tier add $slow $fast
300 ceph osd tier cache-mode $fast writeback
301 ceph osd tier set-overlay $slow $fast
302 ceph osd pool set $fast hit_set_type bloom
303 rados -p $slow put obj1 /etc/group
304 ceph osd pool set $fast target_max_objects 1
305 ceph osd pool set $fast hit_set_count 1
306 ceph osd pool set $fast hit_set_period 5
307 # wait for the object to be evicted from the cache
308 local evicted
309 evicted=false
310 for i in `seq 1 300` ; do
311 if ! rados -p $fast ls | grep obj1 ; then
312 evicted=true
313 break
314 fi
315 sleep 1
316 done
317 $evicted # assert
318 # the object is proxy read and promoted to the cache
319 rados -p $slow get obj1 - >/dev/null
320 # wait for the promoted object to be evicted again
321 evicted=false
322 for i in `seq 1 300` ; do
323 if ! rados -p $fast ls | grep obj1 ; then
324 evicted=true
325 break
326 fi
327 sleep 1
328 done
329 $evicted # assert
330 ceph osd tier remove-overlay $slow
331 ceph osd tier remove $slow $fast
332 ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
333 ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
334 }
335
336 function test_tiering_1()
337 {
338 # tiering
339 ceph osd pool create slow 2
340 ceph osd pool application enable slow rados
341 ceph osd pool create slow2 2
342 ceph osd pool application enable slow2 rados
343 ceph osd pool create cache 2
344 ceph osd pool create cache2 2
345 ceph osd tier add slow cache
346 ceph osd tier add slow cache2
347 expect_false ceph osd tier add slow2 cache
348 # application metadata should propagate to the tiers
349 ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow") | .application_metadata["rados"]' | grep '{}'
350 ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow2") | .application_metadata["rados"]' | grep '{}'
351 ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache") | .application_metadata["rados"]' | grep '{}'
352 ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache2") | .application_metadata["rados"]' | grep '{}'
353 # forward is removed/deprecated
354 expect_false ceph osd tier cache-mode cache forward
355 expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
356 # test some state transitions
357 ceph osd tier cache-mode cache writeback
358 expect_false ceph osd tier cache-mode cache readonly
359 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
360 ceph osd tier cache-mode cache proxy
361 ceph osd tier cache-mode cache readproxy
362 ceph osd tier cache-mode cache none
363 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
364 ceph osd tier cache-mode cache none
365 ceph osd tier cache-mode cache writeback
366 ceph osd tier cache-mode cache proxy
367 ceph osd tier cache-mode cache writeback
368 expect_false ceph osd tier cache-mode cache none
369 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
370 # test with dirty objects in the tier pool
371 # tier pool currently set to 'writeback'
372 rados -p cache put /etc/passwd /etc/passwd
373 flush_pg_stats
374 # 1 dirty object in pool 'cache'
375 ceph osd tier cache-mode cache proxy
376 expect_false ceph osd tier cache-mode cache none
377 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
378 ceph osd tier cache-mode cache writeback
379 # remove object from tier pool
380 rados -p cache rm /etc/passwd
381 rados -p cache cache-flush-evict-all
382 flush_pg_stats
383 # no dirty objects in pool 'cache'
384 ceph osd tier cache-mode cache proxy
385 ceph osd tier cache-mode cache none
386 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
387 TRIES=0
388 while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
389 do
390 grep 'currently creating pgs' $TMPFILE
391 TRIES=$(( $TRIES + 1 ))
392 test $TRIES -ne 60
393 sleep 3
394 done
395 expect_false ceph osd pool set cache pg_num 4
396 ceph osd tier cache-mode cache none
397 ceph osd tier set-overlay slow cache
398 expect_false ceph osd tier set-overlay slow cache2
399 expect_false ceph osd tier remove slow cache
400 ceph osd tier remove-overlay slow
401 ceph osd tier set-overlay slow cache2
402 ceph osd tier remove-overlay slow
403 ceph osd tier remove slow cache
404 ceph osd tier add slow2 cache
405 expect_false ceph osd tier set-overlay slow cache
406 ceph osd tier set-overlay slow2 cache
407 ceph osd tier remove-overlay slow2
408 ceph osd tier remove slow2 cache
409 ceph osd tier remove slow cache2
410
411 # make sure a non-empty pool fails
412 rados -p cache2 put /etc/passwd /etc/passwd
413 while ! ceph df | grep cache2 | grep ' 1 ' ; do
414 echo waiting for pg stats to flush
415 sleep 2
416 done
417 expect_false ceph osd tier add slow cache2
418 ceph osd tier add slow cache2 --force-nonempty
419 ceph osd tier remove slow cache2
420
421 ceph osd pool ls | grep cache2
422 ceph osd pool ls -f json-pretty | grep cache2
423 ceph osd pool ls detail | grep cache2
424 ceph osd pool ls detail -f json-pretty | grep cache2
425
426 ceph osd pool delete slow slow --yes-i-really-really-mean-it
427 ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
428 ceph osd pool delete cache cache --yes-i-really-really-mean-it
429 ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
430 }
431
432 function test_tiering_2()
433 {
434 # make sure we can't clobber snapshot state
435 ceph osd pool create snap_base 2
436 ceph osd pool application enable snap_base rados
437 ceph osd pool create snap_cache 2
438 ceph osd pool mksnap snap_cache snapname
439 expect_false ceph osd tier add snap_base snap_cache
440 ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
441 ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
442 }
443
444 function test_tiering_3()
445 {
446 # make sure we can't create snapshot on tier
447 ceph osd pool create basex 2
448 ceph osd pool application enable basex rados
449 ceph osd pool create cachex 2
450 ceph osd tier add basex cachex
451 expect_false ceph osd pool mksnap cache snapname
452 ceph osd tier remove basex cachex
453 ceph osd pool delete basex basex --yes-i-really-really-mean-it
454 ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
455 }
456
457 function test_tiering_4()
458 {
459 # make sure we can't create an ec pool tier
460 ceph osd pool create eccache 2 2 erasure
461 expect_false ceph osd set-require-min-compat-client bobtail
462 ceph osd pool create repbase 2
463 ceph osd pool application enable repbase rados
464 expect_false ceph osd tier add repbase eccache
465 ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
466 ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
467 }
468
469 function test_tiering_5()
470 {
471 # convenient add-cache command
472 ceph osd pool create slow 2
473 ceph osd pool application enable slow rados
474 ceph osd pool create cache3 2
475 ceph osd tier add-cache slow cache3 1024000
476 ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
477 ceph osd tier remove slow cache3 2> $TMPFILE || true
478 check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
479 ceph osd tier remove-overlay slow
480 ceph osd tier remove slow cache3
481 ceph osd pool ls | grep cache3
482 ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
483 ! ceph osd pool ls | grep cache3 || exit 1
484 ceph osd pool delete slow slow --yes-i-really-really-mean-it
485 }
486
487 function test_tiering_6()
488 {
489 # check that add-cache actually works
490 ceph osd pool create datapool 2
491 ceph osd pool application enable datapool rados
492 ceph osd pool create cachepool 2
493 ceph osd tier add-cache datapool cachepool 1024000
494 ceph osd tier cache-mode cachepool writeback
495 rados -p datapool put object /etc/passwd
496 rados -p cachepool stat object
497 rados -p cachepool cache-flush object
498 rados -p datapool stat object
499 ceph osd tier remove-overlay datapool
500 ceph osd tier remove datapool cachepool
501 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
502 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
503 }
504
505 function test_tiering_7()
506 {
507 # protection against pool removal when used as tiers
508 ceph osd pool create datapool 2
509 ceph osd pool application enable datapool rados
510 ceph osd pool create cachepool 2
511 ceph osd tier add-cache datapool cachepool 1024000
512 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
513 check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
514 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
515 check_response "EBUSY: pool 'datapool' has tiers cachepool"
516 ceph osd tier remove-overlay datapool
517 ceph osd tier remove datapool cachepool
518 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
519 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
520 }
521
522 function test_tiering_8()
523 {
524 ## check health check
525 ceph osd set notieragent
526 ceph osd pool create datapool 2
527 ceph osd pool application enable datapool rados
528 ceph osd pool create cache4 2
529 ceph osd tier add-cache datapool cache4 1024000
530 ceph osd tier cache-mode cache4 writeback
531 tmpfile=$(mktemp|grep tmp)
532 dd if=/dev/zero of=$tmpfile bs=4K count=1
533 ceph osd pool set cache4 target_max_objects 200
534 ceph osd pool set cache4 target_max_bytes 1000000
535 rados -p cache4 put foo1 $tmpfile
536 rados -p cache4 put foo2 $tmpfile
537 rm -f $tmpfile
538 flush_pg_stats
539 ceph df | grep datapool | grep ' 2 '
540 ceph osd tier remove-overlay datapool
541 ceph osd tier remove datapool cache4
542 ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
543 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
544 ceph osd unset notieragent
545 }
546
547 function test_tiering_9()
548 {
549 # make sure 'tier remove' behaves as we expect
550 # i.e., removing a tier from a pool that's not its base pool only
551 # results in a 'pool foo is now (or already was) not a tier of bar'
552 #
553 ceph osd pool create basepoolA 2
554 ceph osd pool application enable basepoolA rados
555 ceph osd pool create basepoolB 2
556 ceph osd pool application enable basepoolB rados
557 poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
558 poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
559
560 ceph osd pool create cache5 2
561 ceph osd pool create cache6 2
562 ceph osd tier add basepoolA cache5
563 ceph osd tier add basepoolB cache6
564 ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
565 ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
566 ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
567 ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
568
569 ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
570 ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
571 ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
572 ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
573
574 ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
575 ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
576
577 ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
578 ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
579 ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
580 ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
581 }
582
583 function test_auth()
584 {
585 expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
586 expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
587 ceph auth add client.xx mon 'allow *' osd "allow *"
588 ceph auth export client.xx >client.xx.keyring
589 ceph auth add client.xx -i client.xx.keyring
590 rm -f client.xx.keyring
591 ceph auth list | grep client.xx
592 ceph auth ls | grep client.xx
593 ceph auth get client.xx | grep caps | grep mon
594 ceph auth get client.xx | grep caps | grep osd
595 ceph auth get-key client.xx
596 ceph auth print-key client.xx
597 ceph auth print_key client.xx
598 ceph auth caps client.xx osd "allow rw"
599 expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
600 ceph auth get client.xx | grep osd | grep "allow rw"
601 ceph auth caps client.xx mon 'allow command "osd tree"'
602 ceph auth export | grep client.xx
603 ceph auth export -o authfile
604 ceph auth import -i authfile
605
606 ceph auth export -o authfile2
607 diff authfile authfile2
608 rm authfile authfile2
609 ceph auth del client.xx
610 expect_false ceph auth get client.xx
611
612 # (almost) interactive mode
613 echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
614 ceph auth get client.xx
615 # script mode
616 echo 'auth del client.xx' | ceph
617 expect_false ceph auth get client.xx
618 }
619
620 function test_auth_profiles()
621 {
622 ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
623 mgr 'allow profile read-only'
624 ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
625 mgr 'allow profile read-write'
626 ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
627
628 ceph auth export > client.xx.keyring
629
630 # read-only is allowed all read-only commands (auth excluded)
631 ceph -n client.xx-profile-ro -k client.xx.keyring status
632 ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
633 ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
634 ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
635 # read-only gets access denied for rw commands or auth commands
636 ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
637 check_response "EACCES: access denied"
638 ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
639 check_response "EACCES: access denied"
640 ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
641 check_response "EACCES: access denied"
642
643 # read-write is allowed for all read-write commands (except auth)
644 ceph -n client.xx-profile-rw -k client.xx.keyring status
645 ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
646 ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
647 ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
648 ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
649 ceph -n client.xx-profile-rw -k client.xx.keyring log foo
650 ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
651 ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
652 # read-write gets access denied for auth commands
653 ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
654 check_response "EACCES: access denied"
655
656 # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
657 ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
658 ceph -n client.xx-profile-rd -k client.xx.keyring auth export
659 ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
660 ceph -n client.xx-profile-rd -k client.xx.keyring status
661 ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
662 check_response "EACCES: access denied"
663 ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
664 check_response "EACCES: access denied"
665 # read-only 'mon' subsystem commands are allowed
666 ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
667 # but read-write 'mon' commands are not
668 ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
669 check_response "EACCES: access denied"
670 ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
671 check_response "EACCES: access denied"
672 ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
673 check_response "EACCES: access denied"
674 ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
675 check_response "EACCES: access denied"
676
677 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
678 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
679
680 # add a new role-definer with the existing role-definer
681 ceph -n client.xx-profile-rd -k client.xx.keyring \
682 auth add client.xx-profile-rd2 mon 'allow profile role-definer'
683 ceph -n client.xx-profile-rd -k client.xx.keyring \
684 auth export > client.xx.keyring.2
685 # remove old role-definer using the new role-definer
686 ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
687 auth del client.xx-profile-rd
688 # remove the remaining role-definer with admin
689 ceph auth del client.xx-profile-rd2
690 rm -f client.xx.keyring client.xx.keyring.2
691 }
692
693 function test_mon_caps()
694 {
695 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
696 chmod +r $TEMP_DIR/ceph.client.bug.keyring
697 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
698 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
699
700 # pass --no-mon-config since we are looking for the permission denied error
701 rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
702 cat $TMPFILE
703 check_response "Permission denied"
704
705 rm -rf $TEMP_DIR/ceph.client.bug.keyring
706 ceph auth del client.bug
707 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
708 chmod +r $TEMP_DIR/ceph.client.bug.keyring
709 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
710 ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
711 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
712 rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
713 check_response "Permission denied"
714 }
715
716 function test_mon_misc()
717 {
718 # with and without verbosity
719 ceph osd dump | grep '^epoch'
720 ceph --concise osd dump | grep '^epoch'
721
722 ceph osd df | grep 'MIN/MAX VAR'
723
724 # df
725 ceph df > $TMPFILE
726 grep RAW $TMPFILE
727 grep -v DIRTY $TMPFILE
728 ceph df detail > $TMPFILE
729 grep DIRTY $TMPFILE
730 ceph df --format json > $TMPFILE
731 grep 'total_bytes' $TMPFILE
732 grep -v 'dirty' $TMPFILE
733 ceph df detail --format json > $TMPFILE
734 grep 'rd_bytes' $TMPFILE
735 grep 'dirty' $TMPFILE
736 ceph df --format xml | grep '<total_bytes>'
737 ceph df detail --format xml | grep '<rd_bytes>'
738
739 ceph fsid
740 ceph health
741 ceph health detail
742 ceph health --format json-pretty
743 ceph health detail --format xml-pretty
744
745 ceph time-sync-status
746
747 ceph node ls
748 for t in mon osd mds mgr ; do
749 ceph node ls $t
750 done
751
752 ceph_watch_start
753 mymsg="this is a test log message $$.$(date)"
754 ceph log "$mymsg"
755 ceph log last | grep "$mymsg"
756 ceph log last 100 | grep "$mymsg"
757 ceph_watch_wait "$mymsg"
758
759 ceph mgr stat
760 ceph mgr dump
761 ceph mgr dump | jq -e '.active_clients[0].name'
762 ceph mgr module ls
763 ceph mgr module enable restful
764 expect_false ceph mgr module enable foodne
765 ceph mgr module enable foodne --force
766 ceph mgr module disable foodne
767 ceph mgr module disable foodnebizbangbash
768
769 ceph mon metadata a
770 ceph mon metadata
771 ceph mon count-metadata ceph_version
772 ceph mon versions
773
774 ceph mgr metadata
775 ceph mgr versions
776 ceph mgr count-metadata ceph_version
777
778 ceph versions
779
780 ceph node ls
781 }
782
783 function check_mds_active()
784 {
785 fs_name=$1
786 ceph fs get $fs_name | grep active
787 }
788
789 function wait_mds_active()
790 {
791 fs_name=$1
792 max_run=300
793 for i in $(seq 1 $max_run) ; do
794 if ! check_mds_active $fs_name ; then
795 echo "waiting for an active MDS daemon ($i/$max_run)"
796 sleep 5
797 else
798 break
799 fi
800 done
801 check_mds_active $fs_name
802 }
803
804 function get_mds_gids()
805 {
806 fs_name=$1
807 ceph fs get $fs_name --format=json | python3 -c "import json; import sys; print(' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()]))"
808 }
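# Usage sketch (not executed here): collect the GIDs of all MDS daemons in a
# filesystem, e.g. to fail them one by one as fail_all_mds below does:
#
#   mds_gids=$(get_mds_gids cephfs)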
809
810 function fail_all_mds()
811 {
812 fs_name=$1
813 ceph fs set $fs_name cluster_down true
814 mds_gids=$(get_mds_gids $fs_name)
815 for mds_gid in $mds_gids ; do
816 ceph mds fail $mds_gid
817 done
818 if check_mds_active $fs_name ; then
819 echo "An active MDS remains, something went wrong"
820 ceph fs get $fs_name
821 exit -1
822 fi
823
824 }
825
826 function remove_all_fs()
827 {
828 existing_fs=$(ceph fs ls --format=json | python3 -c "import json; import sys; print(' '.join([fs['name'] for fs in json.load(sys.stdin)]))")
829 for fs_name in $existing_fs ; do
830 echo "Removing fs ${fs_name}..."
831 fail_all_mds $fs_name
832 echo "Removing existing filesystem '${fs_name}'..."
833 ceph fs rm $fs_name --yes-i-really-mean-it
834 echo "Removed '${fs_name}'."
835 done
836 }
837
838 # So that tests requiring MDS can skip if one is not configured
839 # in the cluster at all
840 function mds_exists()
841 {
842 ceph auth ls | grep "^mds"
843 }
844
845 # some of the commands are just not idempotent.
846 function without_test_dup_command()
847 {
848 if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
849 $@
850 else
851 local saved=${CEPH_CLI_TEST_DUP_COMMAND}
852 unset CEPH_CLI_TEST_DUP_COMMAND
853 $@
851 CEPH_CLI_TEST_DUP_COMMAND=$saved
855 fi
856 }
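# Usage sketch (not executed here): run a non-idempotent command with CLI
# command duplication disabled for just that invocation, as test_mds_tell
# below does:
#
#   without_test_dup_command ceph tell mds.0 respawn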
857
858 function test_mds_tell()
859 {
860 local FS_NAME=cephfs
861 if ! mds_exists ; then
862 echo "Skipping test, no MDS found"
863 return
864 fi
865
866 remove_all_fs
867 ceph osd pool create fs_data 16
868 ceph osd pool create fs_metadata 16
869 ceph fs new $FS_NAME fs_metadata fs_data
870 wait_mds_active $FS_NAME
871
872 # Test injectargs by GID
873 old_mds_gids=$(get_mds_gids $FS_NAME)
874 echo Old GIDs: $old_mds_gids
875
876 for mds_gid in $old_mds_gids ; do
877 ceph tell mds.$mds_gid injectargs "--debug-mds 20"
878 done
879 expect_false ceph tell mds.a injectargs mds_max_file_recover -1
880
881 # Test respawn by rank
882 without_test_dup_command ceph tell mds.0 respawn
883 new_mds_gids=$old_mds_gids
884 while [ $new_mds_gids -eq $old_mds_gids ] ; do
885 sleep 5
886 new_mds_gids=$(get_mds_gids $FS_NAME)
887 done
888 echo New GIDs: $new_mds_gids
889
890 # Test respawn by ID
891 without_test_dup_command ceph tell mds.a respawn
892 new_mds_gids=$old_mds_gids
893 while [ $new_mds_gids -eq $old_mds_gids ] ; do
894 sleep 5
895 new_mds_gids=$(get_mds_gids $FS_NAME)
896 done
897 echo New GIDs: $new_mds_gids
898
899 remove_all_fs
900 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
901 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
902 }
903
904 function test_mon_mds()
905 {
906 local FS_NAME=cephfs
907 remove_all_fs
908
909 ceph osd pool create fs_data 16
910 ceph osd pool create fs_metadata 16
911 ceph fs new $FS_NAME fs_metadata fs_data
912
913 ceph fs set $FS_NAME cluster_down true
914 ceph fs set $FS_NAME cluster_down false
915
916 ceph mds compat rm_incompat 4
917 ceph mds compat rm_incompat 4
918
919 # We don't want any MDSs to be up, their activity can interfere with
920 # the "current_epoch + 1" checking below if they're generating updates
921 fail_all_mds $FS_NAME
922
923 ceph mds compat show
924 ceph fs dump
925 ceph fs get $FS_NAME
926 for mds_gid in $(get_mds_gids $FS_NAME) ; do
927 ceph mds metadata $mds_gid
928 done
929 ceph mds metadata
930 ceph mds versions
931 ceph mds count-metadata os
932
933 # XXX mds fail, but how do you undo it?
934 mdsmapfile=$TEMP_DIR/mdsmap.$$
935 current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
936 [ -s $mdsmapfile ]
937 rm $mdsmapfile
938
939 ceph osd pool create data2 16
940 ceph osd pool create data3 16
941 data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
942 data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
943 ceph fs add_data_pool cephfs $data2_pool
944 ceph fs add_data_pool cephfs $data3_pool
945 ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
946 check_response "Error ENOENT"
947 ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
948 check_response "Error ENOENT"
949 ceph fs rm_data_pool cephfs $data2_pool
950 ceph fs rm_data_pool cephfs $data3_pool
951 ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
952 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
953 ceph fs set cephfs max_mds 4
954 ceph fs set cephfs max_mds 3
955 ceph fs set cephfs max_mds 256
956 expect_false ceph fs set cephfs max_mds 257
957 ceph fs set cephfs max_mds 4
958 ceph fs set cephfs max_mds 256
959 expect_false ceph fs set cephfs max_mds 257
960 expect_false ceph fs set cephfs max_mds asdf
961 expect_false ceph fs set cephfs inline_data true
962 ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
963 ceph fs set cephfs inline_data yes --yes-i-really-really-mean-it
964 ceph fs set cephfs inline_data 1 --yes-i-really-really-mean-it
965 expect_false ceph fs set cephfs inline_data --yes-i-really-really-mean-it
966 ceph fs set cephfs inline_data false
967 ceph fs set cephfs inline_data no
968 ceph fs set cephfs inline_data 0
969 expect_false ceph fs set cephfs inline_data asdf
970 ceph fs set cephfs max_file_size 1048576
971 expect_false ceph fs set cephfs max_file_size 123asdf
972
973 expect_false ceph fs set cephfs allow_new_snaps
974 ceph fs set cephfs allow_new_snaps true
975 ceph fs set cephfs allow_new_snaps 0
976 ceph fs set cephfs allow_new_snaps false
977 ceph fs set cephfs allow_new_snaps no
978 expect_false ceph fs set cephfs allow_new_snaps taco
979
980 # we should never be able to add EC pools as data or metadata pools
981 # create an ec-pool...
982 ceph osd pool create mds-ec-pool 16 16 erasure
983 set +e
984 ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
985 check_response 'erasure-code' $? 22
986 set -e
987 ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
988 data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
989 metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
990
991 fail_all_mds $FS_NAME
992
993 set +e
994 # Check that rmfailed requires confirmation
995 expect_false ceph mds rmfailed 0
996 ceph mds rmfailed 0 --yes-i-really-mean-it
997 set -e
998
999 # Check that `fs new` is no longer permitted
1000 expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
1001
1002 # Check that 'fs reset' runs
1003 ceph fs reset $FS_NAME --yes-i-really-mean-it
1004
1005 # Check that creating a second FS fails by default
1006 ceph osd pool create fs_metadata2 16
1007 ceph osd pool create fs_data2 16
1008 set +e
1009 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
1010 set -e
1011
1012 # Check that setting enable_multiple enables creation of second fs
1013 ceph fs flag set enable_multiple true --yes-i-really-mean-it
1014 ceph fs new cephfs2 fs_metadata2 fs_data2
1015
1016 # Clean up multi-fs stuff
1017 fail_all_mds cephfs2
1018 ceph fs rm cephfs2 --yes-i-really-mean-it
1019 ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
1020 ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
1021
1022 fail_all_mds $FS_NAME
1023
1024 # Clean up to enable subsequent fs new tests
1025 ceph fs rm $FS_NAME --yes-i-really-mean-it
1026
1027 set +e
1028 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1029 check_response 'erasure-code' $? 22
1030 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1031 check_response 'already used by filesystem' $? 22
1032 ceph fs new $FS_NAME mds-ec-pool fs_data --force 2>$TMPFILE
1033 check_response 'erasure-code' $? 22
1034 ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
1035 check_response 'erasure-code' $? 22
1036 set -e
1037
1038 # ... now create a cache tier in front of the EC pool...
1039 ceph osd pool create mds-tier 2
1040 ceph osd tier add mds-ec-pool mds-tier
1041 ceph osd tier set-overlay mds-ec-pool mds-tier
1042 tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
1043
1044 # Use of a readonly tier should be forbidden
1045 ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
1046 set +e
1047 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1048 check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
1049 set -e
1050
1051 # Use of a writeback tier should enable FS creation
1052 ceph osd tier cache-mode mds-tier writeback
1053 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
1054
1055 # While a FS exists using the tiered pools, I should not be allowed
1056 # to remove the tier
1057 set +e
1058 ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
1059 check_response 'in use by CephFS' $? 16
1060 ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
1061 check_response 'in use by CephFS' $? 16
1062 set -e
1063
1064 fail_all_mds $FS_NAME
1065 ceph fs rm $FS_NAME --yes-i-really-mean-it
1066
1067 # ... but we should be forbidden from using the cache pool in the FS directly.
1068 set +e
1069 ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
1070 check_response 'in use as a cache tier' $? 22
1071 ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
1072 check_response 'already used by filesystem' $? 22
1073 ceph fs new $FS_NAME mds-tier fs_data --force 2>$TMPFILE
1074 check_response 'in use as a cache tier' $? 22
1075 ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
1076 check_response 'already used by filesystem' $? 22
1077 ceph fs new $FS_NAME mds-tier mds-tier --force 2>$TMPFILE
1078 check_response 'in use as a cache tier' $? 22
1079 set -e
1080
1081 # Clean up tier + EC pools
1082 ceph osd tier remove-overlay mds-ec-pool
1083 ceph osd tier remove mds-ec-pool mds-tier
1084
1085 # Create a FS using the 'cache' pool now that it's no longer a tier
1086 ceph fs new $FS_NAME fs_metadata mds-tier --force
1087
1088 # We should be forbidden from using this pool as a tier now that
1089 # it's in use for CephFS
1090 set +e
1091 ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
1092 check_response 'in use by CephFS' $? 16
1093 set -e
1094
1095 fail_all_mds $FS_NAME
1096 ceph fs rm $FS_NAME --yes-i-really-mean-it
1097
1098 # We should be permitted to use an EC pool with overwrites enabled
1099 # as the data pool...
1100 ceph osd pool set mds-ec-pool allow_ec_overwrites true
1101 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1102 fail_all_mds $FS_NAME
1103 ceph fs rm $FS_NAME --yes-i-really-mean-it
1104
1105 # ...but not as the metadata pool
1106 set +e
1107 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1108 check_response 'already used by filesystem' $? 22
1109 ceph fs new $FS_NAME mds-ec-pool fs_data --force 2>$TMPFILE
1110 check_response 'erasure-code' $? 22
1111 set -e
1112
1113 ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
1114
1115 # Create a FS and check that we can subsequently add a cache tier to it
1116 ceph fs new $FS_NAME fs_metadata fs_data --force
1117
1118 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1119 ceph osd tier add fs_metadata mds-tier
1120 ceph osd tier cache-mode mds-tier writeback
1121 ceph osd tier set-overlay fs_metadata mds-tier
1122
1123 # Removing tier should be permitted because the underlying pool is
1124 # replicated (#11504 case)
1125 ceph osd tier cache-mode mds-tier proxy
1126 ceph osd tier remove-overlay fs_metadata
1127 ceph osd tier remove fs_metadata mds-tier
1128 ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
1129
1130 # Clean up FS
1131 fail_all_mds $FS_NAME
1132 ceph fs rm $FS_NAME --yes-i-really-mean-it
1133
1134
1135
1136 ceph mds stat
1137 # ceph mds tell mds.a getmap
1138 # ceph mds rm
1139 # ceph mds rmfailed
1140 # ceph mds set_state
1141
1142 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
1143 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
1144 }
1145
1146 function test_mon_mds_metadata()
1147 {
1148 local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
1149 test "$nmons" -gt 0
1150
1151 ceph fs dump |
1152 sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
1153 while read gid id rank; do
1154 ceph mds metadata ${gid} | grep '"hostname":'
1155 ceph mds metadata ${id} | grep '"hostname":'
1156 ceph mds metadata ${rank} | grep '"hostname":'
1157
1158 local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
1159 test "$n" -eq "$nmons"
1160 done
1161
1162 expect_false ceph mds metadata UNKNOWN
1163 }
1164
1165 function test_mon_mon()
1166 {
1167 # print help message
1168 ceph --help mon
1169 # -h works even when some arguments are passed
1170 ceph osd dump -h | grep 'osd dump'
1171 ceph osd dump 123 -h | grep 'osd dump'
1172 # no mon add/remove
1173 ceph mon dump
1174 ceph mon getmap -o $TEMP_DIR/monmap.$$
1175 [ -s $TEMP_DIR/monmap.$$ ]
1176
1177 # ceph mon tell
1178 first=$(ceph mon dump -f json | jq -r '.mons[0].name')
1179 ceph tell mon.$first mon_status
1180
1181 # test mon features
1182 ceph mon feature ls
1183 ceph mon feature set kraken --yes-i-really-mean-it
1184 expect_false ceph mon feature set abcd
1185 expect_false ceph mon feature set abcd --yes-i-really-mean-it
1186
1187 # test elector
1188 expect_failure $TEMP_DIR ceph mon add disallowed_leader $first
1189 ceph mon set election_strategy disallow
1190 ceph mon add disallowed_leader $first
1191 ceph mon set election_strategy connectivity
1192 ceph mon rm disallowed_leader $first
1193 ceph mon set election_strategy classic
1194 expect_failure $TEMP_DIR ceph mon rm disallowed_leader $first
1195
1196 # test mon stat
1197 # don't check output, just ensure it does not fail.
1198 ceph mon stat
1199 ceph mon stat -f json | jq '.'
1200 }
1201
1202 function test_mon_priority_and_weight()
1203 {
1204 for i in 0 1 65535; do
1205 ceph mon set-weight a $i
1206 w=$(ceph mon dump --format=json-pretty 2>/dev/null | jq '.mons[0].weight')
1207 [[ "$w" == "$i" ]]
1208 done
1209
1210 for i in -1 65536; do
1211 expect_false ceph mon set-weight a $i
1212 done
1213 }
1214
1215 function gen_secrets_file()
1216 {
1217 # let's assume we can have the following types
1218 # all - generates both cephx and lockbox, with mock dm-crypt key
1219 # cephx - only cephx
1220 # no_cephx - lockbox and dm-crypt, no cephx
1221 # no_lockbox - dm-crypt and cephx, no lockbox
1222 # empty - empty file
1223 # empty_json - correct json, empty map
1224 # bad_json - bad json :)
1225 #
1226 local t=$1
1227 if [[ -z "$t" ]]; then
1228 t="all"
1229 fi
1230
1231 fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
1232 echo $fn
1233 if [[ "$t" == "empty" ]]; then
1234 return 0
1235 fi
1236
1237 echo "{" > $fn
1238 if [[ "$t" == "bad_json" ]]; then
1239 echo "asd: ; }" >> $fn
1240 return 0
1241 elif [[ "$t" == "empty_json" ]]; then
1242 echo "}" >> $fn
1243 return 0
1244 fi
1245
1246 cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
1247 lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
1248 dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
1249
1250 if [[ "$t" == "all" ]]; then
1251 echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
1252 elif [[ "$t" == "cephx" ]]; then
1253 echo "$cephx_secret" >> $fn
1254 elif [[ "$t" == "no_cephx" ]]; then
1255 echo "$lb_secret,$dmcrypt_key" >> $fn
1256 elif [[ "$t" == "no_lockbox" ]]; then
1257 echo "$cephx_secret,$dmcrypt_key" >> $fn
1258 else
1259 echo "unknown gen_secrets_file() type \'$fn\'"
1260 return 1
1261 fi
1262 echo "}" >> $fn
1263 return 0
1264 }
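# For reference, a file generated with type "all" has this shape (the key
# values below are placeholders for fresh ceph-authtool output):
#
#   {
#   "cephx_secret": "<key>","cephx_lockbox_secret": "<key>","dmcrypt_key": "<key>"
#   }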
1265
1266 function test_mon_osd_create_destroy()
1267 {
1268 ceph osd new 2>&1 | grep 'EINVAL'
1269 ceph osd new '' -1 2>&1 | grep 'EINVAL'
1270 ceph osd new '' 10 2>&1 | grep 'EINVAL'
1271
1272 old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1273
1274 old_osds=$(ceph osd ls)
1275 num_osds=$(ceph osd ls | wc -l)
1276
1277 uuid=$(uuidgen)
1278 id=$(ceph osd new $uuid 2>/dev/null)
1279
1280 for i in $old_osds; do
1281 [[ "$i" != "$id" ]]
1282 done
1283
1284 ceph osd find $id
1285
1286 id2=`ceph osd new $uuid 2>/dev/null`
1287
1288 [[ $id2 == $id ]]
1289
1290 ceph osd new $uuid $id
1291
1292 id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1293 ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
1294
1295 uuid2=$(uuidgen)
1296 id2=$(ceph osd new $uuid2)
1297 ceph osd find $id2
1298 [[ "$id2" != "$id" ]]
1299
1300 ceph osd new $uuid $id2 2>&1 | grep EEXIST
1301 ceph osd new $uuid2 $id2
1302
1303 # test with secrets
1304 empty_secrets=$(gen_secrets_file "empty")
1305 empty_json=$(gen_secrets_file "empty_json")
1306 all_secrets=$(gen_secrets_file "all")
1307 cephx_only=$(gen_secrets_file "cephx")
1308 no_cephx=$(gen_secrets_file "no_cephx")
1309 no_lockbox=$(gen_secrets_file "no_lockbox")
1310 bad_json=$(gen_secrets_file "bad_json")
1311
1312 # empty secrets should be idempotent
1313 new_id=$(ceph osd new $uuid $id -i $empty_secrets)
1314 [[ "$new_id" == "$id" ]]
1315
1316 # empty json, thus empty secrets
1317 new_id=$(ceph osd new $uuid $id -i $empty_json)
1318 [[ "$new_id" == "$id" ]]
1319
1320 ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
1321
1322 ceph osd rm $id
1323 ceph osd rm $id2
1324 ceph osd setmaxosd $old_maxosd
1325
1326 ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
1327 ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
1328
1329 osds=$(ceph osd ls)
1330 id=$(ceph osd new $uuid -i $all_secrets)
1331 for i in $osds; do
1332 [[ "$i" != "$id" ]]
1333 done
1334
1335 ceph osd find $id
1336
1337 # validate secrets and dm-crypt are set
1338 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1339 s=$(cat $all_secrets | jq '.cephx_secret')
1340 [[ $k == $s ]]
1341 k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
1342 jq '.key')
1343 s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
1344 [[ $k == $s ]]
1345 ceph config-key exists dm-crypt/osd/$uuid/luks
1346
1347 osds=$(ceph osd ls)
1348 id2=$(ceph osd new $uuid2 -i $cephx_only)
1349 for i in $osds; do
1350 [[ "$i" != "$id2" ]]
1351 done
1352
1353 ceph osd find $id2
1354 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1355 s=$(cat $all_secrets | jq '.cephx_secret')
1356 [[ $k == $s ]]
1357 expect_false ceph auth get-key client.osd-lockbox.$uuid2
1358 expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
1359
1360 ceph osd destroy osd.$id2 --yes-i-really-mean-it
1361 ceph osd destroy $id2 --yes-i-really-mean-it
1362 ceph osd find $id2
1363 expect_false ceph auth get-key osd.$id2
1364 ceph osd dump | grep osd.$id2 | grep destroyed
1365
1366 id3=$id2
1367 uuid3=$(uuidgen)
1368 ceph osd new $uuid3 $id3 -i $all_secrets
1369 ceph osd dump | grep osd.$id3 | expect_false grep destroyed
1370 ceph auth get-key client.osd-lockbox.$uuid3
1371 ceph auth get-key osd.$id3
1372 ceph config-key exists dm-crypt/osd/$uuid3/luks
1373
1374 ceph osd purge-new osd.$id3 --yes-i-really-mean-it
1375 expect_false ceph osd find $id2
1376 expect_false ceph auth get-key osd.$id2
1377 expect_false ceph auth get-key client.osd-lockbox.$uuid3
1378 expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
1379 ceph osd purge osd.$id3 --yes-i-really-mean-it
1380 ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent
1381
1382 ceph osd purge osd.$id --yes-i-really-mean-it
1383 ceph osd purge 123456 --yes-i-really-mean-it
1384 expect_false ceph osd find $id
1385 expect_false ceph auth get-key osd.$id
1386 expect_false ceph auth get-key client.osd-lockbox.$uuid
1387 expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
1388
1389 rm $empty_secrets $empty_json $all_secrets $cephx_only \
1390 $no_cephx $no_lockbox $bad_json
1391
1392 for i in $(ceph osd ls); do
1393 [[ "$i" != "$id" ]]
1394 [[ "$i" != "$id2" ]]
1395 [[ "$i" != "$id3" ]]
1396 done
1397
1398 [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
1399 ceph osd setmaxosd $old_maxosd
1400
1401 }
1402
1403 function test_mon_config_key()
1404 {
1405 key=asdfasdfqwerqwreasdfuniquesa123df
1406 ceph config-key list | grep -c $key | grep 0
1407 ceph config-key get $key | grep -c bar | grep 0
1408 ceph config-key set $key bar
1409 ceph config-key get $key | grep bar
1410 ceph config-key list | grep -c $key | grep 1
1411 ceph config-key dump | grep $key | grep bar
1412 ceph config-key rm $key
1413 expect_false ceph config-key get $key
1414 ceph config-key list | grep -c $key | grep 0
1415 ceph config-key dump | grep -c $key | grep 0
1416 }
1417
1418 function test_mon_osd()
1419 {
1420 #
1421 # osd blocklist
1422 #
1423 bl=192.168.0.1:0/1000
1424 ceph osd blocklist add $bl
1425 ceph osd blocklist ls | grep $bl
1426 ceph osd blocklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
1427 ceph osd dump --format=json-pretty | grep $bl
1428 ceph osd dump | grep $bl
1429 ceph osd blocklist rm $bl
1430 ceph osd blocklist ls | expect_false grep $bl
1431
1432 bl=192.168.0.1
1433 # test without nonce, invalid nonce
1434 ceph osd blocklist add $bl
1435 ceph osd blocklist ls | grep $bl
1436 ceph osd blocklist rm $bl
1437 ceph osd blocklist ls | expect_false grep $bl
1438 expect_false "ceph osd blocklist add $bl/-1"
1439 expect_false "ceph osd blocklist add $bl/foo"
1440
1441 # test with invalid address
1442 expect_false "ceph osd blocklist add 1234.56.78.90/100"
1443
1444 # test range blocklisting
1445 bl=192.168.0.1:0/24
1446 ceph osd blocklist range add $bl
1447 ceph osd blocklist ls | grep $bl
1448 ceph osd blocklist range rm $bl
1449 ceph osd blocklist ls | expect_false grep $bl
1450 bad_bl=192.168.0.1/33
1451 expect_false ceph osd blocklist range add $bad_bl
1452
1453 # Test `clear`
1454 ceph osd blocklist add $bl
1455 ceph osd blocklist ls | grep $bl
1456 ceph osd blocklist clear
1457 ceph osd blocklist ls | expect_false grep $bl
1458
1459 # deprecated syntax?
1460 ceph osd blacklist ls
1461
1462 #
1463 # osd crush
1464 #
1465 ceph osd crush reweight-all
1466 ceph osd crush tunables legacy
1467 ceph osd crush show-tunables | grep argonaut
1468 ceph osd crush tunables bobtail
1469 ceph osd crush show-tunables | grep bobtail
1470 ceph osd crush tunables firefly
1471 ceph osd crush show-tunables | grep firefly
1472
1473 ceph osd crush set-tunable straw_calc_version 0
1474 ceph osd crush get-tunable straw_calc_version | grep 0
1475 ceph osd crush set-tunable straw_calc_version 1
1476 ceph osd crush get-tunable straw_calc_version | grep 1
1477
1478 #
1479 # require-min-compat-client
1480 expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
1481 ceph osd get-require-min-compat-client | grep luminous
1482 ceph osd dump | grep 'require_min_compat_client luminous'
1483
1484 #
1485 # osd scrub
1486 #
1487
1488 # blocking
1489 ceph osd scrub 0 --block
1490 ceph osd deep-scrub 0 --block
1491
1492 # how do I tell when these are done?
1493 ceph osd scrub 0
1494 ceph osd deep-scrub 0
1495 ceph osd repair 0
1496
1497 # pool scrub, force-recovery/backfill
1498 pool_names=`rados lspools`
1499 for pool_name in $pool_names
1500 do
1501 ceph osd pool scrub $pool_name
1502 ceph osd pool deep-scrub $pool_name
1503 ceph osd pool repair $pool_name
1504 ceph osd pool force-recovery $pool_name
1505 ceph osd pool cancel-force-recovery $pool_name
1506 ceph osd pool force-backfill $pool_name
1507 ceph osd pool cancel-force-backfill $pool_name
1508 done
1509
1510 for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \
1511 norebalance norecover notieragent noautoscale
1512 do
1513 ceph osd set $f
1514 ceph osd unset $f
1515 done
1516 expect_false ceph osd set bogus
1517 expect_false ceph osd unset bogus
1518 for f in sortbitwise recover_deletes require_jewel_osds \
1519 require_kraken_osds
1520 do
1521 expect_false ceph osd set $f
1522 expect_false ceph osd unset $f
1523 done
1524 ceph osd require-osd-release reef
1525 # can't lower
1526 expect_false ceph osd require-osd-release quincy
1527 expect_false ceph osd require-osd-release pacific
1528 # these are no-ops but should succeed.
1529
1530 ceph osd set noup
1531 ceph osd down 0
1532 ceph osd dump | grep 'osd.0 down'
1533 ceph osd unset noup
1534 max_run=1000
1535 for ((i=0; i < $max_run; i++)); do
1536 if ! ceph osd dump | grep 'osd.0 up'; then
1537 echo "waiting for osd.0 to come back up ($i/$max_run)"
1538 sleep 1
1539 else
1540 break
1541 fi
1542 done
1543 ceph osd dump | grep 'osd.0 up'
1544
1545 ceph osd dump | grep 'osd.0 up'
1546 # ceph osd find expects the OsdName, so both ints and osd.n should work.
1547 ceph osd find 1
1548 ceph osd find osd.1
1549 expect_false ceph osd find osd.xyz
1550 expect_false ceph osd find xyz
1551 expect_false ceph osd find 0.1
1552 ceph --format plain osd find 1 # falls back to json-pretty
1553 if [ `uname` == Linux ]; then
1554 ceph osd metadata 1 | grep 'distro'
1555 ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
1556 fi
1557 ceph osd out 0
1558 ceph osd dump | grep 'osd.0.*out'
1559 ceph osd in 0
1560 ceph osd dump | grep 'osd.0.*in'
1561 ceph osd find 0
1562
1563 ceph osd info 0
1564 ceph osd info osd.0
1565 expect_false ceph osd info osd.xyz
1566 expect_false ceph osd info xyz
1567 expect_false ceph osd info 42
1568 expect_false ceph osd info osd.42
1569
1570 ceph osd info
1571 info_json=$(ceph osd info --format=json | jq -cM '.')
1572 dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
1573 if [[ "${info_json}" != "${dump_json}" ]]; then
1574 echo "waiting for OSDs to settle"
1575 sleep 10
1576 info_json=$(ceph osd info --format=json | jq -cM '.')
1577 dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
1578 [[ "${info_json}" == "${dump_json}" ]]
1579 fi
1580
1581 info_json=$(ceph osd info 0 --format=json | jq -cM '.')
1582 dump_json=$(ceph osd dump --format=json | \
1583 jq -cM '.osds[] | select(.osd == 0)')
1584 [[ "${info_json}" == "${dump_json}" ]]
1585
1586 info_plain="$(ceph osd info)"
1587 dump_plain="$(ceph osd dump | grep '^osd')"
1588 [[ "${info_plain}" == "${dump_plain}" ]]
1589
1590 info_plain="$(ceph osd info 0)"
1591 dump_plain="$(ceph osd dump | grep '^osd.0')"
1592 [[ "${info_plain}" == "${dump_plain}" ]]
1593
1594 ceph osd add-nodown 0 1
1595 ceph health detail | grep 'NODOWN'
1596 ceph osd rm-nodown 0 1
1597 ! ceph health detail | grep 'NODOWN'
1598
1599 ceph osd out 0 # so we can mark it as noin later
1600 ceph osd add-noin 0
1601 ceph health detail | grep 'NOIN'
1602 ceph osd rm-noin 0
1603 ! ceph health detail | grep 'NOIN'
1604 ceph osd in 0
1605
1606 ceph osd add-noout 0
1607 ceph health detail | grep 'NOOUT'
1608 ceph osd rm-noout 0
1609 ! ceph health detail | grep 'NOOUT'
1610
1611 # test osd id parse
1612 expect_false ceph osd add-noup 797er
1613 expect_false ceph osd add-nodown u9uwer
1614 expect_false ceph osd add-noin 78~15
1615
1616 expect_false ceph osd rm-noup 1234567
1617 expect_false ceph osd rm-nodown fsadf7
1618 expect_false ceph osd rm-noout 790-fd
1619
1620 ids=`ceph osd ls-tree default`
1621 for osd in $ids
1622 do
1623 ceph osd add-nodown $osd
1624 ceph osd add-noout $osd
1625 done
1626 ceph -s | grep 'NODOWN'
1627 ceph -s | grep 'NOOUT'
1628 ceph osd rm-nodown any
1629 ceph osd rm-noout all
1630 ! ceph -s | grep 'NODOWN'
1631 ! ceph -s | grep 'NOOUT'
1632
1633 # test crush node flags
1634 ceph osd add-noup osd.0
1635 ceph osd add-nodown osd.0
1636 ceph osd add-noin osd.0
1637 ceph osd add-noout osd.0
1638 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
1639 ceph osd rm-noup osd.0
1640 ceph osd rm-nodown osd.0
1641 ceph osd rm-noin osd.0
1642 ceph osd rm-noout osd.0
1643 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
1644
1645 ceph osd crush add-bucket foo host root=default
1646 ceph osd add-noup foo
1647 ceph osd add-nodown foo
1648 ceph osd add-noin foo
1649 ceph osd add-noout foo
1650 ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
1651 ceph osd rm-noup foo
1652 ceph osd rm-nodown foo
1653 ceph osd rm-noin foo
1654 ceph osd rm-noout foo
1655 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
1656 ceph osd add-noup foo
1657 ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
1658 ceph osd crush rm foo
1659 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
1660
1661 ceph osd set-group noup osd.0
1662 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1663 ceph osd set-group noup,nodown osd.0
1664 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1665 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1666 ceph osd set-group noup,nodown,noin osd.0
1667 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1668 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1669 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1670 ceph osd set-group noup,nodown,noin,noout osd.0
1671 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1672 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1673 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1674 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1675 ceph osd unset-group noup osd.0
1676 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
1677 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1678 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1679 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1680 ceph osd unset-group noup,nodown osd.0
1681 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown'
1682 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1683 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1684 ceph osd unset-group noup,nodown,noin osd.0
1685 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin'
1686 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1687 ceph osd unset-group noup,nodown,noin,noout osd.0
1688 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1689
1690 ceph osd set-group noup,nodown,noin,noout osd.0 osd.1
1691 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1692 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
1693 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
1694 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
1695 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup'
1696 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown'
1697 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin'
1698 ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout'
1699 ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1
1700 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1701 ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout'
1702
1703 ceph osd set-group noup all
1704 ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
1705 ceph osd unset-group noup all
1706 ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
1707
1708 # crush node flags
1709 ceph osd crush add-bucket foo host root=default
1710 ceph osd set-group noup foo
1711 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1712 ceph osd set-group noup,nodown foo
1713 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1714 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1715 ceph osd set-group noup,nodown,noin foo
1716 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1717 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1718 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1719 ceph osd set-group noup,nodown,noin,noout foo
1720 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1721 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1722 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1723 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1724
1725 ceph osd unset-group noup foo
1726 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup'
1727 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1728 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1729 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1730 ceph osd unset-group noup,nodown foo
1731 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown'
1732 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1733 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1734 ceph osd unset-group noup,nodown,noin foo
1735 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin'
1736 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1737 ceph osd unset-group noup,nodown,noin,noout foo
1738 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout'
1739
1740 ceph osd set-group noin,noout foo
1741 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1742 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1743 ceph osd unset-group noin,noout foo
1744 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
1745
1746 ceph osd set-group noup,nodown,noin,noout foo
1747 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
1748 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
1749 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
1750 ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
1751 ceph osd crush rm foo
1752 ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
1753
1754 # test device class flags
1755 osd_0_device_class=$(ceph osd crush get-device-class osd.0)
1756 ceph osd set-group noup $osd_0_device_class
1757 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1758 ceph osd set-group noup,nodown $osd_0_device_class
1759 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1760 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1761 ceph osd set-group noup,nodown,noin $osd_0_device_class
1762 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1763 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1764 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1765 ceph osd set-group noup,nodown,noin,noout $osd_0_device_class
1766 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
1767 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1768 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1769 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1770
1771 ceph osd unset-group noup $osd_0_device_class
1772 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup'
1773 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
1774 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1775 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1776 ceph osd unset-group noup,nodown $osd_0_device_class
1777 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown'
1778 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1779 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1780 ceph osd unset-group noup,nodown,noin $osd_0_device_class
1781 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin'
1782 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1783 ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class
1784 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout'
1785
1786 ceph osd set-group noin,noout $osd_0_device_class
1787 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
1788 ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
1789 ceph osd unset-group noin,noout $osd_0_device_class
1790 ceph osd dump -f json-pretty | jq ".device_class_flags" | expect_false grep $osd_0_device_class
1791
1792 # make sure mark out preserves weight
1793 ceph osd reweight osd.0 .5
1794 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1795 ceph osd out 0
1796 ceph osd in 0
1797 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1798
1799 ceph osd getmap -o $f
1800 [ -s $f ]
1801 rm $f
1802 save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1803 [ "$save" -gt 0 ]
1804 ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
1805 ceph osd setmaxosd 10
1806 ceph osd getmaxosd | grep 'max_osd = 10'
1807 ceph osd setmaxosd $save
1808 ceph osd getmaxosd | grep "max_osd = $save"
1809
1810 for id in `ceph osd ls` ; do
1811 retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
1812 done
1813
1814 ceph osd rm 0 2>&1 | grep 'EBUSY'
1815
1816 local old_osds=$(echo $(ceph osd ls))
1817 id=`ceph osd create`
1818 ceph osd find $id
1819 ceph osd lost $id --yes-i-really-mean-it
1820 expect_false ceph osd setmaxosd $id
1821 local new_osds=$(echo $(ceph osd ls))
1822 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1823 ceph osd rm $id
1824 done
1825
1826 uuid=`uuidgen`
1827 id=`ceph osd create $uuid`
1828 id2=`ceph osd create $uuid`
1829 [ "$id" = "$id2" ]
1830 ceph osd rm $id
1831
1832 ceph --help osd
1833
1834 # reset max_osd.
1835 ceph osd setmaxosd $id
1836 ceph osd getmaxosd | grep "max_osd = $save"
1837 local max_osd=$save
1838
1839 ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
1840 ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
1841
1842 id=`ceph osd create $uuid $max_osd`
1843 [ "$id" = "$max_osd" ]
1844 ceph osd find $id
1845 max_osd=$((max_osd + 1))
1846 ceph osd getmaxosd | grep "max_osd = $max_osd"
1847
1848 ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
1849 ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
1850 id2=`ceph osd create $uuid`
1851 [ "$id" = "$id2" ]
1852 id2=`ceph osd create $uuid $id`
1853 [ "$id" = "$id2" ]
1854
1855 uuid=`uuidgen`
1856 local gap_start=$max_osd
1857 id=`ceph osd create $uuid $((gap_start + 100))`
1858 [ "$id" = "$((gap_start + 100))" ]
1859 max_osd=$((id + 1))
1860 ceph osd getmaxosd | grep "max_osd = $max_osd"
1861
1862 ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
1863
1864 #
1865 # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
1866 # is repeated and consumes two osd ids, not just one.
1867 #
1868 local next_osd=$gap_start
1869 id=`ceph osd create $(uuidgen)`
1870 [ "$id" = "$next_osd" ]
1871
1872 next_osd=$((id + 1))
1873 id=`ceph osd create $(uuidgen) $next_osd`
1874 [ "$id" = "$next_osd" ]
1875
1876 local new_osds=$(echo $(ceph osd ls))
1877 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1878 [ $id -ge $save ]
1879 ceph osd rm $id
1880 done
1881 ceph osd setmaxosd $save
1882
1883 ceph osd ls
1884 ceph osd pool create data 16
1885 ceph osd pool application enable data rados
1886 ceph osd lspools | grep data
1887 ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
1888 ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
1889 ceph osd pool delete data data --yes-i-really-really-mean-it
1890
1891 ceph osd pause
1892 ceph osd dump | grep 'flags.*pauserd,pausewr'
1893 ceph osd unpause
1894
1895 ceph osd tree
1896 ceph osd tree up
1897 ceph osd tree down
1898 ceph osd tree in
1899 ceph osd tree out
1900 ceph osd tree destroyed
1901 ceph osd tree up in
1902 ceph osd tree up out
1903 ceph osd tree down in
1904 ceph osd tree down out
1905 ceph osd tree out down
1906 expect_false ceph osd tree up down
1907 expect_false ceph osd tree up destroyed
1908 expect_false ceph osd tree down destroyed
1909 expect_false ceph osd tree up down destroyed
1910 expect_false ceph osd tree in out
1911 expect_false ceph osd tree up foo
1912
1913 ceph osd metadata
1914 ceph osd count-metadata os
1915 ceph osd versions
1916
1917 ceph osd perf
1918 ceph osd blocked-by
1919
1920 ceph osd stat | grep up
1921 }
1922
1923 function test_mon_crush()
1924 {
1925 f=$TEMP_DIR/map.$$
1926 epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
1927 [ -s $f ]
1928 [ "$epoch" -gt 1 ]
1929 nextepoch=$(( $epoch + 1 ))
1930 echo epoch $epoch nextepoch $nextepoch
1931 rm -f $f.epoch
1932 expect_false ceph osd setcrushmap $nextepoch -i $f
1933 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1934 echo gotepoch $gotepoch
1935 [ "$gotepoch" -eq "$nextepoch" ]
1936 # should be idempotent
1937 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1938 echo epoch $gotepoch
1939 [ "$gotepoch" -eq "$nextepoch" ]
1940 rm $f
1941 }
1942
1943 function test_mon_osd_pool()
1944 {
1945 #
1946 # osd pool
1947 #
1948 ceph osd pool create data 16
1949 ceph osd pool application enable data rados
1950 ceph osd pool mksnap data datasnap
1951 rados -p data lssnap | grep datasnap
1952 ceph osd pool rmsnap data datasnap
1953 expect_false ceph osd pool rmsnap pool_fake snapshot
1954 ceph osd pool delete data data --yes-i-really-really-mean-it
1955
1956 ceph osd pool create data2 16
1957 ceph osd pool application enable data2 rados
1958 ceph osd pool rename data2 data3
1959 ceph osd lspools | grep data3
1960 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
1961
1962 ceph osd pool create replicated 16 16 replicated
1963 ceph osd pool create replicated 1 16 replicated
1964 ceph osd pool create replicated 16 16 # default is replicated
1965 ceph osd pool create replicated 16 # default is replicated, pgp_num = pg_num
1966 ceph osd pool application enable replicated rados
1967 # should fail because the type is not the same
1968 expect_false ceph osd pool create replicated 16 16 erasure
1969 ceph osd lspools | grep replicated
1970 ceph osd pool create ec_test 1 1 erasure
1971 ceph osd pool application enable ec_test rados
1972 set +e
1973 ceph osd count-metadata osd_objectstore | grep 'bluestore'
1974 if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
1975 ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
1976 check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
1977 else
1978 ceph osd pool set ec_test allow_ec_overwrites true || return 1
1979 expect_false ceph osd pool set ec_test allow_ec_overwrites false
1980 fi
1981 set -e
1982 ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
1983 ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
1984
1985 # test create pool with rule
1986 ceph osd erasure-code-profile set foo foo
1987 ceph osd erasure-code-profile ls | grep foo
1988 ceph osd crush rule create-erasure foo foo
1989 ceph osd pool create erasure 16 16 erasure foo
1990 expect_false ceph osd erasure-code-profile rm foo
1991 ceph osd pool delete erasure erasure --yes-i-really-really-mean-it
1992 ceph osd crush rule rm foo
1993 ceph osd erasure-code-profile rm foo
1994
1995 # autoscale mode
1996 ceph osd pool create modeon --autoscale-mode=on
1997 ceph osd dump | grep modeon | grep 'autoscale_mode on'
1998 ceph osd pool create modewarn --autoscale-mode=warn
1999 ceph osd dump | grep modewarn | grep 'autoscale_mode warn'
2000 ceph osd pool create modeoff --autoscale-mode=off
2001 ceph osd dump | grep modeoff | grep 'autoscale_mode off'
2002 ceph osd pool delete modeon modeon --yes-i-really-really-mean-it
2003 ceph osd pool delete modewarn modewarn --yes-i-really-really-mean-it
2004 ceph osd pool delete modeoff modeoff --yes-i-really-really-mean-it
2005 }
2006
2007 function test_mon_osd_pool_quota()
2008 {
2009 #
2010 # test osd pool set/get quota
2011 #
2012
2013 # create tmp pool
2014 ceph osd pool create tmp-quota-pool 32
2015 ceph osd pool application enable tmp-quota-pool rados
2016 #
2017 # set erroneous quotas
2018 #
2019 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
2020 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
2021 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
2022 #
2023 # set valid quotas
2024 #
2025 ceph osd pool set-quota tmp-quota-pool max_bytes 10
2026 ceph osd pool set-quota tmp-quota-pool max_objects 10M
2027 #
2028 # get quotas in json-pretty format
2029 #
2030 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
2031 grep '"quota_max_objects":.*10000000'
2032 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
2033 grep '"quota_max_bytes":.*10'
2034 #
2035 # get quotas
2036 #
2037 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B'
2038 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects'
2039 #
2040 # set valid quotas with unit prefix
2041 #
2042 ceph osd pool set-quota tmp-quota-pool max_bytes 10K
2043 #
2044 # get quotas
2045 #
2046 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
2047 #
2048 # set valid quotas with unit prefix
2049 #
2050 ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
2051 #
2052 # get quotas
2053 #
2054 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
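#
# Unit-prefix behaviour suggested by the checks above (a hedged reading, not a
# spec): object-count quotas parse decimal prefixes (10M -> 10,000,000 objects),
# while byte quotas parse binary ones (10K and 10Ki both become 10240 bytes,
# displayed as "10 KiB"). A commented-out sketch of the presumed behaviour for
# a larger prefix:
#   ceph osd pool set-quota tmp-quota-pool max_bytes 10M   # presumably 10485760
#   ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Mi'
#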
2055 #
2056 #
2057 # reset pool quotas
2058 #
2059 ceph osd pool set-quota tmp-quota-pool max_bytes 0
2060 ceph osd pool set-quota tmp-quota-pool max_objects 0
2061 #
2062 # test N/A quotas
2063 #
2064 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
2065 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
2066 #
2067 # cleanup tmp pool
2068 ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
2069 }
2070
2071 function test_mon_pg()
2072 {
2073 # Make sure we start healthy.
2074 wait_for_health_ok
2075
2076 ceph pg debug unfound_objects_exist
2077 ceph pg debug degraded_pgs_exist
2078 ceph pg deep-scrub 1.0
2079 ceph pg dump
2080 ceph pg dump pgs_brief --format=json
2081 ceph pg dump pgs --format=json
2082 ceph pg dump pools --format=json
2083 ceph pg dump osds --format=json
2084 ceph pg dump sum --format=json
2085 ceph pg dump all --format=json
2086 ceph pg dump pgs_brief osds --format=json
2087 ceph pg dump pools osds pgs_brief --format=json
2088 ceph pg dump_json
2089 ceph pg dump_pools_json
2090 ceph pg dump_stuck inactive
2091 ceph pg dump_stuck unclean
2092 ceph pg dump_stuck stale
2093 ceph pg dump_stuck undersized
2094 ceph pg dump_stuck degraded
2095 ceph pg ls
2096 ceph pg ls 1
2097 ceph pg ls stale
2098 expect_false ceph pg ls scrubq
2099 ceph pg ls active stale repair recovering
2100 ceph pg ls 1 active
2101 ceph pg ls 1 active stale
2102 ceph pg ls-by-primary osd.0
2103 ceph pg ls-by-primary osd.0 1
2104 ceph pg ls-by-primary osd.0 active
2105 ceph pg ls-by-primary osd.0 active stale
2106 ceph pg ls-by-primary osd.0 1 active stale
2107 ceph pg ls-by-osd osd.0
2108 ceph pg ls-by-osd osd.0 1
2109 ceph pg ls-by-osd osd.0 active
2110 ceph pg ls-by-osd osd.0 active stale
2111 ceph pg ls-by-osd osd.0 1 active stale
2112 ceph pg ls-by-pool rbd
2113 ceph pg ls-by-pool rbd active stale
2114 # can't test this...
2115 # ceph pg force_create_pg
2116 ceph pg getmap -o $TEMP_DIR/map.$$
2117 [ -s $TEMP_DIR/map.$$ ]
2118 ceph pg map 1.0 | grep acting
2119 ceph pg repair 1.0
2120 ceph pg scrub 1.0
2121
2122 ceph osd set-full-ratio .962
2123 ceph osd dump | grep '^full_ratio 0.962'
2124 ceph osd set-backfillfull-ratio .912
2125 ceph osd dump | grep '^backfillfull_ratio 0.912'
2126 ceph osd set-nearfull-ratio .892
2127 ceph osd dump | grep '^nearfull_ratio 0.892'
2128
2129 # Check health status
2130 ceph osd set-nearfull-ratio .913
2131 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
2132 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
2133 ceph osd set-nearfull-ratio .892
2134 ceph osd set-backfillfull-ratio .963
2135 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
2136 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
2137 ceph osd set-backfillfull-ratio .912
2138
2139 # Check injected full results
2140 $SUDO ceph tell osd.0 injectfull nearfull
2141 wait_for_health "OSD_NEARFULL"
2142 ceph health detail | grep "osd.0 is near full"
2143 $SUDO ceph tell osd.0 injectfull none
2144 wait_for_health_ok
2145
2146 $SUDO ceph tell osd.1 injectfull backfillfull
2147 wait_for_health "OSD_BACKFILLFULL"
2148 ceph health detail | grep "osd.1 is backfill full"
2149 $SUDO ceph tell osd.1 injectfull none
2150 wait_for_health_ok
2151
2152 $SUDO ceph tell osd.2 injectfull failsafe
2153 # failsafe and full are the same as far as the monitor is concerned
2154 wait_for_health "OSD_FULL"
2155 ceph health detail | grep "osd.2 is full"
2156 $SUDO ceph tell osd.2 injectfull none
2157 wait_for_health_ok
2158
2159 $SUDO ceph tell osd.0 injectfull full
2160 wait_for_health "OSD_FULL"
2161 ceph health detail | grep "osd.0 is full"
2162 $SUDO ceph tell osd.0 injectfull none
2163 wait_for_health_ok
2164
2165 ceph pg stat | grep 'pgs:'
2166 ceph pg 1.0 query
2167 ceph tell 1.0 query
2168 first=$(ceph mon dump -f json | jq -r '.mons[0].name')
2169 ceph tell mon.$first quorum enter
2170 ceph quorum_status
2171 ceph report | grep osd_stats
2172 ceph status
2173 ceph -s
2174
2175 #
2176 # tell osd version
2177 #
2178 ceph tell osd.0 version
2179 expect_false ceph tell osd.9999 version
2180 expect_false ceph tell osd.foo version
2181
2182 # back to pg stuff
2183
2184 ceph tell osd.0 dump_pg_recovery_stats | grep Started
2185
2186 ceph osd reweight 0 0.9
2187 expect_false ceph osd reweight 0 -1
2188 ceph osd reweight osd.0 1
2189
2190 ceph osd primary-affinity osd.0 .9
2191 expect_false ceph osd primary-affinity osd.0 -2
2192 expect_false ceph osd primary-affinity osd.9999 .5
2193 ceph osd primary-affinity osd.0 1
2194
2195 ceph osd pool set rbd size 2
2196 ceph osd pg-temp 1.0 0 1
2197 ceph osd pg-temp 1.0 osd.1 osd.0
2198 expect_false ceph osd pg-temp 1.0 0 1 2
2199 expect_false ceph osd pg-temp asdf qwer
2200 expect_false ceph osd pg-temp 1.0 asdf
2201 ceph osd pg-temp 1.0 # cleanup pg-temp
2202
2203 ceph pg repeer 1.0
2204 expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore
2205
2206 # don't test ceph osd primary-temp for now
2207 }
2208
2209 function test_mon_osd_pool_set()
2210 {
2211 TEST_POOL_GETSET=pool_getset
2212 expect_false ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio -0.3
2213 expect_true ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio 1
2214 ceph osd pool application enable $TEST_POOL_GETSET rados
2215 ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off
2216 wait_for_clean
2217 ceph osd pool get $TEST_POOL_GETSET all
2218
2219 for s in pg_num pgp_num size min_size crush_rule target_size_ratio; do
2220 ceph osd pool get $TEST_POOL_GETSET $s
2221 done
2222
2223 old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
2224 (( new_size = old_size + 1 ))
2225 ceph osd pool set $TEST_POOL_GETSET size $new_size --yes-i-really-mean-it
2226 ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
2227 ceph osd pool set $TEST_POOL_GETSET size $old_size --yes-i-really-mean-it
2228
2229 ceph osd pool create pool_erasure 1 1 erasure
2230 ceph osd pool application enable pool_erasure rados
2231 wait_for_clean
2232 set +e
2233 ceph osd pool set pool_erasure size 4444 2>$TMPFILE
2234 check_response 'not change the size'
2235 set -e
2236 ceph osd pool get pool_erasure erasure_code_profile
2237 ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it
2238
2239 for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub bulk; do
2240 ceph osd pool set $TEST_POOL_GETSET $flag false
2241 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
2242 ceph osd pool set $TEST_POOL_GETSET $flag true
2243 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
2244 ceph osd pool set $TEST_POOL_GETSET $flag 1
2245 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
2246 ceph osd pool set $TEST_POOL_GETSET $flag 0
2247 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
2248 expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
2249 expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
2250 done
2251
2252 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
2253 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
2254 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
2255 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
2256 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
2257
2258 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
2259 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
2260 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
2261 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
2262 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
2263
2264 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
2265 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
2266 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
2267 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
2268 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
2269
2270 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
2271 ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
2272 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
2273 ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
2274 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'
2275 ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
2276 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
2277 expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11
2278 expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11
2279
2280 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
2281 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
2282 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
2283 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
2284 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
2285
2286 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
2287 ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
2288 ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
2289 ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
2290 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
2291
2292 expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio -3
2293 expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio abc
2294 expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 0.1
2295 expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 1
2296 ceph osd pool get $TEST_POOL_GETSET target_size_ratio | grep 'target_size_ratio: 1'
2297
2298 ceph osd pool set $TEST_POOL_GETSET nopgchange 1
2299 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
2300 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
2301 ceph osd pool set $TEST_POOL_GETSET nopgchange 0
2302 ceph osd pool set $TEST_POOL_GETSET pg_num 10
2303 wait_for_clean
2304 ceph osd pool set $TEST_POOL_GETSET pgp_num 10
2305 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0
2306 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0
2307
2308 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
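# grow pg_num by 32 per OSD below; presumably this keeps the increase
# proportional to the cluster size so per-OSD PG limits are not exceeded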
2309 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
2310 ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
2311 ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
2312 wait_for_clean
2313
2314 ceph osd pool set $TEST_POOL_GETSET nosizechange 1
2315 expect_false ceph osd pool set $TEST_POOL_GETSET size 2
2316 expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
2317 ceph osd pool set $TEST_POOL_GETSET nosizechange 0
2318 ceph osd pool set $TEST_POOL_GETSET size 2
2319 wait_for_clean
2320 ceph osd pool set $TEST_POOL_GETSET min_size 2
2321
2322 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
2323 ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
2324
2325 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
2326 ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
2327
2328 ceph osd pool get rbd crush_rule | grep 'crush_rule: '
2329
2330 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2331 ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
2332 ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
2333 ceph osd pool set $TEST_POOL_GETSET compression_mode unset
2334 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2335
2336 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2337 ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
2338 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
2339 ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
2340 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2341
2342 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2343 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
2344 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
2345 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
2346 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
2347 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
2348 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2349
2350 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2351 ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
2352 ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
2353 ceph osd pool set $TEST_POOL_GETSET csum_type unset
2354 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2355
2356 for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2357 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2358 ceph osd pool set $TEST_POOL_GETSET $size 100
2359 ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
2360 ceph osd pool set $TEST_POOL_GETSET $size 0
2361 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2362 done
2363
2364 ceph osd pool set $TEST_POOL_GETSET nodelete 1
2365 expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2366 ceph osd pool set $TEST_POOL_GETSET nodelete 0
2367 ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2368
2369 }
2370
2371 function test_mon_osd_tiered_pool_set()
2372 {
2373 # this is really a tier pool
2374 ceph osd pool create real-tier 2
2375 ceph osd tier add rbd real-tier
2376
2377 # expect us to be unable to set negative values for hit_set_*
2378 for o in hit_set_period hit_set_count hit_set_fpp; do
2379 expect_false ceph osd pool set real-tier $o -1
2380 done
2381
2382 # and hit_set_fpp should be in range 0..1
2383 expect_false ceph osd pool set real-tier hit_set_fpp 2
2384
2385 ceph osd pool set real-tier hit_set_type explicit_hash
2386 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
2387 ceph osd pool set real-tier hit_set_type explicit_object
2388 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
2389 ceph osd pool set real-tier hit_set_type bloom
2390 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
2391 expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
2392 ceph osd pool set real-tier hit_set_period 123
2393 ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
2394 ceph osd pool set real-tier hit_set_count 12
2395 ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
2396 ceph osd pool set real-tier hit_set_fpp .01
2397 ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
2398
2399 ceph osd pool set real-tier target_max_objects 123
2400 ceph osd pool get real-tier target_max_objects | \
2401 grep 'target_max_objects:[ \t]\+123'
2402 ceph osd pool set real-tier target_max_bytes 123456
2403 ceph osd pool get real-tier target_max_bytes | \
2404 grep 'target_max_bytes:[ \t]\+123456'
2405 ceph osd pool set real-tier cache_target_dirty_ratio .123
2406 ceph osd pool get real-tier cache_target_dirty_ratio | \
2407 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
2408 expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
2409 expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
2410 ceph osd pool set real-tier cache_target_dirty_high_ratio .123
2411 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
2412 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
2413 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
2414 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
2415 ceph osd pool set real-tier cache_target_full_ratio .123
2416 ceph osd pool get real-tier cache_target_full_ratio | \
2417 grep 'cache_target_full_ratio:[ \t]\+0.123'
2418 ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
2419 ceph osd pool set real-tier cache_target_full_ratio 1.0
2420 ceph osd pool set real-tier cache_target_full_ratio 0
2421 expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
2422 ceph osd pool set real-tier cache_min_flush_age 123
2423 ceph osd pool get real-tier cache_min_flush_age | \
2424 grep 'cache_min_flush_age:[ \t]\+123'
2425 ceph osd pool set real-tier cache_min_evict_age 234
2426 ceph osd pool get real-tier cache_min_evict_age | \
2427 grep 'cache_min_evict_age:[ \t]\+234'
2428
2429 # iec vs si units
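# (per the checks below: a plain count such as target_max_objects parses "1K"
# as 1000, while byte-valued options parse both "1Ki" and "1M" as binary,
# i.e. 1024 and 1048576)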
2430 ceph osd pool set real-tier target_max_objects 1K
2431 ceph osd pool get real-tier target_max_objects | grep 1000
2432 for o in target_max_bytes target_size_bytes compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2433 ceph osd pool set real-tier $o 1Ki # with i suffix
2434 val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
2435 [[ $val == 1024 ]]
2436 ceph osd pool set real-tier $o 1M # no i suffix
2437 val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
2438 [[ $val == 1048576 ]]
2439 done
2440
2441 # this is not a tier pool
2442 ceph osd pool create fake-tier 2
2443 ceph osd pool application enable fake-tier rados
2444 wait_for_clean
2445
2446 expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
2447 expect_false ceph osd pool get fake-tier hit_set_type
2448 expect_false ceph osd pool set fake-tier hit_set_type explicit_object
2449 expect_false ceph osd pool get fake-tier hit_set_type
2450 expect_false ceph osd pool set fake-tier hit_set_type bloom
2451 expect_false ceph osd pool get fake-tier hit_set_type
2452 expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
2453 expect_false ceph osd pool set fake-tier hit_set_period 123
2454 expect_false ceph osd pool get fake-tier hit_set_period
2455 expect_false ceph osd pool set fake-tier hit_set_count 12
2456 expect_false ceph osd pool get fake-tier hit_set_count
2457 expect_false ceph osd pool set fake-tier hit_set_fpp .01
2458 expect_false ceph osd pool get fake-tier hit_set_fpp
2459
2460 expect_false ceph osd pool set fake-tier target_max_objects 123
2461 expect_false ceph osd pool get fake-tier target_max_objects
2462 expect_false ceph osd pool set fake-tier target_max_bytes 123456
2463 expect_false ceph osd pool get fake-tier target_max_bytes
2464 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
2465 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2466 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
2467 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
2468 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
2469 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2470 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
2471 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
2472 expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
2473 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2474 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
2475 expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
2476 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
2477 expect_false ceph osd pool set fake-tier cache_min_flush_age 123
2478 expect_false ceph osd pool get fake-tier cache_min_flush_age
2479 expect_false ceph osd pool set fake-tier cache_min_evict_age 234
2480 expect_false ceph osd pool get fake-tier cache_min_evict_age
2481
2482 ceph osd tier remove rbd real-tier
2483 ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
2484 ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
2485 }
2486
2487 function test_mon_osd_erasure_code()
2488 {
2489
2490 ceph osd erasure-code-profile set fooprofile a=b c=d
2491 ceph osd erasure-code-profile set fooprofile a=b c=d
2492 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2493 ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
2494 ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2495 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
2496 # make sure rule-foo doesn't work anymore
2497 expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
2498 ceph osd erasure-code-profile set barprofile crush-failure-domain=host
2499 # clean up
2500 ceph osd erasure-code-profile rm fooprofile
2501 ceph osd erasure-code-profile rm barprofile
2502
2503 # try weird k and m values
2504 expect_false ceph osd erasure-code-profile set badk k=1 m=1
2505 expect_false ceph osd erasure-code-profile set badk k=1 m=2
2506 expect_false ceph osd erasure-code-profile set badk k=0 m=2
2507 expect_false ceph osd erasure-code-profile set badk k=-1 m=2
2508 expect_false ceph osd erasure-code-profile set badm k=2 m=0
2509 expect_false ceph osd erasure-code-profile set badm k=2 m=-1
2510 ceph osd erasure-code-profile set good k=2 m=1
2511 ceph osd erasure-code-profile rm good
2512 }
2513
2514 function test_mon_osd_misc()
2515 {
2516 set +e
2517
2518 # expect error about missing 'pool' argument
2519 ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
2520
2521 # expect error about unused argument foo
2522 ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
2523
2524 # expect an error when the overload percentage is not higher than 100
2525 ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
2526
2527 set -e
2528
2529 local old_bytes_per_osd=$(ceph config get mgr mon_reweight_min_bytes_per_osd)
2530 local old_pgs_per_osd=$(ceph config get mgr mon_reweight_min_pgs_per_osd)
2531 # otherwise ceph-mgr complains like:
2532 # Error EDOM: Refusing to reweight: we only have 5372 kb used across all osds!
2533 # Error EDOM: Refusing to reweight: we only have 20 PGs across 3 osds!
2534 ceph config set mgr mon_reweight_min_bytes_per_osd 0
2535 ceph config set mgr mon_reweight_min_pgs_per_osd 0
2536 ceph osd reweight-by-utilization 110
2537 ceph osd reweight-by-utilization 110 .5
2538 expect_false ceph osd reweight-by-utilization 110 0
2539 expect_false ceph osd reweight-by-utilization 110 -0.1
2540 ceph osd test-reweight-by-utilization 110 .5 --no-increasing
2541 ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
2542 expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
2543 expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
2544 ceph osd reweight-by-pg 110
2545 ceph osd test-reweight-by-pg 110 .5
2546 ceph osd reweight-by-pg 110 rbd
2547 ceph osd reweight-by-pg 110 .5 rbd
2548 expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
2549 # restore the setting
2550 ceph config set mgr mon_reweight_min_bytes_per_osd $old_bytes_per_osd
2551 ceph config set mgr mon_reweight_min_pgs_per_osd $old_pgs_per_osd
2552 }
2553
2554 function test_admin_heap_profiler()
2555 {
2556 do_test=1
2557 set +e
2558 # expect 'heap' commands to be correctly parsed
2559 ceph tell osd.0 heap stats 2>$TMPFILE
2560 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2561 echo "tcmalloc not enabled; skip heap profiler test"
2562 do_test=0
2563 fi
2564 set -e
2565
2566 [[ $do_test -eq 0 ]] && return 0
2567
2568 $SUDO ceph tell osd.0 heap start_profiler
2569 $SUDO ceph tell osd.0 heap dump
2570 $SUDO ceph tell osd.0 heap stop_profiler
2571 $SUDO ceph tell osd.0 heap release
2572 }
2573
2574 function test_osd_bench()
2575 {
2576 # test osd bench limits
2577 # As we should not rely on defaults (as they may change over time),
2578 # let's inject some values and perform some simple tests
2579 # max iops: 10 # 10 IOPS
2580 # max throughput: 10485760 # 10MB/s
2581 # max block size: 2097152 # 2MB
2582 # duration: 10 # 10 seconds
2583
2584 local args="\
2585 --osd-bench-duration 10 \
2586 --osd-bench-max-block-size 2097152 \
2587 --osd-bench-large-size-max-throughput 10485760 \
2588 --osd-bench-small-size-max-iops 10"
2589 ceph tell osd.0 injectargs ${args## }
2590
2591 # anything with a bs larger than 2097152 must fail
2592 expect_false ceph tell osd.0 bench 1 2097153
2593 # but using the 'osd_bench_max_block_size' value itself must succeed
2594 ceph tell osd.0 bench 1 2097152
2595
2596 # we assume 1MB as a large bs; anything lower is a small bs
2597 # for a 4096-byte bs, for 10 seconds, we are limited by IOPS
2598 # max count: 409600 (bytes)
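# (worked out from the injected limits: 10 IOPS * 10 s = 100 IOs,
#  and 100 IOs * 4096 bytes = 409600 bytes)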
2599
2600 # more than max count must not be allowed
2601 expect_false ceph tell osd.0 bench 409601 4096
2602 # but 409600 must succeed
2603 ceph tell osd.0 bench 409600 4096
2604
2605 # for a large bs, we are limited by throughput.
2606 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
2607 # the max count will be (10MB * 10s) = 100MB
2608 # max count: 104857600 (bytes)
2609
2610 # more than max count must not be allowed
2611 expect_false ceph tell osd.0 bench 104857601 2097152
2612 # up to max count must be allowed
2613 ceph tell osd.0 bench 104857600 2097152
2614 }
2615
2616 function test_osd_negative_filestore_merge_threshold()
2617 {
2618 $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
2619 expect_config_value "osd.0" "filestore_merge_threshold" -1
2620 }
2621
2622 function test_mon_tell()
2623 {
2624 for m in mon.a mon.b; do
2625 ceph tell $m sessions
2626 ceph_watch_start debug audit
2627 ceph tell mon.a sessions
2628 ceph_watch_wait "${m} \[DBG\] from.*cmd='sessions' args=\[\]: dispatch"
2629 done
2630 expect_false ceph tell mon.foo version
2631 }
2632
2633 function test_mon_ping()
2634 {
2635 ceph ping mon.a
2636 ceph ping mon.b
2637 expect_false ceph ping mon.foo
2638
2639 ceph ping mon.\*
2640 }
2641
2642 function test_mon_deprecated_commands()
2643 {
2644 # current DEPRECATED commands are marked with FLAG(DEPRECATED)
2645 #
2646 # Testing should be accomplished by setting
2647 # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
2648 # each one of these commands.
2649
2650 ceph tell mon.* injectargs '--mon-debug-deprecated-as-obsolete'
2651 expect_false ceph config-key list 2> $TMPFILE
2652 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2653
2654 ceph tell mon.* injectargs '--no-mon-debug-deprecated-as-obsolete'
2655 }
2656
2657 function test_mon_cephdf_commands()
2658 {
2659 # ceph df detail:
2660 # pool section:
2661 # RAW USED: the raw space consumed per pool, roughly STORED times the replication factor
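# A hedged worked example: with pool size 2 and a single 4 KiB object,
# STORED is about 4 KiB and RAW USED about 8 KiB, which is why the check
# below compares (stored * 2) against stored_raw.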
2662
2663 ceph osd pool create cephdf_for_test 1 1 replicated
2664 ceph osd pool application enable cephdf_for_test rados
2665 ceph osd pool set cephdf_for_test size 2
2666
2667 dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
2668 rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
2669
2670 # wait for update
2671 for i in `seq 1 10`; do
2672 rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
2673 sleep 1
2674 done
2675 # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
2676 # to sync mon with osd
2677 flush_pg_stats
2678 local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
2679 stored=`ceph df detail --format=json | jq "$jq_filter.stored * 2"`
2680 stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
2681
2682 ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
2683 rm ./cephdf_for_test
2684
2685 expect_false test $stored != $stored_raw
2686 }
2687
2688 function test_mon_pool_application()
2689 {
2690 ceph osd pool create app_for_test 16
2691
2692 ceph osd pool application enable app_for_test rbd
2693 expect_false ceph osd pool application enable app_for_test rgw
2694 ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
2695 ceph osd pool ls detail | grep "application rbd,rgw"
2696 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2697
2698 expect_false ceph osd pool application set app_for_test cephfs key value
2699 ceph osd pool application set app_for_test rbd key1 value1
2700 ceph osd pool application set app_for_test rbd key2 value2
2701 ceph osd pool application set app_for_test rgw key1 value1
2702 ceph osd pool application get app_for_test rbd key1 | grep 'value1'
2703 ceph osd pool application get app_for_test rbd key2 | grep 'value2'
2704 ceph osd pool application get app_for_test rgw key1 | grep 'value1'
2705
2706 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
2707
2708 ceph osd pool application rm app_for_test rgw key1
2709 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
2710 ceph osd pool application rm app_for_test rbd key2
2711 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
2712 ceph osd pool application rm app_for_test rbd key1
2713 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2714 ceph osd pool application rm app_for_test rbd key1 # should be idempotent
2715
2716 expect_false ceph osd pool application disable app_for_test rgw
2717 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2718 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
2719 ceph osd pool ls detail | grep "application rbd"
2720 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
2721
2722 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2723 ceph osd pool ls detail | grep -v "application "
2724 ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
2725
2726 ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
2727 }
2728
2729 function test_mon_tell_help_command()
2730 {
2731 ceph tell mon.a help | grep sync_force
2732 ceph tell mon.a -h | grep sync_force
2733 ceph tell mon.a config -h | grep 'config diff get'
2734
2735 # wrong target
2736 expect_false ceph tell mon.zzz help
2737 }
2738
2739 function test_mon_stdin_stdout()
2740 {
2741 echo foo | ceph config-key set test_key -i -
2742 ceph config-key get test_key -o - | grep -c foo | grep -q 1
2743 }
2744
2745 function test_osd_tell_help_command()
2746 {
2747 ceph tell osd.1 help
2748 expect_false ceph tell osd.100 help
2749 }
2750
2751 function test_osd_compact()
2752 {
2753 ceph tell osd.1 compact
2754 $SUDO ceph daemon osd.1 compact
2755 }
2756
2757 function test_mds_tell_help_command()
2758 {
2759 local FS_NAME=cephfs
2760 if ! mds_exists ; then
2761 echo "Skipping test, no MDS found"
2762 return
2763 fi
2764
2765 remove_all_fs
2766 ceph osd pool create fs_data 16
2767 ceph osd pool create fs_metadata 16
2768 ceph fs new $FS_NAME fs_metadata fs_data
2769 wait_mds_active $FS_NAME
2770
2771
2772 ceph tell mds.a help
2773 expect_false ceph tell mds.z help
2774
2775 remove_all_fs
2776 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
2777 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
2778 }
2779
2780 function test_mgr_tell()
2781 {
2782 ceph tell mgr version
2783 }
2784
2785 function test_mgr_devices()
2786 {
2787 ceph device ls
2788 expect_false ceph device info doesnotexist
2789 expect_false ceph device get-health-metrics doesnotexist
2790 }
2791
2792 function test_per_pool_scrub_status()
2793 {
2794 ceph osd pool create noscrub_pool 16
2795 ceph osd pool create noscrub_pool2 16
2796 ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
2797 ceph -s --format json | \
2798 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2799 expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
2800 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail |
2801 expect_false grep -q "Pool .* has .*scrub.* flag"
2802 ceph health detail | \
2803 expect_false grep -q "Pool .* has .*scrub.* flag"
2804
2805 ceph osd pool set noscrub_pool noscrub 1
2806 ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
2807 ceph -s --format json | \
2808 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2809 expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
2810 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2811 expect_true grep -q "Pool noscrub_pool has noscrub flag"
2812 ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
2813
2814 ceph osd pool set noscrub_pool nodeep-scrub 1
2815 ceph osd pool set noscrub_pool2 nodeep-scrub 1
2816 ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
2817 ceph -s --format json | \
2818 jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
2819 expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
2820 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2821 expect_true grep -q "Pool noscrub_pool has noscrub flag"
2822 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2823 expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
2824 ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
2825 expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
2826 ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
2827 ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
2828 ceph health detail | expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
2829
2830 ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it
2831 ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it
2832 }
2833
2834 #
2835 # New tests should be added to the TESTS array below
2836 #
2837 # Individual tests may be run using the '-t <testname>' argument
2838 # The user can specify '-t <testname>' as many times as she wants
2839 #
2840 # Tests will be run in order presented in the TESTS array, or in
2841 # the order specified by the '-t <testname>' options.
2842 #
2843 # '-l' will list all the available test names
2844 # '-h' will show usage
2845 #
2846 # The test maintains backward compatibility: not specifying arguments
2847 # will run all tests following the order they appear in the TESTS array.
2848 #
2849
2850 set +x
2851 MON_TESTS+=" mon_injectargs"
2852 MON_TESTS+=" mon_injectargs_SI"
2853 for i in `seq 9`; do
2854 MON_TESTS+=" tiering_$i";
2855 done
2856 MON_TESTS+=" auth"
2857 MON_TESTS+=" auth_profiles"
2858 MON_TESTS+=" mon_misc"
2859 MON_TESTS+=" mon_mon"
2860 MON_TESTS+=" mon_osd"
2861 MON_TESTS+=" mon_config_key"
2862 MON_TESTS+=" mon_crush"
2863 MON_TESTS+=" mon_osd_create_destroy"
2864 MON_TESTS+=" mon_osd_pool"
2865 MON_TESTS+=" mon_osd_pool_quota"
2866 MON_TESTS+=" mon_pg"
2867 MON_TESTS+=" mon_osd_pool_set"
2868 MON_TESTS+=" mon_osd_tiered_pool_set"
2869 MON_TESTS+=" mon_osd_erasure_code"
2870 MON_TESTS+=" mon_osd_misc"
2871 MON_TESTS+=" mon_tell"
2872 MON_TESTS+=" mon_ping"
2873 MON_TESTS+=" mon_deprecated_commands"
2874 MON_TESTS+=" mon_caps"
2875 MON_TESTS+=" mon_cephdf_commands"
2876 MON_TESTS+=" mon_tell_help_command"
2877 MON_TESTS+=" mon_stdin_stdout"
2878
2879 OSD_TESTS+=" osd_bench"
2880 OSD_TESTS+=" osd_negative_filestore_merge_threshold"
2881 OSD_TESTS+=" tiering_agent"
2882 OSD_TESTS+=" admin_heap_profiler"
2883 OSD_TESTS+=" osd_tell_help_command"
2884 OSD_TESTS+=" osd_compact"
2885 OSD_TESTS+=" per_pool_scrub_status"
2886
2887 MDS_TESTS+=" mds_tell"
2888 MDS_TESTS+=" mon_mds"
2889 MDS_TESTS+=" mon_mds_metadata"
2890 MDS_TESTS+=" mds_tell_help_command"
2891
2892 MGR_TESTS+=" mgr_tell"
2893 MGR_TESTS+=" mgr_devices"
2894
2895 TESTS+=$MON_TESTS
2896 TESTS+=$OSD_TESTS
2897 TESTS+=$MDS_TESTS
2898 TESTS+=$MGR_TESTS
2899
2900 #
2901 # "main" follows
2902 #
2903
2904 function list_tests()
2905 {
2906 echo "AVAILABLE TESTS"
2907 for i in $TESTS; do
2908 echo " $i"
2909 done
2910 }
2911
2912 function usage()
2913 {
2914 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2915 }
2916
2917 tests_to_run=()
2918
2919 sanity_check=true
2920
2921 while [[ $# -gt 0 ]]; do
2922 opt=$1
2923
2924 case "$opt" in
2925 "-l" )
2926 do_list=1
2927 ;;
2928 "--asok-does-not-need-root" )
2929 SUDO=""
2930 ;;
2931 "--no-sanity-check" )
2932 sanity_check=false
2933 ;;
2934 "--test-mon" )
2935 tests_to_run+="$MON_TESTS"
2936 ;;
2937 "--test-osd" )
2938 tests_to_run+="$OSD_TESTS"
2939 ;;
2940 "--test-mds" )
2941 tests_to_run+="$MDS_TESTS"
2942 ;;
2943 "--test-mgr" )
2944 tests_to_run+="$MGR_TESTS"
2945 ;;
2946 "-t" )
2947 shift
2948 if [[ -z "$1" ]]; then
2949 echo "missing argument to '-t'"
2950 usage ;
2951 exit 1
2952 fi
2953 tests_to_run+=" $1"
2954 ;;
2955 "-h" )
2956 usage ;
2957 exit 0
2958 ;;
2959 esac
2960 shift
2961 done
2962
2963 if [[ $do_list -eq 1 ]]; then
2964 list_tests ;
2965 exit 0
2966 fi
2967
2968 ceph osd pool create rbd 16
2969
2970 if test -z "$tests_to_run" ; then
2971 tests_to_run="$TESTS"
2972 fi
2973
2974 if $sanity_check ; then
2975 wait_no_osd_down
2976 fi
2977 for i in $tests_to_run; do
2978 if $sanity_check ; then
2979 check_no_osd_down
2980 fi
2981 set -x
2982 test_${i}
2983 set +x
2984 done
2985 if $sanity_check ; then
2986 check_no_osd_down
2987 fi
2988
2989 set -x
2990
2991 echo OK