1 #!/bin/bash -x
2 # -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
3 # vim: ts=8 sw=8 ft=bash smarttab
4
5 source $(dirname $0)/../../standalone/ceph-helpers.sh
6
7 set -e
8 set -o functrace
9 PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
10 SUDO=${SUDO:-sudo}
11 export CEPH_DEV=1
12
13 function get_admin_socket()
14 {
15 local client=$1
16
17 if test -n "$CEPH_ASOK_DIR";
18 then
19 echo $(get_asok_dir)/$client.asok
20 else
21 local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
22 echo "/var/run/ceph/$cluster-$client.asok"
23 fi
24 }
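#
# Illustrative usage (not executed): get_admin_socket resolves the admin
# socket path for a daemon, e.g.
#   get_admin_socket osd.0
# prints $(get_asok_dir)/osd.0.asok when CEPH_ASOK_DIR is set, otherwise
# /var/run/ceph/<cluster>-osd.0.asok.
#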
25
26 function check_no_osd_down()
27 {
28 ! ceph osd dump | grep ' down '
29 }
30
31 function wait_no_osd_down()
32 {
33 max_run=300
34 for i in $(seq 1 $max_run) ; do
35 if ! check_no_osd_down ; then
36 echo "waiting for osd(s) to come back up ($i/$max_run)"
37 sleep 1
38 else
39 break
40 fi
41 done
42 check_no_osd_down
43 }
44
45 function expect_false()
46 {
47 set -x
48 if "$@"; then return 1; else return 0; fi
49 }
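#
# Illustrative usage (not executed): expect_false inverts a command's exit
# status, so a test can assert that a command must fail, e.g.
#   expect_false ceph auth get client.xx   # passes only if the command fails
#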
50
51
52 TEMP_DIR=$(mktemp -d ${TMPDIR:-/tmp}/cephtool.XXX)
53 trap "rm -fr $TEMP_DIR" 0
54
55 TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
56
57 #
58 # retry_eagain max cmd args ...
59 #
60 # retry cmd args ... if it exits on error and its output contains the
61 # string EAGAIN, at most $max times
62 #
63 function retry_eagain()
64 {
65 local max=$1
66 shift
67 local status
68 local tmpfile=$TEMP_DIR/retry_eagain.$$
69 local count
70 for count in $(seq 1 $max) ; do
71 status=0
72 "$@" > $tmpfile 2>&1 || status=$?
73 if test $status = 0 ||
74 ! grep --quiet EAGAIN $tmpfile ; then
75 break
76 fi
77 sleep 1
78 done
79 if test $count = $max ; then
80 echo retried with non-zero exit status, $max times: "$@" >&2
81 fi
82 cat $tmpfile
83 rm $tmpfile
84 return $status
85 }
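#
# Illustrative usage (not executed): retry a command up to 5 times while it
# keeps failing with EAGAIN in its output, e.g.
#   retry_eagain 5 ceph tell osd.0 version
#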
86
87 #
88 # map_enxio_to_eagain cmd arg ...
89 #
90 # add EAGAIN to the output of cmd arg ... if the output contains
91 # ENXIO.
92 #
93 function map_enxio_to_eagain()
94 {
95 local status=0
96 local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
97
98 "$@" > $tmpfile 2>&1 || status=$?
99 if test $status != 0 &&
100 grep --quiet ENXIO $tmpfile ; then
101 echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
102 fi
103 cat $tmpfile
104 rm $tmpfile
105 return $status
106 }
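#
# Illustrative usage (not executed): the two helpers compose, so a command
# whose failure output contains ENXIO is retried as if it had returned
# EAGAIN, as done in test_mon_osd below:
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version
#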
107
108 function check_response()
109 {
110 expected_string=$1
111 retcode=$2
112 expected_retcode=$3
113 if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
114 echo "return code invalid: got $retcode, expected $expected_retcode" >&2
115 exit 1
116 fi
117
118 if ! grep --quiet -- "$expected_string" $TMPFILE ; then
119 echo "Didn't find $expected_string in output" >&2
120 cat $TMPFILE >&2
121 exit 1
122 fi
123 }
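#
# Illustrative usage (not executed): run a command with stderr captured in
# $TMPFILE, then assert on the expected error text and (optionally) the
# return code; 'some_ceph_command' below is a placeholder:
#   some_ceph_command 2> $TMPFILE || true
#   check_response "expected error text"
# or, inside a 'set +e' block, also verify the return code:
#   some_ceph_command 2> $TMPFILE
#   check_response 'expected error text' $? 22
#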
124
125 function get_config_value_or_die()
126 {
127 local target config_opt raw val
128
129 target=$1
130 config_opt=$2
131
132 raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
133 if [[ $? -ne 0 ]]; then
134 echo "error obtaining config opt '$config_opt' from '$target': $raw"
135 exit 1
136 fi
137
138 raw=`echo $raw | sed -e 's/[{} "]//g'`
139 val=`echo $raw | cut -f2 -d:`
140
141 echo "$val"
142 return 0
143 }
144
145 function expect_config_value()
146 {
147 local target config_opt expected_val val
148 target=$1
149 config_opt=$2
150 expected_val=$3
151
152 val=$(get_config_value_or_die $target $config_opt)
153
154 if [[ "$val" != "$expected_val" ]]; then
155 echo "expected '$expected_val', got '$val'"
156 exit 1
157 fi
158 }
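#
# Illustrative usage (not executed): read a daemon config value over the
# admin socket and assert it matches, e.g.
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
#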
159
160 function ceph_watch_start()
161 {
162 local watch_opt=--watch
163
164 if [ -n "$1" ]; then
165 watch_opt=--watch-$1
166 if [ -n "$2" ]; then
167 watch_opt+=" --watch-channel $2"
168 fi
169 fi
170
171 CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
172 ceph $watch_opt > $CEPH_WATCH_FILE &
173 CEPH_WATCH_PID=$!
174
175 # wait until the "ceph" client is connected and receiving
176 # log messages from the monitor
177 for i in `seq 3`; do
178 grep -q "cluster" $CEPH_WATCH_FILE && break
179 sleep 1
180 done
181 }
182
183 function ceph_watch_wait()
184 {
185 local regexp=$1
186 local timeout=30
187
188 if [ -n "$2" ]; then
189 timeout=$2
190 fi
191
192 for i in `seq ${timeout}`; do
193 grep -q "$regexp" $CEPH_WATCH_FILE && break
194 sleep 1
195 done
196
197 kill $CEPH_WATCH_PID
198
199 if ! grep "$regexp" $CEPH_WATCH_FILE; then
200 echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
201 cat $CEPH_WATCH_FILE >&2
202 return 1
203 fi
204 }
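#
# Illustrative usage (not executed): start a background watcher, trigger some
# cluster activity, then wait (with a timeout) for a matching line to show up
# in the watch output; ceph_watch_wait also kills the watcher:
#   ceph_watch_start
#   ceph log "some test message"
#   ceph_watch_wait "some test message" 30
#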
205
206 function test_mon_injectargs()
207 {
208 CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
209 check_response "osd_enable_op_tracker = 'false'"
210 ! grep "the.dump" $TMPFILE || return 1
211 ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
212 check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
213 ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
214 check_response "osd_enable_op_tracker = 'false'"
215 ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
216 check_response "osd_enable_op_tracker = 'true'"
217 ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
218 check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
219 expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
220 ceph tell osd.0 injectargs -- '--osd_op_history_duration'
221
222 ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200' >& $TMPFILE || return 1
223 check_response "osd_deep_scrub_interval = '2419200.000000' (not observed, change may require restart)"
224
225 ceph tell osd.0 injectargs -- '--mon_probe_timeout 2' >& $TMPFILE || return 1
226 check_response "mon_probe_timeout = '2.000000' (not observed, change may require restart)"
227
228 ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
229 check_response "mon_lease = '6.000000' (not observed, change may require restart)"
230
231 # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
232 expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 >& $TMPFILE || return 1
233 check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
234 }
235
236 function test_mon_injectargs_SI()
237 {
238 # Test SI units during injectargs and 'config set'
239 # We only aim at testing that the units are parsed correctly
240 # and don't intend to test whether the options being set
241 # actually expect SI units to be passed.
242 # Keep in mind that all integer-based options that are not byte-based
243 # (i.e., INT, LONG, U32, U64) accept SI unit modifiers and are parsed in
244 # base 10.
245 initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
246 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
247 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
248 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
249 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
250 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
251 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
252 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
253 check_response "'10F': (22) Invalid argument"
254 # now test with injectargs
255 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
256 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
257 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
258 expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
259 ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
260 expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
261 expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
262 expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
263 $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
264 }
265
266 function test_mon_injectargs_IEC()
267 {
268 # Test IEC units during injectargs and 'config set'
269 # We only aim at testing that the units are parsed correctly
270 # and don't intend to test whether the options being set
271 # actually expect IEC units to be passed.
272 # Keep in mind that all integer-based options that are byte-based
273 # (i.e., INT, LONG, U32, U64) accept IEC unit modifiers, as well as SI
274 # unit modifiers (for backwards compatibility and convenience), and are
275 # parsed in base 2.
276 initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
277 $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
278 expect_config_value "mon.a" "mon_data_size_warn" 15000000000
279 $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
280 expect_config_value "mon.a" "mon_data_size_warn" 16106127360
281 $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
282 expect_config_value "mon.a" "mon_data_size_warn" 17179869184
283 $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
284 check_response "'10F': (22) Invalid argument"
285 # now test with injectargs
286 ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
287 expect_config_value "mon.a" "mon_data_size_warn" 15000000000
288 ceph tell mon.a injectargs '--mon_data_size_warn 15G'
289 expect_config_value "mon.a" "mon_data_size_warn" 16106127360
290 ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
291 expect_config_value "mon.a" "mon_data_size_warn" 17179869184
292 expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
293 $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
294 }
295
296 function test_tiering_agent()
297 {
298 local slow=slow_eviction
299 local fast=fast_eviction
300 ceph osd pool create $slow 1 1
301 ceph osd pool application enable $slow rados
302 ceph osd pool create $fast 1 1
303 ceph osd tier add $slow $fast
304 ceph osd tier cache-mode $fast writeback
305 ceph osd tier set-overlay $slow $fast
306 ceph osd pool set $fast hit_set_type bloom
307 rados -p $slow put obj1 /etc/group
308 ceph osd pool set $fast target_max_objects 1
309 ceph osd pool set $fast hit_set_count 1
310 ceph osd pool set $fast hit_set_period 5
311 # wait for the object to be evicted from the cache
312 local evicted
313 evicted=false
314 for i in `seq 1 300` ; do
315 if ! rados -p $fast ls | grep obj1 ; then
316 evicted=true
317 break
318 fi
319 sleep 1
320 done
321 $evicted # assert
322 # the object is proxy read and promoted to the cache
323 rados -p $slow get obj1 - >/dev/null
324 # wait for the promoted object to be evicted again
325 evicted=false
326 for i in `seq 1 300` ; do
327 if ! rados -p $fast ls | grep obj1 ; then
328 evicted=true
329 break
330 fi
331 sleep 1
332 done
333 $evicted # assert
334 ceph osd tier remove-overlay $slow
335 ceph osd tier remove $slow $fast
336 ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
337 ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
338 }
339
340 function test_tiering_1()
341 {
342 # tiering
343 ceph osd pool create slow 2
344 ceph osd pool application enable slow rados
345 ceph osd pool create slow2 2
346 ceph osd pool application enable slow2 rados
347 ceph osd pool create cache 2
348 ceph osd pool create cache2 2
349 ceph osd tier add slow cache
350 ceph osd tier add slow cache2
351 expect_false ceph osd tier add slow2 cache
352 # test some state transitions
353 ceph osd tier cache-mode cache writeback
354 expect_false ceph osd tier cache-mode cache forward
355 ceph osd tier cache-mode cache forward --yes-i-really-mean-it
356 expect_false ceph osd tier cache-mode cache readonly
357 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
358 expect_false ceph osd tier cache-mode cache forward
359 ceph osd tier cache-mode cache forward --yes-i-really-mean-it
360 ceph osd tier cache-mode cache none
361 ceph osd tier cache-mode cache writeback
362 ceph osd tier cache-mode cache proxy
363 ceph osd tier cache-mode cache writeback
364 expect_false ceph osd tier cache-mode cache none
365 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
366 # test with dirty objects in the tier pool
367 # tier pool currently set to 'writeback'
368 rados -p cache put /etc/passwd /etc/passwd
369 flush_pg_stats
370 # 1 dirty object in pool 'cache'
371 ceph osd tier cache-mode cache proxy
372 expect_false ceph osd tier cache-mode cache none
373 expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
374 ceph osd tier cache-mode cache writeback
375 # remove object from tier pool
376 rados -p cache rm /etc/passwd
377 rados -p cache cache-flush-evict-all
378 flush_pg_stats
379 # no dirty objects in pool 'cache'
380 ceph osd tier cache-mode cache proxy
381 ceph osd tier cache-mode cache none
382 ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
383 TRIES=0
384 while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
385 do
386 grep 'currently creating pgs' $TMPFILE
387 TRIES=$(( $TRIES + 1 ))
388 test $TRIES -ne 60
389 sleep 3
390 done
391 expect_false ceph osd pool set cache pg_num 4
392 ceph osd tier cache-mode cache none
393 ceph osd tier set-overlay slow cache
394 expect_false ceph osd tier set-overlay slow cache2
395 expect_false ceph osd tier remove slow cache
396 ceph osd tier remove-overlay slow
397 ceph osd tier set-overlay slow cache2
398 ceph osd tier remove-overlay slow
399 ceph osd tier remove slow cache
400 ceph osd tier add slow2 cache
401 expect_false ceph osd tier set-overlay slow cache
402 ceph osd tier set-overlay slow2 cache
403 ceph osd tier remove-overlay slow2
404 ceph osd tier remove slow2 cache
405 ceph osd tier remove slow cache2
406
407 # make sure adding a non-empty pool as a tier fails (without --force-nonempty)
408 rados -p cache2 put /etc/passwd /etc/passwd
409 while ! ceph df | grep cache2 | grep ' 1 ' ; do
410 echo waiting for pg stats to flush
411 sleep 2
412 done
413 expect_false ceph osd tier add slow cache2
414 ceph osd tier add slow cache2 --force-nonempty
415 ceph osd tier remove slow cache2
416
417 ceph osd pool ls | grep cache2
418 ceph osd pool ls -f json-pretty | grep cache2
419 ceph osd pool ls detail | grep cache2
420 ceph osd pool ls detail -f json-pretty | grep cache2
421
422 ceph osd pool delete slow slow --yes-i-really-really-mean-it
423 ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
424 ceph osd pool delete cache cache --yes-i-really-really-mean-it
425 ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
426 }
427
428 function test_tiering_2()
429 {
430 # make sure we can't clobber snapshot state
431 ceph osd pool create snap_base 2
432 ceph osd pool application enable snap_base rados
433 ceph osd pool create snap_cache 2
434 ceph osd pool mksnap snap_cache snapname
435 expect_false ceph osd tier add snap_base snap_cache
436 ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
437 ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
438 }
439
440 function test_tiering_3()
441 {
442 # make sure we can't create a snapshot on a tier pool
443 ceph osd pool create basex 2
444 ceph osd pool application enable basex rados
445 ceph osd pool create cachex 2
446 ceph osd tier add basex cachex
447 expect_false ceph osd pool mksnap cachex snapname
448 ceph osd tier remove basex cachex
449 ceph osd pool delete basex basex --yes-i-really-really-mean-it
450 ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
451 }
452
453 function test_tiering_4()
454 {
455 # make sure we can't create an ec pool tier
456 ceph osd pool create eccache 2 2 erasure
457 expect_false ceph osd set-require-min-compat-client bobtail
458 ceph osd pool create repbase 2
459 ceph osd pool application enable repbase rados
460 expect_false ceph osd tier add repbase eccache
461 ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
462 ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
463 }
464
465 function test_tiering_5()
466 {
467 # convenient add-cache command
468 ceph osd pool create slow 2
469 ceph osd pool application enable slow rados
470 ceph osd pool create cache3 2
471 ceph osd tier add-cache slow cache3 1024000
472 ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
473 ceph osd tier remove slow cache3 2> $TMPFILE || true
474 check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
475 ceph osd tier remove-overlay slow
476 ceph osd tier remove slow cache3
477 ceph osd pool ls | grep cache3
478 ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
479 ! ceph osd pool ls | grep cache3 || exit 1
480 ceph osd pool delete slow slow --yes-i-really-really-mean-it
481 }
482
483 function test_tiering_6()
484 {
485 # check whether add-cache works
486 ceph osd pool create datapool 2
487 ceph osd pool application enable datapool rados
488 ceph osd pool create cachepool 2
489 ceph osd tier add-cache datapool cachepool 1024000
490 ceph osd tier cache-mode cachepool writeback
491 rados -p datapool put object /etc/passwd
492 rados -p cachepool stat object
493 rados -p cachepool cache-flush object
494 rados -p datapool stat object
495 ceph osd tier remove-overlay datapool
496 ceph osd tier remove datapool cachepool
497 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
498 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
499 }
500
501 function test_tiering_7()
502 {
503 # protection against pool removal when used as tiers
504 ceph osd pool create datapool 2
505 ceph osd pool application enable datapool rados
506 ceph osd pool create cachepool 2
507 ceph osd tier add-cache datapool cachepool 1024000
508 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
509 check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
510 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
511 check_response "EBUSY: pool 'datapool' has tiers cachepool"
512 ceph osd tier remove-overlay datapool
513 ceph osd tier remove datapool cachepool
514 ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
515 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
516 }
517
518 function test_tiering_8()
519 {
520 ## check health check
521 ceph osd set notieragent
522 ceph osd pool create datapool 2
523 ceph osd pool application enable datapool rados
524 ceph osd pool create cache4 2
525 ceph osd tier add-cache datapool cache4 1024000
526 ceph osd tier cache-mode cache4 writeback
527 tmpfile=$(mktemp|grep tmp)
528 dd if=/dev/zero of=$tmpfile bs=4K count=1
529 ceph osd pool set cache4 target_max_objects 200
530 ceph osd pool set cache4 target_max_bytes 1000000
531 rados -p cache4 put foo1 $tmpfile
532 rados -p cache4 put foo2 $tmpfile
533 rm -f $tmpfile
534 flush_pg_stats
535 ceph df | grep datapool | grep ' 2 '
536 ceph osd tier remove-overlay datapool
537 ceph osd tier remove datapool cache4
538 ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
539 ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
540 ceph osd unset notieragent
541 }
542
543 function test_tiering_9()
544 {
545 # make sure 'tier remove' behaves as we expect
546 # i.e., removing a tier from a pool that's not its base pool only
547 # results in a 'pool foo is now (or already was) not a tier of bar'
548 #
549 ceph osd pool create basepoolA 2
550 ceph osd pool application enable basepoolA rados
551 ceph osd pool create basepoolB 2
552 ceph osd pool application enable basepoolB rados
553 poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
554 poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
555
556 ceph osd pool create cache5 2
557 ceph osd pool create cache6 2
558 ceph osd tier add basepoolA cache5
559 ceph osd tier add basepoolB cache6
560 ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
561 ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
562 ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
563 ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
564
565 ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
566 ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
567 ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
568 ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
569
570 ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
571 ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
572
573 ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
574 ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
575 ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
576 ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
577 }
578
579 function test_auth()
580 {
581 expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
582 expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
583 ceph auth add client.xx mon 'allow *' osd "allow *"
584 ceph auth export client.xx >client.xx.keyring
585 ceph auth add client.xx -i client.xx.keyring
586 rm -f client.xx.keyring
587 ceph auth list | grep client.xx
588 ceph auth ls | grep client.xx
589 ceph auth get client.xx | grep caps | grep mon
590 ceph auth get client.xx | grep caps | grep osd
591 ceph auth get-key client.xx
592 ceph auth print-key client.xx
593 ceph auth print_key client.xx
594 ceph auth caps client.xx osd "allow rw"
595 expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
596 ceph auth get client.xx | grep osd | grep "allow rw"
597 ceph auth export | grep client.xx
598 ceph auth export -o authfile
599 ceph auth import -i authfile
600 ceph auth export -o authfile2
601 diff authfile authfile2
602 rm authfile authfile2
603 ceph auth del client.xx
604 expect_false ceph auth get client.xx
605
606 # (almost) interactive mode
607 echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
608 ceph auth get client.xx
609 # script mode
610 echo 'auth del client.xx' | ceph
611 expect_false ceph auth get client.xx
612
613 #
614 # get / set auid
615 #
616 local auid=444
617 ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
618 expect_false ceph auth import --in-file TEST-keyring
619 rm TEST-keyring
620 ceph-authtool --create-keyring --name client.TEST --gen-key --cap mon "allow r" --set-uid $auid TEST-keyring
621 ceph auth import --in-file TEST-keyring
622 rm TEST-keyring
623 ceph auth get client.TEST > $TMPFILE
624 check_response "auid = $auid"
625 ceph --format json-pretty auth get client.TEST > $TMPFILE
626 check_response '"auid": '$auid
627 ceph auth ls > $TMPFILE
628 check_response "auid: $auid"
629 ceph --format json-pretty auth ls > $TMPFILE
630 check_response '"auid": '$auid
631 ceph auth del client.TEST
632 }
633
634 function test_auth_profiles()
635 {
636 ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
637 mgr 'allow profile read-only'
638 ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
639 mgr 'allow profile read-write'
640 ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
641
642 ceph auth export > client.xx.keyring
643
644 # read-only is allowed all read-only commands (auth excluded)
645 ceph -n client.xx-profile-ro -k client.xx.keyring status
646 ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
647 ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
648 ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
649 ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
650 # read-only gets access denied for rw commands or auth commands
651 ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
652 check_response "EACCES: access denied"
653 ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
654 check_response "EACCES: access denied"
655 ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
656 check_response "EACCES: access denied"
657
658 # read-write is allowed for all read-write commands (except auth)
659 ceph -n client.xx-profile-rw -k client.xx.keyring status
660 ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
661 ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
662 ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
663 ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
664 ceph -n client.xx-profile-rw -k client.xx.keyring log foo
665 ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
666 ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
667 # read-write gets access denied for auth commands
668 ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
669 check_response "EACCES: access denied"
670
671 # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
672 ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
673 ceph -n client.xx-profile-rd -k client.xx.keyring auth export
674 ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
675 ceph -n client.xx-profile-rd -k client.xx.keyring status
676 ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
677 check_response "EACCES: access denied"
678 ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
679 check_response "EACCES: access denied"
680 # read-only 'mon' subsystem commands are allowed
681 ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
682 # but read-write 'mon' commands are not
683 ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
684 check_response "EACCES: access denied"
685 ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
686 check_response "EACCES: access denied"
687 ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
688 check_response "EACCES: access denied"
689 ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
690 check_response "EACCES: access denied"
691
692 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
693 ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
694
695 # add a new role-definer with the existing role-definer
696 ceph -n client.xx-profile-rd -k client.xx.keyring \
697 auth add client.xx-profile-rd2 mon 'allow profile role-definer'
698 ceph -n client.xx-profile-rd -k client.xx.keyring \
699 auth export > client.xx.keyring.2
700 # remove old role-definer using the new role-definer
701 ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
702 auth del client.xx-profile-rd
703 # remove the remaining role-definer with admin
704 ceph auth del client.xx-profile-rd2
705 rm -f client.xx.keyring client.xx.keyring.2
706 }
707
708 function test_mon_caps()
709 {
710 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
711 chmod +r $TEMP_DIR/ceph.client.bug.keyring
712 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
713 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
714
715 rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
716 check_response "Permission denied"
717
718 rm -rf $TEMP_DIR/ceph.client.bug.keyring
719 ceph auth del client.bug
720 ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
721 chmod +r $TEMP_DIR/ceph.client.bug.keyring
722 ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
723 ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
724 ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
725 rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
726 check_response "Permission denied"
727 }
728
729 function test_mon_misc()
730 {
731 # with and without verbosity
732 ceph osd dump | grep '^epoch'
733 ceph --concise osd dump | grep '^epoch'
734
735 ceph osd df | grep 'MIN/MAX VAR'
736
737 # df
738 ceph df > $TMPFILE
739 grep GLOBAL $TMPFILE
740 grep -v DIRTY $TMPFILE
741 ceph df detail > $TMPFILE
742 grep DIRTY $TMPFILE
743 ceph df --format json > $TMPFILE
744 grep 'total_bytes' $TMPFILE
745 grep -v 'dirty' $TMPFILE
746 ceph df detail --format json > $TMPFILE
747 grep 'rd_bytes' $TMPFILE
748 grep 'dirty' $TMPFILE
749 ceph df --format xml | grep '<total_bytes>'
750 ceph df detail --format xml | grep '<rd_bytes>'
751
752 ceph fsid
753 ceph health
754 ceph health detail
755 ceph health --format json-pretty
756 ceph health detail --format xml-pretty
757
758 ceph time-sync-status
759
760 ceph node ls
761 for t in mon osd mds ; do
762 ceph node ls $t
763 done
764
765 ceph_watch_start
766 mymsg="this is a test log message $$.$(date)"
767 ceph log "$mymsg"
768 ceph log last | grep "$mymsg"
769 ceph log last 100 | grep "$mymsg"
770 ceph_watch_wait "$mymsg"
771
772 ceph mgr dump
773 ceph mgr module ls
774 ceph mgr module enable restful
775 expect_false ceph mgr module enable foodne
776 ceph mgr module enable foodne --force
777 ceph mgr module disable foodne
778 ceph mgr module disable foodnebizbangbash
779
780 ceph mon metadata a
781 ceph mon metadata
782 ceph mon count-metadata ceph_version
783 ceph mon versions
784
785 ceph mgr metadata
786 ceph mgr versions
787 ceph mgr count-metadata ceph_version
788
789 ceph versions
790
791 ceph node ls
792 }
793
794 function check_mds_active()
795 {
796 fs_name=$1
797 ceph fs get $fs_name | grep active
798 }
799
800 function wait_mds_active()
801 {
802 fs_name=$1
803 max_run=300
804 for i in $(seq 1 $max_run) ; do
805 if ! check_mds_active $fs_name ; then
806 echo "waiting for an active MDS daemon ($i/$max_run)"
807 sleep 5
808 else
809 break
810 fi
811 done
812 check_mds_active $fs_name
813 }
814
815 function get_mds_gids()
816 {
817 fs_name=$1
818 ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
819 }
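#
# Illustrative usage (not executed): get_mds_gids prints the GIDs of all MDS
# daemons serving the named filesystem (parsed from the JSON of
# 'ceph fs get'), space separated, e.g.
#   mds_gids=$(get_mds_gids cephfs)
#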
820
821 function fail_all_mds()
822 {
823 fs_name=$1
824 ceph fs set $fs_name cluster_down true
825 mds_gids=$(get_mds_gids $fs_name)
826 for mds_gid in $mds_gids ; do
827 ceph mds fail $mds_gid
828 done
829 if check_mds_active $fs_name ; then
830 echo "An active MDS remains, something went wrong"
831 ceph fs get $fs_name
832 exit -1
833 fi
834
835 }
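#
# Illustrative usage (not executed): mark a filesystem down and fail all of
# its MDS daemons before removing it, as remove_all_fs does below, e.g.
#   fail_all_mds cephfs
#   ceph fs rm cephfs --yes-i-really-mean-it
#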
836
837 function remove_all_fs()
838 {
839 existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
840 for fs_name in $existing_fs ; do
841 echo "Removing fs ${fs_name}..."
842 fail_all_mds $fs_name
843 echo "Removing existing filesystem '${fs_name}'..."
844 ceph fs rm $fs_name --yes-i-really-mean-it
845 echo "Removed '${fs_name}'."
846 done
847 }
848
849 # So that tests requiring MDS can skip if one is not configured
850 # in the cluster at all
851 function mds_exists()
852 {
853 ceph auth ls | grep "^mds"
854 }
855
856 # some of the commands are just not idempotent.
857 function without_test_dup_command()
858 {
859 if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
860 "$@"
861 else
862 local saved=${CEPH_CLI_TEST_DUP_COMMAND}
863 unset CEPH_CLI_TEST_DUP_COMMAND
864 "$@"
865 export CEPH_CLI_TEST_DUP_COMMAND=$saved
866 fi
867 }
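#
# Illustrative usage (not executed): wrap a non-idempotent command so that it
# is issued only once even when CEPH_CLI_TEST_DUP_COMMAND is set, e.g.
#   without_test_dup_command ceph tell mds.0 respawn
#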
868
869 function test_mds_tell()
870 {
871 local FS_NAME=cephfs
872 if ! mds_exists ; then
873 echo "Skipping test, no MDS found"
874 return
875 fi
876
877 remove_all_fs
878 ceph osd pool create fs_data 10
879 ceph osd pool create fs_metadata 10
880 ceph fs new $FS_NAME fs_metadata fs_data
881 wait_mds_active $FS_NAME
882
883 # Test injectargs by GID
884 old_mds_gids=$(get_mds_gids $FS_NAME)
885 echo Old GIDs: $old_mds_gids
886
887 for mds_gid in $old_mds_gids ; do
888 ceph tell mds.$mds_gid injectargs "--debug-mds 20"
889 done
890 expect_false ceph tell mds.a injectargs mds_max_file_recover -1
891
892 # Test respawn by rank
893 without_test_dup_command ceph tell mds.0 respawn
894 new_mds_gids=$old_mds_gids
895 while [ $new_mds_gids -eq $old_mds_gids ] ; do
896 sleep 5
897 new_mds_gids=$(get_mds_gids $FS_NAME)
898 done
899 echo New GIDs: $new_mds_gids
900
901 # Test respawn by ID
902 without_test_dup_command ceph tell mds.a respawn
903 new_mds_gids=$old_mds_gids
904 while [ $new_mds_gids -eq $old_mds_gids ] ; do
905 sleep 5
906 new_mds_gids=$(get_mds_gids $FS_NAME)
907 done
908 echo New GIDs: $new_mds_gids
909
910 remove_all_fs
911 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
912 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
913 }
914
915 function test_mon_mds()
916 {
917 local FS_NAME=cephfs
918 remove_all_fs
919
920 ceph osd pool create fs_data 10
921 ceph osd pool create fs_metadata 10
922 ceph fs new $FS_NAME fs_metadata fs_data
923
924 ceph fs set $FS_NAME cluster_down true
925 ceph fs set $FS_NAME cluster_down false
926
927 # Legacy commands, act on default fs
928 ceph mds cluster_down
929 ceph mds cluster_up
930
931 ceph mds compat rm_incompat 4
932 ceph mds compat rm_incompat 4
933
934 # We don't want any MDSs to be up, their activity can interfere with
935 # the "current_epoch + 1" checking below if they're generating updates
936 fail_all_mds $FS_NAME
937
938 ceph mds compat show
939 expect_false ceph mds deactivate 2
940 ceph mds dump
941 ceph fs dump
942 ceph fs get $FS_NAME
943 for mds_gid in $(get_mds_gids $FS_NAME) ; do
944 ceph mds metadata $mds_gid
945 done
946 ceph mds metadata
947 ceph mds versions
948 ceph mds count-metadata os
949
950 # XXX mds fail, but how do you undo it?
951 mdsmapfile=$TEMP_DIR/mdsmap.$$
952 current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
953 [ -s $mdsmapfile ]
954 rm $mdsmapfile
955
956 ceph osd pool create data2 10
957 ceph osd pool create data3 10
958 data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
959 data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
960 ceph mds add_data_pool $data2_pool
961 ceph mds add_data_pool $data3_pool
962 ceph mds add_data_pool 100 >& $TMPFILE || true
963 check_response "Error ENOENT"
964 ceph mds add_data_pool foobarbaz >& $TMPFILE || true
965 check_response "Error ENOENT"
966 ceph mds remove_data_pool $data2_pool
967 ceph mds remove_data_pool $data3_pool
968 ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
969 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
970 ceph mds set allow_multimds false
971 expect_false ceph mds set_max_mds 4
972 ceph mds set allow_multimds true
973 ceph mds set_max_mds 4
974 ceph mds set_max_mds 3
975 ceph mds set_max_mds 256
976 expect_false ceph mds set_max_mds 257
977 ceph mds set max_mds 4
978 ceph mds set max_mds 256
979 expect_false ceph mds set max_mds 257
980 expect_false ceph mds set max_mds asdf
981 expect_false ceph mds set inline_data true
982 ceph mds set inline_data true --yes-i-really-mean-it
983 ceph mds set inline_data yes --yes-i-really-mean-it
984 ceph mds set inline_data 1 --yes-i-really-mean-it
985 expect_false ceph mds set inline_data --yes-i-really-mean-it
986 ceph mds set inline_data false
987 ceph mds set inline_data no
988 ceph mds set inline_data 0
989 expect_false ceph mds set inline_data asdf
990 ceph mds set max_file_size 1048576
991 expect_false ceph mds set max_file_size 123asdf
992
993 expect_false ceph mds set allow_new_snaps
994 expect_false ceph mds set allow_new_snaps true
995 ceph mds set allow_new_snaps true --yes-i-really-mean-it
996 ceph mds set allow_new_snaps 0
997 ceph mds set allow_new_snaps false
998 ceph mds set allow_new_snaps no
999 expect_false ceph mds set allow_new_snaps taco
1000
1001 # we should never be able to add EC pools as data or metadata pools
1002 # create an ec-pool...
1003 ceph osd pool create mds-ec-pool 10 10 erasure
1004 set +e
1005 ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
1006 check_response 'erasure-code' $? 22
1007 set -e
1008 ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
1009 data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
1010 metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
1011
1012 fail_all_mds $FS_NAME
1013
1014 set +e
1015 # Check that rmfailed requires confirmation
1016 expect_false ceph mds rmfailed 0
1017 ceph mds rmfailed 0 --yes-i-really-mean-it
1018 set -e
1019
1020 # Check that `newfs` is no longer permitted
1021 expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
1022
1023 # Check that 'fs reset' runs
1024 ceph fs reset $FS_NAME --yes-i-really-mean-it
1025
1026 # Check that creating a second FS fails by default
1027 ceph osd pool create fs_metadata2 10
1028 ceph osd pool create fs_data2 10
1029 set +e
1030 expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
1031 set -e
1032
1033 # Check that setting enable_multiple enables creation of second fs
1034 ceph fs flag set enable_multiple true --yes-i-really-mean-it
1035 ceph fs new cephfs2 fs_metadata2 fs_data2
1036
1037 # Clean up multi-fs stuff
1038 fail_all_mds cephfs2
1039 ceph fs rm cephfs2 --yes-i-really-mean-it
1040 ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
1041 ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
1042
1043 fail_all_mds $FS_NAME
1044
1045 # Clean up to enable subsequent fs new tests
1046 ceph fs rm $FS_NAME --yes-i-really-mean-it
1047
1048 set +e
1049 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1050 check_response 'erasure-code' $? 22
1051 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1052 check_response 'erasure-code' $? 22
1053 ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
1054 check_response 'erasure-code' $? 22
1055 set -e
1056
1057 # ... now create a cache tier in front of the EC pool...
1058 ceph osd pool create mds-tier 2
1059 ceph osd tier add mds-ec-pool mds-tier
1060 ceph osd tier set-overlay mds-ec-pool mds-tier
1061 tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
1062
1063 # Use of a readonly tier should be forbidden
1064 ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
1065 set +e
1066 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1067 check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
1068 set -e
1069
1070 # Use of a writeback tier should enable FS creation
1071 ceph osd tier cache-mode mds-tier writeback
1072 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
1073
1074 # While a FS exists using the tiered pools, I should not be allowed
1075 # to remove the tier
1076 set +e
1077 ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
1078 check_response 'in use by CephFS' $? 16
1079 ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
1080 check_response 'in use by CephFS' $? 16
1081 set -e
1082
1083 fail_all_mds $FS_NAME
1084 ceph fs rm $FS_NAME --yes-i-really-mean-it
1085
1086 # ... but we should be forbidden from using the cache pool in the FS directly.
1087 set +e
1088 ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
1089 check_response 'in use as a cache tier' $? 22
1090 ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
1091 check_response 'in use as a cache tier' $? 22
1092 ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
1093 check_response 'in use as a cache tier' $? 22
1094 set -e
1095
1096 # Clean up tier + EC pools
1097 ceph osd tier remove-overlay mds-ec-pool
1098 ceph osd tier remove mds-ec-pool mds-tier
1099
1100 # Create a FS using the 'cache' pool now that it's no longer a tier
1101 ceph fs new $FS_NAME fs_metadata mds-tier --force
1102
1103 # We should be forbidden from using this pool as a tier now that
1104 # it's in use for CephFS
1105 set +e
1106 ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
1107 check_response 'in use by CephFS' $? 16
1108 set -e
1109
1110 fail_all_mds $FS_NAME
1111 ceph fs rm $FS_NAME --yes-i-really-mean-it
1112
1113 # We should be permitted to use an EC pool with overwrites enabled
1114 # as the data pool...
1115 ceph osd pool set mds-ec-pool allow_ec_overwrites true
1116 ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
1117 fail_all_mds $FS_NAME
1118 ceph fs rm $FS_NAME --yes-i-really-mean-it
1119
1120 # ...but not as the metadata pool
1121 set +e
1122 ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
1123 check_response 'erasure-code' $? 22
1124 set -e
1125
1126 ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
1127
1128 # Create a FS and check that we can subsequently add a cache tier to it
1129 ceph fs new $FS_NAME fs_metadata fs_data --force
1130
1131 # Adding overlay to FS pool should be permitted, RADOS clients handle this.
1132 ceph osd tier add fs_metadata mds-tier
1133 ceph osd tier cache-mode mds-tier writeback
1134 ceph osd tier set-overlay fs_metadata mds-tier
1135
1136 # Removing tier should be permitted because the underlying pool is
1137 # replicated (#11504 case)
1138 ceph osd tier cache-mode mds-tier proxy
1139 ceph osd tier remove-overlay fs_metadata
1140 ceph osd tier remove fs_metadata mds-tier
1141 ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
1142
1143 # Clean up FS
1144 fail_all_mds $FS_NAME
1145 ceph fs rm $FS_NAME --yes-i-really-mean-it
1146
1147
1148
1149 ceph mds stat
1150 # ceph mds tell mds.a getmap
1151 # ceph mds rm
1152 # ceph mds rmfailed
1153 # ceph mds set_state
1154 # ceph mds stop
1155
1156 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
1157 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
1158 }
1159
1160 function test_mon_mds_metadata()
1161 {
1162 local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
1163 test "$nmons" -gt 0
1164
1165 ceph mds dump |
1166 sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
1167 while read gid id rank; do
1168 ceph mds metadata ${gid} | grep '"hostname":'
1169 ceph mds metadata ${id} | grep '"hostname":'
1170 ceph mds metadata ${rank} | grep '"hostname":'
1171
1172 local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
1173 test "$n" -eq "$nmons"
1174 done
1175
1176 expect_false ceph mds metadata UNKNOWN
1177 }
1178
1179 function test_mon_mon()
1180 {
1181 # print help message
1182 ceph --help mon
1183 # no mon add/remove
1184 ceph mon dump
1185 ceph mon getmap -o $TEMP_DIR/monmap.$$
1186 [ -s $TEMP_DIR/monmap.$$ ]
1187 # ceph mon tell
1188 ceph mon_status
1189
1190 # test mon features
1191 ceph mon feature ls
1192 ceph mon feature set kraken --yes-i-really-mean-it
1193 expect_false ceph mon feature set abcd
1194 expect_false ceph mon feature set abcd --yes-i-really-mean-it
1195 }
1196
1197 function gen_secrets_file()
1198 {
1199 # let's assume we can have the following types
1200 # all - generates both cephx and lockbox, with mock dm-crypt key
1201 # cephx - only cephx
1202 # no_cephx - lockbox and dm-crypt, no cephx
1203 # no_lockbox - dm-crypt and cephx, no lockbox
1204 # empty - empty file
1205 # empty_json - correct json, empty map
1206 # bad_json - bad json :)
1207 #
1208 local t=$1
1209 if [[ -z "$t" ]]; then
1210 t="all"
1211 fi
1212
1213 fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
1214 echo $fn
1215 if [[ "$t" == "empty" ]]; then
1216 return 0
1217 fi
1218
1219 echo "{" > $fn
1220 if [[ "$t" == "bad_json" ]]; then
1221 echo "asd: ; }" >> $fn
1222 return 0
1223 elif [[ "$t" == "empty_json" ]]; then
1224 echo "}" >> $fn
1225 return 0
1226 fi
1227
1228 cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
1229 lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
1230 dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
1231
1232 if [[ "$t" == "all" ]]; then
1233 echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
1234 elif [[ "$t" == "cephx" ]]; then
1235 echo "$cephx_secret" >> $fn
1236 elif [[ "$t" == "no_cephx" ]]; then
1237 echo "$lb_secret,$dmcrypt_key" >> $fn
1238 elif [[ "$t" == "no_lockbox" ]]; then
1239 echo "$cephx_secret,$dmcrypt_key" >> $fn
1240 else
1241 echo "unknown gen_secrets_file() type \'$fn\'"
1242 return 1
1243 fi
1244 echo "}" >> $fn
1245 return 0
1246 }
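#
# Illustrative usage (not executed): generate a secrets file of a given type
# and feed it to 'ceph osd new', as test_mon_osd_create_destroy does below:
#   all_secrets=$(gen_secrets_file "all")
#   ceph osd new $(uuidgen) -i $all_secrets
#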
1247
1248 function test_mon_osd_create_destroy()
1249 {
1250 ceph osd new 2>&1 | grep 'EINVAL'
1251 ceph osd new '' -1 2>&1 | grep 'EINVAL'
1252 ceph osd new '' 10 2>&1 | grep 'EINVAL'
1253
1254 old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1255
1256 old_osds=$(ceph osd ls)
1257 num_osds=$(ceph osd ls | wc -l)
1258
1259 uuid=$(uuidgen)
1260 id=$(ceph osd new $uuid 2>/dev/null)
1261
1262 for i in $old_osds; do
1263 [[ "$i" != "$id" ]]
1264 done
1265
1266 ceph osd find $id
1267
1268 id2=`ceph osd new $uuid 2>/dev/null`
1269
1270 [[ $id2 == $id ]]
1271
1272 ceph osd new $uuid $id
1273
1274 id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1275 ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
1276
1277 uuid2=$(uuidgen)
1278 id2=$(ceph osd new $uuid2)
1279 ceph osd find $id2
1280 [[ "$id2" != "$id" ]]
1281
1282 ceph osd new $uuid $id2 2>&1 | grep EEXIST
1283 ceph osd new $uuid2 $id2
1284
1285 # test with secrets
1286 empty_secrets=$(gen_secrets_file "empty")
1287 empty_json=$(gen_secrets_file "empty_json")
1288 all_secrets=$(gen_secrets_file "all")
1289 cephx_only=$(gen_secrets_file "cephx")
1290 no_cephx=$(gen_secrets_file "no_cephx")
1291 no_lockbox=$(gen_secrets_file "no_lockbox")
1292 bad_json=$(gen_secrets_file "bad_json")
1293
1294 # empty secrets should be idempotent
1295 new_id=$(ceph osd new $uuid $id -i $empty_secrets)
1296 [[ "$new_id" == "$id" ]]
1297
1298 # empty json, thus empty secrets
1299 new_id=$(ceph osd new $uuid $id -i $empty_json)
1300 [[ "$new_id" == "$id" ]]
1301
1302 ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
1303
1304 ceph osd rm $id
1305 ceph osd rm $id2
1306 ceph osd setmaxosd $old_maxosd
1307
1308 ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
1309 ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
1310
1311 osds=$(ceph osd ls)
1312 id=$(ceph osd new $uuid -i $all_secrets)
1313 for i in $osds; do
1314 [[ "$i" != "$id" ]]
1315 done
1316
1317 ceph osd find $id
1318
1319 # validate secrets and dm-crypt are set
1320 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1321 s=$(cat $all_secrets | jq '.cephx_secret')
1322 [[ $k == $s ]]
1323 k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
1324 jq '.key')
1325 s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
1326 [[ $k == $s ]]
1327 ceph config-key exists dm-crypt/osd/$uuid/luks
1328
1329 osds=$(ceph osd ls)
1330 id2=$(ceph osd new $uuid2 -i $cephx_only)
1331 for i in $osds; do
1332 [[ "$i" != "$id2" ]]
1333 done
1334
1335 ceph osd find $id2
1336 k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
1337 s=$(cat $all_secrets | jq '.cephx_secret')
1338 [[ $k == $s ]]
1339 expect_false ceph auth get-key client.osd-lockbox.$uuid2
1340 expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
1341
1342 ceph osd destroy osd.$id2 --yes-i-really-mean-it
1343 ceph osd destroy $id2 --yes-i-really-mean-it
1344 ceph osd find $id2
1345 expect_false ceph auth get-key osd.$id2
1346 ceph osd dump | grep osd.$id2 | grep destroyed
1347
1348 id3=$id2
1349 uuid3=$(uuidgen)
1350 ceph osd new $uuid3 $id3 -i $all_secrets
1351 ceph osd dump | grep osd.$id3 | expect_false grep destroyed
1352 ceph auth get-key client.osd-lockbox.$uuid3
1353 ceph auth get-key osd.$id3
1354 ceph config-key exists dm-crypt/osd/$uuid3/luks
1355
1356 ceph osd purge osd.$id3 --yes-i-really-mean-it
1357 expect_false ceph osd find $id2
1358 expect_false ceph auth get-key osd.$id2
1359 expect_false ceph auth get-key client.osd-lockbox.$uuid3
1360 expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
1361 ceph osd purge osd.$id3 --yes-i-really-mean-it
1362 ceph osd purge osd.$id3 --yes-i-really-mean-it # idempotent
1363
1364 ceph osd purge osd.$id --yes-i-really-mean-it
1365 ceph osd purge 123456 --yes-i-really-mean-it
1366 expect_false ceph osd find $id
1367 expect_false ceph auth get-key osd.$id
1368 expect_false ceph auth get-key client.osd-lockbox.$uuid
1369 expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
1370
1371 rm $empty_secrets $empty_json $all_secrets $cephx_only \
1372 $no_cephx $no_lockbox $bad_json
1373
1374 for i in $(ceph osd ls); do
1375 [[ "$i" != "$id" ]]
1376 [[ "$i" != "$id2" ]]
1377 [[ "$i" != "$id3" ]]
1378 done
1379
1380 [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
1381 ceph osd setmaxosd $old_maxosd
1382
1383 }
1384
1385 function test_mon_config_key()
1386 {
1387 key=asdfasdfqwerqwreasdfuniquesa123df
1388 ceph config-key list | grep -c $key | grep 0
1389 ceph config-key get $key | grep -c bar | grep 0
1390 ceph config-key set $key bar
1391 ceph config-key get $key | grep bar
1392 ceph config-key list | grep -c $key | grep 1
1393 ceph config-key dump | grep $key | grep bar
1394 ceph config-key rm $key
1395 expect_false ceph config-key get $key
1396 ceph config-key list | grep -c $key | grep 0
1397 ceph config-key dump | grep -c $key | grep 0
1398 }
1399
1400 function test_mon_osd()
1401 {
1402 #
1403 # osd blacklist
1404 #
1405 bl=192.168.0.1:0/1000
1406 ceph osd blacklist add $bl
1407 ceph osd blacklist ls | grep $bl
1408 ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
1409 ceph osd dump --format=json-pretty | grep $bl
1410 ceph osd dump | grep "^blacklist $bl"
1411 ceph osd blacklist rm $bl
1412 ceph osd blacklist ls | expect_false grep $bl
1413
1414 bl=192.168.0.1
1415 # test without nonce, invalid nonce
1416 ceph osd blacklist add $bl
1417 ceph osd blacklist ls | grep $bl
1418 ceph osd blacklist rm $bl
1419 ceph osd blacklist ls | expect_false grep $bl
1420 expect_false "ceph osd blacklist $bl/-1"
1421 expect_false "ceph osd blacklist $bl/foo"
1422
1423 # test with wrong address
1424 expect_false "ceph osd blacklist 1234.56.78.90/100"
1425
1426 # Test `clear`
1427 ceph osd blacklist add $bl
1428 ceph osd blacklist ls | grep $bl
1429 ceph osd blacklist clear
1430 ceph osd blacklist ls | expect_false grep $bl
1431
1432 #
1433 # osd crush
1434 #
1435 ceph osd crush reweight-all
1436 ceph osd crush tunables legacy
1437 ceph osd crush show-tunables | grep argonaut
1438 ceph osd crush tunables bobtail
1439 ceph osd crush show-tunables | grep bobtail
1440 ceph osd crush tunables firefly
1441 ceph osd crush show-tunables | grep firefly
1442
1443 ceph osd crush set-tunable straw_calc_version 0
1444 ceph osd crush get-tunable straw_calc_version | grep 0
1445 ceph osd crush set-tunable straw_calc_version 1
1446 ceph osd crush get-tunable straw_calc_version | grep 1
1447
1448 #
1449 # require-min-compat-client
1450 expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
1451 ceph osd set-require-min-compat-client luminous
1452 ceph osd dump | grep 'require_min_compat_client luminous'
1453
1454 #
1455 # osd scrub
1456 #
1457 # how do I tell when these are done?
1458 ceph osd scrub 0
1459 ceph osd deep-scrub 0
1460 ceph osd repair 0
1461
1462 for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
1463 do
1464 ceph osd set $f
1465 ceph osd unset $f
1466 done
1467 expect_false ceph osd unset sortbitwise # cannot be unset
1468 expect_false ceph osd set bogus
1469 expect_false ceph osd unset bogus
1470 ceph osd require-osd-release luminous
1471 # can't lower (or use new command for anything but jewel)
1472 expect_false ceph osd require-osd-release jewel
1473 # these are no-ops but should succeed.
1474 ceph osd set require_jewel_osds
1475 ceph osd set require_kraken_osds
1476 expect_false ceph osd unset require_jewel_osds
1477
1478 ceph osd set noup
1479 ceph osd down 0
1480 ceph osd dump | grep 'osd.0 down'
1481 ceph osd unset noup
1482 max_run=1000
1483 for ((i=0; i < $max_run; i++)); do
1484 if ! ceph osd dump | grep 'osd.0 up'; then
1485 echo "waiting for osd.0 to come back up ($i/$max_run)"
1486 sleep 1
1487 else
1488 break
1489 fi
1490 done
1491 ceph osd dump | grep 'osd.0 up'
1492
1493 ceph osd dump | grep 'osd.0 up'
1494 # ceph osd find expects the OsdName, so both ints and osd.n should work.
1495 ceph osd find 1
1496 ceph osd find osd.1
1497 expect_false ceph osd find osd.xyz
1498 expect_false ceph osd find xyz
1499 expect_false ceph osd find 0.1
1500 ceph --format plain osd find 1 # falls back to json-pretty
1501 if [ `uname` == Linux ]; then
1502 ceph osd metadata 1 | grep 'distro'
1503 ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
1504 fi
1505 ceph osd out 0
1506 ceph osd dump | grep 'osd.0.*out'
1507 ceph osd in 0
1508 ceph osd dump | grep 'osd.0.*in'
1509 ceph osd find 0
1510
1511 ceph osd add-nodown 0 1
1512 ceph health detail | grep 'NODOWN'
1513 ceph osd rm-nodown 0 1
1514 ! ceph health detail | grep 'NODOWN'
1515
1516 ceph osd out 0 # so we can mark it as noin later
1517 ceph osd add-noin 0
1518 ceph health detail | grep 'NOIN'
1519 ceph osd rm-noin 0
1520 ! ceph health detail | grep 'NOIN'
1521 ceph osd in 0
1522
1523 ceph osd add-noout 0
1524 ceph health detail | grep 'NOOUT'
1525 ceph osd rm-noout 0
1526 ! ceph health detail | grep 'NOOUT'
1527
1528 # test osd id parse
1529 expect_false ceph osd add-noup 797er
1530 expect_false ceph osd add-nodown u9uwer
1531 expect_false ceph osd add-noin 78~15
1532 expect_false ceph osd add-noout 0 all 1
1533
1534 expect_false ceph osd rm-noup 1234567
1535 expect_false ceph osd rm-nodown fsadf7
1536 expect_false ceph osd rm-noin 0 1 any
1537 expect_false ceph osd rm-noout 790-fd
1538
1539 ids=`ceph osd ls-tree default`
1540 for osd in $ids
1541 do
1542 ceph osd add-nodown $osd
1543 ceph osd add-noout $osd
1544 done
1545 ceph -s | grep 'NODOWN'
1546 ceph -s | grep 'NOOUT'
1547 ceph osd rm-nodown any
1548 ceph osd rm-noout all
1549 ! ceph -s | grep 'NODOWN'
1550 ! ceph -s | grep 'NOOUT'
1551
1552 # make sure mark out preserves weight
1553 ceph osd reweight osd.0 .5
1554 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1555 ceph osd out 0
1556 ceph osd in 0
1557 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1558
1559 f=$TEMP_DIR/osdmap.$$ ; ceph osd getmap -o $f
1560 [ -s $f ]
1561 rm $f
1562 save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1563 [ "$save" -gt 0 ]
1564 ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
1565 ceph osd setmaxosd 10
1566 ceph osd getmaxosd | grep 'max_osd = 10'
1567 ceph osd setmaxosd $save
1568 ceph osd getmaxosd | grep "max_osd = $save"
1569
1570 for id in `ceph osd ls` ; do
1571 retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
1572 done
1573
1574 ceph osd rm 0 2>&1 | grep 'EBUSY'
1575
1576 local old_osds=$(echo $(ceph osd ls))
1577 id=`ceph osd create`
1578 ceph osd find $id
1579 ceph osd lost $id --yes-i-really-mean-it
1580 expect_false ceph osd setmaxosd $id
1581 local new_osds=$(echo $(ceph osd ls))
1582 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1583 ceph osd rm $id
1584 done
1585
1586 uuid=`uuidgen`
1587 id=`ceph osd create $uuid`
1588 id2=`ceph osd create $uuid`
1589 [ "$id" = "$id2" ]
1590 ceph osd rm $id
1591
1592 ceph --help osd
1593
1594 # reset max_osd.
1595 ceph osd setmaxosd $id
1596 ceph osd getmaxosd | grep "max_osd = $save"
1597 local max_osd=$save
1598
1599 ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
1600 ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
1601
1602 id=`ceph osd create $uuid $max_osd`
1603 [ "$id" = "$max_osd" ]
1604 ceph osd find $id
1605 max_osd=$((max_osd + 1))
1606 ceph osd getmaxosd | grep "max_osd = $max_osd"
1607
1608 ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
1609 ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
1610 id2=`ceph osd create $uuid`
1611 [ "$id" = "$id2" ]
1612 id2=`ceph osd create $uuid $id`
1613 [ "$id" = "$id2" ]
1614
1615 uuid=`uuidgen`
1616 local gap_start=$max_osd
1617 id=`ceph osd create $uuid $((gap_start + 100))`
1618 [ "$id" = "$((gap_start + 100))" ]
1619 max_osd=$((id + 1))
1620 ceph osd getmaxosd | grep "max_osd = $max_osd"
1621
1622 ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
1623
1624 #
1625 # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
1626 # is repeated and consumes two osd ids, not just one.
1627 #
1628 local next_osd=$gap_start
1629 id=`ceph osd create $(uuidgen)`
1630 [ "$id" = "$next_osd" ]
1631
1632 next_osd=$((id + 1))
1633 id=`ceph osd create $(uuidgen) $next_osd`
1634 [ "$id" = "$next_osd" ]
1635
1636 local new_osds=$(echo $(ceph osd ls))
1637 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1638 [ $id -ge $save ]
1639 ceph osd rm $id
1640 done
1641 ceph osd setmaxosd $save
1642
1643 ceph osd ls
1644 ceph osd pool create data 10
1645 ceph osd pool application enable data rados
1646 ceph osd lspools | grep data
1647 ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
1648 ceph osd map data foo namespace | grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
1649 ceph osd pool delete data data --yes-i-really-really-mean-it
1650
1651 ceph osd pause
1652 ceph osd dump | grep 'flags.*pauserd,pausewr'
1653 ceph osd unpause
1654
1655 ceph osd tree
1656 ceph osd tree up
1657 ceph osd tree down
1658 ceph osd tree in
1659 ceph osd tree out
1660 ceph osd tree destroyed
1661 ceph osd tree up in
1662 ceph osd tree up out
1663 ceph osd tree down in
1664 ceph osd tree down out
1665 ceph osd tree out down
1666 expect_false ceph osd tree up down
1667 expect_false ceph osd tree up destroyed
1668 expect_false ceph osd tree down destroyed
1669 expect_false ceph osd tree up down destroyed
1670 expect_false ceph osd tree in out
1671 expect_false ceph osd tree up foo
1672
1673 ceph osd metadata
1674 ceph osd count-metadata os
1675 ceph osd versions
1676
1677 ceph osd perf
1678 ceph osd blocked-by
1679
1680 ceph osd stat | grep up,
1681 }
1682
1683 function test_mon_crush()
1684 {
1685 f=$TEMP_DIR/map.$$
1686 epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
1687 [ -s $f ]
1688 [ "$epoch" -gt 1 ]
1689 nextepoch=$(( $epoch + 1 ))
1690 echo epoch $epoch nextepoch $nextepoch
1691 rm -f $f.epoch
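# passing an epoch ahead of the current one must be rejected; passing the
# current epoch is accepted and the reported epoch advances to nextepoch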
1692 expect_false ceph osd setcrushmap $nextepoch -i $f
1693 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1694 echo gotepoch $gotepoch
1695 [ "$gotepoch" -eq "$nextepoch" ]
1696 # should be idempotent
1697 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1698 echo epoch $gotepoch
1699 [ "$gotepoch" -eq "$nextepoch" ]
1700 rm $f
1701 }
1702
1703 function test_mon_osd_pool()
1704 {
1705 #
1706 # osd pool
1707 #
1708 ceph osd pool create data 10
1709 ceph osd pool application enable data rados
1710 ceph osd pool mksnap data datasnap
1711 rados -p data lssnap | grep datasnap
1712 ceph osd pool rmsnap data datasnap
1713 expect_false ceph osd pool rmsnap pool_fake snapshot
1714 ceph osd pool delete data data --yes-i-really-really-mean-it
1715
1716 ceph osd pool create data2 10
1717 ceph osd pool application enable data2 rados
1718 ceph osd pool rename data2 data3
1719 ceph osd lspools | grep data3
1720 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
1721
1722 ceph osd pool create replicated 12 12 replicated
1723 ceph osd pool create replicated 12 12 replicated
1724 ceph osd pool create replicated 12 12 # default is replicated
1725 ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
1726 ceph osd pool application enable replicated rados
1727 # should fail because the type is not the same
1728 expect_false ceph osd pool create replicated 12 12 erasure
1729 ceph osd lspools | grep replicated
1730 ceph osd pool create ec_test 1 1 erasure
1731 ceph osd pool application enable ec_test rados
1732 set +e
1733 ceph osd count-metadata osd_objectstore | grep 'bluestore'
1734 if [ $? -eq 1 ]; then # enabling ec_overwrites on non-bluestore pools should fail
1735 ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
1736 check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
1737 else
1738 ceph osd pool set ec_test allow_ec_overwrites true || return 1
1739 expect_false ceph osd pool set ec_test allow_ec_overwrites false
1740 fi
1741 set -e
1742 ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
1743 ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
1744 }
1745
1746 function test_mon_osd_pool_quota()
1747 {
1748 #
1749 # test osd pool set/get quota
1750 #
1751
1752 # create tmp pool
1753 ceph osd pool create tmp-quota-pool 36
1754 ceph osd pool application enable tmp-quota-pool rados
1755 #
1756 # set erroneous quotas
1757 #
1758 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
1759 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
1760 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
1761 #
1762 # set valid quotas
1763 #
1764 ceph osd pool set-quota tmp-quota-pool max_bytes 10
1765 ceph osd pool set-quota tmp-quota-pool max_objects 10M
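# (unit suffixes are expanded: max_objects 10M is stored as 10000000 objects,
# while the bare max_bytes value 10 stays 10 bytes -- see the get-quota
# checks below)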
1766 #
1767 # get quotas in json-pretty format
1768 #
1769 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1770 grep '"quota_max_objects":.*10000000'
1771 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1772 grep '"quota_max_bytes":.*10'
1773 #
1774 # get quotas
1775 #
1776 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
1777 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10M objects'
1778 #
1779 # set valid quotas with unit prefix
1780 #
1781 ceph osd pool set-quota tmp-quota-pool max_bytes 10K
1782 #
1783 # get quotas
1784 #
1785 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10Ki'
1786 #
1787 # set valid quotas with unit prefix
1788 #
1789 ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
1790 #
1791 # get quotas
1792 #
1793 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10Ki'
1794 #
1795 #
1796 # reset pool quotas
1797 #
1798 ceph osd pool set-quota tmp-quota-pool max_bytes 0
1799 ceph osd pool set-quota tmp-quota-pool max_objects 0
1800 #
1801 # test N/A quotas
1802 #
1803 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
1804 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
1805 #
1806 # cleanup tmp pool
1807 ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
1808 }
1809
1810 function test_mon_pg()
1811 {
1812 # Make sure we start healthy.
1813 wait_for_health_ok
1814
1815 ceph pg debug unfound_objects_exist
1816 ceph pg debug degraded_pgs_exist
1817 ceph pg deep-scrub 1.0
1818 ceph pg dump
1819 ceph pg dump pgs_brief --format=json
1820 ceph pg dump pgs --format=json
1821 ceph pg dump pools --format=json
1822 ceph pg dump osds --format=json
1823 ceph pg dump sum --format=json
1824 ceph pg dump all --format=json
1825 ceph pg dump pgs_brief osds --format=json
1826 ceph pg dump pools osds pgs_brief --format=json
1827 ceph pg dump_json
1828 ceph pg dump_pools_json
1829 ceph pg dump_stuck inactive
1830 ceph pg dump_stuck unclean
1831 ceph pg dump_stuck stale
1832 ceph pg dump_stuck undersized
1833 ceph pg dump_stuck degraded
1834 ceph pg ls
1835 ceph pg ls 1
1836 ceph pg ls stale
1837 expect_false ceph pg ls scrubq
1838 ceph pg ls active stale repair recovering
1839 ceph pg ls 1 active
1840 ceph pg ls 1 active stale
1841 ceph pg ls-by-primary osd.0
1842 ceph pg ls-by-primary osd.0 1
1843 ceph pg ls-by-primary osd.0 active
1844 ceph pg ls-by-primary osd.0 active stale
1845 ceph pg ls-by-primary osd.0 1 active stale
1846 ceph pg ls-by-osd osd.0
1847 ceph pg ls-by-osd osd.0 1
1848 ceph pg ls-by-osd osd.0 active
1849 ceph pg ls-by-osd osd.0 active stale
1850 ceph pg ls-by-osd osd.0 1 active stale
1851 ceph pg ls-by-pool rbd
1852 ceph pg ls-by-pool rbd active stale
1853 # can't test this...
1854 # ceph pg force_create_pg
1855 ceph pg getmap -o $TEMP_DIR/map.$$
1856 [ -s $TEMP_DIR/map.$$ ]
1857 ceph pg map 1.0 | grep acting
1858 ceph pg repair 1.0
1859 ceph pg scrub 1.0
1860
1861 ceph osd set-full-ratio .962
1862 ceph osd dump | grep '^full_ratio 0.962'
1863 ceph osd set-backfillfull-ratio .912
1864 ceph osd dump | grep '^backfillfull_ratio 0.912'
1865 ceph osd set-nearfull-ratio .892
1866 ceph osd dump | grep '^nearfull_ratio 0.892'
1867
1868 # Check health status
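# the ratios are expected to satisfy nearfull <= backfillfull <= full;
# deliberately violating that ordering should raise OSD_OUT_OF_ORDER_FULL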
1869 ceph osd set-nearfull-ratio .913
1870 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
1871 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
1872 ceph osd set-nearfull-ratio .892
1873 ceph osd set-backfillfull-ratio .963
1874 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
1875 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
1876 ceph osd set-backfillfull-ratio .912
1877
1878 # Check injected full results
1879 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
1880 wait_for_health "OSD_NEARFULL"
1881 ceph health detail | grep "osd.0 is near full"
1882 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
1883 wait_for_health_ok
1884
1885 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
1886 wait_for_health "OSD_BACKFILLFULL"
1887 ceph health detail | grep "osd.1 is backfill full"
1888 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
1889 wait_for_health_ok
1890
1891 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
1892 # failsafe and full are the same as far as the monitor is concerned
1893 wait_for_health "OSD_FULL"
1894 ceph health detail | grep "osd.2 is full"
1895 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
1896 wait_for_health_ok
1897
1898 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
1899 wait_for_health "OSD_FULL"
1900 ceph health detail | grep "osd.0 is full"
1901 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
1902 wait_for_health_ok
1903
1904 ceph pg stat | grep 'pgs:'
1905 ceph pg 1.0 query
1906 ceph tell 1.0 query
1907 ceph quorum enter
1908 ceph quorum_status
1909 ceph report | grep osd_stats
1910 ceph status
1911 ceph -s
1912
1913 #
1914 # tell osd version
1915 #
1916 ceph tell osd.0 version
1917 expect_false ceph tell osd.9999 version
1918 expect_false ceph tell osd.foo version
1919
1920 # back to pg stuff
1921
1922 ceph tell osd.0 dump_pg_recovery_stats | grep Started
1923
1924 ceph osd reweight 0 0.9
1925 expect_false ceph osd reweight 0 -1
1926 ceph osd reweight osd.0 1
1927
1928 ceph osd primary-affinity osd.0 .9
1929 expect_false ceph osd primary-affinity osd.0 -2
1930 expect_false ceph osd primary-affinity osd.9999 .5
1931 ceph osd primary-affinity osd.0 1
1932
1933 ceph osd pool set rbd size 2
1934 ceph osd pg-temp 1.0 0 1
1935 ceph osd pg-temp 1.0 osd.1 osd.0
1936 expect_false ceph osd pg-temp 1.0 0 1 2
1937 expect_false ceph osd pg-temp asdf qwer
1938 expect_false ceph osd pg-temp 1.0 asdf
1939 expect_false ceph osd pg-temp 1.0
1940
1941 # don't test ceph osd primary-temp for now
1942 }
1943
1944 function test_mon_osd_pool_set()
1945 {
1946 TEST_POOL_GETSET=pool_getset
1947 ceph osd pool create $TEST_POOL_GETSET 1
1948 ceph osd pool application enable $TEST_POOL_GETSET rados
1949 wait_for_clean
1950 ceph osd pool get $TEST_POOL_GETSET all
1951
1952 for s in pg_num pgp_num size min_size crush_rule; do
1953 ceph osd pool get $TEST_POOL_GETSET $s
1954 done
1955
1956 old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
1957 (( new_size = old_size + 1 ))
1958 ceph osd pool set $TEST_POOL_GETSET size $new_size
1959 ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
1960 ceph osd pool set $TEST_POOL_GETSET size $old_size
1961
1962 ceph osd pool create pool_erasure 1 1 erasure
1963 ceph osd pool application enable pool_erasure rados
1964 wait_for_clean
1965 set +e
1966 ceph osd pool set pool_erasure size 4444 2>$TMPFILE
1967 check_response 'not change the size'
1968 set -e
1969 ceph osd pool get pool_erasure erasure_code_profile
1970
1971 auid=5555
1972 ceph osd pool set $TEST_POOL_GETSET auid $auid
1973 ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
1974 ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
1975 ceph osd pool set $TEST_POOL_GETSET auid 0
1976
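# each per-pool flag below accepts both true/false and 1/0; any other value
# must be rejected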
1977 for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
1978 ceph osd pool set $TEST_POOL_GETSET $flag false
1979 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1980 ceph osd pool set $TEST_POOL_GETSET $flag true
1981 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1982 ceph osd pool set $TEST_POOL_GETSET $flag 1
1983 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1984 ceph osd pool set $TEST_POOL_GETSET $flag 0
1985 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1986 expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
1987 expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
1988 done
1989
1990 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1991 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
1992 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
1993 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
1994 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1995
1996 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1997 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
1998 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
1999 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
2000 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
2001
2002 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
2003 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
2004 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
2005 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
2006 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
2007
2008 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
2009 ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
2010 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
2011 ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
2012 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
2013
2014 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
2015 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
2016 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
2017 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
2018 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
2019
2020 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
2021 ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
2022 ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
2023 ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
2024 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
2025
2026 ceph osd pool set $TEST_POOL_GETSET nopgchange 1
2027 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
2028 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
2029 ceph osd pool set $TEST_POOL_GETSET nopgchange 0
2030 ceph osd pool set $TEST_POOL_GETSET pg_num 10
2031 wait_for_clean
2032 ceph osd pool set $TEST_POOL_GETSET pgp_num 10
2033
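# pg_num may grow by at most (num_osds * 32) in a single step here --
# presumably governed by mon_osd_max_split_count at its default of 32 --
# so asking for one PG more than that must be rejected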
2034 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
2035 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
2036 ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
2037 ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
2038 wait_for_clean
2039 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
2040 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32 + 1))
2041 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
2042
2043 ceph osd pool set $TEST_POOL_GETSET nosizechange 1
2044 expect_false ceph osd pool set $TEST_POOL_GETSET size 2
2045 expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
2046 ceph osd pool set $TEST_POOL_GETSET nosizechange 0
2047 ceph osd pool set $TEST_POOL_GETSET size 2
2048 wait_for_clean
2049 ceph osd pool set $TEST_POOL_GETSET min_size 2
2050
2051 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
2052 ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
2053
2054 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
2055 ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
2056
2057 ceph osd pool get rbd crush_rule | grep 'crush_rule: '
2058
2059 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2060 ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
2061 ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
2062 ceph osd pool set $TEST_POOL_GETSET compression_mode unset
2063 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2064
2065 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2066 ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
2067 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
2068 ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
2069 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2070
2071 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2072 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
2073 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
2074 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
2075 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
2076 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
2077 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2078
2079 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2080 ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
2081 ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
2082 ceph osd pool set $TEST_POOL_GETSET csum_type unset
2083 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2084
2085 for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2086 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2087 ceph osd pool set $TEST_POOL_GETSET $size 100
2088 ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
2089 ceph osd pool set $TEST_POOL_GETSET $size 0
2090 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2091 done
2092
2093 ceph osd pool set $TEST_POOL_GETSET nodelete 1
2094 expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2095 ceph osd pool set $TEST_POOL_GETSET nodelete 0
2096 ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2097
2098 }
2099
2100 function test_mon_osd_tiered_pool_set()
2101 {
2102 # this is really a tier pool
2103 ceph osd pool create real-tier 2
2104 ceph osd tier add rbd real-tier
2105
2106 ceph osd pool set real-tier hit_set_type explicit_hash
2107 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
2108 ceph osd pool set real-tier hit_set_type explicit_object
2109 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
2110 ceph osd pool set real-tier hit_set_type bloom
2111 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
2112 expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
2113 ceph osd pool set real-tier hit_set_period 123
2114 ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
2115 ceph osd pool set real-tier hit_set_count 12
2116 ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
2117 ceph osd pool set real-tier hit_set_fpp .01
2118 ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
2119
2120 ceph osd pool set real-tier target_max_objects 123
2121 ceph osd pool get real-tier target_max_objects | \
2122 grep 'target_max_objects:[ \t]\+123'
2123 ceph osd pool set real-tier target_max_bytes 123456
2124 ceph osd pool get real-tier target_max_bytes | \
2125 grep 'target_max_bytes:[ \t]\+123456'
2126 ceph osd pool set real-tier cache_target_dirty_ratio .123
2127 ceph osd pool get real-tier cache_target_dirty_ratio | \
2128 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
2129 expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
2130 expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
2131 ceph osd pool set real-tier cache_target_dirty_high_ratio .123
2132 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
2133 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
2134 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
2135 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
2136 ceph osd pool set real-tier cache_target_full_ratio .123
2137 ceph osd pool get real-tier cache_target_full_ratio | \
2138 grep 'cache_target_full_ratio:[ \t]\+0.123'
2139 ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
2140 ceph osd pool set real-tier cache_target_full_ratio 1.0
2141 ceph osd pool set real-tier cache_target_full_ratio 0
2142 expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
2143 ceph osd pool set real-tier cache_min_flush_age 123
2144 ceph osd pool get real-tier cache_min_flush_age | \
2145 grep 'cache_min_flush_age:[ \t]\+123'
2146 ceph osd pool set real-tier cache_min_evict_age 234
2147 ceph osd pool get real-tier cache_min_evict_age | \
2148 grep 'cache_min_evict_age:[ \t]\+234'
2149
2150 # this is not a tier pool
2151 ceph osd pool create fake-tier 2
2152 ceph osd pool application enable fake-tier rados
2153 wait_for_clean
2154
2155 expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
2156 expect_false ceph osd pool get fake-tier hit_set_type
2157 expect_false ceph osd pool set fake-tier hit_set_type explicit_object
2158 expect_false ceph osd pool get fake-tier hit_set_type
2159 expect_false ceph osd pool set fake-tier hit_set_type bloom
2160 expect_false ceph osd pool get fake-tier hit_set_type
2161 expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
2162 expect_false ceph osd pool set fake-tier hit_set_period 123
2163 expect_false ceph osd pool get fake-tier hit_set_period
2164 expect_false ceph osd pool set fake-tier hit_set_count 12
2165 expect_false ceph osd pool get fake-tier hit_set_count
2166 expect_false ceph osd pool set fake-tier hit_set_fpp .01
2167 expect_false ceph osd pool get fake-tier hit_set_fpp
2168
2169 expect_false ceph osd pool set fake-tier target_max_objects 123
2170 expect_false ceph osd pool get fake-tier target_max_objects
2171 expect_false ceph osd pool set fake-tier target_max_bytes 123456
2172 expect_false ceph osd pool get fake-tier target_max_bytes
2173 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
2174 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2175 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
2176 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
2177 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
2178 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2179 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
2180 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
2181 expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
2182 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2183 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
2184 expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
2185 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
2186 expect_false ceph osd pool set fake-tier cache_min_flush_age 123
2187 expect_false ceph osd pool get fake-tier cache_min_flush_age
2188 expect_false ceph osd pool set fake-tier cache_min_evict_age 234
2189 expect_false ceph osd pool get fake-tier cache_min_evict_age
2190
2191 ceph osd tier remove rbd real-tier
2192 ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
2193 ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
2194 }
2195
2196 function test_mon_osd_erasure_code()
2197 {
2198
2199 ceph osd erasure-code-profile set fooprofile a=b c=d
2200 ceph osd erasure-code-profile set fooprofile a=b c=d
2201 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2202 ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
2203 ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2204 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
2205 # the legacy ruleset-* option names are accepted on luminous only
2206 ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
2207 ceph osd erasure-code-profile set barprofile crush-failure-domain=host
2208 # clean up
2209 ceph osd erasure-code-profile rm fooprofile
2210 ceph osd erasure-code-profile rm barprofile
2211 }
2212
2213 function test_mon_osd_misc()
2214 {
2215 set +e
2216
2217 # expect error about missing 'pool' argument
2218 ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
2219
2220 # expect error about unused argument foo
2221 ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
2222
2223 # expect "not in range" for invalid full ratio
2224 ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22
2225
2226 # expect 'higher than 100' for an invalid overload percentage
2227 ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
2228
2229 set -e
2230
2231 ceph osd reweight-by-utilization 110
2232 ceph osd reweight-by-utilization 110 .5
2233 expect_false ceph osd reweight-by-utilization 110 0
2234 expect_false ceph osd reweight-by-utilization 110 -0.1
2235 ceph osd test-reweight-by-utilization 110 .5 --no-increasing
2236 ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
2237 expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
2238 expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
2239 ceph osd reweight-by-pg 110
2240 ceph osd test-reweight-by-pg 110 .5
2241 ceph osd reweight-by-pg 110 rbd
2242 ceph osd reweight-by-pg 110 .5 rbd
2243 expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
2244 }
2245
2246 function test_mon_heap_profiler()
2247 {
2248 do_test=1
2249 set +e
2250 # expect 'heap' commands to be correctly parsed
2251 ceph heap stats 2>$TMPFILE
2252 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2253 echo "tcmalloc not enabled; skip heap profiler test"
2254 do_test=0
2255 fi
2256 set -e
2257
2258 [[ $do_test -eq 0 ]] && return 0
2259
2260 ceph heap start_profiler
2261 ceph heap dump
2262 ceph heap stop_profiler
2263 ceph heap release
2264 }
2265
2266 function test_admin_heap_profiler()
2267 {
2268 do_test=1
2269 set +e
2270 # expect 'heap' commands to be correctly parsed
2271 ceph heap stats 2>$TMPFILE
2272 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2273 echo "tcmalloc not enabled; skip heap profiler test"
2274 do_test=0
2275 fi
2276 set -e
2277
2278 [[ $do_test -eq 0 ]] && return 0
2279
2280 local admin_socket=$(get_admin_socket osd.0)
2281
2282 $SUDO ceph --admin-daemon $admin_socket heap start_profiler
2283 $SUDO ceph --admin-daemon $admin_socket heap dump
2284 $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
2285 $SUDO ceph --admin-daemon $admin_socket heap release
2286 }
2287
2288 function test_osd_bench()
2289 {
2290 # test osd bench limits
2291 # As we should not rely on defaults (as they may change over time),
2292 # let's inject some values and perform some simple tests
2293 # max iops: 10 # 10 IOPS
2294 # max throughput: 10485760 # 10MB/s
2295 # max block size: 2097152 # 2MB
2296 # duration: 10 # 10 seconds
2297
2298 local args="\
2299 --osd-bench-duration 10 \
2300 --osd-bench-max-block-size 2097152 \
2301 --osd-bench-large-size-max-throughput 10485760 \
2302 --osd-bench-small-size-max-iops 10"
2303 ceph tell osd.0 injectargs ${args## }
2304
2305 # anything with a bs larger than 2097152 must fail
2306 expect_false ceph tell osd.0 bench 1 2097153
2307 # but exactly the injected max block size (2097152) must succeed
2308 ceph tell osd.0 bench 1 2097152
2309
2310 # we assume 1MB as a large bs; anything lower is a small bs
2311 # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
2312 # max count: 409600 (bytes)
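# (worked out: 10 IOPS * 10 s * 4096 bytes = 409600 bytes)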
2313
2314 # more than max count must not be allowed
2315 expect_false ceph tell osd.0 bench 409601 4096
2316 # but exactly 409600 must succeed
2317 ceph tell osd.0 bench 409600 4096
2318
2319 # for a large bs, we are limited by throughput.
2320 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
2321 # the max count will be (10MB * 10s) = 100MB
2322 # max count: 104857600 (bytes)
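# (worked out: 10485760 B/s * 10 s = 104857600 bytes)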
2323
2324 # more than max count must not be allowed
2325 expect_false ceph tell osd.0 bench 104857601 2097152
2326 # up to max count must be allowed
2327 ceph tell osd.0 bench 104857600 2097152
2328 }
2329
2330 function test_osd_negative_filestore_merge_threshold()
2331 {
2332 $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
2333 expect_config_value "osd.0" "filestore_merge_threshold" -1
2334 }
2335
2336 function test_mon_tell()
2337 {
2338 ceph tell mon.a version
2339 ceph tell mon.b version
2340 expect_false ceph tell mon.foo version
2341
2342 sleep 1
2343
2344 ceph_watch_start debug audit
2345 ceph tell mon.a version
2346 ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
2347
2348 ceph_watch_start debug audit
2349 ceph tell mon.b version
2350 ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
2351 }
2352
2353 function test_mon_ping()
2354 {
2355 ceph ping mon.a
2356 ceph ping mon.b
2357 expect_false ceph ping mon.foo
2358
2359 ceph ping mon.\*
2360 }
2361
2362 function test_mon_deprecated_commands()
2363 {
2364 # current DEPRECATED commands are:
2365 # ceph compact
2366 # ceph scrub
2367 # ceph sync force
2368 #
2369 # Testing should be accomplished by setting
2370 # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
2371 # each one of these commands.
2372
2373 ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
2374 expect_false ceph tell mon.a compact 2> $TMPFILE
2375 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2376
2377 expect_false ceph tell mon.a scrub 2> $TMPFILE
2378 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2379
2380 expect_false ceph tell mon.a sync force 2> $TMPFILE
2381 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2382
2383 ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
2384 }
2385
2386 function test_mon_cephdf_commands()
2387 {
2388 # ceph df detail:
2389 # pool section:
2390 # RAW USED: the approximate raw space used by each pool, out of the raw total
2391
2392 ceph osd pool create cephdf_for_test 32 32 replicated
2393 ceph osd pool application enable cephdf_for_test rados
2394 ceph osd pool set cephdf_for_test size 2
2395
2396 dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
2397 rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
2398
2399 # wait for the object to show up
2400 for i in `seq 1 10`; do
2401 rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
2402 sleep 1
2403 done
2404 # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
2405 # to sync mon with osd
2406 flush_pg_stats
2407 local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
2408 cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
2409 raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`
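# the pool was created with size 2 above, so its raw usage should be twice
# the logical bytes_used reported in the pool stats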
2410
2411 ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
2412 rm ./cephdf_for_test
2413
2414 expect_false test $cal_raw_used_size != $raw_used_size
2415 }
2416
2417 function test_mon_pool_application()
2418 {
2419 ceph osd pool create app_for_test 10
2420
2421 ceph osd pool application enable app_for_test rbd
2422 expect_false ceph osd pool application enable app_for_test rgw
2423 ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
2424 ceph osd pool ls detail | grep "application rbd,rgw"
2425 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2426
2427 expect_false ceph osd pool application set app_for_test cephfs key value
2428 ceph osd pool application set app_for_test rbd key1 value1
2429 ceph osd pool application set app_for_test rbd key2 value2
2430 ceph osd pool application set app_for_test rgw key1 value1
2431 ceph osd pool application get app_for_test rbd key1 | grep 'value1'
2432 ceph osd pool application get app_for_test rbd key2 | grep 'value2'
2433 ceph osd pool application get app_for_test rgw key1 | grep 'value1'
2434
2435 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
2436
2437 ceph osd pool application rm app_for_test rgw key1
2438 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
2439 ceph osd pool application rm app_for_test rbd key2
2440 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
2441 ceph osd pool application rm app_for_test rbd key1
2442 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2443 ceph osd pool application rm app_for_test rbd key1 # should be idempotent
2444
2445 expect_false ceph osd pool application disable app_for_test rgw
2446 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2447 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
2448 ceph osd pool ls detail | grep "application rbd"
2449 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
2450
2451 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2452 ceph osd pool ls detail | grep -v "application "
2453 ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
2454
2455 ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
2456 }
2457
2458 function test_mon_tell_help_command()
2459 {
2460 ceph tell mon.a help
2461
2462 # wrong target
2463 expect_false ceph tell mon.zzz help
2464 }
2465
2466 function test_mon_stdin_stdout()
2467 {
2468 echo foo | ceph config-key set test_key -i -
2469 ceph config-key get test_key -o - | grep -c foo | grep -q 1
2470 }
2471
2472 function test_osd_tell_help_command()
2473 {
2474 ceph tell osd.1 help
2475 expect_false ceph tell osd.100 help
2476 }
2477
2478 function test_osd_compact()
2479 {
2480 ceph tell osd.1 compact
2481 $SUDO ceph daemon osd.1 compact
2482 }
2483
2484 function test_mds_tell_help_command()
2485 {
2486 local FS_NAME=cephfs
2487 if ! mds_exists ; then
2488 echo "Skipping test, no MDS found"
2489 return
2490 fi
2491
2492 remove_all_fs
2493 ceph osd pool create fs_data 10
2494 ceph osd pool create fs_metadata 10
2495 ceph fs new $FS_NAME fs_metadata fs_data
2496 wait_mds_active $FS_NAME
2497
2498
2499 ceph tell mds.a help
2500 expect_false ceph tell mds.z help
2501
2502 remove_all_fs
2503 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
2504 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
2505 }
2506
2507 function test_mgr_tell()
2508 {
2509 ceph tell mgr help
2510 #ceph tell mgr fs status # see http://tracker.ceph.com/issues/20761
2511 ceph tell mgr osd status
2512 }
2513
2514 #
2515 # New tests should be added to the TESTS array below
2516 #
2517 # Individual tests may be run using the '-t <testname>' argument
2518 # The user can specify '-t <testname>' as many times as she wants
2519 #
2520 # Tests will be run in order presented in the TESTS array, or in
2521 # the order specified by the '-t <testname>' options.
2522 #
2523 # '-l' will list all the available test names
2524 # '-h' will show usage
2525 #
2526 # The test maintains backward compatibility: not specifying arguments
2527 # will run all tests in the order they appear in the TESTS array.
2528 #
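# For example, an invocation along the lines of
#   ./test.sh -t mon_ping -t osd_bench
# would run only test_mon_ping and test_osd_bench, in that order.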
2529
2530 set +x
2531 MON_TESTS+=" mon_injectargs"
2532 MON_TESTS+=" mon_injectargs_SI"
2533 for i in `seq 9`; do
2534 MON_TESTS+=" tiering_$i";
2535 done
2536 MON_TESTS+=" auth"
2537 MON_TESTS+=" auth_profiles"
2538 MON_TESTS+=" mon_misc"
2539 MON_TESTS+=" mon_mon"
2540 MON_TESTS+=" mon_osd"
2541 MON_TESTS+=" mon_config_key"
2542 MON_TESTS+=" mon_crush"
2543 MON_TESTS+=" mon_osd_create_destroy"
2544 MON_TESTS+=" mon_osd_pool"
2545 MON_TESTS+=" mon_osd_pool_quota"
2546 MON_TESTS+=" mon_pg"
2547 MON_TESTS+=" mon_osd_pool_set"
2548 MON_TESTS+=" mon_osd_tiered_pool_set"
2549 MON_TESTS+=" mon_osd_erasure_code"
2550 MON_TESTS+=" mon_osd_misc"
2551 MON_TESTS+=" mon_heap_profiler"
2552 MON_TESTS+=" mon_tell"
2553 MON_TESTS+=" mon_ping"
2554 MON_TESTS+=" mon_deprecated_commands"
2555 MON_TESTS+=" mon_caps"
2556 MON_TESTS+=" mon_cephdf_commands"
2557 MON_TESTS+=" mon_tell_help_command"
2558 MON_TESTS+=" mon_stdin_stdout"
2559
2560 OSD_TESTS+=" osd_bench"
2561 OSD_TESTS+=" osd_negative_filestore_merge_threshold"
2562 OSD_TESTS+=" tiering_agent"
2563 OSD_TESTS+=" admin_heap_profiler"
2564 OSD_TESTS+=" osd_tell_help_command"
2565 OSD_TESTS+=" osd_compact"
2566
2567 MDS_TESTS+=" mds_tell"
2568 MDS_TESTS+=" mon_mds"
2569 MDS_TESTS+=" mon_mds_metadata"
2570 MDS_TESTS+=" mds_tell_help_command"
2571
2572 MGR_TESTS+=" mgr_tell"
2573
2574 TESTS+=$MON_TESTS
2575 TESTS+=$OSD_TESTS
2576 TESTS+=$MDS_TESTS
2577 TESTS+=$MGR_TESTS
2578
2579 #
2580 # "main" follows
2581 #
2582
2583 function list_tests()
2584 {
2585 echo "AVAILABLE TESTS"
2586 for i in $TESTS; do
2587 echo " $i"
2588 done
2589 }
2590
2591 function usage()
2592 {
2593 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2594 }
2595
2596 tests_to_run=()
2597
2598 sanity_check=true
2599
2600 while [[ $# -gt 0 ]]; do
2601 opt=$1
2602
2603 case "$opt" in
2604 "-l" )
2605 do_list=1
2606 ;;
2607 "--asok-does-not-need-root" )
2608 SUDO=""
2609 ;;
2610 "--no-sanity-check" )
2611 sanity_check=false
2612 ;;
2613 "--test-mon" )
2614 tests_to_run+="$MON_TESTS"
2615 ;;
2616 "--test-osd" )
2617 tests_to_run+="$OSD_TESTS"
2618 ;;
2619 "--test-mds" )
2620 tests_to_run+="$MDS_TESTS"
2621 ;;
2622 "--test-mgr" )
2623 tests_to_run+="$MGR_TESTS"
2624 ;;
2625 "-t" )
2626 shift
2627 if [[ -z "$1" ]]; then
2628 echo "missing argument to '-t'"
2629 usage ;
2630 exit 1
2631 fi
2632 tests_to_run+=" $1"
2633 ;;
2634 "-h" )
2635 usage ;
2636 exit 0
2637 ;;
2638 esac
2639 shift
2640 done
2641
2642 if [[ $do_list -eq 1 ]]; then
2643 list_tests ;
2644 exit 0
2645 fi
2646
2647 ceph osd pool create rbd 10
2648
2649 if test -z "$tests_to_run" ; then
2650 tests_to_run="$TESTS"
2651 fi
2652
2653 if $sanity_check ; then
2654 wait_no_osd_down
2655 fi
2656 for i in $tests_to_run; do
2657 if $sanity_check ; then
2658 check_no_osd_down
2659 fi
2660 set -x
2661 test_${i}
2662 set +x
2663 done
2664 if $sanity_check ; then
2665 check_no_osd_down
2666 fi
2667
2668 set -x
2669
2670 echo OK