#!/bin/bash -x
# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab

source $(dirname $0)/../../standalone/ceph-helpers.sh

set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}
export CEPH_DEV=1

function get_admin_socket()
{
  local client=$1

  if test -n "$CEPH_ASOK_DIR";
  then
    echo $(get_asok_dir)/$client.asok
  else
    local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
    echo "/var/run/ceph/$cluster-$client.asok"
  fi
}
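
# Illustrative usage of get_admin_socket (not executed by the tests): resolve
# the admin socket path for a daemon and query it directly. The daemon name
# here is only an example.
#
#   asok=$(get_admin_socket osd.0)
#   ceph --admin-daemon $asok config get osd_enable_op_tracker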

function check_no_osd_down()
{
  ! ceph osd dump | grep ' down '
}

function wait_no_osd_down()
{
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_no_osd_down ; then
      echo "waiting for osd(s) to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  check_no_osd_down
}

function expect_false()
{
  set -x
  if "$@"; then return 1; else return 0; fi
}
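
# Illustrative usage of expect_false (not executed here): it inverts the exit
# status of the wrapped command, so a command that is supposed to be rejected
# can be asserted on without tripping "set -e", e.g.
#
#   expect_false ceph osd pool rmsnap pool_fake snapshot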

TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0

TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)

#
# retry_eagain max cmd args ...
#
# retry cmd args ... if it exits on error and its output contains the
# string EAGAIN, at most $max times
#
function retry_eagain()
{
  local max=$1
  shift
  local status
  local tmpfile=$TEMP_DIR/retry_eagain.$$
  local count
  for count in $(seq 1 $max) ; do
    status=0
    "$@" > $tmpfile 2>&1 || status=$?
    if test $status = 0 ||
      ! grep --quiet EAGAIN $tmpfile ; then
      break
    fi
    sleep 1
  done
  if test $count = $max ; then
    echo retried with non zero exit status, $max times: "$@" >&2
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}

#
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of cmd arg ... if the output contains
# ENXIO.
#
function map_enxio_to_eagain()
{
  local status=0
  local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$

  "$@" > $tmpfile 2>&1 || status=$?
  if test $status != 0 &&
    grep --quiet ENXIO $tmpfile ; then
    echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}
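
# Illustrative combination of the two helpers above (the same pattern is used
# in test_mon_osd further down): retry a "ceph tell" that may transiently fail
# with ENXIO while an OSD restarts, treating ENXIO like EAGAIN. osd.0 here
# stands for any OSD id.
#
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version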

function check_response()
{
  expected_string=$1
  retcode=$2
  expected_retcode=$3
  if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
    echo "return code invalid: got $retcode, expected $expected_retcode" >&2
    exit 1
  fi

  if ! grep --quiet -- "$expected_string" $TMPFILE ; then
    echo "Didn't find $expected_string in output" >&2
    cat $TMPFILE >&2
    exit 1
  fi
}
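
# Illustrative usage of check_response (taken from test_tiering_7 below): run
# a command that is expected to fail, capture its stderr in $TMPFILE, then
# assert on the error text.
#
#   ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
#   check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"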

function get_config_value_or_die()
{
  local target config_opt raw val

  target=$1
  config_opt=$2

  raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
  if [[ $? -ne 0 ]]; then
    echo "error obtaining config opt '$config_opt' from '$target': $raw"
    exit 1
  fi

  raw=`echo $raw | sed -e 's/[{} "]//g'`
  val=`echo $raw | cut -f2 -d:`

  echo "$val"
  return 0
}

function expect_config_value()
{
  local target config_opt expected_val val
  target=$1
  config_opt=$2
  expected_val=$3

  val=$(get_config_value_or_die $target $config_opt)

  if [[ "$val" != "$expected_val" ]]; then
    echo "expected '$expected_val', got '$val'"
    exit 1
  fi
}
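
# Illustrative usage (mirrors test_mon_injectargs_SI below): set an option via
# the daemon's admin socket, then assert on the value the daemon parsed.
#
#   $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10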

function ceph_watch_start()
{
  local whatch_opt=--watch

  if [ -n "$1" ]; then
    whatch_opt=--watch-$1
    if [ -n "$2" ]; then
      whatch_opt+=" --watch-channel $2"
    fi
  fi

  CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
  ceph $whatch_opt > $CEPH_WATCH_FILE &
  CEPH_WATCH_PID=$!

  # wait until the "ceph" client is connected and receiving
  # log messages from monitor
  for i in `seq 3`; do
    grep -q "cluster" $CEPH_WATCH_FILE && break
    sleep 1
  done
}

function ceph_watch_wait()
{
  local regexp=$1
  local timeout=30

  if [ -n "$2" ]; then
    timeout=$2
  fi

  for i in `seq ${timeout}`; do
    grep -q "$regexp" $CEPH_WATCH_FILE && break
    sleep 1
  done

  kill $CEPH_WATCH_PID

  if ! grep "$regexp" $CEPH_WATCH_FILE; then
    echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
    cat $CEPH_WATCH_FILE >&2
    return 1
  fi
}
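
# Illustrative usage of the watch helpers (mirrors test_mon_misc below): start
# a background watcher, emit a cluster log entry, then wait for it to show up
# in the captured stream.
#
#   ceph_watch_start
#   ceph log "this is a test log message"
#   ceph_watch_wait "this is a test log message"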

function test_mon_injectargs()
{
  CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ! grep "the.dump" $TMPFILE || return 1
  ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true'"
  ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
  expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
      ceph tell osd.0 injectargs -- '--osd_op_history_duration'

  ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200' >& $TMPFILE || return 1
  check_response "osd_deep_scrub_interval = '2419200.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon_probe_timeout 2' >& $TMPFILE || return 1
  check_response "mon_probe_timeout = '2.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
  check_response "mon_lease = '6.000000' (not observed, change may require restart)"

  # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
  expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 >& $TMPFILE || return 1
  check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
}

function test_mon_injectargs_SI()
{
  # Test SI units during injectargs and 'config set'
  # We only aim at testing that the units are parsed correctly,
  # and don't intend to test whether the options being set
  # actually expect SI units to be passed.
  # Keep in mind that all integer based options (i.e., INT,
  # LONG, U32, U64) will accept SI unit modifiers.
  initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
  check_response "'10F': (22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
  expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
  expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}
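
# For reference, the suffixes used above are binary multiples: 10K is parsed
# as 10 * 1024 = 10240 and 1G as 1024^3 = 1073741824, which is what the
# expect_config_value assertions check, e.g.
#
#   ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240  # 10 * 1024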

function test_tiering_agent()
{
  local slow=slow_eviction
  local fast=fast_eviction
  ceph osd pool create $slow 1 1
  ceph osd pool application enable $slow rados
  ceph osd pool create $fast 1 1
  ceph osd tier add $slow $fast
  ceph osd tier cache-mode $fast writeback
  ceph osd tier set-overlay $slow $fast
  ceph osd pool set $fast hit_set_type bloom
  rados -p $slow put obj1 /etc/group
  ceph osd pool set $fast target_max_objects 1
  ceph osd pool set $fast hit_set_count 1
  ceph osd pool set $fast hit_set_period 5
  # wait for the object to be evicted from the cache
  local evicted
  evicted=false
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      evicted=true
      break
    fi
    sleep 1
  done
  $evicted # assert
  # the object is proxy read and promoted to the cache
  rados -p $slow get obj1 - >/dev/null
  # wait for the promoted object to be evicted again
  evicted=false
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      evicted=true
      break
    fi
    sleep 1
  done
  $evicted # assert
  ceph osd tier remove-overlay $slow
  ceph osd tier remove $slow $fast
  ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
  ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}

function test_tiering_1()
{
  # tiering
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create slow2 2
  ceph osd pool application enable slow2 rados
  ceph osd pool create cache 2
  ceph osd pool create cache2 2
  ceph osd tier add slow cache
  ceph osd tier add slow cache2
  expect_false ceph osd tier add slow2 cache
  # test some state transitions
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache readonly
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache writeback
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  # test with dirty objects in the tier pool
  # tier pool currently set to 'writeback'
  rados -p cache put /etc/passwd /etc/passwd
  flush_pg_stats
  # 1 dirty object in pool 'cache'
  ceph osd tier cache-mode cache proxy
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache writeback
  # remove object from tier pool
  rados -p cache rm /etc/passwd
  rados -p cache cache-flush-evict-all
  flush_pg_stats
  # no dirty objects in pool 'cache'
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  TRIES=0
  while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
  do
    grep 'currently creating pgs' $TMPFILE
    TRIES=$(( $TRIES + 1 ))
    test $TRIES -ne 60
    sleep 3
  done
  expect_false ceph osd pool set cache pg_num 4
  ceph osd tier cache-mode cache none
  ceph osd tier set-overlay slow cache
  expect_false ceph osd tier set-overlay slow cache2
  expect_false ceph osd tier remove slow cache
  ceph osd tier remove-overlay slow
  ceph osd tier set-overlay slow cache2
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache
  ceph osd tier add slow2 cache
  expect_false ceph osd tier set-overlay slow cache
  ceph osd tier set-overlay slow2 cache
  ceph osd tier remove-overlay slow2
  ceph osd tier remove slow2 cache
  ceph osd tier remove slow cache2

  # make sure a non-empty pool fails
  rados -p cache2 put /etc/passwd /etc/passwd
  while ! ceph df | grep cache2 | grep ' 1 ' ; do
    echo waiting for pg stats to flush
    sleep 2
  done
  expect_false ceph osd tier add slow cache2
  ceph osd tier add slow cache2 --force-nonempty
  ceph osd tier remove slow cache2

  ceph osd pool ls | grep cache2
  ceph osd pool ls -f json-pretty | grep cache2
  ceph osd pool ls detail | grep cache2
  ceph osd pool ls detail -f json-pretty | grep cache2

  ceph osd pool delete slow slow --yes-i-really-really-mean-it
  ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
  ceph osd pool delete cache cache --yes-i-really-really-mean-it
  ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
}

function test_tiering_2()
{
  # make sure we can't clobber snapshot state
  ceph osd pool create snap_base 2
  ceph osd pool application enable snap_base rados
  ceph osd pool create snap_cache 2
  ceph osd pool mksnap snap_cache snapname
  expect_false ceph osd tier add snap_base snap_cache
  ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
  ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}

function test_tiering_3()
{
  # make sure we can't create snapshot on tier
  ceph osd pool create basex 2
  ceph osd pool application enable basex rados
  ceph osd pool create cachex 2
  ceph osd tier add basex cachex
  expect_false ceph osd pool mksnap cachex snapname
  ceph osd tier remove basex cachex
  ceph osd pool delete basex basex --yes-i-really-really-mean-it
  ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}

function test_tiering_4()
{
  # make sure we can't create an ec pool tier
  ceph osd pool create eccache 2 2 erasure
  expect_false ceph osd set-require-min-compat-client bobtail
  ceph osd pool create repbase 2
  ceph osd pool application enable repbase rados
  expect_false ceph osd tier add repbase eccache
  ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
  ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}

function test_tiering_5()
{
  # convenient add-cache command
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create cache3 2
  ceph osd tier add-cache slow cache3 1024000
  ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
  ceph osd tier remove slow cache3 2> $TMPFILE || true
  check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache3
  ceph osd pool ls | grep cache3
  ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
  ! ceph osd pool ls | grep cache3 || exit 1
  ceph osd pool delete slow slow --yes-i-really-really-mean-it
}

function test_tiering_6()
{
  # check whether add-cache works
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd tier cache-mode cachepool writeback
  rados -p datapool put object /etc/passwd
  rados -p cachepool stat object
  rados -p cachepool cache-flush object
  rados -p datapool stat object
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}

function test_tiering_7()
{
  # protection against pool removal when used as tiers
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'datapool' has tiers cachepool"
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}

function test_tiering_8()
{
  ## check health check
  ceph osd set notieragent
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cache4 2
  ceph osd tier add-cache datapool cache4 1024000
  ceph osd tier cache-mode cache4 writeback
  tmpfile=$(mktemp|grep tmp)
  dd if=/dev/zero of=$tmpfile bs=4K count=1
  ceph osd pool set cache4 target_max_objects 200
  ceph osd pool set cache4 target_max_bytes 1000000
  rados -p cache4 put foo1 $tmpfile
  rados -p cache4 put foo2 $tmpfile
  rm -f $tmpfile
  flush_pg_stats
  ceph df | grep datapool | grep ' 2 '
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cache4
  ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
  ceph osd unset notieragent
}

function test_tiering_9()
{
  # make sure 'tier remove' behaves as we expect
  # i.e., removing a tier from a pool that's not its base pool only
  # results in a 'pool foo is now (or already was) not a tier of bar'
  #
  ceph osd pool create basepoolA 2
  ceph osd pool application enable basepoolA rados
  ceph osd pool create basepoolB 2
  ceph osd pool application enable basepoolB rados
  poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
  poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')

  ceph osd pool create cache5 2
  ceph osd pool create cache6 2
  ceph osd tier add basepoolA cache5
  ceph osd tier add basepoolB cache6
  ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
  ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"

  ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
  ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1

  ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
  ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1

  ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
  ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
  ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
  ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}

function test_auth()
{
  ceph auth add client.xx mon allow osd "allow *"
  ceph auth export client.xx >client.xx.keyring
  ceph auth add client.xx -i client.xx.keyring
  rm -f client.xx.keyring
  ceph auth list | grep client.xx
  ceph auth ls | grep client.xx
  ceph auth get client.xx | grep caps | grep mon
  ceph auth get client.xx | grep caps | grep osd
  ceph auth get-key client.xx
  ceph auth print-key client.xx
  ceph auth print_key client.xx
  ceph auth caps client.xx osd "allow rw"
  expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
  ceph auth get client.xx | grep osd | grep "allow rw"
  ceph auth export | grep client.xx
  ceph auth export -o authfile
  ceph auth import -i authfile
  ceph auth export -o authfile2
  diff authfile authfile2
  rm authfile authfile2
  ceph auth del client.xx
  expect_false ceph auth get client.xx

  # (almost) interactive mode
  echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
  ceph auth get client.xx
  # script mode
  echo 'auth del client.xx' | ceph
  expect_false ceph auth get client.xx

  #
  # get / set auid
  #
  local auid=444
  ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
  expect_false ceph auth import --in-file TEST-keyring
  rm TEST-keyring
  ceph-authtool --create-keyring --name client.TEST --gen-key --cap mon "allow r" --set-uid $auid TEST-keyring
  ceph auth import --in-file TEST-keyring
  rm TEST-keyring
  ceph auth get client.TEST > $TMPFILE
  check_response "auid = $auid"
  ceph --format json-pretty auth get client.TEST > $TMPFILE
  check_response '"auid": '$auid
  ceph auth ls > $TMPFILE
  check_response "auid: $auid"
  ceph --format json-pretty auth ls > $TMPFILE
  check_response '"auid": '$auid
  ceph auth del client.TEST
}

function test_auth_profiles()
{
  ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
       mgr 'allow profile read-only'
  ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
       mgr 'allow profile read-write'
  ceph auth add client.xx-profile-rd mon 'allow profile role-definer'

  ceph auth export > client.xx.keyring

  # read-only is allowed all read-only commands (auth excluded)
  ceph -n client.xx-profile-ro -k client.xx.keyring status
  ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
  ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
  # read-only gets access denied for rw commands or auth commands
  ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # read-write is allowed for all read-write commands (except auth)
  ceph -n client.xx-profile-rw -k client.xx.keyring status
  ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
  ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
  ceph -n client.xx-profile-rw -k client.xx.keyring log foo
  ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
  ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
  # read-write gets access denied for auth commands
  ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
  ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
  ceph -n client.xx-profile-rd -k client.xx.keyring auth export
  ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
  ceph -n client.xx-profile-rd -k client.xx.keyring status
  ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  # read-only 'mon' subsystem commands are allowed
  ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
  # but read-write 'mon' commands are not
  ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"

  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw

  # add a new role-definer with the existing role-definer
  ceph -n client.xx-profile-rd -k client.xx.keyring \
    auth add client.xx-profile-rd2 mon 'allow profile role-definer'
  ceph -n client.xx-profile-rd -k client.xx.keyring \
    auth export > client.xx.keyring.2
  # remove old role-definer using the new role-definer
  ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
    auth del client.xx-profile-rd
  # remove the remaining role-definer with admin
  ceph auth del client.xx-profile-rd2
  rm -f client.xx.keyring client.xx.keyring.2
}

function test_mon_caps()
{
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring

  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"

  rm -rf $TEMP_DIR/ceph.client.bug.keyring
  ceph auth del client.bug
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"
}

function test_mon_misc()
{
  # with and without verbosity
  ceph osd dump | grep '^epoch'
  ceph --concise osd dump | grep '^epoch'

  ceph osd df | grep 'MIN/MAX VAR'

  # df
  ceph df > $TMPFILE
  grep GLOBAL $TMPFILE
  grep -v DIRTY $TMPFILE
  ceph df detail > $TMPFILE
  grep DIRTY $TMPFILE
  ceph df --format json > $TMPFILE
  grep 'total_bytes' $TMPFILE
  grep -v 'dirty' $TMPFILE
  ceph df detail --format json > $TMPFILE
  grep 'rd_bytes' $TMPFILE
  grep 'dirty' $TMPFILE
  ceph df --format xml | grep '<total_bytes>'
  ceph df detail --format xml | grep '<rd_bytes>'

  ceph fsid
  ceph health
  ceph health detail
  ceph health --format json-pretty
  ceph health detail --format xml-pretty

  ceph time-sync-status

  ceph node ls
  for t in mon osd mds ; do
    ceph node ls $t
  done

  ceph_watch_start
  mymsg="this is a test log message $$.$(date)"
  ceph log "$mymsg"
  ceph log last | grep "$mymsg"
  ceph log last 100 | grep "$mymsg"
  ceph_watch_wait "$mymsg"

  ceph mgr dump
  ceph mgr module ls
  ceph mgr module enable restful
  expect_false ceph mgr module enable foodne
  ceph mgr module enable foodne --force
  ceph mgr module disable foodne
  ceph mgr module disable foodnebizbangbash

  ceph mon metadata a
  ceph mon metadata
  ceph mon count-metadata ceph_version
  ceph mon versions

  ceph mgr metadata
  ceph mgr versions
  ceph mgr count-metadata ceph_version

  ceph versions

  ceph node ls
}

function check_mds_active()
{
  fs_name=$1
  ceph fs get $fs_name | grep active
}

function wait_mds_active()
{
  fs_name=$1
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_mds_active $fs_name ; then
      echo "waiting for an active MDS daemon ($i/$max_run)"
      sleep 5
    else
      break
    fi
  done
  check_mds_active $fs_name
}

function get_mds_gids()
{
  fs_name=$1
  ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
}
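
# Illustrative usage of get_mds_gids (not executed here): it prints the GIDs
# of all MDS daemons in a filesystem, space separated, which is how
# fail_all_mds below consumes it:
#
#   mds_gids=$(get_mds_gids cephfs)
#   for mds_gid in $mds_gids ; do
#     ceph mds fail $mds_gid
#   done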

function fail_all_mds()
{
  fs_name=$1
  ceph fs set $fs_name cluster_down true
  mds_gids=$(get_mds_gids $fs_name)
  for mds_gid in $mds_gids ; do
    ceph mds fail $mds_gid
  done
  if check_mds_active $fs_name ; then
    echo "An active MDS remains, something went wrong"
    ceph fs get $fs_name
    exit -1
  fi
}

function remove_all_fs()
{
  existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
  for fs_name in $existing_fs ; do
    echo "Removing fs ${fs_name}..."
    fail_all_mds $fs_name
    echo "Removing existing filesystem '${fs_name}'..."
    ceph fs rm $fs_name --yes-i-really-mean-it
    echo "Removed '${fs_name}'."
  done
}

# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
  ceph auth ls | grep "^mds"
}

# some of the commands are just not idempotent.
function without_test_dup_command()
{
  if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
    $@
  else
    local saved=${CEPH_CLI_TEST_DUP_COMMAND}
    unset CEPH_CLI_TEST_DUP_COMMAND
    $@
    CEPH_CLI_TEST_DUP_COMMAND=$saved
  fi
}
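
# Illustrative usage (mirrors test_mds_tell below): commands such as an MDS
# respawn are not idempotent, so they are run with the duplicate-command check
# temporarily disabled.
#
#   without_test_dup_command ceph tell mds.0 respawn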

function test_mds_tell()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  remove_all_fs
  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  # Test injectargs by GID
  old_mds_gids=$(get_mds_gids $FS_NAME)
  echo Old GIDs: $old_mds_gids

  for mds_gid in $old_mds_gids ; do
    ceph tell mds.$mds_gid injectargs "--debug-mds 20"
  done
  expect_false ceph tell mds.a injectargs mds_max_file_recover -1

  # Test respawn by rank
  without_test_dup_command ceph tell mds.0 respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  # Test respawn by ID
  without_test_dup_command ceph tell mds.a respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  remove_all_fs
  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mon_mds()
{
  local FS_NAME=cephfs
  remove_all_fs

  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data

  ceph fs set $FS_NAME cluster_down true
  ceph fs set $FS_NAME cluster_down false

  # Legacy commands, act on default fs
  ceph mds cluster_down
  ceph mds cluster_up

  ceph mds compat rm_incompat 4
  ceph mds compat rm_incompat 4

  # We don't want any MDSs to be up, their activity can interfere with
  # the "current_epoch + 1" checking below if they're generating updates
  fail_all_mds $FS_NAME

  ceph mds compat show
  expect_false ceph mds deactivate 2
  ceph mds dump
  ceph fs dump
  ceph fs get $FS_NAME
  for mds_gid in $(get_mds_gids $FS_NAME) ; do
    ceph mds metadata $mds_gid
  done
  ceph mds metadata
  ceph mds versions
  ceph mds count-metadata os

  # XXX mds fail, but how do you undo it?
  mdsmapfile=$TEMP_DIR/mdsmap.$$
  current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
  [ -s $mdsmapfile ]
  rm $mdsmapfile

  ceph osd pool create data2 10
  ceph osd pool create data3 10
  data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
  data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
  ceph mds add_data_pool $data2_pool
  ceph mds add_data_pool $data3_pool
  ceph mds add_data_pool 100 >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph mds add_data_pool foobarbaz >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph mds remove_data_pool $data2_pool
  ceph mds remove_data_pool $data3_pool
  ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
  ceph mds set allow_multimds false
  expect_false ceph mds set_max_mds 4
  ceph mds set allow_multimds true
  ceph mds set_max_mds 4
  ceph mds set_max_mds 3
  ceph mds set_max_mds 256
  expect_false ceph mds set_max_mds 257
  ceph mds set max_mds 4
  ceph mds set max_mds 256
  expect_false ceph mds set max_mds 257
  expect_false ceph mds set max_mds asdf
  expect_false ceph mds set inline_data true
  ceph mds set inline_data true --yes-i-really-mean-it
  ceph mds set inline_data yes --yes-i-really-mean-it
  ceph mds set inline_data 1 --yes-i-really-mean-it
  expect_false ceph mds set inline_data --yes-i-really-mean-it
  ceph mds set inline_data false
  ceph mds set inline_data no
  ceph mds set inline_data 0
  expect_false ceph mds set inline_data asdf
  ceph mds set max_file_size 1048576
  expect_false ceph mds set max_file_size 123asdf

  expect_false ceph mds set allow_new_snaps
  expect_false ceph mds set allow_new_snaps true
  ceph mds set allow_new_snaps true --yes-i-really-mean-it
  ceph mds set allow_new_snaps 0
  ceph mds set allow_new_snaps false
  ceph mds set allow_new_snaps no
  expect_false ceph mds set allow_new_snaps taco

  # we should never be able to add EC pools as data or metadata pools
  # create an ec-pool...
  ceph osd pool create mds-ec-pool 10 10 erasure
  set +e
  ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e
  ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
  data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
  metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')

  fail_all_mds $FS_NAME

  set +e
  # Check that rmfailed requires confirmation
  expect_false ceph mds rmfailed 0
  ceph mds rmfailed 0 --yes-i-really-mean-it
  set -e

  # Check that `newfs` is no longer permitted
  expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE

  # Check that 'fs reset' runs
  ceph fs reset $FS_NAME --yes-i-really-mean-it

  # Check that creating a second FS fails by default
  ceph osd pool create fs_metadata2 10
  ceph osd pool create fs_data2 10
  set +e
  expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
  set -e

  # Check that setting enable_multiple enables creation of second fs
  ceph fs flag set enable_multiple true --yes-i-really-mean-it
  ceph fs new cephfs2 fs_metadata2 fs_data2

  # Clean up multi-fs stuff
  fail_all_mds cephfs2
  ceph fs rm cephfs2 --yes-i-really-mean-it
  ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
  ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it

  fail_all_mds $FS_NAME

  # Clean up to enable subsequent fs new tests
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  set +e
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e

  # ... now create a cache tier in front of the EC pool...
  ceph osd pool create mds-tier 2
  ceph osd tier add mds-ec-pool mds-tier
  ceph osd tier set-overlay mds-ec-pool mds-tier
  tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')

  # Use of a readonly tier should be forbidden
  ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
  set +e
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
  set -e

  # Use of a writeback tier should enable FS creation
  ceph osd tier cache-mode mds-tier writeback
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force

  # While a FS exists using the tiered pools, I should not be allowed
  # to remove the tier
  set +e
  ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  set -e

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ... but we should be forbidden from using the cache pool in the FS directly.
  set +e
  ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  set -e

  # Clean up tier + EC pools
  ceph osd tier remove-overlay mds-ec-pool
  ceph osd tier remove mds-ec-pool mds-tier

  # Create a FS using the 'cache' pool now that it's no longer a tier
  ceph fs new $FS_NAME fs_metadata mds-tier --force

  # We should be forbidden from using this pool as a tier now that
  # it's in use for CephFS
  set +e
  ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  set -e

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # We should be permitted to use an EC pool with overwrites enabled
  # as the data pool...
  ceph osd pool set mds-ec-pool allow_ec_overwrites true
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ...but not as the metadata pool
  set +e
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e

  ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it

  # Create a FS and check that we can subsequently add a cache tier to it
  ceph fs new $FS_NAME fs_metadata fs_data --force

  # Adding overlay to FS pool should be permitted, RADOS clients handle this.
  ceph osd tier add fs_metadata mds-tier
  ceph osd tier cache-mode mds-tier writeback
  ceph osd tier set-overlay fs_metadata mds-tier

  # Removing tier should be permitted because the underlying pool is
  # replicated (#11504 case)
  ceph osd tier cache-mode mds-tier proxy
  ceph osd tier remove-overlay fs_metadata
  ceph osd tier remove fs_metadata mds-tier
  ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it

  # Clean up FS
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  ceph mds stat
  # ceph mds tell mds.a getmap
  # ceph mds rm
  # ceph mds rmfailed
  # ceph mds set_state
  # ceph mds stop

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mon_mds_metadata()
{
  local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
  test "$nmons" -gt 0

  ceph mds dump |
  sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
  while read gid id rank; do
    ceph mds metadata ${gid} | grep '"hostname":'
    ceph mds metadata ${id} | grep '"hostname":'
    ceph mds metadata ${rank} | grep '"hostname":'

    local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
    test "$n" -eq "$nmons"
  done

  expect_false ceph mds metadata UNKNOWN
}

function test_mon_mon()
{
  # print help message
  ceph --help mon
  # no mon add/remove
  ceph mon dump
  ceph mon getmap -o $TEMP_DIR/monmap.$$
  [ -s $TEMP_DIR/monmap.$$ ]
  # ceph mon tell
  ceph mon_status

  # test mon features
  ceph mon feature ls
  ceph mon feature set kraken --yes-i-really-mean-it
  expect_false ceph mon feature set abcd
  expect_false ceph mon feature set abcd --yes-i-really-mean-it
}

function gen_secrets_file()
{
  # let's assume we can have the following types
  # all - generates both cephx and lockbox, with mock dm-crypt key
  # cephx - only cephx
  # no_cephx - lockbox and dm-crypt, no cephx
  # no_lockbox - dm-crypt and cephx, no lockbox
  # empty - empty file
  # empty_json - correct json, empty map
  # bad_json - bad json :)
  #
  local t=$1
  if [[ -z "$t" ]]; then
    t="all"
  fi

  fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
  echo $fn
  if [[ "$t" == "empty" ]]; then
    return 0
  fi

  echo "{" > $fn
  if [[ "$t" == "bad_json" ]]; then
    echo "asd: ; }" >> $fn
    return 0
  elif [[ "$t" == "empty_json" ]]; then
    echo "}" >> $fn
    return 0
  fi

  cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
  lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
  dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""

  if [[ "$t" == "all" ]]; then
    echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "cephx" ]]; then
    echo "$cephx_secret" >> $fn
  elif [[ "$t" == "no_cephx" ]]; then
    echo "$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "no_lockbox" ]]; then
    echo "$cephx_secret,$dmcrypt_key" >> $fn
  else
    echo "unknown gen_secrets_file() type '$t'"
    return 1
  fi
  echo "}" >> $fn
  return 0
}
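
# Illustrative usage (mirrors test_mon_osd_create_destroy below): generate a
# secrets file of a given flavour and feed it to "ceph osd new".
#
#   all_secrets=$(gen_secrets_file "all")
#   ceph osd new $(uuidgen) -i $all_secrets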

function test_mon_osd_create_destroy()
{
  ceph osd new 2>&1 | grep 'EINVAL'
  ceph osd new '' -1 2>&1 | grep 'EINVAL'
  ceph osd new '' 10 2>&1 | grep 'EINVAL'

  old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

  old_osds=$(ceph osd ls)
  num_osds=$(ceph osd ls | wc -l)

  uuid=$(uuidgen)
  id=$(ceph osd new $uuid 2>/dev/null)

  for i in $old_osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd find $id

  id2=`ceph osd new $uuid 2>/dev/null`

  [[ $id2 == $id ]]

  ceph osd new $uuid $id

  id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST

  uuid2=$(uuidgen)
  id2=$(ceph osd new $uuid2)
  ceph osd find $id2
  [[ "$id2" != "$id" ]]

  ceph osd new $uuid $id2 2>&1 | grep EEXIST
  ceph osd new $uuid2 $id2

  # test with secrets
  empty_secrets=$(gen_secrets_file "empty")
  empty_json=$(gen_secrets_file "empty_json")
  all_secrets=$(gen_secrets_file "all")
  cephx_only=$(gen_secrets_file "cephx")
  no_cephx=$(gen_secrets_file "no_cephx")
  no_lockbox=$(gen_secrets_file "no_lockbox")
  bad_json=$(gen_secrets_file "bad_json")

  # empty secrets should be idempotent
  new_id=$(ceph osd new $uuid $id -i $empty_secrets)
  [[ "$new_id" == "$id" ]]

  # empty json, thus empty secrets
  new_id=$(ceph osd new $uuid $id -i $empty_json)
  [[ "$new_id" == "$id" ]]

  ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'

  ceph osd rm $id
  ceph osd rm $id2
  ceph osd setmaxosd $old_maxosd

  ceph osd new $uuid -i $bad_json 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'

  osds=$(ceph osd ls)
  id=$(ceph osd new $uuid -i $all_secrets)
  for i in $osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd find $id

  # validate secrets and dm-crypt are set
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
    jq '.key')
  s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
  [[ $k == $s ]]
  ceph config-key exists dm-crypt/osd/$uuid/luks

  osds=$(ceph osd ls)
  id2=$(ceph osd new $uuid2 -i $cephx_only)
  for i in $osds; do
    [[ "$i" != "$id2" ]]
  done

  ceph osd find $id2
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  expect_false ceph auth get-key client.osd-lockbox.$uuid2
  expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks

  ceph osd destroy osd.$id2 --yes-i-really-mean-it
  ceph osd destroy $id2 --yes-i-really-mean-it
  ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  ceph osd dump | grep osd.$id2 | grep destroyed

  id3=$id2
  uuid3=$(uuidgen)
  ceph osd new $uuid3 $id3 -i $all_secrets
  ceph osd dump | grep osd.$id3 | expect_false grep destroyed
  ceph auth get-key client.osd-lockbox.$uuid3
  ceph auth get-key osd.$id3
  ceph config-key exists dm-crypt/osd/$uuid3/luks

  ceph osd purge osd.$id3 --yes-i-really-mean-it
  expect_false ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  expect_false ceph auth get-key client.osd-lockbox.$uuid3
  expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  ceph osd purge osd.$id3 --yes-i-really-mean-it

  ceph osd purge osd.$id --yes-i-really-mean-it
  expect_false ceph osd find $id
  expect_false ceph auth get-key osd.$id
  expect_false ceph auth get-key client.osd-lockbox.$uuid
  expect_false ceph config-key exists dm-crypt/osd/$uuid/luks

  rm $empty_secrets $empty_json $all_secrets $cephx_only \
    $no_cephx $no_lockbox $bad_json

  for i in $(ceph osd ls); do
    [[ "$i" != "$id" ]]
    [[ "$i" != "$id2" ]]
    [[ "$i" != "$id3" ]]
  done

  [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
  ceph osd setmaxosd $old_maxosd
}

function test_mon_config_key()
{
  key=asdfasdfqwerqwreasdfuniquesa123df
  ceph config-key list | grep -c $key | grep 0
  ceph config-key get $key | grep -c bar | grep 0
  ceph config-key set $key bar
  ceph config-key get $key | grep bar
  ceph config-key list | grep -c $key | grep 1
  ceph config-key dump | grep $key | grep bar
  ceph config-key rm $key
  expect_false ceph config-key get $key
  ceph config-key list | grep -c $key | grep 0
  ceph config-key dump | grep -c $key | grep 0
}

function test_mon_osd()
{
  #
  # osd blacklist
  #
  bl=192.168.0.1:0/1000
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
  ceph osd dump --format=json-pretty | grep $bl
  ceph osd dump | grep "^blacklist $bl"
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl

  bl=192.168.0.1
  # test without nonce, invalid nonce
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl
  expect_false "ceph osd blacklist $bl/-1"
  expect_false "ceph osd blacklist $bl/foo"

  # test with wrong address
  expect_false "ceph osd blacklist 1234.56.78.90/100"

  # Test `clear`
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist clear
  ceph osd blacklist ls | expect_false grep $bl

  #
  # osd crush
  #
  ceph osd crush reweight-all
  ceph osd crush tunables legacy
  ceph osd crush show-tunables | grep argonaut
  ceph osd crush tunables bobtail
  ceph osd crush show-tunables | grep bobtail
  ceph osd crush tunables firefly
  ceph osd crush show-tunables | grep firefly

  ceph osd crush set-tunable straw_calc_version 0
  ceph osd crush get-tunable straw_calc_version | grep 0
  ceph osd crush set-tunable straw_calc_version 1
  ceph osd crush get-tunable straw_calc_version | grep 1

  #
  # require-min-compat-client
  expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
  ceph osd set-require-min-compat-client luminous
  ceph osd dump | grep 'require_min_compat_client luminous'

  #
  # osd scrub
  #
  # how do I tell when these are done?
  ceph osd scrub 0
  ceph osd deep-scrub 0
  ceph osd repair 0

  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
  do
    ceph osd set $f
    ceph osd unset $f
  done
  expect_false ceph osd unset sortbitwise # cannot be unset
  expect_false ceph osd set bogus
  expect_false ceph osd unset bogus
  ceph osd require-osd-release luminous
  # can't lower (or use new command for anything but jewel)
  expect_false ceph osd require-osd-release jewel
  # these are no-ops but should succeed.
  ceph osd set require_jewel_osds
  ceph osd set require_kraken_osds
  expect_false ceph osd unset require_jewel_osds

  ceph osd set noup
  ceph osd down 0
  ceph osd dump | grep 'osd.0 down'
  ceph osd unset noup
  max_run=1000
  for ((i=0; i < $max_run; i++)); do
    if ! ceph osd dump | grep 'osd.0 up'; then
      echo "waiting for osd.0 to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  ceph osd dump | grep 'osd.0 up'

  ceph osd dump | grep 'osd.0 up'
  # ceph osd find expects the OsdName, so both ints and osd.n should work.
  ceph osd find 1
  ceph osd find osd.1
  expect_false ceph osd find osd.xyz
  expect_false ceph osd find xyz
  expect_false ceph osd find 0.1
  ceph --format plain osd find 1 # falls back to json-pretty
  if [ `uname` == Linux ]; then
    ceph osd metadata 1 | grep 'distro'
    ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
  fi
  ceph osd out 0
  ceph osd dump | grep 'osd.0.*out'
  ceph osd in 0
  ceph osd dump | grep 'osd.0.*in'
  ceph osd find 0

  ceph osd add-nodown 0 1
  ceph health detail | grep 'NODOWN'
  ceph osd rm-nodown 0 1
  ! ceph health detail | grep 'NODOWN'

  ceph osd out 0 # so we can mark it as noin later
  ceph osd add-noin 0
  ceph health detail | grep 'NOIN'
  ceph osd rm-noin 0
  ! ceph health detail | grep 'NOIN'
  ceph osd in 0

  ceph osd add-noout 0
  ceph health detail | grep 'NOOUT'
  ceph osd rm-noout 0
  ! ceph health detail | grep 'NOOUT'

  # test osd id parse
  expect_false ceph osd add-noup 797er
  expect_false ceph osd add-nodown u9uwer
  expect_false ceph osd add-noin 78~15
  expect_false ceph osd add-noout 0 all 1

  expect_false ceph osd rm-noup 1234567
  expect_false ceph osd rm-nodown fsadf7
  expect_false ceph osd rm-noin 0 1 any
  expect_false ceph osd rm-noout 790-fd

  ids=`ceph osd ls-tree default`
  for osd in $ids
  do
    ceph osd add-nodown $osd
    ceph osd add-noout $osd
  done
  ceph -s | grep 'NODOWN'
  ceph -s | grep 'NOOUT'
  ceph osd rm-nodown any
  ceph osd rm-noout all
  ! ceph -s | grep 'NODOWN'
  ! ceph -s | grep 'NOOUT'

1519 # make sure mark out preserves weight
1520 ceph osd reweight osd.0 .5
1521 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1522 ceph osd out 0
1523 ceph osd in 0
1524 ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
1525
7c673cae
FG
1526 ceph osd getmap -o $f
1527 [ -s $f ]
1528 rm $f
1529 save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
1530 [ "$save" -gt 0 ]
1531 ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
1532 ceph osd setmaxosd 10
1533 ceph osd getmaxosd | grep 'max_osd = 10'
1534 ceph osd setmaxosd $save
1535 ceph osd getmaxosd | grep "max_osd = $save"
1536
1537 for id in `ceph osd ls` ; do
1538 retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
1539 done
1540
1541 ceph osd rm 0 2>&1 | grep 'EBUSY'
1542
1543 local old_osds=$(echo $(ceph osd ls))
1544 id=`ceph osd create`
1545 ceph osd find $id
1546 ceph osd lost $id --yes-i-really-mean-it
1547 expect_false ceph osd setmaxosd $id
1548 local new_osds=$(echo $(ceph osd ls))
1549 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1550 ceph osd rm $id
1551 done
1552
1553 uuid=`uuidgen`
1554 id=`ceph osd create $uuid`
1555 id2=`ceph osd create $uuid`
1556 [ "$id" = "$id2" ]
1557 ceph osd rm $id
1558
1559 ceph --help osd
1560
1561 # reset max_osd.
1562 ceph osd setmaxosd $id
1563 ceph osd getmaxosd | grep "max_osd = $save"
1564 local max_osd=$save
1565
1566 ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
1567 ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
1568
1569 id=`ceph osd create $uuid $max_osd`
1570 [ "$id" = "$max_osd" ]
1571 ceph osd find $id
1572 max_osd=$((max_osd + 1))
1573 ceph osd getmaxosd | grep "max_osd = $max_osd"
1574
31f18b77
FG
1575 ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
1576 ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
7c673cae
FG
1577 id2=`ceph osd create $uuid`
1578 [ "$id" = "$id2" ]
1579 id2=`ceph osd create $uuid $id`
1580 [ "$id" = "$id2" ]
1581
1582 uuid=`uuidgen`
1583 local gap_start=$max_osd
1584 id=`ceph osd create $uuid $((gap_start + 100))`
1585 [ "$id" = "$((gap_start + 100))" ]
1586 max_osd=$((id + 1))
1587 ceph osd getmaxosd | grep "max_osd = $max_osd"
1588
31f18b77 1589 ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
7c673cae
FG
1590
1591 #
1592 # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
1593 # is repeated and consumes two osd id, not just one.
1594 #
1595 local next_osd
1596 if test "$CEPH_CLI_TEST_DUP_COMMAND" ; then
1597 next_osd=$((gap_start + 1))
1598 else
1599 next_osd=$gap_start
1600 fi
1601 id=`ceph osd create`
1602 [ "$id" = "$next_osd" ]
1603
1604 next_osd=$((id + 1))
1605 id=`ceph osd create $(uuidgen)`
1606 [ "$id" = "$next_osd" ]
1607
1608 next_osd=$((id + 1))
1609 id=`ceph osd create $(uuidgen) $next_osd`
1610 [ "$id" = "$next_osd" ]
1611
1612 local new_osds=$(echo $(ceph osd ls))
1613 for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
1614 [ $id -ge $save ]
1615 ceph osd rm $id
1616 done
1617 ceph osd setmaxosd $save
1618
1619 ceph osd ls
1620 ceph osd pool create data 10
c07f9fc5 1621 ceph osd pool application enable data rados
1622 ceph osd lspools | grep data
1623 ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
1624 ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
1625 ceph osd pool delete data data --yes-i-really-really-mean-it
1626
1627 ceph osd pause
1628 ceph osd dump | grep 'flags.*pauserd,pausewr'
1629 ceph osd unpause
1630
1631 ceph osd tree
1632 ceph osd tree up
1633 ceph osd tree down
1634 ceph osd tree in
1635 ceph osd tree out
c07f9fc5 1636 ceph osd tree destroyed
1637 ceph osd tree up in
1638 ceph osd tree up out
1639 ceph osd tree down in
1640 ceph osd tree down out
1641 ceph osd tree out down
1642 expect_false ceph osd tree up down
1643 expect_false ceph osd tree up destroyed
1644 expect_false ceph osd tree down destroyed
1645 expect_false ceph osd tree up down destroyed
1646 expect_false ceph osd tree in out
1647 expect_false ceph osd tree up foo
1648
1649 ceph osd metadata
1650 ceph osd count-metadata os
1651 ceph osd versions
1652
1653 ceph osd perf
1654 ceph osd blocked-by
1655
1656 ceph osd stat | grep up,
1657}
1658
1659function test_mon_crush()
1660{
1661 f=$TEMP_DIR/map.$$
1662 epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
1663 [ -s $f ]
1664 [ "$epoch" -gt 1 ]
1665 nextepoch=$(( $epoch + 1 ))
1666 echo epoch $epoch nextepoch $nextepoch
1667 rm -f $f.epoch
1668 expect_false ceph osd setcrushmap $nextepoch -i $f
1669 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1670 echo gotepoch $gotepoch
1671 [ "$gotepoch" -eq "$nextepoch" ]
1672 # should be idempotent
1673 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1674 echo epoch $gotepoch
1675 [ "$gotepoch" -eq "$nextepoch" ]
1676 rm $f
1677}
1678
1679function test_mon_osd_pool()
1680{
1681 #
1682 # osd pool
1683 #
1684 ceph osd pool create data 10
c07f9fc5 1685 ceph osd pool application enable data rados
1686 ceph osd pool mksnap data datasnap
1687 rados -p data lssnap | grep datasnap
1688 ceph osd pool rmsnap data datasnap
1689 expect_false ceph osd pool rmsnap pool_fake snapshot
1690 ceph osd pool delete data data --yes-i-really-really-mean-it
1691
1692 ceph osd pool create data2 10
c07f9fc5 1693 ceph osd pool application enable data2 rados
1694 ceph osd pool rename data2 data3
1695 ceph osd lspools | grep data3
1696 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
1697
1698 ceph osd pool create replicated 12 12 replicated
1699 ceph osd pool create replicated 12 12 replicated
1700 ceph osd pool create replicated 12 12 # default is replicated
1701 ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
c07f9fc5 1702 ceph osd pool application enable replicated rados
1703 # should fail because the type is not the same
1704 expect_false ceph osd pool create replicated 12 12 erasure
1705 ceph osd lspools | grep replicated
1706 ceph osd pool create ec_test 1 1 erasure
c07f9fc5 1707 ceph osd pool application enable ec_test rados
7c673cae 1708 set +e
1709 ceph osd count-metadata osd_objectstore | grep 'bluestore'
1710 if [ $? -eq 1 ]; then # enabling ec_overwrites on a non-bluestore pool should fail
7c673cae 1711 ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
c07f9fc5 1712 check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
1713 else
1714 ceph osd pool set ec_test allow_ec_overwrites true || return 1
1715 expect_false ceph osd pool set ec_test allow_ec_overwrites false
1716 fi
1717 set -e
1718 ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
1719 ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
1720}
1721
1722function test_mon_osd_pool_quota()
1723{
1724 #
1725 # test osd pool set/get quota
1726 #
1727
1728 # create tmp pool
1729 ceph osd pool create tmp-quota-pool 36
c07f9fc5 1730 ceph osd pool application enable tmp-quota-pool rados
1731 #
1732 # set erroneous quotas
1733 #
1734 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
1735 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
1736 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
1737 #
1738 # set valid quotas
1739 #
1740 ceph osd pool set-quota tmp-quota-pool max_bytes 10
1741 ceph osd pool set-quota tmp-quota-pool max_objects 10M
1742 #
1743 # get quotas
1744 #
1745 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
1746 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10240k objects'
1747 #
1748 # get quotas in json-pretty format
1749 #
1750 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1751 grep '"quota_max_objects":.*10485760'
1752 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1753 grep '"quota_max_bytes":.*10'
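  # Sanity note on the unit handling exercised above: "10M" objects is
  # parsed with binary prefixes, i.e. 10 * 1024 * 1024 = 10485760, which the
  # plain-text output rounds to "10240k objects".
  [ $((10 * 1024 * 1024)) -eq 10485760 ]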
1754 #
1755 # reset pool quotas
1756 #
1757 ceph osd pool set-quota tmp-quota-pool max_bytes 0
1758 ceph osd pool set-quota tmp-quota-pool max_objects 0
1759 #
1760 # test N/A quotas
1761 #
1762 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
1763 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
1764 #
1765 # cleanup tmp pool
1766 ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
1767}
1768
1769function test_mon_pg()
1770{
1771 # Make sure we start healthy.
1772 wait_for_health_ok
1773
1774 ceph pg debug unfound_objects_exist
1775 ceph pg debug degraded_pgs_exist
224ce89b 1776 ceph pg deep-scrub 1.0
1777 ceph pg dump
1778 ceph pg dump pgs_brief --format=json
1779 ceph pg dump pgs --format=json
1780 ceph pg dump pools --format=json
1781 ceph pg dump osds --format=json
1782 ceph pg dump sum --format=json
1783 ceph pg dump all --format=json
1784 ceph pg dump pgs_brief osds --format=json
1785 ceph pg dump pools osds pgs_brief --format=json
1786 ceph pg dump_json
1787 ceph pg dump_pools_json
1788 ceph pg dump_stuck inactive
1789 ceph pg dump_stuck unclean
1790 ceph pg dump_stuck stale
1791 ceph pg dump_stuck undersized
1792 ceph pg dump_stuck degraded
1793 ceph pg ls
224ce89b 1794 ceph pg ls 1
1795 ceph pg ls stale
1796 expect_false ceph pg ls scrubq
1797 ceph pg ls active stale repair recovering
1798 ceph pg ls 1 active
1799 ceph pg ls 1 active stale
7c673cae 1800 ceph pg ls-by-primary osd.0
224ce89b 1801 ceph pg ls-by-primary osd.0 1
1802 ceph pg ls-by-primary osd.0 active
1803 ceph pg ls-by-primary osd.0 active stale
224ce89b 1804 ceph pg ls-by-primary osd.0 1 active stale
7c673cae 1805 ceph pg ls-by-osd osd.0
224ce89b 1806 ceph pg ls-by-osd osd.0 1
1807 ceph pg ls-by-osd osd.0 active
1808 ceph pg ls-by-osd osd.0 active stale
224ce89b 1809 ceph pg ls-by-osd osd.0 1 active stale
1810 ceph pg ls-by-pool rbd
1811 ceph pg ls-by-pool rbd active stale
1812 # can't test this...
1813 # ceph pg force_create_pg
1814 ceph pg getmap -o $TEMP_DIR/map.$$
1815 [ -s $TEMP_DIR/map.$$ ]
1816 ceph pg map 1.0 | grep acting
1817 ceph pg repair 1.0
1818 ceph pg scrub 1.0
1819
1820 ceph osd set-full-ratio .962
1821 ceph osd dump | grep '^full_ratio 0.962'
1822 ceph osd set-backfillfull-ratio .912
1823 ceph osd dump | grep '^backfillfull_ratio 0.912'
1824 ceph osd set-nearfull-ratio .892
1825 ceph osd dump | grep '^nearfull_ratio 0.892'
1826
1827 # Check health status
1828 ceph osd set-nearfull-ratio .913
1829 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
1830 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
1831 ceph osd set-nearfull-ratio .892
1832 ceph osd set-backfillfull-ratio .963
1833 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
1834 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
1835 ceph osd set-backfillfull-ratio .912
1836
1837 # Check injected full results
1838 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
1839 wait_for_health "OSD_NEARFULL"
1840 ceph health detail | grep "osd.0 is near full"
1841 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
1842 wait_for_health_ok
1843
7c673cae 1844 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
1845 wait_for_health "OSD_BACKFILLFULL"
1846 ceph health detail | grep "osd.1 is backfill full"
1847 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
1848 wait_for_health_ok
1849
1850 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
1851 # failsafe and full are the same as far as the monitor is concerned
1852 wait_for_health "OSD_FULL"
1853 ceph health detail | grep "osd.2 is full"
1854 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
1855 wait_for_health_ok
1856
7c673cae 1857 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
224ce89b 1858 wait_for_health "OSD_FULL"
31f18b77 1859 ceph health detail | grep "osd.0 is full"
7c673cae 1860 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
1861 wait_for_health_ok
1862
1863 ceph pg stat | grep 'pgs:'
1864 ceph pg 1.0 query
1865 ceph tell 1.0 query
1866 ceph quorum enter
1867 ceph quorum_status
1868 ceph report | grep osd_stats
1869 ceph status
1870 ceph -s
1871
1872 #
1873 # tell osd version
1874 #
1875 ceph tell osd.0 version
1876 expect_false ceph tell osd.9999 version
1877 expect_false ceph tell osd.foo version
1878
1879 # back to pg stuff
1880
1881 ceph tell osd.0 dump_pg_recovery_stats | grep Started
1882
1883 ceph osd reweight 0 0.9
1884 expect_false ceph osd reweight 0 -1
1885 ceph osd reweight osd.0 1
1886
1887 ceph osd primary-affinity osd.0 .9
1888 expect_false ceph osd primary-affinity osd.0 -2
1889 expect_false ceph osd primary-affinity osd.9999 .5
1890 ceph osd primary-affinity osd.0 1
1891
1892 ceph osd pool set rbd size 2
1893 ceph osd pg-temp 1.0 0 1
1894 ceph osd pg-temp 1.0 osd.1 osd.0
1895 expect_false ceph osd pg-temp 1.0 0 1 2
7c673cae 1896 expect_false ceph osd pg-temp asdf qwer
1897 expect_false ceph osd pg-temp 1.0 asdf
1898 expect_false ceph osd pg-temp 1.0
1899
1900 # don't test ceph osd primary-temp for now
1901}
1902
1903function test_mon_osd_pool_set()
1904{
1905 TEST_POOL_GETSET=pool_getset
1906 ceph osd pool create $TEST_POOL_GETSET 1
c07f9fc5 1907 ceph osd pool application enable $TEST_POOL_GETSET rados
1908 wait_for_clean
1909 ceph osd pool get $TEST_POOL_GETSET all
1910
31f18b77 1911 for s in pg_num pgp_num size min_size crush_rule; do
1912 ceph osd pool get $TEST_POOL_GETSET $s
1913 done
1914
1915 old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
1916 (( new_size = old_size + 1 ))
1917 ceph osd pool set $TEST_POOL_GETSET size $new_size
1918 ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
1919 ceph osd pool set $TEST_POOL_GETSET size $old_size
1920
1921 ceph osd pool create pool_erasure 1 1 erasure
c07f9fc5 1922 ceph osd pool application enable pool_erasure rados
1923 wait_for_clean
1924 set +e
1925 ceph osd pool set pool_erasure size 4444 2>$TMPFILE
1926 check_response 'not change the size'
1927 set -e
1928 ceph osd pool get pool_erasure erasure_code_profile
1929
1930 auid=5555
1931 ceph osd pool set $TEST_POOL_GETSET auid $auid
1932 ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
1933 ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
1934 ceph osd pool set $TEST_POOL_GETSET auid 0
1935
1936 for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
1937 ceph osd pool set $TEST_POOL_GETSET $flag false
1938 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1939 ceph osd pool set $TEST_POOL_GETSET $flag true
1940 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1941 ceph osd pool set $TEST_POOL_GETSET $flag 1
1942 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1943 ceph osd pool set $TEST_POOL_GETSET $flag 0
1944 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1945 expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
1946 expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
1947 done
1948
1949 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1950 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
1951 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
1952 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
1953 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1954
1955 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1956 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
1957 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
1958 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
1959 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1960
1961 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1962 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
1963 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
1964 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
1965 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1966
1967 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1968 ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
1969 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
1970 ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
1971 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1972
1973 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1974 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
1975 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
1976 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
1977 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1978
1979 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1980 ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
1981 ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
1982 ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
1983 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1984
1985 ceph osd pool set $TEST_POOL_GETSET nopgchange 1
1986 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
1987 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1988 ceph osd pool set $TEST_POOL_GETSET nopgchange 0
1989 ceph osd pool set $TEST_POOL_GETSET pg_num 10
1990 wait_for_clean
1991 ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1992
1993 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
c07f9fc5 1994 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
1995 ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
1996 ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
1997 wait_for_clean
1998 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
c07f9fc5 1999 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32 + 1))
2000 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
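  # The increase above is capped per OSD: growing pg_num by 32 * num_osds in
  # one step is accepted, while one more PG is rejected (presumably governed
  # by mon_osd_max_split_count, which defaults to 32).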
2001
2002 ceph osd pool set $TEST_POOL_GETSET nosizechange 1
2003 expect_false ceph osd pool set $TEST_POOL_GETSET size 2
2004 expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
2005 ceph osd pool set $TEST_POOL_GETSET nosizechange 0
2006 ceph osd pool set $TEST_POOL_GETSET size 2
2007 wait_for_clean
2008 ceph osd pool set $TEST_POOL_GETSET min_size 2
2009
2010 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
2011 ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
2012
2013 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
2014 ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
2015
7c673cae 2016 ceph osd pool get rbd crush_rule | grep 'crush_rule: '
2017
2018 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2019 ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
2020 ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
2021 ceph osd pool set $TEST_POOL_GETSET compression_mode unset
2022 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2023
2024 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2025 ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
2026 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
2027 ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
2028 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2029
2030 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2031 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
2032 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
2033 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
2034 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
2035 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
2036 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2037
2038 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2039 ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
2040 ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
2041 ceph osd pool set $TEST_POOL_GETSET csum_type unset
2042 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2043
2044 for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2045 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2046 ceph osd pool set $TEST_POOL_GETSET $size 100
2047 ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
2048 ceph osd pool set $TEST_POOL_GETSET $size 0
2049 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2050 done
2051
2052 ceph osd pool set $TEST_POOL_GETSET nodelete 1
2053 expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2054 ceph osd pool set $TEST_POOL_GETSET nodelete 0
2055 ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2056
2057}
2058
2059function test_mon_osd_tiered_pool_set()
2060{
2061 # this is really a tier pool
2062 ceph osd pool create real-tier 2
2063 ceph osd tier add rbd real-tier
2064
2065 ceph osd pool set real-tier hit_set_type explicit_hash
2066 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
2067 ceph osd pool set real-tier hit_set_type explicit_object
2068 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
2069 ceph osd pool set real-tier hit_set_type bloom
2070 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
2071 expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
2072 ceph osd pool set real-tier hit_set_period 123
2073 ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
2074 ceph osd pool set real-tier hit_set_count 12
2075 ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
2076 ceph osd pool set real-tier hit_set_fpp .01
2077 ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
2078
2079 ceph osd pool set real-tier target_max_objects 123
2080 ceph osd pool get real-tier target_max_objects | \
2081 grep 'target_max_objects:[ \t]\+123'
2082 ceph osd pool set real-tier target_max_bytes 123456
2083 ceph osd pool get real-tier target_max_bytes | \
2084 grep 'target_max_bytes:[ \t]\+123456'
2085 ceph osd pool set real-tier cache_target_dirty_ratio .123
2086 ceph osd pool get real-tier cache_target_dirty_ratio | \
2087 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
2088 expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
2089 expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
2090 ceph osd pool set real-tier cache_target_dirty_high_ratio .123
2091 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
2092 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
2093 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
2094 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
2095 ceph osd pool set real-tier cache_target_full_ratio .123
2096 ceph osd pool get real-tier cache_target_full_ratio | \
2097 grep 'cache_target_full_ratio:[ \t]\+0.123'
2098 ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
2099 ceph osd pool set real-tier cache_target_full_ratio 1.0
2100 ceph osd pool set real-tier cache_target_full_ratio 0
2101 expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
2102 ceph osd pool set real-tier cache_min_flush_age 123
2103 ceph osd pool get real-tier cache_min_flush_age | \
2104 grep 'cache_min_flush_age:[ \t]\+123'
2105 ceph osd pool set real-tier cache_min_evict_age 234
2106 ceph osd pool get real-tier cache_min_evict_age | \
2107 grep 'cache_min_evict_age:[ \t]\+234'
2108
2109 # this is not a tier pool
2110 ceph osd pool create fake-tier 2
c07f9fc5 2111 ceph osd pool application enable fake-tier rados
2112 wait_for_clean
2113
2114 expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
2115 expect_false ceph osd pool get fake-tier hit_set_type
2116 expect_false ceph osd pool set fake-tier hit_set_type explicit_object
2117 expect_false ceph osd pool get fake-tier hit_set_type
2118 expect_false ceph osd pool set fake-tier hit_set_type bloom
2119 expect_false ceph osd pool get fake-tier hit_set_type
2120 expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
2121 expect_false ceph osd pool set fake-tier hit_set_period 123
2122 expect_false ceph osd pool get fake-tier hit_set_period
2123 expect_false ceph osd pool set fake-tier hit_set_count 12
2124 expect_false ceph osd pool get fake-tier hit_set_count
2125 expect_false ceph osd pool set fake-tier hit_set_fpp .01
2126 expect_false ceph osd pool get fake-tier hit_set_fpp
2127
2128 expect_false ceph osd pool set fake-tier target_max_objects 123
2129 expect_false ceph osd pool get fake-tier target_max_objects
2130 expect_false ceph osd pool set fake-tier target_max_bytes 123456
2131 expect_false ceph osd pool get fake-tier target_max_bytes
2132 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
2133 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2134 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
2135 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
2136 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
2137 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2138 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
2139 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
2140 expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
2141 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2142 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
2143 expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
2144 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
2145 expect_false ceph osd pool set fake-tier cache_min_flush_age 123
2146 expect_false ceph osd pool get fake-tier cache_min_flush_age
2147 expect_false ceph osd pool set fake-tier cache_min_evict_age 234
2148 expect_false ceph osd pool get fake-tier cache_min_evict_age
2149
2150 ceph osd tier remove rbd real-tier
2151 ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
2152 ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
2153}
2154
2155function test_mon_osd_erasure_code()
2156{
2157
2158 ceph osd erasure-code-profile set fooprofile a=b c=d
2159 ceph osd erasure-code-profile set fooprofile a=b c=d
2160 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2161 ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
2162 ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2163 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
2164 #
2165 # cleanup by removing profile 'fooprofile'
2166 ceph osd erasure-code-profile rm fooprofile
2167}
2168
2169function test_mon_osd_misc()
2170{
2171 set +e
2172
2173 # expect error about missing 'pool' argument
2174 ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
2175
2176 # expect error about unused argument foo
2177 ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
2178
2179 # expect "not in range" for invalid full ratio
2180 ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22
2181
2182 # expect "higher than 100" for an invalid overload percentage
2183 ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
2184
2185 set -e
2186
2187 ceph osd reweight-by-utilization 110
2188 ceph osd reweight-by-utilization 110 .5
2189 expect_false ceph osd reweight-by-utilization 110 0
2190 expect_false ceph osd reweight-by-utilization 110 -0.1
2191 ceph osd test-reweight-by-utilization 110 .5 --no-increasing
2192 ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
2193 expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
2194 expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
2195 ceph osd reweight-by-pg 110
2196 ceph osd test-reweight-by-pg 110 .5
2197 ceph osd reweight-by-pg 110 rbd
2198 ceph osd reweight-by-pg 110 .5 rbd
2199 expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
2200}
2201
2202function test_mon_heap_profiler()
2203{
2204 do_test=1
2205 set +e
2206 # expect 'heap' commands to be correctly parsed
2207 ceph heap stats 2>$TMPFILE
2208 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2209 echo "tcmalloc not enabled; skip heap profiler test"
2210 do_test=0
2211 fi
2212 set -e
2213
2214 [[ $do_test -eq 0 ]] && return 0
2215
2216 ceph heap start_profiler
2217 ceph heap dump
2218 ceph heap stop_profiler
2219 ceph heap release
2220}
2221
2222function test_admin_heap_profiler()
2223{
2224 do_test=1
2225 set +e
2226 # expect 'heap' commands to be correctly parsed
2227 ceph heap stats 2>$TMPFILE
2228 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2229 echo "tcmalloc not enabled; skip heap profiler test"
2230 do_test=0
2231 fi
2232 set -e
2233
2234 [[ $do_test -eq 0 ]] && return 0
2235
2236 local admin_socket=$(get_admin_socket osd.0)
2237
2238 $SUDO ceph --admin-daemon $admin_socket heap start_profiler
2239 $SUDO ceph --admin-daemon $admin_socket heap dump
2240 $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
2241 $SUDO ceph --admin-daemon $admin_socket heap release
2242}
2243
2244function test_osd_bench()
2245{
2246 # test osd bench limits
2247 # As we should not rely on defaults (as they may change over time),
2248 # let's inject some values and perform some simple tests
2249 # max iops: 10 # 10 IOPS
2250 # max throughput: 10485760 # 10MB/s
2251 # max block size: 2097152 # 2MB
2252 # duration: 10 # 10 seconds
2253
2254 local args="\
2255 --osd-bench-duration 10 \
2256 --osd-bench-max-block-size 2097152 \
2257 --osd-bench-large-size-max-throughput 10485760 \
2258 --osd-bench-small-size-max-iops 10"
2259 ceph tell osd.0 injectargs ${args## }
2260
2261 # anything with a bs larger than 2097152 must fail
2262 expect_false ceph tell osd.0 bench 1 2097153
2263 # but using the injected 'osd_bench_max_block_size' value must succeed
2264 ceph tell osd.0 bench 1 2097152
2265
2266 # we assume 1MB as a large bs; anything lower is a small bs
2267 # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
2268 # max count: 409600 (bytes)
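  # Worked example of the cap above (illustrative, mirrors the injected
  # values): 10 IOPS * 10 s * 4096 B = 409600 bytes.
  local small_bs_cap=$((10 * 10 * 4096))
  [ "$small_bs_cap" -eq 409600 ]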
2269
2270 # more than max count must not be allowed
2271 expect_false ceph tell osd.0 bench 409601 4096
2272 # but 409600 must succeed
2273 ceph tell osd.0 bench 409600 4096
2274
2275 # for a large bs, we are limited by throughput.
2276 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
2277 # the max count will be (10MB * 10s) = 100MB
2278 # max count: 104857600 (bytes)
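  # Likewise for the large-bs cap (illustrative): 10485760 B/s * 10 s = 104857600 bytes.
  local large_bs_cap=$((10485760 * 10))
  [ "$large_bs_cap" -eq 104857600 ]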
2279
2280 # more than max count must not be allowed
2281 expect_false ceph tell osd.0 bench 104857601 2097152
2282 # up to max count must be allowed
2283 ceph tell osd.0 bench 104857600 2097152
2284}
2285
2286function test_osd_negative_filestore_merge_threshold()
2287{
2288 $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
2289 expect_config_value "osd.0" "filestore_merge_threshold" -1
2290}
2291
2292function test_mon_tell()
2293{
2294 ceph tell mon.a version
2295 ceph tell mon.b version
2296 expect_false ceph tell mon.foo version
2297
2298 sleep 1
2299
c07f9fc5 2300 ceph_watch_start debug audit
7c673cae 2301 ceph tell mon.a version
31f18b77 2302 ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
7c673cae 2303
c07f9fc5 2304 ceph_watch_start debug audit
7c673cae 2305 ceph tell mon.b version
31f18b77 2306 ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
2307}
2308
2309function test_mon_ping()
2310{
2311 ceph ping mon.a
2312 ceph ping mon.b
2313 expect_false ceph ping mon.foo
2314
2315 ceph ping mon.\*
2316}
2317
2318function test_mon_deprecated_commands()
2319{
2320 # current DEPRECATED commands are:
2321 # ceph compact
2322 # ceph scrub
2323 # ceph sync force
2324 #
2325 # Testing should be accomplished by setting
2326 # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
2327 # each one of these commands.
2328
2329 ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
2330 expect_false ceph tell mon.a compact 2> $TMPFILE
2331 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2332
2333 expect_false ceph tell mon.a scrub 2> $TMPFILE
2334 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2335
2336 expect_false ceph tell mon.a sync force 2> $TMPFILE
2337 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2338
2339 ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
2340}
2341
2342function test_mon_cephdf_commands()
2343{
2344 # ceph df detail:
2345 # pool section:
2346 # RAW USED: the approximate raw space consumed by the pool, out of the raw total
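 # e.g. for the 2-replica pool created below, RAW USED should be roughly
 # twice the pool's USED bytes, which is what the final comparison checks.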
2347
2348 ceph osd pool create cephdf_for_test 32 32 replicated
c07f9fc5 2349 ceph osd pool application enable cephdf_for_test rados
2350 ceph osd pool set cephdf_for_test size 2
2351
2352 dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
2353 rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
2354
2355 # wait for the written object to be visible
2356 for i in `seq 1 10`; do
2357 rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
2358 sleep 1
2359 done
2360 # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
2361 # to sync mon with osd
2362 flush_pg_stats
2363 local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
2364 cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
2365 raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`
2366
2367 ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
2368 rm ./cephdf_for_test
2369
2370 expect_false test $cal_raw_used_size != $raw_used_size
2371}
2372
2373function test_mon_pool_application()
2374{
2375 ceph osd pool create app_for_test 10
2376
2377 ceph osd pool application enable app_for_test rbd
2378 expect_false ceph osd pool application enable app_for_test rgw
2379 ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
2380 ceph osd pool ls detail | grep "application rbd,rgw"
2381 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2382
2383 expect_false ceph osd pool application set app_for_test cephfs key value
2384 ceph osd pool application set app_for_test rbd key1 value1
2385 ceph osd pool application set app_for_test rbd key2 value2
2386 ceph osd pool application set app_for_test rgw key1 value1
2387
2388 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
2389
2390 ceph osd pool application rm app_for_test rgw key1
2391 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
2392 ceph osd pool application rm app_for_test rbd key2
2393 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
2394 ceph osd pool application rm app_for_test rbd key1
2395 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2396 ceph osd pool application rm app_for_test rbd key1 # should be idempotent
2397
2398 expect_false ceph osd pool application disable app_for_test rgw
2399 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2400 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
2401 ceph osd pool ls detail | grep "application rbd"
2402 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
2403
2404 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2405 ceph osd pool ls detail | grep -v "application "
2406 ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
2407
2408 ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
2409}
2410
2411function test_mon_tell_help_command()
2412{
2413 ceph tell mon.a help
2414
2415 # wrong target
2416 expect_false ceph tell mon.zzz help
2417}
2418
2419function test_mon_stdin_stdout()
2420{
2421 echo foo | ceph config-key set test_key -i -
2422 ceph config-key get test_key -o - | grep -c foo | grep -q 1
2423}
2424
2425function test_osd_tell_help_command()
2426{
2427 ceph tell osd.1 help
2428 expect_false ceph tell osd.100 help
2429}
2430
2431function test_osd_compact()
2432{
2433 ceph tell osd.1 compact
c07f9fc5 2434 $SUDO ceph daemon osd.1 compact
2435}
2436
2437function test_mds_tell_help_command()
2438{
2439 local FS_NAME=cephfs
2440 if ! mds_exists ; then
2441 echo "Skipping test, no MDS found"
2442 return
2443 fi
2444
2445 remove_all_fs
2446 ceph osd pool create fs_data 10
2447 ceph osd pool create fs_metadata 10
2448 ceph fs new $FS_NAME fs_metadata fs_data
2449 wait_mds_active $FS_NAME
2450
2451
2452 ceph tell mds.a help
2453 expect_false ceph tell mds.z help
2454
2455 remove_all_fs
2456 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
2457 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
2458}
2459
224ce89b 2460function test_mgr_tell()
2461{
2462 ceph tell mgr help
c07f9fc5 2463 #ceph tell mgr fs status # see http://tracker.ceph.com/issues/20761
224ce89b 2464 ceph tell mgr osd status
2465}
2466
2467#
2468# New tests should be added to the TESTS array below
2469#
2470# Individual tests may be run using the '-t <testname>' argument
2471# The user can specify '-t <testname>' as many times as she wants
2472#
2473# Tests will be run in order presented in the TESTS array, or in
2474# the order specified by the '-t <testname>' options.
2475#
2476# '-l' will list all the available test names
2477# '-h' will show usage
2478#
2479# The test maintains backward compatibility: not specifying arguments
2480# will run all tests following the order they appear in the TESTS array.
2481#
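# Example invocations (illustrative):
#   $0 -l                              # list available tests
#   $0 -t mon_osd_pool -t osd_bench    # run selected tests
#   $0 --test-mon --no-sanity-check    # run all mon tests, skip sanity checks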
2482
2483set +x
2484MON_TESTS+=" mon_injectargs"
2485MON_TESTS+=" mon_injectargs_SI"
2486for i in `seq 9`; do
2487 MON_TESTS+=" tiering_$i";
2488done
2489MON_TESTS+=" auth"
2490MON_TESTS+=" auth_profiles"
2491MON_TESTS+=" mon_misc"
2492MON_TESTS+=" mon_mon"
2493MON_TESTS+=" mon_osd"
c07f9fc5 2494MON_TESTS+=" mon_config_key"
2495MON_TESTS+=" mon_crush"
2496MON_TESTS+=" mon_osd_create_destroy"
2497MON_TESTS+=" mon_osd_pool"
2498MON_TESTS+=" mon_osd_pool_quota"
2499MON_TESTS+=" mon_pg"
2500MON_TESTS+=" mon_osd_pool_set"
2501MON_TESTS+=" mon_osd_tiered_pool_set"
2502MON_TESTS+=" mon_osd_erasure_code"
2503MON_TESTS+=" mon_osd_misc"
2504MON_TESTS+=" mon_heap_profiler"
2505MON_TESTS+=" mon_tell"
2506MON_TESTS+=" mon_ping"
2507MON_TESTS+=" mon_deprecated_commands"
2508MON_TESTS+=" mon_caps"
2509MON_TESTS+=" mon_cephdf_commands"
31f18b77 2510MON_TESTS+=" mon_tell_help_command"
c07f9fc5 2511MON_TESTS+=" mon_stdin_stdout"
31f18b77 2512
2513OSD_TESTS+=" osd_bench"
2514OSD_TESTS+=" osd_negative_filestore_merge_threshold"
2515OSD_TESTS+=" tiering_agent"
2516OSD_TESTS+=" admin_heap_profiler"
31f18b77 2517OSD_TESTS+=" osd_tell_help_command"
224ce89b 2518OSD_TESTS+=" osd_compact"
2519
2520MDS_TESTS+=" mds_tell"
2521MDS_TESTS+=" mon_mds"
2522MDS_TESTS+=" mon_mds_metadata"
2523MDS_TESTS+=" mds_tell_help_command"
2524
224ce89b 2525MGR_TESTS+=" mgr_tell"
2526
2527TESTS+=$MON_TESTS
2528TESTS+=$OSD_TESTS
2529TESTS+=$MDS_TESTS
31f18b77 2530TESTS+=$MGR_TESTS
2531
2532#
2533# "main" follows
2534#
2535
2536function list_tests()
2537{
2538 echo "AVAILABLE TESTS"
2539 for i in $TESTS; do
2540 echo " $i"
2541 done
2542}
2543
2544function usage()
2545{
2546 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2547}
2548
2549tests_to_run=()
2550
2551sanity_check=true
2552
2553while [[ $# -gt 0 ]]; do
2554 opt=$1
2555
2556 case "$opt" in
2557 "-l" )
2558 do_list=1
2559 ;;
2560 "--asok-does-not-need-root" )
2561 SUDO=""
2562 ;;
2563 "--no-sanity-check" )
2564 sanity_check=false
2565 ;;
2566 "--test-mon" )
2567 tests_to_run+="$MON_TESTS"
2568 ;;
2569 "--test-osd" )
2570 tests_to_run+="$OSD_TESTS"
2571 ;;
2572 "--test-mds" )
2573 tests_to_run+="$MDS_TESTS"
2574 ;;
2575 "--test-mgr" )
2576 tests_to_run+="$MGR_TESTS"
2577 ;;
2578 "-t" )
2579 shift
2580 if [[ -z "$1" ]]; then
2581 echo "missing argument to '-t'"
2582 usage ;
2583 exit 1
2584 fi
2585 tests_to_run+=" $1"
2586 ;;
2587 "-h" )
2588 usage ;
2589 exit 0
2590 ;;
2591 esac
2592 shift
2593done
2594
2595if [[ $do_list -eq 1 ]]; then
2596 list_tests ;
2597 exit 0
2598fi
2599
2600ceph osd pool create rbd 10
2601
2602if test -z "$tests_to_run" ; then
2603 tests_to_run="$TESTS"
2604fi
2605
2606if $sanity_check ; then
2607 wait_no_osd_down
2608fi
2609for i in $tests_to_run; do
2610 if $sanity_check ; then
2611 check_no_osd_down
2612 fi
2613 set -x
2614 test_${i}
2615 set +x
2616done
2617if $sanity_check ; then
2618 check_no_osd_down
2619fi
2620
2621set -x
2622
2623echo OK