#!/bin/bash -x
# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab

source $(dirname $0)/../../standalone/ceph-helpers.sh

set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}
export CEPH_DEV=1

function get_admin_socket()
{
  local client=$1

  if test -n "$CEPH_ASOK_DIR";
  then
    echo $(get_asok_dir)/$client.asok
  else
    local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
    echo "/var/run/ceph/$cluster-$client.asok"
  fi
}
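
# Illustrative usage of get_admin_socket (kept commented out so this run is
# unchanged): the helper only prints a path; the commands below are an
# assumption added for documentation, not used elsewhere in this script.
#   asok=$(get_admin_socket osd.0)
#   ceph --admin-daemon $asok config show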

function check_no_osd_down()
{
  ! ceph osd dump | grep ' down '
}

function wait_no_osd_down()
{
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_no_osd_down ; then
      echo "waiting for osd(s) to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  check_no_osd_down
}

function expect_false()
{
  set -x
  if "$@"; then return 1; else return 0; fi
}
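
# A minimal illustration of expect_false (commented out): it simply inverts
# the exit status of the wrapped command.
#   expect_false false   # succeeds, because 'false' fails
#   expect_false true    # fails, because 'true' succeeds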


TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0

TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)

#
# retry_eagain max cmd args ...
#
# retry "cmd args ..." if it exits with an error and its output contains
# the string EAGAIN, at most $max times
#
function retry_eagain()
{
  local max=$1
  shift
  local status
  local tmpfile=$TEMP_DIR/retry_eagain.$$
  local count
  for count in $(seq 1 $max) ; do
    status=0
    "$@" > $tmpfile 2>&1 || status=$?
    if test $status = 0 ||
        ! grep --quiet EAGAIN $tmpfile ; then
      break
    fi
    sleep 1
  done
  if test $count = $max ; then
    echo retried with non-zero exit status, $max times: "$@" >&2
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}
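
# Illustrative usage of retry_eagain (commented out here): retry a mon command
# up to 5 times while it keeps failing with EAGAIN in its output.
#   retry_eagain 5 ceph osd setmaxosd 10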

#
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of cmd arg ... if the output contains
# ENXIO.
#
function map_enxio_to_eagain()
{
  local status=0
  local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$

  "$@" > $tmpfile 2>&1 || status=$?
  if test $status != 0 &&
      grep --quiet ENXIO $tmpfile ; then
    echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
  fi
  cat $tmpfile
  rm $tmpfile
  return $status
}
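
# The two helpers above are meant to be combined: treat ENXIO (e.g. from an
# OSD that is not up yet) as a retryable EAGAIN.  This is exactly how
# test_mon_osd below calls them; shown here (commented out) for reference:
#   retry_eagain 5 map_enxio_to_eagain ceph tell osd.0 version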

function check_response()
{
  expected_string=$1
  retcode=$2
  expected_retcode=$3
  if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
    echo "return code invalid: got $retcode, expected $expected_retcode" >&2
    exit 1
  fi

  if ! grep --quiet -- "$expected_string" $TMPFILE ; then
    echo "Didn't find $expected_string in output" >&2
    cat $TMPFILE >&2
    exit 1
  fi
}
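
# Illustrative usage of check_response (commented out), mirroring the pattern
# used by test_tiering_5 below: run a command that is expected to fail,
# capture stderr in $TMPFILE, then assert on the error text.
#   ceph osd tier remove slow cache3 2> $TMPFILE || true
#   check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"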

function get_config_value_or_die()
{
  local target config_opt raw val

  target=$1
  config_opt=$2

  raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
  if [[ $? -ne 0 ]]; then
    echo "error obtaining config opt '$config_opt' from '$target': $raw"
    exit 1
  fi

  raw=`echo $raw | sed -e 's/[{} "]//g'`
  val=`echo $raw | cut -f2 -d:`

  echo "$val"
  return 0
}

function expect_config_value()
{
  local target config_opt expected_val val
  target=$1
  config_opt=$2
  expected_val=$3

  val=$(get_config_value_or_die $target $config_opt)

  if [[ "$val" != "$expected_val" ]]; then
    echo "expected '$expected_val', got '$val'"
    exit 1
  fi
}
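
# Illustrative usage (commented out), as done in test_mon_injectargs_SI below:
# read a daemon option over the admin socket and assert on its current value.
#   get_config_value_or_die "mon.a" "mon_pg_warn_min_objects"
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10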

function ceph_watch_start()
{
  local watch_opt=--watch

  if [ -n "$1" ]; then
    watch_opt=--watch-$1
    if [ -n "$2" ]; then
      watch_opt+=" --watch-channel $2"
    fi
  fi

  CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
  ceph $watch_opt > $CEPH_WATCH_FILE &
  CEPH_WATCH_PID=$!

  # wait until the "ceph" client is connected and receiving
  # log messages from the monitor
  for i in `seq 3`; do
    grep -q "cluster" $CEPH_WATCH_FILE && break
    sleep 1
  done
}

function ceph_watch_wait()
{
  local regexp=$1
  local timeout=30

  if [ -n "$2" ]; then
    timeout=$2
  fi

  for i in `seq ${timeout}`; do
    grep -q "$regexp" $CEPH_WATCH_FILE && break
    sleep 1
  done

  kill $CEPH_WATCH_PID

  if ! grep "$regexp" $CEPH_WATCH_FILE; then
    echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
    cat $CEPH_WATCH_FILE >&2
    return 1
  fi
}
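
# Illustrative usage of the watch pair (commented out), as test_mon_misc does
# below: start a background 'ceph --watch*' reader, emit a cluster log entry,
# then block until the matching line shows up in the watch file.
#   ceph_watch_start
#   ceph log "this is a test log message $$.$(date)"
#   ceph_watch_wait "this is a test log message $$"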

function test_mon_injectargs()
{
  CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ! grep "the.dump" $TMPFILE || return 1
  ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
  ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'false'"
  ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true'"
  ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
  check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
  expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
      ceph tell osd.0 injectargs -- '--osd_op_history_duration'

  ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200' >& $TMPFILE || return 1
  check_response "osd_deep_scrub_interval = '2419200.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon_probe_timeout 2' >& $TMPFILE || return 1
  check_response "mon_probe_timeout = '2.000000' (not observed, change may require restart)"

  ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
  check_response "mon_lease = '6.000000' (not observed, change may require restart)"

  # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
  expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 >& $TMPFILE || return 1
  check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
}

function test_mon_injectargs_SI()
{
  # Test SI units during injectargs and 'config set'
  # We only aim at testing that the units are parsed correctly, and don't
  # intend to test whether the options being set actually expect SI units
  # to be passed.
  # Keep in mind that all integer-based options (i.e., INT, LONG, U32, U64)
  # will accept SI unit modifiers.
  initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
  check_response "'10F': (22) Invalid argument"
  # now test with injectargs
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
  ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
  expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
  expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
  expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
  $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}

function test_tiering_agent()
{
  local slow=slow_eviction
  local fast=fast_eviction
  ceph osd pool create $slow 1 1
  ceph osd pool application enable $slow rados
  ceph osd pool create $fast 1 1
  ceph osd tier add $slow $fast
  ceph osd tier cache-mode $fast writeback
  ceph osd tier set-overlay $slow $fast
  ceph osd pool set $fast hit_set_type bloom
  rados -p $slow put obj1 /etc/group
  ceph osd pool set $fast target_max_objects 1
  ceph osd pool set $fast hit_set_count 1
  ceph osd pool set $fast hit_set_period 5
  # wait for the object to be evicted from the cache
  local evicted
  evicted=false
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      evicted=true
      break
    fi
    sleep 1
  done
  $evicted # assert
  # the object is proxy read and promoted to the cache
  rados -p $slow get obj1 - >/dev/null
  # wait for the promoted object to be evicted again
  evicted=false
  for i in `seq 1 300` ; do
    if ! rados -p $fast ls | grep obj1 ; then
      evicted=true
      break
    fi
    sleep 1
  done
  $evicted # assert
  ceph osd tier remove-overlay $slow
  ceph osd tier remove $slow $fast
  ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
  ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}

function test_tiering_1()
{
  # tiering
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create slow2 2
  ceph osd pool application enable slow2 rados
  ceph osd pool create cache 2
  ceph osd pool create cache2 2
  ceph osd tier add slow cache
  ceph osd tier add slow cache2
  expect_false ceph osd tier add slow2 cache
  # test some state transitions
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache readonly
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  expect_false ceph osd tier cache-mode cache forward
  ceph osd tier cache-mode cache forward --yes-i-really-mean-it
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache writeback
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache writeback
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  # test with dirty objects in the tier pool
  # tier pool currently set to 'writeback'
  rados -p cache put /etc/passwd /etc/passwd
  flush_pg_stats
  # 1 dirty object in pool 'cache'
  ceph osd tier cache-mode cache proxy
  expect_false ceph osd tier cache-mode cache none
  expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  ceph osd tier cache-mode cache writeback
  # remove object from tier pool
  rados -p cache rm /etc/passwd
  rados -p cache cache-flush-evict-all
  flush_pg_stats
  # no dirty objects in pool 'cache'
  ceph osd tier cache-mode cache proxy
  ceph osd tier cache-mode cache none
  ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
  TRIES=0
  while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
  do
    grep 'currently creating pgs' $TMPFILE
    TRIES=$(( $TRIES + 1 ))
    test $TRIES -ne 60
    sleep 3
  done
  expect_false ceph osd pool set cache pg_num 4
  ceph osd tier cache-mode cache none
  ceph osd tier set-overlay slow cache
  expect_false ceph osd tier set-overlay slow cache2
  expect_false ceph osd tier remove slow cache
  ceph osd tier remove-overlay slow
  ceph osd tier set-overlay slow cache2
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache
  ceph osd tier add slow2 cache
  expect_false ceph osd tier set-overlay slow cache
  ceph osd tier set-overlay slow2 cache
  ceph osd tier remove-overlay slow2
  ceph osd tier remove slow2 cache
  ceph osd tier remove slow cache2

  # make sure adding a non-empty pool as a tier fails without --force-nonempty
  rados -p cache2 put /etc/passwd /etc/passwd
  while ! ceph df | grep cache2 | grep ' 1 ' ; do
    echo waiting for pg stats to flush
    sleep 2
  done
  expect_false ceph osd tier add slow cache2
  ceph osd tier add slow cache2 --force-nonempty
  ceph osd tier remove slow cache2

  ceph osd pool ls | grep cache2
  ceph osd pool ls -f json-pretty | grep cache2
  ceph osd pool ls detail | grep cache2
  ceph osd pool ls detail -f json-pretty | grep cache2

  ceph osd pool delete slow slow --yes-i-really-really-mean-it
  ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
  ceph osd pool delete cache cache --yes-i-really-really-mean-it
  ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
}

function test_tiering_2()
{
  # make sure we can't clobber snapshot state
  ceph osd pool create snap_base 2
  ceph osd pool application enable snap_base rados
  ceph osd pool create snap_cache 2
  ceph osd pool mksnap snap_cache snapname
  expect_false ceph osd tier add snap_base snap_cache
  ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
  ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}

function test_tiering_3()
{
  # make sure we can't create a snapshot on a tier
  ceph osd pool create basex 2
  ceph osd pool application enable basex rados
  ceph osd pool create cachex 2
  ceph osd tier add basex cachex
  expect_false ceph osd pool mksnap cachex snapname
  ceph osd tier remove basex cachex
  ceph osd pool delete basex basex --yes-i-really-really-mean-it
  ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}

function test_tiering_4()
{
  # make sure we can't create an ec pool tier
  ceph osd pool create eccache 2 2 erasure
  expect_false ceph osd set-require-min-compat-client bobtail
  ceph osd pool create repbase 2
  ceph osd pool application enable repbase rados
  expect_false ceph osd tier add repbase eccache
  ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
  ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}

function test_tiering_5()
{
  # convenient add-cache command
  ceph osd pool create slow 2
  ceph osd pool application enable slow rados
  ceph osd pool create cache3 2
  ceph osd tier add-cache slow cache3 1024000
  ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
  ceph osd tier remove slow cache3 2> $TMPFILE || true
  check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
  ceph osd tier remove-overlay slow
  ceph osd tier remove slow cache3
  ceph osd pool ls | grep cache3
  ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
  ! ceph osd pool ls | grep cache3 || exit 1
  ceph osd pool delete slow slow --yes-i-really-really-mean-it
}

function test_tiering_6()
{
  # check whether add-cache works
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd tier cache-mode cachepool writeback
  rados -p datapool put object /etc/passwd
  rados -p cachepool stat object
  rados -p cachepool cache-flush object
  rados -p datapool stat object
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}

function test_tiering_7()
{
  # protection against pool removal when used as tiers
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cachepool 2
  ceph osd tier add-cache datapool cachepool 1024000
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
  check_response "EBUSY: pool 'datapool' has tiers cachepool"
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cachepool
  ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}

function test_tiering_8()
{
  ## check health check
  ceph osd set notieragent
  ceph osd pool create datapool 2
  ceph osd pool application enable datapool rados
  ceph osd pool create cache4 2
  ceph osd tier add-cache datapool cache4 1024000
  ceph osd tier cache-mode cache4 writeback
  tmpfile=$(mktemp|grep tmp)
  dd if=/dev/zero of=$tmpfile bs=4K count=1
  ceph osd pool set cache4 target_max_objects 200
  ceph osd pool set cache4 target_max_bytes 1000000
  rados -p cache4 put foo1 $tmpfile
  rados -p cache4 put foo2 $tmpfile
  rm -f $tmpfile
  flush_pg_stats
  ceph df | grep datapool | grep ' 2 '
  ceph osd tier remove-overlay datapool
  ceph osd tier remove datapool cache4
  ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
  ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
  ceph osd unset notieragent
}

function test_tiering_9()
{
  # make sure 'tier remove' behaves as we expect
  # i.e., removing a tier from a pool that's not its base pool only
  # results in a 'pool foo is now (or already was) not a tier of bar' message
  #
  ceph osd pool create basepoolA 2
  ceph osd pool application enable basepoolA rados
  ceph osd pool create basepoolB 2
  ceph osd pool application enable basepoolB rados
  poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
  poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')

  ceph osd pool create cache5 2
  ceph osd pool create cache6 2
  ceph osd tier add basepoolA cache5
  ceph osd tier add basepoolB cache6
  ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
  ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
  ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"

  ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
  ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
  ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1

  ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
  ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1

  ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
  ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
  ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
  ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}

function test_auth()
{
  ceph auth add client.xx mon allow osd "allow *"
  ceph auth export client.xx >client.xx.keyring
  ceph auth add client.xx -i client.xx.keyring
  rm -f client.xx.keyring
  ceph auth list | grep client.xx
  ceph auth ls | grep client.xx
  ceph auth get client.xx | grep caps | grep mon
  ceph auth get client.xx | grep caps | grep osd
  ceph auth get-key client.xx
  ceph auth print-key client.xx
  ceph auth print_key client.xx
  ceph auth caps client.xx osd "allow rw"
  expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
  ceph auth get client.xx | grep osd | grep "allow rw"
  ceph auth export | grep client.xx
  ceph auth export -o authfile
  ceph auth import -i authfile
  ceph auth export -o authfile2
  diff authfile authfile2
  rm authfile authfile2
  ceph auth del client.xx
  expect_false ceph auth get client.xx

  # (almost) interactive mode
  echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
  ceph auth get client.xx
  # script mode
  echo 'auth del client.xx' | ceph
  expect_false ceph auth get client.xx

  #
  # get / set auid
  #
  local auid=444
  ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
  expect_false ceph auth import --in-file TEST-keyring
  rm TEST-keyring
  ceph-authtool --create-keyring --name client.TEST --gen-key --cap mon "allow r" --set-uid $auid TEST-keyring
  ceph auth import --in-file TEST-keyring
  rm TEST-keyring
  ceph auth get client.TEST > $TMPFILE
  check_response "auid = $auid"
  ceph --format json-pretty auth get client.TEST > $TMPFILE
  check_response '"auid": '$auid
  ceph auth ls > $TMPFILE
  check_response "auid: $auid"
  ceph --format json-pretty auth ls > $TMPFILE
  check_response '"auid": '$auid
  ceph auth del client.TEST
}

function test_auth_profiles()
{
  ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
      mgr 'allow profile read-only'
  ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
      mgr 'allow profile read-write'
  ceph auth add client.xx-profile-rd mon 'allow profile role-definer'

  ceph auth export > client.xx.keyring

  # read-only is allowed all read-only commands (auth excluded)
  ceph -n client.xx-profile-ro -k client.xx.keyring status
  ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
  ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
  ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
  # read-only gets access denied for rw commands or auth commands
  ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # read-write is allowed for all read-write commands (except auth)
  ceph -n client.xx-profile-rw -k client.xx.keyring status
  ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
  ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
  ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
  ceph -n client.xx-profile-rw -k client.xx.keyring log foo
  ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
  ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
  # read-write gets access denied for auth commands
  ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
  check_response "EACCES: access denied"

  # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
  ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
  ceph -n client.xx-profile-rd -k client.xx.keyring auth export
  ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
  ceph -n client.xx-profile-rd -k client.xx.keyring status
  ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  # read-only 'mon' subsystem commands are allowed
  ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
  # but read-write 'mon' commands are not
  ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
  check_response "EACCES: access denied"
  ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
  check_response "EACCES: access denied"

  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
  ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw

  # add a new role-definer with the existing role-definer
  ceph -n client.xx-profile-rd -k client.xx.keyring \
      auth add client.xx-profile-rd2 mon 'allow profile role-definer'
  ceph -n client.xx-profile-rd -k client.xx.keyring \
      auth export > client.xx.keyring.2
  # remove old role-definer using the new role-definer
  ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
      auth del client.xx-profile-rd
  # remove the remaining role-definer with admin
  ceph auth del client.xx-profile-rd2
  rm -f client.xx.keyring client.xx.keyring.2
}

function test_mon_caps()
{
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring

  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"

  rm -rf $TEMP_DIR/ceph.client.bug.keyring
  ceph auth del client.bug
  ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
  chmod +r $TEMP_DIR/ceph.client.bug.keyring
  ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
  ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
  ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
  rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
  check_response "Permission denied"
}

function test_mon_misc()
{
  # with and without verbosity
  ceph osd dump | grep '^epoch'
  ceph --concise osd dump | grep '^epoch'

  ceph osd df | grep 'MIN/MAX VAR'

  # df
  ceph df > $TMPFILE
  grep GLOBAL $TMPFILE
  grep -v DIRTY $TMPFILE
  ceph df detail > $TMPFILE
  grep DIRTY $TMPFILE
  ceph df --format json > $TMPFILE
  grep 'total_bytes' $TMPFILE
  grep -v 'dirty' $TMPFILE
  ceph df detail --format json > $TMPFILE
  grep 'rd_bytes' $TMPFILE
  grep 'dirty' $TMPFILE
  ceph df --format xml | grep '<total_bytes>'
  ceph df detail --format xml | grep '<rd_bytes>'

  ceph fsid
  ceph health
  ceph health detail
  ceph health --format json-pretty
  ceph health detail --format xml-pretty

  ceph time-sync-status

  ceph node ls
  for t in mon osd mds ; do
    ceph node ls $t
  done

  ceph_watch_start
  mymsg="this is a test log message $$.$(date)"
  ceph log "$mymsg"
  ceph log last | grep "$mymsg"
  ceph log last 100 | grep "$mymsg"
  ceph_watch_wait "$mymsg"

  ceph mgr dump
  ceph mgr module ls
  ceph mgr module enable restful
  expect_false ceph mgr module enable foodne
  ceph mgr module enable foodne --force
  ceph mgr module disable foodne
  ceph mgr module disable foodnebizbangbash

  ceph mon metadata a
  ceph mon metadata
  ceph mon count-metadata ceph_version
  ceph mon versions

  ceph mgr metadata
  ceph mgr versions
  ceph mgr count-metadata ceph_version

  ceph versions

  ceph node ls
}

function check_mds_active()
{
  fs_name=$1
  ceph fs get $fs_name | grep active
}

function wait_mds_active()
{
  fs_name=$1
  max_run=300
  for i in $(seq 1 $max_run) ; do
    if ! check_mds_active $fs_name ; then
      echo "waiting for an active MDS daemon ($i/$max_run)"
      sleep 5
    else
      break
    fi
  done
  check_mds_active $fs_name
}

function get_mds_gids()
{
  fs_name=$1
  ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
}
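
# Illustrative usage of get_mds_gids (commented out): it prints the GIDs of
# the MDS daemons serving a filesystem as a space-separated list, which is how
# test_mds_tell below iterates over them, e.g.:
#   for mds_gid in $(get_mds_gids cephfs) ; do
#     ceph tell mds.$mds_gid injectargs "--debug-mds 20"
#   done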

function fail_all_mds()
{
  fs_name=$1
  ceph fs set $fs_name cluster_down true
  mds_gids=$(get_mds_gids $fs_name)
  for mds_gid in $mds_gids ; do
    ceph mds fail $mds_gid
  done
  if check_mds_active $fs_name ; then
    echo "An active MDS remains, something went wrong"
    ceph fs get $fs_name
    exit -1
  fi
}

function remove_all_fs()
{
  existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
  for fs_name in $existing_fs ; do
    echo "Removing fs ${fs_name}..."
    fail_all_mds $fs_name
    echo "Removing existing filesystem '${fs_name}'..."
    ceph fs rm $fs_name --yes-i-really-mean-it
    echo "Removed '${fs_name}'."
  done
}

# So that tests requiring an MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
  ceph auth ls | grep "^mds"
}

# Some of the commands are just not idempotent.
function without_test_dup_command()
{
  if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
    $@
  else
    local saved=${CEPH_CLI_TEST_DUP_COMMAND}
    unset CEPH_CLI_TEST_DUP_COMMAND
    $@
    CEPH_CLI_TEST_DUP_COMMAND=$saved  # restore the saved value
  fi
}
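
# Illustrative usage (commented out), mirroring test_mds_tell below: wrap
# commands that must not be replayed when CEPH_CLI_TEST_DUP_COMMAND is set.
#   without_test_dup_command ceph tell mds.0 respawn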

function test_mds_tell()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
    echo "Skipping test, no MDS found"
    return
  fi

  remove_all_fs
  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  # Test injectargs by GID
  old_mds_gids=$(get_mds_gids $FS_NAME)
  echo Old GIDs: $old_mds_gids

  for mds_gid in $old_mds_gids ; do
    ceph tell mds.$mds_gid injectargs "--debug-mds 20"
  done
  expect_false ceph tell mds.a injectargs mds_max_file_recover -1

  # Test respawn by rank
  without_test_dup_command ceph tell mds.0 respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  # Test respawn by ID
  without_test_dup_command ceph tell mds.a respawn
  new_mds_gids=$old_mds_gids
  while [ $new_mds_gids -eq $old_mds_gids ] ; do
    sleep 5
    new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  remove_all_fs
  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mon_mds()
{
  local FS_NAME=cephfs
  remove_all_fs

  ceph osd pool create fs_data 10
  ceph osd pool create fs_metadata 10
  ceph fs new $FS_NAME fs_metadata fs_data

  ceph fs set $FS_NAME cluster_down true
  ceph fs set $FS_NAME cluster_down false

  # Legacy commands, act on default fs
  ceph mds cluster_down
  ceph mds cluster_up

  ceph mds compat rm_incompat 4
  ceph mds compat rm_incompat 4

  # We don't want any MDSs to be up; their activity can interfere with
  # the "current_epoch + 1" checking below if they're generating updates
  fail_all_mds $FS_NAME

  ceph mds compat show
  expect_false ceph mds deactivate 2
  ceph mds dump
  ceph fs dump
  ceph fs get $FS_NAME
  for mds_gid in $(get_mds_gids $FS_NAME) ; do
    ceph mds metadata $mds_gid
  done
  ceph mds metadata
  ceph mds versions
  ceph mds count-metadata os

  # XXX mds fail, but how do you undo it?
  mdsmapfile=$TEMP_DIR/mdsmap.$$
  current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
  [ -s $mdsmapfile ]
  rm $mdsmapfile

  ceph osd pool create data2 10
  ceph osd pool create data3 10
  data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
  data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
  ceph mds add_data_pool $data2_pool
  ceph mds add_data_pool $data3_pool
  ceph mds add_data_pool 100 >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph mds add_data_pool foobarbaz >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph mds remove_data_pool $data2_pool
  ceph mds remove_data_pool $data3_pool
  ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
  ceph mds set allow_multimds false
  expect_false ceph mds set_max_mds 4
  ceph mds set allow_multimds true
  ceph mds set_max_mds 4
  ceph mds set_max_mds 3
  ceph mds set_max_mds 256
  expect_false ceph mds set_max_mds 257
  ceph mds set max_mds 4
  ceph mds set max_mds 256
  expect_false ceph mds set max_mds 257
  expect_false ceph mds set max_mds asdf
  expect_false ceph mds set inline_data true
  ceph mds set inline_data true --yes-i-really-mean-it
  ceph mds set inline_data yes --yes-i-really-mean-it
  ceph mds set inline_data 1 --yes-i-really-mean-it
  expect_false ceph mds set inline_data --yes-i-really-mean-it
  ceph mds set inline_data false
  ceph mds set inline_data no
  ceph mds set inline_data 0
  expect_false ceph mds set inline_data asdf
  ceph mds set max_file_size 1048576
  expect_false ceph mds set max_file_size 123asdf

  expect_false ceph mds set allow_new_snaps
  expect_false ceph mds set allow_new_snaps true
  ceph mds set allow_new_snaps true --yes-i-really-mean-it
  ceph mds set allow_new_snaps 0
  ceph mds set allow_new_snaps false
  ceph mds set allow_new_snaps no
  expect_false ceph mds set allow_new_snaps taco

  # we should never be able to add EC pools as data or metadata pools
  # create an ec-pool...
  ceph osd pool create mds-ec-pool 10 10 erasure
  set +e
  ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e
  ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
  data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
  metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')

  fail_all_mds $FS_NAME

  set +e
  # Check that rmfailed requires confirmation
  expect_false ceph mds rmfailed 0
  ceph mds rmfailed 0 --yes-i-really-mean-it
  set -e

  # Check that `newfs` is no longer permitted
  expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE

  # Check that 'fs reset' runs
  ceph fs reset $FS_NAME --yes-i-really-mean-it

  # Check that creating a second FS fails by default
  ceph osd pool create fs_metadata2 10
  ceph osd pool create fs_data2 10
  set +e
  expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
  set -e

  # Check that setting enable_multiple enables creation of second fs
  ceph fs flag set enable_multiple true --yes-i-really-mean-it
  ceph fs new cephfs2 fs_metadata2 fs_data2

  # Clean up multi-fs stuff
  fail_all_mds cephfs2
  ceph fs rm cephfs2 --yes-i-really-mean-it
  ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
  ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it

  fail_all_mds $FS_NAME

  # Clean up to enable subsequent fs new tests
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  set +e
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e

  # ... now create a cache tier in front of the EC pool...
  ceph osd pool create mds-tier 2
  ceph osd tier add mds-ec-pool mds-tier
  ceph osd tier set-overlay mds-ec-pool mds-tier
  tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')

  # Use of a readonly tier should be forbidden
  ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
  set +e
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
  set -e

  # Use of a writeback tier should enable FS creation
  ceph osd tier cache-mode mds-tier writeback
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force

  # While a FS exists using the tiered pools, I should not be allowed
  # to remove the tier
  set +e
  ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  set -e

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ... but we should be forbidden from using the cache pool in the FS directly.
  set +e
  ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  set -e

  # Clean up tier + EC pools
  ceph osd tier remove-overlay mds-ec-pool
  ceph osd tier remove mds-ec-pool mds-tier

  # Create a FS using the 'cache' pool now that it's no longer a tier
  ceph fs new $FS_NAME fs_metadata mds-tier --force

  # We should be forbidden from using this pool as a tier now that
  # it's in use for CephFS
  set +e
  ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  set -e

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # We should be permitted to use an EC pool with overwrites enabled
  # as the data pool...
  ceph osd pool set mds-ec-pool allow_ec_overwrites true
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ...but not as the metadata pool
  set +e
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e

  ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it

  # Create a FS and check that we can subsequently add a cache tier to it
  ceph fs new $FS_NAME fs_metadata fs_data --force

  # Adding overlay to FS pool should be permitted, RADOS clients handle this.
  ceph osd tier add fs_metadata mds-tier
  ceph osd tier cache-mode mds-tier writeback
  ceph osd tier set-overlay fs_metadata mds-tier

  # Removing tier should be permitted because the underlying pool is
  # replicated (#11504 case)
  ceph osd tier cache-mode mds-tier proxy
  ceph osd tier remove-overlay fs_metadata
  ceph osd tier remove fs_metadata mds-tier
  ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it

  # Clean up FS
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  ceph mds stat
  # ceph mds tell mds.a getmap
  # ceph mds rm
  # ceph mds rmfailed
  # ceph mds set_state
  # ceph mds stop

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}

function test_mon_mds_metadata()
{
  local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
  test "$nmons" -gt 0

  ceph mds dump |
    sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
    while read gid id rank; do
      ceph mds metadata ${gid} | grep '"hostname":'
      ceph mds metadata ${id} | grep '"hostname":'
      ceph mds metadata ${rank} | grep '"hostname":'

      local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
      test "$n" -eq "$nmons"
    done

  expect_false ceph mds metadata UNKNOWN
}

function test_mon_mon()
{
  # print help message
  ceph --help mon
  # no mon add/remove
  ceph mon dump
  ceph mon getmap -o $TEMP_DIR/monmap.$$
  [ -s $TEMP_DIR/monmap.$$ ]
  # ceph mon tell
  ceph mon_status

  # test mon features
  ceph mon feature ls
  ceph mon feature set kraken --yes-i-really-mean-it
  expect_false ceph mon feature set abcd
  expect_false ceph mon feature set abcd --yes-i-really-mean-it
}

function gen_secrets_file()
{
  # let's assume we can have the following types
  #  all - generates both cephx and lockbox, with mock dm-crypt key
  #  cephx - only cephx
  #  no_cephx - lockbox and dm-crypt, no cephx
  #  no_lockbox - dm-crypt and cephx, no lockbox
  #  empty - empty file
  #  empty_json - correct json, empty map
  #  bad_json - bad json :)
  #
  local t=$1
  if [[ -z "$t" ]]; then
    t="all"
  fi

  fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
  echo $fn
  if [[ "$t" == "empty" ]]; then
    return 0
  fi

  echo "{" > $fn
  if [[ "$t" == "bad_json" ]]; then
    echo "asd: ; }" >> $fn
    return 0
  elif [[ "$t" == "empty_json" ]]; then
    echo "}" >> $fn
    return 0
  fi

  cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
  lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
  dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""

  if [[ "$t" == "all" ]]; then
    echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "cephx" ]]; then
    echo "$cephx_secret" >> $fn
  elif [[ "$t" == "no_cephx" ]]; then
    echo "$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "no_lockbox" ]]; then
    echo "$cephx_secret,$dmcrypt_key" >> $fn
  else
    echo "unknown gen_secrets_file() type '$t'"
    return 1
  fi
  echo "}" >> $fn
  return 0
}
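
# Illustrative shape of the generated secrets file (commented out; the exact
# formatting shown is an assumption, the real key values come from
# ceph-authtool): an "all" file contains roughly
#   { "cephx_secret": "<key>", "cephx_lockbox_secret": "<key>", "dmcrypt_key": "<key>" }
# and the path printed by the helper is fed to 'ceph osd new' with -i, as
# test_mon_osd_create_destroy below does:
#   all_secrets=$(gen_secrets_file "all")
#   ceph osd new $(uuidgen) -i $all_secrets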

function test_mon_osd_create_destroy()
{
  ceph osd new 2>&1 | grep 'EINVAL'
  ceph osd new '' -1 2>&1 | grep 'EINVAL'
  ceph osd new '' 10 2>&1 | grep 'EINVAL'

  old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

  old_osds=$(ceph osd ls)
  num_osds=$(ceph osd ls | wc -l)

  uuid=$(uuidgen)
  id=$(ceph osd new $uuid 2>/dev/null)

  for i in $old_osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd find $id

  id2=`ceph osd new $uuid 2>/dev/null`

  [[ $id2 == $id ]]

  ceph osd new $uuid $id

  id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST

  uuid2=$(uuidgen)
  id2=$(ceph osd new $uuid2)
  ceph osd find $id2
  [[ "$id2" != "$id" ]]

  ceph osd new $uuid $id2 2>&1 | grep EEXIST
  ceph osd new $uuid2 $id2

  # test with secrets
  empty_secrets=$(gen_secrets_file "empty")
  empty_json=$(gen_secrets_file "empty_json")
  all_secrets=$(gen_secrets_file "all")
  cephx_only=$(gen_secrets_file "cephx")
  no_cephx=$(gen_secrets_file "no_cephx")
  no_lockbox=$(gen_secrets_file "no_lockbox")
  bad_json=$(gen_secrets_file "bad_json")

  # empty secrets should be idempotent
  new_id=$(ceph osd new $uuid $id -i $empty_secrets)
  [[ "$new_id" == "$id" ]]

  # empty json, thus empty secrets
  new_id=$(ceph osd new $uuid $id -i $empty_json)
  [[ "$new_id" == "$id" ]]

  ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'

  ceph osd rm $id
  ceph osd rm $id2
  ceph osd setmaxosd $old_maxosd

  ceph osd new $uuid -i $bad_json 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
  ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'

  osds=$(ceph osd ls)
  id=$(ceph osd new $uuid -i $all_secrets)
  for i in $osds; do
    [[ "$i" != "$id" ]]
  done

  ceph osd find $id

  # validate secrets and dm-crypt are set
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
      jq '.key')
  s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
  [[ $k == $s ]]
  ceph config-key exists dm-crypt/osd/$uuid/luks

  osds=$(ceph osd ls)
  id2=$(ceph osd new $uuid2 -i $cephx_only)
  for i in $osds; do
    [[ "$i" != "$id2" ]]
  done

  ceph osd find $id2
  k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
  s=$(cat $all_secrets | jq '.cephx_secret')
  [[ $k == $s ]]
  expect_false ceph auth get-key client.osd-lockbox.$uuid2
  expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks

  ceph osd destroy osd.$id2 --yes-i-really-mean-it
  ceph osd destroy $id2 --yes-i-really-mean-it
  ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  ceph osd dump | grep osd.$id2 | grep destroyed

  id3=$id2
  uuid3=$(uuidgen)
  ceph osd new $uuid3 $id3 -i $all_secrets
  ceph osd dump | grep osd.$id3 | expect_false grep destroyed
  ceph auth get-key client.osd-lockbox.$uuid3
  ceph auth get-key osd.$id3
  ceph config-key exists dm-crypt/osd/$uuid3/luks

  ceph osd purge osd.$id3 --yes-i-really-mean-it
  expect_false ceph osd find $id2
  expect_false ceph auth get-key osd.$id2
  expect_false ceph auth get-key client.osd-lockbox.$uuid3
  expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
  ceph osd purge osd.$id3 --yes-i-really-mean-it
  ceph osd purge osd.$id3 --yes-i-really-mean-it # idempotent

  ceph osd purge osd.$id --yes-i-really-mean-it
  ceph osd purge 123456 --yes-i-really-mean-it
  expect_false ceph osd find $id
  expect_false ceph auth get-key osd.$id
  expect_false ceph auth get-key client.osd-lockbox.$uuid
  expect_false ceph config-key exists dm-crypt/osd/$uuid/luks

  rm $empty_secrets $empty_json $all_secrets $cephx_only \
      $no_cephx $no_lockbox $bad_json

  for i in $(ceph osd ls); do
    [[ "$i" != "$id" ]]
    [[ "$i" != "$id2" ]]
    [[ "$i" != "$id3" ]]
  done

  [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
  ceph osd setmaxosd $old_maxosd

}

function test_mon_config_key()
{
  key=asdfasdfqwerqwreasdfuniquesa123df
  ceph config-key list | grep -c $key | grep 0
  ceph config-key get $key | grep -c bar | grep 0
  ceph config-key set $key bar
  ceph config-key get $key | grep bar
  ceph config-key list | grep -c $key | grep 1
  ceph config-key dump | grep $key | grep bar
  ceph config-key rm $key
  expect_false ceph config-key get $key
  ceph config-key list | grep -c $key | grep 0
  ceph config-key dump | grep -c $key | grep 0
}

function test_mon_osd()
{
  #
  # osd blacklist
  #
  bl=192.168.0.1:0/1000
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
  ceph osd dump --format=json-pretty | grep $bl
  ceph osd dump | grep "^blacklist $bl"
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl

  bl=192.168.0.1
  # test without nonce, invalid nonce
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist rm $bl
  ceph osd blacklist ls | expect_false grep $bl
  expect_false "ceph osd blacklist $bl/-1"
  expect_false "ceph osd blacklist $bl/foo"

  # test with wrong address
  expect_false "ceph osd blacklist 1234.56.78.90/100"

  # Test `clear`
  ceph osd blacklist add $bl
  ceph osd blacklist ls | grep $bl
  ceph osd blacklist clear
  ceph osd blacklist ls | expect_false grep $bl

  #
  # osd crush
  #
  ceph osd crush reweight-all
  ceph osd crush tunables legacy
  ceph osd crush show-tunables | grep argonaut
  ceph osd crush tunables bobtail
  ceph osd crush show-tunables | grep bobtail
  ceph osd crush tunables firefly
  ceph osd crush show-tunables | grep firefly

  ceph osd crush set-tunable straw_calc_version 0
  ceph osd crush get-tunable straw_calc_version | grep 0
  ceph osd crush set-tunable straw_calc_version 1
  ceph osd crush get-tunable straw_calc_version | grep 1

  #
  # require-min-compat-client
  #
  expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
  ceph osd set-require-min-compat-client luminous
  ceph osd dump | grep 'require_min_compat_client luminous'

  #
  # osd scrub
  #
  # how do I tell when these are done?
  ceph osd scrub 0
  ceph osd deep-scrub 0
  ceph osd repair 0

  for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
  do
    ceph osd set $f
    ceph osd unset $f
  done
  expect_false ceph osd unset sortbitwise # cannot be unset
  expect_false ceph osd set bogus
  expect_false ceph osd unset bogus
  ceph osd require-osd-release luminous
  # can't lower (or use new command for anything but jewel)
  expect_false ceph osd require-osd-release jewel
  # these are no-ops but should succeed.
  ceph osd set require_jewel_osds
  ceph osd set require_kraken_osds
  expect_false ceph osd unset require_jewel_osds

  ceph osd set noup
  ceph osd down 0
  ceph osd dump | grep 'osd.0 down'
  ceph osd unset noup
  max_run=1000
  for ((i=0; i < $max_run; i++)); do
    if ! ceph osd dump | grep 'osd.0 up'; then
      echo "waiting for osd.0 to come back up ($i/$max_run)"
      sleep 1
    else
      break
    fi
  done
  ceph osd dump | grep 'osd.0 up'

  ceph osd dump | grep 'osd.0 up'
  # ceph osd find expects the OsdName, so both ints and osd.n should work.
  ceph osd find 1
  ceph osd find osd.1
  expect_false ceph osd find osd.xyz
  expect_false ceph osd find xyz
  expect_false ceph osd find 0.1
  ceph --format plain osd find 1 # falls back to json-pretty
  if [ `uname` == Linux ]; then
    ceph osd metadata 1 | grep 'distro'
    ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
  fi
  ceph osd out 0
  ceph osd dump | grep 'osd.0.*out'
  ceph osd in 0
  ceph osd dump | grep 'osd.0.*in'
  ceph osd find 0

  ceph osd add-nodown 0 1
  ceph health detail | grep 'NODOWN'
  ceph osd rm-nodown 0 1
  ! ceph health detail | grep 'NODOWN'

  ceph osd out 0 # so we can mark it as noin later
  ceph osd add-noin 0
  ceph health detail | grep 'NOIN'
  ceph osd rm-noin 0
  ! ceph health detail | grep 'NOIN'
  ceph osd in 0

  ceph osd add-noout 0
  ceph health detail | grep 'NOOUT'
  ceph osd rm-noout 0
  ! ceph health detail | grep 'NOOUT'

  # test osd id parse
  expect_false ceph osd add-noup 797er
  expect_false ceph osd add-nodown u9uwer
  expect_false ceph osd add-noin 78~15
  expect_false ceph osd add-noout 0 all 1

  expect_false ceph osd rm-noup 1234567
  expect_false ceph osd rm-nodown fsadf7
  expect_false ceph osd rm-noin 0 1 any
  expect_false ceph osd rm-noout 790-fd

  ids=`ceph osd ls-tree default`
  for osd in $ids
  do
    ceph osd add-nodown $osd
    ceph osd add-noout $osd
  done
  ceph -s | grep 'NODOWN'
  ceph -s | grep 'NOOUT'
  ceph osd rm-nodown any
  ceph osd rm-noout all
  ! ceph -s | grep 'NODOWN'
  ! ceph -s | grep 'NOOUT'

  # make sure mark out preserves weight
  ceph osd reweight osd.0 .5
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
  ceph osd out 0
  ceph osd in 0
  ceph osd dump | grep ^osd.0 | grep 'weight 0.5'

  ceph osd getmap -o $f
  [ -s $f ]
  rm $f
  save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
  [ "$save" -gt 0 ]
  ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
  ceph osd setmaxosd 10
  ceph osd getmaxosd | grep 'max_osd = 10'
  ceph osd setmaxosd $save
  ceph osd getmaxosd | grep "max_osd = $save"

  for id in `ceph osd ls` ; do
    retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
  done

  ceph osd rm 0 2>&1 | grep 'EBUSY'

  local old_osds=$(echo $(ceph osd ls))
  id=`ceph osd create`
  ceph osd find $id
  ceph osd lost $id --yes-i-really-mean-it
  expect_false ceph osd setmaxosd $id
  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    ceph osd rm $id
  done

  uuid=`uuidgen`
  id=`ceph osd create $uuid`
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  ceph osd rm $id

  ceph --help osd

  # reset max_osd.
  ceph osd setmaxosd $id
  ceph osd getmaxosd | grep "max_osd = $save"
  local max_osd=$save

  ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
  ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'

  id=`ceph osd create $uuid $max_osd`
  [ "$id" = "$max_osd" ]
  ceph osd find $id
  max_osd=$((max_osd + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
  ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
  id2=`ceph osd create $uuid`
  [ "$id" = "$id2" ]
  id2=`ceph osd create $uuid $id`
  [ "$id" = "$id2" ]

  uuid=`uuidgen`
  local gap_start=$max_osd
  id=`ceph osd create $uuid $((gap_start + 100))`
  [ "$id" = "$((gap_start + 100))" ]
  max_osd=$((id + 1))
  ceph osd getmaxosd | grep "max_osd = $max_osd"

  ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'

  #
  # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
  # is repeated and consumes two osd ids, not just one.
  #
  local next_osd=$gap_start
  id=`ceph osd create $(uuidgen)`
  [ "$id" = "$next_osd" ]

  next_osd=$((id + 1))
  id=`ceph osd create $(uuidgen) $next_osd`
  [ "$id" = "$next_osd" ]

  local new_osds=$(echo $(ceph osd ls))
  for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
    [ $id -ge $save ]
    ceph osd rm $id
  done
  ceph osd setmaxosd $save

  ceph osd ls
  ceph osd pool create data 10
  ceph osd pool application enable data rados
  ceph osd lspools | grep data
  ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
  ceph osd map data foo namespace | grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pause
  ceph osd dump | grep 'flags.*pauserd,pausewr'
  ceph osd unpause

  ceph osd tree
  ceph osd tree up
  ceph osd tree down
  ceph osd tree in
  ceph osd tree out
  ceph osd tree destroyed
  ceph osd tree up in
  ceph osd tree up out
  ceph osd tree down in
  ceph osd tree down out
  ceph osd tree out down
  expect_false ceph osd tree up down
  expect_false ceph osd tree up destroyed
  expect_false ceph osd tree down destroyed
  expect_false ceph osd tree up down destroyed
  expect_false ceph osd tree in out
  expect_false ceph osd tree up foo

  ceph osd metadata
  ceph osd count-metadata os
  ceph osd versions

  ceph osd perf
  ceph osd blocked-by

  ceph osd stat | grep up,
}
1650
31f18b77
FG
1651function test_mon_crush()
1652{
1653 f=$TEMP_DIR/map.$$
1654 epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
1655 [ -s $f ]
1656 [ "$epoch" -gt 1 ]
1657 nextepoch=$(( $epoch + 1 ))
1658 echo epoch $epoch nextepoch $nextepoch
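  # (note, hedged reading of the checks below) the epoch argument to
  # setcrushmap acts as a guard: a not-yet-existing epoch is rejected,
  # while resubmitting the same map with the old epoch succeeds and simply
  # reports the already-bumped epoch again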
1659 rm -f $f.epoch
1660 expect_false ceph osd setcrushmap $nextepoch -i $f
1661 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1662 echo gotepoch $gotepoch
1663 [ "$gotepoch" -eq "$nextepoch" ]
1664 # should be idempotent
1665 gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
1666 echo epoch $gotepoch
1667 [ "$gotepoch" -eq "$nextepoch" ]
1668 rm $f
1669}
1670
7c673cae
FG
1671function test_mon_osd_pool()
1672{
1673 #
1674 # osd pool
1675 #
1676 ceph osd pool create data 10
c07f9fc5 1677 ceph osd pool application enable data rados
7c673cae
FG
1678 ceph osd pool mksnap data datasnap
1679 rados -p data lssnap | grep datasnap
1680 ceph osd pool rmsnap data datasnap
1681 expect_false ceph osd pool rmsnap pool_fake snapshot
1682 ceph osd pool delete data data --yes-i-really-really-mean-it
1683
1684 ceph osd pool create data2 10
c07f9fc5 1685 ceph osd pool application enable data2 rados
7c673cae
FG
1686 ceph osd pool rename data2 data3
1687 ceph osd lspools | grep data3
1688 ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
1689
1690 ceph osd pool create replicated 12 12 replicated
1691 ceph osd pool create replicated 12 12 replicated
1692 ceph osd pool create replicated 12 12 # default is replicated
1693 ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
c07f9fc5 1694 ceph osd pool application enable replicated rados
7c673cae
FG
1695 # should fail because the type is not the same
1696 expect_false ceph osd pool create replicated 12 12 erasure
1697 ceph osd lspools | grep replicated
1698 ceph osd pool create ec_test 1 1 erasure
c07f9fc5 1699 ceph osd pool application enable ec_test rados
7c673cae 1700 set +e
c07f9fc5
FG
1701 ceph osd count-metadata osd_objectstore | grep 'bluestore'
1702 if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
7c673cae 1703 ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
c07f9fc5 1704 check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
7c673cae
FG
1705 else
1706 ceph osd pool set ec_test allow_ec_overwrites true || return 1
1707 expect_false ceph osd pool set ec_test allow_ec_overwrites false
1708 fi
1709 set -e
1710 ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
1711 ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
1712}
1713
1714function test_mon_osd_pool_quota()
1715{
1716 #
1717 # test osd pool set/get quota
1718 #
1719
1720 # create tmp pool
1721 ceph osd pool create tmp-quota-pool 36
c07f9fc5 1722 ceph osd pool application enable tmp-quota-pool rados
7c673cae
FG
1723 #
1724 # set erroneous quotas
1725 #
1726 expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
1727 expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
1728 expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
1729 #
1730 # set valid quotas
1731 #
1732 ceph osd pool set-quota tmp-quota-pool max_bytes 10
1733 ceph osd pool set-quota tmp-quota-pool max_objects 10M
1734 #
1735 # get quotas
1736 #
1737 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
1738 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10240k objects'
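  # (illustration only) the strings matched above follow from the quotas
  # just set: max_objects 10M = 10 * 1024 * 1024 = 10485760 objects, which
  # get-quota renders as "10240k objects", and max_bytes 10 is rendered
  # as "10B"
  [ $((10 * 1024 * 1024)) -eq 10485760 ]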
1739 #
1740 # get quotas in json-pretty format
1741 #
1742 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1743 grep '"quota_max_objects":.*10485760'
1744 ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
1745 grep '"quota_max_bytes":.*10'
1746 #
1747 # reset pool quotas
1748 #
1749 ceph osd pool set-quota tmp-quota-pool max_bytes 0
1750 ceph osd pool set-quota tmp-quota-pool max_objects 0
1751 #
1752 # test N/A quotas
1753 #
1754 ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
1755 ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
1756 #
1757 # cleanup tmp pool
1758 ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
1759}
1760
1761function test_mon_pg()
1762{
1763 # Make sure we start healthy.
1764 wait_for_health_ok
1765
1766 ceph pg debug unfound_objects_exist
1767 ceph pg debug degraded_pgs_exist
224ce89b 1768 ceph pg deep-scrub 1.0
7c673cae
FG
1769 ceph pg dump
1770 ceph pg dump pgs_brief --format=json
1771 ceph pg dump pgs --format=json
1772 ceph pg dump pools --format=json
1773 ceph pg dump osds --format=json
1774 ceph pg dump sum --format=json
1775 ceph pg dump all --format=json
1776 ceph pg dump pgs_brief osds --format=json
1777 ceph pg dump pools osds pgs_brief --format=json
1778 ceph pg dump_json
1779 ceph pg dump_pools_json
1780 ceph pg dump_stuck inactive
1781 ceph pg dump_stuck unclean
1782 ceph pg dump_stuck stale
1783 ceph pg dump_stuck undersized
1784 ceph pg dump_stuck degraded
1785 ceph pg ls
224ce89b 1786 ceph pg ls 1
7c673cae
FG
1787 ceph pg ls stale
1788 expect_false ceph pg ls scrubq
1789 ceph pg ls active stale repair recovering
224ce89b
WB
1790 ceph pg ls 1 active
1791 ceph pg ls 1 active stale
7c673cae 1792 ceph pg ls-by-primary osd.0
224ce89b 1793 ceph pg ls-by-primary osd.0 1
7c673cae
FG
1794 ceph pg ls-by-primary osd.0 active
1795 ceph pg ls-by-primary osd.0 active stale
224ce89b 1796 ceph pg ls-by-primary osd.0 1 active stale
7c673cae 1797 ceph pg ls-by-osd osd.0
224ce89b 1798 ceph pg ls-by-osd osd.0 1
7c673cae
FG
1799 ceph pg ls-by-osd osd.0 active
1800 ceph pg ls-by-osd osd.0 active stale
224ce89b 1801 ceph pg ls-by-osd osd.0 1 active stale
7c673cae
FG
1802 ceph pg ls-by-pool rbd
1803 ceph pg ls-by-pool rbd active stale
1804 # can't test this...
1805 # ceph pg force_create_pg
1806 ceph pg getmap -o $TEMP_DIR/map.$$
1807 [ -s $TEMP_DIR/map.$$ ]
224ce89b
WB
1808 ceph pg map 1.0 | grep acting
1809 ceph pg repair 1.0
1810 ceph pg scrub 1.0
7c673cae
FG
1811
1812 ceph osd set-full-ratio .962
1813 ceph osd dump | grep '^full_ratio 0.962'
1814 ceph osd set-backfillfull-ratio .912
1815 ceph osd dump | grep '^backfillfull_ratio 0.912'
1816 ceph osd set-nearfull-ratio .892
1817 ceph osd dump | grep '^nearfull_ratio 0.892'
1818
1819 # Check health status
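  # (note) the full ratios are expected to satisfy nearfull < backfillfull
  # < full, so raising nearfull (.913) above the backfillfull ratio (.912)
  # set above, and later backfillfull (.963) above full (.962), should both
  # surface OSD_OUT_OF_ORDER_FULL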
1820 ceph osd set-nearfull-ratio .913
224ce89b
WB
1821 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
1822 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
7c673cae
FG
1823 ceph osd set-nearfull-ratio .892
1824 ceph osd set-backfillfull-ratio .963
224ce89b
WB
1825 ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
1826 ceph health detail | grep OSD_OUT_OF_ORDER_FULL
7c673cae
FG
1827 ceph osd set-backfillfull-ratio .912
1828
1829 # Check injected full results
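  # (note) "injectfull" on the admin socket fakes a fullness state on one
  # OSD; the states exercised below are nearfull, backfillfull, failsafe
  # and full, with "none" clearing the injected state again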
1830 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
224ce89b
WB
1831 wait_for_health "OSD_NEARFULL"
1832 ceph health detail | grep "osd.0 is near full"
1833 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
1834 wait_for_health_ok
1835
7c673cae 1836 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
224ce89b
WB
1837 wait_for_health "OSD_BACKFILLFULL"
1838 ceph health detail | grep "osd.1 is backfill full"
1839 $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
1840 wait_for_health_ok
1841
7c673cae
FG
1842 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
1843 # failsafe and full are the same as far as the monitor is concerned
224ce89b
WB
1844 wait_for_health "OSD_FULL"
1845 ceph health detail | grep "osd.2 is full"
1846 $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
1847 wait_for_health_ok
1848
7c673cae 1849 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
224ce89b 1850 wait_for_health "OSD_FULL"
31f18b77 1851 ceph health detail | grep "osd.0 is full"
7c673cae 1852 $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
7c673cae
FG
1853 wait_for_health_ok
1854
1855 ceph pg stat | grep 'pgs:'
224ce89b
WB
1856 ceph pg 1.0 query
1857 ceph tell 1.0 query
7c673cae
FG
1858 ceph quorum enter
1859 ceph quorum_status
1860 ceph report | grep osd_stats
1861 ceph status
1862 ceph -s
1863
1864 #
1865 # tell osd version
1866 #
1867 ceph tell osd.0 version
1868 expect_false ceph tell osd.9999 version
1869 expect_false ceph tell osd.foo version
1870
1871 # back to pg stuff
1872
1873 ceph tell osd.0 dump_pg_recovery_stats | grep Started
1874
1875 ceph osd reweight 0 0.9
1876 expect_false ceph osd reweight 0 -1
1877 ceph osd reweight osd.0 1
1878
1879 ceph osd primary-affinity osd.0 .9
1880 expect_false ceph osd primary-affinity osd.0 -2
1881 expect_false ceph osd primary-affinity osd.9999 .5
1882 ceph osd primary-affinity osd.0 1
1883
224ce89b
WB
1884 ceph osd pool set rbd size 2
1885 ceph osd pg-temp 1.0 0 1
1886 ceph osd pg-temp 1.0 osd.1 osd.0
1887 expect_false ceph osd pg-temp 1.0 0 1 2
7c673cae 1888 expect_false ceph osd pg-temp asdf qwer
224ce89b
WB
1889 expect_false ceph osd pg-temp 1.0 asdf
1890 expect_false ceph osd pg-temp 1.0
7c673cae
FG
1891
1892 # don't test ceph osd primary-temp for now
1893}
1894
1895function test_mon_osd_pool_set()
1896{
1897 TEST_POOL_GETSET=pool_getset
1898 ceph osd pool create $TEST_POOL_GETSET 1
c07f9fc5 1899 ceph osd pool application enable $TEST_POOL_GETSET rados
7c673cae
FG
1900 wait_for_clean
1901 ceph osd pool get $TEST_POOL_GETSET all
1902
31f18b77 1903 for s in pg_num pgp_num size min_size crush_rule; do
7c673cae
FG
1904 ceph osd pool get $TEST_POOL_GETSET $s
1905 done
1906
1907 old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
1908 (( new_size = old_size + 1 ))
1909 ceph osd pool set $TEST_POOL_GETSET size $new_size
1910 ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
1911 ceph osd pool set $TEST_POOL_GETSET size $old_size
1912
1913 ceph osd pool create pool_erasure 1 1 erasure
c07f9fc5 1914 ceph osd pool application enable pool_erasure rados
7c673cae
FG
1915 wait_for_clean
1916 set +e
1917 ceph osd pool set pool_erasure size 4444 2>$TMPFILE
1918 check_response 'not change the size'
1919 set -e
1920 ceph osd pool get pool_erasure erasure_code_profile
1921
1922 auid=5555
1923 ceph osd pool set $TEST_POOL_GETSET auid $auid
1924 ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
1925 ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
1926 ceph osd pool set $TEST_POOL_GETSET auid 0
1927
1928 for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
1929 ceph osd pool set $TEST_POOL_GETSET $flag false
1930 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1931 ceph osd pool set $TEST_POOL_GETSET $flag true
1932 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1933 ceph osd pool set $TEST_POOL_GETSET $flag 1
1934 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
1935 ceph osd pool set $TEST_POOL_GETSET $flag 0
1936 ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
1937 expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
1938 expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
1939 done
1940
1941 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1942 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
1943 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
1944 ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
1945 ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
1946
1947 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1948 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
1949 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
1950 ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
1951 ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
1952
1953 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1954 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
1955 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
1956 ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
1957 ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
1958
1959 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1960 ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
1961 ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
1962 ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
1963 ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
1964
1965 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1966 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
1967 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
1968 ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
1969 ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
1970
1971 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1972 ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
1973 ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
1974 ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
1975 ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
1976
1977 ceph osd pool set $TEST_POOL_GETSET nopgchange 1
1978 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
1979 expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1980 ceph osd pool set $TEST_POOL_GETSET nopgchange 0
1981 ceph osd pool set $TEST_POOL_GETSET pg_num 10
1982 wait_for_clean
1983 ceph osd pool set $TEST_POOL_GETSET pgp_num 10
1984
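  # (assumption) the monitor limits pg_num growth per command to roughly
  # mon_osd_max_split_count (default 32) new PGs per OSD, which is why
  # old_pgs + num_osds * 32 below is expected to succeed while one PG more
  # is expected to fail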
1985 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
c07f9fc5 1986 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
7c673cae
FG
1987 ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
1988 ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
1989 wait_for_clean
1990 old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
c07f9fc5 1991 new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32 + 1))
7c673cae
FG
1992 expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
1993
1994 ceph osd pool set $TEST_POOL_GETSET nosizechange 1
1995 expect_false ceph osd pool set $TEST_POOL_GETSET size 2
1996 expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
1997 ceph osd pool set $TEST_POOL_GETSET nosizechange 0
1998 ceph osd pool set $TEST_POOL_GETSET size 2
1999 wait_for_clean
2000 ceph osd pool set $TEST_POOL_GETSET min_size 2
2001
2002 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
2003 ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
2004
2005 expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
2006 ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
2007
7c673cae 2008 ceph osd pool get rbd crush_rule | grep 'crush_rule: '
224ce89b
WB
2009
2010 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2011 ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
2012 ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
2013 ceph osd pool set $TEST_POOL_GETSET compression_mode unset
2014 ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
2015
2016 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2017 ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
2018 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
2019 ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
2020 ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
2021
2022 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2023 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
2024 expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
2025 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
2026 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
2027 ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
2028 ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
2029
2030 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2031 ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
2032 ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
2033 ceph osd pool set $TEST_POOL_GETSET csum_type unset
2034 ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
2035
2036 for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
2037 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2038 ceph osd pool set $TEST_POOL_GETSET $size 100
2039 ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
2040 ceph osd pool set $TEST_POOL_GETSET $size 0
2041 ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
2042 done
c07f9fc5
FG
2043
2044 ceph osd pool set $TEST_POOL_GETSET nodelete 1
2045 expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2046 ceph osd pool set $TEST_POOL_GETSET nodelete 0
2047 ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
2048
7c673cae
FG
2049}
2050
2051function test_mon_osd_tiered_pool_set()
2052{
2053 # this is really a tier pool
2054 ceph osd pool create real-tier 2
2055 ceph osd tier add rbd real-tier
2056
2057 ceph osd pool set real-tier hit_set_type explicit_hash
2058 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
2059 ceph osd pool set real-tier hit_set_type explicit_object
2060 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
2061 ceph osd pool set real-tier hit_set_type bloom
2062 ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
2063 expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
2064 ceph osd pool set real-tier hit_set_period 123
2065 ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
2066 ceph osd pool set real-tier hit_set_count 12
2067 ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
2068 ceph osd pool set real-tier hit_set_fpp .01
2069 ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
2070
2071 ceph osd pool set real-tier target_max_objects 123
2072 ceph osd pool get real-tier target_max_objects | \
2073 grep 'target_max_objects:[ \t]\+123'
2074 ceph osd pool set real-tier target_max_bytes 123456
2075 ceph osd pool get real-tier target_max_bytes | \
2076 grep 'target_max_bytes:[ \t]\+123456'
2077 ceph osd pool set real-tier cache_target_dirty_ratio .123
2078 ceph osd pool get real-tier cache_target_dirty_ratio | \
2079 grep 'cache_target_dirty_ratio:[ \t]\+0.123'
2080 expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
2081 expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
2082 ceph osd pool set real-tier cache_target_dirty_high_ratio .123
2083 ceph osd pool get real-tier cache_target_dirty_high_ratio | \
2084 grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
2085 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
2086 expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
2087 ceph osd pool set real-tier cache_target_full_ratio .123
2088 ceph osd pool get real-tier cache_target_full_ratio | \
2089 grep 'cache_target_full_ratio:[ \t]\+0.123'
2090 ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
2091 ceph osd pool set real-tier cache_target_full_ratio 1.0
2092 ceph osd pool set real-tier cache_target_full_ratio 0
2093 expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
2094 ceph osd pool set real-tier cache_min_flush_age 123
2095 ceph osd pool get real-tier cache_min_flush_age | \
2096 grep 'cache_min_flush_age:[ \t]\+123'
2097 ceph osd pool set real-tier cache_min_evict_age 234
2098 ceph osd pool get real-tier cache_min_evict_age | \
2099 grep 'cache_min_evict_age:[ \t]\+234'
2100
2101 # this is not a tier pool
2102 ceph osd pool create fake-tier 2
c07f9fc5 2103 ceph osd pool application enable fake-tier rados
7c673cae
FG
2104 wait_for_clean
2105
2106 expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
2107 expect_false ceph osd pool get fake-tier hit_set_type
2108 expect_false ceph osd pool set fake-tier hit_set_type explicit_object
2109 expect_false ceph osd pool get fake-tier hit_set_type
2110 expect_false ceph osd pool set fake-tier hit_set_type bloom
2111 expect_false ceph osd pool get fake-tier hit_set_type
2112 expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
2113 expect_false ceph osd pool set fake-tier hit_set_period 123
2114 expect_false ceph osd pool get fake-tier hit_set_period
2115 expect_false ceph osd pool set fake-tier hit_set_count 12
2116 expect_false ceph osd pool get fake-tier hit_set_count
2117 expect_false ceph osd pool set fake-tier hit_set_fpp .01
2118 expect_false ceph osd pool get fake-tier hit_set_fpp
2119
2120 expect_false ceph osd pool set fake-tier target_max_objects 123
2121 expect_false ceph osd pool get fake-tier target_max_objects
2122 expect_false ceph osd pool set fake-tier target_max_bytes 123456
2123 expect_false ceph osd pool get fake-tier target_max_bytes
2124 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
2125 expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
2126 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
2127 expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
2128 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
2129 expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
2130 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
2131 expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
2132 expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
2133 expect_false ceph osd pool get fake-tier cache_target_full_ratio
2134 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
2135 expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
2136 expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
2137 expect_false ceph osd pool set fake-tier cache_min_flush_age 123
2138 expect_false ceph osd pool get fake-tier cache_min_flush_age
2139 expect_false ceph osd pool set fake-tier cache_min_evict_age 234
2140 expect_false ceph osd pool get fake-tier cache_min_evict_age
2141
2142 ceph osd tier remove rbd real-tier
2143 ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
2144 ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
2145}
2146
2147function test_mon_osd_erasure_code()
2148{
2149
2150 ceph osd erasure-code-profile set fooprofile a=b c=d
2151 ceph osd erasure-code-profile set fooprofile a=b c=d
2152 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2153 ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
2154 ceph osd erasure-code-profile set fooprofile a=b c=d e=f
2155 expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
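  # (note, as exercised above) re-setting a profile with identical values
  # is allowed, but changing an existing profile requires --force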
3efd9988
FG
2156 # the legacy ruleset-* option names are accepted in luminous only
2157 ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
2158 ceph osd erasure-code-profile set barprofile crush-failure-domain=host
2159 # clean up
7c673cae 2160 ceph osd erasure-code-profile rm fooprofile
3efd9988 2161 ceph osd erasure-code-profile rm barprofile
7c673cae
FG
2162}
2163
2164function test_mon_osd_misc()
2165{
2166 set +e
2167
2168 # expect error about missing 'pool' argument
2169 ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
2170
2171 # expect error about unused argument foo
2172 ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
2173
2174 # expect "not in range" for invalid full ratio
2175 ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22
2176
2177 # expect 'higher than 100' for an invalid overload percentage
2178 ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
2179
2180 set -e
2181
2182 ceph osd reweight-by-utilization 110
2183 ceph osd reweight-by-utilization 110 .5
2184 expect_false ceph osd reweight-by-utilization 110 0
2185 expect_false ceph osd reweight-by-utilization 110 -0.1
2186 ceph osd test-reweight-by-utilization 110 .5 --no-increasing
2187 ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
2188 expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
2189 expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
2190 ceph osd reweight-by-pg 110
2191 ceph osd test-reweight-by-pg 110 .5
2192 ceph osd reweight-by-pg 110 rbd
2193 ceph osd reweight-by-pg 110 .5 rbd
2194 expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
2195}
2196
2197function test_mon_heap_profiler()
2198{
2199 do_test=1
2200 set +e
2201 # expect 'heap' commands to be correctly parsed
2202 ceph heap stats 2>$TMPFILE
2203 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2204 echo "tcmalloc not enabled; skip heap profiler test"
2205 do_test=0
2206 fi
2207 set -e
2208
2209 [[ $do_test -eq 0 ]] && return 0
2210
2211 ceph heap start_profiler
2212 ceph heap dump
2213 ceph heap stop_profiler
2214 ceph heap release
2215}
2216
2217function test_admin_heap_profiler()
2218{
2219 do_test=1
2220 set +e
2221 # expect 'heap' commands to be correctly parsed
2222 ceph heap stats 2>$TMPFILE
2223 if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
2224 echo "tcmalloc not enabled; skip heap profiler test"
2225 do_test=0
2226 fi
2227 set -e
2228
2229 [[ $do_test -eq 0 ]] && return 0
2230
2231 local admin_socket=$(get_admin_socket osd.0)
2232
2233 $SUDO ceph --admin-daemon $admin_socket heap start_profiler
2234 $SUDO ceph --admin-daemon $admin_socket heap dump
2235 $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
2236 $SUDO ceph --admin-daemon $admin_socket heap release
2237}
2238
2239function test_osd_bench()
2240{
2241 # test osd bench limits
2242 # As defaults may change over time, we should not rely on them;
 2243 # let's inject some values and perform some simple tests
 2244 # max iops: 10 # 10 IOPS
2245 # max throughput: 10485760 # 10MB/s
2246 # max block size: 2097152 # 2MB
2247 # duration: 10 # 10 seconds
2248
2249 local args="\
2250 --osd-bench-duration 10 \
2251 --osd-bench-max-block-size 2097152 \
2252 --osd-bench-large-size-max-throughput 10485760 \
2253 --osd-bench-small-size-max-iops 10"
2254 ceph tell osd.0 injectargs ${args## }
2255
2256 # anything with a bs larger than 2097152 must fail
2257 expect_false ceph tell osd.0 bench 1 2097153
2258 # but a bs equal to the injected osd-bench-max-block-size must succeed
2259 ceph tell osd.0 bench 1 2097152
2260
2261 # we assume 1MB as a large bs; anything lower is a small bs
2262 # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
2263 # max count: 409600 (bytes)
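  # (illustration only) where 409600 comes from, given the injected limits:
  #   10 IOPS * 10 s * 4096 B per op = 409600 B
  [ $((10 * 10 * 4096)) -eq 409600 ]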
2264
2265 # more than max count must not be allowed
2266 expect_false ceph tell osd.0 bench 409601 4096
2267 # but 409600 must succeed
2268 ceph tell osd.0 bench 409600 4096
2269
2270 # for a large bs, we are limited by throughput.
2271 # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
2272 # the max count will be (10MB * 10s) = 100MB
2273 # max count: 104857600 (bytes)
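  # (illustration only) where 104857600 comes from, given the injected limits:
  #   10485760 B/s * 10 s = 104857600 B
  [ $((10485760 * 10)) -eq 104857600 ]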
2274
2275 # more than max count must not be allowed
2276 expect_false ceph tell osd.0 bench 104857601 2097152
2277 # up to max count must be allowed
2278 ceph tell osd.0 bench 104857600 2097152
2279}
2280
2281function test_osd_negative_filestore_merge_threshold()
2282{
2283 $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
2284 expect_config_value "osd.0" "filestore_merge_threshold" -1
2285}
2286
2287function test_mon_tell()
2288{
2289 ceph tell mon.a version
2290 ceph tell mon.b version
2291 expect_false ceph tell mon.foo version
2292
2293 sleep 1
2294
c07f9fc5 2295 ceph_watch_start debug audit
7c673cae 2296 ceph tell mon.a version
31f18b77 2297 ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
7c673cae 2298
c07f9fc5 2299 ceph_watch_start debug audit
7c673cae 2300 ceph tell mon.b version
31f18b77 2301 ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
7c673cae
FG
2302}
2303
7c673cae
FG
2304function test_mon_ping()
2305{
2306 ceph ping mon.a
2307 ceph ping mon.b
2308 expect_false ceph ping mon.foo
2309
2310 ceph ping mon.\*
2311}
2312
2313function test_mon_deprecated_commands()
2314{
2315 # current DEPRECATED commands are:
2316 # ceph compact
2317 # ceph scrub
2318 # ceph sync force
2319 #
2320 # Testing should be accomplished by setting
2321 # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
2322 # each one of these commands.
2323
2324 ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
2325 expect_false ceph tell mon.a compact 2> $TMPFILE
2326 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2327
2328 expect_false ceph tell mon.a scrub 2> $TMPFILE
2329 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2330
2331 expect_false ceph tell mon.a sync force 2> $TMPFILE
2332 check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
2333
2334 ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
2335}
2336
2337function test_mon_cephdf_commands()
2338{
2339 # ceph df detail:
2340 # pool section:
2341 # RAW USED: the approximate raw space consumed per pool (roughly bytes_used times the replica count)
2342
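  # (clarifying sketch) with the replicated pool of size 2 created below,
  # the check at the end of this function expects roughly
  #   raw_bytes_used ~= bytes_used * 2
  # with both values read from `ceph df detail --format=json` via jq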
2343 ceph osd pool create cephdf_for_test 32 32 replicated
c07f9fc5 2344 ceph osd pool application enable cephdf_for_test rados
7c673cae
FG
2345 ceph osd pool set cephdf_for_test size 2
2346
2347 dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
2348 rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
2349
2350 # wait for the update to become visible
2351 for i in `seq 1 10`; do
2352 rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
2353 sleep 1
2354 done
31f18b77
FG
2355 # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
2356 # to sync mon with osd
2357 flush_pg_stats
2358 local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
2359 cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
2360 raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`
7c673cae
FG
2361
2362 ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
2363 rm ./cephdf_for_test
2364
2365 expect_false test $cal_raw_used_size != $raw_used_size
2366}
2367
c07f9fc5
FG
2368function test_mon_pool_application()
2369{
2370 ceph osd pool create app_for_test 10
2371
2372 ceph osd pool application enable app_for_test rbd
2373 expect_false ceph osd pool application enable app_for_test rgw
2374 ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
2375 ceph osd pool ls detail | grep "application rbd,rgw"
2376 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2377
2378 expect_false ceph osd pool application set app_for_test cephfs key value
2379 ceph osd pool application set app_for_test rbd key1 value1
2380 ceph osd pool application set app_for_test rbd key2 value2
2381 ceph osd pool application set app_for_test rgw key1 value1
181888fb
FG
2382 ceph osd pool application get app_for_test rbd key1 | grep 'value1'
2383 ceph osd pool application get app_for_test rbd key2 | grep 'value2'
2384 ceph osd pool application get app_for_test rgw key1 | grep 'value1'
c07f9fc5
FG
2385
2386 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
2387
2388 ceph osd pool application rm app_for_test rgw key1
2389 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
2390 ceph osd pool application rm app_for_test rbd key2
2391 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
2392 ceph osd pool application rm app_for_test rbd key1
2393 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
2394 ceph osd pool application rm app_for_test rbd key1 # should be idempotent
2395
2396 expect_false ceph osd pool application disable app_for_test rgw
2397 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2398 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
2399 ceph osd pool ls detail | grep "application rbd"
2400 ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
2401
2402 ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
2403 ceph osd pool ls detail | grep -v "application "
2404 ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
2405
2406 ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
2407}
2408
31f18b77
FG
2409function test_mon_tell_help_command()
2410{
2411 ceph tell mon.a help
2412
2413 # wrong target
2414 expect_false ceph tell mon.zzz help
2415}
2416
c07f9fc5
FG
2417function test_mon_stdin_stdout()
2418{
2419 echo foo | ceph config-key set test_key -i -
2420 ceph config-key get test_key -o - | grep -c foo | grep -q 1
2421}
2422
31f18b77
FG
2423function test_osd_tell_help_command()
2424{
2425 ceph tell osd.1 help
2426 expect_false ceph tell osd.100 help
2427}
2428
224ce89b
WB
2429function test_osd_compact()
2430{
2431 ceph tell osd.1 compact
c07f9fc5 2432 $SUDO ceph daemon osd.1 compact
224ce89b
WB
2433}
2434
31f18b77
FG
2435function test_mds_tell_help_command()
2436{
2437 local FS_NAME=cephfs
2438 if ! mds_exists ; then
2439 echo "Skipping test, no MDS found"
2440 return
2441 fi
2442
2443 remove_all_fs
2444 ceph osd pool create fs_data 10
2445 ceph osd pool create fs_metadata 10
2446 ceph fs new $FS_NAME fs_metadata fs_data
2447 wait_mds_active $FS_NAME
2448
2449
2450 ceph tell mds.a help
2451 expect_false ceph tell mds.z help
2452
2453 remove_all_fs
2454 ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
2455 ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
2456}
2457
224ce89b 2458function test_mgr_tell()
31f18b77
FG
2459{
2460 ceph tell mgr help
c07f9fc5 2461 #ceph tell mgr fs status # see http://tracker.ceph.com/issues/20761
224ce89b 2462 ceph tell mgr osd status
31f18b77
FG
2463}
2464
7c673cae
FG
2465#
2466# New tests should be added to the TESTS array below
2467#
2468# Individual tests may be run using the '-t <testname>' argument
2469# The user can specify '-t <testname>' as many times as she wants
2470#
2471 # Tests will be run in the order presented in the TESTS array, or in
2472# the order specified by the '-t <testname>' options.
2473#
2474# '-l' will list all the available test names
2475# '-h' will show usage
2476#
2477# The test maintains backward compatibility: not specifying arguments
2478# will run all tests following the order they appear in the TESTS array.
2479#
2480
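# Example invocations (the script path is just a placeholder):
#   ./test.sh -t osd_bench -t mon_ping   # run two specific tests
#   ./test.sh -l                         # list all available test names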
2481set +x
2482MON_TESTS+=" mon_injectargs"
2483MON_TESTS+=" mon_injectargs_SI"
31f18b77
FG
2484for i in `seq 9`; do
2485 MON_TESTS+=" tiering_$i";
2486done
7c673cae
FG
2487MON_TESTS+=" auth"
2488MON_TESTS+=" auth_profiles"
2489MON_TESTS+=" mon_misc"
2490MON_TESTS+=" mon_mon"
2491MON_TESTS+=" mon_osd"
c07f9fc5 2492MON_TESTS+=" mon_config_key"
31f18b77
FG
2493MON_TESTS+=" mon_crush"
2494MON_TESTS+=" mon_osd_create_destroy"
7c673cae
FG
2495MON_TESTS+=" mon_osd_pool"
2496MON_TESTS+=" mon_osd_pool_quota"
2497MON_TESTS+=" mon_pg"
2498MON_TESTS+=" mon_osd_pool_set"
2499MON_TESTS+=" mon_osd_tiered_pool_set"
2500MON_TESTS+=" mon_osd_erasure_code"
2501MON_TESTS+=" mon_osd_misc"
2502MON_TESTS+=" mon_heap_profiler"
2503MON_TESTS+=" mon_tell"
7c673cae
FG
2504MON_TESTS+=" mon_ping"
2505MON_TESTS+=" mon_deprecated_commands"
2506MON_TESTS+=" mon_caps"
2507MON_TESTS+=" mon_cephdf_commands"
31f18b77 2508MON_TESTS+=" mon_tell_help_command"
c07f9fc5 2509MON_TESTS+=" mon_stdin_stdout"
31f18b77 2510
7c673cae
FG
2511OSD_TESTS+=" osd_bench"
2512OSD_TESTS+=" osd_negative_filestore_merge_threshold"
2513OSD_TESTS+=" tiering_agent"
2514OSD_TESTS+=" admin_heap_profiler"
31f18b77 2515OSD_TESTS+=" osd_tell_help_command"
224ce89b 2516OSD_TESTS+=" osd_compact"
7c673cae
FG
2517
2518MDS_TESTS+=" mds_tell"
2519MDS_TESTS+=" mon_mds"
2520MDS_TESTS+=" mon_mds_metadata"
31f18b77
FG
2521MDS_TESTS+=" mds_tell_help_command"
2522
224ce89b 2523MGR_TESTS+=" mgr_tell"
7c673cae
FG
2524
2525TESTS+=$MON_TESTS
2526TESTS+=$OSD_TESTS
2527TESTS+=$MDS_TESTS
31f18b77 2528TESTS+=$MGR_TESTS
7c673cae
FG
2529
2530#
2531# "main" follows
2532#
2533
2534function list_tests()
2535{
2536 echo "AVAILABLE TESTS"
2537 for i in $TESTS; do
2538 echo " $i"
2539 done
2540}
2541
2542function usage()
2543{
2544 echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
2545}
2546
2547tests_to_run=()
2548
2549sanity_check=true
2550
2551while [[ $# -gt 0 ]]; do
2552 opt=$1
2553
2554 case "$opt" in
2555 "-l" )
2556 do_list=1
2557 ;;
2558 "--asok-does-not-need-root" )
2559 SUDO=""
2560 ;;
2561 "--no-sanity-check" )
2562 sanity_check=false
2563 ;;
2564 "--test-mon" )
2565 tests_to_run+="$MON_TESTS"
2566 ;;
2567 "--test-osd" )
2568 tests_to_run+="$OSD_TESTS"
2569 ;;
2570 "--test-mds" )
2571 tests_to_run+="$MDS_TESTS"
2572 ;;
31f18b77
FG
2573 "--test-mgr" )
2574 tests_to_run+="$MGR_TESTS"
2575 ;;
7c673cae
FG
2576 "-t" )
2577 shift
2578 if [[ -z "$1" ]]; then
2579 echo "missing argument to '-t'"
2580 usage ;
2581 exit 1
2582 fi
2583 tests_to_run+=" $1"
2584 ;;
2585 "-h" )
2586 usage ;
2587 exit 0
2588 ;;
2589 esac
2590 shift
2591done
2592
2593if [[ $do_list -eq 1 ]]; then
2594 list_tests ;
2595 exit 0
2596fi
2597
224ce89b
WB
2598ceph osd pool create rbd 10
2599
7c673cae
FG
2600if test -z "$tests_to_run" ; then
2601 tests_to_run="$TESTS"
2602fi
2603
2604if $sanity_check ; then
2605 wait_no_osd_down
2606fi
2607for i in $tests_to_run; do
2608 if $sanity_check ; then
2609 check_no_osd_down
2610 fi
2611 set -x
2612 test_${i}
2613 set +x
2614done
2615if $sanity_check ; then
2616 check_no_osd_down
2617fi
2618
2619set -x
2620
2621echo OK