# cephtool test script (shell); blame/table residue from the paste removed.
#!/usr/bin/env bash
# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab
set -x

# Shared test helpers (expect_failure, flush_pg_stats, ...).
# Quoted so a checkout path containing spaces cannot word-split.
source "$(dirname "$0")/../../standalone/ceph-helpers.sh"

set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}
export CEPH_DEV=1
# Succeed only when `ceph osd dump` reports no OSD as " down ".
# Matching lines (if any) are intentionally left on stdout for the log.
function check_no_osd_down()
{
    ! ceph osd dump | grep ' down '
}
18 | ||
# Poll for up to $max_run seconds until no OSD is reported down, then
# assert that state one final time (fails the run under `set -e`).
function wait_no_osd_down()
{
    # `local` keeps the counter and limit from leaking into the global scope.
    local max_run=300
    local i
    for i in $(seq 1 $max_run) ; do
	if ! check_no_osd_down ; then
	    echo "waiting for osd(s) to come back up ($i/$max_run)"
	    sleep 1
	else
	    break
	fi
    done
    check_no_osd_down
}
32 | ||
# Run "$@" and invert its status: return 0 when the command fails,
# 1 when it (unexpectedly) succeeds.
function expect_false()
{
    set -x
    ! "$@"
}
38 | ||
# Run "$@"; return 0 on success and exactly 1 on any failure
# (the command's own non-zero status is normalized to 1).
function expect_true()
{
    set -x
    "$@" && return 0
    return 1
}
7c673cae | 44 | |
# Scratch area for the whole run; removed on exit by the trap below.
TEMP_DIR=$(mktemp -d "${TMPDIR-/tmp}/cephtool.XXX")
# Single quotes defer expansion to trap time and keep the path intact
# even if it contains spaces.
trap 'rm -fr "$TEMP_DIR"' 0

# Capture file for stderr of commands whose output check_response() inspects.
TMPFILE=$(mktemp "$TEMP_DIR/test_invalid.XXX")
49 | ||
#
# retry_eagain max cmd args ...
#
# Retry "cmd args ..." up to $max times while it exits non-zero AND its
# combined stdout/stderr contains the string EAGAIN.  The last attempt's
# output is replayed on stdout and its exit status is returned.
#
function retry_eagain()
{
    local max=$1
    shift
    local status
    local tmpfile=$TEMP_DIR/retry_eagain.$$
    local count
    for count in $(seq 1 "$max") ; do
	status=0
	"$@" > "$tmpfile" 2>&1 || status=$?
	if test $status = 0 ||
	    ! grep --quiet EAGAIN "$tmpfile" ; then
	    break
	fi
	sleep 1
    done
    # Fix: only warn when the retry budget was exhausted AND the final
    # attempt still failed; the old check fired even after a last-try success.
    if test "$count" = "$max" && test "$status" != 0 ; then
	echo retried with non zero exit status, $max times: "$@" >&2
    fi
    cat "$tmpfile"
    rm "$tmpfile"
    return $status
}
79 | ||
#
# map_enxio_to_eagain cmd arg ...
#
# Run "cmd arg ..."; if it fails and its output contains ENXIO, append a
# line containing EAGAIN (so retry_eagain will retry it).  The output is
# replayed on stdout and the command's status propagated.
#
function map_enxio_to_eagain()
{
    local status=0
    local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$

    "$@" > "$tmpfile" 2>&1 || status=$?
    if test $status != 0 &&
	grep --quiet ENXIO "$tmpfile" ; then
	echo "EAGAIN added by $0::map_enxio_to_eagain" >> "$tmpfile"
    fi
    cat "$tmpfile"
    rm "$tmpfile"
    return $status
}
100 | ||
# check_response expected_string [retcode [expected_retcode]]
#
# Assert that $TMPFILE contains $expected_string and, when an expected
# return code is given, that it matches.  exit 1 on mismatch.
function check_response()
{
    expected_string=$1
    retcode=$2
    expected_retcode=$3
    # Two [ ] tests joined by && instead of the deprecated, ambiguous "-a",
    # and $retcode quoted so an empty value cannot break the test syntax.
    if [ -n "$expected_retcode" ] && [ "$retcode" != "$expected_retcode" ] ; then
	echo "return code invalid: got $retcode, expected $expected_retcode" >&2
	exit 1
    fi

    if ! grep --quiet -- "$expected_string" "$TMPFILE" ; then
	echo "Didn't find $expected_string in output" >&2
	cat "$TMPFILE" >&2
	exit 1
    fi
}
117 | ||
# get_config_value_or_die target config_opt
#
# Print the value of $config_opt as reported by the daemon admin socket
# of $target (e.g. "mon.a"); exit 1 if the query fails.
function get_config_value_or_die()
{
    local target config_opt raw val

    target=$1
    config_opt=$2

    # $() instead of backticks; $? right after the assignment still
    # reflects the command substitution's exit status.
    raw=$($SUDO ceph daemon "$target" config get "$config_opt" 2>/dev/null)
    if [[ $? -ne 0 ]]; then
	echo "error obtaining config opt '$config_opt' from '$target': $raw"
	exit 1
    fi

    # Strip the JSON punctuation ({} " and spaces), then keep what follows ':'.
    raw=$(echo $raw | sed -e 's/[{} "]//g')
    val=$(echo $raw | cut -f2 -d:)

    echo "$val"
    return 0
}
137 | ||
# expect_config_value target config_opt expected_val
#
# Fetch $config_opt from $target and exit 1 unless it equals $expected_val.
function expect_config_value()
{
    local target config_opt expected_val val
    target=$1
    config_opt=$2
    expected_val=$3

    # Quoted so empty/odd arguments reach the helper intact.
    val=$(get_config_value_or_die "$target" "$config_opt")

    if [[ "$val" != "$expected_val" ]]; then
	echo "expected '$expected_val', got '$val'"
	exit 1
    fi
}
152 | ||
# ceph_watch_start [level [channel]]
#
# Start `ceph --watch[-<level>] [--watch-channel <channel>]` in the
# background, recording its output file in CEPH_WATCH_FILE and its pid
# in CEPH_WATCH_PID, then wait briefly for the client to connect.
function ceph_watch_start()
{
    # Built as a single string and expanded UNQUOTED below on purpose:
    # the embedded space must split into separate ceph arguments.
    local watch_opt=--watch

    if [ -n "$1" ]; then
	watch_opt=--watch-$1
	if [ -n "$2" ]; then
	    watch_opt+=" --watch-channel $2"
	fi
    fi

    CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
    ceph $watch_opt > $CEPH_WATCH_FILE &
    CEPH_WATCH_PID=$!

    # wait until the "ceph" client is connected and receiving
    # log messages from monitor
    for i in $(seq 3); do
	grep -q "cluster" $CEPH_WATCH_FILE && break
	sleep 1
    done
}
175 | ||
# ceph_watch_wait regexp [timeout]
#
# Wait up to $timeout seconds (default 30) for $regexp to appear in the
# watch file started by ceph_watch_start, then stop the watcher.
# Returns 1 (after dumping the file) if the pattern never appeared.
function ceph_watch_wait()
{
    local regexp=$1
    local timeout=${2:-30}

    for i in $(seq ${timeout}); do
	grep -q "$regexp" $CEPH_WATCH_FILE && break
	sleep 1
    done

    # NOTE(review): if the watcher already exited, this kill fails and,
    # with `set -e` active, aborts the run — pre-existing behavior.
    kill $CEPH_WATCH_PID

    if ! grep "$regexp" $CEPH_WATCH_FILE; then
	echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
	cat $CEPH_WATCH_FILE >&2
	return 1
    fi
}
198 | ||
# Exercise `ceph tell <daemon> injectargs` argument parsing: boolean
# flags, multi-option strings, '--' separators, and invalid values.
function test_mon_injectargs()
{
    ceph tell osd.0 injectargs --no-osd_enable_op_tracker
    ceph tell osd.0 config get osd_enable_op_tracker | grep false
    ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500'
    ceph tell osd.0 config get osd_enable_op_tracker | grep true
    ceph tell osd.0 config get osd_op_history_duration | grep 500
    ceph tell osd.0 injectargs --no-osd_enable_op_tracker
    ceph tell osd.0 config get osd_enable_op_tracker | grep false
    ceph tell osd.0 injectargs -- --osd_enable_op_tracker
    ceph tell osd.0 config get osd_enable_op_tracker | grep true
    ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600'
    ceph tell osd.0 config get osd_enable_op_tracker | grep true
    ceph tell osd.0 config get osd_op_history_duration | grep 600

    ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200'
    ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200

    ceph tell osd.0 injectargs -- '--mon_probe_timeout 2'
    ceph tell osd.0 config get mon_probe_timeout | grep 2

    ceph tell osd.0 injectargs -- '--mon-lease 6'
    ceph tell osd.0 config get mon_lease | grep 6

    # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
    expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 2> $TMPFILE || return 1
    check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"

    # expect_failure comes from ceph-helpers.sh, sourced at the top of the file
    expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
	ceph tell osd.0 injectargs -- '--osd_op_history_duration'
}
231 | ||
function test_mon_injectargs_SI()
{
    # Test SI units during injectargs and 'config set'.
    # We only aim at testing that the units are parsed accordingly and do
    # not intend to test whether the options being set actually expect SI
    # units.  All integer-based options NOT based on bytes (INT, LONG,
    # U32, U64) accept SI unit modifiers and are parsed to base 10.
    initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
    $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
    expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
    $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
    expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
    $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
    expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
    # 'F' is not a valid unit suffix
    $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
    check_response "(22) Invalid argument"
    # now test with injectargs
    ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
    expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
    ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
    expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
    ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
    expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
    expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
    expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
    # restore the original value
    $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}
261 | ||
function test_mon_injectargs_IEC()
{
    # Test IEC units during injectargs and 'config set'.
    # We only aim at testing that the units are parsed accordingly and do
    # not intend to test whether the options being set actually expect IEC
    # units.  All integer-based options that ARE based on bytes (INT,
    # LONG, U32, U64) accept IEC unit modifiers, as well as SI modifiers
    # (for backwards compatibility and convenience), parsed to base 2.
    initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
    $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
    expect_config_value "mon.a" "mon_data_size_warn" 15000000000
    $SUDO ceph daemon mon.a config set mon_data_size_warn 15G
    expect_config_value "mon.a" "mon_data_size_warn" 16106127360
    $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
    expect_config_value "mon.a" "mon_data_size_warn" 17179869184
    # 'F' is not a valid unit suffix
    $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
    check_response "(22) Invalid argument"
    # now test with injectargs
    ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
    expect_config_value "mon.a" "mon_data_size_warn" 15000000000
    ceph tell mon.a injectargs '--mon_data_size_warn 15G'
    expect_config_value "mon.a" "mon_data_size_warn" 16106127360
    ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
    expect_config_value "mon.a" "mon_data_size_warn" 17179869184
    expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
    # restore the original value
    $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
}
291 | ||
# Poll the given cache pool for up to 300s until obj1 has been evicted;
# non-zero on timeout (fails the caller under `set -e`).
function __wait_for_obj1_eviction()
{
    local pool=$1
    local i
    for i in $(seq 1 300) ; do
	if ! rados -p "$pool" ls | grep obj1 ; then
	    return 0
	fi
	sleep 1
    done
    return 1
}

# Verify the tiering agent evicts objects from a writeback cache tier
# once target_max_objects is exceeded, including after a re-promotion.
function test_tiering_agent()
{
    local slow=slow_eviction
    local fast=fast_eviction
    ceph osd pool create $slow 1 1
    ceph osd pool application enable $slow rados
    ceph osd pool create $fast 1 1
    ceph osd tier add $slow $fast
    ceph osd tier cache-mode $fast writeback
    ceph osd tier set-overlay $slow $fast
    ceph osd pool set $fast hit_set_type bloom
    rados -p $slow put obj1 /etc/group
    ceph osd pool set $fast target_max_objects 1
    ceph osd pool set $fast hit_set_count 1
    ceph osd pool set $fast hit_set_period 5
    # wait for the object to be evicted from the cache
    __wait_for_obj1_eviction $fast
    # the object is proxy read and promoted to the cache
    rados -p $slow get obj1 - >/dev/null
    # wait for the promoted object to be evicted again
    __wait_for_obj1_eviction $fast
    ceph osd tier remove-overlay $slow
    ceph osd tier remove $slow $fast
    ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
    ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}
335 | ||
# Core cache-tiering checks: tier add/remove, cache-mode state machine,
# overlay handling, and the non-empty-pool safety rails.
function test_tiering_1()
{
    # tiering
    ceph osd pool create slow 2
    ceph osd pool application enable slow rados
    ceph osd pool create slow2 2
    ceph osd pool application enable slow2 rados
    ceph osd pool create cache 2
    ceph osd pool create cache2 2
    ceph osd tier add slow cache
    ceph osd tier add slow cache2
    expect_false ceph osd tier add slow2 cache
    # application metadata should propagate to the tiers
    ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow") | .application_metadata["rados"]' | grep '{}'
    ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow2") | .application_metadata["rados"]' | grep '{}'
    ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache") | .application_metadata["rados"]' | grep '{}'
    ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache2") | .application_metadata["rados"]' | grep '{}'
    # forward and proxy are removed/deprecated
    expect_false ceph osd tier cache-mode cache forward
    expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
    expect_false ceph osd tier cache-mode cache proxy
    expect_false ceph osd tier cache-mode cache proxy --yes-i-really-mean-it
    # test some state transitions
    ceph osd tier cache-mode cache writeback
    expect_false ceph osd tier cache-mode cache readonly
    expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
    ceph osd tier cache-mode cache readproxy
    ceph osd tier cache-mode cache none
    ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
    ceph osd tier cache-mode cache none
    ceph osd tier cache-mode cache writeback
    expect_false ceph osd tier cache-mode cache none
    expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
    # test with dirty objects in the tier pool
    # tier pool currently set to 'writeback'
    rados -p cache put /etc/passwd /etc/passwd
    flush_pg_stats
    # 1 dirty object in pool 'cache'
    ceph osd tier cache-mode cache readproxy
    expect_false ceph osd tier cache-mode cache none
    expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
    ceph osd tier cache-mode cache writeback
    # remove object from tier pool
    rados -p cache rm /etc/passwd
    rados -p cache cache-flush-evict-all
    flush_pg_stats
    # no dirty objects in pool 'cache'
    ceph osd tier cache-mode cache readproxy
    ceph osd tier cache-mode cache none
    ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
    # pg_num changes may be refused while PGs are still being created;
    # retry for up to 60 * 3 seconds
    TRIES=0
    while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
    do
	grep 'currently creating pgs' $TMPFILE
	TRIES=$(( $TRIES + 1 ))
	test $TRIES -ne 60
	sleep 3
    done
    expect_false ceph osd pool set cache pg_num 4
    ceph osd tier cache-mode cache none
    ceph osd tier set-overlay slow cache
    expect_false ceph osd tier set-overlay slow cache2
    expect_false ceph osd tier remove slow cache
    ceph osd tier remove-overlay slow
    ceph osd tier set-overlay slow cache2
    ceph osd tier remove-overlay slow
    ceph osd tier remove slow cache
    ceph osd tier add slow2 cache
    expect_false ceph osd tier set-overlay slow cache
    ceph osd tier set-overlay slow2 cache
    ceph osd tier remove-overlay slow2
    ceph osd tier remove slow2 cache
    ceph osd tier remove slow cache2

    # make sure a non-empty pool fails
    rados -p cache2 put /etc/passwd /etc/passwd
    while ! ceph df | grep cache2 | grep ' 1 ' ; do
	echo waiting for pg stats to flush
	sleep 2
    done
    expect_false ceph osd tier add slow cache2
    ceph osd tier add slow cache2 --force-nonempty
    ceph osd tier remove slow cache2

    ceph osd pool ls | grep cache2
    ceph osd pool ls -f json-pretty | grep cache2
    ceph osd pool ls detail | grep cache2
    ceph osd pool ls detail -f json-pretty | grep cache2

    ceph osd pool delete slow slow --yes-i-really-really-mean-it
    ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
    ceph osd pool delete cache cache --yes-i-really-really-mean-it
    ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
}
7c673cae | 430 | |
# A pool that already has snapshots must be rejected as a cache tier.
function test_tiering_2()
{
    # make sure we can't clobber snapshot state
    ceph osd pool create snap_base 2
    ceph osd pool application enable snap_base rados
    ceph osd pool create snap_cache 2
    ceph osd pool mksnap snap_cache snapname
    expect_false ceph osd tier add snap_base snap_cache
    ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
    ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}
7c673cae | 442 | |
# Pool snapshots must be refused on a pool that is acting as a tier.
function test_tiering_3()
{
    # make sure we can't create snapshot on tier
    ceph osd pool create basex 2
    ceph osd pool application enable basex rados
    ceph osd pool create cachex 2
    ceph osd tier add basex cachex
    # Bug fix: snapshot the tier pool that was actually created ("cachex").
    # The old command targeted a non-existent pool "cache", so the
    # expect_false passed for the wrong reason (unknown pool, not
    # tier-snapshot protection).
    expect_false ceph osd pool mksnap cachex snapname
    ceph osd tier remove basex cachex
    ceph osd pool delete basex basex --yes-i-really-really-mean-it
    ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}
7c673cae | 455 | |
# An erasure-coded pool must be refused as a cache tier.
function test_tiering_4()
{
    # make sure we can't create an ec pool tier
    ceph osd pool create eccache 2 2 erasure
    expect_false ceph osd set-require-min-compat-client bobtail
    ceph osd pool create repbase 2
    ceph osd pool application enable repbase rados
    expect_false ceph osd tier add repbase eccache
    ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
    ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}
7c673cae | 467 | |
# The one-shot `osd tier add-cache` convenience command and the
# remove-overlay-before-remove safety check.
function test_tiering_5()
{
    # convenient add-cache command
    ceph osd pool create slow 2
    ceph osd pool application enable slow rados
    ceph osd pool create cache3 2
    ceph osd tier add-cache slow cache3 1024000
    ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
    # removing the tier while it is still the overlay must fail
    ceph osd tier remove slow cache3 2> $TMPFILE || true
    check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
    ceph osd tier remove-overlay slow
    ceph osd tier remove slow cache3
    ceph osd pool ls | grep cache3
    ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
    ! ceph osd pool ls | grep cache3 || exit 1
    ceph osd pool delete slow slow --yes-i-really-really-mean-it
}
7c673cae | 485 | |
# End-to-end check that a pool added via add-cache really caches writes:
# put through the base pool, observe the object in the cache, flush it back.
function test_tiering_6()
{
    # check add-cache whether work
    ceph osd pool create datapool 2
    ceph osd pool application enable datapool rados
    ceph osd pool create cachepool 2
    ceph osd tier add-cache datapool cachepool 1024000
    ceph osd tier cache-mode cachepool writeback
    rados -p datapool put object /etc/passwd
    rados -p cachepool stat object
    rados -p cachepool cache-flush object
    rados -p datapool stat object
    ceph osd tier remove-overlay datapool
    ceph osd tier remove datapool cachepool
    ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
    ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
7c673cae | 503 | |
# Pools participating in tiering must be protected from deletion
# until the tier relationship is torn down.
function test_tiering_7()
{
    # protection against pool removal when used as tiers
    ceph osd pool create datapool 2
    ceph osd pool application enable datapool rados
    ceph osd pool create cachepool 2
    ceph osd tier add-cache datapool cachepool 1024000
    ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
    check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
    ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
    check_response "EBUSY: pool 'datapool' has tiers cachepool"
    ceph osd tier remove-overlay datapool
    ceph osd tier remove datapool cachepool
    ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
    ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
7c673cae | 520 | |
# Health/statistics check with the tiering agent disabled (notieragent):
# objects written to the cache must show up in `ceph df` for the base pool.
function test_tiering_8()
{
    ## check health check
    ceph osd set notieragent
    ceph osd pool create datapool 2
    ceph osd pool application enable datapool rados
    ceph osd pool create cache4 2
    ceph osd tier add-cache datapool cache4 1024000
    ceph osd tier cache-mode cache4 writeback
    # `| grep tmp` keeps the pre-existing sanity filter on the mktemp path
    tmpfile=$(mktemp|grep tmp)
    dd if=/dev/zero of=$tmpfile bs=4K count=1
    ceph osd pool set cache4 target_max_objects 200
    ceph osd pool set cache4 target_max_bytes 1000000
    rados -p cache4 put foo1 $tmpfile
    rados -p cache4 put foo2 $tmpfile
    rm -f $tmpfile
    flush_pg_stats
    ceph df | grep datapool | grep ' 2 '
    ceph osd tier remove-overlay datapool
    ceph osd tier remove datapool cache4
    ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
    ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
    ceph osd unset notieragent
}
7c673cae | 545 | |
function test_tiering_9()
{
    # make sure 'tier remove' behaves as we expect
    # i.e., removing a tier from a pool that's not its base pool only
    # results in a 'pool foo is now (or already was) not a tier of bar'
    #
    ceph osd pool create basepoolA 2
    ceph osd pool application enable basepoolA rados
    ceph osd pool create basepoolB 2
    ceph osd pool application enable basepoolB rados
    poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
    poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')

    ceph osd pool create cache5 2
    ceph osd pool create cache6 2
    ceph osd tier add basepoolA cache5
    ceph osd tier add basepoolB cache6
    # removing from the wrong base pool is a no-op with a friendly message
    ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
    ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
    ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
    ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"

    # removing from the right base pool really detaches the tier
    ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
    ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
    ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
    ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1

    ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
    ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1

    ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
    ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
    ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
    ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}
581 | ||
# Exercise the `ceph auth` command family: add/export/import, cap
# validation and updates, key retrieval, and del, including (almost)
# interactive and script modes.
function test_auth()
{
    # caps must name known service types
    expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
    expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
    ceph auth add client.xx mon 'allow *' osd "allow *"
    ceph auth export client.xx >client.xx.keyring
    # re-adding the identical entity/key is accepted
    ceph auth add client.xx -i client.xx.keyring
    rm -f client.xx.keyring
    ceph auth list | grep client.xx
    ceph auth ls | grep client.xx
    ceph auth get client.xx | grep caps | grep mon
    ceph auth get client.xx | grep caps | grep osd
    ceph auth get-key client.xx
    ceph auth print-key client.xx
    ceph auth print_key client.xx
    # `auth caps` replaces the caps wholesale: mon cap disappears
    ceph auth caps client.xx osd "allow rw"
    expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
    ceph auth get client.xx | grep osd | grep "allow rw"
    ceph auth caps client.xx mon 'allow command "osd tree"'
    ceph auth export | grep client.xx
    ceph auth export -o authfile
    ceph auth import -i authfile 2>$TMPFILE
    check_response "imported keyring"

    # import of an identical keyring must be a no-op
    ceph auth export -o authfile2
    diff authfile authfile2
    rm authfile authfile2
    ceph auth del client.xx
    expect_false ceph auth get client.xx

    # (almost) interactive mode
    echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
    ceph auth get client.xx
    # script mode
    echo 'auth del client.xx' | ceph
    expect_false ceph auth get client.xx
}
619 | ||
# Verify the read-only / read-write / role-definer auth profiles grant
# and deny the expected command classes.
function test_auth_profiles()
{
    ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
       mgr 'allow profile read-only'
    ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
       mgr 'allow profile read-write'
    ceph auth add client.xx-profile-rd mon 'allow profile role-definer'

    ceph auth export > client.xx.keyring

    # read-only is allowed all read-only commands (auth excluded)
    ceph -n client.xx-profile-ro -k client.xx.keyring status
    ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
    ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
    ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
    # read-only gets access denied for rw commands or auth commands
    ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
    check_response "EACCES: access denied"
    ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
    check_response "EACCES: access denied"
    ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
    check_response "EACCES: access denied"

    # read-write is allowed for all read-write commands (except auth)
    ceph -n client.xx-profile-rw -k client.xx.keyring status
    ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
    ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
    ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
    ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
    ceph -n client.xx-profile-rw -k client.xx.keyring log foo
    ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
    ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
    # read-write gets access denied for auth commands
    ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
    check_response "EACCES: access denied"

    # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
    ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
    ceph -n client.xx-profile-rd -k client.xx.keyring auth export
    ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
    ceph -n client.xx-profile-rd -k client.xx.keyring status
    ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
    check_response "EACCES: access denied"
    ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
    check_response "EACCES: access denied"
    # read-only 'mon' subsystem commands are allowed
    ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
    # but read-write 'mon' commands are not
    ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
    check_response "EACCES: access denied"
    ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
    check_response "EACCES: access denied"
    ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
    check_response "EACCES: access denied"
    ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
    check_response "EACCES: access denied"

    ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
    ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw

    # add a new role-definer with the existing role-definer
    ceph -n client.xx-profile-rd -k client.xx.keyring \
	auth add client.xx-profile-rd2 mon 'allow profile role-definer'
    ceph -n client.xx-profile-rd -k client.xx.keyring \
	auth export > client.xx.keyring.2
    # remove old role-definer using the new role-definer
    ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
	auth del client.xx-profile-rd
    # remove the remaining role-definer with admin
    ceph auth del client.xx-profile-rd2
    rm -f client.xx.keyring client.xx.keyring.2
}
692 | ||
function test_mon_caps()
{
    # A client that exists in the auth database but has no caps at all
    # must be denied by the monitor.
    ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
    chmod +r $TEMP_DIR/ceph.client.bug.keyring
    ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
    ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring

    # pass --no-mon-config since we are looking for the permission denied error
    rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
    cat $TMPFILE
    check_response "Permission denied"

    # Repeat the check with an explicitly *empty* mon cap string instead
    # of no caps at all; the result must be the same denial.
    rm -rf $TEMP_DIR/ceph.client.bug.keyring
    ceph auth del client.bug
    ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
    chmod +r $TEMP_DIR/ceph.client.bug.keyring
    ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
    ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
    ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
    rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
    check_response "Permission denied"
}
715 | ||
function test_mon_misc()
{
    # osd dump: with and without verbosity
    ceph osd dump | grep '^epoch'
    ceph --concise osd dump | grep '^epoch'

    ceph osd df | grep 'MIN/MAX VAR'

    # df: plain output must NOT carry the per-pool DIRTY column while
    # `df detail` must; the same holds for the json and xml encodings.
    ceph df > $TMPFILE
    grep RAW $TMPFILE
    grep -v DIRTY $TMPFILE
    ceph df detail > $TMPFILE
    grep DIRTY $TMPFILE
    ceph df --format json > $TMPFILE
    grep 'total_bytes' $TMPFILE
    grep -v 'dirty' $TMPFILE
    ceph df detail --format json > $TMPFILE
    grep 'rd_bytes' $TMPFILE
    grep 'dirty' $TMPFILE
    ceph df --format xml | grep '<total_bytes>'
    ceph df detail --format xml | grep '<rd_bytes>'

    # basic status commands must succeed in every output format
    ceph fsid
    ceph health
    ceph health detail
    ceph health --format json-pretty
    ceph health detail --format xml-pretty

    ceph time-sync-status

    # node listing, overall and restricted to each daemon type
    ceph node ls
    for t in mon osd mds mgr ; do
	ceph node ls $t
    done

    # a freshly injected cluster log entry must show up both in
    # `log last` and on the live watch channel
    ceph_watch_start
    mymsg="this is a test log message $$.$(date)"
    ceph log "$mymsg"
    ceph log last | grep "$mymsg"
    ceph log last 100 | grep "$mymsg"
    ceph_watch_wait "$mymsg"

    # mgr module management; enabling an unknown module requires --force
    ceph mgr dump
    ceph mgr module ls
    ceph mgr module enable restful
    expect_false ceph mgr module enable foodne
    ceph mgr module enable foodne --force
    ceph mgr module disable foodne
    ceph mgr module disable foodnebizbangbash

    # daemon metadata and version reporting
    ceph mon metadata a
    ceph mon metadata
    ceph mon count-metadata ceph_version
    ceph mon versions

    ceph mgr metadata
    ceph mgr versions
    ceph mgr count-metadata ceph_version

    ceph versions

    ceph node ls
}
780 | ||
# Succeed iff the named filesystem currently reports an active MDS.
# (Stores the name in the global fs_name, as the surrounding helpers do.)
check_mds_active()
{
    fs_name=$1
    ceph fs get "$fs_name" | grep active
}
786 | ||
# Poll (up to 300 iterations, 5s apart) until the named filesystem
# reports an active MDS; the trailing check makes the function fail
# if none ever appeared.
wait_mds_active()
{
    fs_name=$1
    max_run=300
    i=0
    while (( ++i <= max_run )); do
        if check_mds_active "$fs_name" ; then
            break
        fi
        echo "waiting for an active MDS daemon ($i/$max_run)"
        sleep 5
    done
    check_mds_active "$fs_name"
}
801 | ||
# Print the space-separated list of MDS gids serving the named filesystem,
# extracted from the json fs map.
get_mds_gids()
{
    fs_name=$1
    ceph fs get "$fs_name" --format=json | python3 -c "import json, sys; print(' '.join(str(m['gid']) for m in json.load(sys.stdin)['mdsmap']['info'].values()))"
}
807 | ||
# Mark the named filesystem down and fail every MDS serving it, so that
# no MDS remains active (a precondition for `fs rm` and for the epoch
# checks in the callers). Aborts the whole script if an MDS stays active.
function fail_all_mds()
{
    fs_name=$1
    ceph fs set $fs_name cluster_down true
    mds_gids=$(get_mds_gids $fs_name)
    for mds_gid in $mds_gids ; do
        ceph mds fail $mds_gid
    done
    if check_mds_active $fs_name ; then
        echo "An active MDS remains, something went wrong"
        ceph fs get $fs_name
        # exit 1, not -1: POSIX exit status is 0-255; bash silently maps
        # -1 to 255 but other shells reject it as an error.
        exit 1
    fi

}
823 | ||
# Tear down every filesystem currently defined in the cluster: fail its
# MDS daemons first, then remove the filesystem itself.
remove_all_fs()
{
    for fs_name in $(ceph fs ls --format=json | python3 -c "import json, sys; print(' '.join(fs['name'] for fs in json.load(sys.stdin)))") ; do
        echo "Removing fs ${fs_name}..."
        fail_all_mds $fs_name
        echo "Removing existing filesystem '${fs_name}'..."
        ceph fs rm $fs_name --yes-i-really-mean-it
        echo "Removed '${fs_name}'."
    done
}
835 | ||
# So that tests requiring MDS can skip if one is not configured
# in the cluster at all. Succeeds iff at least one mds.* key exists
# in the auth database.
mds_exists()
{
    ceph auth ls | grep '^mds'
}
842 | ||
# some of the commands are just not idempotent.
# Run "$@" with CEPH_CLI_TEST_DUP_COMMAND temporarily unset so the CLI
# test harness does not replay the command, then restore the old value.
function without_test_dup_command()
{
  if [ -z "${CEPH_CLI_TEST_DUP_COMMAND+x}" ]; then
    "$@"
  else
    local saved=${CEPH_CLI_TEST_DUP_COMMAND}
    unset CEPH_CLI_TEST_DUP_COMMAND
    "$@"
    # Restore the captured value. (The previous code assigned the
    # literal string "saved" here, losing the original setting.)
    CEPH_CLI_TEST_DUP_COMMAND=$saved
  fi
}
855 | ||
# Exercise `ceph tell mds.*`: injectargs by gid, and respawn addressed
# both by rank and by daemon id, waiting until new gids appear.
function test_mds_tell()
{
  local FS_NAME=cephfs
  if ! mds_exists ; then
      echo "Skipping test, no MDS found"
      return
  fi

  remove_all_fs
  ceph osd pool create fs_data 16
  ceph osd pool create fs_metadata 16
  ceph fs new $FS_NAME fs_metadata fs_data
  wait_mds_active $FS_NAME

  # Test injectargs by GID
  old_mds_gids=$(get_mds_gids $FS_NAME)
  echo Old GIDs: $old_mds_gids

  for mds_gid in $old_mds_gids ; do
      ceph tell mds.$mds_gid injectargs "--debug-mds 20"
  done
  expect_false ceph tell mds.a injectargs mds_max_file_recover -1

  # Test respawn by rank
  without_test_dup_command ceph tell mds.0 respawn
  new_mds_gids=$old_mds_gids
  # Compare as strings: with more than one MDS the gid list is
  # multi-word, and the old unquoted `[ ... -eq ... ]` numeric test
  # was a `test` syntax error in that case.
  while [ "$new_mds_gids" == "$old_mds_gids" ] ; do
      sleep 5
      new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  # Test respawn by ID
  without_test_dup_command ceph tell mds.a respawn
  new_mds_gids=$old_mds_gids
  # NOTE(review): old_mds_gids still holds the pre-first-respawn gids,
  # so this loop may exit after one poll without observing the second
  # respawn — pre-existing behavior, kept as-is; confirm intent.
  while [ "$new_mds_gids" == "$old_mds_gids" ] ; do
      sleep 5
      new_mds_gids=$(get_mds_gids $FS_NAME)
  done
  echo New GIDs: $new_mds_gids

  remove_all_fs
  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
901 | ||
# End-to-end checks of mon-side MDS/filesystem management: pool
# attach/detach, fs settings validation, EC-pool and cache-tier
# interaction rules, and multi-fs creation gating.
function test_mon_mds()
{
  local FS_NAME=cephfs
  remove_all_fs

  ceph osd pool create fs_data 16
  ceph osd pool create fs_metadata 16
  ceph fs new $FS_NAME fs_metadata fs_data

  ceph fs set $FS_NAME cluster_down true
  ceph fs set $FS_NAME cluster_down false

  # rm_incompat must be idempotent
  ceph mds compat rm_incompat 4
  ceph mds compat rm_incompat 4

  # We don't want any MDSs to be up, their activity can interfere with
  # the "current_epoch + 1" checking below if they're generating updates
  fail_all_mds $FS_NAME

  ceph mds compat show
  ceph fs dump
  ceph fs get $FS_NAME
  for mds_gid in $(get_mds_gids $FS_NAME) ; do
      # fixed: was `$mds_id`, an undefined variable — the loop variable
      # is $mds_gid, so every gid's metadata is actually queried now.
      ceph mds metadata $mds_gid
  done
  ceph mds metadata
  ceph mds versions
  ceph mds count-metadata os

  # XXX mds fail, but how do you undo it?
  mdsmapfile=$TEMP_DIR/mdsmap.$$
  current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
  [ -s $mdsmapfile ]
  rm $mdsmapfile

  # data pool add/remove by numeric id; unknown ids/names must ENOENT
  ceph osd pool create data2 16
  ceph osd pool create data3 16
  data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
  data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
  ceph fs add_data_pool cephfs $data2_pool
  ceph fs add_data_pool cephfs $data3_pool
  ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
  check_response "Error ENOENT"
  ceph fs rm_data_pool cephfs $data2_pool
  ceph fs rm_data_pool cephfs $data3_pool
  ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it

  # max_mds accepts 1..256; 257 and non-numeric values are rejected
  ceph fs set cephfs max_mds 4
  ceph fs set cephfs max_mds 3
  ceph fs set cephfs max_mds 256
  expect_false ceph fs set cephfs max_mds 257
  ceph fs set cephfs max_mds 4
  ceph fs set cephfs max_mds 256
  expect_false ceph fs set cephfs max_mds 257
  expect_false ceph fs set cephfs max_mds asdf
  # enabling inline_data requires the extra confirmation flag
  expect_false ceph fs set cephfs inline_data true
  ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
  ceph fs set cephfs inline_data yes --yes-i-really-really-mean-it
  ceph fs set cephfs inline_data 1 --yes-i-really-really-mean-it
  expect_false ceph fs set cephfs inline_data --yes-i-really-really-mean-it
  ceph fs set cephfs inline_data false
  ceph fs set cephfs inline_data no
  ceph fs set cephfs inline_data 0
  expect_false ceph fs set cephfs inline_data asdf
  ceph fs set cephfs max_file_size 1048576
  expect_false ceph fs set cephfs max_file_size 123asdf

  expect_false ceph fs set cephfs allow_new_snaps
  ceph fs set cephfs allow_new_snaps true
  ceph fs set cephfs allow_new_snaps 0
  ceph fs set cephfs allow_new_snaps false
  ceph fs set cephfs allow_new_snaps no
  expect_false ceph fs set cephfs allow_new_snaps taco

  # we should never be able to add EC pools as data or metadata pools
  # create an ec-pool...
  ceph osd pool create mds-ec-pool 16 16 erasure
  set +e
  ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e
  ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
  data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
  metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')

  fail_all_mds $FS_NAME

  set +e
  # Check that rmfailed requires confirmation
  expect_false ceph mds rmfailed 0
  ceph mds rmfailed 0 --yes-i-really-mean-it
  set -e

  # Check that `fs new` is no longer permitted
  expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE

  # Check that 'fs reset' runs
  ceph fs reset $FS_NAME --yes-i-really-mean-it

  # Check that creating a second FS fails by default
  ceph osd pool create fs_metadata2 16
  ceph osd pool create fs_data2 16
  set +e
  expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
  set -e

  # Check that setting enable_multiple enables creation of second fs
  ceph fs flag set enable_multiple true --yes-i-really-mean-it
  ceph fs new cephfs2 fs_metadata2 fs_data2

  # Clean up multi-fs stuff
  fail_all_mds cephfs2
  ceph fs rm cephfs2 --yes-i-really-mean-it
  ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
  ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it

  fail_all_mds $FS_NAME

  # Clean up to enable subsequent fs new tests
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  set +e
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e

  # ... new create a cache tier in front of the EC pool...
  ceph osd pool create mds-tier 2
  ceph osd tier add mds-ec-pool mds-tier
  ceph osd tier set-overlay mds-ec-pool mds-tier
  tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')

  # Use of a readonly tier should be forbidden
  ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
  set +e
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
  set -e

  # Use of a writeback tier should enable FS creation
  ceph osd tier cache-mode mds-tier writeback
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force

  # While a FS exists using the tiered pools, I should not be allowed
  # to remove the tier
  set +e
  ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  set -e

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ... but we should be forbidden from using the cache pool in the FS directly.
  set +e
  ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
  check_response 'in use as a cache tier' $? 22
  set -e

  # Clean up tier + EC pools
  ceph osd tier remove-overlay mds-ec-pool
  ceph osd tier remove mds-ec-pool mds-tier

  # Create a FS using the 'cache' pool now that it's no longer a tier
  ceph fs new $FS_NAME fs_metadata mds-tier --force

  # We should be forbidden from using this pool as a tier now that
  # it's in use for CephFS
  set +e
  ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
  check_response 'in use by CephFS' $? 16
  set -e

  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # We should be permitted to use an EC pool with overwrites enabled
  # as the data pool...
  ceph osd pool set mds-ec-pool allow_ec_overwrites true
  ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it

  # ...but not as the metadata pool
  set +e
  ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
  check_response 'erasure-code' $? 22
  set -e

  ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it

  # Create a FS and check that we can subsequently add a cache tier to it
  ceph fs new $FS_NAME fs_metadata fs_data --force

  # Adding overlay to FS pool should be permitted, RADOS clients handle this.
  ceph osd tier add fs_metadata mds-tier
  ceph osd tier cache-mode mds-tier writeback
  ceph osd tier set-overlay fs_metadata mds-tier

  # Removing tier should be permitted because the underlying pool is
  # replicated (#11504 case)
  ceph osd tier cache-mode mds-tier readproxy
  ceph osd tier remove-overlay fs_metadata
  ceph osd tier remove fs_metadata mds-tier
  ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it

  # Clean up FS
  fail_all_mds $FS_NAME
  ceph fs rm $FS_NAME --yes-i-really-mean-it



  ceph mds stat
  # ceph mds tell mds.a getmap
  # ceph mds rm
  # ceph mds rmfailed
  # ceph mds set_state

  ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
  ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
1135 | ||
function test_mon_mds_metadata()
{
    # Every monitor must answer `mds metadata`; count the mons first.
    local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
    test "$nmons" -gt 0

    # Extract "<gid> <name> <rank>" triples from the fs dump and verify
    # the metadata lookup works addressed by gid, by name and by rank.
    # NOTE: the while-loop body runs in a pipeline subshell, so its
    # variables do not leak back into the function.
    ceph fs dump |
    sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
    while read gid id rank; do
	ceph mds metadata ${gid} | grep '"hostname":'
	ceph mds metadata ${id} | grep '"hostname":'
	ceph mds metadata ${rank} | grep '"hostname":'

	# every monitor must return the same hostname entry
	local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
	test "$n" -eq "$nmons"
    done

    expect_false ceph mds metadata UNKNOWN
}
1154 | ||
function test_mon_mon()
{
    # print help message
    ceph --help mon
    # no mon add/remove
    ceph mon dump
    ceph mon getmap -o $TEMP_DIR/monmap.$$
    [ -s $TEMP_DIR/monmap.$$ ]

    # ceph mon tell: address the first monitor from the dump by name
    first=$(ceph mon dump -f json | jq -r '.mons[0].name')
    ceph tell mon.$first mon_status

    # test mon features: only known feature names may be set
    ceph mon feature ls
    ceph mon feature set kraken --yes-i-really-mean-it
    expect_false ceph mon feature set abcd
    expect_false ceph mon feature set abcd --yes-i-really-mean-it

    # test mon stat
    # don't check output, just ensure it does not fail.
    ceph mon stat
    ceph mon stat -f json | jq '.'
}
1179 | ||
9f95a23c TL |
# mon weights in [0, 65535] must be accepted and round-trip through the
# monmap dump; values outside that range must be rejected.
function test_mon_priority_and_weight()
{
    for wt in 0 1 65535; do
      ceph mon set-weight a $wt
      got=$(ceph mon dump --format=json-pretty 2>/dev/null | jq '.mons[0].weight')
      [[ "$got" == "$wt" ]]
    done

    for wt in -1 65536; do
      expect_false ceph mon set-weight a $wt
    done
}
1192 | ||
31f18b77 FG |
# Generate a mock OSD secrets file in $TEMP_DIR and print its path.
# Supported types:
#   all        - generates both cephx and lockbox, with mock dm-crypt key
#   cephx      - only cephx
#   no_cephx   - lockbox and dm-crypt, no cephx
#   no_lockbox - dm-crypt and cephx, no lockbox
#   empty      - empty file
#   empty_json - correct json, empty map
#   bad_json   - bad json :)
# Returns non-zero on an unknown type.
function gen_secrets_file()
{
  local t=$1
  if [[ -z "$t" ]]; then
    t="all"
  fi

  fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
  echo $fn
  if [[ "$t" == "empty" ]]; then
    return 0
  fi

  echo "{" > $fn
  if [[ "$t" == "bad_json" ]]; then
    echo "asd: ; }" >> $fn
    return 0
  elif [[ "$t" == "empty_json" ]]; then
    echo "}" >> $fn
    return 0
  fi

  cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
  lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
  dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""

  if [[ "$t" == "all" ]]; then
    echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "cephx" ]]; then
    echo "$cephx_secret" >> $fn
  elif [[ "$t" == "no_cephx" ]]; then
    echo "$lb_secret,$dmcrypt_key" >> $fn
  elif [[ "$t" == "no_lockbox" ]]; then
    echo "$cephx_secret,$dmcrypt_key" >> $fn
  else
    # Fixed: report the unknown *type* (was "$fn", the temp file path),
    # and write the diagnostic to stderr so callers capturing stdout
    # still receive only the file name.
    echo "unknown gen_secrets_file() type '$t'" >&2
    return 1
  fi
  echo "}" >> $fn
  return 0
}
1243 | ||
function test_mon_osd_create_destroy()
{
    # `osd new` argument validation: missing or blank uuid, bad id
    ceph osd new 2>&1 | grep 'EINVAL'
    ceph osd new '' -1 2>&1 | grep 'EINVAL'
    ceph osd new '' 10 2>&1 | grep 'EINVAL'

    old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')

    old_osds=$(ceph osd ls)
    num_osds=$(ceph osd ls | wc -l)

    # a fresh uuid must allocate an id that is not already in use
    uuid=$(uuidgen)
    id=$(ceph osd new $uuid 2>/dev/null)

    for i in $old_osds; do
        [[ "$i" != "$id" ]]
    done

    ceph osd find $id

    # `osd new` with the same uuid is idempotent: the same id comes back
    id2=`ceph osd new $uuid 2>/dev/null`

    [[ $id2 == $id ]]

    ceph osd new $uuid $id

    # same uuid with a different (unused) id must be refused
    id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
    ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST

    uuid2=$(uuidgen)
    id2=$(ceph osd new $uuid2)
    ceph osd find $id2
    [[ "$id2" != "$id" ]]

    # an existing id paired with a mismatching uuid must be refused
    ceph osd new $uuid $id2 2>&1 | grep EEXIST
    ceph osd new $uuid2 $id2

    # test with secrets
    empty_secrets=$(gen_secrets_file "empty")
    empty_json=$(gen_secrets_file "empty_json")
    all_secrets=$(gen_secrets_file "all")
    cephx_only=$(gen_secrets_file "cephx")
    no_cephx=$(gen_secrets_file "no_cephx")
    no_lockbox=$(gen_secrets_file "no_lockbox")
    bad_json=$(gen_secrets_file "bad_json")

    # empty secrets should be idempotent
    new_id=$(ceph osd new $uuid $id -i $empty_secrets)
    [[ "$new_id" == "$id" ]]

    # empty json, thus empty secrets
    new_id=$(ceph osd new $uuid $id -i $empty_json)
    [[ "$new_id" == "$id" ]]

    ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'

    ceph osd rm $id
    ceph osd rm $id2
    ceph osd setmaxosd $old_maxosd

    # lockbox/dm-crypt secrets without cephx (and vice versa) are invalid
    ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
    ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'

    osds=$(ceph osd ls)
    id=$(ceph osd new $uuid -i $all_secrets)
    for i in $osds; do
        [[ "$i" != "$id" ]]
    done

    ceph osd find $id

    # validate secrets and dm-crypt are set
    k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
    s=$(cat $all_secrets | jq '.cephx_secret')
    [[ $k == $s ]]
    k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
        jq '.key')
    s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
    [[ $k == $s ]]
    ceph config-key exists dm-crypt/osd/$uuid/luks

    # cephx-only secrets: no lockbox key and no dm-crypt config-key entry
    osds=$(ceph osd ls)
    id2=$(ceph osd new $uuid2 -i $cephx_only)
    for i in $osds; do
        [[ "$i" != "$id2" ]]
    done

    ceph osd find $id2
    k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
    s=$(cat $all_secrets | jq '.cephx_secret')
    [[ $k == $s ]]
    expect_false ceph auth get-key client.osd-lockbox.$uuid2
    expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks

    # destroy keeps the id in the map (flagged destroyed) but drops its key
    ceph osd destroy osd.$id2 --yes-i-really-mean-it
    ceph osd destroy $id2 --yes-i-really-mean-it
    ceph osd find $id2
    expect_false ceph auth get-key osd.$id2
    ceph osd dump | grep osd.$id2 | grep destroyed

    # recreating over a destroyed id clears the destroyed flag
    id3=$id2
    uuid3=$(uuidgen)
    ceph osd new $uuid3 $id3 -i $all_secrets
    ceph osd dump | grep osd.$id3 | expect_false grep destroyed
    ceph auth get-key client.osd-lockbox.$uuid3
    ceph auth get-key osd.$id3
    ceph config-key exists dm-crypt/osd/$uuid3/luks

    # purge-new removes a freshly created osd together with its secrets
    ceph osd purge-new osd.$id3 --yes-i-really-mean-it
    expect_false ceph osd find $id2
    expect_false ceph auth get-key osd.$id2
    expect_false ceph auth get-key client.osd-lockbox.$uuid3
    expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
    ceph osd purge osd.$id3 --yes-i-really-mean-it
    ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent

    # purge of an existing and of a nonexistent id both succeed
    ceph osd purge osd.$id --yes-i-really-mean-it
    ceph osd purge 123456 --yes-i-really-mean-it
    expect_false ceph osd find $id
    expect_false ceph auth get-key osd.$id
    expect_false ceph auth get-key client.osd-lockbox.$uuid
    expect_false ceph config-key exists dm-crypt/osd/$uuid/luks

    rm $empty_secrets $empty_json $all_secrets $cephx_only \
        $no_cephx $no_lockbox $bad_json

    # the osd map must be back to its original population
    for i in $(ceph osd ls); do
        [[ "$i" != "$id" ]]
        [[ "$i" != "$id2" ]]
        [[ "$i" != "$id3" ]]
    done

    [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
    ceph osd setmaxosd $old_maxosd

}
1380 | ||
c07f9fc5 FG |
# Round-trip a value through the mon config-key store: the key must be
# absent before set, visible via get/list/dump afterwards, and gone
# again after rm. The key string is unique enough to avoid collisions.
function test_mon_config_key()
{
    ck=asdfasdfqwerqwreasdfuniquesa123df
    ceph config-key list | grep -c $ck | grep 0
    ceph config-key get $ck | grep -c bar | grep 0
    ceph config-key set $ck bar
    ceph config-key get $ck | grep bar
    ceph config-key list | grep -c $ck | grep 1
    ceph config-key dump | grep $ck | grep bar
    ceph config-key rm $ck
    expect_false ceph config-key get $ck
    ceph config-key list | grep -c $ck | grep 0
    ceph config-key dump | grep -c $ck | grep 0
}
1395 | ||
7c673cae FG |
1396 | function test_mon_osd() |
1397 | { | |
1398 | # | |
1399 | # osd blacklist | |
1400 | # | |
1401 | bl=192.168.0.1:0/1000 | |
1402 | ceph osd blacklist add $bl | |
1403 | ceph osd blacklist ls | grep $bl | |
1404 | ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl | |
1405 | ceph osd dump --format=json-pretty | grep $bl | |
11fdf7f2 | 1406 | ceph osd dump | grep $bl |
7c673cae FG |
1407 | ceph osd blacklist rm $bl |
1408 | ceph osd blacklist ls | expect_false grep $bl | |
1409 | ||
1410 | bl=192.168.0.1 | |
1411 | # test without nonce, invalid nonce | |
1412 | ceph osd blacklist add $bl | |
1413 | ceph osd blacklist ls | grep $bl | |
1414 | ceph osd blacklist rm $bl | |
11fdf7f2 | 1415 | ceph osd blacklist ls | expect_false grep $bl |
7c673cae FG |
1416 | expect_false "ceph osd blacklist $bl/-1" |
1417 | expect_false "ceph osd blacklist $bl/foo" | |
1418 | ||
1419 | # test with wrong address | |
1420 | expect_false "ceph osd blacklist 1234.56.78.90/100" | |
1421 | ||
1422 | # Test `clear` | |
1423 | ceph osd blacklist add $bl | |
1424 | ceph osd blacklist ls | grep $bl | |
1425 | ceph osd blacklist clear | |
1426 | ceph osd blacklist ls | expect_false grep $bl | |
1427 | ||
1428 | # | |
1429 | # osd crush | |
1430 | # | |
1431 | ceph osd crush reweight-all | |
1432 | ceph osd crush tunables legacy | |
1433 | ceph osd crush show-tunables | grep argonaut | |
1434 | ceph osd crush tunables bobtail | |
1435 | ceph osd crush show-tunables | grep bobtail | |
1436 | ceph osd crush tunables firefly | |
1437 | ceph osd crush show-tunables | grep firefly | |
1438 | ||
1439 | ceph osd crush set-tunable straw_calc_version 0 | |
1440 | ceph osd crush get-tunable straw_calc_version | grep 0 | |
1441 | ceph osd crush set-tunable straw_calc_version 1 | |
1442 | ceph osd crush get-tunable straw_calc_version | grep 1 | |
1443 | ||
1444 | # | |
1445 | # require-min-compat-client | |
1446 | expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables | |
1447 | ceph osd set-require-min-compat-client luminous | |
11fdf7f2 | 1448 | ceph osd get-require-min-compat-client | grep luminous |
7c673cae FG |
1449 | ceph osd dump | grep 'require_min_compat_client luminous' |
1450 | ||
1451 | # | |
1452 | # osd scrub | |
1453 | # | |
11fdf7f2 TL |
1454 | |
1455 | # blocking | |
1456 | ceph osd scrub 0 --block | |
1457 | ceph osd deep-scrub 0 --block | |
1458 | ||
7c673cae FG |
1459 | # how do I tell when these are done? |
1460 | ceph osd scrub 0 | |
1461 | ceph osd deep-scrub 0 | |
1462 | ceph osd repair 0 | |
1463 | ||
11fdf7f2 TL |
1464 | # pool scrub, force-recovery/backfill |
1465 | pool_names=`rados lspools` | |
1466 | for pool_name in $pool_names | |
1467 | do | |
1468 | ceph osd pool scrub $pool_name | |
1469 | ceph osd pool deep-scrub $pool_name | |
1470 | ceph osd pool repair $pool_name | |
1471 | ceph osd pool force-recovery $pool_name | |
1472 | ceph osd pool cancel-force-recovery $pool_name | |
1473 | ceph osd pool force-backfill $pool_name | |
1474 | ceph osd pool cancel-force-backfill $pool_name | |
1475 | done | |
1476 | ||
1477 | for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \ | |
9f95a23c | 1478 | norebalance norecover notieragent |
7c673cae FG |
1479 | do |
1480 | ceph osd set $f | |
1481 | ceph osd unset $f | |
1482 | done | |
7c673cae FG |
1483 | expect_false ceph osd set bogus |
1484 | expect_false ceph osd unset bogus | |
11fdf7f2 TL |
1485 | for f in sortbitwise recover_deletes require_jewel_osds \ |
1486 | require_kraken_osds | |
1487 | do | |
1488 | expect_false ceph osd set $f | |
1489 | expect_false ceph osd unset $f | |
1490 | done | |
9f95a23c TL |
1491 | ceph osd require-osd-release octopus |
1492 | # can't lower | |
1493 | expect_false ceph osd require-osd-release nautilus | |
1494 | expect_false ceph osd require-osd-release mimic | |
1495 | expect_false ceph osd require-osd-release luminous | |
31f18b77 | 1496 | # these are no-ops but should succeed. |
7c673cae FG |
1497 | |
1498 | ceph osd set noup | |
1499 | ceph osd down 0 | |
1500 | ceph osd dump | grep 'osd.0 down' | |
1501 | ceph osd unset noup | |
1502 | max_run=1000 | |
1503 | for ((i=0; i < $max_run; i++)); do | |
1504 | if ! ceph osd dump | grep 'osd.0 up'; then | |
1505 | echo "waiting for osd.0 to come back up ($i/$max_run)" | |
1506 | sleep 1 | |
1507 | else | |
1508 | break | |
1509 | fi | |
1510 | done | |
1511 | ceph osd dump | grep 'osd.0 up' | |
1512 | ||
1513 | ceph osd dump | grep 'osd.0 up' | |
1514 | # ceph osd find expects the OsdName, so both ints and osd.n should work. | |
1515 | ceph osd find 1 | |
1516 | ceph osd find osd.1 | |
1517 | expect_false ceph osd find osd.xyz | |
1518 | expect_false ceph osd find xyz | |
1519 | expect_false ceph osd find 0.1 | |
1520 | ceph --format plain osd find 1 # falls back to json-pretty | |
1521 | if [ `uname` == Linux ]; then | |
1522 | ceph osd metadata 1 | grep 'distro' | |
1523 | ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty | |
1524 | fi | |
1525 | ceph osd out 0 | |
1526 | ceph osd dump | grep 'osd.0.*out' | |
1527 | ceph osd in 0 | |
1528 | ceph osd dump | grep 'osd.0.*in' | |
1529 | ceph osd find 0 | |
1530 | ||
9f95a23c TL |
1531 | ceph osd info 0 |
1532 | ceph osd info osd.0 | |
1533 | expect_false ceph osd info osd.xyz | |
1534 | expect_false ceph osd info xyz | |
1535 | expect_false ceph osd info 42 | |
1536 | expect_false ceph osd info osd.42 | |
1537 | ||
1538 | ceph osd info | |
1539 | info_json=$(ceph osd info --format=json | jq -cM '.') | |
1540 | dump_json=$(ceph osd dump --format=json | jq -cM '.osds') | |
1541 | [[ "${info_json}" == "${dump_json}" ]] | |
1542 | ||
1543 | info_json=$(ceph osd info 0 --format=json | jq -cM '.') | |
1544 | dump_json=$(ceph osd dump --format=json | \ | |
1545 | jq -cM '.osds[] | select(.osd == 0)') | |
1546 | [[ "${info_json}" == "${dump_json}" ]] | |
1547 | ||
1548 | info_plain="$(ceph osd info)" | |
1549 | dump_plain="$(ceph osd dump | grep '^osd')" | |
1550 | [[ "${info_plain}" == "${dump_plain}" ]] | |
1551 | ||
1552 | info_plain="$(ceph osd info 0)" | |
1553 | dump_plain="$(ceph osd dump | grep '^osd.0')" | |
1554 | [[ "${info_plain}" == "${dump_plain}" ]] | |
1555 | ||
31f18b77 | 1556 | ceph osd add-nodown 0 1 |
224ce89b | 1557 | ceph health detail | grep 'NODOWN' |
31f18b77 | 1558 | ceph osd rm-nodown 0 1 |
224ce89b | 1559 | ! ceph health detail | grep 'NODOWN' |
31f18b77 FG |
1560 | |
1561 | ceph osd out 0 # so we can mark it as noin later | |
1562 | ceph osd add-noin 0 | |
224ce89b | 1563 | ceph health detail | grep 'NOIN' |
31f18b77 | 1564 | ceph osd rm-noin 0 |
224ce89b | 1565 | ! ceph health detail | grep 'NOIN' |
31f18b77 FG |
1566 | ceph osd in 0 |
1567 | ||
1568 | ceph osd add-noout 0 | |
224ce89b | 1569 | ceph health detail | grep 'NOOUT' |
31f18b77 | 1570 | ceph osd rm-noout 0 |
224ce89b | 1571 | ! ceph health detail | grep 'NOOUT' |
31f18b77 FG |
1572 | |
1573 | # test osd id parse | |
1574 | expect_false ceph osd add-noup 797er | |
1575 | expect_false ceph osd add-nodown u9uwer | |
1576 | expect_false ceph osd add-noin 78~15 | |
31f18b77 FG |
1577 | |
1578 | expect_false ceph osd rm-noup 1234567 | |
1579 | expect_false ceph osd rm-nodown fsadf7 | |
31f18b77 FG |
1580 | expect_false ceph osd rm-noout 790-fd |
1581 | ||
1582 | ids=`ceph osd ls-tree default` | |
1583 | for osd in $ids | |
1584 | do | |
1585 | ceph osd add-nodown $osd | |
1586 | ceph osd add-noout $osd | |
1587 | done | |
224ce89b WB |
1588 | ceph -s | grep 'NODOWN' |
1589 | ceph -s | grep 'NOOUT' | |
31f18b77 FG |
1590 | ceph osd rm-nodown any |
1591 | ceph osd rm-noout all | |
224ce89b WB |
1592 | ! ceph -s | grep 'NODOWN' |
1593 | ! ceph -s | grep 'NOOUT' | |
31f18b77 | 1594 | |
81eedcae TL |
1595 | # test crush node flags |
1596 | ceph osd add-noup osd.0 | |
1597 | ceph osd add-nodown osd.0 | |
1598 | ceph osd add-noin osd.0 | |
1599 | ceph osd add-noout osd.0 | |
1600 | ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0" | |
1601 | ceph osd rm-noup osd.0 | |
1602 | ceph osd rm-nodown osd.0 | |
1603 | ceph osd rm-noin osd.0 | |
1604 | ceph osd rm-noout osd.0 | |
1605 | ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0" | |
1606 | ||
1607 | ceph osd crush add-bucket foo host root=default | |
1608 | ceph osd add-noup foo | |
1609 | ceph osd add-nodown foo | |
1610 | ceph osd add-noin foo | |
1611 | ceph osd add-noout foo | |
1612 | ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo | |
1613 | ceph osd rm-noup foo | |
1614 | ceph osd rm-nodown foo | |
1615 | ceph osd rm-noin foo | |
1616 | ceph osd rm-noout foo | |
1617 | ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo | |
1618 | ceph osd add-noup foo | |
1619 | ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo | |
1620 | ceph osd crush rm foo | |
1621 | ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo | |
1622 | ||
1623 | ceph osd set-group noup osd.0 | |
1624 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' | |
1625 | ceph osd set-group noup,nodown osd.0 | |
1626 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' | |
1627 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown' | |
1628 | ceph osd set-group noup,nodown,noin osd.0 | |
1629 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' | |
1630 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown' | |
1631 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin' | |
1632 | ceph osd set-group noup,nodown,noin,noout osd.0 | |
1633 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' | |
1634 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown' | |
1635 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin' | |
1636 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout' | |
1637 | ceph osd unset-group noup osd.0 | |
1638 | ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup' | |
1639 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown' | |
1640 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin' | |
1641 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout' | |
1642 | ceph osd unset-group noup,nodown osd.0 | |
1643 | ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown' | |
1644 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin' | |
1645 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout' | |
1646 | ceph osd unset-group noup,nodown,noin osd.0 | |
1647 | ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin' | |
1648 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout' | |
1649 | ceph osd unset-group noup,nodown,noin,noout osd.0 | |
1650 | ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout' | |
1651 | ||
1652 | ceph osd set-group noup,nodown,noin,noout osd.0 osd.1 | |
1653 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' | |
1654 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown' | |
1655 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin' | |
1656 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout' | |
1657 | ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup' | |
1658 | ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown' | |
1659 | ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin' | |
1660 | ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout' | |
1661 | ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1 | |
1662 | ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout' | |
1663 | ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout' | |
1664 | ||
1665 | ceph osd set-group noup all | |
1666 | ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' | |
1667 | ceph osd unset-group noup all | |
1668 | ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup' | |
1669 | ||
1670 | # crush node flags | |
1671 | ceph osd crush add-bucket foo host root=default | |
1672 | ceph osd set-group noup foo | |
1673 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup' | |
1674 | ceph osd set-group noup,nodown foo | |
1675 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup' | |
1676 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown' | |
1677 | ceph osd set-group noup,nodown,noin foo | |
1678 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup' | |
1679 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown' | |
1680 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' | |
1681 | ceph osd set-group noup,nodown,noin,noout foo | |
1682 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup' | |
1683 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown' | |
1684 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' | |
1685 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' | |
1686 | ||
1687 | ceph osd unset-group noup foo | |
1688 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup' | |
1689 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown' | |
1690 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' | |
1691 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' | |
1692 | ceph osd unset-group noup,nodown foo | |
1693 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown' | |
1694 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' | |
1695 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' | |
1696 | ceph osd unset-group noup,nodown,noin foo | |
1697 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin' | |
1698 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' | |
1699 | ceph osd unset-group noup,nodown,noin,noout foo | |
1700 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout' | |
1701 | ||
1702 | ceph osd set-group noin,noout foo | |
1703 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' | |
1704 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' | |
1705 | ceph osd unset-group noin,noout foo | |
1706 | ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo' | |
1707 | ||
1708 | ceph osd set-group noup,nodown,noin,noout foo | |
1709 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup' | |
1710 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown' | |
1711 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' | |
1712 | ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' | |
1713 | ceph osd crush rm foo | |
1714 | ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo' | |
1715 | ||
1716 | # test device class flags | |
1717 | osd_0_device_class=$(ceph osd crush get-device-class osd.0) | |
1718 | ceph osd set-group noup $osd_0_device_class | |
1719 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup' | |
1720 | ceph osd set-group noup,nodown $osd_0_device_class | |
1721 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup' | |
1722 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown' | |
1723 | ceph osd set-group noup,nodown,noin $osd_0_device_class | |
1724 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup' | |
1725 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown' | |
1726 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin' | |
1727 | ceph osd set-group noup,nodown,noin,noout $osd_0_device_class | |
1728 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup' | |
1729 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown' | |
1730 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin' | |
1731 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout' | |
1732 | ||
1733 | ceph osd unset-group noup $osd_0_device_class | |
1734 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup' | |
1735 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown' | |
1736 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin' | |
1737 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout' | |
1738 | ceph osd unset-group noup,nodown $osd_0_device_class | |
1739 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown' | |
1740 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin' | |
1741 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout' | |
1742 | ceph osd unset-group noup,nodown,noin $osd_0_device_class | |
1743 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin' | |
1744 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout' | |
1745 | ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class | |
1746 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout' | |
1747 | ||
1748 | ceph osd set-group noin,noout $osd_0_device_class | |
1749 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin' | |
1750 | ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout' | |
1751 | ceph osd unset-group noin,noout $osd_0_device_class | |
1752 | ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep $osd_0_device_class | |
1753 | ||
7c673cae FG |
1754 | # make sure mark out preserves weight |
1755 | ceph osd reweight osd.0 .5 | |
1756 | ceph osd dump | grep ^osd.0 | grep 'weight 0.5' | |
1757 | ceph osd out 0 | |
1758 | ceph osd in 0 | |
1759 | ceph osd dump | grep ^osd.0 | grep 'weight 0.5' | |
1760 | ||
7c673cae FG |
1761 | ceph osd getmap -o $f |
1762 | [ -s $f ] | |
1763 | rm $f | |
1764 | save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//') | |
1765 | [ "$save" -gt 0 ] | |
1766 | ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY' | |
1767 | ceph osd setmaxosd 10 | |
1768 | ceph osd getmaxosd | grep 'max_osd = 10' | |
1769 | ceph osd setmaxosd $save | |
1770 | ceph osd getmaxosd | grep "max_osd = $save" | |
1771 | ||
1772 | for id in `ceph osd ls` ; do | |
1773 | retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version | |
1774 | done | |
1775 | ||
1776 | ceph osd rm 0 2>&1 | grep 'EBUSY' | |
1777 | ||
1778 | local old_osds=$(echo $(ceph osd ls)) | |
1779 | id=`ceph osd create` | |
1780 | ceph osd find $id | |
1781 | ceph osd lost $id --yes-i-really-mean-it | |
1782 | expect_false ceph osd setmaxosd $id | |
1783 | local new_osds=$(echo $(ceph osd ls)) | |
1784 | for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do | |
1785 | ceph osd rm $id | |
1786 | done | |
1787 | ||
1788 | uuid=`uuidgen` | |
1789 | id=`ceph osd create $uuid` | |
1790 | id2=`ceph osd create $uuid` | |
1791 | [ "$id" = "$id2" ] | |
1792 | ceph osd rm $id | |
1793 | ||
1794 | ceph --help osd | |
1795 | ||
1796 | # reset max_osd. | |
1797 | ceph osd setmaxosd $id | |
1798 | ceph osd getmaxosd | grep "max_osd = $save" | |
1799 | local max_osd=$save | |
1800 | ||
1801 | ceph osd create $uuid 0 2>&1 | grep 'EINVAL' | |
1802 | ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL' | |
1803 | ||
1804 | id=`ceph osd create $uuid $max_osd` | |
1805 | [ "$id" = "$max_osd" ] | |
1806 | ceph osd find $id | |
1807 | max_osd=$((max_osd + 1)) | |
1808 | ceph osd getmaxosd | grep "max_osd = $max_osd" | |
1809 | ||
31f18b77 FG |
1810 | ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST' |
1811 | ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST' | |
7c673cae FG |
1812 | id2=`ceph osd create $uuid` |
1813 | [ "$id" = "$id2" ] | |
1814 | id2=`ceph osd create $uuid $id` | |
1815 | [ "$id" = "$id2" ] | |
1816 | ||
1817 | uuid=`uuidgen` | |
1818 | local gap_start=$max_osd | |
1819 | id=`ceph osd create $uuid $((gap_start + 100))` | |
1820 | [ "$id" = "$((gap_start + 100))" ] | |
1821 | max_osd=$((id + 1)) | |
1822 | ceph osd getmaxosd | grep "max_osd = $max_osd" | |
1823 | ||
31f18b77 | 1824 | ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST' |
7c673cae FG |
1825 | |
1826 | # | |
1827 | # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create | |
1828 | # is repeated and consumes two osd id, not just one. | |
1829 | # | |
3efd9988 | 1830 | local next_osd=$gap_start |
7c673cae FG |
1831 | id=`ceph osd create $(uuidgen)` |
1832 | [ "$id" = "$next_osd" ] | |
1833 | ||
1834 | next_osd=$((id + 1)) | |
1835 | id=`ceph osd create $(uuidgen) $next_osd` | |
1836 | [ "$id" = "$next_osd" ] | |
1837 | ||
1838 | local new_osds=$(echo $(ceph osd ls)) | |
1839 | for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do | |
1840 | [ $id -ge $save ] | |
1841 | ceph osd rm $id | |
1842 | done | |
1843 | ceph osd setmaxosd $save | |
1844 | ||
1845 | ceph osd ls | |
9f95a23c | 1846 | ceph osd pool create data 16 |
c07f9fc5 | 1847 | ceph osd pool application enable data rados |
7c673cae FG |
1848 | ceph osd lspools | grep data |
1849 | ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting' | |
1850 | ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting' | |
1851 | ceph osd pool delete data data --yes-i-really-really-mean-it | |
1852 | ||
1853 | ceph osd pause | |
1854 | ceph osd dump | grep 'flags.*pauserd,pausewr' | |
1855 | ceph osd unpause | |
1856 | ||
1857 | ceph osd tree | |
31f18b77 FG |
1858 | ceph osd tree up |
1859 | ceph osd tree down | |
1860 | ceph osd tree in | |
1861 | ceph osd tree out | |
c07f9fc5 | 1862 | ceph osd tree destroyed |
31f18b77 FG |
1863 | ceph osd tree up in |
1864 | ceph osd tree up out | |
1865 | ceph osd tree down in | |
1866 | ceph osd tree down out | |
1867 | ceph osd tree out down | |
1868 | expect_false ceph osd tree up down | |
c07f9fc5 FG |
1869 | expect_false ceph osd tree up destroyed |
1870 | expect_false ceph osd tree down destroyed | |
1871 | expect_false ceph osd tree up down destroyed | |
31f18b77 FG |
1872 | expect_false ceph osd tree in out |
1873 | expect_false ceph osd tree up foo | |
1874 | ||
1875 | ceph osd metadata | |
1876 | ceph osd count-metadata os | |
1877 | ceph osd versions | |
1878 | ||
7c673cae FG |
1879 | ceph osd perf |
1880 | ceph osd blocked-by | |
1881 | ||
11fdf7f2 | 1882 | ceph osd stat | grep up |
7c673cae FG |
1883 | } |
1884 | ||
31f18b77 FG |
# Exercise CRUSH map get/set round-tripping through the monitor:
# dump the current crush map, verify that setcrushmap with a future
# (not-yet-committed) epoch is rejected, that applying the map at the
# current epoch advances the epoch by exactly one, and that re-applying
# the same map at the same epoch is idempotent (reports the same epoch).
# Globals read: TEMP_DIR (scratch directory). Helpers: expect_false.
function test_mon_crush()
{
  f=$TEMP_DIR/map.$$
  # getcrushmap prints the map's epoch on the last line of its output
  epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
  [ -s $f ]
  [ "$epoch" -gt 1 ]
  nextepoch=$(( $epoch + 1 ))
  echo epoch $epoch nextepoch $nextepoch
  rm -f $f.epoch
  # setting the map against an epoch that does not exist yet must fail
  expect_false ceph osd setcrushmap $nextepoch -i $f
  # setting against the current epoch succeeds and bumps the epoch by one
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo gotepoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  # should be idempotent
  gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
  echo epoch $gotepoch
  [ "$gotepoch" -eq "$nextepoch" ]
  rm $f
}
1904 | ||
7c673cae FG |
# Exercise the `ceph osd pool` monitor commands: create/delete, snapshot
# (mksnap/rmsnap), rename, replicated vs. erasure pool-type validation,
# the allow_ec_overwrites flag (which is only valid on bluestore-backed
# clusters), pool creation against an explicit erasure-code profile and
# crush rule, and the --autoscale-mode create option.
# Globals read: TMPFILE. Helpers: expect_false, check_response.
function test_mon_osd_pool()
{
  #
  # osd pool
  #
  ceph osd pool create data 16
  ceph osd pool application enable data rados
  ceph osd pool mksnap data datasnap
  rados -p data lssnap | grep datasnap
  ceph osd pool rmsnap data datasnap
  # removing a snapshot from a non-existent pool must fail
  expect_false ceph osd pool rmsnap pool_fake snapshot
  ceph osd pool delete data data --yes-i-really-really-mean-it

  ceph osd pool create data2 16
  ceph osd pool application enable data2 rados
  ceph osd pool rename data2 data3
  ceph osd lspools | grep data3
  ceph osd pool delete data3 data3 --yes-i-really-really-mean-it

  # re-creating an existing pool with compatible arguments is a no-op
  ceph osd pool create replicated 16 16 replicated
  ceph osd pool create replicated 1 16 replicated
  ceph osd pool create replicated 16 16 # default is replicated
  ceph osd pool create replicated 16 # default is replicated, pgp_num = pg_num
  ceph osd pool application enable replicated rados
  # should fail because the type is not the same
  expect_false ceph osd pool create replicated 16 16 erasure
  ceph osd lspools | grep replicated
  ceph osd pool create ec_test 1 1 erasure
  ceph osd pool application enable ec_test rados
  set +e
  ceph osd count-metadata osd_objectstore | grep 'bluestore'
  if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
    ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
    check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
  else
    # on bluestore the flag can be enabled, but never disabled again
    ceph osd pool set ec_test allow_ec_overwrites true || return 1
    expect_false ceph osd pool set ec_test allow_ec_overwrites false
  fi
  set -e
  ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
  ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it

  # test create pool with rule
  ceph osd erasure-code-profile set foo foo
  ceph osd erasure-code-profile ls | grep foo
  ceph osd crush rule create-erasure foo foo
  ceph osd pool create erasure 16 16 erasure foo
  # profile removal must be refused while a pool still uses it
  expect_false ceph osd erasure-code-profile rm foo
  ceph osd pool delete erasure erasure --yes-i-really-really-mean-it
  ceph osd crush rule rm foo
  ceph osd erasure-code-profile rm foo

  # autoscale mode
  ceph osd pool create modeon --autoscale-mode=on
  ceph osd dump | grep modeon | grep 'autoscale_mode on'
  ceph osd pool create modewarn --autoscale-mode=warn
  ceph osd dump | grep modewarn | grep 'autoscale_mode warn'
  ceph osd pool create modeoff --autoscale-mode=off
  ceph osd dump | grep modeoff | grep 'autoscale_mode off'
  ceph osd pool delete modeon modeon --yes-i-really-really-mean-it
  ceph osd pool delete modewarn modewarn --yes-i-really-really-mean-it
  ceph osd pool delete modeoff modeoff --yes-i-really-really-mean-it
}
1968 | ||
# Exercise `ceph osd pool set-quota` / `get-quota` on a throwaway pool:
# invalid quota keys/values must be rejected; valid quotas are readable
# both in plain text and json-pretty; unit suffixes (K vs. Ki) are
# accepted on set and reported in binary units (Ki) on get; setting a
# quota to 0 resets it, shown as "N/A" by get-quota.
# Helpers: expect_false.
function test_mon_osd_pool_quota()
{
  #
  # test osd pool set/get quota
  #

  # create tmp pool
  ceph osd pool create tmp-quota-pool 32
  ceph osd pool application enable tmp-quota-pool rados
  #
  # set erroneous quotas
  #
  expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
  expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
  expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
  #
  # set valid quotas
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10
  ceph osd pool set-quota tmp-quota-pool max_objects 10M
  #
  # get quotas in json-pretty format
  #
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_objects":.*10000000'
  ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
    grep '"quota_max_bytes":.*10'
  #
  # get quotas
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects'
  #
  # set valid quotas with unit prefix
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10K
  #
  # get quotas
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
  #
  # set valid quotas with unit prefix
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
  #
  # get quotas
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
  #
  #
  # reset pool quotas
  #
  ceph osd pool set-quota tmp-quota-pool max_bytes 0
  ceph osd pool set-quota tmp-quota-pool max_objects 0
  #
  # test N/A quotas
  #
  ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
  ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
  #
  # cleanup tmp pool
  ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
}
2032 | ||
# Exercise PG-related monitor commands and fullness handling:
# `pg dump` / `dump_stuck` / `ls*` variants, scrub/repair, the
# full/backfillfull/nearfull ratio settings (including the
# OSD_OUT_OF_ORDER_FULL health warning when ratios are misordered),
# injected per-OSD fullness states and their health warnings,
# tell-based OSD queries, reweight / primary-affinity validation,
# pg-temp, and pg repeer.
# Globals read: TEMP_DIR, SUDO.
# Helpers: wait_for_health_ok, wait_for_health, expect_false.
function test_mon_pg()
{
  # Make sure we start healthy.
  wait_for_health_ok

  ceph pg debug unfound_objects_exist
  ceph pg debug degraded_pgs_exist
  ceph pg deep-scrub 1.0
  ceph pg dump
  ceph pg dump pgs_brief --format=json
  ceph pg dump pgs --format=json
  ceph pg dump pools --format=json
  ceph pg dump osds --format=json
  ceph pg dump sum --format=json
  ceph pg dump all --format=json
  ceph pg dump pgs_brief osds --format=json
  ceph pg dump pools osds pgs_brief --format=json
  ceph pg dump_json
  ceph pg dump_pools_json
  ceph pg dump_stuck inactive
  ceph pg dump_stuck unclean
  ceph pg dump_stuck stale
  ceph pg dump_stuck undersized
  ceph pg dump_stuck degraded
  # pg ls accepts an optional pool id and/or state filters
  ceph pg ls
  ceph pg ls 1
  ceph pg ls stale
  expect_false ceph pg ls scrubq
  ceph pg ls active stale repair recovering
  ceph pg ls 1 active
  ceph pg ls 1 active stale
  ceph pg ls-by-primary osd.0
  ceph pg ls-by-primary osd.0 1
  ceph pg ls-by-primary osd.0 active
  ceph pg ls-by-primary osd.0 active stale
  ceph pg ls-by-primary osd.0 1 active stale
  ceph pg ls-by-osd osd.0
  ceph pg ls-by-osd osd.0 1
  ceph pg ls-by-osd osd.0 active
  ceph pg ls-by-osd osd.0 active stale
  ceph pg ls-by-osd osd.0 1 active stale
  ceph pg ls-by-pool rbd
  ceph pg ls-by-pool rbd active stale
  # can't test this...
  # ceph pg force_create_pg
  ceph pg getmap -o $TEMP_DIR/map.$$
  [ -s $TEMP_DIR/map.$$ ]
  ceph pg map 1.0 | grep acting
  ceph pg repair 1.0
  ceph pg scrub 1.0

  ceph osd set-full-ratio .962
  ceph osd dump | grep '^full_ratio 0.962'
  ceph osd set-backfillfull-ratio .912
  ceph osd dump | grep '^backfillfull_ratio 0.912'
  ceph osd set-nearfull-ratio .892
  ceph osd dump | grep '^nearfull_ratio 0.892'

  # Check health status
  # nearfull above backfillfull triggers OSD_OUT_OF_ORDER_FULL
  ceph osd set-nearfull-ratio .913
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-nearfull-ratio .892
  # backfillfull above full triggers the same warning
  ceph osd set-backfillfull-ratio .963
  ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
  ceph health detail | grep OSD_OUT_OF_ORDER_FULL
  ceph osd set-backfillfull-ratio .912

  # Check injected full results
  $SUDO ceph tell osd.0 injectfull nearfull
  wait_for_health "OSD_NEARFULL"
  ceph health detail | grep "osd.0 is near full"
  $SUDO ceph tell osd.0 injectfull none
  wait_for_health_ok

  $SUDO ceph tell osd.1 injectfull backfillfull
  wait_for_health "OSD_BACKFILLFULL"
  ceph health detail | grep "osd.1 is backfill full"
  $SUDO ceph tell osd.1 injectfull none
  wait_for_health_ok

  $SUDO ceph tell osd.2 injectfull failsafe
  # failsafe and full are the same as far as the monitor is concerned
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.2 is full"
  $SUDO ceph tell osd.2 injectfull none
  wait_for_health_ok

  $SUDO ceph tell osd.0 injectfull full
  wait_for_health "OSD_FULL"
  ceph health detail | grep "osd.0 is full"
  $SUDO ceph tell osd.0 injectfull none
  wait_for_health_ok

  ceph pg stat | grep 'pgs:'
  ceph pg 1.0 query
  ceph tell 1.0 query
  first=$(ceph mon dump -f json | jq -r '.mons[0].name')
  ceph tell mon.$first quorum enter
  ceph quorum_status
  ceph report | grep osd_stats
  ceph status
  ceph -s

  #
  # tell osd version
  #
  ceph tell osd.0 version
  expect_false ceph tell osd.9999 version
  expect_false ceph tell osd.foo version

  # back to pg stuff

  ceph tell osd.0 dump_pg_recovery_stats | grep Started

  # reweight accepts [0,1]; negative values are rejected
  ceph osd reweight 0 0.9
  expect_false ceph osd reweight 0 -1
  ceph osd reweight osd.0 1

  ceph osd primary-affinity osd.0 .9
  expect_false ceph osd primary-affinity osd.0 -2
  expect_false ceph osd primary-affinity osd.9999 .5
  ceph osd primary-affinity osd.0 1

  ceph osd pool set rbd size 2
  # pg-temp takes exactly as many osds as the pool's size (here 2)
  ceph osd pg-temp 1.0 0 1
  ceph osd pg-temp 1.0 osd.1 osd.0
  expect_false ceph osd pg-temp 1.0 0 1 2
  expect_false ceph osd pg-temp asdf qwer
  expect_false ceph osd pg-temp 1.0 asdf
  ceph osd pg-temp 1.0 # cleanup pg-temp

  ceph pg repeer 1.0
  expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore

  # don't test ceph osd primary-temp for now
}
2170 | ||
function test_mon_osd_pool_set()
{
    # Exercise 'ceph osd pool set/get' for every tunable on a scratch pool.
    TEST_POOL_GETSET=pool_getset
    ceph osd pool create $TEST_POOL_GETSET 1
    ceph osd pool application enable $TEST_POOL_GETSET rados
    # disable the autoscaler so our explicit pg_num/pgp_num changes stick
    ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off
    wait_for_clean
    ceph osd pool get $TEST_POOL_GETSET all

    # every basic field must be individually readable
    for s in pg_num pgp_num size min_size crush_rule; do
        ceph osd pool get $TEST_POOL_GETSET $s
    done

    # bump the replica count, verify it round-trips, then restore it
    old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
    (( new_size = old_size + 1 ))
    ceph osd pool set $TEST_POOL_GETSET size $new_size
    ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
    ceph osd pool set $TEST_POOL_GETSET size $old_size

    # erasure pools must refuse size changes
    ceph osd pool create pool_erasure 1 1 erasure
    ceph osd pool application enable pool_erasure rados
    wait_for_clean
    set +e
    ceph osd pool set pool_erasure size 4444 2>$TMPFILE
    check_response 'not change the size'
    set -e
    ceph osd pool get pool_erasure erasure_code_profile
    ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it

    # boolean pool flags accept true/false and 1/0, and nothing else
    for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
        ceph osd pool set $TEST_POOL_GETSET $flag false
        ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
        ceph osd pool set $TEST_POOL_GETSET $flag true
        ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
        ceph osd pool set $TEST_POOL_GETSET $flag 1
        ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
        ceph osd pool set $TEST_POOL_GETSET $flag 0
        ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
        expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
        expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
    done

    # per-pool scrub interval overrides: unset by default, 0 unsets again
    ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
    ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
    ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
    ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'

    ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
    ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
    ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
    ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'

    ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
    ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
    ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
    ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'

    # recovery_priority is clamped to [-10, 10]; 0 unsets it
    ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
    ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
    ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
    ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'
    ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
    ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
    expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11
    expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11

    ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
    ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
    ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
    ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'

    ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
    ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
    ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
    ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'

    # nopgchange must block pg_num/pgp_num changes
    ceph osd pool set $TEST_POOL_GETSET nopgchange 1
    expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
    expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
    ceph osd pool set $TEST_POOL_GETSET nopgchange 0
    ceph osd pool set $TEST_POOL_GETSET pg_num 10
    wait_for_clean
    ceph osd pool set $TEST_POOL_GETSET pgp_num 10
    expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0
    expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0

    # grow pg_num/pgp_num proportionally to the number of OSDs
    old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
    new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
    ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
    ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
    wait_for_clean

    # nosizechange must block size/min_size changes
    ceph osd pool set $TEST_POOL_GETSET nosizechange 1
    expect_false ceph osd pool set $TEST_POOL_GETSET size 2
    expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
    ceph osd pool set $TEST_POOL_GETSET nosizechange 0
    ceph osd pool set $TEST_POOL_GETSET size 2
    wait_for_clean
    ceph osd pool set $TEST_POOL_GETSET min_size 2

    # toggling hashpspool requires explicit confirmation in both directions
    expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
    ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it

    expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
    ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it

    ceph osd pool get rbd crush_rule | grep 'crush_rule: '

    # compression settings are unset by default; 'unset' clears them again
    ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
    ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
    ceph osd pool set $TEST_POOL_GETSET compression_mode unset
    ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'

    ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
    ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
    ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
    ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'

    # compression_required_ratio must lie in [0, 1]; 0 unsets
    ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
    expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
    expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
    ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
    ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
    ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
    ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'

    ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
    ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
    ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
    ceph osd pool set $TEST_POOL_GETSET csum_type unset
    ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'

    # numeric blob/block size tunables: settable, 0 unsets
    for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
        ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
        ceph osd pool set $TEST_POOL_GETSET $size 100
        ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
        ceph osd pool set $TEST_POOL_GETSET $size 0
        ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
    done

    # nodelete must block pool deletion; clear it and clean up
    ceph osd pool set $TEST_POOL_GETSET nodelete 1
    expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
    ceph osd pool set $TEST_POOL_GETSET nodelete 0
    ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it

}
2325 | ||
function test_mon_osd_tiered_pool_set()
{
    # Verify that tier-only pool settings work on a real tier pool and are
    # rejected on a plain pool.

    # this is really a tier pool
    ceph osd pool create real-tier 2
    ceph osd tier add rbd real-tier

    # expect us to be unable to set negative values for hit_set_*
    # (fixed: the pool is named real-tier, not real_tier; with the wrong
    # name these expect_false calls passed for the wrong reason)
    for o in hit_set_period hit_set_count hit_set_fpp; do
        expect_false ceph osd pool set real-tier $o -1
    done

    # and hit_set_fpp should be in range 0..1
    expect_false ceph osd pool set real-tier hit_set_fpp 2

    ceph osd pool set real-tier hit_set_type explicit_hash
    ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
    ceph osd pool set real-tier hit_set_type explicit_object
    ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
    ceph osd pool set real-tier hit_set_type bloom
    ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
    expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
    ceph osd pool set real-tier hit_set_period 123
    ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
    ceph osd pool set real-tier hit_set_count 12
    ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
    ceph osd pool set real-tier hit_set_fpp .01
    ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"

    ceph osd pool set real-tier target_max_objects 123
    ceph osd pool get real-tier target_max_objects | \
        grep 'target_max_objects:[ \t]\+123'
    ceph osd pool set real-tier target_max_bytes 123456
    ceph osd pool get real-tier target_max_bytes | \
        grep 'target_max_bytes:[ \t]\+123456'
    ceph osd pool set real-tier cache_target_dirty_ratio .123
    ceph osd pool get real-tier cache_target_dirty_ratio | \
        grep 'cache_target_dirty_ratio:[ \t]\+0.123'
    expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
    expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
    ceph osd pool set real-tier cache_target_dirty_high_ratio .123
    ceph osd pool get real-tier cache_target_dirty_high_ratio | \
        grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
    expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
    expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
    ceph osd pool set real-tier cache_target_full_ratio .123
    ceph osd pool get real-tier cache_target_full_ratio | \
        grep 'cache_target_full_ratio:[ \t]\+0.123'
    ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
    ceph osd pool set real-tier cache_target_full_ratio 1.0
    ceph osd pool set real-tier cache_target_full_ratio 0
    expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
    ceph osd pool set real-tier cache_min_flush_age 123
    ceph osd pool get real-tier cache_min_flush_age | \
        grep 'cache_min_flush_age:[ \t]\+123'
    ceph osd pool set real-tier cache_min_evict_age 234
    ceph osd pool get real-tier cache_min_evict_age | \
        grep 'cache_min_evict_age:[ \t]\+234'

    # iec vs si units
    ceph osd pool set real-tier target_max_objects 1K
    ceph osd pool get real-tier target_max_objects | grep 1000
    for o in target_max_bytes target_size_bytes compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
        ceph osd pool set real-tier $o 1Ki # explicit IEC "i" suffix -> 1024
        val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
        [[ $val == 1024 ]]
        ceph osd pool set real-tier $o 1M # no "i" suffix, still parsed as binary
        val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
        [[ $val == 1048576 ]]
    done

    # this is not a tier pool
    ceph osd pool create fake-tier 2
    ceph osd pool application enable fake-tier rados
    wait_for_clean

    expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
    expect_false ceph osd pool get fake-tier hit_set_type
    expect_false ceph osd pool set fake-tier hit_set_type explicit_object
    expect_false ceph osd pool get fake-tier hit_set_type
    expect_false ceph osd pool set fake-tier hit_set_type bloom
    expect_false ceph osd pool get fake-tier hit_set_type
    expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
    expect_false ceph osd pool set fake-tier hit_set_period 123
    expect_false ceph osd pool get fake-tier hit_set_period
    expect_false ceph osd pool set fake-tier hit_set_count 12
    expect_false ceph osd pool get fake-tier hit_set_count
    expect_false ceph osd pool set fake-tier hit_set_fpp .01
    expect_false ceph osd pool get fake-tier hit_set_fpp

    expect_false ceph osd pool set fake-tier target_max_objects 123
    expect_false ceph osd pool get fake-tier target_max_objects
    expect_false ceph osd pool set fake-tier target_max_bytes 123456
    expect_false ceph osd pool get fake-tier target_max_bytes
    expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
    expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
    expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
    expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
    expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
    expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
    expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
    expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
    expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
    expect_false ceph osd pool get fake-tier cache_target_full_ratio
    expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
    expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
    expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
    expect_false ceph osd pool set fake-tier cache_min_flush_age 123
    expect_false ceph osd pool get fake-tier cache_min_flush_age
    expect_false ceph osd pool set fake-tier cache_min_evict_age 234
    expect_false ceph osd pool get fake-tier cache_min_evict_age

    ceph osd tier remove rbd real-tier
    ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
    ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
}
2441 | ||
function test_mon_osd_erasure_code()
{
    # Profile creation is idempotent for identical key/value sets...
    ceph osd erasure-code-profile set fooprofile a=b c=d
    ceph osd erasure-code-profile set fooprofile a=b c=d
    # ...but changing an existing profile needs --force
    expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
    ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
    ceph osd erasure-code-profile set fooprofile a=b c=d e=f
    expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
    # make sure ruleset-foo doesn't work anymore
    expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
    ceph osd erasure-code-profile set barprofile crush-failure-domain=host
    # clean up
    ceph osd erasure-code-profile rm fooprofile
    ceph osd erasure-code-profile rm barprofile

    # try weird k and m values
    expect_false ceph osd erasure-code-profile set badk k=1 m=1
    expect_false ceph osd erasure-code-profile set badk k=1 m=2
    expect_false ceph osd erasure-code-profile set badk k=0 m=2
    expect_false ceph osd erasure-code-profile set badk k=-1 m=2
    expect_false ceph osd erasure-code-profile set badm k=2 m=0
    expect_false ceph osd erasure-code-profile set badm k=2 m=-1
    ceph osd erasure-code-profile set good k=2 m=1
    ceph osd erasure-code-profile rm good
}
2468 | ||
function test_mon_osd_misc()
{
    # Argument validation and reweight-by-utilization/pg plumbing.
    set +e

    # expect error about missing 'pool' argument
    ceph osd map 2>$TMPFILE; check_response 'pool' $? 22

    # expect error about unused argument foo
    ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22

    # expect "not in range" for invalid overload percentage
    ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22

    set -e

    # declare and assign separately so a failing 'ceph config get' is not
    # masked by 'local' always returning 0 (ShellCheck SC2155)
    local old_bytes_per_osd
    local old_pgs_per_osd
    old_bytes_per_osd=$(ceph config get mgr mon_reweight_min_bytes_per_osd)
    old_pgs_per_osd=$(ceph config get mgr mon_reweight_min_pgs_per_osd)
    # otherwise ceph-mgr complains like:
    # Error EDOM: Refusing to reweight: we only have 5372 kb used across all osds!
    # Error EDOM: Refusing to reweight: we only have 20 PGs across 3 osds!
    ceph config set mgr mon_reweight_min_bytes_per_osd 0
    ceph config set mgr mon_reweight_min_pgs_per_osd 0
    ceph osd reweight-by-utilization 110
    ceph osd reweight-by-utilization 110 .5
    expect_false ceph osd reweight-by-utilization 110 0
    expect_false ceph osd reweight-by-utilization 110 -0.1
    ceph osd test-reweight-by-utilization 110 .5 --no-increasing
    ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
    expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
    expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
    ceph osd reweight-by-pg 110
    ceph osd test-reweight-by-pg 110 .5
    ceph osd reweight-by-pg 110 rbd
    ceph osd reweight-by-pg 110 .5 rbd
    expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
    # restore the setting
    ceph config set mgr mon_reweight_min_bytes_per_osd $old_bytes_per_osd
    ceph config set mgr mon_reweight_min_pgs_per_osd $old_pgs_per_osd
}
2508 | ||
function test_admin_heap_profiler()
{
    # Exercise tcmalloc heap-profiler admin commands; skipped when the OSD
    # was not built against tcmalloc.
    local do_test=1
    set +e
    # expect 'heap' commands to be correctly parsed
    ceph tell osd.0 heap stats 2>$TMPFILE
    # idiomatic grep -q instead of testing backtick output inside [[ ]]
    if [[ $? -eq 22 ]] && grep -q 'tcmalloc not enabled' $TMPFILE; then
        echo "tcmalloc not enabled; skip heap profiler test"
        do_test=0
    fi
    set -e

    [[ $do_test -eq 0 ]] && return 0

    $SUDO ceph tell osd.0 heap start_profiler
    $SUDO ceph tell osd.0 heap dump
    $SUDO ceph tell osd.0 heap stop_profiler
    $SUDO ceph tell osd.0 heap release
}
2528 | ||
function test_osd_bench()
{
    # test osd bench limits
    # As we should not rely on defaults (as they may change over time),
    # inject known values and perform some simple boundary tests
    #   max iops:       10        (IOPS)
    #   max throughput: 10485760  (10MB/s)
    #   max block size: 2097152   (2MB)
    #   duration:       10        (seconds)

    local args="\
        --osd-bench-duration 10 \
        --osd-bench-max-block-size 2097152 \
        --osd-bench-large-size-max-throughput 10485760 \
        --osd-bench-small-size-max-iops 10"
    # intentionally unquoted: injectargs needs the string word-split
    ceph tell osd.0 injectargs ${args## }

    # anything with a bs larger than 2097152 must fail
    expect_false ceph tell osd.0 bench 1 2097153
    # but using 'osd_bench_max_bs' itself must succeed
    ceph tell osd.0 bench 1 2097152

    # we assume 1MB as a large bs; anything lower is a small bs
    # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
    # max count: 409600 (bytes)

    # more than max count must not be allowed
    expect_false ceph tell osd.0 bench 409601 4096
    # but exactly 409600 must succeed
    ceph tell osd.0 bench 409600 4096

    # for a large bs, we are limited by throughput.
    # for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
    # the max count will be (10MB * 10s) = 100MB
    # max count: 104857600 (bytes)

    # more than max count must not be allowed
    expect_false ceph tell osd.0 bench 104857601 2097152
    # up to max count must be allowed
    ceph tell osd.0 bench 104857600 2097152
}
2570 | ||
function test_osd_negative_filestore_merge_threshold()
{
    # a negative merge threshold must be accepted and round-trip intact
    $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
    expect_config_value "osd.0" "filestore_merge_threshold" -1
}
2576 | ||
function test_mon_tell()
{
    # 'ceph tell mon.X' must reach each monitor and leave an audit-log entry.
    local m
    for m in mon.a mon.b; do
        ceph tell $m sessions
        ceph_watch_start debug audit
        # fixed: tell the monitor we are iterating over ($m, not mon.a),
        # otherwise the watch pattern below can never match for mon.b
        ceph tell $m sessions
        ceph_watch_wait "${m} \[DBG\] from.*cmd='sessions' args=\[\]: dispatch"
    done
    expect_false ceph tell mon.foo version
}
2587 | ||
7c673cae FG |
function test_mon_ping()
{
    # ping each monitor by name, a bogus name, and the wildcard
    ceph ping mon.a
    ceph ping mon.b
    expect_false ceph ping mon.foo

    ceph ping mon.\*
}
2596 | ||
function test_mon_deprecated_commands()
{
    # current DEPRECATED commands are marked with FLAG(DEPRECATED)
    #
    # Testing should be accomplished by setting
    # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
    # each one of these commands.

    ceph tell mon.* injectargs '--mon-debug-deprecated-as-obsolete'
    expect_false ceph config-key list 2> $TMPFILE
    check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"

    # restore normal deprecation behaviour
    ceph tell mon.* injectargs '--no-mon-debug-deprecated-as-obsolete'
}
2611 | ||
function test_mon_cephdf_commands()
{
    # ceph df detail:
    # pool section:
    # RAW USED The near raw used per pool in raw total
    #
    # With a size-2 pool, stored * 2 must equal stored_raw.

    ceph osd pool create cephdf_for_test 1 1 replicated
    ceph osd pool application enable cephdf_for_test rados
    ceph osd pool set cephdf_for_test size 2

    dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
    rados put cephdf_for_test cephdf_for_test -p cephdf_for_test

    # wait for the object to become visible
    for i in $(seq 1 10); do
        rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
        sleep 1
    done
    # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
    # to sync mon with osd
    flush_pg_stats
    local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
    stored=$(ceph df detail --format=json | jq "$jq_filter.stored * 2")
    stored_raw=$(ceph df detail --format=json | jq "$jq_filter.stored_raw")

    ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
    rm ./cephdf_for_test

    # quoted so an empty value is a comparison failure, not a test(1)
    # usage error that expect_false would silently absorb
    expect_false test "$stored" != "$stored_raw"
}
2642 | ||
c07f9fc5 FG |
function test_mon_pool_application()
{
    # Exercise per-pool application enable/set/get/rm/disable.
    ceph osd pool create app_for_test 16

    ceph osd pool application enable app_for_test rbd
    # enabling a second application needs explicit confirmation
    expect_false ceph osd pool application enable app_for_test rgw
    ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
    ceph osd pool ls detail | grep "application rbd,rgw"
    ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'

    # keys may only be set on enabled applications
    expect_false ceph osd pool application set app_for_test cephfs key value
    ceph osd pool application set app_for_test rbd key1 value1
    ceph osd pool application set app_for_test rbd key2 value2
    ceph osd pool application set app_for_test rgw key1 value1
    ceph osd pool application get app_for_test rbd key1 | grep 'value1'
    ceph osd pool application get app_for_test rbd key2 | grep 'value2'
    ceph osd pool application get app_for_test rgw key1 | grep 'value1'

    ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'

    ceph osd pool application rm app_for_test rgw key1
    ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
    ceph osd pool application rm app_for_test rbd key2
    ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
    ceph osd pool application rm app_for_test rbd key1
    ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
    ceph osd pool application rm app_for_test rbd key1 # should be idempotent

    expect_false ceph osd pool application disable app_for_test rgw
    ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
    ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
    ceph osd pool ls detail | grep "application rbd"
    ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'

    # fixed: disable the remaining application (rbd, not the already-disabled
    # rgw) so the no-applications assertions below can actually hold
    ceph osd pool application disable app_for_test rbd --yes-i-really-mean-it
    ceph osd pool ls detail | grep -v "application "
    ceph osd pool ls detail --format=json | grep '"application_metadata":{}'

    ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
}
2683 | ||
31f18b77 FG |
function test_mon_tell_help_command()
{
    # 'tell mon.X help' and '-h' variants must list known commands
    ceph tell mon.a help | grep sync_force
    ceph tell mon.a -h | grep sync_force
    ceph tell mon.a config -h | grep 'config diff get'

    # wrong target
    expect_false ceph tell mon.zzz help
}
2693 | ||
c07f9fc5 FG |
function test_mon_stdin_stdout()
{
    # config-key must accept a value on stdin (-i -) and emit it on stdout (-o -)
    echo foo | ceph config-key set test_key -i -
    ceph config-key get test_key -o - | grep -c foo | grep -q 1
}
2699 | ||
31f18b77 FG |
function test_osd_tell_help_command()
{
    # help works for a live OSD, fails for a nonexistent one
    ceph tell osd.1 help
    expect_false ceph tell osd.100 help
}
2705 | ||
224ce89b WB |
function test_osd_compact()
{
    # compact via both the tell and the admin-socket paths
    ceph tell osd.1 compact
    $SUDO ceph daemon osd.1 compact
}
2711 | ||
31f18b77 FG |
function test_mds_tell_help_command()
{
    local FS_NAME=cephfs
    if ! mds_exists ; then
        echo "Skipping test, no MDS found"
        return
    fi

    # build a fresh filesystem to get an active MDS to talk to
    remove_all_fs
    ceph osd pool create fs_data 16
    ceph osd pool create fs_metadata 16
    ceph fs new $FS_NAME fs_metadata fs_data
    wait_mds_active $FS_NAME

    ceph tell mds.a help
    expect_false ceph tell mds.z help

    # tear the filesystem and its pools back down
    remove_all_fs
    ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
    ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
2734 | ||
224ce89b | 2735 | function test_mgr_tell() |
31f18b77 | 2736 | { |
9f95a23c | 2737 | ceph tell mgr version |
31f18b77 FG |
2738 | } |
2739 | ||
eafe8130 TL |
function test_mgr_devices()
{
    # device listing works; queries on unknown devices must fail
    ceph device ls
    expect_false ceph device info doesnotexist
    expect_false ceph device get-health-metrics doesnotexist
}
2746 | ||
function test_per_pool_scrub_status()
{
    # POOL_SCRUB_FLAGS health warnings must appear only when a pool has
    # noscrub/nodeep-scrub set, and must name the affected pools.
    ceph osd pool create noscrub_pool 16
    ceph osd pool create noscrub_pool2 16
    # no flags set yet: no warning anywhere
    ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
    ceph -s --format json | \
        jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
        expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
    ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
        expect_false grep -q "Pool .* has .*scrub.* flag"
    ceph health detail | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
        expect_false grep -q "Pool .* has .*scrub.* flag"

    # one pool with noscrub
    ceph osd pool set noscrub_pool noscrub 1
    ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
    ceph -s --format json | \
        jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
        expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
    ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
        expect_true grep -q "Pool noscrub_pool has noscrub flag"
    ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"

    # add nodeep-scrub on both pools; summary lists both flags
    ceph osd pool set noscrub_pool nodeep-scrub 1
    ceph osd pool set noscrub_pool2 nodeep-scrub 1
    ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
    ceph -s --format json | \
        jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
        expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
    ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
        expect_true grep -q "Pool noscrub_pool has noscrub flag"
    ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
        expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
    ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
        expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
    ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
    ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
    ceph health detail | expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"

    ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it
    ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it
}
2788 | ||
7c673cae FG |
2789 | # |
2790 | # New tests should be added to the TESTS array below | |
2791 | # | |
2792 | # Individual tests may be run using the '-t <testname>' argument | |
2793 | # The user can specify '-t <testname>' as many times as she wants | |
2794 | # | |
2795 | # Tests will be run in order presented in the TESTS array, or in | |
2796 | # the order specified by the '-t <testname>' options. | |
2797 | # | |
2798 | # '-l' will list all the available test names | |
2799 | # '-h' will show usage | |
2800 | # | |
2801 | # The test maintains backward compatibility: not specifying arguments | |
2802 | # will run all tests following the order they appear in the TESTS array. | |
2803 | # | |
2804 | ||
set +x

# Registry of test names.  Each entry X is executed by calling the
# function 'test_X'; the per-daemon lists can also be selected wholesale
# via --test-mon / --test-osd / --test-mds / --test-mgr.  Every entry
# carries a leading space so the lists concatenate and word-split cleanly.
MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
for i in {1..9}; do          # was `seq 9`: brace expansion avoids a fork
  MON_TESTS+=" tiering_$i"
done
MON_TESTS+=" auth"
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
MON_TESTS+=" mon_config_key"
MON_TESTS+=" mon_crush"
MON_TESTS+=" mon_osd_create_destroy"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_pool_set"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
MON_TESTS+=" mon_tell"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"
MON_TESTS+=" mon_tell_help_command"
MON_TESTS+=" mon_stdin_stdout"

OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"
OSD_TESTS+=" admin_heap_profiler"
OSD_TESTS+=" osd_tell_help_command"
OSD_TESTS+=" osd_compact"
OSD_TESTS+=" per_pool_scrub_status"

MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"
MDS_TESTS+=" mds_tell_help_command"

MGR_TESTS+=" mgr_tell"
MGR_TESTS+=" mgr_devices"

# The combined list preserves MON -> OSD -> MDS -> MGR order.
TESTS+="$MON_TESTS"
TESTS+="$OSD_TESTS"
TESTS+="$MDS_TESTS"
TESTS+="$MGR_TESTS"
7c673cae FG |
2854 | |
2855 | # | |
2856 | # "main" follows | |
2857 | # | |
2858 | ||
# Print every registered test name from $TESTS, one per line, under an
# "AVAILABLE TESTS" header.  Relies on word-splitting of the global
# space-separated $TESTS list.
function list_tests()
{
  local t
  echo "AVAILABLE TESTS"
  for t in $TESTS; do
    echo " $t"
  done
}
2866 | ||
# Print a one-line usage synopsis to stdout.
function usage()
{
  printf 'usage: %s [-h|-l|-t <testname> [-t <testname>...]]\n' "$0"
}
2871 | ||
# ---- command-line parsing ---------------------------------------------
# tests_to_run accumulates a space-separated list of test names.  It is
# deliberately a scalar string, not an array: every consumer below uses
# it as one ('test -z "$tests_to_run"', 'for i in $tests_to_run').  The
# previous 'tests_to_run=()' array declaration mixed the two forms
# (ShellCheck SC2178/SC2179) and only worked by accident via element 0.
tests_to_run=""

# When true (default), assert that no OSD is down before and between tests.
sanity_check=true

while [[ $# -gt 0 ]]; do
  opt=$1

  case "$opt" in
    "-l" )
      # List the registered tests and exit (handled after parsing).
      do_list=1
      ;;
    "--asok-does-not-need-root" )
      # Talk to admin sockets directly instead of via sudo.
      SUDO=""
      ;;
    "--no-sanity-check" )
      sanity_check=false
      ;;
    "--test-mon" )
      tests_to_run+="$MON_TESTS"
      ;;
    "--test-osd" )
      tests_to_run+="$OSD_TESTS"
      ;;
    "--test-mds" )
      tests_to_run+="$MDS_TESTS"
      ;;
    "--test-mgr" )
      tests_to_run+="$MGR_TESTS"
      ;;
    "-t" )
      # -t takes one test name and may be repeated; order is preserved.
      shift
      if [[ -z "$1" ]]; then
        echo "missing argument to '-t'"
        usage
        exit 1
      fi
      tests_to_run+=" $1"
      ;;
    "-h" )
      usage
      exit 0
      ;;
  esac
  shift
done
2917 | ||
# ---- main -------------------------------------------------------------

# -l: only list the registered test names, run nothing.
if [[ $do_list -eq 1 ]]; then
  list_tests ;
  exit 0
fi

# Several tests assume the default 'rbd' pool exists.
ceph osd pool create rbd 16

# Backward compatibility: with no explicit selection, run every
# registered test in the order it appears in the TESTS array.
if test -z "$tests_to_run" ; then
  tests_to_run="$TESTS"
fi

# Unless --no-sanity-check was given, wait for all OSDs to be up before
# starting, and verify no OSD went down before each test and at the end.
if $sanity_check ; then
  wait_no_osd_down
fi
for i in $tests_to_run; do
  if $sanity_check ; then
    check_no_osd_down
  fi
  # Trace each test's commands; dynamic dispatch to the test_<name> function.
  set -x
  test_${i}
  set +x
done
if $sanity_check ; then
  check_no_osd_down
fi

set -x

echo OK