3 source $CEPH_ROOT/qa
/standalone
/ceph-helpers.sh
5 [ `uname` = FreeBSD
] && exit 0
# Body of the test driver: run every TEST_* function (or only those passed
# as arguments), with a fresh setup/teardown around each one.
# NOTE(review): the enclosing function's header and the loop's closing
# 'done' are elided from this chunk; this span is the visible interior only.
#
# Default to all functions named TEST_<lower/digit/underscore> found in the
# current shell (harvested from `set` output via sed).
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
    # $dir is presumably the per-test scratch directory supplied by the
    # caller — TODO confirm against the elided function header.
    setup $dir || return 1
    $func $dir || return 1
    teardown $dir || return 1
#######################################
# Exercise ceph-bluestore-tool BlueFS volume operations on 4 OSDs that are
# created with separate slow/DB/WAL devices: bluefs-bdev-expand after
# truncate-grow, bluefs-bdev-migrate between devices (including two
# deliberately unsupported "negative" migrations), and
# bluefs-bdev-new-db / bluefs-bdev-new-wal attachment of fresh devices.
# Between phases the OSDs are stopped (kill loop), operated on offline,
# fsck'ed, and reactivated; rados bench provides data to migrate.
# Globals:   CEPH_MON, CEPH_ARGS (written); dir (read — presumably the
#            scratch dir passed by the driver; TODO confirm)
# Returns:   0 on success, 1 on any failed step
# NOTE(review): several 'fi' terminators (and a few small lines) are elided
# from this chunk, so some 'if' constructs below appear unclosed here.
#######################################
function TEST_bluestore() {
    # BlueStore fsck at mount plus 4 OSDs opens many files; warn early.
    local flimit=$(ulimit -n)
    if [ $flimit -lt 1536 ]; then
        echo "Low open file limit ($flimit), test may fail. Increase to 1536 or higher and retry if that happens."

    export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    # 2 GB main (slow) device; 1 GB DB and 512 MB WAL created per OSD.
    CEPH_ARGS+="--bluestore_block_size=2147483648 "
    CEPH_ARGS+="--bluestore_block_db_create=true "
    CEPH_ARGS+="--bluestore_block_db_size=1073741824 "
    CEPH_ARGS+="--bluestore_block_wal_size=536870912 "
    CEPH_ARGS+="--bluestore_block_wal_create=true "
    CEPH_ARGS+="--bluestore_fsck_on_mount=true "

    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    osd_pid0=$(cat $dir/osd.0.pid)
    run_osd $dir 1 || return 1
    osd_pid1=$(cat $dir/osd.1.pid)
    run_osd $dir 2 || return 1
    osd_pid2=$(cat $dir/osd.2.pid)
    run_osd $dir 3 || return 1
    osd_pid3=$(cat $dir/osd.3.pid)

    # Populate BlueFS/RocksDB with some data to migrate later.
    timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1

    # Stop all OSDs: keep signalling until the process is gone.
    while kill $osd_pid0; do sleep 1 ; done
    while kill $osd_pid1; do sleep 1 ; done
    while kill $osd_pid2; do sleep 1 ; done
    while kill $osd_pid3; do sleep 1 ; done

    # Offline consistency check of every OSD before manipulating devices.
    ceph-bluestore-tool --path $dir/0 fsck || return 1
    ceph-bluestore-tool --path $dir/1 fsck || return 1
    ceph-bluestore-tool --path $dir/2 fsck || return 1
    ceph-bluestore-tool --path $dir/3 fsck || return 1

    # Grow each slow device by truncating the backing file, then let
    # BlueFS pick up the new size. Deliberately unaligned sizes included.
    truncate $dir/0/block -s 4294967296 # 4GB
    ceph-bluestore-tool --path $dir/0 bluefs-bdev-expand || return 1
    truncate $dir/1/block -s 4311744512 # 4GB + 16MB
    ceph-bluestore-tool --path $dir/1 bluefs-bdev-expand || return 1
    truncate $dir/2/block -s 4295099392 # 4GB + 129KB
    ceph-bluestore-tool --path $dir/2 bluefs-bdev-expand || return 1
    truncate $dir/3/block -s 4293918720 # 4GB - 1MB
    ceph-bluestore-tool --path $dir/3 bluefs-bdev-expand || return 1

    # slow, DB, WAL -> slow, DB
    ceph-bluestore-tool --path $dir/0 fsck || return 1
    ceph-bluestore-tool --path $dir/1 fsck || return 1
    ceph-bluestore-tool --path $dir/2 fsck || return 1
    ceph-bluestore-tool --path $dir/3 fsck || return 1

    ceph-bluestore-tool --path $dir/0 bluefs-bdev-sizes

    # osd.0: fold WAL into DB.
    ceph-bluestore-tool --path $dir/0 \
        --devs-source $dir/0/block.wal \
        --dev-target $dir/0/block.db \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/0 fsck || return 1

    # slow, DB, WAL -> slow, WAL
    # osd.1: fold DB into the slow device.
    ceph-bluestore-tool --path $dir/1 \
        --devs-source $dir/1/block.db \
        --dev-target $dir/1/block \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/1 fsck || return 1

    # slow, DB, WAL -> slow
    # osd.2: fold both WAL and DB into the slow device.
    ceph-bluestore-tool --path $dir/2 \
        --devs-source $dir/2/block.wal \
        --devs-source $dir/2/block.db \
        --dev-target $dir/2/block \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/2 fsck || return 1

    # slow, DB, WAL -> slow, WAL (negative case)
    ceph-bluestore-tool --path $dir/3 \
        --devs-source $dir/3/block.db \
        --dev-target $dir/3/block.wal \
        --command bluefs-bdev-migrate
    # Migration to WAL is unsupported — the tool must have failed above.
    # NOTE(review): the 'return 1'/'fi' closing this check is elided here.
    if [ $? -eq 0 ]; then

    ceph-bluestore-tool --path $dir/3 fsck || return 1

    # slow, DB, WAL -> slow, DB (WAL to slow then slow to DB)
    ceph-bluestore-tool --path $dir/3 \
        --devs-source $dir/3/block.wal \
        --dev-target $dir/3/block \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/3 fsck || return 1
    ceph-bluestore-tool --path $dir/3 \
        --devs-source $dir/3/block \
        --dev-target $dir/3/block.db \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/3 fsck || return 1

    # Bring all OSDs back online after the offline surgery.
    activate_osd $dir 0 || return 1
    osd_pid0=$(cat $dir/osd.0.pid)
    activate_osd $dir 1 || return 1
    osd_pid1=$(cat $dir/osd.1.pid)
    activate_osd $dir 2 || return 1
    osd_pid2=$(cat $dir/osd.2.pid)
    activate_osd $dir 3 || return 1
    osd_pid3=$(cat $dir/osd.3.pid)

    wait_for_clean || return 1

    # Second data pass, then stop OSDs again for the new-device phase.
    timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
    while kill $osd_pid0; do sleep 1 ; done
    while kill $osd_pid1; do sleep 1 ; done
    while kill $osd_pid2; do sleep 1 ; done
    while kill $osd_pid3; do sleep 1 ; done

    # slow, DB -> slow, DB, WAL
    ceph-bluestore-tool --path $dir/0 fsck || return 1
    # Create a 512 MB file to serve as a brand-new WAL device.
    dd if=/dev/zero of=$dir/0/wal count=512 bs=1M
    ceph-bluestore-tool --path $dir/0 \
        --dev-target $dir/0/wal \
        --command bluefs-bdev-new-wal || return 1
    ceph-bluestore-tool --path $dir/0 fsck || return 1

    # slow, WAL -> slow, DB, WAL
    ceph-bluestore-tool --path $dir/1 fsck || return 1
    # New 1 GB DB device, then move BlueFS data off the slow device onto it.
    dd if=/dev/zero of=$dir/1/db count=1024 bs=1M
    ceph-bluestore-tool --path $dir/1 \
        --dev-target $dir/1/db \
        --command bluefs-bdev-new-db || return 1
    ceph-bluestore-tool --path $dir/1 \
        --devs-source $dir/1/block \
        --dev-target $dir/1/block.db \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/1 fsck || return 1

    # slow -> slow, DB, WAL
    ceph-bluestore-tool --path $dir/2 fsck || return 1
    # No explicit file given: the tool creates the new DB/WAL itself.
    ceph-bluestore-tool --path $dir/2 \
        --command bluefs-bdev-new-db || return 1
    ceph-bluestore-tool --path $dir/2 \
        --command bluefs-bdev-new-wal || return 1
    ceph-bluestore-tool --path $dir/2 \
        --devs-source $dir/2/block \
        --dev-target $dir/2/block.db \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/2 fsck || return 1

    # slow, DB -> slow, WAL
    ceph-bluestore-tool --path $dir/3 fsck || return 1
    ceph-bluestore-tool --path $dir/3 \
        --command bluefs-bdev-new-wal || return 1
    ceph-bluestore-tool --path $dir/3 \
        --devs-source $dir/3/block.db \
        --dev-target $dir/3/block \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/3 fsck || return 1

    activate_osd $dir 0 || return 1
    osd_pid0=$(cat $dir/osd.0.pid)
    activate_osd $dir 1 || return 1
    osd_pid1=$(cat $dir/osd.1.pid)
    activate_osd $dir 2 || return 1
    osd_pid2=$(cat $dir/osd.2.pid)
    activate_osd $dir 3 || return 1
    osd_pid3=$(cat $dir/osd.3.pid)

    # Third data pass, then stop OSDs for the device-replacement phase.
    timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
    while kill $osd_pid0; do sleep 1 ; done
    while kill $osd_pid1; do sleep 1 ; done
    while kill $osd_pid2; do sleep 1 ; done
    while kill $osd_pid3; do sleep 1 ; done

    # slow, DB1, WAL -> slow, DB2, WAL  (replace the DB device)
    ceph-bluestore-tool --path $dir/0 fsck || return 1
    dd if=/dev/zero of=$dir/0/db2 count=1024 bs=1M
    ceph-bluestore-tool --path $dir/0 \
        --devs-source $dir/0/block.db \
        --dev-target $dir/0/db2 \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/0 fsck || return 1

    # slow, DB, WAL1 -> slow, DB, WAL2  (replace the WAL device)
    dd if=/dev/zero of=$dir/0/wal2 count=512 bs=1M
    ceph-bluestore-tool --path $dir/0 \
        --devs-source $dir/0/block.wal \
        --dev-target $dir/0/wal2 \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/0 fsck || return 1

    # slow, DB + WAL -> slow, DB2 -> slow
    ceph-bluestore-tool --path $dir/1 fsck || return 1
    dd if=/dev/zero of=$dir/1/db2 count=1024 bs=1M
    ceph-bluestore-tool --path $dir/1 \
        --devs-source $dir/1/block.db \
        --devs-source $dir/1/block.wal \
        --dev-target $dir/1/db2 \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/1 fsck || return 1
    ceph-bluestore-tool --path $dir/1 \
        --devs-source $dir/1/block.db \
        --dev-target $dir/1/block \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/1 fsck || return 1

    # slow -> slow, DB (negative case)
    ceph-objectstore-tool --type bluestore --data-path $dir/2 \
        --op fsck --no-mon-config || return 1
    dd if=/dev/zero of=$dir/2/db2 count=1024 bs=1M
    ceph-bluestore-tool --path $dir/2 \
        --devs-source $dir/2/block \
        --dev-target $dir/2/db2 \
        --command bluefs-bdev-migrate
    # Migration from slow-only to new device is unsupported — must fail.
    # NOTE(review): the 'return 1'/'fi' closing this check is elided here.
    if [ $? -eq 0 ]; then

    ceph-bluestore-tool --path $dir/2 fsck || return 1

    # slow + DB + WAL -> slow, DB2
    dd if=/dev/zero of=$dir/2/db2 count=1024 bs=1M
    ceph-bluestore-tool --path $dir/2 \
        --devs-source $dir/2/block \
        --devs-source $dir/2/block.db \
        --devs-source $dir/2/block.wal \
        --dev-target $dir/2/db2 \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/2 fsck || return 1

    # slow + WAL -> slow2, WAL2
    dd if=/dev/zero of=$dir/3/wal2 count=1024 bs=1M
    ceph-bluestore-tool --path $dir/3 \
        --devs-source $dir/3/block \
        --devs-source $dir/3/block.wal \
        --dev-target $dir/3/wal2 \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/3 fsck || return 1

    # Final restart: all OSDs must come up and the cluster go clean.
    activate_osd $dir 0 || return 1
    osd_pid0=$(cat $dir/osd.0.pid)
    activate_osd $dir 1 || return 1
    osd_pid1=$(cat $dir/osd.1.pid)
    activate_osd $dir 2 || return 1
    osd_pid2=$(cat $dir/osd.2.pid)
    activate_osd $dir 3 || return 1
    osd_pid3=$(cat $dir/osd.3.pid)

    timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
    wait_for_clean || return 1
#######################################
# Force RocksDB spillover from the (small, 1 GB) DB device onto the slow
# device by writing omap data, then migrate the DB back into the slow
# device with bluefs-bdev-migrate and verify fsck still passes and the
# OSD restarts cleanly.
# Globals:   CEPH_MON, CEPH_ARGS (written); dir (read — presumably the
#            scratch dir passed by the driver; TODO confirm)
# Returns:   0 on success, 1 on any failed step
# NOTE(review): the retry counter's initialization/increment and the
# loop-closing 'done' are elided from this chunk.
#######################################
function TEST_bluestore2() {
    local flimit=$(ulimit -n)
    if [ $flimit -lt 1536 ]; then
        echo "Low open file limit ($flimit), test may fail. Increase to 1536 or higher and retry if that happens."

    export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    # 4 GB slow device, 1 GB DB, no WAL; single-replica pool so one OSD
    # suffices; enforce ssd settings so behavior is deterministic.
    CEPH_ARGS+="--bluestore_block_size=4294967296 "
    CEPH_ARGS+="--bluestore_block_db_create=true "
    CEPH_ARGS+="--bluestore_block_db_size=1073741824 "
    CEPH_ARGS+="--bluestore_block_wal_create=false "
    CEPH_ARGS+="--bluestore_fsck_on_mount=true "
    CEPH_ARGS+="--osd_pool_default_size=1 "
    CEPH_ARGS+="--osd_pool_default_min_size=1 "
    CEPH_ARGS+="--bluestore_debug_enforce_settings=ssd "

    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    osd_pid0=$(cat $dir/osd.0.pid)

    # Keep writing omap data until RocksDB spills onto the slow device.
    while [[ $retry -le 5 ]]; do
        timeout 60 rados bench -p foo 10 write --write-omap --no-cleanup #|| return 1
        #give RocksDB some time to cooldown and put files to slow level(s)
        db_used=$( ceph tell osd.0 perf dump bluefs | jq ".bluefs.db_used_bytes" )
        spilled_over=$( ceph tell osd.0 perf dump bluefs | jq ".bluefs.slow_used_bytes" )
        test $spilled_over -eq 0 || break

    # Spillover must actually have happened for the rest to be meaningful.
    test $spilled_over -gt 0 || return 1

    while kill $osd_pid0; do sleep 1 ; done

    # Fold the DB (including spilled data) back into the slow device.
    ceph-bluestore-tool --path $dir/0 \
        --devs-source $dir/0/block.db \
        --dev-target $dir/0/block \
        --command bluefs-bdev-migrate || return 1
    ceph-bluestore-tool --path $dir/0 \
        --command bluefs-bdev-sizes || return 1
    ceph-bluestore-tool --path $dir/0 \
        --command fsck || return 1

    activate_osd $dir 0 || return 1
    osd_pid0=$(cat $dir/osd.0.pid)
    wait_for_clean || return 1
403 function TEST_bluestore_expand
() {
406 local flimit
=$
(ulimit -n)
407 if [ $flimit -lt 1536 ]; then
408 echo "Low open file limit ($flimit), test may fail. Increase to 1536 or higher and retry if that happens."
410 export CEPH_MON
="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one
412 CEPH_ARGS
+="--fsid=$(uuidgen) --auth-supported=none "
413 CEPH_ARGS
+="--mon-host=$CEPH_MON "
414 CEPH_ARGS
+="--bluestore_block_size=4294967296 "
415 CEPH_ARGS
+="--bluestore_block_db_create=true "
416 CEPH_ARGS
+="--bluestore_block_db_size=1073741824 "
417 CEPH_ARGS
+="--bluestore_block_wal_create=false "
418 CEPH_ARGS
+="--bluestore_fsck_on_mount=true "
419 CEPH_ARGS
+="--osd_pool_default_size=1 "
420 CEPH_ARGS
+="--osd_pool_default_min_size=1 "
421 CEPH_ARGS
+="--bluestore_debug_enforce_settings=ssd "
423 run_mon
$dir a ||
return 1
424 run_mgr
$dir x ||
return 1
425 run_osd
$dir 0 ||
return 1
426 osd_pid0
=$
(cat $dir/osd
.0.pid
)
432 timeout
60 rados bench
-p foo
30 write -b 4096 --no-cleanup #|| return 1
435 total_space_before
=$
( ceph tell osd
.0 perf dump bluefs | jq
".bluefs.slow_total_bytes" )
436 free_space_before
=`ceph tell osd.0 bluestore bluefs device info | grep "BDEV_SLOW" -A 2 | grep free | cut -d':' -f 2 | cut -d"," -f 1 | cut -d' ' -f 2`
439 while kill $osd_pid0; do sleep 1 ; done
442 # destage allocation to file before expand (in case fast-shutdown skipped that step)
443 ceph-bluestore-tool
--log-file $dir/bluestore_tool.log
--path $dir/0 allocmap ||
return 1
445 # expand slow devices
446 ceph-bluestore-tool
--log-file $dir/bluestore_tool.log
--path $dir/0 fsck ||
return 1
448 requested_space
=4294967296 # 4GB
449 truncate
$dir/0/block
-s $requested_space
450 ceph-bluestore-tool
--log-file $dir/bluestore_tool.log
--path $dir/0 bluefs-bdev-expand ||
return 1
452 # slow, DB, WAL -> slow, DB
453 ceph-bluestore-tool
--log-file $dir/bluestore_tool.log
--path $dir/0 fsck ||
return 1
455 # compare allocation-file with RocksDB state
456 ceph-bluestore-tool
--log-file $dir/bluestore_tool.log
--path $dir/0 qfsck ||
return 1
458 ceph-bluestore-tool
--log-file $dir/bluestore_tool.log
--path $dir/0 bluefs-bdev-sizes
460 activate_osd
$dir 0 ||
return 1
461 osd_pid0
=$
(cat $dir/osd
.0.pid
)
463 wait_for_clean ||
return 1
465 total_space_after
=$
( ceph tell osd
.0 perf dump bluefs | jq
".bluefs.slow_total_bytes" )
466 free_space_after
=`ceph tell osd.0 bluestore bluefs device info | grep "BDEV_SLOW" -A 2 | grep free | cut -d':' -f 2 | cut -d"," -f 1 | cut -d' ' -f 2`
468 if [$total_space_after != $requested_space]; then
469 echo "total_space_after = $total_space_after"
470 echo "requested_space = $requested_space"
474 total_space_added
=$
((total_space_after
- total_space_before
))
475 free_space_added
=$
((free_space_after
- free_space_before
))
477 let new_used_space
=($total_space_added - $free_space_added)
479 # allow upto 128KB to be consumed
480 if [ $new_used_space -gt 131072 ]; then
481 echo "total_space_added = $total_space_added"
482 echo "free_space_added = $free_space_added"
487 while kill $osd_pid0; do sleep 1 ; done
490 ceph-bluestore-tool
--log-file $dir/bluestore_tool.log
--path $dir/0 qfsck ||
return 1
493 main osd-bluefs-volume-ops
"$@"
496 # compile-command: "cd ../.. ; make -j4 && test/osd/osd-bluefs-volume-ops.sh"