]> git.proxmox.com Git - ceph.git/blob - ceph/src/ceph-disk/tests/ceph-disk.sh
update sources to v12.1.2
[ceph.git] / ceph / src / ceph-disk / tests / ceph-disk.sh
1 #!/bin/bash
2 #
3 # Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
4 # Copyright (C) 2014, 2015, 2016, 2017 Red Hat <contact@redhat.com>
5 #
6 # Author: Loic Dachary <loic@dachary.org>
7 #
8 # This program is free software; you can redistribute it and/or modify
9 # it under the terms of the GNU Library Public License as published by
10 # the Free Software Foundation; either version 2, or (at your option)
11 # any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Library Public License for more details.
17 #
18
19 # ceph-disk.sh is launched by tox which expects tox.ini in current
20 # directory. so we cannot run ceph-disk.sh in build directory directly,
21 # and hence not able to use detect-build-env-vars.sh to set the build
22 # env vars.
# ceph-disk.sh is launched by tox which expects tox.ini in the current
# directory, so the usual detect-build-env-vars.sh cannot be used; fall
# back to locating the build tree relative to this script (three levels
# up: src/ceph-disk/tests -> source root).
if [ -z "$CEPH_ROOT" ] || [ -z "$CEPH_BIN" ] || [ -z "$CEPH_LIB" ]; then
    CEPH_ROOT=`readlink -f $(dirname $0)/../../..`
    CEPH_BIN=$CEPH_ROOT
    CEPH_LIB=$CEPH_ROOT/.libs
fi
# Provides setup/teardown helpers, run_mon, run_mgr, kill_daemons,
# wait_osd_id_used_by_pgs, get_asok_dir, main, ...
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh

set -x

# Prefix trace output with file:line:function for easier debugging.
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '

export PATH=$CEPH_BIN:.:$PATH # make sure program from sources are preferred
export LD_LIBRARY_PATH=$CEPH_LIB
: ${CEPH_DISK:=ceph-disk}
CEPH_DISK_ARGS=
CEPH_DISK_ARGS+=" --verbose"
CEPH_DISK_ARGS+=" --prepend-to-path="
: ${CEPH_DISK_TIMEOUT:=360}
# On FreeBSD the Linux-compatible procfs (for /proc/mounts) lives under
# /compat/linux.
if [ `uname` != FreeBSD ]; then
    PROCDIR=""
else
    PROCDIR="/compat/linux"
fi

# Resolve the coreutils used by the tests once, so fixtures that prepend
# directories to PATH cannot shadow them later.
cat=$(which cat)
diff=$(which diff)
mkdir=$(which mkdir)
rm=$(which rm)
uuidgen=$(which uuidgen)
if [ `uname` = FreeBSD ]; then
    # for unknown reasons FreeBSD timeout does not return sometimes
    timeout=""
else
    timeout="$(which timeout) $CEPH_DISK_TIMEOUT"
fi
58
# Create a fresh test workspace in $dir (removing any leftovers first)
# with an osd subdirectory and an empty ceph.conf, which makes
# ceph-disk treat "ceph" as the cluster name.
function setup() {
    local dir=$1

    teardown $dir
    mkdir -p $dir/osd $(get_asok_dir)
    touch $dir/ceph.conf
}
66
# Tear down the workspace created by setup(): stop any daemons running
# out of $dir, clean btrfs leftovers, unmount everything mounted below
# $dir and remove it together with the asok directory.
function teardown() {
    local dir=$1
    if ! test -e $dir ; then
        return
    fi
    kill_daemons $dir
    # On btrfs the osd data dirs may contain subvolumes that plain
    # rm -fr cannot remove; __teardown_btrfs (from ceph-helpers.sh)
    # handles them.  stat -f reports the filesystem of the cwd.
    if [ `uname` != FreeBSD ] && \
       [ $(stat -f -c '%T' .) == "btrfs" ]; then
        rm -fr $dir/*/*db
        __teardown_btrfs $dir
    fi
    # Unmount anything still mounted below $dir; PROCDIR prefixes
    # /compat/linux on FreeBSD where the Linux procfs is mounted.
    grep " $(pwd)/$dir/" < ${PROCDIR}/proc/mounts | while read mounted rest ; do
        umount $mounted
    done
    rm -fr $dir
    rm -rf $(get_asok_dir)
}
84
# Replace $command with a wrapper script in $dir that records it was
# invoked (by touching $dir/used-$command) and then execs the real
# binary from $CEPH_BIN.  Refuses (returns 1) unless the command
# currently resolves to the build tree ($CEPH_BIN or the current
# directory), so a system binary is never wrapped by mistake.
function command_fixture() {
    local dir=$1
    shift
    local command=$1
    shift
    # Declare separately from the assignment so a readlink/which failure
    # is not masked by local's own exit status.
    local fpath
    fpath=$(readlink -f "$(which "$command")")
    [ "$fpath" = "$(readlink -f "$CEPH_BIN/$command")" ] || \
        [ "$fpath" = "$(readlink -f "$(pwd)/$command")" ] || return 1

    cat > "$dir/$command" <<EOF
#!/bin/bash
touch $dir/used-$command
exec $CEPH_BIN/$command "\$@"
EOF
    chmod +x "$dir/$command"
}
100
# Verify that ceph-disk only picks up executables from $dir when the
# given $tweaker (use_path / use_prepend_to_path) injects $dir into its
# lookup path: without the tweaker the fixtures must stay unused, with
# it the ceph-osd fixture must be invoked.
function tweak_path() {
    local dir=$1
    shift
    local tweaker=$1
    shift

    # Phase 1: plain activation, fixtures must NOT be picked up.
    setup $dir
    command_fixture $dir ceph-conf || return 1
    command_fixture $dir ceph-osd || return 1
    test_activate_dir $dir || return 1
    test ! -f $dir/used-ceph-conf || return 1
    test ! -f $dir/used-ceph-osd || return 1
    teardown $dir

    # Phase 2: activation through the tweaker, the ceph-osd fixture
    # must have been used.
    setup $dir
    command_fixture $dir ceph-conf || return 1
    command_fixture $dir ceph-osd || return 1
    $tweaker $dir test_activate_dir || return 1
    test -f $dir/used-ceph-osd || return 1
    teardown $dir
}
130
# Run "$@ $dir" with CEPH_DISK_ARGS redirecting ceph-disk state, conf
# and executable lookup (--prepend-to-path) into $dir.
function use_prepend_to_path() {
    local dir=$1
    shift

    local args=" --statedir=$dir --sysconfdir=$dir --prepend-to-path=$dir --verbose"
    CEPH_DISK_ARGS="$args" \
        "$@" $dir || return 1
}
143
# Activation must prefer executables from the --prepend-to-path dir.
function test_prepend_to_path() {
    local dir=$1
    shift

    tweak_path $dir use_prepend_to_path || return 1
}
149
# Run "$@ $dir" with $dir prepended to PATH for that single command.
function use_path() {
    local dir=$1
    shift

    PATH="$dir:$PATH" "$@" $dir || return 1
}
156
# Activation must prefer executables found first in PATH.
function test_path() {
    local dir=$1
    shift

    tweak_path $dir use_path || return 1
}
162
# Activation must still work with a minimal PATH containing only the
# virtualenvs, CEPH_BIN and the standard system directories.
function test_no_path() {
    local dir=$1
    shift

    (
        export PATH=../ceph-detect-init/virtualenv/bin:virtualenv/bin:$CEPH_BIN:/usr/bin:/bin:/usr/local/bin
        test_activate_dir $dir
    ) || return 1
}
168
# Activating with --mark-init must create the marker file named after
# the chosen init system in the osd data dir; re-activating with a
# different flavor must replace the previous marker.
function test_mark_init() {
    local dir=$1
    shift

    run_mon $dir a || return 1
    create_rbd_pool

    local osd_data=$dir/dir
    $mkdir -p $osd_data

    local osd_uuid=$($uuidgen)

    ${CEPH_DISK} $CEPH_DISK_ARGS \
        prepare --filestore --osd-uuid $osd_uuid $osd_data || return 1

    # --mark-init=auto must pick the init system detected on this host.
    $timeout ${CEPH_DISK} $CEPH_DISK_ARGS \
        --verbose \
        activate \
        --mark-init=auto \
        --no-start-daemon \
        $osd_data || return 1

    test -f $osd_data/$(ceph-detect-init) || return 1

    # Re-activate with the *other* init system and verify the old
    # marker is gone and the new one is present.
    if test systemd = $(ceph-detect-init) ; then
        expected=sysvinit
    else
        expected=systemd
    fi
    $timeout ${CEPH_DISK} $CEPH_DISK_ARGS \
        --verbose \
        activate \
        --mark-init=$expected \
        --no-start-daemon \
        $osd_data || return 1

    ! test -f $osd_data/$(ceph-detect-init) || return 1
    test -f $osd_data/$expected || return 1
}
210
# zap must refuse to operate on anything that is not a full block
# device and say so on its output.
function test_zap() {
    local dir=$1
    local osd_data=$dir/dir

    $mkdir -p $osd_data
    if ! ${CEPH_DISK} $CEPH_DISK_ARGS zap $osd_data 2>&1 | grep -q 'not full block device' ; then
        return 1
    fi
    $rm -fr $osd_data
}
220
# ceph-disk prepare returns immediately on success if the magic file
# exists in the --osd-data directory.
function test_activate_dir_magic() {
    local dir=$1
    local uuid=$($uuidgen)
    local osd_data=$dir/osd

    echo a failure to create the fsid file implies the magic file is not created

    # Making fsid an unwritable path (a directory) forces prepare to
    # fail before it gets a chance to create the magic file.
    mkdir -p $osd_data/fsid
    CEPH_ARGS="--fsid $uuid" \
     ${CEPH_DISK} $CEPH_DISK_ARGS prepare --filestore $osd_data > $dir/out 2>&1
    grep --quiet 'Is a directory' $dir/out || return 1
    ! [ -f $osd_data/magic ] || return 1
    rmdir $osd_data/fsid

    echo successfully prepare the OSD

    CEPH_ARGS="--fsid $uuid" \
     ${CEPH_DISK} $CEPH_DISK_ARGS prepare --filestore $osd_data 2>&1 | tee $dir/out
    grep --quiet 'Preparing osd data dir' $dir/out || return 1
    grep --quiet $uuid $osd_data/ceph_fsid || return 1
    [ -f $osd_data/magic ] || return 1

    echo will not override an existing OSD

    # A second prepare with a different fsid must leave the existing
    # OSD untouched (ceph_fsid still holds the first uuid).
    CEPH_ARGS="--fsid $($uuidgen)" \
     ${CEPH_DISK} $CEPH_DISK_ARGS prepare --filestore $osd_data 2>&1 | tee $dir/out
    grep --quiet 'Data dir .* already exists' $dir/out || return 1
    grep --quiet $uuid $osd_data/ceph_fsid || return 1
}
252
# Write a small object named $file (random name by default) into the
# rbd pool and read it back, verifying the round-trip is
# byte-identical.
function read_write() {
    local dir=$1
    # Use the pre-resolved $uuidgen like the rest of the file, so a
    # PATH fixture cannot shadow it.
    local file=${2:-$($uuidgen)}
    local pool=rbd

    echo FOO > $dir/$file
    $timeout rados --pool $pool put $file $dir/$file || return 1
    $timeout rados --pool $pool get $file $dir/$file.copy || return 1
    $diff $dir/$file $dir/$file.copy || return 1
}
263
# Shrink the rbd pool to a single replica (there is only one OSD) and
# verify an object can be stored and retrieved.
function test_pool_read_write() {
    local dir=$1

    $timeout ceph osd pool set rbd size 1 || return 1
    read_write $dir || return 1
}
271
# Prepare a filestore OSD in $osd_data (any extra arguments are passed
# to prepare), activate it without registering an init system, then
# verify the cluster accepts reads and writes.
function test_activate() {
    local dir=$1
    shift
    local osd_data=$1
    shift

    mkdir -p $osd_data
    ${CEPH_DISK} $CEPH_DISK_ARGS prepare --filestore "$@" $osd_data || return 1
    $timeout ${CEPH_DISK} $CEPH_DISK_ARGS activate --mark-init=none $osd_data || return 1
    test_pool_read_write $dir || return 1
}
290
# Verify that the id of an OSD that was destroyed — but not purged, so
# it keeps its place in the crushmap — is handed out again by
# "ceph osd new" and that the replacement OSD takes over serving PGs.
function test_reuse_osd_id() {
    local dir=$1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    create_rbd_pool

    test_activate $dir $dir/dir1 --osd-uuid $(uuidgen) || return 1

    #
    # add a new OSD with a given OSD id (13)
    #
    local osd_uuid=$($uuidgen)
    local osd_id=13
    test_activate $dir $dir/dir2 --osd-id $osd_id --osd-uuid $osd_uuid || return 1
    test $osd_id = $(ceph osd new $osd_uuid) || return 1

    #
    # make sure the OSD is in use by the PGs
    #
    wait_osd_id_used_by_pgs $osd_id $PG_NUM || return 1
    read_write $dir SOMETHING || return 1

    #
    # set the OSD out and verify it is no longer used by the PGs
    #
    ceph osd out osd.$osd_id || return 1
    wait_osd_id_used_by_pgs $osd_id 0 || return 1

    #
    # kill the OSD and destroy it (do not purge, retain its place in the crushmap)
    #
    kill_daemons $dir TERM osd.$osd_id || return 1
    ceph osd destroy osd.$osd_id --yes-i-really-mean-it || return 1

    #
    # add a new OSD with the same id as the destroyed OSD
    #
    osd_uuid=$($uuidgen)
    test_activate $dir $dir/dir3 --osd-id $osd_id --osd-uuid $osd_uuid || return 1
    test $osd_id = $(ceph osd new $osd_uuid) || return 1
}
333
# Start a mon and a mgr in $dir, then prepare and activate an OSD in
# $dir/dir.  Any remaining arguments form an optional command to run
# just before activation (e.g. "rm $dir/ceph.conf" from
# test_find_cluster_by_uuid).
function test_activate_dir() {
    local dir=$1
    shift

    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    create_rbd_pool
    # Deliberately unquoted: the caller passes the extra command as a
    # single string that must be word-split into command + arguments.
    $@

    test_activate $dir $dir/dir || return 1
}
345
# Prepare and activate a bluestore OSD backed by a regular file
# (--block-file) instead of a block device, then verify the cluster
# accepts reads and writes.
function test_activate_dir_bluestore() {
    local dir=$1

    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    create_rbd_pool

    local osd_data=$dir/dir
    $mkdir -p $osd_data
    local to_prepare=$osd_data
    local to_activate=$osd_data
    local osd_uuid=$($uuidgen)

    # 10 GiB block file (10737418240 bytes).
    CEPH_ARGS=" --bluestore-block-size=10737418240 $CEPH_ARGS" \
     ${CEPH_DISK} $CEPH_DISK_ARGS \
        prepare --bluestore --block-file --osd-uuid $osd_uuid $to_prepare || return 1

    # fsck-on-mount verifies the freshly prepared store is consistent;
    # db/wal sizes are kept small (64 MiB / 128 MiB) for test speed.
    CEPH_ARGS=" --osd-objectstore=bluestore --bluestore-fsck-on-mount=true --bluestore-block-db-size=67108864 --bluestore-block-wal-size=134217728 --bluestore-block-size=10737418240 $CEPH_ARGS" \
     $timeout ${CEPH_DISK} $CEPH_DISK_ARGS \
        activate \
        --mark-init=none \
        $to_activate || return 1

    test_pool_read_write $dir || return 1
}
371
# When activating, ceph-disk must locate the cluster conf by fsid; if
# the conf file is removed it must complain that no cluster conf was
# found in $dir.
function test_find_cluster_by_uuid() {
    local dir=$1

    test_activate_dir $dir 2>&1 | tee $dir/test_find
    ! grep "No cluster conf found in $dir" $dir/test_find || return 1
    teardown $dir

    setup $dir
    # Remove ceph.conf right before activation so the lookup fails.
    # (A stray debugging "cp $dir/test_find /tmp" used to live here;
    # dropped — it leaked test output to a predictable /tmp path.)
    test_activate_dir $dir "rm $dir/ceph.conf" > $dir/test_find 2>&1
    grep --quiet "No cluster conf found in $dir" $dir/test_find || return 1
}
383
# http://tracker.ceph.com/issues/9653
# The bootstrap-osd keyring used during activation must be looked up
# under the configured statedir, not the system default location.
function test_keyring_path() {
    local dir=$1

    test_activate_dir $dir 2>&1 | tee $dir/test_keyring
    if ! grep --quiet "keyring $dir/bootstrap-osd/ceph.keyring" $dir/test_keyring ; then
        return 1
    fi
}
390
# An OSD prepared with --crush-device-class must record the class in
# its data dir and, once activated, the class must appear in the crush
# map.
function test_crush_device_class() {
    local dir=$1
    shift

    run_mon $dir a || return 1
    create_rbd_pool

    local osd_data=$dir/dir
    $mkdir -p $osd_data

    local osd_uuid=$($uuidgen)

    ${CEPH_DISK} $CEPH_DISK_ARGS \
        prepare --filestore --osd-uuid $osd_uuid \
                --crush-device-class CRUSH_CLASS \
                $osd_data || return 1
    test -f $osd_data/crush_device_class || return 1
    test $(cat $osd_data/crush_device_class) = CRUSH_CLASS || return 1

    CEPH_ARGS="--crush-location=root=default $CEPH_ARGS" \
     ${CEPH_DISK} $CEPH_DISK_ARGS \
        --verbose \
        activate \
        --mark-init=none \
        $osd_data || return 1

    # The class is registered asynchronously: poll the crush map with
    # exponential backoff instead of a single fixed sleep.
    ok=false
    for delay in 2 4 8 16 32 64 128 256 ; do
        if ceph osd crush dump | grep --quiet 'CRUSH_CLASS' ; then
            ok=true
            break
        fi
        sleep $delay
        ceph osd crush dump # for debugging purposes
    done
    $ok || return 1
}
430
# Entry point invoked by main() from ceph-helpers.sh: point ceph-disk
# and all daemons at a throw-away single-mon cluster rooted in $dir,
# then run either the actions named on the command line or the full
# default list, with a fresh setup/teardown around each one.
function run() {
    local dir=$1
    shift
    CEPH_DISK_ARGS+=" --statedir=$dir"
    CEPH_DISK_ARGS+=" --sysconfdir=$dir"

    export CEPH_MON="127.0.0.1:7451" # git grep '\<7451\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+=" --fsid=$(uuidgen)"
    CEPH_ARGS+=" --auth-supported=none"
    CEPH_ARGS+=" --mon-host=$CEPH_MON"
    CEPH_ARGS+=" --chdir="
    CEPH_ARGS+=" --journal-dio=false"
    CEPH_ARGS+=" --erasure-code-dir=$CEPH_LIB"
    CEPH_ARGS+=" --plugin-dir=$CEPH_LIB"
    # \$name is expanded by each daemon, not by this script.
    CEPH_ARGS+=" --log-file=$dir/\$name.log"
    CEPH_ARGS+=" --pid-file=$dir/\$name.pidfile"
    CEPH_ARGS+=" --osd-class-dir=$CEPH_LIB"
    CEPH_ARGS+=" --run-dir=$dir"
    CEPH_ARGS+=" --osd-failsafe-full-ratio=.99"
    CEPH_ARGS+=" --osd-journal-size=100"
    CEPH_ARGS+=" --debug-osd=20"
    CEPH_ARGS+=" --debug-bdev=20"
    CEPH_ARGS+=" --debug-bluestore=20"
    CEPH_ARGS+=" --osd-max-object-name-len=460"
    CEPH_ARGS+=" --osd-max-object-namespace-len=64 "
    local default_actions
    default_actions+="test_path "
    default_actions+="test_no_path "
    default_actions+="test_find_cluster_by_uuid "
    default_actions+="test_prepend_to_path "
    default_actions+="test_activate_dir_magic "
    default_actions+="test_activate_dir "
    default_actions+="test_keyring_path "
    # init-system marking and bluestore are skipped on FreeBSD.
    [ `uname` != FreeBSD ] && \
      default_actions+="test_mark_init "
    default_actions+="test_zap "
    [ `uname` != FreeBSD ] && \
      default_actions+="test_activate_dir_bluestore "
    default_actions+="test_crush_device_class "
    default_actions+="test_reuse_osd_id "
    local actions=${@:-$default_actions}
    for action in $actions ; do
        setup $dir || return 1
        set -x
        $action $dir || return 1
        set +x
        teardown $dir || return 1
    done
}
481
482 main test-ceph-disk "$@"
483
484 # Local Variables:
485 # compile-command: "cd .. ; test/ceph-disk.sh # test_activate_dir"
486 # End: