]> git.proxmox.com Git - ceph.git/blob - ceph/qa/workunits/rbd/rbd_mirror_helpers.sh
update sources to v12.1.3
[ceph.git] / ceph / qa / workunits / rbd / rbd_mirror_helpers.sh
1 #!/bin/sh
2 #
3 # rbd_mirror_helpers.sh - shared rbd-mirror daemon helper functions
4 #
# The script starts two ("local" and "remote") clusters using the mstart.sh script,
6 # creates a temporary directory, used for cluster configs, daemon logs, admin
7 # socket, temporary files, and launches rbd-mirror daemon.
8 #
9 # There are several env variables useful when troubleshooting a test failure:
10 #
11 # RBD_MIRROR_NOCLEANUP - if not empty, don't run the cleanup (stop processes,
12 # destroy the clusters and remove the temp directory)
13 # on exit, so it is possible to check the test state
14 # after failure.
15 # RBD_MIRROR_TEMDIR - use this path when creating the temporary directory
16 # (should not exist) instead of running mktemp(1).
17 # RBD_MIRROR_ARGS - use this to pass additional arguments to started
18 # rbd-mirror daemons.
19 # RBD_MIRROR_VARGS - use this to pass additional arguments to vstart.sh
20 # when starting clusters.
21 #
22 # The cleanup can be done as a separate step, running the script with
23 # `cleanup ${RBD_MIRROR_TEMDIR}' arguments.
24 #
25 # Note, as other workunits tests, rbd_mirror.sh expects to find ceph binaries
26 # in PATH.
27 #
28 # Thus a typical troubleshooting session:
29 #
30 # From Ceph src dir (CEPH_SRC_PATH), start the test in NOCLEANUP mode and with
31 # TEMPDIR pointing to a known location:
32 #
33 # cd $CEPH_SRC_PATH
34 # PATH=$CEPH_SRC_PATH:$PATH
35 # RBD_MIRROR_NOCLEANUP=1 RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
36 # ../qa/workunits/rbd/rbd_mirror.sh
37 #
38 # After the test failure cd to TEMPDIR and check the current state:
39 #
40 # cd /tmp/tmp.rbd_mirror
41 # ls
42 # less rbd-mirror.cluster1_daemon.$pid.log
43 # ceph --cluster cluster1 -s
#   ceph --cluster cluster2 -s
45 # rbd --cluster cluster2 -p mirror ls
46 # rbd --cluster cluster2 -p mirror journal status --image test
47 # ceph --admin-daemon rbd-mirror.cluster1_daemon.cluster1.$pid.asok help
48 # ...
49 #
50 # Also you can execute commands (functions) from the script:
51 #
52 # cd $CEPH_SRC_PATH
53 # export RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror
54 # ../qa/workunits/rbd/rbd_mirror.sh status
55 # ../qa/workunits/rbd/rbd_mirror.sh stop_mirror cluster1
56 # ../qa/workunits/rbd/rbd_mirror.sh start_mirror cluster2
57 # ../qa/workunits/rbd/rbd_mirror.sh flush cluster2
58 # ...
59 #
60 # Eventually, run the cleanup:
61 #
62 # cd $CEPH_SRC_PATH
63 # RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
64 # ../qa/workunits/rbd/rbd_mirror.sh cleanup
65 #
66
67 CLUSTER1=cluster1
68 CLUSTER2=cluster2
69 POOL=mirror
70 PARENT_POOL=mirror_parent
71 TEMPDIR=
72 USER_ID=mirror
73 export CEPH_ARGS="--id ${USER_ID}"
74
# Locate the ceph source tree relative to this script.  When run from a
# cmake build directory (CMakeCache.txt present in $PWD), use the in-tree
# binaries and wire up the library/python paths so the ceph CLI works.
CEPH_ROOT=$(readlink -f $(dirname $0)/../../../src)
CEPH_BIN=.
CEPH_SRC=.
if [ -e CMakeCache.txt ]; then
    CEPH_SRC=${CEPH_ROOT}
    CEPH_ROOT=${PWD}
    CEPH_BIN=./bin

    # needed for ceph CLI under cmake
    export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
    export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind
    for x in ${CEPH_ROOT}/lib/cython_modules/lib* ; do
        export PYTHONPATH="${PYTHONPATH}:${x}"
    done
fi
90
91 # These vars facilitate running this script in an environment with
92 # ceph installed from packages, like teuthology. These are not defined
93 # by default.
94 #
95 # RBD_MIRROR_USE_EXISTING_CLUSTER - if set, do not start and stop ceph clusters
96 # RBD_MIRROR_USE_RBD_MIRROR - if set, use an existing instance of rbd-mirror
97 # running as ceph client $CEPH_ID. If empty,
98 # this script will start and stop rbd-mirror
99
100 #
101 # Functions
102 #
103
# Parse a value in format cluster[:instance] and set cluster and instance vars.
set_cluster_instance()
{
    local spec=$1
    local cluster_var=$2
    local instance_var=$3

    # NB: cluster/instance are deliberately NOT declared local here: with
    # dynamic scoping the evals below may need to assign the caller's own
    # "cluster"/"instance" locals when those names are passed in.
    cluster=${spec%:*}
    instance=${spec##*:}

    if [ "${instance}" = "${spec}" ]; then
        # no ":instance" suffix was given -- fall back to instance 0
        instance=0
    fi

    eval ${cluster_var}=${cluster}
    eval ${instance_var}=${instance}
}
122
daemon_asok_file()
{
    # Print the admin socket path of the rbd-mirror daemon running on
    # cluster[:instance] ${1} that replicates from cluster ${2}.
    local which_cluster=$1
    local peer_cluster=$2
    local instance

    set_cluster_instance "${which_cluster}" which_cluster instance

    if [ -n "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
        # externally managed daemon: ask ceph-conf where its socket lives
        echo $(ceph-conf --cluster $which_cluster --name "client.${CEPH_ID}" 'admin socket')
    else
        echo "${TEMPDIR}/rbd-mirror.${which_cluster}_daemon.${instance}.${peer_cluster}.asok"
    fi
}
137
daemon_pid_file()
{
    # Print the pid-file path of the rbd-mirror daemon for
    # cluster[:instance] ${1}.
    local which_cluster=$1
    local instance

    set_cluster_instance "${which_cluster}" which_cluster instance

    if [ -n "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
        # externally managed daemon: look the pid file up in its config
        echo $(ceph-conf --cluster $which_cluster --name "client.${CEPH_ID}" 'pid file')
    else
        echo "${TEMPDIR}/rbd-mirror.${which_cluster}_daemon.${instance}.pid"
    fi
}
151
testlog()
{
    # Timestamp the message, append it to the shared test log and echo it
    # to stderr.
    local stamp
    stamp=$(date '+%F %T')
    echo ${stamp} $@ | tee -a "${TEMPDIR}/rbd-mirror.test.log" >&2
}
156
expect_failure()
{
    # Run "$@" expecting a non-zero exit status; if ${1} (a grep pattern)
    # is non-empty, additionally require the captured output to match it.
    # Returns 0 when the command failed as expected, 1 otherwise (dumping
    # the captured output to stderr for diagnosis).
    local pattern="$1" ; shift
    local capture=${TEMPDIR}/expect_failure.out

    if "$@" > ${capture} 2>&1 ; then
        # command unexpectedly succeeded
        cat ${capture} >&2
        return 1
    fi

    if [ -n "${pattern}" ] && ! grep -q "${pattern}" ${capture} ; then
        # failed, but without the expected diagnostic
        cat ${capture} >&2
        return 1
    fi

    return 0
}
178
setup()
{
    # Create TEMPDIR, (optionally) start the two vstart clusters, create
    # the mirror user, pools, and cross-cluster peers.
    local c
    # make sure everything is torn down on any exit path
    trap cleanup INT TERM EXIT

    if [ -n "${RBD_MIRROR_TEMDIR}" ]; then
        test -d "${RBD_MIRROR_TEMDIR}" ||
        mkdir "${RBD_MIRROR_TEMDIR}"
        TEMPDIR="${RBD_MIRROR_TEMDIR}"
        cd ${TEMPDIR}
    else
        TEMPDIR=`mktemp -d`
    fi

    if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
        cd ${CEPH_ROOT}
        # CEPH_ARGS is cleared on each command so the global "--id mirror"
        # does not leak into cluster administration done as client.admin.
        CEPH_ARGS='' ${CEPH_SRC}/mstart.sh ${CLUSTER1} -n ${RBD_MIRROR_VARGS}
        CEPH_ARGS='' ${CEPH_SRC}/mstart.sh ${CLUSTER2} -n ${RBD_MIRROR_VARGS}

        # create the restricted "mirror" user on both clusters
        CEPH_ARGS='' ceph --conf run/${CLUSTER1}/ceph.conf \
            auth get-or-create client.${USER_ID} mon 'profile rbd' osd 'profile rbd' >> \
            run/${CLUSTER1}/keyring
        CEPH_ARGS='' ceph --conf run/${CLUSTER2}/ceph.conf \
            auth get-or-create client.${USER_ID} mon 'profile rbd' osd 'profile rbd' >> \
            run/${CLUSTER2}/keyring

        # symlink the cluster configs into TEMPDIR so "--cluster <name>"
        # resolves when commands are run from there
        rm -f ${TEMPDIR}/${CLUSTER1}.conf
        ln -s $(readlink -f run/${CLUSTER1}/ceph.conf) \
           ${TEMPDIR}/${CLUSTER1}.conf
        rm -f ${TEMPDIR}/${CLUSTER2}.conf
        ln -s $(readlink -f run/${CLUSTER2}/ceph.conf) \
           ${TEMPDIR}/${CLUSTER2}.conf

        cd ${TEMPDIR}
    fi

    # create and initialize the data and parent pools on both clusters
    CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool create ${POOL} 64 64
    CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool create ${PARENT_POOL} 64 64
    CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool create ${PARENT_POOL} 64 64
    CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool create ${POOL} 64 64

    CEPH_ARGS='' rbd --cluster ${CLUSTER1} pool init ${POOL}
    CEPH_ARGS='' rbd --cluster ${CLUSTER2} pool init ${POOL}
    CEPH_ARGS='' rbd --cluster ${CLUSTER1} pool init ${PARENT_POOL}
    CEPH_ARGS='' rbd --cluster ${CLUSTER2} pool init ${PARENT_POOL}

    # POOL mirrors everything; PARENT_POOL mirrors per-image only
    rbd --cluster ${CLUSTER1} mirror pool enable ${POOL} pool
    rbd --cluster ${CLUSTER2} mirror pool enable ${POOL} pool
    rbd --cluster ${CLUSTER1} mirror pool enable ${PARENT_POOL} image
    rbd --cluster ${CLUSTER2} mirror pool enable ${PARENT_POOL} image

    # peer the pools in both directions
    rbd --cluster ${CLUSTER1} mirror pool peer add ${POOL} ${CLUSTER2}
    rbd --cluster ${CLUSTER2} mirror pool peer add ${POOL} ${CLUSTER1}
    rbd --cluster ${CLUSTER1} mirror pool peer add ${PARENT_POOL} ${CLUSTER2}
    rbd --cluster ${CLUSTER2} mirror pool peer add ${PARENT_POOL} ${CLUSTER1}
}
235
cleanup()
{
    # Tear down everything setup() created.  Honors RBD_MIRROR_NOCLEANUP
    # (skip entirely, leaving state for inspection) and
    # RBD_MIRROR_USE_EXISTING_CLUSTER (delete pools instead of stopping
    # the clusters).
    test -n "${RBD_MIRROR_NOCLEANUP}" && return
    local cluster instance

    # best-effort from here on -- keep going even if individual steps fail
    set +e

    for cluster in "${CLUSTER1}" "${CLUSTER2}"; do
        for instance in `seq 0 9`; do
            stop_mirror "${cluster}:${instance}"
        done
    done

    if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
        cd ${CEPH_ROOT}
        CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER1}
        CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER2}
    else
        CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
        CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
        CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
        CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
    fi
    # keep a user-supplied RBD_MIRROR_TEMDIR; remove only mktemp'd dirs
    test "${RBD_MIRROR_TEMDIR}" = "${TEMPDIR}" ||
    rm -Rf ${TEMPDIR}
}
262
start_mirror()
{
    # Launch an rbd-mirror daemon for cluster[:instance] ${1}.  No-op when
    # an externally managed rbd-mirror is in use.
    local cluster=$1
    local instance

    set_cluster_instance "${cluster}" cluster instance

    test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return

    # NB: the escaped \$cluster in --admin-socket is expanded by the
    # daemon itself (config metavariable), not by this shell.
    rbd-mirror \
        --cluster ${cluster} \
        --id mirror \
        --pid-file=$(daemon_pid_file "${cluster}:${instance}") \
        --log-file=${TEMPDIR}/rbd-mirror.${cluster}_daemon.${instance}.log \
        --admin-socket=${TEMPDIR}/rbd-mirror.${cluster}_daemon.${instance}.\$cluster.asok \
        --rbd-mirror-delete-retry-interval=5 \
        --rbd-mirror-image-state-check-interval=5 \
        --rbd-mirror-journal-poll-age=1 \
        --rbd-mirror-pool-replayers-refresh-interval=5 \
        --debug-rbd=30 --debug-journaler=30 \
        --debug-rbd_mirror=30 \
        --daemonize=true \
        ${RBD_MIRROR_ARGS}
}
287
stop_mirror()
{
    # Stop the rbd-mirror daemon for cluster[:instance] ${1}; ${2} may be
    # a kill(1) signal option (e.g. -9), default is SIGTERM.  No-op when
    # an externally managed rbd-mirror is in use.
    local cluster=$1
    local sig=$2

    test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return

    local pid
    pid=$(cat $(daemon_pid_file "${cluster}") 2>/dev/null) || :
    if [ -n "${pid}" ]
    then
        kill ${sig} ${pid}
        # Wait (exponential back-off, ~63s max) for the process to leave
        # the process table.  The awk exits 1 while the pid is still
        # present, so "&& break" fires once it is gone.
        for s in 1 2 4 8 16 32; do
            sleep $s
            ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}' && break
        done
        # final check: fails (under the caller's set -e) if still running
        ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}'
    fi
    # remove stale socket/pid files for both peer directions
    rm -f $(daemon_asok_file "${cluster}" "${CLUSTER1}")
    rm -f $(daemon_asok_file "${cluster}" "${CLUSTER2}")
    rm -f $(daemon_pid_file "${cluster}")
}
310
admin_daemon()
{
    # Run an admin-socket command ("$@") against the rbd-mirror daemon of
    # cluster[:instance] ${1}.
    local cluster=$1 ; shift
    local instance

    set_cluster_instance "${cluster}" cluster instance

    local asok
    asok=$(daemon_asok_file "${cluster}:${instance}" "${cluster}")
    # fail early (under the caller's set -e) if the socket is missing
    test -S "${asok}"

    ceph --admin-daemon ${asok} $@
}
323
status()
{
    # Dump a human-readable overview of both clusters: cluster health,
    # per-pool image lists and mirror status, plus the state of each
    # rbd-mirror daemon (pid file, ps entry, admin socket).
    # Returns non-zero if any daemon that should be running is not.
    local cluster image_pool image

    for cluster in ${CLUSTER1} ${CLUSTER2}
    do
        echo "${cluster} status"
        ceph --cluster ${cluster} -s
        echo

        for image_pool in ${POOL} ${PARENT_POOL}
        do
            echo "${cluster} ${image_pool} images"
            rbd --cluster ${cluster} -p ${image_pool} ls
            echo

            echo "${cluster} ${image_pool} mirror pool status"
            rbd --cluster ${cluster} -p ${image_pool} mirror pool status --verbose
            echo

            for image in `rbd --cluster ${cluster} -p ${image_pool} ls 2>/dev/null`
            do
                echo "image ${image} info"
                rbd --cluster ${cluster} -p ${image_pool} info ${image}
                echo
                echo "image ${image} journal status"
                rbd --cluster ${cluster} -p ${image_pool} journal status --image ${image}
                echo
            done
        done
    done

    # BUGFIX: ret was previously uninitialized, so the all-good path did a
    # bare "return" (status of the last command) instead of returning 0.
    local ret=0

    for cluster in "${CLUSTER1}" "${CLUSTER2}"
    do
        local pid_file=$(daemon_pid_file ${cluster} )
        if [ ! -e ${pid_file} ]
        then
            echo "${cluster} rbd-mirror not running or unknown" \
                 "(${pid_file} does not exist)"
            continue
        fi

        local pid
        pid=$(cat ${pid_file} 2>/dev/null) || :
        if [ -z "${pid}" ]
        then
            echo "${cluster} rbd-mirror not running or unknown" \
                 "(can't find pid using ${pid_file})"
            ret=1
            continue
        fi

        # BUGFIX: this previously echoed ${daemon}, which was never set.
        echo "${cluster} rbd-mirror process in ps output:"
        # the awk exits 1 when the pid IS found, so entering the if-branch
        # means the process is missing
        if ps auxww |
                awk -v pid=${pid} 'NR == 1 {print} $2 == pid {print; exit 1}'
        then
            echo
            echo "${cluster} rbd-mirror not running" \
                 "(can't find pid $pid in ps output)"
            ret=1
            continue
        fi
        echo

        local asok_file=$(daemon_asok_file ${cluster} ${cluster})
        if [ ! -S "${asok_file}" ]
        then
            echo "${cluster} rbd-mirror asok is unknown (${asok_file} does not exist)"
            ret=1
            continue
        fi

        echo "${cluster} rbd-mirror status"
        ceph --admin-daemon ${asok_file} rbd mirror status
        echo
    done

    return ${ret}
}
405
flush()
{
    # Ask the rbd-mirror daemon on ${1} to flush.  With ${3} set, flush
    # only image ${2}/${3}; otherwise flush all replayers.
    local cluster=$1
    local pool=$2
    local image=$3
    local cmd="rbd mirror flush"

    if [ -n "${image}" ]; then
        cmd="${cmd} ${pool}/${image}"
    fi

    admin_daemon "${cluster}" ${cmd}
}
420
test_image_replay_state()
{
    # Return 0 if the rbd-mirror daemon on ${1} has the replayer for image
    # ${2}/${3} in state ${4} ("started" or "stopped").
    local cluster=$1
    local pool=$2
    local image=$3
    local test_state=$4
    local current_state=stopped

    # The per-image "rbd mirror status" asok command is only registered
    # while a replayer exists; treat a registered command whose status
    # reports "Replaying" as started, anything else as stopped.
    admin_daemon "${cluster}" help |
        fgrep "\"rbd mirror status ${pool}/${image}\"" &&
    admin_daemon "${cluster}" rbd mirror status ${pool}/${image} |
        grep -i 'state.*Replaying' &&
    current_state=started

    test "${test_state}" = "${current_state}"
}
437
wait_for_image_replay_state()
{
    # Poll (exponential back-off, ~2 minutes max) until the replayer for
    # image ${2}/${3} on cluster ${1} reaches state ${4}.
    local cluster=$1
    local pool=$2
    local image=$3
    local state=$4
    local interval

    # TODO: add a way to force rbd-mirror to update replayers
    for interval in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
        sleep ${interval}
        if test_image_replay_state "${cluster}" "${pool}" "${image}" "${state}"; then
            return 0
        fi
    done
    return 1
}
453
wait_for_image_replay_started()
{
    # Convenience wrapper: wait for the replayer to reach "started".
    wait_for_image_replay_state "$1" "$2" "$3" started
}
462
wait_for_image_replay_stopped()
{
    # Convenience wrapper: wait for the replayer to reach "stopped".
    wait_for_image_replay_state "$1" "$2" "$3" stopped
}
471
get_position()
{
    # Print the first commit position of the journal client whose id
    # matches the extended regexp ${4} (empty matches the master client),
    # for image ${3} in pool ${2} on cluster ${1}.  Prints nothing unless
    # that client is in "connected" state.
    local cluster=$1
    local pool=$2
    local image=$3
    local id_regexp=$4

    # Parse line like below, looking for the first position
    # [id=, commit_position=[positions=[[object_number=1, tag_tid=3, entry_tid=9], [object_number=0, tag_tid=3, entry_tid=8], [object_number=3, tag_tid=3, entry_tid=7], [object_number=2, tag_tid=3, entry_tid=6]]]]

    local status_log=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.status
    # keep the raw status in TEMPDIR (and echo to stderr for the test log)
    # before extracting the first [object_number=..., tag_tid=..., entry_tid=...]
    rbd --cluster ${cluster} -p ${pool} journal status --image ${image} |
        tee ${status_log} >&2
    sed -nEe 's/^.*\[id='"${id_regexp}"',.*positions=\[\[([^]]*)\],.*state=connected.*$/\1/p' \
        ${status_log}
}
488
get_master_position()
{
    # The master journal client registers with an empty client id; match
    # it with the empty regexp.
    get_position "$1" "$2" "$3" ''
}
497
get_mirror_position()
{
    # Mirror peer clients have a non-empty id -- match any non-empty id.
    get_position "$1" "$2" "$3" '..*'
}
506
wait_for_replay_complete()
{
    # Wait until the mirror journal client on cluster ${2} catches up with
    # the master commit position for image ${4} in pool ${3}; ${1} is the
    # cluster whose rbd-mirror daemon gets flushed between polls.
    # Returns 1 if the mirror stops making progress before converging.
    local local_cluster=$1
    local cluster=$2
    local pool=$3
    local image=$4
    local s master_pos mirror_pos last_mirror_pos
    local master_tag master_entry mirror_tag mirror_entry

    while true; do
        for s in 0.2 0.4 0.8 1.6 2 2 4 4 8 8 16 16 32 32; do
            sleep ${s}
            flush "${local_cluster}" "${pool}" "${image}"
            master_pos=$(get_master_position "${cluster}" "${pool}" "${image}")
            mirror_pos=$(get_mirror_position "${cluster}" "${pool}" "${image}")
            # done once the mirror has reached the master position
            test -n "${master_pos}" -a "${master_pos}" = "${mirror_pos}" && return 0
            # restart the back-off cycle as soon as the mirror progresses
            test "${mirror_pos}" != "${last_mirror_pos}" && break
        done

        # no progress during an entire back-off cycle -- give up
        test "${mirror_pos}" = "${last_mirror_pos}" && return 1
        last_mirror_pos="${mirror_pos}"

        # handle the case where the mirror is ahead of the master
        master_tag=$(echo "${master_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
        mirror_tag=$(echo "${mirror_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
        master_entry=$(echo "${master_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
        mirror_entry=$(echo "${mirror_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
        # same tag and mirror entry >= master entry counts as caught up
        test "${master_tag}" = "${mirror_tag}" -a ${master_entry} -le ${mirror_entry} && return 0
    done
    return 1
}
538
test_status_in_pool_dir()
{
    # Check that the pool-directory mirror status of image ${3} in pool
    # ${2} on cluster ${1} matches the state pattern ${4} and description
    # pattern ${5}.  The raw status is saved in TEMPDIR and echoed to
    # stderr for the test log.
    local cluster=$1
    local pool=$2
    local image=$3
    local state_pattern=$4
    local description_pattern=$5

    local status_log=${TEMPDIR}/${cluster}-${image}.mirror_status
    rbd --cluster ${cluster} -p ${pool} mirror image status ${image} |
        tee ${status_log} >&2

    grep "state: .*${state_pattern}" ${status_log} &&
    grep "description: .*${description_pattern}" ${status_log}
}
553
wait_for_status_in_pool_dir()
{
    # Poll (exponential back-off, ~2 minutes max) until the pool-directory
    # mirror status of image ${3} in pool ${2} on cluster ${1} matches the
    # state pattern ${4} and description pattern ${5}.
    local cluster=$1
    local pool=$2
    local image=$3
    local state_pattern=$4
    local description_pattern=$5
    # BUGFIX: s was previously leaked as a global variable
    local s

    for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
        sleep ${s}
        # quote the patterns so multi-word patterns stay single arguments
        test_status_in_pool_dir "${cluster}" "${pool}" "${image}" \
            "${state_pattern}" "${description_pattern}" && return 0
    done
    return 1
}
568
create_image()
{
    # Create a journaling-enabled image ${3} in pool ${2} on cluster ${1}.
    # An optional fourth argument overrides the default 128M size; any
    # remaining arguments are passed through to "rbd create".
    local cluster=$1 ; shift
    local pool=$1 ; shift
    local image=$1 ; shift
    local megs=128

    if [ -n "$1" ]; then
        megs=$1 ; shift
    fi

    rbd --cluster ${cluster} -p ${pool} create --size ${megs} \
        --image-feature layering,exclusive-lock,journaling $@ ${image}
}
584
set_image_meta()
{
    # Set image-meta ${4}=${5} on image ${3} in pool ${2} on cluster ${1}.
    local cluster=$1
    local pool=$2
    local image=$3
    local key=$4
    local val=$5

    # BUGFIX: quote key/value so metadata containing whitespace survives
    # as single arguments instead of being word-split
    rbd --cluster ${cluster} -p ${pool} image-meta set ${image} "${key}" "${val}"
}
595
remove_image()
{
    # Delete image ${3} from pool ${2} on cluster ${1}; snapshots must be
    # purged first or the removal would fail.
    local cluster=$1 pool=$2 image=$3

    rbd --cluster="${cluster}" -p "${pool}" snap purge "${image}"
    rbd --cluster="${cluster}" -p "${pool}" rm "${image}"
}
605
remove_image_retry()
{
    # Remove image ${3} with retries: the image may transiently be busy
    # (e.g. still open by the rbd-mirror daemon), so back off for up to
    # ~60 seconds before giving up.
    local cluster=$1
    local pool=$2
    local image=$3
    # BUGFIX: s was previously leaked as a global variable
    local s

    for s in 1 2 4 8 16 32; do
        remove_image ${cluster} ${pool} ${image} && return 0
        sleep ${s}
    done
    return 1
}
618
clone_image()
{
    # Clone ${2}/${3}@${4} into journaling-enabled image ${5}/${6} on
    # cluster ${1}.
    local cluster=$1
    local parent_pool=$2 parent_image=$3 parent_snap=$4
    local child_pool=$5 child_image=$6

    rbd --cluster "${cluster}" clone "${parent_pool}/${parent_image}@${parent_snap}" \
        "${child_pool}/${child_image}" --image-feature layering,exclusive-lock,journaling
}
631
disconnect_image()
{
    # Flag the journal client of image ${3} (pool ${2}, cluster ${1}) as
    # disconnected.
    local cluster=$1 pool=$2 image=$3

    rbd --cluster "${cluster}" -p "${pool}" journal client disconnect \
        --image "${image}"
}
641
create_snapshot()
{
    # Take snapshot ${4} of image ${3} in pool ${2} on cluster ${1}.
    local cluster=$1 pool=$2 image=$3 snap=$4

    rbd --cluster "${cluster}" -p "${pool}" snap create "${image}@${snap}"
}
651
remove_snapshot()
{
    # Delete snapshot ${4} of image ${3} in pool ${2} on cluster ${1}.
    local cluster=$1 pool=$2 image=$3 snap=$4

    rbd --cluster "${cluster}" -p "${pool}" snap rm "${image}@${snap}"
}
661
rename_snapshot()
{
    # Rename snapshot ${4} of image ${3} (pool ${2}, cluster ${1}) to ${5}.
    local cluster=$1 pool=$2 image=$3 snap=$4 new_snap=$5

    rbd --cluster "${cluster}" -p "${pool}" snap rename "${image}@${snap}" "${image}@${new_snap}"
}
672
purge_snapshots()
{
    # Remove all snapshots of image ${3} in pool ${2} on cluster ${1}.
    local cluster=$1 pool=$2 image=$3

    rbd --cluster "${cluster}" -p "${pool}" snap purge "${image}"
}
681
protect_snapshot()
{
    # Protect snapshot ${4} of image ${3} (required before cloning).
    local cluster=$1 pool=$2 image=$3 snap=$4

    rbd --cluster "${cluster}" -p "${pool}" snap protect "${image}@${snap}"
}
691
unprotect_snapshot()
{
    # Remove clone protection from snapshot ${4} of image ${3}.
    local cluster=$1 pool=$2 image=$3 snap=$4

    rbd --cluster "${cluster}" -p "${pool}" snap unprotect "${image}@${snap}"
}
701
wait_for_snap_present()
{
    # Poll (exponential back-off, several minutes max) until snapshot ${4}
    # of image ${3} in pool ${2} becomes visible on cluster ${1}.
    local cluster=$1
    local pool=$2
    local image=$3
    local snap_name=$4
    local s

    for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
        sleep ${s}
        if rbd --cluster "${cluster}" -p "${pool}" info "${image}@${snap_name}"; then
            return 0
        fi
    done
    return 1
}
717
write_image()
{
    # Issue ${4} random writes of ${5} bytes (default 4096) to image ${3}
    # in pool ${2} on cluster ${1} via "rbd bench".
    local cluster=$1 pool=$2 image=$3 count=$4 size=$5

    test -n "${size}" || size=4096

    rbd --cluster "${cluster}" -p "${pool}" bench "${image}" --io-type write \
        --io-size "${size}" --io-threads 1 --io-total $((size * count)) \
        --io-pattern rand
}
732
stress_write_image()
{
    # Hammer image ${3} in pool ${2} on cluster ${1} with random writes
    # for a random period using ceph_test_rbd_mirror_random_write.
    local cluster=$1
    local pool=$2
    local image=$3
    # random duration in [5, 14] seconds
    local duration=$(awk 'BEGIN {srand(); print int(10 * rand()) + 5}')

    # timeout kills the writer; "|| true" keeps the caller's set -e from
    # treating the expected timeout (or a writer crash) as a test failure
    timeout ${duration}s ceph_test_rbd_mirror_random_write \
        --cluster ${cluster} ${pool} ${image} \
        --debug-rbd=20 --debug-journaler=20 \
        2> ${TEMPDIR}/rbd-mirror-random-write.log || true
}
745
compare_images()
{
    # Export image ${2} from pool ${1} on both clusters and fail (via cmp,
    # under the caller's set -e) unless the exports are byte-identical.
    local pool=$1
    local image=$2
    local rmt=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.export
    local loc=${TEMPDIR}/${CLUSTER1}-${pool}-${image}.export

    rm -f "${rmt}" "${loc}"
    rbd --cluster "${CLUSTER2}" -p "${pool}" export "${image}" "${rmt}"
    rbd --cluster "${CLUSTER1}" -p "${pool}" export "${image}" "${loc}"
    cmp "${rmt}" "${loc}"
    rm -f "${rmt}" "${loc}"
}
760
demote_image()
{
    # Demote image ${3} (pool ${2}, cluster ${1}) from primary.
    local cluster=$1 pool=$2 image=$3

    rbd --cluster="${cluster}" mirror image demote "${pool}/${image}"
}
769
770 promote_image()
771 {
772 local cluster=$1
773 local pool=$2
774 local image=$3
775 local force=$4
776
777 rbd --cluster=${cluster} mirror image promote ${pool}/${image} ${force}
778 }
779
set_pool_mirror_mode()
{
    # Set the mirroring mode ("pool", "image", ...) for pool ${2} on
    # cluster ${1}.
    local cluster=$1 pool=$2 mode=$3

    rbd --cluster="${cluster}" -p "${pool}" mirror pool enable "${mode}"
}
788
disable_mirror()
{
    # Disable mirroring for image ${3} in pool ${2} on cluster ${1}.
    local cluster=$1 pool=$2 image=$3

    rbd --cluster="${cluster}" mirror image disable "${pool}/${image}"
}
797
enable_mirror()
{
    # Enable mirroring for image ${3} in pool ${2} on cluster ${1}.
    local cluster=$1 pool=$2 image=$3

    rbd --cluster="${cluster}" mirror image enable "${pool}/${image}"
}
806
test_image_present()
{
    # Return 0 if image ${3} in pool ${2} on cluster ${1} matches the
    # expected state ${4} ("present" or "deleted").  When ${5} is given,
    # "present" additionally requires the image id to equal ${5} (so a
    # re-created image with a new id counts as deleted).
    local cluster=$1
    local pool=$2
    local image=$3
    local test_state=$4
    local image_id=$5
    local current_state=deleted
    local current_image_id

    current_image_id=$(get_image_id ${cluster} ${pool} ${image})
    if [ -n "${current_image_id}" ]; then
        if [ -z "${image_id}" ] || [ "${image_id}" = "${current_image_id}" ]; then
            current_state=present
        fi
    fi

    test "${test_state}" = "${current_state}"
}
824
wait_for_image_present()
{
    # Poll (exponential back-off, several minutes max) until image ${3}
    # reaches presence state ${4} on cluster ${1}; ${5} optionally pins
    # the expected image id (defaults to the image's current id).
    local cluster=$1
    local pool=$2
    local image=$3
    local state=$4
    local image_id=$5
    local s

    if [ -z "${image_id}" ]; then
        image_id=$(get_image_id ${cluster} ${pool} ${image})
    fi

    # TODO: add a way to force rbd-mirror to update replayers
    for s in 0.1 1 2 4 8 8 8 8 8 8 8 8 16 16 32 32; do
        sleep ${s}
        if test_image_present \
               "${cluster}" "${pool}" "${image}" "${state}" "${image_id}"; then
            return 0
        fi
    done
    return 1
}
846
get_image_id()
{
    # Derive the image id of image ${3} (pool ${2}, cluster ${1}) from the
    # block_name_prefix ("rbd_data.<id>") reported by "rbd info".
    local cluster=$1 pool=$2 image=$3

    rbd --cluster="${cluster}" -p "${pool}" info "${image}" |
        sed -ne 's/^.*block_name_prefix: rbd_data\.//p'
}
856
request_resync_image()
{
    # Request a resync of image ${3} in pool ${2} on cluster ${1}, and
    # report the image's pre-resync id through the variable named by ${4}
    # (so the caller can later wait for an image with a different id).
    local cluster=$1
    local pool=$2
    local image=$3
    # BUGFIX: this was "$1", which evaluated to the cluster name and made
    # the eval below clobber a variable named after the cluster instead of
    # the caller's requested out-parameter.
    local image_id_var_name=$4

    eval "${image_id_var_name}='$(get_image_id ${cluster} ${pool} ${image})'"
    # sanity: the image must exist (non-empty id) before requesting resync
    eval 'test -n "$'${image_id_var_name}'"'

    rbd --cluster=${cluster} -p ${pool} mirror image resync ${image}
}
869
870 #
871 # Main
872 #
873
874 if [ "$#" -gt 0 ]
875 then
876 if [ -z "${RBD_MIRROR_TEMDIR}" ]
877 then
878 echo "RBD_MIRROR_TEMDIR is not set" >&2
879 exit 1
880 fi
881
882 TEMPDIR="${RBD_MIRROR_TEMDIR}"
883 cd ${TEMPDIR}
884 $@
885 exit $?
886 fi
887
888 set -xe
889
890 setup