#!/usr/bin/env bash
#
# Copyright (C) 2015 Red Hat <contact@redhat.com>
#
# Author: Kefu Chai <kchai@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh

function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7112" # git grep '\<7112\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    CEPH_ARGS+="--osd-mclock-profile=high_recovery_ops "
    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        setup $dir || return 1
        run_mon $dir a || return 1
        run_mgr $dir x || return 1
        create_pool rbd 4 || return 1

        # check that erasure code plugins are preloaded
        CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
        grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}
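
# main() (provided by the sourced ceph-helpers.sh and invoked at the bottom of
# this file) calls run() above; run() discovers every TEST_* function defined
# below and runs each one against a freshly set up mon/mgr, tearing the
# cluster down again between tests.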

function setup_osds() {
    local count=$1

    for id in $(seq 0 $(expr $count - 1)) ; do
        run_osd $dir $id || return 1
    done

    # check that erasure code plugins are preloaded
    CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
    grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
}

function get_state() {
    local pgid=$1
    local sname=state
    ceph --format json pg dump pgs 2>/dev/null | \
        jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname"
}

function create_erasure_coded_pool() {
    local poolname=$1
    local k=$2
    local m=$3

    ceph osd erasure-code-profile set myprofile \
        plugin=jerasure \
        k=$k m=$m \
        crush-failure-domain=osd || return 1
    create_pool $poolname 1 1 erasure myprofile \
        || return 1
    wait_for_clean || return 1
}

function delete_erasure_coded_pool() {
    local poolname=$1
    ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
    ceph osd erasure-code-profile rm myprofile
}

function rados_put() {
    local dir=$1
    local poolname=$2
    local objname=${3:-SOMETHING}

    for marker in AAA BBB CCCC DDDD ; do
        printf "%*s" 1024 $marker
    done > $dir/ORIGINAL
    #
    # put the object; later gets are compared against $dir/ORIGINAL
    #
    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
}
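
# rados_get reads the object back and diffs it against the $dir/ORIGINAL file
# written by rados_put; with expect=fail it instead asserts that the read fails.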
function rados_get() {
    local dir=$1
    local poolname=$2
    local objname=${3:-SOMETHING}
    local expect=${4:-ok}

    #
    # Expect a failure to get object
    #
    if [ $expect = "fail" ];
    then
        ! rados --pool $poolname get $objname $dir/COPY
        return
    fi
    #
    # get an object, compare with $dir/ORIGINAL
    #
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
}

function inject_remove() {
    local pooltype=$1
    local which=$2
    local poolname=$3
    local objname=$4
    local dir=$5
    local shard_id=$6

    local -a initial_osds=($(get_osds $poolname $objname))
    local osd_id=${initial_osds[$shard_id]}
    objectstore_tool $dir $osd_id $objname remove || return 1
}
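
# inject_eio is not defined in this file; it comes from the sourced
# ceph-helpers.sh and is invoked with the same arguments as inject_remove above
# (pool type, data|mdata, pool, object, dir, shard id).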

# Test with an injected error
function rados_put_get_data() {
    local inject=$1
    local dir=$2
    local shard_id=$3
    local arg=$4

    # inject eio to specified shard
    #
    local poolname=pool-jerasure
    local objname=obj-$inject-$$-$shard_id
    rados_put $dir $poolname $objname || return 1
    inject_$inject ec data $poolname $objname $dir $shard_id || return 1
    rados_get $dir $poolname $objname || return 1

    if [ "$arg" = "recovery" ];
    then
        #
        # take out the last OSD used to store the object,
        # bring it back, and check for clean PGs which means
        # recovery didn't crash the primary.
        #
        local -a initial_osds=($(get_osds $poolname $objname))
        local last_osd=${initial_osds[-1]}
        kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1
        ceph osd out ${last_osd} || return 1
        ! get_osds $poolname $objname | grep '\<'${last_osd}'\>' || return 1
        ceph osd in ${last_osd} || return 1
        activate_osd $dir ${last_osd} || return 1
        wait_for_clean || return 1
    fi

    # Won't check for eio on get here -- recovery above might have fixed it
    shard_id=$(expr $shard_id + 1)
    inject_$inject ec data $poolname $objname $dir $shard_id || return 1
    rados_get $dir $poolname $objname fail || return 1
}

# Change the size of the specified shard
#
function set_size() {
    local objname=$1
    local dir=$2
    local shard_id=$3
    local bytes=$4
    local mode=$5

    local poolname=pool-jerasure
    local -a initial_osds=($(get_osds $poolname $objname))
    local osd_id=${initial_osds[$shard_id]}

    if [ "$mode" = "add" ];
    then
        objectstore_tool $dir $osd_id $objname get-bytes $dir/CORRUPT || return 1
        dd if=/dev/urandom bs=$bytes count=1 >> $dir/CORRUPT
    elif [ "$bytes" = "0" ];
    then
        # an empty file truncates the shard to zero bytes
        touch $dir/CORRUPT
    else
        dd if=/dev/urandom bs=$bytes count=1 of=$dir/CORRUPT
    fi
    objectstore_tool $dir $osd_id $objname set-bytes $dir/CORRUPT || return 1
}

function rados_get_data_bad_size() {
    local dir=$1
    local shard_id=$2
    local bytes=$3
    local mode=$4

    local poolname=pool-jerasure
    local objname=obj-size-$$-$shard_id-$bytes
    rados_put $dir $poolname $objname || return 1

    # Change the size of the specified shard
    #
    set_size $objname $dir $shard_id $bytes $mode || return 1

    rados_get $dir $poolname $objname || return 1

    # Leave objname and modify another shard
    shard_id=$(expr $shard_id + 1)
    set_size $objname $dir $shard_id $bytes $mode || return 1
    rados_get $dir $poolname $objname fail || return 1
}

# These two test cases try to validate the following behavior:
# For an object on an EC pool, if one shard has a read error
# (either on the primary or a replica), the client can still read the object.
# If 2 shards have read errors the client will get an error.
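#
# (With the k=2 m=1 profile created by these tests, a read needs any 2 of the
# 3 shards, so one bad shard can be worked around while two cannot.)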

function TEST_rados_get_subread_eio_shard_0() {
    local dir=$1
    setup_osds 4 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 2 1 || return 1
    # inject eio on primary OSD (0) and replica OSD (1)
    local shard_id=0
    rados_put_get_data eio $dir $shard_id || return 1
    delete_erasure_coded_pool $poolname
}

function TEST_rados_get_subread_eio_shard_1() {
    local dir=$1
    setup_osds 4 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 2 1 || return 1
    # inject eio into replicas OSD (1) and OSD (2)
    local shard_id=1
    rados_put_get_data eio $dir $shard_id || return 1
    delete_erasure_coded_pool $poolname
}

# We don't remove the object from the primary because
# that just causes it to appear to be missing.
function TEST_rados_get_subread_missing() {
    local dir=$1
    setup_osds 4 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 2 1 || return 1
    # inject remove into replicas OSD (1) and OSD (2)
    local shard_id=1
    rados_put_get_data remove $dir $shard_id || return 1
    delete_erasure_coded_pool $poolname
}

# These two test cases try to validate the following behavior:
# For an object on an EC pool, a shard with an incorrect size causes an
# internal read error, but the client can still read the object.
# If 2 shards have incorrect sizes the client will get an error.

function TEST_rados_get_bad_size_shard_0() {
    local dir=$1
    setup_osds 4 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 2 1 || return 1
    # Set incorrect size into primary OSD (0) and replica OSD (1)
    local shard_id=0
    rados_get_data_bad_size $dir $shard_id 10 || return 1
    rados_get_data_bad_size $dir $shard_id 0 || return 1
    rados_get_data_bad_size $dir $shard_id 256 add || return 1
    delete_erasure_coded_pool $poolname
}

function TEST_rados_get_bad_size_shard_1() {
    local dir=$1
    setup_osds 4 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 2 1 || return 1
    # Set incorrect size into replicas OSD (1) and OSD (2)
    local shard_id=1
    rados_get_data_bad_size $dir $shard_id 10 || return 1
    rados_get_data_bad_size $dir $shard_id 0 || return 1
    rados_get_data_bad_size $dir $shard_id 256 add || return 1
    delete_erasure_coded_pool $poolname
}

function TEST_rados_get_with_subreadall_eio_shard_0() {
    local dir=$1
    local shard_id=0

    setup_osds 4 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 2 1 || return 1
    # inject eio on primary OSD (0)
    rados_put_get_data eio $dir $shard_id recovery || return 1

    delete_erasure_coded_pool $poolname
}

function TEST_rados_get_with_subreadall_eio_shard_1() {
    local dir=$1
    local shard_id=1

    setup_osds 4 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 2 1 || return 1
    # inject eio on replica OSD (1)
    rados_put_get_data eio $dir $shard_id recovery || return 1

    delete_erasure_coded_pool $poolname
}

# Test recovery when the object attr read returns an error
function TEST_ec_object_attr_read_error() {
    local dir=$1
    local objname=myobject

    setup_osds 7 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 3 2 || return 1

    local primary_osd=$(get_primary $poolname $objname)

    kill_daemons $dir TERM osd.${primary_osd} >&2 < /dev/null || return 1

    rados_put $dir $poolname $objname || return 1

    # Inject eio; shard 1 is the one used to read the attr
    inject_eio ec mdata $poolname $objname $dir 1 || return 1

    activate_osd $dir ${primary_osd} || return 1

    # Cluster should recover this object
    wait_for_clean || return 1

    rados_get $dir $poolname myobject || return 1

    delete_erasure_coded_pool $poolname
}

# Test recovery when the first k copies aren't all available
function TEST_ec_single_recovery_error() {
    local dir=$1
    local objname=myobject

    setup_osds 7 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 3 2 || return 1

    rados_put $dir $poolname $objname || return 1
    inject_eio ec data $poolname $objname $dir 0 || return 1

    local -a initial_osds=($(get_osds $poolname $objname))
    local last_osd=${initial_osds[-1]}
    kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1
    ceph osd down ${last_osd} || return 1
    ceph osd out ${last_osd} || return 1

    # Cluster should recover this object
    wait_for_clean || return 1

    rados_get $dir $poolname myobject || return 1

    delete_erasure_coded_pool $poolname
}

# Test recovery when repeated reads are needed due to EIO
function TEST_ec_recovery_multiple_errors() {
    local dir=$1
    local objname=myobject

    setup_osds 9 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 4 4 || return 1

    rados_put $dir $poolname $objname || return 1
    inject_eio ec data $poolname $objname $dir 0 || return 1
    # The first read will try shards 0,1,2; when 0 gets EIO, shard 3 gets
    # tried as well. Make that fail too, to test multiple-EIO handling.
    inject_eio ec data $poolname $objname $dir 3 || return 1
    inject_eio ec data $poolname $objname $dir 4 || return 1

    local -a initial_osds=($(get_osds $poolname $objname))
    local last_osd=${initial_osds[-1]}
    kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1
    ceph osd down ${last_osd} || return 1
    ceph osd out ${last_osd} || return 1

    # Cluster should recover this object
    wait_for_clean || return 1

    rados_get $dir $poolname myobject || return 1

    delete_erasure_coded_pool $poolname
}

# Test recovery when there's only one shard to recover, but multiple
# objects recovering in one RecoveryOp
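# (The --osd-recovery-max-single-start/--osd-recovery-max-active settings below
# are raised to 3 so the three objects written here can be recovered together.)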
function TEST_ec_recovery_multiple_objects() {
    local dir=$1
    local objname=myobject

    CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 '
    setup_osds 7 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 3 2 || return 1

    rados_put $dir $poolname test1
    rados_put $dir $poolname test2
    rados_put $dir $poolname test3

    ceph osd out 0 || return 1

    # Cluster should recover these objects all at once
    wait_for_clean || return 1

    rados_get $dir $poolname test1
    rados_get $dir $poolname test2
    rados_get $dir $poolname test3

    delete_erasure_coded_pool $poolname
}

# Test multi-object recovery when the one missing shard gets EIO
function TEST_ec_recovery_multiple_objects_eio() {
    local dir=$1
    local objname=myobject

    CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 '
    setup_osds 7 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 3 2 || return 1

    rados_put $dir $poolname test1
    rados_put $dir $poolname test2
    rados_put $dir $poolname test3

    # can't read from this shard anymore
    inject_eio ec data $poolname $objname $dir 0 || return 1
    ceph osd out 0 || return 1

    # Cluster should recover these objects all at once
    wait_for_clean || return 1

    rados_get $dir $poolname test1
    rados_get $dir $poolname test2
    rados_get $dir $poolname test3

    delete_erasure_coded_pool $poolname
}

# Test backfill with unfound object
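# The object ends up unfound because, while the last OSD is down and out, EIO
# is injected into two of the surviving shards of $testobj, leaving fewer than
# k readable shards, so backfill cannot reconstruct it.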
function TEST_ec_backfill_unfound() {
    local dir=$1
    local objname=myobject
    local lastobj=300           # assumed value: enough writes to force backfill
    # Must be between 1 and $lastobj
    local testobj=obj250        # assumed value

    CEPH_ARGS+=' --osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10'
    setup_osds 5 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 3 2 || return 1

    rados_put $dir $poolname $objname || return 1
    local primary=$(get_primary $poolname $objname)

    local -a initial_osds=($(get_osds $poolname $objname))
    local last_osd=${initial_osds[-1]}
    kill_daemons $dir TERM osd.${last_osd} 2>&2 < /dev/null || return 1
    ceph osd down ${last_osd} || return 1
    ceph osd out ${last_osd} || return 1

    dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4
    for i in $(seq 1 $lastobj)
    do
        rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1
    done

    inject_eio ec data $poolname $testobj $dir 0 || return 1
    inject_eio ec data $poolname $testobj $dir 1 || return 1

    activate_osd $dir ${last_osd} || return 1
    ceph osd in ${last_osd} || return 1

    for tmp in $(seq 1 240); do
        state=$(get_state 2.0)
        echo $state | grep backfill_unfound
        if [ "$?" = "0" ]; then
            break
        fi
        sleep 1
    done

    kill_daemons $dir TERM osd.${last_osd} 2>&2 < /dev/null || return 1

    ceph pg 2.0 list_unfound

    ceph pg 2.0 list_unfound | grep -q $testobj || return 1

    check=$(ceph pg 2.0 list_unfound | jq ".available_might_have_unfound")
    test "$check" == "true" || return 1

    eval check=$(ceph pg 2.0 list_unfound | jq .might_have_unfound[0].status)
    test "$check" == "osd is down" || return 1

    eval check=$(ceph pg 2.0 list_unfound | jq .might_have_unfound[0].osd)
    test "$check" == "2(4)" || return 1

    activate_osd $dir ${last_osd} || return 1

    # Command should hang because object is unfound
    timeout 5 rados -p $poolname get $testobj $dir/CHECK
    test $? = "124" || return 1

    ceph pg 2.0 mark_unfound_lost delete

    wait_for_clean || return 1

    for i in $(seq 1 $lastobj)
    do
        if [ obj${i} = "$testobj" ]; then
            # Doesn't exist anymore
            ! rados -p $poolname get $testobj $dir/CHECK || return 1
        else
            rados --pool $poolname get obj${i} $dir/CHECK || return 1
            diff -q $dir/ORIGINAL $dir/CHECK || return 1
        fi
    done

    rm -f ${dir}/ORIGINAL ${dir}/CHECK

    delete_erasure_coded_pool $poolname
}

# Test recovery with unfound object
function TEST_ec_recovery_unfound() {
    local dir=$1
    local objname=myobject
    local lastobj=5             # assumed value: few enough writes to stay within the pg log
    # Must be between 1 and $lastobj
    local testobj=obj4          # assumed value

    CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 '
    CEPH_ARGS+=' --osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10'
    setup_osds 5 || return 1

    local poolname=pool-jerasure
    create_erasure_coded_pool $poolname 3 2 || return 1

    rados_put $dir $poolname $objname || return 1

    local -a initial_osds=($(get_osds $poolname $objname))
    local last_osd=${initial_osds[-1]}
    kill_daemons $dir TERM osd.${last_osd} 2>&2 < /dev/null || return 1
    ceph osd down ${last_osd} || return 1
    ceph osd out ${last_osd} || return 1

    dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4
    for i in $(seq 1 $lastobj)
    do
        rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1
    done

    inject_eio ec data $poolname $testobj $dir 0 || return 1
    inject_eio ec data $poolname $testobj $dir 1 || return 1

    activate_osd $dir ${last_osd} || return 1
    ceph osd in ${last_osd} || return 1

    for tmp in $(seq 1 100); do
        state=$(get_state 2.0)
        echo $state | grep recovery_unfound
        if [ "$?" = "0" ]; then
            break
        fi
        sleep 1
    done

    ceph pg 2.0 list_unfound

    ceph pg 2.0 list_unfound | grep -q $testobj || return 1

    check=$(ceph pg 2.0 list_unfound | jq ".available_might_have_unfound")
    test "$check" == "true" || return 1

    check=$(ceph pg 2.0 list_unfound | jq ".might_have_unfound | length")
    test $check == 0 || return 1

    # Command should hang because object is unfound
    timeout 5 rados -p $poolname get $testobj $dir/CHECK
    test $? = "124" || return 1

    ceph pg 2.0 mark_unfound_lost delete

    wait_for_clean || return 1

    for i in $(seq 1 $lastobj)
    do
        if [ obj${i} = "$testobj" ]; then
            # Doesn't exist anymore
            ! rados -p $poolname get $testobj $dir/CHECK || return 1
        else
            rados --pool $poolname get obj${i} $dir/CHECK || return 1
            diff -q $dir/ORIGINAL $dir/CHECK || return 1
        fi
    done

    rm -f ${dir}/ORIGINAL ${dir}/CHECK

    delete_erasure_coded_pool $poolname
}

main test-erasure-eio "$@"

# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/erasure-code/test-erasure-eio.sh"
# End: