#!/usr/bin/env bash
#
# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#

source $CEPH_ROOT/qa/standalone/ceph-helpers.sh

function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7101" # git grep '\<7101\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON --mon-osd-prime-pg-temp=false"

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
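    # ("log flush" on the admin socket forces the daemon to write buffered
    # log entries out before its log file is grepped)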
    # check that erasure code plugins are preloaded
    CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
    grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
    for id in $(seq 0 10) ; do
        run_osd $dir $id || return 1
    done
    create_rbd_pool || return 1
    wait_for_clean || return 1
    # check that erasure code plugins are preloaded
    CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
    grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
    create_erasure_coded_pool ecpool || return 1

    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        $func $dir || return 1
    done

    delete_pool ecpool || return 1
    teardown $dir || return 1
}
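
# create_erasure_coded_pool leaves everything except the failure domain at
# the built-in profile defaults; the chunk mapping tests below rely on the
# resulting default split into k=2 data chunks.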

function create_erasure_coded_pool() {
    local poolname=$1

    ceph osd erasure-code-profile set myprofile \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure myprofile \
        || return 1
    wait_for_clean || return 1
}

function rados_put_get() {
    local dir=$1
    local poolname=$2
    local objname=${3:-SOMETHING}

    for marker in AAA BBB CCCC DDDD ; do
        printf "%*s" 1024 $marker
    done > $dir/ORIGINAL

    #
    # put and get an object, check that the two are equal
    #
    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    rm $dir/COPY

    #
    # take out an OSD used to store the object and
    # check the object can still be retrieved, which implies
    # it can be decoded from the remaining chunks
    #
    local -a initial_osds=($(get_osds $poolname $objname))
    local last=$((${#initial_osds[@]} - 1))
    ceph osd out ${initial_osds[$last]} || return 1
    ! get_osds $poolname $objname | grep '\<'${initial_osds[$last]}'\>' || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    ceph osd in ${initial_osds[$last]} || return 1

    rm $dir/ORIGINAL
}
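
# Like rados_put_get, but two OSDs are marked out at once: reads must keep
# working as long as no more than m coding chunks worth of OSDs are lost
# (the caller, TEST_rados_put_get_jerasure, uses a k=4 m=2 pool).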
function rados_osds_out_in() {
    local dir=$1
    local poolname=$2
    local objname=${3:-SOMETHING}

    for marker in FFFF GGGG HHHH IIII ; do
        printf "%*s" 1024 $marker
    done > $dir/ORIGINAL

    #
    # put and get an object, check that the two are equal
    #
    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    rm $dir/COPY

    #
    # take out two OSDs used to store the object, wait for the cluster
    # to be clean (i.e. all PGs are clean and active) again, which
    # implies the PGs have been moved to use the remaining OSDs.  Check
    # the object can still be retrieved.
    #
    wait_for_clean || return 1
    local osds_list=$(get_osds $poolname $objname)
    local -a osds=($osds_list)
    for osd in 0 1 ; do
        ceph osd out ${osds[$osd]} || return 1
    done
    wait_for_clean || return 1
    #
    # verify the object is no longer mapped to the osds that are out
    #
    for osd in 0 1 ; do
        ! get_osds $poolname $objname | grep '\<'${osds[$osd]}'\>' || return 1
    done
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1

    #
    # bring the osds back in and wait for the cluster to be clean
    # (i.e. all PGs are clean and active) again, which implies the
    # PGs go back to using the same osds as before
    #
    for osd in 0 1 ; do
        ceph osd in ${osds[$osd]} || return 1
    done
    wait_for_clean || return 1
    test "$osds_list" = "$(get_osds $poolname $objname)" || return 1

    rm $dir/ORIGINAL
}
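
# In the lrc plugin's low-level configuration each character of the mapping
# and layers strings describes one chunk position: D is a data chunk, c is
# a coding chunk and _ excludes the position from a layer.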
function TEST_rados_put_get_lrc_advanced() {
    local dir=$1
    local poolname=pool-lrc-a
    local profile=profile-lrc-a

    ceph osd erasure-code-profile set $profile \
        plugin=lrc \
        mapping=DD_ \
        crush-steps='[ [ "chooseleaf", "osd", 0 ] ]' \
        layers='[ [ "DDc", "" ] ]' || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}
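
# The same plugin can also derive a layout from the k/m/l shorthand: the
# k+m chunks are split into groups of l and each group gets an additional
# local coding chunk, making single-OSD recovery cheaper.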
function TEST_rados_put_get_lrc_kml() {
    local dir=$1
    local poolname=pool-lrc
    local profile=profile-lrc

    ceph osd erasure-code-profile set $profile \
        plugin=lrc \
        k=4 m=2 l=3 \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_rados_put_get_isa() {
    if ! erasure_code_plugin_exists isa ; then
        echo "SKIP because plugin isa has not been built"
        return 0
    fi
    local dir=$1
    local poolname=pool-isa

    ceph osd erasure-code-profile set profile-isa \
        plugin=isa \
        crush-failure-domain=osd || return 1
    create_pool $poolname 1 1 erasure profile-isa \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
}

function TEST_rados_put_get_jerasure() {
    local dir=$1

    rados_put_get $dir ecpool || return 1

    local poolname=pool-jerasure
    local profile=profile-jerasure

    ceph osd erasure-code-profile set $profile \
        plugin=jerasure \
        k=4 m=2 \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1
    rados_osds_out_in $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}
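
# For shec, k and m keep their usual meaning and c is the durability
# estimator: the layout is designed to recover from the loss of any c
# chunks.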
function TEST_rados_put_get_shec() {
    local dir=$1

    local poolname=pool-shec
    local profile=profile-shec

    ceph osd erasure-code-profile set $profile \
        plugin=shec \
        k=2 m=1 c=1 \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_alignment_constraints() {
    local dir=$1
    local payload=ABC
    echo "$payload" > $dir/ORIGINAL
    #
    # Verify that the rados command enforces alignment constraints
    # imposed by the stripe width
    # See http://tracker.ceph.com/issues/8622
    #
    local stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
    eval local $(ceph osd erasure-code-profile get myprofile | grep k=)
    local block_size=$((stripe_unit * k - 1))
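    # block_size is one byte short of the stripe width (stripe_unit * k),
    # so the writes below are deliberately misaligned; the put must still
    # succeed, i.e. rados has to cope with the misalignment rather than
    # fail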
    dd if=/dev/zero of=$dir/ORIGINAL bs=$block_size count=2
    rados --block-size=$block_size \
        --pool ecpool put UNALIGNED $dir/ORIGINAL || return 1
    rm $dir/ORIGINAL
}

function chunk_size() {
    echo $(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
}
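
# (a chunk holds stripe_unit bytes: the stripe width is stripe_unit * k,
# split evenly over the k data chunks)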

#
# By default an object will be split in two (k=2) with the first part
# of the object in the first OSD of the up set and the second part in
# the next OSD in the up set.  This layout is defined by the mapping
# parameter and this function helps verify that the first and second
# parts of the object are located in the OSDs where they should be.
#
function verify_chunk_mapping() {
    local dir=$1
    local poolname=$2
    local first=$3
    local second=$4

    local payload=$(printf '%*s' $(chunk_size) FIRST$poolname ; printf '%*s' $(chunk_size) SECOND$poolname)
    echo -n "$payload" > $dir/ORIGINAL

    rados --pool $poolname put SOMETHING$poolname $dir/ORIGINAL || return 1
    rados --pool $poolname get SOMETHING$poolname $dir/COPY || return 1
    local -a osds=($(get_osds $poolname SOMETHING$poolname))
    for (( i = 0; i < ${#osds[@]}; i++ )) ; do
        ceph daemon osd.${osds[$i]} flush_journal
    done
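    # flush_journal makes each OSD write its journal out to the backing
    # files so the recursive grep below cannot miss chunks still sitting
    # in the journal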
    diff $dir/ORIGINAL $dir/COPY || return 1
    rm $dir/COPY

    local -a osds=($(get_osds $poolname SOMETHING$poolname))
    grep --quiet --recursive --text FIRST$poolname $dir/${osds[$first]} || return 1
    grep --quiet --recursive --text SECOND$poolname $dir/${osds[$second]} || return 1
}

function TEST_chunk_mapping() {
    local dir=$1

    #
    # mapping=DD_ is the default:
    # first OSD (i.e. 0) in the up set has the first part of the object
    # second OSD (i.e. 1) in the up set has the second part of the object
    #
    verify_chunk_mapping $dir ecpool 0 1 || return 1

    ceph osd erasure-code-profile set remap-profile \
        plugin=lrc \
        layers='[ [ "_DD", "" ] ]' \
        mapping='_DD' \
        crush-steps='[ [ "choose", "osd", 0 ] ]' || return 1
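    # with mapping=_DD the first position is unused and the two data chunks
    # land on the second and third OSDs of the up set, which is what the
    # "1 2" arguments passed to verify_chunk_mapping below assert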
    ceph osd erasure-code-profile get remap-profile
    create_pool remap-pool 12 12 erasure remap-profile \
        || return 1

    #
    # second OSD (i.e. 1) in the up set has the first part of the object
    # third OSD (i.e. 2) in the up set has the second part of the object
    #
    verify_chunk_mapping $dir remap-pool 1 2 || return 1

    delete_pool remap-pool
    ceph osd erasure-code-profile rm remap-profile
}

main test-erasure-code "$@"

# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/erasure-code/test-erasure-code.sh"
# End: