#!/bin/bash
#
# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#

source $CEPH_ROOT/qa/standalone/ceph-helpers.sh

function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7101" # git grep '\<7101\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON --mon-osd-prime-pg-temp=false"

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    # check that erasure code plugins are preloaded
    CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
    grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
    for id in $(seq 0 10) ; do
        run_osd $dir $id || return 1
    done
    create_rbd_pool || return 1
    wait_for_clean || return 1
    # check that erasure code plugins are preloaded
    CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
    grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
    create_erasure_coded_pool ecpool || return 1

    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        $func $dir || return 1
    done

    delete_pool ecpool || return 1
    teardown $dir || return 1
}

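#
# run() executes every TEST_* function defined in this file by default;
# any extra arguments are treated as an explicit list of test names
# instead (see the $funcs assignment above). Assuming main() in
# ceph-helpers.sh forwards its remaining arguments to run(), a single
# case could be exercised with something like the following
# (illustrative invocation only):
#
#     qa/standalone/erasure-code/test-erasure-code.sh TEST_rados_put_get_jerasure
#
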
function create_erasure_coded_pool() {
    local poolname=$1

    ceph osd erasure-code-profile set myprofile \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure myprofile \
        || return 1
    wait_for_clean || return 1
}

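#
# Note: myprofile only overrides crush-failure-domain, so the pool keeps
# the plugin defaults (jerasure with k=2, as the chunk mapping comments
# further down assume; m=1 in this release unless the defaults change).
# The resulting profile can be inspected with:
#
#     ceph osd erasure-code-profile get myprofile
#
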
function rados_put_get() {
    local dir=$1
    local poolname=$2
    local objname=${3:-SOMETHING}

    for marker in AAA BBB CCCC DDDD ; do
        printf "%*s" 1024 $marker
    done > $dir/ORIGINAL

    #
    # put and get an object and compare that the two copies are equal
    #
    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    rm $dir/COPY

    #
    # take out an OSD used to store the object and
    # check that the object can still be retrieved, which implies
    # recovery
    #
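    # For example (hypothetical values): if get_osds returns the acting
    # set "1 0 2", the last OSD (osd.2) is marked out; the object must
    # then map to a set that no longer contains osd.2 and the get below
    # must still succeed, rebuilding the missing chunk from the
    # surviving ones.
    #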
    local -a initial_osds=($(get_osds $poolname $objname))
    local last=$((${#initial_osds[@]} - 1))
    ceph osd out ${initial_osds[$last]} || return 1
    ! get_osds $poolname $objname | grep '\<'${initial_osds[$last]}'\>' || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    ceph osd in ${initial_osds[$last]} || return 1

    rm $dir/ORIGINAL
}

function rados_osds_out_in() {
    local dir=$1
    local poolname=$2
    local objname=${3:-SOMETHING}

    for marker in FFFF GGGG HHHH IIII ; do
        printf "%*s" 1024 $marker
    done > $dir/ORIGINAL

    #
    # put and get an object and compare that the two copies are equal
    #
    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    rm $dir/COPY

    #
    # take out two OSDs used to store the object, then wait for the
    # cluster to be clean (i.e. all PGs are active and clean) again,
    # which implies the PGs have been moved to the remaining OSDs.
    # Check that the object can still be retrieved.
    #
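    # For example (hypothetical values): if the object initially maps to
    # "0 1 2", osd.0 and osd.1 are marked out and, once the cluster is
    # clean again, get_osds must return a set containing neither of them;
    # after "ceph osd in" and another wait_for_clean the original set is
    # expected back (the final test compares against $osds_list).
    #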
    wait_for_clean || return 1
    local osds_list=$(get_osds $poolname $objname)
    local -a osds=($osds_list)
    for osd in 0 1 ; do
        ceph osd out ${osds[$osd]} || return 1
    done
    wait_for_clean || return 1
    #
    # verify the object is no longer mapped to the osds that are out
    #
    for osd in 0 1 ; do
        ! get_osds $poolname $objname | grep '\<'${osds[$osd]}'\>' || return 1
    done
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    #
    # bring the osds back in, wait for the cluster to be clean
    # (i.e. all PGs are active and clean) again, which implies the
    # PGs go back to using the same osds as before
    #
    for osd in 0 1 ; do
        ceph osd in ${osds[$osd]} || return 1
    done
    wait_for_clean || return 1
    test "$osds_list" = "$(get_osds $poolname $objname)" || return 1
    rm $dir/ORIGINAL
}

function TEST_rados_put_get_lrc_advanced() {
    local dir=$1
    local poolname=pool-lrc-a
    local profile=profile-lrc-a

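    #
    # A sketch of what the profile below means, per the lrc plugin
    # documentation (not verified against the plugin source): each
    # character of mapping=DD_ is one chunk position, 'D' being a data
    # chunk and '_' a position filled by a coding chunk; the single
    # layer "DDc" then computes the coding chunk 'c' from the two data
    # chunks, so every object is stored as 2 data + 1 coding chunk.
    #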
    ceph osd erasure-code-profile set $profile \
        plugin=lrc \
        mapping=DD_ \
        crush-steps='[ [ "chooseleaf", "osd", 0 ] ]' \
        layers='[ [ "DDc", "" ] ]' || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_rados_put_get_lrc_kml() {
    local dir=$1
    local poolname=pool-lrc
    local profile=profile-lrc

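    #
    # With the lrc plugin in k/m/l mode, k=4 data chunks and m=2 coding
    # chunks are grouped into sets of l=3 and (per the plugin
    # documentation, an assumption of this comment) one extra locally
    # repairable parity chunk is added per group, i.e. (k+m)/l = 2 more
    # chunks, for a total of 8 chunks per object.
    #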
    ceph osd erasure-code-profile set $profile \
        plugin=lrc \
        k=4 m=2 l=3 \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_rados_put_get_isa() {
    if ! erasure_code_plugin_exists isa ; then
        echo "SKIP because plugin isa has not been built"
        return 0
    fi
    local dir=$1
    local poolname=pool-isa

    ceph osd erasure-code-profile set profile-isa \
        plugin=isa \
        crush-failure-domain=osd || return 1
    create_pool $poolname 1 1 erasure profile-isa \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
}

function TEST_rados_put_get_jerasure() {
    local dir=$1

    rados_put_get $dir ecpool || return 1

    local poolname=pool-jerasure
    local profile=profile-jerasure

    ceph osd erasure-code-profile set $profile \
        plugin=jerasure \
        k=4 m=2 \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1
    rados_osds_out_in $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_rados_put_get_shec() {
    local dir=$1

    local poolname=pool-shec
    local profile=profile-shec

    ceph osd erasure-code-profile set $profile \
        plugin=shec \
        k=2 m=1 c=1 \
        crush-failure-domain=osd || return 1
    create_pool $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_alignment_constraints() {
    local dir=$1
    local payload=ABC
    echo "$payload" > $dir/ORIGINAL
    #
    # Verify that the rados command enforces the alignment constraints
    # imposed by the stripe width
    # See http://tracker.ceph.com/issues/8622
    #
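    # For example, assuming the shipped default stripe unit of 4096 and
    # k=2 for myprofile, block_size = 4096 * 2 - 1 = 8191, i.e. one byte
    # short of the stripe width, so the put below exercises the
    # unaligned write path.
    #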
    local stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
    eval local $(ceph osd erasure-code-profile get myprofile | grep k=)
    local block_size=$((stripe_unit * k - 1))
    dd if=/dev/zero of=$dir/ORIGINAL bs=$block_size count=2
    rados --block-size=$block_size \
        --pool ecpool put UNALIGNED $dir/ORIGINAL || return 1
    rm $dir/ORIGINAL
}

function chunk_size() {
    echo $(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
}

#
# By default an object will be split in two (k=2) with the first part
# of the object in the first OSD of the up set and the second part in
# the next OSD in the up set. This layout is defined by the mapping
# parameter and this function helps verify that the first and second
# parts of the object are located in the OSDs where they should be.
#
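# The check greps for the payload markers directly under the OSD data
# directories ($dir/<osd-id>) after asking each OSD to flush its journal;
# this assumes the objects end up as plain files on disk (filestore), e.g.
# FIRSTecpool is expected somewhere under the directory of the first OSD
# in the up set and SECONDecpool under that of the second.
#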
function verify_chunk_mapping() {
    local dir=$1
    local poolname=$2
    local first=$3
    local second=$4

    local payload=$(printf '%*s' $(chunk_size) FIRST$poolname ; printf '%*s' $(chunk_size) SECOND$poolname)
    echo -n "$payload" > $dir/ORIGINAL

    rados --pool $poolname put SOMETHING$poolname $dir/ORIGINAL || return 1
    rados --pool $poolname get SOMETHING$poolname $dir/COPY || return 1
    local -a osds=($(get_osds $poolname SOMETHING$poolname))
    for (( i = 0; i < ${#osds[@]}; i++ )) ; do
        ceph daemon osd.${osds[$i]} flush_journal
    done
    diff $dir/ORIGINAL $dir/COPY || return 1
    rm $dir/COPY

    local -a osds=($(get_osds $poolname SOMETHING$poolname))
    grep --quiet --recursive --text FIRST$poolname $dir/${osds[$first]} || return 1
    grep --quiet --recursive --text SECOND$poolname $dir/${osds[$second]} || return 1
}

function TEST_chunk_mapping() {
    local dir=$1

    #
    # mapping=DD_ is the default:
    # first OSD (i.e. 0) in the up set has the first part of the object
    # second OSD (i.e. 1) in the up set has the second part of the object
    #
    verify_chunk_mapping $dir ecpool 0 1 || return 1

    ceph osd erasure-code-profile set remap-profile \
        plugin=lrc \
        layers='[ [ "_DD", "" ] ]' \
        mapping='_DD' \
        crush-steps='[ [ "choose", "osd", 0 ] ]' || return 1
    ceph osd erasure-code-profile get remap-profile
    create_pool remap-pool 12 12 erasure remap-profile \
        || return 1

    #
    # mapping=_DD
    # second OSD (i.e. 1) in the up set has the first part of the object
    # third OSD (i.e. 2) in the up set has the second part of the object
    #
    verify_chunk_mapping $dir remap-pool 1 2 || return 1

    delete_pool remap-pool
    ceph osd erasure-code-profile rm remap-profile
}

main test-erasure-code "$@"

# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/erasure-code/test-erasure-code.sh"
# End: