#!/bin/bash
#
# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#

source $(dirname $0)/../detect-build-env-vars.sh
source $CEPH_ROOT/qa/workunits/ceph-helpers.sh

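#
# Set up a whole cluster (one mon, one mgr, 11 osds), check that the erasure
# code plugins are preloaded by both the mon and the osd, create the erasure
# coded pool (ecpool) shared by the tests, run the requested TEST_* functions
# (all of them by default), then clean up.
#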
function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7101" # git grep '\<7101\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON --mon-osd-prime-pg-temp=false"

    setup $dir || return 1
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    # check that erasure code plugins are preloaded
    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
    grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
    for id in $(seq 0 10) ; do
        run_osd $dir $id || return 1
    done
    wait_for_clean || return 1
    # check that erasure code plugins are preloaded
    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
    grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
    create_erasure_coded_pool ecpool || return 1

    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        $func $dir || return 1
    done

    delete_pool ecpool || return 1
    teardown $dir || return 1
}

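#
# Create an erasure coded pool named $1 using a profile (myprofile) that only
# sets crush-failure-domain=osd and otherwise relies on the plugin defaults
# (jerasure, k=2), as the chunk mapping test below assumes.
#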
function create_erasure_coded_pool() {
    local poolname=$1

    ceph osd erasure-code-profile set myprofile \
        crush-failure-domain=osd || return 1
    ceph osd pool create $poolname 12 12 erasure myprofile \
        || return 1
    wait_for_clean || return 1
}

function delete_pool() {
    local poolname=$1

    ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
}

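#
# Write a known payload to an object, read it back and compare. Then mark the
# last OSD of the object's acting set out and verify the object can still be
# read, which implies it was reconstructed from the remaining chunks.
#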
function rados_put_get() {
    local dir=$1
    local poolname=$2
    local objname=${3:-SOMETHING}

    for marker in AAA BBB CCCC DDDD ; do
        printf "%*s" 1024 $marker
    done > $dir/ORIGINAL

    #
    # put and get an object, check the copy is identical to the original
    #
    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    rm $dir/COPY

    #
    # take out an OSD used to store the object and
    # check the object can still be retrieved, which implies
    # recovery
    #
    local -a initial_osds=($(get_osds $poolname $objname))
    local last=$((${#initial_osds[@]} - 1))
    ceph osd out ${initial_osds[$last]} || return 1
    ! get_osds $poolname $objname | grep '\<'${initial_osds[$last]}'\>' || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    ceph osd in ${initial_osds[$last]} || return 1

    rm $dir/ORIGINAL
}

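#
# Same put/get check as rados_put_get, then mark the first two OSDs of the
# acting set out, wait for the PGs to move to other OSDs, verify the object is
# still readable, bring the OSDs back in and check the mapping returns to what
# it was before.
#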
function rados_osds_out_in() {
    local dir=$1
    local poolname=$2
    local objname=${3:-SOMETHING}

    for marker in FFFF GGGG HHHH IIII ; do
        printf "%*s" 1024 $marker
    done > $dir/ORIGINAL

    #
    # put and get an object, check the copy is identical to the original
    #
    rados --pool $poolname put $objname $dir/ORIGINAL || return 1
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    rm $dir/COPY

    #
    # take out two OSDs used to store the object, wait for the cluster
    # to be clean (i.e. all PGs are active and clean) again, which
    # implies the PGs have been moved to use the remaining OSDs. Check
    # the object can still be retrieved.
    #
    wait_for_clean || return 1
    local osds_list=$(get_osds $poolname $objname)
    local -a osds=($osds_list)
    for osd in 0 1 ; do
        ceph osd out ${osds[$osd]} || return 1
    done
    wait_for_clean || return 1
    #
    # verify the object is no longer mapped to the osds that are out
    #
    for osd in 0 1 ; do
        ! get_osds $poolname $objname | grep '\<'${osds[$osd]}'\>' || return 1
    done
    rados --pool $poolname get $objname $dir/COPY || return 1
    diff $dir/ORIGINAL $dir/COPY || return 1
    #
    # bring the osds back in, wait for the cluster to be clean
    # (i.e. all PGs are active and clean) again, which implies
    # the PGs go back to using the same osds as before
    #
    for osd in 0 1 ; do
        ceph osd in ${osds[$osd]} || return 1
    done
    wait_for_clean || return 1
    test "$osds_list" = "$(get_osds $poolname $objname)" || return 1
    rm $dir/ORIGINAL
}

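#
# Each TEST_* function below exercises rados_put_get (and, in the jerasure
# case, rados_osds_out_in as well) against a pool created with a specific
# erasure code plugin, then deletes the pool it created.
#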
function TEST_rados_put_get_lrc_advanced() {
    local dir=$1
    local poolname=pool-lrc-a
    local profile=profile-lrc-a

    ceph osd erasure-code-profile set $profile \
        plugin=lrc \
        mapping=DD_ \
        crush-steps='[ [ "chooseleaf", "osd", 0 ] ]' \
        layers='[ [ "DDc", "" ] ]' || return 1
    ceph osd pool create $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_rados_put_get_lrc_kml() {
    local dir=$1
    local poolname=pool-lrc
    local profile=profile-lrc

    ceph osd erasure-code-profile set $profile \
        plugin=lrc \
        k=4 m=2 l=3 \
        crush-failure-domain=osd || return 1
    ceph osd pool create $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_rados_put_get_isa() {
    if ! erasure_code_plugin_exists isa ; then
        echo "SKIP because plugin isa has not been built"
        return 0
    fi
    local dir=$1
    local poolname=pool-isa

    ceph osd erasure-code-profile set profile-isa \
        plugin=isa \
        crush-failure-domain=osd || return 1
    ceph osd pool create $poolname 1 1 erasure profile-isa \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
}

function TEST_rados_put_get_jerasure() {
    local dir=$1

    rados_put_get $dir ecpool || return 1

    local poolname=pool-jerasure
    local profile=profile-jerasure

    ceph osd erasure-code-profile set $profile \
        plugin=jerasure \
        k=4 m=2 \
        crush-failure-domain=osd || return 1
    ceph osd pool create $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1
    rados_osds_out_in $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_rados_put_get_shec() {
    local dir=$1

    local poolname=pool-shec
    local profile=profile-shec

    ceph osd erasure-code-profile set $profile \
        plugin=shec \
        k=2 m=1 c=1 \
        crush-failure-domain=osd || return 1
    ceph osd pool create $poolname 12 12 erasure $profile \
        || return 1

    rados_put_get $dir $poolname || return 1

    delete_pool $poolname
    ceph osd erasure-code-profile rm $profile
}

function TEST_alignment_constraints() {
    local dir=$1
    local payload=ABC
    echo "$payload" > $dir/ORIGINAL
    #
    # Verify that the rados command enforces alignment constraints
    # imposed by the stripe width
    # See http://tracker.ceph.com/issues/8622
    #
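    # With the default osd_pool_erasure_code_stripe_unit (4096 bytes in a
    # stock build, an assumption here) and k=2 from myprofile, block_size
    # below works out to 2 * 4096 - 1 = 8191, which is deliberately not a
    # multiple of the stripe width (stripe_unit * k).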
    local stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
    eval local $(ceph osd erasure-code-profile get myprofile | grep k=)
    local block_size=$((stripe_unit * k - 1))
    dd if=/dev/zero of=$dir/ORIGINAL bs=$block_size count=2
    rados --block-size=$block_size \
        --pool ecpool put UNALIGNED $dir/ORIGINAL || return 1
    rm $dir/ORIGINAL
}

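#
# The size of a single erasure coded chunk: with the default profile each of
# the k data chunks of a stripe is one stripe unit, so the configured stripe
# unit is used as the chunk size when building test payloads below.
#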
function chunk_size() {
    echo $(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
}

#
# By default an object will be split in two (k=2) with the first part
# of the object in the first OSD of the up set and the second part in
# the next OSD in the up set. This layout is defined by the mapping
# parameter and this function helps verify that the first and second
# part of the object are located on the OSDs where they should be.
#
function verify_chunk_mapping() {
    local dir=$1
    local poolname=$2
    local first=$3
    local second=$4

    local payload=$(printf '%*s' $(chunk_size) FIRST$poolname ; printf '%*s' $(chunk_size) SECOND$poolname)
    echo -n "$payload" > $dir/ORIGINAL

    rados --pool $poolname put SOMETHING$poolname $dir/ORIGINAL || return 1
    rados --pool $poolname get SOMETHING$poolname $dir/COPY || return 1
    local -a osds=($(get_osds $poolname SOMETHING$poolname))
    for (( i = 0; i < ${#osds[@]}; i++ )) ; do
        ceph daemon osd.${osds[$i]} flush_journal
    done
    diff $dir/ORIGINAL $dir/COPY || return 1
    rm $dir/COPY

    local -a osds=($(get_osds $poolname SOMETHING$poolname))
    grep --quiet --recursive --text FIRST$poolname $dir/${osds[$first]} || return 1
    grep --quiet --recursive --text SECOND$poolname $dir/${osds[$second]} || return 1
}

function TEST_chunk_mapping() {
    local dir=$1

    #
    # mapping=DD_ is the default:
    # first OSD (i.e. 0) in the up set has the first part of the object
    # second OSD (i.e. 1) in the up set has the second part of the object
    #
    verify_chunk_mapping $dir ecpool 0 1 || return 1

    ceph osd erasure-code-profile set remap-profile \
        plugin=lrc \
        layers='[ [ "_DD", "" ] ]' \
        mapping='_DD' \
        crush-steps='[ [ "choose", "osd", 0 ] ]' || return 1
    ceph osd erasure-code-profile get remap-profile
    ceph osd pool create remap-pool 12 12 erasure remap-profile \
        || return 1

    #
    # mapping=_DD
    # second OSD (i.e. 1) in the up set has the first part of the object
    # third OSD (i.e. 2) in the up set has the second part of the object
    #
    verify_chunk_mapping $dir remap-pool 1 2 || return 1

    delete_pool remap-pool
    ceph osd erasure-code-profile rm remap-profile
}

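# main (from ceph-helpers.sh) runs every TEST_* function above by default; a
# single test can be run by passing its name on the command line, for example:
#   test/erasure-code/test-erasure-code.sh TEST_rados_put_get_jerasure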
main test-erasure-code "$@"

# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/erasure-code/test-erasure-code.sh"
# End: