#!/bin/bash
#
# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
source $(dirname $0)/../detect-build-env-vars.sh
source $CEPH_ROOT/qa/workunits/ceph-helpers.sh

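# Run every TEST_* function (or only the functions given on the command line),
# each one against a fresh test directory that is set up and torn down around it.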
function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7104" # git grep '\<7104\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "

    local funcs=${@:-$(set | ${SED} -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        setup $dir || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}

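# Check the default replicated_rule and a rule made with
# "ceph osd crush rule create-simple": inspect the XML dump, expect an error
# on duplicate creation, then remove the rule.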
function TEST_crush_rule_create_simple() {
    local dir=$1

    run_mon $dir a || return 1

    ceph --format xml osd crush rule dump replicated_rule | \
        egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
        grep '<op>choose_firstn</op><num>0</num><type>osd</type>' || return 1
    local ruleset=ruleset0
    local root=host1
    ceph osd crush add-bucket $root host
    local failure_domain=osd
    ceph osd crush rule create-simple $ruleset $root $failure_domain || return 1
    ceph osd crush rule create-simple $ruleset $root $failure_domain 2>&1 | \
        grep "$ruleset already exists" || return 1
    ceph --format xml osd crush rule dump $ruleset | \
        egrep '<op>take</op><item>[^<]+</item><item_name>'$root'</item_name>' | \
        grep '<op>choose_firstn</op><num>0</num><type>'$failure_domain'</type>' || return 1
    ceph osd crush rule rm $ruleset || return 1
}

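# Check "ceph osd crush rule dump": dump a single rule by name, find it exactly
# once in the full dump, and fail on a non-existent rule.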
function TEST_crush_rule_dump() {
    local dir=$1

    run_mon $dir a || return 1

    local ruleset=ruleset1
    ceph osd crush rule create-erasure $ruleset || return 1
    test $(ceph --format json osd crush rule dump $ruleset | \
           jq ".rule_name == \"$ruleset\"") == true || return 1
    test $(ceph --format json osd crush rule dump | \
           jq "map(select(.rule_name == \"$ruleset\")) | length == 1") == true || return 1
    ! ceph osd crush rule dump non_existent_ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
}

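# Create and remove an erasure rule rooted at "default" and verify that
# "ceph osd crush rule ls" reflects both operations.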
function TEST_crush_rule_rm() {
    local dir=$1
    local ruleset=erasure2

    run_mon $dir a || return 1

    ceph osd crush rule create-erasure $ruleset default || return 1
    ceph osd crush rule ls | grep $ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
}

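# Exercise "ceph osd crush rule create-erasure": create a rule with the default
# erasure-code profile (implicitly and explicitly), recreate the default
# profile when it is missing, and reject pool creation when the injected
# crushtool is broken.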
function TEST_crush_rule_create_erasure() {
    local dir=$1

    run_mon $dir a || return 1
    # should have at least one OSD
    run_osd $dir 0 || return 1

    local ruleset=ruleset3
    #
    # create a new ruleset with the default profile, implicitly
    #
    ceph osd crush rule create-erasure $ruleset || return 1
    ceph osd crush rule create-erasure $ruleset 2>&1 | \
        grep "$ruleset already exists" || return 1
    ceph --format xml osd crush rule dump $ruleset | \
        egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
        grep '<op>chooseleaf_indep</op><num>0</num><type>host</type>' || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # create a new ruleset with the default profile, explicitly
    #
    ceph osd crush rule create-erasure $ruleset default || return 1
    ceph osd crush rule ls | grep $ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # create a new ruleset and the default profile, implicitly
    #
    ceph osd erasure-code-profile rm default || return 1
    ! ceph osd erasure-code-profile ls | grep default || return 1
    ceph osd crush rule create-erasure $ruleset || return 1
    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
    grep 'profile set default' $dir/mon.a.log || return 1
    ceph osd erasure-code-profile ls | grep default || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # verify that if the crushmap contains a bogus ruleset,
    # it will prevent the creation of a pool.
    #
    local crushtool_path_old=`ceph-conf --show-config-value crushtool`
    ceph tell mon.\* injectargs --crushtool "false"

    expect_failure $dir "Error EINVAL" \
        ceph osd pool create mypool 1 1 erasure || return 1
}

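# Helper: compare the rule_id and ruleset fields of a rule dump and fail if
# they differ.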
function check_ruleset_id_match_rule_id() {
    local rule_name=$1
    rule_id=`ceph osd crush rule dump $rule_name | grep "\"rule_id\":" | awk -F ":|," '{print int($2)}'`
    ruleset_id=`ceph osd crush rule dump $rule_name | grep "\"ruleset\":" | awk -F ":|," '{print int($2)}'`
    test $ruleset_id = $rule_id || return 1
}

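# Helper: build a crush map in which rule_id and ruleset no longer match, by
# decompiling the map, renumbering the rulesets with sed and setting the
# recompiled map.  Expects $root to be defined by the caller.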
function generate_manipulated_rules() {
    local dir=$1
    ceph osd crush add-bucket $root host
    ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
    ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
    ceph osd getcrushmap -o $dir/original_map
    crushtool -d $dir/original_map -o $dir/decoded_original_map
    # manipulate the rulesets to make rule_id != ruleset_id
    ${SED} -i 's/ruleset 0/ruleset 3/' $dir/decoded_original_map
    ${SED} -i 's/ruleset 2/ruleset 0/' $dir/decoded_original_map
    ${SED} -i 's/ruleset 1/ruleset 2/' $dir/decoded_original_map

    crushtool -c $dir/decoded_original_map -o $dir/new_map
    ceph osd setcrushmap -i $dir/new_map

    ceph osd crush rule dump
}

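# Even when existing rules have mismatched rule_id/ruleset (see
# generate_manipulated_rules), a newly created rule must get matching ids.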
function TEST_crush_ruleset_match_rule_when_creating() {
    local dir=$1

    run_mon $dir a || return 1

    local root=host1

    generate_manipulated_rules $dir

    ceph osd crush rule create-simple special_rule_simple $root osd firstn || return 1

    ceph osd crush rule dump
    # show that special_rule_simple has the same rule_id and ruleset_id
    check_ruleset_id_match_rule_id special_rule_simple || return 1
}

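# Fill the crush map with the maximum number of rulesets (ids 0-255) and
# check that creating one more fails with ENOSPC.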
function TEST_add_ruleset_failed() {
    local dir=$1

    run_mon $dir a || return 1

    local root=host1

    ceph osd crush add-bucket $root host
    ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
    ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
    ceph osd getcrushmap > $dir/crushmap || return 1
    crushtool --decompile $dir/crushmap > $dir/crushmap.txt || return 1
    for i in $(seq 3 255)
    do
        cat <<EOF
rule test_rule$i {
    ruleset $i
    type replicated
    min_size 1
    max_size 10
    step take $root
    step choose firstn 0 type osd
    step emit
}
EOF
    done >> $dir/crushmap.txt
    crushtool --compile $dir/crushmap.txt -o $dir/crushmap || return 1
    ceph osd setcrushmap -i $dir/crushmap || return 1
    ceph osd crush rule create-simple test_rule_nospace $root osd firstn 2>&1 | grep "Error ENOSPC" || return 1
}

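# Rename a crush bucket, verify the rename is idempotent, and verify that
# renaming a non-existent bucket returns ENOENT.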
function TEST_crush_rename_bucket() {
    local dir=$1

    run_mon $dir a || return 1

    ceph osd crush add-bucket host1 host
    ceph osd tree
    ! ceph osd tree | grep host2 || return 1
    ceph osd crush rename-bucket host1 host2 || return 1
    ceph osd tree
    ceph osd tree | grep host2 || return 1
    ceph osd crush rename-bucket host1 host2 || return 1 # idempotency
    ceph osd crush rename-bucket nonexistent something 2>&1 | grep "Error ENOENT" || return 1
}

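# Setting an empty crush map must be rejected with EINVAL while an OSD exists.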
function TEST_crush_reject_empty() {
    local dir=$1
    run_mon $dir a || return 1
    # should have at least one OSD
    run_osd $dir 0 || return 1

    local empty_map=$dir/empty_map
    :> $empty_map.txt
    crushtool -c $empty_map.txt -o $empty_map.map || return 1
    expect_failure $dir "Error EINVAL" \
        ceph osd setcrushmap -i $empty_map.map || return 1
}

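# Validate the XML output of "ceph osd crush tree" against the RELAX NG schema.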
function TEST_crush_tree() {
    local dir=$1
    run_mon $dir a || return 1

    ceph osd crush tree --format=xml | \
        $XMLSTARLET val -e -r $CEPH_ROOT/src/test/mon/osd-crush-tree.rng - || return 1
}

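# End-to-end repair scenario: a three-monitor cluster is made to accept an
# empty crush map, then ceph-monstore-update-crush.sh rewrites the monitor
# store with the last good map and the buckets reappear.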
# NB: disable me if I am too time consuming
function TEST_crush_repair_faulty_crushmap() {
    local dir=$1
    fsid=$(uuidgen)
    MONA=127.0.0.1:7113 # git grep '\<7113\>' : there must be only one
    MONB=127.0.0.1:7114 # git grep '\<7114\>' : there must be only one
    MONC=127.0.0.1:7115 # git grep '\<7115\>' : there must be only one
    CEPH_ARGS_orig=$CEPH_ARGS
    CEPH_ARGS="--fsid=$fsid --auth-supported=none "
    CEPH_ARGS+="--mon-initial-members=a,b,c "
    CEPH_ARGS+="--mon-host=$MONA,$MONB,$MONC "
    run_mon $dir a --public-addr $MONA || return 1
    run_mon $dir b --public-addr $MONB || return 1
    run_mon $dir c --public-addr $MONC || return 1

    local empty_map=$dir/empty_map
    :> $empty_map.txt
    crushtool -c $empty_map.txt -o $empty_map.map || return 1

    local crushtool_path_old=`ceph-conf --show-config-value crushtool`
    ceph tell mon.\* injectargs --crushtool "true"

    # importing an empty crushmap should fail, because the default pool rbd
    # still uses a rule from it
    ceph osd setcrushmap -i $empty_map.map 2>&1 | grep "Error EINVAL: the crush rule no" || return 1

    # remove the default pool rbd
    ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1

    # now setting the empty crush map succeeds
    ceph osd setcrushmap -i $empty_map.map || return 1

    # should be an empty crush map without any buckets
    success=false
    for delay in 1 2 4 8 16 32 64 128 256 ; do
        if test $(ceph osd crush dump --format=json | \
                  jq '.buckets | length == 0') == true ; then
            success=true
            break
        fi
        sleep $delay
    done
    if ! $success ; then
        ceph osd crush dump --format=json-pretty
        return 1
    fi
    # bring them down; once mon.a is offline the "ceph" commands will hunt in
    # vain for another monitor
    kill_daemons $dir || return 1
    # rewrite the monstore with the good crush map
    $CEPH_ROOT/src/tools/ceph-monstore-update-crush.sh --rewrite $dir/a || return 1

    run_mon $dir a --public-addr $MONA || return 1
    run_mon $dir b --public-addr $MONB || return 1
    run_mon $dir c --public-addr $MONC || return 1
    # the buckets are back
    test $(ceph osd crush dump --format=json | \
           jq '.buckets | length > 0') == true || return 1
    CEPH_ARGS=$CEPH_ARGS_orig
}

main osd-crush "$@"

# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/osd-crush.sh"
# End: