#!/bin/bash
#
# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
source $(dirname $0)/../detect-build-env-vars.sh
source $CEPH_ROOT/qa/workunits/ceph-helpers.sh

function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7104" # git grep '\<7104\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "

    local funcs=${@:-$(set | ${SED} -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        setup $dir || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}

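#
# Each TEST_* function below runs against a mon (and sometimes osds) started
# in a scratch directory that is set up and torn down around it.  A single
# test can be selected by naming it on the command line, e.g. (illustrative
# invocation, assuming ceph-helpers' main() at the bottom of this file
# forwards its arguments to run(), as the ${@:-...} default above suggests):
#
#   test/mon/osd-crush.sh TEST_crush_rule_dump
#
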
function TEST_crush_rule_create_simple() {
    local dir=$1

    run_mon $dir a || return 1

    ceph --format xml osd crush rule dump replicated_ruleset | \
        egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
        grep '<op>choose_firstn</op><num>0</num><type>osd</type>' || return 1
    local ruleset=ruleset0
    local root=host1
    ceph osd crush add-bucket $root host
    local failure_domain=osd
    ceph osd crush rule create-simple $ruleset $root $failure_domain || return 1
    ceph osd crush rule create-simple $ruleset $root $failure_domain 2>&1 | \
        grep "$ruleset already exists" || return 1
    ceph --format xml osd crush rule dump $ruleset | \
        egrep '<op>take</op><item>[^<]+</item><item_name>'$root'</item_name>' | \
        grep '<op>choose_firstn</op><num>0</num><type>'$failure_domain'</type>' || return 1
    ceph osd crush rule rm $ruleset || return 1
}

function TEST_crush_rule_dump() {
    local dir=$1

    run_mon $dir a || return 1

    local ruleset=ruleset1
    ceph osd crush rule create-erasure $ruleset || return 1
    local expected
    expected="<rule_name>$ruleset</rule_name>"
    ceph --format xml osd crush rule dump $ruleset | grep $expected || return 1
    expected='"rule_name": "'$ruleset'"'
    ceph osd crush rule dump | grep "$expected" || return 1
    ! ceph osd crush rule dump non_existent_ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
}

function TEST_crush_rule_rm() {
    local dir=$1
    local ruleset=erasure2

    run_mon $dir a || return 1

    ceph osd crush rule create-erasure $ruleset default || return 1
    ceph osd crush rule ls | grep $ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
}

function TEST_crush_rule_create_erasure() {
    local dir=$1

    run_mon $dir a || return 1
    # should have at least one OSD
    run_osd $dir 0 || return 1

    local ruleset=ruleset3
    #
    # create a new ruleset with the default profile, implicitly
    #
    ceph osd crush rule create-erasure $ruleset || return 1
    ceph osd crush rule create-erasure $ruleset 2>&1 | \
        grep "$ruleset already exists" || return 1
    ceph --format xml osd crush rule dump $ruleset | \
        egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
        grep '<op>chooseleaf_indep</op><num>0</num><type>host</type>' || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # create a new ruleset with the default profile, explicitly
    #
    ceph osd crush rule create-erasure $ruleset default || return 1
    ceph osd crush rule ls | grep $ruleset || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # create a new ruleset and the default profile, implicitly
    #
    ceph osd erasure-code-profile rm default || return 1
    ! ceph osd erasure-code-profile ls | grep default || return 1
    ceph osd crush rule create-erasure $ruleset || return 1
    CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
    grep 'profile set default' $dir/mon.a.log || return 1
    ceph osd erasure-code-profile ls | grep default || return 1
    ceph osd crush rule rm $ruleset || return 1
    ! ceph osd crush rule ls | grep $ruleset || return 1
    #
    # verify that a bogus ruleset in the crushmap prevents the creation of
    # a pool (simulated here by replacing the crushtool validation command
    # with "false" so that crushmap validation always fails)
    #
    local crushtool_path_old=`ceph-conf --show-config-value crushtool`
    ceph tell mon.\* injectargs --crushtool "false"

    expect_failure $dir "Error EINVAL" \
        ceph osd pool create mypool 1 1 erasure || return 1
}

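# check_ruleset_id_match_rule_id asserts that the "rule_id" and "ruleset"
# fields reported by `ceph osd crush rule dump <name>` carry the same value.
# Illustrative fragment of the JSON output that the grep/awk pipeline below
# parses (values are examples only):
#
#     "rule_id": 1,
#     "ruleset": 1,
#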
function check_ruleset_id_match_rule_id() {
    local rule_name=$1
    rule_id=`ceph osd crush rule dump $rule_name | grep "\"rule_id\":" | awk -F ":|," '{print int($2)}'`
    ruleset_id=`ceph osd crush rule dump $rule_name | grep "\"ruleset\":" | awk -F ":|," '{print int($2)}'`
    test $ruleset_id = $rule_id || return 1
}

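# generate_manipulated_rules rewrites the decompiled crush map so that rule_id
# and ruleset no longer match: assuming the freshly created map contains the
# rulesets 0 (replicated_ruleset), 1 (test_rule1) and 2 (test_rule2), the sed
# calls below remap them to 3, 2 and 0 respectively before re-injecting the map.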
function generate_manipulated_rules() {
    local dir=$1
    ceph osd crush add-bucket $root host
    ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
    ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
    ceph osd getcrushmap -o $dir/original_map
    crushtool -d $dir/original_map -o $dir/decoded_original_map
    # manipulate the rulesets so that rule_id != ruleset_id
    ${SED} -i 's/ruleset 0/ruleset 3/' $dir/decoded_original_map
    ${SED} -i 's/ruleset 2/ruleset 0/' $dir/decoded_original_map
    ${SED} -i 's/ruleset 1/ruleset 2/' $dir/decoded_original_map

    crushtool -c $dir/decoded_original_map -o $dir/new_map
    ceph osd setcrushmap -i $dir/new_map

    ceph osd crush rule dump
}

function TEST_crush_ruleset_match_rule_when_creating() {
    local dir=$1

    run_mon $dir a || return 1

    local root=host1

    generate_manipulated_rules $dir

    ceph osd crush rule create-simple special_rule_simple $root osd firstn || return 1

    ceph osd crush rule dump
    # show that special_rule_simple has the same rule_id and ruleset_id
    check_ruleset_id_match_rule_id special_rule_simple || return 1
}

function TEST_add_ruleset_failed() {
    local dir=$1

    run_mon $dir a || return 1

    local root=host1

    ceph osd crush add-bucket $root host
    ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
    ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
    ceph osd getcrushmap > $dir/crushmap || return 1
    crushtool --decompile $dir/crushmap > $dir/crushmap.txt || return 1
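    # a crush ruleset id is stored in an 8 bit field, so 0..255 are the only
    # possible values: appending rules for every remaining id fills the map,
    # and the create-simple below is then expected to fail with ENOSPC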
    for i in $(seq 3 255)
    do
        cat <<EOF
rule test_rule$i {
        ruleset $i
        type replicated
        min_size 1
        max_size 10
        step take $root
        step choose firstn 0 type osd
        step emit
}
EOF
    done >> $dir/crushmap.txt
    crushtool --compile $dir/crushmap.txt -o $dir/crushmap || return 1
    ceph osd setcrushmap -i $dir/crushmap || return 1
    ceph osd crush rule create-simple test_rule_nospace $root osd firstn 2>&1 | grep "Error ENOSPC" || return 1
}

function TEST_crush_rename_bucket() {
    local dir=$1

    run_mon $dir a || return 1

    ceph osd crush add-bucket host1 host
    ! ceph osd tree | grep host2 || return 1
    ceph osd crush rename-bucket host1 host2 || return 1
    ceph osd tree | grep host2 || return 1
    ceph osd crush rename-bucket host1 host2 || return 1 # idempotency
    ceph osd crush rename-bucket nonexistent something 2>&1 | grep "Error ENOENT" || return 1
}

function TEST_crush_reject_empty() {
    local dir=$1
    run_mon $dir a || return 1
    # should have at least one OSD
    run_osd $dir 0 || return 1

    local empty_map=$dir/empty_map
    :> $empty_map.txt
    crushtool -c $empty_map.txt -o $empty_map.map || return 1
    expect_failure $dir "Error EINVAL" \
        ceph osd setcrushmap -i $empty_map.map || return 1
}

function TEST_crush_tree() {
    local dir=$1
    run_mon $dir a || return 1

    ceph osd crush tree --format=xml | \
        $XMLSTARLET val -e -r $CEPH_ROOT/src/test/mon/osd-crush-tree.rng - || return 1
}

# NB: disable me if I am too time consuming
function TEST_crush_repair_faulty_crushmap() {
    local dir=$1
    fsid=$(uuidgen)
    MONA=127.0.0.1:7113 # git grep '\<7113\>' : there must be only one
    MONB=127.0.0.1:7114 # git grep '\<7114\>' : there must be only one
    MONC=127.0.0.1:7115 # git grep '\<7115\>' : there must be only one
    CEPH_ARGS_orig=$CEPH_ARGS
    CEPH_ARGS="--fsid=$fsid --auth-supported=none "
    CEPH_ARGS+="--mon-initial-members=a,b,c "
    CEPH_ARGS+="--mon-host=$MONA,$MONB,$MONC "
    run_mon $dir a --public-addr $MONA || return 1
    run_mon $dir b --public-addr $MONB || return 1
    run_mon $dir c --public-addr $MONC || return 1

    local empty_map=$dir/empty_map
    :> $empty_map.txt
    crushtool -c $empty_map.txt -o $empty_map.map || return 1

    local crushtool_path_old=`ceph-conf --show-config-value crushtool`
    ceph tell mon.\* injectargs --crushtool "true"

    # importing an empty crushmap should fail because the default rbd pool
    # still uses a rule from the current map
    ceph osd setcrushmap -i $empty_map.map 2>&1 | grep "Error EINVAL: the crush rule no" || return 1

    # remove the default rbd pool
    ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1

    # now the empty crush map can be set successfully
    ceph osd setcrushmap -i $empty_map.map || return 1

    # should be an empty crush map without any buckets
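    # (poll with exponential backoff, 1+2+...+256 = up to 511 seconds in total,
    # since the monitors may need some time to agree on the new map)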
    success=false
    for delay in 1 2 4 8 16 32 64 128 256 ; do
        if ! test $(ceph osd crush dump --format=xml | \
                    $XMLSTARLET sel -t -m "//buckets/bucket" -v .) ; then
            success=true
            break
        fi
        sleep $delay
    done
    if ! $success ; then
        ceph osd crush dump --format=xml
        return 1
    fi
    # bring the monitors down; once mon.a is offline the "ceph" commands
    # would only hunt for the other monitors in vain
    kill_daemons $dir || return 1
    # rewrite the monstore with the good crush map
    $CEPH_ROOT/src/tools/ceph-monstore-update-crush.sh --rewrite $dir/a || return 1

    run_mon $dir a --public-addr $MONA || return 1
    run_mon $dir b --public-addr $MONB || return 1
    run_mon $dir c --public-addr $MONC || return 1
    # the buckets are back
    test $(ceph osd crush dump --format=xml | \
           $XMLSTARLET sel -t -m "//buckets/bucket" -v .) || return 1
    CEPH_ARGS=$CEPH_ARGS_orig
}

main osd-crush "$@"

# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/osd-crush.sh"
# End: