#!/usr/bin/env bash
#
# Copyright (C) 2013, 2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
# Pull in the standalone test harness (setup, teardown, run_mon, main, ...).
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
19 | |
function run() {
    # Test driver invoked by main(): runs every TEST_* function (or the
    # ones named on the command line) against a fresh cluster directory.
    #
    # $1   - scratch directory for mon/osd data
    # $@   - optional explicit list of TEST_* function names
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7105" # git grep '\<7105\>' : there must be only one
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    export CEPH_ARGS

    # Default: introspect the shell environment for TEST_* function names.
    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    local func    # declared local so the loop variable does not leak
    for func in $funcs ; do
        setup "$dir" || return 1
        $func "$dir" || return 1
        teardown "$dir" || return 1
    done
}
7c673cae FG |
# Before http://tracker.ceph.com/issues/8307 the invalid profile was created
function TEST_erasure_invalid_profile() {
    local dir=$1
    run_mon "$dir" a || return 1
    local poolname=pool_erasure
    local notaprofile=not-a-valid-erasure-code-profile
    # The pool create must fail and must not leave the bogus profile behind.
    ! ceph osd pool create "$poolname" 12 12 erasure "$notaprofile" || return 1
    ! ceph osd erasure-code-profile ls | grep "$notaprofile" || return 1
}
46 | ||
function TEST_erasure_crush_rule() {
    local dir=$1
    run_mon "$dir" a || return 1
    #
    # choose the crush ruleset used with an erasure coded pool
    #
    local crush_ruleset=myruleset
    ! ceph osd crush rule ls | grep "$crush_ruleset" || return 1
    ceph osd crush rule create-erasure "$crush_ruleset"
    ceph osd crush rule ls | grep "$crush_ruleset"
    local poolname
    poolname=pool_erasure1
    # no pool uses rule id 1 yet; after create the rule must be referenced
    ! ceph --format json osd dump | grep '"crush_rule":1' || return 1
    ceph osd pool create "$poolname" 12 12 erasure default "$crush_ruleset"
    ceph --format json osd dump | grep '"crush_rule":1' || return 1
    #
    # a crush ruleset by the name of the pool is implicitly created
    #
    poolname=pool_erasure2
    ceph osd erasure-code-profile set myprofile
    ceph osd pool create "$poolname" 12 12 erasure myprofile
    ceph osd crush rule ls | grep "$poolname" || return 1
    #
    # a non existent crush ruleset given in argument is an error
    # http://tracker.ceph.com/issues/9304
    #
    poolname=pool_erasure3
    ! ceph osd pool create "$poolname" 12 12 erasure myprofile INVALIDRULESET || return 1
}
76 | ||
function TEST_erasure_code_profile_default() {
    local dir=$1
    run_mon "$dir" a || return 1
    # fix: $poolname was never set in the original, so the create command
    # silently lost its first argument
    local poolname=pool_erasure_default
    ceph osd erasure-code-profile rm default || return 1
    ! ceph osd erasure-code-profile ls | grep default || return 1
    # creating an erasure pool with the default profile re-creates the profile
    ceph osd pool create "$poolname" 12 12 erasure default
    ceph osd erasure-code-profile ls | grep default || return 1
}
85 | ||
function TEST_erasure_crush_stripe_unit() {
    local dir=$1
    # the default stripe unit is used to initialize the pool
    run_mon "$dir" a --public-addr "$CEPH_MON"
    local stripe_unit
    stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
    # Import k=<N> from the profile into a local shell variable.
    # fix: the original read "myprofile", which does not exist on a fresh
    # mon; the pool below is created from the "default" profile.
    eval local $(ceph osd erasure-code-profile get default | grep k=)
    # fix: the original wrote "stripe_width = $((...))" — with spaces that
    # is not an assignment, it runs a command named "stripe_width".
    local stripe_width=$((stripe_unit * k))
    ceph osd pool create pool_erasure 12 12 erasure
    ceph --format json osd dump | tee "$dir/osd.json"
    grep '"stripe_width":'"$stripe_width" "$dir/osd.json" > /dev/null || return 1
}
97 | ||
function TEST_erasure_crush_stripe_unit_padded() {
    local dir=$1
    # setting osd_pool_erasure_code_stripe_unit modifies the stripe_width
    # and it is padded as required by the default plugin
    # fix: declare locals; the original appended to a possibly pre-set
    # global/environment $profile and leaked every variable.
    local profile=""
    profile+=" plugin=jerasure"
    profile+=" technique=reed_sol_van"
    local k=4
    profile+=" k=$k"
    profile+=" m=2"
    local actual_stripe_unit=2048
    # request one byte less than the valid unit: the plugin must pad it up
    local desired_stripe_unit=$((actual_stripe_unit - 1))
    local actual_stripe_width=$((actual_stripe_unit * k))
    run_mon "$dir" a \
        --osd_pool_erasure_code_stripe_unit "$desired_stripe_unit" \
        --osd_pool_default_erasure_code_profile "$profile" || return 1
    ceph osd pool create pool_erasure 12 12 erasure
    ceph osd dump | tee "$dir/osd.json"
    grep "stripe_width $actual_stripe_width" "$dir/osd.json" > /dev/null || return 1
}
117 | ||
function TEST_erasure_code_pool() {
    local dir=$1
    run_mon "$dir" a || return 1
    ceph --format json osd dump > "$dir/osd.json"
    local expected='"erasure_code_profile":"default"'
    # no pool references the default profile before the create, one after
    ! grep "$expected" "$dir/osd.json" || return 1
    ceph osd pool create erasurecodes 12 12 erasure
    ceph --format json osd dump | tee "$dir/osd.json"
    grep "$expected" "$dir/osd.json" > /dev/null || return 1

    # re-creating with the same type is reported, changing the type is refused
    ceph osd pool create erasurecodes 12 12 erasure 2>&1 | \
        grep 'already exists' || return 1
    ceph osd pool create erasurecodes 12 12 2>&1 | \
        grep 'cannot change to type replicated' || return 1
}
133 | ||
function TEST_replicated_pool_with_ruleset() {
    local dir=$1
    run_mon "$dir" a
    local ruleset=ruleset0
    local root=host1
    ceph osd crush add-bucket "$root" host
    local failure_domain=osd
    local poolname=mypool
    ceph osd crush rule create-simple "$ruleset" "$root" "$failure_domain" || return 1
    ceph osd crush rule ls | grep "$ruleset"
    ceph osd pool create "$poolname" 12 12 replicated "$ruleset" || return 1
    # fix: $() instead of backticks; declaration split from the command
    # substitution so a failure is not masked by 'local'
    local rule_id
    rule_id=$(ceph osd crush rule dump "$ruleset" | grep "rule_id" | awk -F[' ':,] '{print $4}')
    ceph osd pool get "$poolname" crush_rule 2>&1 | \
        grep "crush_rule: $rule_id" || return 1
    # non-existent crush ruleset
    ceph osd pool create newpool 12 12 replicated non-existent 2>&1 | \
        grep "doesn't exist" || return 1
}
152 | ||
7c673cae FG |
function TEST_erasure_code_pool_lrc() {
    local dir=$1
    run_mon "$dir" a || return 1

    ceph osd erasure-code-profile set LRCprofile \
        plugin=lrc \
        mapping=DD_ \
        layers='[ [ "DDc", "" ] ]' || return 1

    ceph --format json osd dump > "$dir/osd.json"
    local expected='"erasure_code_profile":"LRCprofile"'
    local poolname=erasurecodes
    # profile unreferenced before the create, referenced after
    ! grep "$expected" "$dir/osd.json" || return 1
    ceph osd pool create "$poolname" 12 12 erasure LRCprofile
    ceph --format json osd dump | tee "$dir/osd.json"
    grep "$expected" "$dir/osd.json" > /dev/null || return 1
    # a crush rule named after the pool is implicitly created
    ceph osd crush rule ls | grep "$poolname" || return 1
}
171 | ||
function TEST_replicated_pool() {
    local dir=$1
    run_mon "$dir" a || return 1
    ceph osd pool create replicated 12 12 replicated replicated_rule || return 1
    ceph osd pool create replicated 12 12 replicated replicated_rule 2>&1 | \
        grep 'already exists' || return 1
    # default is replicated
    ceph osd pool create replicated1 12 12 || return 1
    # default is replicated, pgp_num = pg_num
    ceph osd pool create replicated2 12 || return 1
    # changing an existing pool's type is refused
    ceph osd pool create replicated 12 12 erasure 2>&1 | \
        grep 'cannot change to type erasure' || return 1
}
185 | ||
function TEST_no_pool_delete() {
    local dir=$1
    run_mon "$dir" a || return 1
    ceph osd pool create foo 1 || return 1
    # with mon_allow_pool_delete=false the delete must be refused ...
    ceph tell mon.a injectargs -- --no-mon-allow-pool-delete || return 1
    ! ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
    # ... and succeed again once re-enabled
    ceph tell mon.a injectargs -- --mon-allow-pool-delete || return 1
    ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
}
195 | ||
function TEST_utf8_cli() {
    local dir=$1
    run_mon "$dir" a || return 1
    # Hopefully it's safe to include literal UTF-8 characters to test
    # the fix for http://tracker.ceph.com/issues/7387. If it turns out
    # to not be OK (when is the default encoding *not* UTF-8?), maybe
    # the character '黄' can be replaced with the escape $'\xe9\xbb\x84'
    ceph osd pool create 黄 16 || return 1
    ceph osd lspools 2>&1 | \
        grep "黄" || return 1
    # NOTE(review): 'python' may resolve to python2 or python3 depending on
    # the host; json.load(sys.stdin) works under both — confirm on target.
    ceph -f json-pretty osd dump | \
        python -c "import json; import sys; json.load(sys.stdin)" || return 1
    ceph osd pool delete 黄 黄 --yes-i-really-really-mean-it
}
210 | ||
94b18763 FG |
function TEST_pool_create_rep_expected_num_objects() {
    local dir=$1
    setup "$dir" || return 1

    export CEPH_ARGS
    run_mon "$dir" a || return 1
    run_mgr "$dir" x || return 1
    # disable pg dir merge
    run_osd_filestore "$dir" 0 || return 1

    # expected_num_objects=100000 should pre-split the pg directories
    ceph osd pool create rep_expected_num_objects 64 64 replicated replicated_rule 100000 || return 1
    # wait for pg dir creating
    sleep 30
    ceph pg ls
    find "${dir}/0/current" -ls

    # fix: grep -c instead of grep | wc -l; declare ret local
    local ret
    ret=$(find "${dir}/0/current/1.0_head/" | grep -c DIR)
    if [ "$ret" -le 2 ]; then
        return 1
    else
        echo "TEST_pool_create_rep_expected_num_objects PASS"
    fi
}
234 | ||
81eedcae TL |
# Set a recovery_priority on $pools+1 pools, restart the cluster, and check
# that the monitor converted the stored priorities to the expected values.
#
# $1 - test directory
# $2 - highest pool index (pools 1..$2+1 are created)
# $3 - spread: multiplier expression applied to each pool index. NOTE: it is
#      deliberately passed UNQUOTED to expr below, so a value like "5 - 50"
#      word-splits into extra expr operands (i * 5 - 50). Do not quote it.
# $4 - space-separated list of expected converted priorities, in pool order
function check_pool_priority() {
    local dir=$1
    shift
    local pools=$1
    shift
    local spread="$1"
    shift
    local results="$1"

    setup $dir || return 1

    # out-of-range priorities are normally rejected; allow them for the test
    EXTRA_OPTS="--debug_allow_any_pool_priority=true"
    export EXTRA_OPTS
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1

    # Add pool 0 too
    for i in $(seq 0 $pools)
    do
        num=$(expr $i + 1)
        ceph osd pool create test${num} 1 1
    done

    wait_for_clean || return 1
    # priority for pool i is "i * spread" (spread may inject more operands)
    for i in $(seq 0 $pools)
    do
        num=$(expr $i + 1)
        ceph osd pool set test${num} recovery_priority $(expr $i \* $spread)
    done

    #grep "recovery_priority.*pool set" out/mon.a.log

    # NOTE(review): 'bin/ceph' (build-tree relative path) looks like leftover
    # debug output — confirm it is intentional; plain 'ceph' is used elsewhere.
    bin/ceph osd dump

    # Restart everything so mon converts the priorities
    kill_daemons
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    activate_osd $dir 0 || return 1
    activate_osd $dir 1 || return 1
    activate_osd $dir 2 || return 1
    sleep 5

    grep convert $dir/mon.a.log
    ceph osd dump

    # Compare each pool's dumped recovery_priority against $results, position
    # by position (awk prints field $pos of the expected-results string).
    pos=1
    for i in $(ceph osd dump | grep ^pool | sed 's/.*recovery_priority //' | awk '{ print $1 }')
    do
        result=$(echo $results | awk "{ print \$${pos} }")
        # A value of 0 is an unset value so sed/awk gets "pool"
        if test $result = "0"
        then
            result="pool"
        fi
        test "$result" = "$i" || return 1
        pos=$(expr $pos + 1)
    done
}
297 | ||
function TEST_pool_pos_only_prio() {
    local dir=$1
    # spread 5, positive priorities only: expected converted values 0..10
    check_pool_priority "$dir" 20 5 "0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10" || return 1
}
302 | ||
function TEST_pool_neg_only_prio() {
    local dir=$1
    # spread -5, negative priorities only: expected converted values 0..-10
    check_pool_priority "$dir" 20 -5 "0 0 -1 -1 -2 -2 -3 -3 -4 -4 -5 -5 -6 -6 -7 -7 -8 -8 -9 -9 -10" || return 1
}
307 | ||
function TEST_pool_both_prio() {
    local dir=$1
    # spread "5 - 50" (i*5-50): mixed negative and positive priorities
    check_pool_priority "$dir" 20 "5 - 50" "-10 -9 -8 -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10" || return 1
}
312 | ||
function TEST_pool_both_prio_no_neg() {
    local dir=$1
    # spread "2 - 4" (i*2-4): small negative range, mostly positive
    check_pool_priority "$dir" 20 "2 - 4" "-4 -2 0 0 1 1 2 2 3 3 4 5 5 6 6 7 7 8 8 9 10" || return 1
}
317 | ||
function TEST_pool_both_prio_no_pos() {
    local dir=$1
    # spread "2 - 36" (i*2-36): mostly negative, small positive range
    check_pool_priority "$dir" 20 "2 - 36" "-10 -9 -8 -8 -7 -7 -6 -6 -5 -5 -4 -3 -3 -2 -2 -1 -1 0 0 2 4" || return 1
}
322 | ||
323 | ||
7c673cae FG |
# Entry point: main comes from the sourced ceph-helpers.sh (not defined in
# this file); it prepares a per-test directory and calls run() above.
main osd-pool-create "$@"

# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/osd-pool-create.sh"
# End: