#!/usr/bin/env bash
#
# Copyright (C) 2013, 2014 Cloudwatt <libre.licensing@cloudwatt.com>
# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh

function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7105" # git grep '\<7105\>' : there must be only one
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    export CEPH_ARGS

    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        setup $dir || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}
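
# A usage sketch (assuming the environment ceph-helpers.sh expects, e.g.
# CEPH_ROOT pointing at a source checkout with the binaries on PATH): with
# no arguments the `set | sed` scan above selects every TEST_* function;
# naming one or more functions on the command line runs just those.
#
#   bash qa/standalone/mon/osd-pool-create.sh                       # all tests
#   bash qa/standalone/mon/osd-pool-create.sh TEST_replicated_pool  # one test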

# Before http://tracker.ceph.com/issues/8307 the invalid profile was created
function TEST_erasure_invalid_profile() {
    local dir=$1
    run_mon $dir a || return 1
    local poolname=pool_erasure
    local notaprofile=not-a-valid-erasure-code-profile
    ! ceph osd pool create $poolname 12 12 erasure $notaprofile || return 1
    ! ceph osd erasure-code-profile ls | grep $notaprofile || return 1
}

function TEST_erasure_crush_rule() {
    local dir=$1
    run_mon $dir a || return 1
    #
    # choose the crush ruleset used with an erasure coded pool
    #
    local crush_ruleset=myruleset
    ! ceph osd crush rule ls | grep $crush_ruleset || return 1
    ceph osd crush rule create-erasure $crush_ruleset || return 1
    ceph osd crush rule ls | grep $crush_ruleset || return 1
    local poolname
    poolname=pool_erasure1
    ! ceph --format json osd dump | grep '"crush_rule":1' || return 1
    ceph osd pool create $poolname 12 12 erasure default $crush_ruleset
    ceph --format json osd dump | grep '"crush_rule":1' || return 1
    #
    # a crush ruleset by the name of the pool is implicitly created
    #
    poolname=pool_erasure2
    ceph osd erasure-code-profile set myprofile
    ceph osd pool create $poolname 12 12 erasure myprofile
    ceph osd crush rule ls | grep $poolname || return 1
    #
    # a non-existent crush ruleset given as argument is an error
    # http://tracker.ceph.com/issues/9304
    #
    poolname=pool_erasure3
    ! ceph osd pool create $poolname 12 12 erasure myprofile INVALIDRULESET || return 1
}

function TEST_erasure_code_profile_default() {
    local dir=$1
    run_mon $dir a || return 1
    local poolname=pool_default
    ceph osd erasure-code-profile rm default || return 1
    ! ceph osd erasure-code-profile ls | grep default || return 1
    ceph osd pool create $poolname 12 12 erasure default || return 1
    ceph osd erasure-code-profile ls | grep default || return 1
}

function TEST_erasure_crush_stripe_unit() {
    local dir=$1
    # the default stripe unit is used to initialize the pool
    run_mon $dir a --public-addr $CEPH_MON || return 1
    local stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
    eval local $(ceph osd erasure-code-profile get default | grep k=)
    local stripe_width=$((stripe_unit * k))
    ceph osd pool create pool_erasure 12 12 erasure
    ceph --format json osd dump | tee $dir/osd.json
    grep '"stripe_width":'$stripe_width $dir/osd.json > /dev/null || return 1
}
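
# A worked instance of the check above (the defaults are assumptions worth
# verifying: osd_pool_erasure_code_stripe_unit=4096 and k=2 in the default
# profile): stripe_width = 4096 * 2 = 8192, so the grep expects to find
# '"stripe_width":8192' in the dump.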

function TEST_erasure_crush_stripe_unit_padded() {
    local dir=$1
    # setting osd_pool_erasure_code_stripe_unit modifies the stripe_width,
    # which is padded as required by the default plugin
    local profile
    profile+=" plugin=jerasure"
    profile+=" technique=reed_sol_van"
    local k=4
    profile+=" k=$k"
    profile+=" m=2"
    local actual_stripe_unit=2048
    local desired_stripe_unit=$((actual_stripe_unit - 1))
    local actual_stripe_width=$((actual_stripe_unit * k))
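    # The padding at work (inferred from the numbers above, not from the
    # plugin code): 2047 is not a multiple of the alignment jerasure needs,
    # so the monitor rounds the stripe unit back up to 2048, and the
    # resulting stripe_width is 2048 * 4 = 8192.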
    run_mon $dir a \
        --osd_pool_erasure_code_stripe_unit $desired_stripe_unit \
        --osd_pool_default_erasure_code_profile "$profile" || return 1
    ceph osd pool create pool_erasure 12 12 erasure
    ceph osd dump | tee $dir/osd.json
    grep "stripe_width $actual_stripe_width" $dir/osd.json > /dev/null || return 1
}

function TEST_erasure_code_pool() {
    local dir=$1
    run_mon $dir a || return 1
    ceph --format json osd dump > $dir/osd.json
    local expected='"erasure_code_profile":"default"'
    ! grep "$expected" $dir/osd.json || return 1
    ceph osd pool create erasurecodes 12 12 erasure
    ceph --format json osd dump | tee $dir/osd.json
    grep "$expected" $dir/osd.json > /dev/null || return 1

    ceph osd pool create erasurecodes 12 12 erasure 2>&1 | \
        grep 'already exists' || return 1
    ceph osd pool create erasurecodes 12 12 2>&1 | \
        grep 'cannot change to type replicated' || return 1
}

function TEST_replicated_pool_with_ruleset() {
    local dir=$1
    run_mon $dir a || return 1
    local ruleset=ruleset0
    local root=host1
    ceph osd crush add-bucket $root host
    local failure_domain=osd
    local poolname=mypool
    ceph osd crush rule create-simple $ruleset $root $failure_domain || return 1
    ceph osd crush rule ls | grep $ruleset || return 1
    ceph osd pool create $poolname 12 12 replicated $ruleset || return 1
    local rule_id=$(ceph osd crush rule dump $ruleset | grep "rule_id" | awk -F[' ':,] '{print $4}')
    ceph osd pool get $poolname crush_rule 2>&1 | \
        grep "crush_rule: $rule_id" || return 1
    # a non-existent crush ruleset is an error
    ceph osd pool create newpool 12 12 replicated non-existent 2>&1 | \
        grep "doesn't exist" || return 1
}

function TEST_erasure_code_pool_lrc() {
    local dir=$1
    run_mon $dir a || return 1

    ceph osd erasure-code-profile set LRCprofile \
        plugin=lrc \
        mapping=DD_ \
        layers='[ [ "DDc", "" ] ]' || return 1
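    # A gloss of the profile (my reading of the lrc plugin syntax, worth
    # checking against its documentation): mapping=DD_ declares two data
    # chunks plus one derived chunk, and the layer "DDc" says the third
    # chunk is a coding chunk computed from the two data chunks.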

    ceph --format json osd dump > $dir/osd.json
    local expected='"erasure_code_profile":"LRCprofile"'
    local poolname=erasurecodes
    ! grep "$expected" $dir/osd.json || return 1
    ceph osd pool create $poolname 12 12 erasure LRCprofile
    ceph --format json osd dump | tee $dir/osd.json
    grep "$expected" $dir/osd.json > /dev/null || return 1
    ceph osd crush rule ls | grep $poolname || return 1
}

function TEST_replicated_pool() {
    local dir=$1
    run_mon $dir a || return 1
    ceph osd pool create replicated 12 12 replicated replicated_rule || return 1
    ceph osd pool create replicated 12 12 replicated replicated_rule 2>&1 | \
        grep 'already exists' || return 1
    # default is replicated
    ceph osd pool create replicated1 12 12 || return 1
    # default is replicated, pgp_num = pg_num
    ceph osd pool create replicated2 12 || return 1
    ceph osd pool create replicated 12 12 erasure 2>&1 | \
        grep 'cannot change to type erasure' || return 1
}

function TEST_no_pool_delete() {
    local dir=$1
    run_mon $dir a || return 1
    ceph osd pool create foo 1 || return 1
    ceph tell mon.a injectargs -- --no-mon-allow-pool-delete || return 1
    ! ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
    ceph tell mon.a injectargs -- --mon-allow-pool-delete || return 1
    ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
}

function TEST_utf8_cli() {
    local dir=$1
    run_mon $dir a || return 1
    # Hopefully it's safe to include literal UTF-8 characters to test
    # the fix for http://tracker.ceph.com/issues/7387.  If it turns out
    # to not be OK (when is the default encoding *not* UTF-8?), maybe
    # the character '黄' can be replaced with the escape $'\xe9\xbb\x84'
    OLDLANG="$LANG"
    export LANG=en_US.UTF-8
    ceph osd pool create 黄 16 || return 1
    ceph osd lspools 2>&1 | \
        grep "黄" || return 1
    ceph -f json-pretty osd dump | \
        python3 -c "import json; import sys; json.load(sys.stdin)" || return 1
    ceph osd pool delete 黄 黄 --yes-i-really-really-mean-it
    export LANG="$OLDLANG"
}

function TEST_pool_create_rep_expected_num_objects() {
    local dir=$1
    setup $dir || return 1

    export CEPH_ARGS
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    # disable pg dir merge
    run_osd_filestore $dir 0 || return 1

    ceph osd pool create rep_expected_num_objects 64 64 replicated replicated_rule 100000 || return 1
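    # Why the directory count below matters (my understanding of filestore,
    # not something this file asserts): expected_num_objects lets filestore
    # pre-split each PG collection into hashed DIR_* subdirectories at
    # creation time instead of splitting lazily, so the PG head directory
    # should already hold more than a couple of entries.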
    # wait for the pg dirs to be created
    sleep 30
    ceph pg ls
    find ${dir}/0/current -ls
    ret=$(find ${dir}/0/current/1.0_head/ | grep DIR | wc -l)
    if [ "$ret" -le 2 ];
    then
        return 1
    else
        echo "TEST_pool_create_rep_expected_num_objects PASS"
    fi
}

function check_pool_priority() {
    local dir=$1
    shift
    local pools=$1
    shift
    local spread="$1"
    shift
    local results="$1"

    setup $dir || return 1

    EXTRA_OPTS="--debug_allow_any_pool_priority=true"
    export EXTRA_OPTS
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1

    # Add pool 0 too
    for i in $(seq 0 $pools)
    do
        num=$(expr $i + 1)
        ceph osd pool create test${num} 1 1
    done

    wait_for_clean || return 1
    for i in $(seq 0 $pools)
    do
        num=$(expr $i + 1)
        ceph osd pool set test${num} recovery_priority $(expr $i \* $spread)
    done

    #grep "recovery_priority.*pool set" out/mon.a.log

    ceph osd dump

    # Restart everything so the mon converts the priorities
    kill_daemons
    run_mon $dir a || return 1
    run_mgr $dir x || return 1
    activate_osd $dir 0 || return 1
    activate_osd $dir 1 || return 1
    activate_osd $dir 2 || return 1
    sleep 5

    grep convert $dir/mon.a.log
    ceph osd dump

    pos=1
    for i in $(ceph osd dump | grep ^pool | sed 's/.*recovery_priority //' | awk '{ print $1 }')
    do
        result=$(echo $results | awk "{ print \$${pos} }")
        # A value of 0 means unset, so the sed/awk pipeline picks up "pool" instead
        if test $result = "0"
        then
            result="pool"
        fi
        test "$result" = "$i" || return 1
        pos=$(expr $pos + 1)
    done
}
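
# A worked reading of the expected strings below (inferred from the test
# data, not from the monitor's conversion code): with 21 pools and spread=5
# the stored priorities are 0, 5, 10, ..., 100; after the restart the mon
# rescales them by 10/max = 10/100, so pool i ends up with int(5 * i / 10),
# i.e. 0 0 1 1 2 2 ... 10.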

function TEST_pool_pos_only_prio() {
    local dir=$1
    check_pool_priority $dir 20 5 "0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10" || return 1
}

function TEST_pool_neg_only_prio() {
    local dir=$1
    check_pool_priority $dir 20 -5 "0 0 -1 -1 -2 -2 -3 -3 -4 -4 -5 -5 -6 -6 -7 -7 -8 -8 -9 -9 -10" || return 1
}

function TEST_pool_both_prio() {
    local dir=$1
    check_pool_priority $dir 20 "5 - 50" "-10 -9 -8 -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10" || return 1
}

function TEST_pool_both_prio_no_neg() {
    local dir=$1
    check_pool_priority $dir 20 "2 - 4" "-4 -2 0 0 1 1 2 2 3 3 4 5 5 6 6 7 7 8 8 9 10" || return 1
}

function TEST_pool_both_prio_no_pos() {
    local dir=$1
    check_pool_priority $dir 20 "2 - 36" "-10 -9 -8 -8 -7 -7 -6 -6 -5 -5 -4 -3 -3 -2 -2 -1 -1 0 0 2 4" || return 1
}

main osd-pool-create "$@"

# Local Variables:
# compile-command: "cd ../../.. ; make -j4 && qa/standalone/mon/osd-pool-create.sh"
# End: