#!/bin/bash
#
# Copyright (C) 2017 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#

source $CEPH_ROOT/qa/standalone/ceph-helpers.sh

function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7130" # git grep '\<7130\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    #
    # Disable auto-class, so we can inject device class manually below
    #
    CEPH_ARGS+="--osd-class-update-on-start=false "

    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
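    # With no arguments, every TEST_* function defined in this file is run,
    # each between a fresh setup/teardown cycle.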
    for func in $funcs ; do
        setup $dir || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}

function add_something() {
    local dir=$1
    local obj=${2:-SOMETHING}

    local payload=ABCDEF
    echo $payload > $dir/ORIGINAL
    rados --pool rbd put $obj $dir/ORIGINAL || return 1
}

function get_osds_up() {
    local poolname=$1
    local objectname=$2

    local osds=$(ceph --format xml osd map $poolname $objectname 2>/dev/null | \
        $XMLSTARLET sel -t -m "//up/osd" -v . -o ' ')
    # get rid of the trailing space
    echo $osds
}
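
# Illustrative usage of get_osds_up (the exact output depends on the CRUSH
# mapping; with the three-OSD clusters created below it is "1 2 0"):
#   get_osds_up rbd SOMETHING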

function TEST_classes() {
    local dir=$1

    run_mon $dir a || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1
    create_rbd_pool || return 1

    test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1
    add_something $dir SOMETHING || return 1

    #
    # osd.0 has class ssd and the rule is modified
    # to only take ssd devices.
    #
    ceph osd getcrushmap > $dir/map || return 1
    crushtool -d $dir/map -o $dir/map.txt || return 1
    ${SED} -i \
        -e '/device 0 osd.0/s/$/ class ssd/' \
        -e '/step take default/s/$/ class ssd/' \
        $dir/map.txt || return 1
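    # After this edit the decompiled map is expected to contain lines like:
    #   device 0 osd.0 class ssd
    #   step take default class ssd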
    crushtool -c $dir/map.txt -o $dir/map-new || return 1
    ceph osd setcrushmap -i $dir/map-new || return 1

    #
    # There can only be one mapping since there is only
    # one device with the ssd class.
    #
    ok=false
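    # Poll with exponential backoff (up to ~510s in total) until the
    # object maps to the lone ssd OSD.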
    for delay in 2 4 8 16 32 64 128 256 ; do
        if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0" ; then
            ok=true
            break
        fi
        sleep $delay
        ceph osd dump # for debugging purposes
        ceph pg dump # for debugging purposes
    done
    $ok || return 1
    #
    # Writing keeps working because the pool is min_size 1 by
    # default.
    #
    add_something $dir SOMETHING_ELSE || return 1

    #
    # Sanity check that the ssd class indeed generated a shadow
    # bucket with a name including ~ssd.
    #
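    # (Shadow buckets are named <bucket>~<class>, e.g. default~ssd.)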
    ceph osd crush dump | grep -q '~ssd' || return 1
}

function TEST_set_device_class() {
    local dir=$1

    TEST_classes $dir || return 1

    ceph osd crush set-device-class ssd osd.0 || return 1
    ceph osd crush class ls-osd ssd | grep 0 || return 1
    ceph osd crush set-device-class ssd osd.1 || return 1
    ceph osd crush class ls-osd ssd | grep 1 || return 1
    ceph osd crush set-device-class ssd 0 1 || return 1 # should be idempotent

    ok=false
    for delay in 2 4 8 16 32 64 128 256 ; do
        if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0 1" ; then
            ok=true
            break
        fi
        sleep $delay
        ceph osd crush dump
        ceph osd dump # for debugging purposes
        ceph pg dump # for debugging purposes
    done
    $ok || return 1
}

function TEST_mon_classes() {
    local dir=$1

    run_mon $dir a || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1
    create_rbd_pool || return 1

    test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1
    add_something $dir SOMETHING || return 1

    # test rm-device-class
    ceph osd crush set-device-class aaa osd.0 || return 1
    ceph osd tree | grep -q 'aaa' || return 1
    ceph osd crush dump | grep -q '~aaa' || return 1
    ceph osd crush tree --show-shadow | grep -q '~aaa' || return 1
    ceph osd crush set-device-class bbb osd.1 || return 1
    ceph osd tree | grep -q 'bbb' || return 1
    ceph osd crush dump | grep -q '~bbb' || return 1
    ceph osd crush tree --show-shadow | grep -q '~bbb' || return 1
    ceph osd crush set-device-class ccc osd.2 || return 1
    ceph osd tree | grep -q 'ccc' || return 1
    ceph osd crush dump | grep -q '~ccc' || return 1
    ceph osd crush tree --show-shadow | grep -q '~ccc' || return 1
    ceph osd crush rm-device-class 0 || return 1
    ceph osd tree | grep -q 'aaa' && return 1
    ceph osd crush dump | grep -q '~aaa' && return 1
    ceph osd crush tree --show-shadow | grep -q '~aaa' && return 1
    ceph osd crush class ls | grep -q 'aaa' && return 1
    ceph osd crush rm-device-class 1 || return 1
    ceph osd tree | grep -q 'bbb' && return 1
    ceph osd crush dump | grep -q '~bbb' && return 1
    ceph osd crush tree --show-shadow | grep -q '~bbb' && return 1
    ceph osd crush class ls | grep -q 'bbb' && return 1
    ceph osd crush rm-device-class 2 || return 1
    ceph osd tree | grep -q 'ccc' && return 1
    ceph osd crush dump | grep -q '~ccc' && return 1
    ceph osd crush tree --show-shadow | grep -q '~ccc' && return 1
    ceph osd crush class ls | grep -q 'ccc' && return 1
    ceph osd crush set-device-class asdf all || return 1
    ceph osd tree | grep -q 'asdf' || return 1
    ceph osd crush dump | grep -q '~asdf' || return 1
    ceph osd crush tree --show-shadow | grep -q '~asdf' || return 1
    ceph osd crush rm-device-class all || return 1
    ceph osd tree | grep -q 'asdf' && return 1
    ceph osd crush dump | grep -q '~asdf' && return 1
    ceph osd crush tree --show-shadow | grep -q '~asdf' && return 1

    # test 'class rm' automatically recycles shadow trees
    ceph osd crush set-device-class asdf 0 1 2 || return 1
    ceph osd tree | grep -q 'asdf' || return 1
    ceph osd crush dump | grep -q '~asdf' || return 1
    ceph osd crush tree --show-shadow | grep -q '~asdf' || return 1
    ceph osd crush class ls | grep -q 'asdf' || return 1
    ceph osd crush class rm asdf || return 1
    ceph osd tree | grep -q 'asdf' && return 1
    ceph osd crush dump | grep -q '~asdf' && return 1
    ceph osd crush tree --show-shadow | grep -q '~asdf' && return 1
    ceph osd crush class ls | grep -q 'asdf' && return 1

    ceph osd crush set-device-class abc osd.2 || return 1
    ceph osd crush move osd.2 root=foo rack=foo-rack host=foo-host || return 1
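    # (The awk below keeps the 'ceph osd tree' row whose ID column is 2 and
    # whose CLASS column still reads "abc", i.e. the class survived the move.)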
    out=`ceph osd tree | awk '$1 == 2 && $2 == "abc" {print $0}'`
    if [ "$out" == "" ]; then
        return 1
    fi

    # verify that 'crush move' also maintains the shadow (~abc) hierarchy
    ceph osd crush dump | grep -q 'foo~abc' || return 1
    ceph osd crush tree --show-shadow | grep -q 'foo~abc' || return 1
    ceph osd crush dump | grep -q 'foo-rack~abc' || return 1
    ceph osd crush tree --show-shadow | grep -q 'foo-rack~abc' || return 1
    ceph osd crush dump | grep -q 'foo-host~abc' || return 1
    ceph osd crush tree --show-shadow | grep -q 'foo-host~abc' || return 1
    ceph osd crush rm-device-class osd.2 || return 1
    ceph osd crush dump | grep -q 'foo~abc' && return 1
    ceph osd crush tree --show-shadow | grep -q 'foo~abc' && return 1
    ceph osd crush dump | grep -q 'foo-rack~abc' && return 1
    ceph osd crush tree --show-shadow | grep -q 'foo-rack~abc' && return 1
    ceph osd crush dump | grep -q 'foo-host~abc' && return 1
    ceph osd crush tree --show-shadow | grep -q 'foo-host~abc' && return 1
    # restore the class, so we can continue to test create-replicated
    ceph osd crush set-device-class abc osd.2 || return 1

    ceph osd crush rule create-replicated foo-rule foo host abc || return 1
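    # (arguments: <rule-name> <root> <failure-domain-type> <device-class>)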

    # test class_is_in_use
    ceph osd crush set-device-class hdd osd.0 || return 1
    ceph osd crush set-device-class ssd osd.1 || return 1
    ceph osd crush rule create-replicated foo-hdd1 default host hdd || return 1
    ceph osd crush rule create-replicated foo-hdd2 default host hdd || return 1
    ceph osd crush rule create-replicated foo-ssd default host ssd || return 1
    expect_failure $dir EBUSY ceph osd crush class rm hdd || return 1
    expect_failure $dir EBUSY ceph osd crush class rm ssd || return 1
    ceph osd crush rule rm foo-hdd1 || return 1
    expect_failure $dir EBUSY ceph osd crush class rm hdd || return 1 # still referenced by foo-hdd2
    ceph osd crush rule rm foo-hdd2 || return 1
    ceph osd crush rule rm foo-ssd || return 1
    ceph osd crush class rm hdd || return 1
    ceph osd crush class rm ssd || return 1
    expect_failure $dir EBUSY ceph osd crush class rm abc || return 1 # still referenced by foo-rule
    ceph osd crush rule rm foo-rule || return 1
    ceph osd crush class rm abc || return 1

    # test that set-device-class refuses to implicitly change an existing class
    ceph osd crush set-device-class hdd osd.0 || return 1
    expect_failure $dir EBUSY ceph osd crush set-device-class nvme osd.0 || return 1
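    # (changing the class would require 'ceph osd crush rm-device-class osd.0'
    # followed by a new set-device-class)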
}

main crush-classes "$@"

# Local Variables:
# compile-command: "cd ../../../build ; ln -sf ../src/ceph-disk/ceph_disk/main.py bin/ceph-disk && make -j4 && ../src/test/crush/crush-classes.sh"
# End: