#!/usr/bin/env bash
#
# Copyright (C) 2017 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
# Pull in the standalone test helpers (setup, teardown, run_mon, run_osd,
# create_rbd_pool, expect_failure, main, ...).
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
    # Entry point called by main(): start each requested test (or every
    # TEST_* function when none is named) inside a fresh cluster in $dir.
    # Globals written: CEPH_MON, CEPH_ARGS
    # Arguments: $1 - test directory; remaining args - test function names
    # Returns: 0 on success, 1 as soon as any test fails
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7130" # git grep '\<7130\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "
    #
    # Disable auto-class, so we can inject device class manually below
    #
    CEPH_ARGS+="--osd-class-update-on-start=false "

    # Default to every function in scope whose name starts with TEST_.
    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        setup $dir || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}
function add_something() {
    # Write a small object into the rbd pool so OSD mappings can be probed.
    # Arguments: $1 - test directory; $2 - object name (default SOMETHING)
    # Returns: 0 on success, 1 if the rados put fails
    local dir=$1
    local obj=${2:-SOMETHING}

    # The garbled original referenced $payload without assigning it;
    # restore the payload definition so the object has known content.
    local payload=ABCDEF
    echo $payload > $dir/ORIGINAL

    rados --pool rbd put $obj $dir/ORIGINAL || return 1
}
function get_osds_up() {
    # Print the space-separated list of up OSDs that an object maps to.
    # Arguments: $1 - pool name; $2 - object name
    # Outputs: OSD ids on stdout, e.g. "1 2 0"
    local poolname=$1
    local objectname=$2

    local osds=$(ceph --format xml osd map $poolname $objectname 2>/dev/null | \
        $XMLSTARLET sel -t -m "//up/osd" -v . -o ' ')
    # get rid of the trailing space (unquoted expansion re-splits the words)
    echo $osds
}
function TEST_classes() {
    # Verify that restricting a CRUSH rule to the ssd device class remaps
    # objects onto the single ssd-classed OSD.
    # Arguments: $1 - test directory
    local dir=$1

    run_mon $dir a || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1
    create_rbd_pool || return 1

    test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1
    add_something $dir SOMETHING || return 1

    #
    # osd.0 has class ssd and the rule is modified
    # to only take ssd devices.
    #
    ceph osd getcrushmap > $dir/map || return 1
    crushtool -d $dir/map -o $dir/map.txt || return 1
    # The garbled original lost the command owning these -e expressions;
    # restore the in-place edit of the decompiled map.
    sed -i \
        -e '/device 0 osd.0/s/$/ class ssd/' \
        -e '/step take default/s/$/ class ssd/' \
        $dir/map.txt || return 1
    crushtool -c $dir/map.txt -o $dir/map-new || return 1
    ceph osd setcrushmap -i $dir/map-new || return 1

    #
    # There can only be one mapping since there only is
    # one device with ssd class.
    #
    local success=false
    for delay in 2 4 8 16 32 64 128 256 ; do
        if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0" ; then
            success=true
            break
        fi
        sleep $delay
    done
    $success || return 1
    ceph osd dump # for debugging purposes
    ceph pg dump # for debugging purposes

    #
    # Writing keeps working because the pool is min_size 1 by
    # default.
    #
    add_something $dir SOMETHING_ELSE || return 1

    #
    # Sanity check that the rule indeed has ssd
    # generated bucket with a name including ~ssd.
    #
    ceph osd crush dump | grep -q '~ssd' || return 1
}
function TEST_set_device_class() {
    # Verify 'ceph osd crush set-device-class': explicit per-OSD assignment,
    # class listing, idempotency, and the resulting remap onto both ssd OSDs.
    # Arguments: $1 - test directory
    local dir=$1

    # Reuse TEST_classes to build the cluster with an ssd-only rule.
    TEST_classes $dir || return 1

    ceph osd crush set-device-class ssd osd.0 || return 1
    ceph osd crush class ls-osd ssd | grep 0 || return 1
    ceph osd crush set-device-class ssd osd.1 || return 1
    ceph osd crush class ls-osd ssd | grep 1 || return 1
    ceph osd crush set-device-class ssd 0 1 || return 1 # should be idempotent

    # Both osd.0 and osd.1 now carry class ssd, so the ssd-restricted rule
    # should eventually map the object to "0 1".
    local ok=false
    for delay in 2 4 8 16 32 64 128 256 ; do
        if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0 1" ; then
            ok=true
            break
        fi
        sleep $delay
    done
    $ok || return 1
    ceph osd dump # for debugging purposes
    ceph pg dump # for debugging purposes
}
function TEST_mon_classes() {
    # Exercise the monitor-side device-class commands: set-device-class,
    # rm-device-class (per OSD and 'all'), class ls / ls-osd, shadow-tree
    # creation on 'crush move', rule references keeping a class alive, and
    # the EBUSY failure when changing an OSD's class without removing it.
    # Arguments: $1 - test directory
    local dir=$1

    run_mon $dir a || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1
    create_rbd_pool || return 1

    test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1
    add_something $dir SOMETHING || return 1

    # test rm-device-class
    ceph osd crush set-device-class aaa osd.0 || return 1
    ceph osd tree | grep -q 'aaa' || return 1
    ceph osd crush dump | grep -q '~aaa' || return 1
    ceph osd crush tree --show-shadow | grep -q '~aaa' || return 1
    ceph osd crush set-device-class bbb osd.1 || return 1
    ceph osd tree | grep -q 'bbb' || return 1
    ceph osd crush dump | grep -q '~bbb' || return 1
    ceph osd crush tree --show-shadow | grep -q '~bbb' || return 1
    ceph osd crush set-device-class ccc osd.2 || return 1
    ceph osd tree | grep -q 'ccc' || return 1
    ceph osd crush dump | grep -q '~ccc' || return 1
    ceph osd crush tree --show-shadow | grep -q '~ccc' || return 1
    ceph osd crush rm-device-class 0 || return 1
    ceph osd tree | grep -q 'aaa' && return 1
    ceph osd crush class ls | grep -q 'aaa' && return 1 # class 'aaa' should gone
    ceph osd crush rm-device-class 1 || return 1
    ceph osd tree | grep -q 'bbb' && return 1
    ceph osd crush class ls | grep -q 'bbb' && return 1 # class 'bbb' should gone
    ceph osd crush rm-device-class 2 || return 1
    ceph osd tree | grep -q 'ccc' && return 1
    ceph osd crush class ls | grep -q 'ccc' && return 1 # class 'ccc' should gone
    ceph osd crush set-device-class asdf all || return 1
    ceph osd tree | grep -q 'asdf' || return 1
    ceph osd crush dump | grep -q '~asdf' || return 1
    ceph osd crush tree --show-shadow | grep -q '~asdf' || return 1
    ceph osd crush rule create-replicated asdf-rule default host asdf || return 1
    ceph osd crush rm-device-class all || return 1
    ceph osd tree | grep -q 'asdf' && return 1
    ceph osd crush class ls | grep -q 'asdf' || return 1 # still referenced by asdf-rule

    ceph osd crush set-device-class abc osd.2 || return 1
    ceph osd crush move osd.2 root=foo rack=foo-rack host=foo-host || return 1
    out=`ceph osd tree |awk '$1 == 2 && $2 == "abc" {print $0}'`
    if [ "$out" == "" ]; then
        return 1
    fi

    # verify 'crush move' too
    ceph osd crush dump | grep -q 'foo~abc' || return 1
    ceph osd crush tree --show-shadow | grep -q 'foo~abc' || return 1
    ceph osd crush dump | grep -q 'foo-rack~abc' || return 1
    ceph osd crush tree --show-shadow | grep -q 'foo-rack~abc' || return 1
    ceph osd crush dump | grep -q 'foo-host~abc' || return 1
    ceph osd crush tree --show-shadow | grep -q 'foo-host~abc' || return 1
    ceph osd crush rm-device-class osd.2 || return 1
    # restore class, so we can continue to test create-replicated
    ceph osd crush set-device-class abc osd.2 || return 1
    ceph osd crush rule create-replicated foo-rule foo host abc || return 1

    # test set-device-class implicitly change class
    ceph osd crush set-device-class hdd osd.0 || return 1
    expect_failure $dir EBUSY ceph osd crush set-device-class nvme osd.0 || return 1
}
main crush-classes "$@"

# Local Variables:
# compile-command: "cd ../../../build ; ln -sf ../src/ceph-disk/ceph_disk/main.py bin/ceph-disk && make -j4 && ../src/test/crush/crush-classes.sh"
# End: