# Copyright (C) 2017 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
# Locate the build environment and pull in the shared test helpers
# (setup/teardown, run_mon, run_osd, expect_failure, main, ...).
# Quoted so the script keeps working when its path contains spaces.
source "$(dirname "$0")/../detect-build-env-vars.sh"
source "$CEPH_ROOT/qa/workunits/ceph-helpers.sh"
function run() {
    # Entry point invoked by main(): run every TEST_* function defined in
    # this file (or only the tests named in the extra arguments).
    # $1 - scratch directory for the test cluster; remaining args optional.
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7130" # git grep '\<7130\>' : there must be only one
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "

    # Default to the list of TEST_* functions currently defined in the shell.
    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    local func
    for func in $funcs ; do
        setup $dir || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}
function add_something() {
    # Store a small object in pool 'rbd' so CRUSH mappings can be probed.
    # $1 - scratch directory, $2 - object name (defaults to SOMETHING).
    local dir=$1
    local obj=${2:-SOMETHING}

    # Payload content is arbitrary; only the write's success matters.
    local payload=ABCDEF
    echo $payload > $dir/ORIGINAL
    rados --pool rbd put $obj $dir/ORIGINAL || return 1
}
function get_osds_up() {
    # Print the ids of the OSDs in the 'up' set for the mapping of
    # object $2 in pool $1, space separated (e.g. "1 2 0").
    local poolname=$1
    local objectname=$2

    local osds=$(ceph --format xml osd map $poolname $objectname 2>/dev/null | \
        $XMLSTARLET sel -t -m "//up/osd" -v . -o ' ')
    # echo is deliberately unquoted: word splitting gets rid of the
    # trailing space produced by the xmlstarlet template above.
    echo $osds
}
function TEST_classes() {
    # Verify that tagging a device with class 'ssd' and restricting the
    # CRUSH rule to that class confines new mappings to that device.
    local dir=$1

    run_mon $dir a || return 1
    run_osd $dir 0 || return 1
    run_osd $dir 1 || return 1
    run_osd $dir 2 || return 1

    test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1
    add_something $dir SOMETHING || return 1

    #
    # osd.0 has class ssd and the rule is modified
    # to only take ssd devices.
    #
    ceph osd getcrushmap > $dir/map || return 1
    crushtool -d $dir/map -o $dir/map.txt || return 1
    sed -i \
        -e '/device 0 osd.0/s/$/ class ssd/' \
        -e '/step take default/s/$/ class ssd/' \
        $dir/map.txt || return 1
    crushtool -c $dir/map.txt -o $dir/map-new || return 1
    ceph osd setcrushmap -i $dir/map-new || return 1

    #
    # There can only be one mapping since there only is
    # one device with ssd class.  Poll with exponential backoff
    # until the new map takes effect.
    #
    local success=false
    local delay
    for delay in 2 4 8 16 32 64 128 256 ; do
        if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0" ; then
            success=true
            break
        fi
        sleep $delay
    done
    $success || return 1
    ceph osd dump # for debugging purposes
    ceph pg dump # for debugging purposes

    #
    # Writing keeps working because the pool is min_size 1 by default.
    #
    add_something $dir SOMETHING_ELSE || return 1

    #
    # Sanity check that the rule indeed has ssd
    # generated bucket with a name including ~ssd.
    #
    ceph osd crush dump | grep -q '~ssd' || return 1
}
function TEST_set_device_class() {
    # Starting from the TEST_classes scenario, also tag osd.1 as 'ssd'
    # and verify the mapping expands to both ssd devices.
    local dir=$1

    TEST_classes $dir || return 1

    ceph osd crush set-device-class osd.0 ssd || return 1
    ceph osd crush set-device-class osd.1 ssd || return 1

    # Poll with exponential backoff until the new class assignment
    # is reflected in the mapping.
    local success=false
    local delay
    for delay in 2 4 8 16 32 64 128 256 ; do
        if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0 1" ; then
            success=true
            break
        fi
        sleep $delay
    done
    $success || return 1
    ceph osd dump # for debugging purposes
    ceph pg dump # for debugging purposes
}
function TEST_mon_classes() {
    # Exercise the monitor-side CRUSH class commands:
    # create (idempotent), ls, rm, and rm of a non-existent class.
    local dir=$1

    run_mon $dir a || return 1
    ceph osd crush class create CLASS || return 1
    ceph osd crush class create CLASS || return 1 # idempotent
    ceph osd crush class ls | grep CLASS || return 1
    ceph osd crush class rm CLASS || return 1
    expect_failure $dir ENOENT ceph osd crush class rm CLASS || return 1
}
# main (from ceph-helpers.sh) creates the scratch directory and calls run().
# Must be a single command line: splitting the arguments onto a separate
# line would execute "$@" as its own (bogus) command.
main crush-classes "$@"
# Local Variables:
# compile-command: "cd ../../../build ; ln -sf ../src/ceph-disk/ceph_disk/main.py bin/ceph-disk && make -j4 && ../src/test/crush/crush-classes.sh"
# End: