# Precondition: the autoscaler test needs at least 6 OSDs to produce
# meaningful PG targets.  Abort early (the elided exit/fi are restored
# here so a failed precondition actually stops the test).
NUM_OSDS=$(ceph osd ls | wc -l)
if [ "$NUM_OSDS" -lt 6 ]; then
    echo "test requires at least 6 OSDs"
    exit 1
fi
# Precondition: the test assumes a pristine cluster with no pools, since
# it later computes per-pool PG targets from the total pool count.
# Abort early (the elided exit/fi are restored here).
NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ "$NUM_POOLS" -gt 0 ]; then
    echo "test requires no preexisting pools"
    exit 1
fi
# NOTE(review): the two lines below are fragments of a retry/polling
# helper (a wait_for-style loop) whose enclosing definition is not
# visible in this chunk.  Presumably "$cmd" is the command string being
# polled and "$sec" is the remaining timeout in seconds, with the
# second test triggering a failure path when the timeout reaches zero
# — confirm against the full file.
if bash -c "$cmd" ; then
if [ $sec -eq 0 ]; then
# Round $1 to the nearest power of two.
# bc -l supplies l() (natural log); x = log2($1) is computed at bc's
# default -l scale, then scale=0 makes the (x+0.5)/1 division truncate,
# so the exponent is round(log2($1)) and the result is 2^that.
power2() {
    printf '%s\n' "x=l($1)/l(2); scale=0; 2^((x+0.5)/1)" | bc -l
}
36 ceph config
set mgr mgr
/pg_autoscaler
/sleep_interval
5
37 ceph mgr module
enable pg_autoscaler
40 ceph osd pool create a
16 --pg-num-min 4
41 ceph osd pool create b
16 --pg-num-min 2
42 ceph osd pool
set a pg_autoscale_mode on
43 ceph osd pool
set b pg_autoscale_mode on
45 # get num pools again since we created more pools
46 NUM_POOLS
=$
(ceph osd pool
ls |
wc -l)
48 # get profiles of pool a and b
49 PROFILE1
=$
(ceph osd pool autoscale-status |
grep 'a' |
grep -o -m 1 'scale-up\|scale-down' || true
)
50 PROFILE2
=$
(ceph osd pool autoscale-status |
grep 'b' |
grep -o -m 1 'scale-up\|scale-down' || true
)
# evaluate the default profile of pool a: freshly created pools are
# expected to use the scale-up profile.  The elided then/else/exit/fi
# are restored so a wrong profile fails the test instead of falling
# through.
if [[ $PROFILE1 = "scale-up" ]]
then
    echo "Success: pool a PROFILE is scale-up"
else
    echo "Error: a PROFILE is scale-down"
    exit 1
fi
# evaluate the default profile of pool b (same expectation as pool a).
# The elided then/else/exit/fi are restored so a wrong profile fails
# the test instead of falling through.
if [[ $PROFILE2 = "scale-up" ]]
then
    echo "Success: pool b PROFILE is scale-up"
else
    echo "Error: b PROFILE is scale-down"
    exit 1
fi
70 # This part of this code will now evaluate the accuracy of
73 # change to scale-down profile
74 ceph osd pool
set autoscale-profile scale-down
76 # get profiles of pool a and b
77 PROFILE1
=$
(ceph osd pool autoscale-status |
grep 'a' |
grep -o -m 1 'scale-up\|scale-down' || true
)
78 PROFILE2
=$
(ceph osd pool autoscale-status |
grep 'b' |
grep -o -m 1 'scale-up\|scale-down' || true
)
# evaluate that profile a is now scale-down after the global switch.
# The elided then/else/exit/fi are restored so a wrong profile fails
# the test instead of falling through.
if [[ $PROFILE1 = "scale-down" ]]
then
    echo "Success: pool a PROFILE is scale-down"
else
    echo "Error: a PROFILE is scale-up"
    exit 1
fi
# evaluate that the profile of b is now scale-down as well.
# The elided then/else/exit/fi are restored so a wrong profile fails
# the test instead of falling through.
if [[ $PROFILE2 = "scale-down" ]]
then
    echo "Success: pool b PROFILE is scale-down"
else
    echo "Error: b PROFILE is scale-up"
    exit 1
fi
99 POOL_SIZE_A
=$
(ceph osd pool get a size|
grep -Eo '[0-9]{1,4}')
100 POOL_SIZE_B
=$
(ceph osd pool get b size|
grep -Eo '[0-9]{1,4}')
102 # calculate target pg of each pools
103 TARGET_PG_A
=$
(power2 $
((($NUM_OSDS * 100)/($NUM_POOLS)/($POOL_SIZE_A))))
104 TARGET_PG_B
=$
(power2 $
((($NUM_OSDS * 100)/($NUM_POOLS)/($POOL_SIZE_B))))
106 # evaluate target_pg against pg num of each pools
107 wait_for
120 "ceph osd pool get a pg_num | grep $TARGET_PG_A"
108 wait_for
120 "ceph osd pool get b pg_num | grep $TARGET_PG_B"
111 ceph osd pool
set a target_size_ratio
5
112 ceph osd pool
set b target_size_ratio
1
114 APGS
=$
(ceph osd dump
-f json-pretty | jq
'.pools[0].pg_num_target')
115 BPGS
=$
(ceph osd dump
-f json-pretty | jq
'.pools[1].pg_num_target')
119 # small ratio change does not change pg_num
120 ceph osd pool
set a target_size_ratio
7
121 ceph osd pool
set b target_size_ratio
2
123 APGS2
=$
(ceph osd dump
-f json-pretty | jq
'.pools[0].pg_num_target')
124 BPGS2
=$
(ceph osd dump
-f json-pretty | jq
'.pools[1].pg_num_target')
125 test $APGS -eq $APGS2
126 test $BPGS -eq $BPGS2
129 ceph osd pool
set a target_size_bytes
1000000000000000
130 ceph osd pool
set b target_size_bytes
1000000000000000
131 ceph osd pool
set a target_size_ratio
0
132 ceph osd pool
set b target_size_ratio
0
133 wait_for
60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"
135 ceph osd pool
set a target_size_bytes
1000
136 ceph osd pool
set b target_size_bytes
1000
137 ceph osd pool
set a target_size_ratio
1
138 wait_for
60 "ceph health detail | grep POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO"
140 ceph osd pool
rm a a
--yes-i-really-really-mean-it
141 ceph osd pool
rm b b
--yes-i-really-really-mean-it