# Preconditions: the autoscaler expectations below assume a cluster with at
# least 6 OSDs and no preexisting pools.
# NOTE(review): the exit/fi lines were elided in this chunk; reconstructed —
# confirm against the original script.
NUM_OSDS=$(ceph osd ls | wc -l)
if [ "$NUM_OSDS" -lt 6 ]; then
    echo "test requires at least 6 OSDs"
    exit 1
fi

NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ "$NUM_POOLS" -gt 0 ]; then
    echo "test requires no preexisting pools"
    exit 1
fi
# wait_for <seconds> <cmd>: poll <cmd> once per second until it succeeds or
# the timeout expires. Returns 0 on success, 1 on timeout.
# NOTE(review): only the two inner conditionals of this function were visible
# in this chunk; the surrounding loop and locals are reconstructed — confirm
# against the original script.
function wait_for() {
    local sec=$1
    local cmd=$2
    while true; do
        if bash -c "$cmd" ; then
            break
        fi
        sec=$(( sec - 1 ))
        if [ "$sec" -eq 0 ]; then
            echo "wait_for timed out: $cmd" >&2
            return 1
        fi
        sleep 1
    done
    return 0
}
33 function power2
() { echo "x=l($1)/l(2); scale=0; 2^((x+0.5)/1)" |
bc -l;}
# eval_actual_expected_val <actual> <expected>: print a Success line when the
# two values match; otherwise print an Error line and abort the whole test.
# NOTE(review): the actual_value assignment, exit, and closing brace were
# elided in this chunk; reconstructed — confirm against the original script.
function eval_actual_expected_val() {
    local actual_value=$1
    local expected_value=$2
    if [[ $actual_value = $expected_value ]]
    then
        # Quoted args keep echo's output identical while surviving empty values.
        echo "Success: " "$actual_value" "=" "$expected_value"
    else
        echo "Error: " "$actual_value" "!=" "$expected_value"
        # A mismatch means the autoscaler produced the wrong answer; fail hard.
        exit 1
    fi
}
# Make the autoscaler tick frequently so the test converges quickly, then
# turn the module on.
ceph config set mgr mgr/pg_autoscaler/sleep_interval 60
ceph mgr module enable pg_autoscaler
# ceph config set global osd_pool_default_pg_autoscale_mode on
# Create one regular (meta) pool and three bulk pools, then enable
# autoscaling and pin size 2 on all four so pg targets are deterministic.
ceph osd pool create meta0 16
for pool in bulk0 bulk1 bulk2; do
    ceph osd pool create "$pool" 16 --bulk
done

for pool in meta0 bulk0 bulk1 bulk2; do
    ceph osd pool set "$pool" pg_autoscale_mode on
done

for pool in meta0 bulk0 bulk1 bulk2; do
    ceph osd pool set "$pool" size 2
done
# Refresh the pool count (four pools were just created) and read the BULK
# column of `ceph osd pool autoscale-status` for each pool.
NUM_POOLS=$(ceph osd pool ls | wc -l)

# bulk_flag <pool>: first True/False token on the pool's status row;
# empty (not an error) when the pool is missing from the output.
bulk_flag() {
    ceph osd pool autoscale-status | grep "$1" | grep -o -m 1 'True\|False' || true
}

BULK_FLAG_1=$(bulk_flag 'meta0')
BULK_FLAG_2=$(bulk_flag 'bulk0')
BULK_FLAG_3=$(bulk_flag 'bulk1')
BULK_FLAG_4=$(bulk_flag 'bulk2')
# Evaluate the accuracy of the `BULK` column of ceph osd pool autoscale-status:
# meta0 was created without --bulk, the other three with --bulk.
# Quote the flags so an empty grep result still reaches
# eval_actual_expected_val as an (empty) first argument instead of vanishing
# and shifting the expected value into $1.
eval_actual_expected_val "$BULK_FLAG_1" 'False'
eval_actual_expected_val "$BULK_FLAG_2" 'True'
eval_actual_expected_val "$BULK_FLAG_3" 'True'
eval_actual_expected_val "$BULK_FLAG_4" 'True'
# This part evaluates the accuracy of the autoscaler itself.
# Capture the replica count (size) of each pool; it divides the raw pg
# budget in the target calculations below.
POOL_SIZE_1=$(ceph osd pool get meta0 size | grep -Eo '[0-9]{1,4}')
POOL_SIZE_2=$(ceph osd pool get bulk0 size | grep -Eo '[0-9]{1,4}')
POOL_SIZE_3=$(ceph osd pool get bulk1 size | grep -Eo '[0-9]{1,4}')
POOL_SIZE_4=$(ceph osd pool get bulk2 size | grep -Eo '[0-9]{1,4}')
# Calculate the target pg_num of each pool.
# meta0 is non-bulk with capacity ratio 0, so its pg_num should stay put.
TARGET_PG_1=$(ceph osd pool get meta0 pg_num | grep -Eo '[0-9]{1,4}')

# Use real arithmetic: the old form `NUM_POOLS_LEFT=$NUM_POOLS-1` stored the
# literal string "N-1" and only worked because it happened to be
# re-parenthesized inside $(( )) later.
NUM_POOLS_LEFT=$((NUM_POOLS - 1))

# The remaining pools are all bulk and evenly weighted, so each gets an equal
# share of the remaining pg budget, rounded to the nearest power of two.
# NOTE(review): PG_LEFT is assumed to be assigned in lines elided from this
# chunk — confirm against the full script.
TARGET_PG_2=$(power2 $((PG_LEFT / NUM_POOLS_LEFT / POOL_SIZE_2)))
TARGET_PG_3=$(power2 $((PG_LEFT / NUM_POOLS_LEFT / POOL_SIZE_3)))
TARGET_PG_4=$(power2 $((PG_LEFT / NUM_POOLS_LEFT / POOL_SIZE_4)))
# Wait (up to 5 minutes per pool) for the autoscaler to converge each pool's
# pg_num onto its computed target.
wait_for 300 "ceph osd pool get meta0 pg_num | grep $TARGET_PG_1"
wait_for 300 "ceph osd pool get bulk0 pg_num | grep $TARGET_PG_2"
wait_for 300 "ceph osd pool get bulk1 pg_num | grep $TARGET_PG_3"
wait_for 300 "ceph osd pool get bulk2 pg_num | grep $TARGET_PG_4"
# Give the two pools target size ratios and capture the resulting pg targets.
# NOTE(review): any wait between setting the ratios and reading the targets
# lives in lines elided from this chunk — confirm against the full script.
ceph osd pool set meta0 target_size_ratio 5
ceph osd pool set bulk0 target_size_ratio 1
APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')

# A small ratio change must not change pg_num.
ceph osd pool set meta0 target_size_ratio 7
ceph osd pool set bulk0 target_size_ratio 2
APGS2=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS2=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
# Quote the operands: if jq emits null/empty, test fails with a clear
# integer-expression error instead of a missing-operand syntax error.
test "$APGS" -eq "$APGS2"
test "$BPGS" -eq "$BPGS2"
# Overcommit: absurdly large target_size_bytes on both pools (with the
# ratios cleared) must raise POOL_TARGET_SIZE_BYTES_OVERCOMMITTED.
ceph osd pool set meta0 target_size_bytes 1000000000000000
ceph osd pool set bulk0 target_size_bytes 1000000000000000
ceph osd pool set meta0 target_size_ratio 0
ceph osd pool set bulk0 target_size_ratio 0
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"

# Setting both target_size_bytes and a nonzero ratio on the same pool must
# raise POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO.
ceph osd pool set meta0 target_size_bytes 1000
ceph osd pool set bulk0 target_size_bytes 1000
ceph osd pool set meta0 target_size_ratio 1
wait_for 60 "ceph health detail | grep POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO"
# Tear down every pool the test created (pool name is repeated as the
# confirmation argument required by `ceph osd pool rm`).
for pool in meta0 bulk0 bulk1 bulk2; do
    ceph osd pool rm "$pool" "$pool" --yes-i-really-really-mean-it
done