# Precondition: the autoscaler sizing expectations below assume a
# cluster with at least 6 OSDs. Skip the test on smaller clusters.
# NOTE(review): the closing exit/fi were lost in extraction and are
# reconstructed here (exit 0 = skip, not fail) — confirm upstream.
NUM_OSDS=$(ceph osd ls | wc -l)
if [ "$NUM_OSDS" -lt 6 ]; then
    echo "test requires at least 6 OSDs"
    exit 0
fi
# Precondition: the cluster must have no pools, because the test
# later addresses pools positionally (jq '.pools[0]' / '.pools[1]').
# NOTE(review): the closing exit/fi were lost in extraction and are
# reconstructed here (exit 1 = hard failure) — confirm upstream.
NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ "$NUM_POOLS" -gt 0 ]; then
    echo "test requires no preexisting pools"
    exit 1
fi
# wait_for SECONDS CMD
#
# Re-run CMD (a shell command string, executed via `bash -c`) once per
# second until it succeeds or SECONDS attempts are exhausted.
# Returns: 0 as soon as CMD succeeds, 1 on timeout (after printing
# "failed" as a diagnostic).
# NOTE(review): only the two `if` lines of this helper survived the
# extraction; the loop scaffolding is reconstructed from those lines
# and the call sites (`wait_for 120 "..."`) — confirm against the
# original file.
wait_for() {
    local sec=$1
    local cmd=$2
    while true; do
        if bash -c "$cmd" ; then
            break
        fi
        sec=$((sec - 1))
        if [ $sec -eq 0 ]; then
            echo failed
            return 1
        fi
        sleep 1
    done
    return 0
}
# Speed up the autoscaler's polling (5s instead of the default) so the
# wait_for timeouts below are realistic, then enable the mgr module.
ceph config set mgr mgr/pg_autoscaler/sleep_interval 5
ceph mgr module enable pg_autoscaler
# Create two pools with a deliberately oversized pg_num (16) but
# different pg-num-min floors, enable autoscaling on both, and wait
# for the autoscaler to shrink each pool down to its floor.
ceph osd pool create a 16 --pg-num-min 4
ceph osd pool create b 16 --pg-num-min 2
ceph osd pool set a pg_autoscale_mode on
ceph osd pool set b pg_autoscale_mode on
wait_for 120 "ceph osd pool get a pg_num | grep 4"
wait_for 120 "ceph osd pool get b pg_num | grep 2"
# Assign target size ratios and record the pg_num the autoscaler
# settles on for each pool. Pools are addressed by creation order in
# the osd dump: .pools[0] is 'a', .pools[1] is 'b' (valid because the
# precondition above required an empty cluster).
ceph osd pool set a target_size_ratio .5
ceph osd pool set b target_size_ratio .1
APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num')
BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num')
# A small ratio change must not change pg_num: each pool's pg_num
# should still match the value captured above.
ceph osd pool set a target_size_ratio .7
ceph osd pool set b target_size_ratio .2
ceph osd pool get a pg_num | grep "$APGS"
ceph osd pool get b pg_num | grep "$BPGS"
# Overcommit target_size_ratio (0.9 + 0.9 = 1.8 > 1.0): expect the
# RATIO_OVERCOMMITTED health warning, with the 1.8 total reported in
# the detail, then clear both ratios again.
ceph osd pool set a target_size_ratio .9
ceph osd pool set b target_size_ratio .9
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_RATIO_OVERCOMMITTED"
wait_for 60 "ceph health detail | grep 1.8"
ceph osd pool set a target_size_ratio 0
ceph osd pool set b target_size_ratio 0
# Overcommit target_size_bytes the same way: expect the
# BYTES_OVERCOMMITTED health warning, then clear the bytes targets.
ceph osd pool set a target_size_bytes 1000000000000000
ceph osd pool set b target_size_bytes 1000000000000000
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"
ceph osd pool set a target_size_bytes 0
# BUG FIX: the original reset target_size_ratio on pool b here (a
# no-op — b's ratio was already cleared above), leaving pool b's
# target_size_bytes set and the warning able to persist.
ceph osd pool set b target_size_bytes 0
# Clean up: remove both test pools (name must be given twice plus the
# confirmation flag for pool deletion).
ceph osd pool rm a a --yes-i-really-really-mean-it
ceph osd pool rm b b --yes-i-really-really-mean-it