#!/bin/bash -ex

NUM_OSDS=$(ceph osd ls | wc -l)
if [ $NUM_OSDS -lt 6 ]; then
    echo "test requires at least 6 OSDs"
    exit 1
fi

NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ $NUM_POOLS -gt 0 ]; then
    echo "test requires no preexisting pools"
    exit 1
fi

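# wait_for: retry a shell command once per second for up to $1 seconds and
# return non-zero if it never succeeds, e.g. (illustrative only):
#   wait_for 60 "ceph health | grep HEALTH_OK"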
function wait_for() {
    local sec=$1
    local cmd=$2

    while true ; do
        if bash -c "$cmd" ; then
            break
        fi
        sec=$(( $sec - 1 ))
        if [ $sec -eq 0 ]; then
            echo failed
            return 1
        fi
        sleep 1
    done
    return 0
}

function power2() { echo "x=l($1)/l(2); scale=0; 2^((x+0.5)/1)" | bc -l;}
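# power2 rounds log2 of its argument to the nearest integer and prints 2 to
# that power, i.e. the nearest power of two on a log scale; e.g. power2 100
# prints 128 (log2(100) ~ 6.64 rounds to 7) and power2 48 prints 64.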

# enable the autoscaler module and shorten its sleep interval for the test
ceph config set mgr mgr/pg_autoscaler/sleep_interval 5
ceph mgr module enable pg_autoscaler

# pg_num_min
ceph osd pool create a 16 --pg-num-min 4
ceph osd pool create b 16 --pg-num-min 2
ceph osd pool set a pg_autoscale_mode on
ceph osd pool set b pg_autoscale_mode on
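# (optional sanity check, not part of the original pass/fail logic:
#  "ceph osd pool get a pg_num_min" should report 4 here)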

# refresh the pool count now that pools a and b exist
NUM_POOLS=$(ceph osd pool ls | wc -l)

# get the autoscale profile reported for pools a and b
PROFILE1=$(ceph osd pool autoscale-status | grep 'a' | grep -o -m 1 'scale-up\|scale-down' || true)
PROFILE2=$(ceph osd pool autoscale-status | grep 'b' | grep -o -m 1 'scale-up\|scale-down' || true)

# evaluate the default profile of pool a
if [[ $PROFILE1 = "scale-up" ]]
then
    echo "Success: pool a PROFILE is scale-up"
else
    echo "Error: pool a PROFILE is not scale-up"
    exit 1
fi

# evaluate the default profile of pool b
if [[ $PROFILE2 = "scale-up" ]]
then
    echo "Success: pool b PROFILE is scale-up"
else
    echo "Error: pool b PROFILE is not scale-up"
    exit 1
fi

# the rest of the test evaluates the scale-down profile

# switch the cluster to the scale-down profile
ceph osd pool set autoscale-profile scale-down

# get the autoscale profile reported for pools a and b again
PROFILE1=$(ceph osd pool autoscale-status | grep 'a' | grep -o -m 1 'scale-up\|scale-down' || true)
PROFILE2=$(ceph osd pool autoscale-status | grep 'b' | grep -o -m 1 'scale-up\|scale-down' || true)

# evaluate that the profile of pool a is now scale-down
if [[ $PROFILE1 = "scale-down" ]]
then
    echo "Success: pool a PROFILE is scale-down"
else
    echo "Error: pool a PROFILE is not scale-down"
    exit 1
fi

# evaluate that the profile of pool b is now scale-down
if [[ $PROFILE2 = "scale-down" ]]
then
    echo "Success: pool b PROFILE is scale-down"
else
    echo "Error: pool b PROFILE is not scale-down"
    exit 1
fi

# get the replica count (size) of each pool
POOL_SIZE_A=$(ceph osd pool get a size | grep -Eo '[0-9]{1,4}')
POOL_SIZE_B=$(ceph osd pool get b size | grep -Eo '[0-9]{1,4}')

# calculate the expected pg_num of each pool
TARGET_PG_A=$(power2 $((($NUM_OSDS * 100)/($NUM_POOLS)/($POOL_SIZE_A))))
TARGET_PG_B=$(power2 $((($NUM_OSDS * 100)/($NUM_POOLS)/($POOL_SIZE_B))))
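# For example, with 6 OSDs, 2 pools and 3x replication this works out to
# (6 * 100) / 2 / 3 = 100, and power2 100 = 128, so both pools would be
# expected to settle at pg_num 128 (actual values depend on the cluster).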

# wait for each pool's pg_num to converge on the expected value
wait_for 120 "ceph osd pool get a pg_num | grep $TARGET_PG_A"
wait_for 120 "ceph osd pool get b pg_num | grep $TARGET_PG_B"

# target ratio
ceph osd pool set a target_size_ratio 5
ceph osd pool set b target_size_ratio 1
sleep 10
APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
test $APGS -gt 100
test $BPGS -gt 10
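# With target_size_ratio 5 vs 1, pool a is expected to claim roughly 5/6 of
# the cluster's capacity and pool b roughly 1/6, so pool a's pg_num_target
# should end up much larger than pool b's (hence the asymmetric thresholds
# above).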

# small ratio change does not change pg_num
ceph osd pool set a target_size_ratio 7
ceph osd pool set b target_size_ratio 2
sleep 10
APGS2=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS2=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
test $APGS -eq $APGS2
test $BPGS -eq $BPGS2

# target_size
ceph osd pool set a target_size_bytes 1000000000000000
ceph osd pool set b target_size_bytes 1000000000000000
ceph osd pool set a target_size_ratio 0
ceph osd pool set b target_size_ratio 0
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"
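# 1000000000000000 bytes is ~1 PB per pool; on a QA-sized cluster the sum of
# the two targets presumably exceeds the raw capacity, which is why the
# POOL_TARGET_SIZE_BYTES_OVERCOMMITTED warning is expected above.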

# setting both target_size_bytes and a nonzero target_size_ratio on pool a
# is contradictory and should raise POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO
ceph osd pool set a target_size_bytes 1000
ceph osd pool set b target_size_bytes 1000
ceph osd pool set a target_size_ratio 1
wait_for 60 "ceph health detail | grep POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO"

# clean up
ceph osd pool rm a a --yes-i-really-really-mean-it
ceph osd pool rm b b --yes-i-really-really-mean-it

echo OK