#!/bin/bash
# Smoke test for the mgr pg_autoscaler module: verifies the BULK flag in
# `ceph osd pool autoscale-status`, the autoscaler's pg_num targets, and
# the target_size_ratio / target_size_bytes health warnings.
#
# Set shell options with `set` rather than only in the shebang so they
# survive the script being invoked as `bash <script>`.
set -ex

# Precondition: the autoscaler math below assumes at least 6 OSDs.
NUM_OSDS=$(ceph osd ls | wc -l)
if [ "$NUM_OSDS" -lt 6 ]; then
    echo "test requires at least 6 OSDs"
    exit 1
fi

# Precondition: pool-count arithmetic assumes we start from zero pools.
NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ "$NUM_POOLS" -gt 0 ]; then
    echo "test requires no preexisting pools"
    exit 1
fi
# wait_for SECONDS CMD
#
# Re-run CMD (a shell command string) once per second until it succeeds,
# for at most SECONDS attempts.  Returns 0 on success; prints "failed"
# and returns 1 when the budget is exhausted.
function wait_for() {
    local sec=$1
    local cmd=$2

    while true ; do
        if bash -c "$cmd" ; then
            break
        fi
        sec=$((sec - 1))
        # -le (not -eq): a zero or negative budget must fail immediately
        # instead of decrementing past zero and looping forever.
        if [ "$sec" -le 0 ]; then
            echo failed
            return 1
        fi
        sleep 1
    done
    return 0
}
# eval_actual_expected_val ACTUAL EXPECTED
#
# Compare two values; print a Success line on match, otherwise print an
# Error line and abort the whole test with exit 1.
function eval_actual_expected_val() {
    local actual_value=$1
    local expected_value=$2
    # Quote the right-hand side: inside [[ ]] an unquoted RHS of `=` is
    # treated as a glob pattern, mis-comparing values containing * or ?.
    if [[ $actual_value = "$expected_value" ]]
    then
        echo "Success: " $actual_value "=" $expected_value
    else
        echo "Error: " $actual_value "!=" $expected_value
        exit 1
    fi
}

# enable the autoscaler module and slow its polling loop down
ceph config set mgr mgr/pg_autoscaler/sleep_interval 60
ceph mgr module enable pg_autoscaler
# ceph config set global osd_pool_default_pg_autoscale_mode on

# pg_num_min: one small non-bulk meta pool plus three bulk pools
ceph osd pool create meta0 16
for bulk_pool in bulk0 bulk1 bulk2; do
    ceph osd pool create "$bulk_pool" 16 --bulk
done
# turn autoscaling on for every pool
for pool in meta0 bulk0 bulk1 bulk2; do
    ceph osd pool set "$pool" pg_autoscale_mode on
done
# set pool size
for pool in meta0 bulk0 bulk1 bulk2; do
    ceph osd pool set "$pool" size 2
done
# get num pools again since we created more pools
NUM_POOLS=$(ceph osd pool ls | wc -l)

# get bulk flag of each pool through the command ceph osd pool autoscale-status
BULK_FLAG_1=$(ceph osd pool autoscale-status | grep 'meta0' | grep -o -m 1 'True\|False' || true)
BULK_FLAG_2=$(ceph osd pool autoscale-status | grep 'bulk0' | grep -o -m 1 'True\|False' || true)
BULK_FLAG_3=$(ceph osd pool autoscale-status | grep 'bulk1' | grep -o -m 1 'True\|False' || true)
BULK_FLAG_4=$(ceph osd pool autoscale-status | grep 'bulk2' | grep -o -m 1 'True\|False' || true)

# evaluate the accuracy of ceph osd pool autoscale-status specifically the `BULK` column.
# Quote the flags: grep may have matched nothing (|| true leaves the
# variable empty), and an unquoted empty expansion would silently drop
# the first argument, producing a confusing comparison.
eval_actual_expected_val "$BULK_FLAG_1" 'False'
eval_actual_expected_val "$BULK_FLAG_2" 'True'
eval_actual_expected_val "$BULK_FLAG_3" 'True'
eval_actual_expected_val "$BULK_FLAG_4" 'True'

# This part of this code will now evaluate the accuracy of the autoscaler
# get pool size
POOL_SIZE_1=$(ceph osd pool get meta0 size | grep -Eo '[0-9]{1,4}')
POOL_SIZE_2=$(ceph osd pool get bulk0 size | grep -Eo '[0-9]{1,4}')
POOL_SIZE_3=$(ceph osd pool get bulk1 size | grep -Eo '[0-9]{1,4}')
POOL_SIZE_4=$(ceph osd pool get bulk2 size | grep -Eo '[0-9]{1,4}')

# Calculate target pg of each pool.
# First pool is non-bulk so we do it first: since its capacity ratio is 0
# the meta pool keeps its current pg_num.
TARGET_PG_1=$(ceph osd pool get meta0 pg_num | grep -Eo '[0-9]{1,4}')
# Evaluate the arithmetic at assignment time instead of storing literal
# "a*b" strings that only worked because they were re-expanded later
# inside $((...)).
PG_LEFT=$((NUM_OSDS * 100))
NUM_POOLS_LEFT=$((NUM_POOLS - 1))
# The remaining pools are bulk and identically sized, so the calculation
# is straightforward: split the remaining PG budget evenly, divide by the
# replica size, and round to the nearest power of two.
TARGET_PG_2=$(power2 $((PG_LEFT / NUM_POOLS_LEFT / POOL_SIZE_2)))
TARGET_PG_3=$(power2 $((PG_LEFT / NUM_POOLS_LEFT / POOL_SIZE_3)))
TARGET_PG_4=$(power2 $((PG_LEFT / NUM_POOLS_LEFT / POOL_SIZE_4)))

# evaluate target_pg against pg num of each pool
wait_for 300 "ceph osd pool get meta0 pg_num | grep $TARGET_PG_1"
wait_for 300 "ceph osd pool get bulk0 pg_num | grep $TARGET_PG_2"
wait_for 300 "ceph osd pool get bulk1 pg_num | grep $TARGET_PG_3"
wait_for 300 "ceph osd pool get bulk2 pg_num | grep $TARGET_PG_4"

# target ratio: a large ratio on both pools should push pg_num_target up
ceph osd pool set meta0 target_size_ratio 5
ceph osd pool set bulk0 target_size_ratio 1
sleep 60
APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
[ "$APGS" -gt 100 ]
[ "$BPGS" -gt 10 ]

# a small ratio change must not change pg_num
ceph osd pool set meta0 target_size_ratio 7
ceph osd pool set bulk0 target_size_ratio 2
sleep 60
APGS2=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS2=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
[ "$APGS" -eq "$APGS2" ]
[ "$BPGS" -eq "$BPGS2" ]

# target_size: an oversized target_size_bytes on both pools must raise
# the overcommit health warning
for pool in meta0 bulk0; do
    ceph osd pool set "$pool" target_size_bytes 1000000000000000
done
for pool in meta0 bulk0; do
    ceph osd pool set "$pool" target_size_ratio 0
done
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"

# setting both a byte target and a ratio must raise its own warning
for pool in meta0 bulk0; do
    ceph osd pool set "$pool" target_size_bytes 1000
done
ceph osd pool set meta0 target_size_ratio 1
wait_for 60 "ceph health detail | grep POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO"

# tear down every pool this test created
for pool in meta0 bulk0 bulk1 bulk2; do
    ceph osd pool rm "$pool" "$pool" --yes-i-really-really-mean-it
done

echo OK