#!/bin/bash -ex

NUM_OSDS=$(ceph osd ls | wc -l)
if [ $NUM_OSDS -lt 6 ]; then
  echo "test requires at least 6 OSDs"
  exit 1
fi

NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ $NUM_POOLS -gt 0 ]; then
  echo "test requires no preexisting pools"
  exit 1
fi

function wait_for() {
  local sec=$1
  local cmd=$2

  while true ; do
    if bash -c "$cmd" ; then
      break
    fi
    sec=$(( $sec - 1 ))
    if [ $sec -eq 0 ]; then
      echo failed
      return 1
    fi
    sleep 1
  done
  return 0
}

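# Round to the nearest power of two in log2 space: bc computes x = log2($1),
# then, with scale=0, (x+0.5)/1 truncates to an integer, i.e. round(x).
# Worked examples: power2 100 -> 128 (log2(100) ~ 6.64), power2 75 -> 64.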
function power2() { echo "x=l($1)/l(2); scale=0; 2^((x+0.5)/1)" | bc -l;}

function eval_actual_expected_val() {
  local actual_value=$1
  local expected_value=$2
  if [[ $actual_value = $expected_value ]]
  then
    echo "Success: " $actual_value "=" $expected_value
  else
    echo "Error: " $actual_value "!=" $expected_value
    exit 1
  fi
}

# enable
ceph config set mgr mgr/pg_autoscaler/sleep_interval 60
ceph mgr module enable pg_autoscaler
# ceph config set global osd_pool_default_pg_autoscale_mode on
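# Note: sleep_interval controls how often the autoscaler re-evaluates pools;
# pinning it to 60s keeps the fixed `sleep 60` waits below in step with it.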

# pg_num_min
ceph osd pool create meta0 16
ceph osd pool create bulk0 16 --bulk
ceph osd pool create bulk1 16 --bulk
ceph osd pool create bulk2 16 --bulk
ceph osd pool set meta0 pg_autoscale_mode on
ceph osd pool set bulk0 pg_autoscale_mode on
ceph osd pool set bulk1 pg_autoscale_mode on
ceph osd pool set bulk2 pg_autoscale_mode on
# set pool size
ceph osd pool set meta0 size 2
ceph osd pool set bulk0 size 2
ceph osd pool set bulk1 size 2
ceph osd pool set bulk2 size 2

# get num pools again since we created more pools
NUM_POOLS=$(ceph osd pool ls | wc -l)

# get the bulk flag of each pool via `ceph osd pool autoscale-status`;
# BULK is the only True/False column in a row, so grep -o picks it out
BULK_FLAG_1=$(ceph osd pool autoscale-status | grep 'meta0' | grep -o -m 1 'True\|False' || true)
BULK_FLAG_2=$(ceph osd pool autoscale-status | grep 'bulk0' | grep -o -m 1 'True\|False' || true)
BULK_FLAG_3=$(ceph osd pool autoscale-status | grep 'bulk1' | grep -o -m 1 'True\|False' || true)
BULK_FLAG_4=$(ceph osd pool autoscale-status | grep 'bulk2' | grep -o -m 1 'True\|False' || true)

# evaluate the accuracy of `ceph osd pool autoscale-status`, specifically the BULK column

eval_actual_expected_val $BULK_FLAG_1 'False'
eval_actual_expected_val $BULK_FLAG_2 'True'
eval_actual_expected_val $BULK_FLAG_3 'True'
eval_actual_expected_val $BULK_FLAG_4 'True'

# Now evaluate the accuracy of the autoscaler itself.

# get pool size
POOL_SIZE_1=$(ceph osd pool get meta0 size | grep -Eo '[0-9]{1,4}')
POOL_SIZE_2=$(ceph osd pool get bulk0 size | grep -Eo '[0-9]{1,4}')
POOL_SIZE_3=$(ceph osd pool get bulk1 size | grep -Eo '[0-9]{1,4}')
POOL_SIZE_4=$(ceph osd pool get bulk2 size | grep -Eo '[0-9]{1,4}')

# Calculate the target pg_num of each pool.
# The first pool is non-bulk, so handle it first: since its capacity
# ratio is 0, the meta pool keeps its current pg_num.

TARGET_PG_1=$(ceph osd pool get meta0 pg_num | grep -Eo '[0-9]{1,4}')
PG_LEFT=$((NUM_OSDS * 100))
NUM_POOLS_LEFT=$((NUM_POOLS - 1))
# The remaining pools are all bulk and evenly weighted, so the
# calculation is straightforward.
TARGET_PG_2=$(power2 $((PG_LEFT / NUM_POOLS_LEFT / POOL_SIZE_2)))
TARGET_PG_3=$(power2 $((PG_LEFT / NUM_POOLS_LEFT / POOL_SIZE_3)))
TARGET_PG_4=$(power2 $((PG_LEFT / NUM_POOLS_LEFT / POOL_SIZE_4)))
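# Worked example (assuming the minimum 6-OSD cluster): PG_LEFT = 600,
# NUM_POOLS_LEFT = 3, pool size = 2, so 600/3/2 = 100 and power2 100 = 128,
# i.e. each bulk pool should converge on pg_num 128.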

# evaluate target_pg against the pg_num of each pool
wait_for 300 "ceph osd pool get meta0 pg_num | grep $TARGET_PG_1"
wait_for 300 "ceph osd pool get bulk0 pg_num | grep $TARGET_PG_2"
wait_for 300 "ceph osd pool get bulk1 pg_num | grep $TARGET_PG_3"
wait_for 300 "ceph osd pool get bulk2 pg_num | grep $TARGET_PG_4"

# target ratio
ceph osd pool set meta0 target_size_ratio 5
ceph osd pool set bulk0 target_size_ratio 1
sleep 60
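# Target ratios are normalized against each other, so meta0 should now claim
# roughly 5/6 of the PG budget and bulk0 roughly 1/6 (an approximation; the
# exact split also depends on the other pools), pushing pg_num_target well
# above the thresholds checked below.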
APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
test $APGS -gt 100
test $BPGS -gt 10

# small ratio change does not change pg_num
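# The autoscaler only adjusts pg_num when the ideal value differs from the
# current one by more than its threshold (a factor of 3 by default), so a
# modest ratio bump should leave pg_num_target untouched.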
ceph osd pool set meta0 target_size_ratio 7
ceph osd pool set bulk0 target_size_ratio 2
sleep 60
APGS2=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS2=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
test $APGS -eq $APGS2
test $BPGS -eq $BPGS2

# target_size
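# Claiming 1 PB for each of two size-2 pools should far exceed the raw
# capacity of a test cluster, so the cluster should flag the pools as
# overcommitted.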
ceph osd pool set meta0 target_size_bytes 1000000000000000
ceph osd pool set bulk0 target_size_bytes 1000000000000000
ceph osd pool set meta0 target_size_ratio 0
ceph osd pool set bulk0 target_size_ratio 0
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"

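# Setting both target_size_bytes and a non-zero target_size_ratio on the
# same pool is contradictory and should raise a dedicated health warning.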
ceph osd pool set meta0 target_size_bytes 1000
ceph osd pool set bulk0 target_size_bytes 1000
ceph osd pool set meta0 target_size_ratio 1
wait_for 60 "ceph health detail | grep POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO"

ceph osd pool rm meta0 meta0 --yes-i-really-really-mean-it
ceph osd pool rm bulk0 bulk0 --yes-i-really-really-mean-it
ceph osd pool rm bulk1 bulk1 --yes-i-really-really-mean-it
ceph osd pool rm bulk2 bulk2 --yes-i-really-really-mean-it

echo OK