#!/bin/bash -ex

NUM_OSDS=$(ceph osd ls | wc -l)
if [ $NUM_OSDS -lt 6 ]; then
    echo "test requires at least 6 OSDs"
    exit 1
fi

NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ $NUM_POOLS -gt 0 ]; then
    echo "test requires no preexisting pools"
    exit 1
fi

# wait_for SECS CMD: run CMD once per second until it succeeds, giving up
# and returning failure after SECS attempts.
function wait_for() {
    local sec=$1
    local cmd=$2

    while true ; do
        if bash -c "$cmd" ; then
            break
        fi
        sec=$(( $sec - 1 ))
        if [ $sec -eq 0 ]; then
            echo failed
            return 1
        fi
        sleep 1
    done
    return 0
}

# power2 N: round N to the nearest power of two.
function power2() { echo "x=l($1)/l(2); scale=0; 2^((x+0.5)/1)" | bc -l; }
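# How it works: with bc -l, l($1)/l(2) is log2(N); adding 0.5 and then
# dividing by 1 at scale=0 truncates to the nearest integer, so e.g.
# power2 100 -> 128 and power2 40 -> 32.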

# enable
ceph config set mgr mgr/pg_autoscaler/sleep_interval 5
ceph mgr module enable pg_autoscaler
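# Note: sleep_interval is the autoscaler's polling period (60s by default);
# dropping it to 5s lets the wait_for checks below converge quickly.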

# pg_num_min
ceph osd pool create a 16 --pg-num-min 4
ceph osd pool create b 16 --pg-num-min 2
ceph osd pool set a pg_autoscale_mode on
ceph osd pool set b pg_autoscale_mode on

# get num pools again since we created more pools
NUM_POOLS=$(ceph osd pool ls | wc -l)

# get pool size
POOL_SIZE_A=$(ceph osd pool get a size | grep -Eo '[0-9]{1,4}')
POOL_SIZE_B=$(ceph osd pool get b size | grep -Eo '[0-9]{1,4}')

# calculate the target pg_num of each pool
TARGET_PG_A=$(power2 $((($NUM_OSDS * 100)/($NUM_POOLS)/($POOL_SIZE_A))))
TARGET_PG_B=$(power2 $((($NUM_OSDS * 100)/($NUM_POOLS)/($POOL_SIZE_B))))
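# This mirrors the autoscaler's heuristic: roughly 100 PGs per OSD
# (mon_target_pg_per_osd), divided across pools and by replica count, rounded
# to the nearest power of two. E.g. with 6 OSDs, 2 pools, and 3x replication:
# (6 * 100) / 2 / 3 = 100, and power2 100 -> 128.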

# evaluate target_pg against the pg_num of each pool
wait_for 120 "ceph osd pool get a pg_num | grep $TARGET_PG_A"
wait_for 120 "ceph osd pool get b pg_num | grep $TARGET_PG_B"

# target ratio
ceph osd pool set a target_size_ratio 5
ceph osd pool set b target_size_ratio 1
sleep 10
APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
test $APGS -gt 100
test $BPGS -gt 10
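# pg_num_target is the value the autoscaler is steering pg_num toward. With
# 6 OSDs and (assuming) 3x replication there are ~200 PGs to hand out; a 5:1
# ratio split should put pool a well above 100 and pool b above 10.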

# small ratio change does not change pg_num
ceph osd pool set a target_size_ratio 7
ceph osd pool set b target_size_ratio 2
sleep 10
APGS2=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS2=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
test $APGS -eq $APGS2
test $BPGS -eq $BPGS2
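# The autoscaler only acts when the ideal pg_num differs from the current
# target by more than its threshold (a factor of 3 by default), so nudging
# the ratios from 5:1 to 7:2 should leave both pg_num_target values alone.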

# target_size
ceph osd pool set a target_size_bytes 1000000000000000
ceph osd pool set b target_size_bytes 1000000000000000
ceph osd pool set a target_size_ratio 0
ceph osd pool set b target_size_ratio 0
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"
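# 1000000000000000 bytes is ~1 PB per pool, far beyond what a 6-OSD test
# cluster can hold, so the POOL_TARGET_SIZE_BYTES_OVERCOMMITTED warning
# should appear.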

ceph osd pool set a target_size_bytes 1000
ceph osd pool set b target_size_bytes 1000
ceph osd pool set a target_size_ratio 1
wait_for 60 "ceph health detail | grep POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO"
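# Pool a now has both target_size_bytes and target_size_ratio set, which is
# ambiguous (the ratio wins, per the Ceph docs), so a health warning is raised.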

ceph osd pool rm a a --yes-i-really-really-mean-it
ceph osd pool rm b b --yes-i-really-really-mean-it

echo OK