#!/bin/bash -ex

NUM_OSDS=$(ceph osd ls | wc -l)
if [ "$NUM_OSDS" -lt 6 ]; then
    echo "test requires at least 6 OSDs"
    exit 1
fi

NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ "$NUM_POOLS" -gt 0 ]; then
    echo "test requires no preexisting pools"
    exit 1
fi

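# wait_for SECONDS CMD: run CMD roughly once a second until it succeeds
# or SECONDS attempts have failed.  On timeout it returns non-zero,
# which aborts the whole test because of the -e in the shebang.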
function wait_for() {
    local sec=$1
    local cmd=$2

    while true ; do
        if bash -c "$cmd" ; then
            break
        fi
        sec=$(( sec - 1 ))
        if [ "$sec" -eq 0 ]; then
            echo failed
            return 1
        fi
        sleep 1
    done
    return 0
}

# enable
ceph config set mgr mgr/pg_autoscaler/sleep_interval 5
ceph mgr module enable pg_autoscaler
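
# The short sleep_interval makes the autoscaler re-evaluate the pools
# every few seconds, so the waits below converge quickly.  While
# debugging, `ceph osd pool autoscale-status` shows the autoscaler's
# view of each pool.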

# pg_num_min
ceph osd pool create a 16 --pg-num-min 4
ceph osd pool create b 16 --pg-num-min 2
ceph osd pool set a pg_autoscale_mode on
ceph osd pool set b pg_autoscale_mode on

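# Both pools are empty and have no size hints yet, so the autoscaler
# should shrink them down to their pg_num_min floors (4 and 2).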
wait_for 120 "ceph osd pool get a pg_num | grep 4"
wait_for 120 "ceph osd pool get b pg_num | grep 2"

# target ratio
ceph osd pool set a target_size_ratio .5
ceph osd pool set b target_size_ratio .1
sleep 30
APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num')
BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num')
test "$APGS" -gt 100
test "$BPGS" -gt 10
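# With mon_target_pg_per_osd at its default of 100 and at least 6 OSDs,
# the cluster-wide PG budget is on the order of several hundred PGs; a
# target_size_ratio of .5 for 'a' and .1 for 'b' should therefore push
# their pg_num well past the loose 100/10 thresholds checked above.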

# small ratio change does not change pg_num
ceph osd pool set a target_size_ratio .7
ceph osd pool set b target_size_ratio .2
sleep 10
ceph osd pool get a pg_num | grep "$APGS"
ceph osd pool get b pg_num | grep "$BPGS"

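# The two ratios below sum to 1.8, i.e. the pools claim 180% of the
# cluster, so the autoscaler should flag the overcommit and report the
# 1.8 total in the health detail.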
# too much ratio
ceph osd pool set a target_size_ratio .9
ceph osd pool set b target_size_ratio .9
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_RATIO_OVERCOMMITTED"
wait_for 60 "ceph health detail | grep 1.8"
ceph osd pool set a target_size_ratio 0
ceph osd pool set b target_size_ratio 0

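# Same overcommit check for absolute size targets: 10^15 bytes is far
# more than any test cluster provides, so the _BYTES variant of the
# warning should appear.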
# target_size
ceph osd pool set a target_size_bytes 1000000000000000
ceph osd pool set b target_size_bytes 1000000000000000
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"
ceph osd pool set a target_size_bytes 0
ceph osd pool set b target_size_bytes 0

ceph osd pool rm a a --yes-i-really-really-mean-it
ceph osd pool rm b b --yes-i-really-really-mean-it

echo OK