ceph/qa/workunits/mon/pg_autoscaler.sh
#!/bin/bash -ex

NUM_OSDS=$(ceph osd ls | wc -l)
if [ $NUM_OSDS -lt 6 ]; then
    echo "test requires at least 6 OSDs"
    exit 1
fi

NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ $NUM_POOLS -gt 0 ]; then
    echo "test requires no preexisting pools"
    exit 1
fi

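# Poll the given command once per second until it succeeds; give up and
# return failure after $sec attempts.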
function wait_for() {
    local sec=$1
    local cmd=$2

    while true ; do
        if bash -c "$cmd" ; then
            break
        fi
        sec=$(( $sec - 1 ))
        if [ $sec -eq 0 ]; then
            echo failed
            return 1
        fi
        sleep 1
    done
    return 0
}

# enable
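# Run the autoscaler on a short interval so the pg_num adjustments below
# land well within the wait_for timeouts.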
ceph config set mgr mgr/pg_autoscaler/sleep_interval 5
ceph mgr module enable pg_autoscaler

# pg_num_min
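# Both pools start at 16 PGs and hold no data, so the autoscaler should
# shrink each one down to its pg_num_min floor (4 and 2).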
ceph osd pool create a 16 --pg-num-min 4
ceph osd pool create b 16 --pg-num-min 2
ceph osd pool set a pg_autoscale_mode on
ceph osd pool set b pg_autoscale_mode on

wait_for 120 "ceph osd pool get a pg_num | grep 4"
wait_for 120 "ceph osd pool get b pg_num | grep 2"

# target ratio
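# target_size_ratio declares the fraction of cluster capacity a pool is
# expected to consume, so pg_num should now grow well past the minimums.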
ceph osd pool set a target_size_ratio .5
ceph osd pool set b target_size_ratio .1
sleep 30
APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num')
BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num')
test $APGS -gt 100
test $BPGS -gt 10

# small ratio change does not change pg_num
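# A modest bump in the ratios should be absorbed without a pg_num change;
# the autoscaler only acts when the current value is far from the ideal one.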
ceph osd pool set a target_size_ratio .7
ceph osd pool set b target_size_ratio .2
sleep 10
ceph osd pool get a pg_num | grep $APGS
ceph osd pool get b pg_num | grep $BPGS

# too much ratio
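# Ratios summing past 1.0 (.9 + .9 = 1.8 here) overcommit the cluster, so a
# POOL_TARGET_SIZE_RATIO_OVERCOMMITTED warning reporting 1.8 is expected.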
ceph osd pool set a target_size_ratio .9
ceph osd pool set b target_size_ratio .9
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_RATIO_OVERCOMMITTED"
wait_for 60 "ceph health detail | grep 1.8"
ceph osd pool set a target_size_ratio 0
ceph osd pool set b target_size_ratio 0

# target_size
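# The same overcommit check applies to absolute sizes: target_size_bytes
# totals beyond the cluster's capacity should raise
# POOL_TARGET_SIZE_BYTES_OVERCOMMITTED.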
ceph osd pool set a target_size_bytes 1000000000000000
ceph osd pool set b target_size_bytes 1000000000000000
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"
ceph osd pool set a target_size_bytes 0
ceph osd pool set b target_size_bytes 0

ceph osd pool rm a a --yes-i-really-really-mean-it
ceph osd pool rm b b --yes-i-really-really-mean-it

echo OK