# ceph/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled
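#
# Disabled suite that checks ceph-mon memory-target compliance: the monitor is
# given a 128 MiB 'mon memory target' and its RSS is sampled while a long
# radosbench workload and OSD thrashing run in parallel (intent inferred from
# the test binaries and settings below).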
roles:
- - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - osd.4
  - osd.5
  - osd.6
  - osd.7
  - osd.8
  - osd.9
  - osd.10
  - osd.11
  - osd.12
  - osd.13
  - osd.14
  - client.0
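# Single-node layout: mon.a, mgr.x, 15 OSDs (osd.0-osd.14) and client.0 all
# run on one host.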
openstack:
  - volumes: # attached to each instance
      count: 4
      size: 1 # GB
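# The overrides below shrink the memory budgets (per the inline comments):
# 134217728 B = 128 MiB mon memory target, 67108864 B = 64 MiB rocksdb cache,
# 1610612736 B = 1.5 GiB osd memory target.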
overrides:
  ceph:
    conf:
      mon:
        mon memory target: 134217728 # reduced to 128_M
        rocksdb cache size: 67108864 # reduced to 64_M
        mon osd cache size: 100000
        mon osd cache size min: 134217728
      osd:
        osd memory target: 1610612736 # reduced to 1.5_G
        osd objectstore: bluestore
        debug bluestore: 20
        osd scrub min interval: 60
        osd scrub max interval: 120
        osd max backfills: 9

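# Task flow: install the named branch, bring up the cluster, then run the RSS
# logger, OSD thrashing and radosbench load in parallel; the final exec
# verifies that ceph-mon stayed within the 134217728-byte memory target.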
tasks:
- install:
    branch: wip-sseshasa2-testing-2019-07-30-1825 # change as appropriate
- ceph:
    create_rbd_pool: false
    pre-mgr-commands:
      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
    log-whitelist:
      - overall HEALTH_
      - \(OSDMAP_FLAGS\)
      - \(OSD_
      - \(PG_
      - \(POOL_
      - \(CACHE_POOL_
      - \(OBJECT_
      - \(SLOW_OPS\)
      - \(REQUEST_SLOW\)
      - \(TOO_FEW_PGS\)
      - slow request
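# The names under 'parallel' refer to the top-level task lists (log-mon-rss,
# stress-tasks, benchload) defined after the tasks section.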
- interactive:
- parallel:
    - log-mon-rss
    - stress-tasks
    - benchload
- exec:
    client.0:
      - "ceph_test_mon_memory_target 134217728" # mon memory target
      - "ceph_test_mon_rss_usage 134217728"
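# Background sampler: appends ceph-mon RSS to a log every 300 s for the whole
# benchmark window (sleep 37860 s = 21 x 1800 s of radosbench plus 60 s slack).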
log-mon-rss:
- background_exec:
    client.0:
      - while true
      - do /usr/bin/ceph_test_log_rss_usage ceph-mon >> /var/log/ceph/ceph-mon-rss-usage.log
      - sleep 300 # log rss usage every 5 mins. May be modified accordingly
      - done
- exec:
    client.0:
      - sleep 37860 # sum total of the radosbench test times below plus 60 secs
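# 21 radosbench runs of 1800 s each: 21 x 1800 = 37800 s, i.e. the 10.5 hrs
# noted below.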
benchload: # The total radosbench test below translates to 10.5 hrs
  - full_sequential:
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
    - radosbench:
        clients: [client.0]
        time: 1800
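# OSD thrashing alongside the benchmark: OSDs are marked down/out, bdev crashes
# are injected, pg_num is grown and pg-upmap entries are thrashed, presumably to
# generate osdmap churn for the mon while it is under the reduced memory target.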
stress-tasks:
- thrashosds:
    op_delay: 1
    bdev_inject_crash: 1
    bdev_inject_crash_probability: .8
    chance_down: 80
    chance_pgnum_grow: 3
    chance_pgpnum_fix: 1
    chance_thrash_cluster_full: 0
    chance_thrash_pg_upmap: 3
    chance_thrash_pg_upmap_items: 3
    min_in: 2