Feature: Ceph Cluster Dashboard

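# ceph_health_status encodes overall cluster health as reported by the MGR:
# 0 = HEALTH_OK, 1 = HEALTH_WARN, 2 = HEALTH_ERR. The scenario below feeds a
# WARN value and expects the panel to echo the raw series back unchanged.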
Scenario: "Test cluster health"
  Given the following series:
    | metrics | values |
    | ceph_health_status{job="ceph",cluster="mycluster"} | 1 |
  Then Grafana panel `Ceph health status` with legend `EMPTY` shows:
    | metrics | values |
    | ceph_health_status{job="ceph",cluster="mycluster"} | 1 |

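# ALERTS{alertname=..., alertstate=...} is the synthetic series Prometheus
# exposes for every active alert. The two scenarios below presumably count
# firing alerts of the matching severity scoped to cluster="mycluster", which
# is why the series from "someothercluster" is left out of the totals.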
Scenario: "Test Firing Alerts Warning"
  Given the following series:
    | metrics | values |
    | ALERTS{alertstate="firing",alertname="Ceph.1", severity="warning", cluster="mycluster"} | 1 |
    | ALERTS{alertstate="firing",alertname="Ceph.1", severity="critical", cluster="someothercluster"} | 1 |
    | ALERTS{alertstate="firing",alertname="Ceph.2", severity="critical", cluster="mycluster"} | 1 |
  Then Grafana panel `Firing Alerts` with legend `Warning` shows:
    | metrics | values |
    | {} | 1 |

Scenario: "Test Firing Alerts Critical"
  Given the following series:
    | metrics | values |
    | ALERTS{alertstate="firing",alertname="Ceph.1", severity="warning", cluster="mycluster"} | 1 |
    | ALERTS{alertstate="firing",alertname="Ceph.1", severity="warning", cluster="someothercluster"} | 1 |
    | ALERTS{alertstate="firing",alertname="Ceph.2", severity="critical", cluster="mycluster"} | 1 |
  Then Grafana panel `Firing Alerts` with legend `Critical` shows:
    | metrics | values |
    | {} | 1 |

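# The expected 0.3 is presumably the available ratio,
# (total - used) / total = (100 - 70) / 100.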
Scenario: "Test Available Capacity"
  Given the following series:
    | metrics | values |
    | ceph_cluster_total_bytes{job="ceph",cluster="mycluster"} | 100 |
    | ceph_cluster_total_used_bytes{job="ceph",cluster="mycluster"} | 70 |
  Then Grafana panel `Available Capacity` with legend `EMPTY` shows:
    | metrics | values |
    | {job="ceph",cluster="mycluster"} | 0.3 |

Scenario: "Test Cluster Capacity"
  Given the following series:
    | metrics | values |
    | ceph_cluster_total_bytes{job="ceph",cluster="mycluster"} | 100 |
  Then Grafana panel `Cluster Capacity` with legend `EMPTY` shows:
    | metrics | values |
    | ceph_cluster_total_bytes{job="ceph",cluster="mycluster"} | 100 |

Scenario: "Test Used Capacity"
  Given the following series:
    | metrics | values |
    | ceph_cluster_total_used_bytes{job="ceph",cluster="mycluster"} | 100 |
  Then Grafana panel `Used Capacity` with legend `EMPTY` shows:
    | metrics | values |
    | ceph_cluster_total_used_bytes{job="ceph",cluster="mycluster"} | 100 |

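# The per-OSD op counters below increase monotonically, so these panels
# presumably take a PromQL rate() and sum it across OSDs; the bare `{}` in
# the expectation means every label has been aggregated away. The same
# pattern recurs in the Cluster Throughput and IOPS scenarios further down.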
Scenario: "Test Write Throughput"
  Given the following series:
    | metrics | values |
    | ceph_osd_op_w_in_bytes{job="ceph", cluster="mycluster", osd="osd.0"} | 500 500 500 |
    | ceph_osd_op_w_in_bytes{job="ceph", cluster="mycluster", osd="osd.1"} | 500 120 110 |
  Then Grafana panel `Write Throughput` with legend `EMPTY` shows:
    | metrics | values |
    | {} | 2 |

Scenario: "Test Write IOPS"
  Given the following series:
    | metrics | values |
    | ceph_osd_op_w{job="ceph",cluster="mycluster", osd="osd.0"} | 500 500 500 |
    | ceph_osd_op_w{job="ceph",cluster="mycluster", osd="osd.1"} | 500 120 110 |
  Then Grafana panel `Write IOPS` with legend `EMPTY` shows:
    | metrics | values |
    | {} | 2 |

Scenario: "Test Read Throughput"
  Given the following series:
    | metrics | values |
    | ceph_osd_op_r_out_bytes{job="ceph", cluster="mycluster", osd="osd.0"} | 500 500 500 |
    | ceph_osd_op_r_out_bytes{job="ceph", cluster="mycluster", osd="osd.1"} | 500 120 110 |
  Then Grafana panel `Read Throughput` with legend `EMPTY` shows:
    | metrics | values |
    | {} | 2 |

Scenario: "Test Read IOPS"
  Given the following series:
    | metrics | values |
    | ceph_osd_op_r{job="ceph", cluster="mycluster", osd="osd.0"} | 500 500 500 |
    | ceph_osd_op_r{job="ceph", cluster="mycluster", osd="osd.1"} | 500 120 110 |
  Then Grafana panel `Read IOPS` with legend `EMPTY` shows:
    | metrics | values |
    | {} | 2 |

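# ceph_osd_in and ceph_osd_up are per-OSD 0/1 gauges. The OSDs panel legends
# below presumably map to: All = count of ceph_osd_metadata series,
# In/Up = count of series at 1, Out/Down = count of series at 0.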
Scenario: "Test OSDs All"
  Given the following series:
    | metrics | values |
    | ceph_osd_metadata{job="ceph", cluster="mycluster", osd="osd.0"} | 1 |
    | ceph_osd_metadata{job="ceph", cluster="mycluster", osd="osd.2"} | 1 |
    | ceph_osd_metadata{job="ceph", cluster="mycluster", osd="osd.3"} | 1 |
  Then Grafana panel `OSDs` with legend `All` shows:
    | metrics | values |
    | {} | 3 |

Scenario: "Test OSDs In"
  Given the following series:
    | metrics | values |
    | ceph_osd_in{job="ceph", cluster="mycluster", osd="osd.0"} | 1 |
    | ceph_osd_in{job="ceph", cluster="mycluster", osd="osd.1"} | 1 |
    | ceph_osd_in{job="ceph", cluster="mycluster", osd="osd.2"} | 1 |
  Then Grafana panel `OSDs` with legend `In` shows:
    | metrics | values |
    | {} | 3 |

Scenario: "Test OSDs Out"
  Given the following series:
    | metrics | values |
    | ceph_osd_in{job="ceph", cluster="mycluster", osd="osd.0"} | 1 |
    | ceph_osd_in{job="ceph", cluster="mycluster", osd="osd.1"} | 0 |
    | ceph_osd_in{job="ceph", cluster="mycluster", osd="osd.2"} | 0 |
  Then Grafana panel `OSDs` with legend `Out` shows:
    | metrics | values |
    | {} | 2 |

Scenario: "Test OSDs Up"
  Given the following series:
    | metrics | values |
    | ceph_osd_up{job="ceph", cluster="mycluster", osd="osd.0"} | 1 |
    | ceph_osd_up{job="ceph", cluster="mycluster", osd="osd.1"} | 0 |
    | ceph_osd_up{job="ceph", cluster="mycluster", osd="osd.2"} | 0 |
  Then Grafana panel `OSDs` with legend `Up` shows:
    | metrics | values |
    | {} | 1 |

Scenario: "Test OSDs Down"
  Given the following series:
    | metrics | values |
    | ceph_osd_up{job="ceph", cluster="mycluster", osd="osd.0"} | 1 |
    | ceph_osd_up{job="ceph", cluster="mycluster", osd="osd.1"} | 0 |
    | ceph_osd_up{job="ceph", cluster="mycluster", osd="osd.2"} | 0 |
  Then Grafana panel `OSDs` with legend `Down` shows:
    | metrics | values |
    | {} | 2 |

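# ceph_mgr_status is presumably a 0/1 gauge (1 = active, 0 = standby), so the
# scenarios below expect Active = count of series at 1 and Standby = count of
# series at 0.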
Scenario: "Test MGRs Standby"
  Given the following series:
    | metrics | values |
    | ceph_mgr_status{job="ceph",cluster="mycluster", osd="osd.0"} | 1 |
    | ceph_mgr_status{job="ceph",cluster="mycluster", osd="osd.1"} | 0 |
    | ceph_mgr_status{job="ceph",cluster="mycluster", osd="osd.2"} | 0 |
  Then Grafana panel `MGRs` with legend `Standby` shows:
    | metrics | values |
    | {} | 2 |

Scenario: "Test MGRs Active"
  Given the following series:
    | metrics | values |
    | ceph_mgr_status{job="ceph",cluster="mycluster", osd="osd.0"} | 1 |
    | ceph_mgr_status{job="ceph",cluster="mycluster", osd="osd.1"} | 0 |
  Then Grafana panel `MGRs` with legend `Active` shows:
    | metrics | values |
    | {} | 1 |

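# ceph_mon_quorum_status is presumably 1 for a monitor in quorum and 0
# otherwise; Total counts all series, In Quorum those at 1, and out of quorum
# the remainder (3, 1 and 2 in the scenarios below).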
Scenario: "Test Monitors Total"
  Given the following series:
    | metrics | values |
    | ceph_mon_quorum_status{job="ceph",cluster="mycluster", osd="osd.0"} | 1 |
    | ceph_mon_quorum_status{job="ceph",cluster="mycluster", osd="osd.1"} | 0 |
    | ceph_mon_quorum_status{job="ceph",cluster="mycluster", osd="osd.2"} | 0 |
  Then Grafana panel `Monitors` with legend `Total` shows:
    | metrics | values |
    | {} | 3 |

Scenario: "Test Monitors In Quorum"
  Given the following series:
    | metrics | values |
    | ceph_mon_quorum_status{job="ceph",cluster="mycluster", osd="osd.0"} | 1 |
    | ceph_mon_quorum_status{job="ceph",cluster="mycluster", osd="osd.1"} | 0 |
    | ceph_mon_quorum_status{job="ceph",cluster="mycluster", osd="osd.2"} | 0 |
  Then Grafana panel `Monitors` with legend `In Quorum` shows:
    | metrics | values |
    | {} | 1 |

Scenario: "Test Monitors out of Quorum"
  Given the following series:
    | metrics | values |
    | ceph_mon_quorum_status{job="ceph",cluster="mycluster", osd="osd.0"} | 1 |
    | ceph_mon_quorum_status{job="ceph",cluster="mycluster", osd="osd.1"} | 0 |
    | ceph_mon_quorum_status{job="ceph",cluster="mycluster", osd="osd.2"} | 0 |
  Then Grafana panel `Monitors` with legend `MONs out of Quorum` shows:
    | metrics | values |
    | {} | 2 |

Scenario: "Test Total Capacity"
  Given the following series:
    | metrics | values |
    | ceph_cluster_total_bytes{job="ceph",cluster="mycluster", osd="osd.0"} | 100 |
  Then Grafana panel `Capacity` with legend `Total Capacity` shows:
    | metrics | values |
    | ceph_cluster_total_bytes{job="ceph", cluster="mycluster", osd="osd.0"} | 100 |

Scenario: "Test Used Capacity"
  Given the following series:
    | metrics | values |
    | ceph_cluster_total_used_bytes{job="ceph",cluster="mycluster", osd="osd.0"} | 100 |
  Then Grafana panel `Capacity` with legend `Used` shows:
    | metrics | values |
    | ceph_cluster_total_used_bytes{job="ceph",cluster="mycluster", osd="osd.0"} | 100 |

Scenario: "Test Cluster Throughput Write"
  Given the following series:
    | metrics | values |
    | ceph_osd_op_w_in_bytes{job="ceph",cluster="mycluster", osd="osd.0"} | 1000 1000 |
    | ceph_osd_op_w_in_bytes{job="ceph",cluster="mycluster", osd="osd.1"} | 2000 1500 |
  Then Grafana panel `Cluster Throughput` with legend `Write` shows:
    | metrics | values |
    | {} | 25 |

Scenario: "Test Cluster Throughput Read"
  Given the following series:
    | metrics | values |
    | ceph_osd_op_r_out_bytes{job="ceph",cluster="mycluster", osd="osd.0"} | 1000 1000 |
    | ceph_osd_op_r_out_bytes{job="ceph",cluster="mycluster", osd="osd.1"} | 2000 1500 |
  Then Grafana panel `Cluster Throughput` with legend `Read` shows:
    | metrics | values |
    | {} | 25 |

Scenario: "Test IOPS Read"
  Given the following series:
    | metrics | values |
    | ceph_osd_op_r{job="ceph",cluster="mycluster", osd="osd.0"} | 1000 1000 |
    | ceph_osd_op_r{job="ceph",cluster="mycluster", osd="osd.1"} | 2000 1500 |
  Then Grafana panel `IOPS` with legend `Read` shows:
    | metrics | values |
    | {} | 25 |

Scenario: "Test IOPS Write"
  Given the following series:
    | metrics | values |
    | ceph_osd_op_w{job="ceph",cluster="mycluster", osd="osd.0"} | 1000 1000 |
    | ceph_osd_op_w{job="ceph",cluster="mycluster", osd="osd.1"} | 2000 1500 |
  Then Grafana panel `IOPS` with legend `Write` shows:
    | metrics | values |
    | {} | 25 |

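# The pool scenarios below pair each per-pool stat with a ceph_pool_metadata
# series; the expected value is their product, presumably from a PromQL
# group_left join on pool_id that attaches the pool name (for Pool Used
# Bytes: 10000 * 2000 = 20000000). In production ceph_pool_metadata is a
# constant 1, so the join only adds labels; the tests use larger metadata
# values so a broken join shows up in the numbers.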
Scenario: "Test Pool Used Bytes"
  Given the following series:
    | metrics | values |
    | ceph_pool_bytes_used{job="ceph", cluster="mycluster", pool_id="1"} | 10000 |
    | ceph_pool_bytes_used{job="ceph", cluster="mycluster", pool_id="2"} | 20000 |
    | ceph_pool_bytes_used{job="ceph", cluster="mycluster", pool_id="3"} | 30000 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="1", name="pool1"} | 2000 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="2", name="pool2"} | 4000 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="3", name="pool3"} | 6000 |
  Then Grafana panel `Pool Used Bytes` with legend `{{name}}` shows:
    | metrics | values |
    | {job="ceph", cluster="mycluster", name="pool1", pool_id="1"} | 20000000 |
    | {job="ceph", cluster="mycluster", name="pool2", pool_id="2"} | 80000000 |
    | {job="ceph", cluster="mycluster", name="pool3", pool_id="3"} | 180000000 |

Scenario: "Test Pool Used RAW Bytes"
  Given the following series:
    | metrics | values |
    | ceph_pool_stored_raw{job="ceph", cluster="mycluster", pool_id="1"} | 10000 |
    | ceph_pool_stored_raw{job="ceph", cluster="mycluster", pool_id="2"} | 20000 |
    | ceph_pool_stored_raw{job="ceph", cluster="mycluster", pool_id="3"} | 30000 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="1", name="pool1"} | 2000 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="2", name="pool2"} | 4000 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="3", name="pool3"} | 6000 |
  Then Grafana panel `Pool Used RAW Bytes` with legend `{{name}}` shows:
    | metrics | values |
    | {job="ceph", cluster="mycluster", name="pool1", pool_id="1"} | 20000000 |
    | {job="ceph", cluster="mycluster", name="pool2", pool_id="2"} | 80000000 |
    | {job="ceph", cluster="mycluster", name="pool3", pool_id="3"} | 180000000 |

Scenario: "Test Pool Objects Quota"
  Given the following series:
    | metrics | values |
    | ceph_pool_quota_objects{job="ceph", cluster="mycluster", pool_id="1"} | 10 |
    | ceph_pool_quota_objects{job="ceph", cluster="mycluster", pool_id="2"} | 20 |
    | ceph_pool_quota_objects{job="ceph", cluster="mycluster", pool_id="3"} | 30 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="1", name="pool1"} | 10 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="2", name="pool2"} | 15 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="3", name="pool3"} | 15 |
  Then Grafana panel `Pool Objects Quota` with legend `{{name}}` shows:
    | metrics | values |
    | {job="ceph", cluster="mycluster", name="pool1", pool_id="1"} | 100 |
    | {job="ceph", cluster="mycluster", name="pool2", pool_id="2"} | 300 |
    | {job="ceph", cluster="mycluster", name="pool3", pool_id="3"} | 450 |

Scenario: "Test Pool Quota Bytes"
  Given the following series:
    | metrics | values |
    | ceph_pool_quota_bytes{job="ceph", cluster="mycluster", pool_id="1"} | 100 |
    | ceph_pool_quota_bytes{job="ceph", cluster="mycluster", pool_id="2"} | 200 |
    | ceph_pool_quota_bytes{job="ceph", cluster="mycluster", pool_id="3"} | 300 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="1", name="pool1"} | 100 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="2", name="pool2"} | 150 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="3", name="pool3"} | 150 |
  Then Grafana panel `Pool Quota Bytes` with legend `{{name}}` shows:
    | metrics | values |
    | {job="ceph", cluster="mycluster", name="pool1", pool_id="1"} | 10000 |
    | {job="ceph", cluster="mycluster", name="pool2", pool_id="2"} | 30000 |
    | {job="ceph", cluster="mycluster", name="pool3", pool_id="3"} | 45000 |

Scenario: "Test Objects Per Pool"
  Given the following series:
    | metrics | values |
    | ceph_pool_objects{job="ceph", cluster="mycluster", pool_id="1"} | 100 |
    | ceph_pool_objects{job="ceph", cluster="mycluster", pool_id="2"} | 200 |
    | ceph_pool_objects{job="ceph", cluster="mycluster", pool_id="3"} | 300 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="1", name="pool1"} | 100 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="2", name="pool2"} | 150 |
    | ceph_pool_metadata{job="ceph", cluster="mycluster", pool_id="3", name="pool3"} | 150 |
  Then Grafana panel `Objects Per Pool` with legend `{{name}}` shows:
    | metrics | values |
    | {job="ceph", cluster="mycluster", name="pool1", pool_id="1"} | 10000 |
    | {job="ceph", cluster="mycluster", name="pool2", pool_id="2"} | 30000 |
    | {job="ceph", cluster="mycluster", name="pool3", pool_id="3"} | 45000 |

Scenario: "Test OSD Type Count"
  Given the following series:
    | metrics | values |
    | ceph_pool_objects{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pool_objects{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `OSD Type Count` with legend `Total` shows:
    | metrics | values |
    | {} | 30 |

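# Each PGs State / Stuck PGs legend below presumably sums the corresponding
# ceph_pg_* gauge across the cluster, hence 10 + 20 = 30 in every scenario.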
Scenario: "Test PGs State Backfill Toofull"
  Given the following series:
    | metrics | values |
    | ceph_pg_backfill_toofull{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_backfill_toofull{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Backfill Toofull` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Remapped"
  Given the following series:
    | metrics | values |
    | ceph_pg_remapped{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_remapped{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Remapped` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Backfill"
  Given the following series:
    | metrics | values |
    | ceph_pg_backfill{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_backfill{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Backfill` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Peered"
  Given the following series:
    | metrics | values |
    | ceph_pg_peered{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_peered{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Peered` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Down"
  Given the following series:
    | metrics | values |
    | ceph_pg_down{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_down{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Down` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Repair"
  Given the following series:
    | metrics | values |
    | ceph_pg_repair{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_repair{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Repair` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Recovering"
  Given the following series:
    | metrics | values |
    | ceph_pg_recovering{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_recovering{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Recovering` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Deep"
  Given the following series:
    | metrics | values |
    | ceph_pg_deep{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_deep{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Deep` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Wait Backfill"
  Given the following series:
    | metrics | values |
    | ceph_pg_wait_backfill{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_wait_backfill{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Wait Backfill` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Creating"
  Given the following series:
    | metrics | values |
    | ceph_pg_creating{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_creating{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Creating` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Forced Recovery"
  Given the following series:
    | metrics | values |
    | ceph_pg_forced_recovery{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_forced_recovery{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Forced Recovery` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Forced Backfill"
  Given the following series:
    | metrics | values |
    | ceph_pg_forced_backfill{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_forced_backfill{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Forced Backfill` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Incomplete"
  Given the following series:
    | metrics | values |
    | ceph_pg_incomplete{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_incomplete{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Incomplete` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test PGs State Undersized"
  Given the following series:
    | metrics | values |
    | ceph_pg_undersized{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_undersized{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `PGs State` with legend `Undersized` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test Stuck PGs Undersized"
  Given the following series:
    | metrics | values |
    | ceph_pg_undersized{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_undersized{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `Stuck PGs` with legend `Undersized` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test Stuck PGs Stale"
  Given the following series:
    | metrics | values |
    | ceph_pg_stale{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_stale{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `Stuck PGs` with legend `Stale` shows:
    | metrics | values |
    | {} | 30 |

Scenario: "Test Stuck PGs Degraded"
  Given the following series:
    | metrics | values |
    | ceph_pg_degraded{job="ceph", cluster="mycluster", osd="osd.0"} | 10 |
    | ceph_pg_degraded{job="ceph", cluster="mycluster", osd="osd.1"} | 20 |
  Then Grafana panel `Stuck PGs` with legend `Degraded` shows:
    | metrics | values |
    | {} | 30 |

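# `interval` is a Grafana template variable; the When step below substitutes
# it into the panel query, presumably as the range of a rate()-style
# expression over the recovery-ops counters.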
Scenario: "Test Recovery Operations"
  Given the following series:
    | metrics | values |
    | ceph_osd_recovery_ops{job="ceph", cluster="mycluster", osd="osd.0"} | 250 200 |
    | ceph_osd_recovery_ops{job="ceph", cluster="mycluster", osd="osd.1"} | 800 100 |
  When variable `interval` is `120s`
  Then Grafana panel `Recovery Operations` with legend `OPS` shows:
    | metrics | values |
    | {} | 5 |

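# The Ceph Versions scenarios below presumably count daemons grouped by the
# ceph_version label of each *_metadata series (something like
# count by (ceph_version) (ceph_osd_metadata)), yielding one row per release.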
Scenario: "Test Ceph Versions OSD"
  Given the following series:
    | metrics | values |
    | ceph_osd_metadata{job="ceph", cluster="mycluster", ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)", ceph_daemon="osd.0", device_class="ssd"} | 1 |
    | ceph_osd_metadata{job="ceph", cluster="mycluster", ceph_version="ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)", ceph_daemon="osd.1", device_class="hdd"} | 1 |
  Then Grafana panel `Ceph Versions` with legend `OSD Services` shows:
    | metrics | values |
    | {ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} | 1 |
    | {ceph_version="ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)"} | 1 |

Scenario: "Test Ceph Versions Mon"
  Given the following series:
    | metrics | values |
    | ceph_mon_metadata{job="ceph", cluster="mycluster", ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)", hostname="somehostname"} | 1 |
    | ceph_mon_metadata{job="ceph", cluster="mycluster", ceph_version="ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)", hostname="someotherhostname"} | 1 |
  Then Grafana panel `Ceph Versions` with legend `Mon Services` shows:
    | metrics | values |
    | {ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} | 1 |
    | {ceph_version="ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)"} | 1 |

Scenario: "Test Ceph Versions MDS"
  Given the following series:
    | metrics | values |
    | ceph_mds_metadata{job="ceph", cluster="mycluster", ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)", hostname="someotherhostname", ceph_daemon="mds.someotherhostname", fs_id="1"} | 1 |
    | ceph_mds_metadata{job="ceph", cluster="mycluster", ceph_version="ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)", hostname="somehostname", ceph_daemon="mds.somehostname", fs_id="1"} | 1 |
  Then Grafana panel `Ceph Versions` with legend `MDS Services` shows:
    | metrics | values |
    | {ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} | 1 |
    | {ceph_version="ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)"} | 1 |

Scenario: "Test Ceph Versions RGW"
  Given the following series:
    | metrics | values |
    | ceph_rgw_metadata{job="ceph", cluster="mycluster", ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)", ceph_daemon="rgw.somehostname", hostname="somehostname"} | 1 |
    | ceph_rgw_metadata{job="ceph", cluster="mycluster", ceph_version="ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)", ceph_daemon="rgw.someotherhostname", hostname="someotherhostname"} | 1 |
  Then Grafana panel `Ceph Versions` with legend `RGW Services` shows:
    | metrics | values |
    | {ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} | 1 |
    | {ceph_version="ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)"} | 1 |

Scenario: "Test Ceph Versions MGR"
  Given the following series:
    | metrics | values |
    | ceph_mgr_metadata{job="ceph", cluster="mycluster", ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)", ceph_daemon="mgr.somehostname", hostname="somehostname"} | 1 |
    | ceph_mgr_metadata{job="ceph", cluster="mycluster", ceph_version="ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)", ceph_daemon="mgr.someotherhostname", hostname="someotherhostname"} | 1 |
  Then Grafana panel `Ceph Versions` with legend `MGR Services` shows:
    | metrics | values |
    | {ceph_version="ceph version 18.2.1 (7fe91d5d5842e04be3b4f514d6dd990c54b29c76) reef (stable)"} | 1 |
    | {ceph_version="ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)"} | 1 |