// ceph/monitoring/ceph-mixin/dashboards/rgw.libsonnet
local g = import 'grafonnet/grafana.libsonnet';
local u = import 'utils.libsonnet';

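// The object below extends utils.libsonnet, so `$` resolves to its helpers
// (graphPanelSchema, addTargetSchema, matchers, ...). It emits three
// dashboards, keyed by output filename. As a sketch (the grafonnet jpath is
// an assumption about your checkout, not something this file pins down),
// the dashboards can be rendered with jsonnet's multi-file output mode:
//
//   jsonnet -J <path-to-grafonnet> -m out/ rgw.libsonnet
//
// which writes one JSON file per top-level key into out/.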
(import 'utils.libsonnet') {
  'radosgw-sync-overview.json':
    local RgwSyncOverviewPanel(title, formatY1, labelY1, rgwMetric, x, y, w, h) =
      $.graphPanelSchema(
        {},
        title,
        '',
        'null as zero',
        true,
        formatY1,
        'short',
        labelY1,
        null,
        0,
        1,
        '$datasource'
      )
      .addTargets(
        [
          $.addTargetSchema(
            'sum by (source_zone) (rate(%(rgwMetric)s{%(matchers)s}[$__rate_interval]))'
            % ($.matchers() + { rgwMetric: rgwMetric }),
            '{{source_zone}}'
          ),
        ]
      ) + { gridPos: { x: x, y: y, w: w, h: h } };
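    // Grafana places panels on a 24-column grid, so the x/y/w/h passed to the
    // gridPos mixin above are grid units, not pixels. For orientation, the
    // throughput panel's rendered target comes out roughly as below, assuming
    // $.matchers() expands %(matchers)s to the usual cluster/job selectors
    // (an assumption about utils.libsonnet, which is not shown here):
    //
    //   sum by (source_zone) (
    //     rate(ceph_data_sync_from_zone_fetch_bytes_sum{cluster=~"$cluster"}[$__rate_interval])
    //   )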

    $.dashboardSchema(
      'RGW Sync Overview',
      '',
      'rgw-sync-overview',
      'now-1h',
      '30s',
      16,
      $._config.dashboardTags + ['overview'],
      ''
    )
    .addAnnotation(
      $.addAnnotationSchema(
        1,
        '-- Grafana --',
        true,
        true,
        'rgba(0, 211, 255, 1)',
        'Annotations & Alerts',
        'dashboard'
      )
    )
    .addRequired(
      type='grafana', id='grafana', name='Grafana', version='5.0.0'
    )
    .addRequired(
      type='panel', id='graph', name='Graph', version='5.0.0'
    )
    .addTemplate(
      g.template.datasource('datasource', 'prometheus', 'default', label='Data Source')
    )
    .addTemplate(
      $.addClusterTemplate()
    )
    .addTemplate(
      $.addJobTemplate()
    )
    .addTemplate(
      $.addTemplateSchema(
        'rgw_servers',
        '$datasource',
        'label_values(ceph_rgw_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(),
        1,
        true,
        1,
        '',
        'RGW Server'
      )
    )
    .addPanels([
      RgwSyncOverviewPanel(
        'Replication (throughput) from Source Zone',
        'Bps',
        null,
        'ceph_data_sync_from_zone_fetch_bytes_sum',
        0,
        0,
        8,
        7
      ),
      RgwSyncOverviewPanel(
        'Replication (objects) from Source Zone',
        'short',
        'Objects/s',
        'ceph_data_sync_from_zone_fetch_bytes_count',
        8,
        0,
        8,
        7
      ),
      RgwSyncOverviewPanel(
        'Polling Request Latency from Source Zone',
        'ms',
        null,
        'ceph_data_sync_from_zone_poll_latency_sum',
        16,
        0,
        8,
        7
      ),
      RgwSyncOverviewPanel(
        'Unsuccessful Object Replications from Source Zone',
        'short',
        'Count/s',
        'ceph_data_sync_from_zone_fetch_errors',
        0,
        7,
        8,
        7
      ),
    ]),
  'radosgw-overview.json':
    local RgwOverviewPanel(
      title,
      description,
      formatY1,
      formatY2,
      expr1,
      legendFormat1,
      x,
      y,
      w,
      h,
      datasource='$datasource',
      legend_alignAsTable=false,
      legend_avg=false,
      legend_min=false,
      legend_max=false,
      legend_current=false,
      legend_values=false
    ) =
      $.graphPanelSchema(
        {},
        title,
        description,
        'null',
        false,
        formatY1,
        formatY2,
        null,
        null,
        0,
        1,
        datasource,
        legend_alignAsTable,
        legend_avg,
        legend_min,
        legend_max,
        legend_current,
        legend_values
      )
      .addTargets(
        [$.addTargetSchema(expr1, legendFormat1)]
      ) + { gridPos: { x: x, y: y, w: w, h: h } };
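    // The six legend_* flags are forwarded verbatim to $.graphPanelSchema()
    // and map onto the graph panel's legend options (alignAsTable, avg, min,
    // max, current, values); the HAProxy panels below set all of them to true
    // to get a tabular legend with summary columns.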

    $.dashboardSchema(
      'RGW Overview',
      '',
      'WAkugZpiz',
      'now-1h',
      '30s',
      16,
      $._config.dashboardTags + ['overview'],
      ''
    )
    .addAnnotation(
      $.addAnnotationSchema(
        1,
        '-- Grafana --',
        true,
        true,
        'rgba(0, 211, 255, 1)',
        'Annotations & Alerts',
        'dashboard'
      )
    )
    .addRequired(
      type='grafana', id='grafana', name='Grafana', version='5.0.0'
    )
    .addRequired(
      type='panel', id='graph', name='Graph', version='5.0.0'
    )
    .addTemplate(
      g.template.datasource('datasource', 'prometheus', 'default', label='Data Source')
    )
    .addTemplate(
      $.addClusterTemplate()
    )
    .addTemplate(
      $.addJobTemplate()
    )
    .addTemplate(
      $.addTemplateSchema(
        'rgw_servers',
        '$datasource',
        'label_values(ceph_rgw_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(),
        1,
        true,
        1,
        '',
        'RGW Server'
      )
    )
    .addTemplate(
      $.addTemplateSchema(
        'code',
        '$datasource',
        'label_values(haproxy_server_http_responses_total{job=~"$job_haproxy", instance=~"$ingress_service"}, code)',
        1,
        true,
        1,
        'HTTP Code',
        ''
      )
    )
    .addTemplate(
      $.addTemplateSchema(
        'job_haproxy',
        '$datasource',
        'label_values(haproxy_server_status, job)',
        1,
        true,
        1,
        'job haproxy',
        '(.*)',
        multi=true,
        allValues='.+',
      ),
    )
    .addTemplate(
      $.addTemplateSchema(
        'ingress_service',
        '$datasource',
        'label_values(haproxy_server_status{job=~"$job_haproxy"}, instance)',
        1,
        true,
        1,
        'Ingress Service',
        ''
      )
    )
    .addPanels([
      $.addRowSchema(false, true, 'RGW Overview - All Gateways') +
      { gridPos: { x: 0, y: 0, w: 24, h: 1 } },
      RgwOverviewPanel(
        'Average GET/PUT Latencies',
        '',
        's',
        'short',
        |||
          rate(ceph_rgw_get_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
          rate(ceph_rgw_get_initial_lat_count{%(matchers)s}[$__rate_interval]) *
          on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s}
        ||| % $.matchers(),
        'GET AVG',
        0,
        1,
        8,
        7
      ).addTargets(
        [
          $.addTargetSchema(
            |||
              rate(ceph_rgw_put_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
              rate(ceph_rgw_put_initial_lat_count{%(matchers)s}[$__rate_interval]) *
              on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s}
            ||| % $.matchers(),
            'PUT AVG'
          ),
        ]
      ),
      RgwOverviewPanel(
        'Total Requests/sec by RGW Instance',
        '',
        'none',
        'short',
        |||
          sum by (rgw_host) (
            label_replace(
              rate(ceph_rgw_req{%(matchers)s}[$__rate_interval]) *
              on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s},
              "rgw_host", "$1", "ceph_daemon", "rgw.(.*)"
            )
          )
        ||| % $.matchers(),
        '{{rgw_host}}',
        8,
        1,
        7,
        7
      ),
      RgwOverviewPanel(
        'GET Latencies by RGW Instance',
        'Latencies are shown stacked, without a y-axis, to provide a visual indication of GET latency imbalance across RGW hosts',
        's',
        'short',
        |||
          label_replace(
            rate(ceph_rgw_get_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
            rate(ceph_rgw_get_initial_lat_count{%(matchers)s}[$__rate_interval]) *
            on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s},
            "rgw_host", "$1", "ceph_daemon", "rgw.(.*)"
          )
        ||| % $.matchers(),
        '{{rgw_host}}',
        15,
        1,
        6,
        7
      ),
      RgwOverviewPanel(
        'Bandwidth Consumed by Type',
        'Total bytes transferred in/out of all radosgw instances within the cluster',
        'bytes',
        'short',
        'sum(rate(ceph_rgw_get_b{%(matchers)s}[$__rate_interval]))' % $.matchers(),
        'GETs',
        0,
        8,
        8,
        6
      ).addTargets(
        [$.addTargetSchema('sum(rate(ceph_rgw_put_b{%(matchers)s}[$__rate_interval]))' % $.matchers(), 'PUTs')]
      ),
      RgwOverviewPanel(
        'Bandwidth by RGW Instance',
        'Total bytes transferred in/out through get/put operations, by radosgw instance',
        'bytes',
        'short',
        |||
          label_replace(sum by (instance_id) (
            rate(ceph_rgw_get_b{%(matchers)s}[$__rate_interval]) +
            rate(ceph_rgw_put_b{%(matchers)s}[$__rate_interval])) *
            on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s},
            "rgw_host", "$1", "ceph_daemon", "rgw.(.*)"
          )
        ||| % $.matchers(),
        '{{rgw_host}}',
        8,
        8,
        7,
        6
      ),
      RgwOverviewPanel(
        'PUT Latencies by RGW Instance',
        'Latencies are shown stacked, without a y-axis, to provide a visual indication of PUT latency imbalance across RGW hosts',
        's',
        'short',
        |||
          label_replace(
            rate(ceph_rgw_put_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
            rate(ceph_rgw_put_initial_lat_count{%(matchers)s}[$__rate_interval]) *
            on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s},
            "rgw_host", "$1", "ceph_daemon", "rgw.(.*)"
          )
        ||| % $.matchers(),
        '{{rgw_host}}',
        15,
        8,
        6,
        6
      ),
      $.addRowSchema(
        false, true, 'RGW Overview - HAProxy Metrics'
      ) + { gridPos: { x: 0, y: 12, w: 9, h: 12 } },
      RgwOverviewPanel(
        'Total responses by HTTP code',
        '',
        'short',
        'short',
        |||
          sum(
            rate(
              haproxy_frontend_http_responses_total{code=~"$code", job=~"$job_haproxy", instance=~"$ingress_service", proxy=~"frontend"}[$__rate_interval]
            )
          ) by (code)
        |||,
        'Frontend {{ code }}',
        0,
        12,
        5,
        12,
        '$datasource',
        true,
        true,
        true,
        true,
        true,
        true
      )
      .addTargets(
        [
          $.addTargetSchema(
            |||
              sum(
                rate(
                  haproxy_backend_http_responses_total{code=~"$code", job=~"$job_haproxy", instance=~"$ingress_service", proxy=~"backend"}[$__rate_interval]
                )
              ) by (code)
            |||, 'Backend {{ code }}'
          ),
        ]
      )
      .addSeriesOverride([
        {
          alias: '/.*Back.*/',
          transform: 'negative-Y',
        },
        { alias: '/.*1.*/' },
        { alias: '/.*2.*/' },
        { alias: '/.*3.*/' },
        { alias: '/.*4.*/' },
        { alias: '/.*5.*/' },
        { alias: '/.*other.*/' },
      ]),
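      // 'negative-Y' is a stock graph-panel series override: any series whose
      // alias matches the regex (here /.*Back.*/) is mirrored below the
      // x-axis, so frontend and backend response rates read as one mirrored
      // plot. The remaining alias entries match each HTTP code class but set
      // no properties, so they have no visual effect on their own.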
      RgwOverviewPanel(
        'Total requests / responses',
        '',
        'short',
        'short',
        |||
          sum(
            rate(
              haproxy_frontend_http_requests_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
            )
          ) by (instance)
        |||,
        'Requests',
        5,
        12,
        5,
        12,
        '$datasource',
        true,
        true,
        true,
        true,
        true,
        true
      )
      .addTargets(
        [
          $.addTargetSchema(
            |||
              sum(
                rate(
                  haproxy_backend_response_errors_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
                )
              ) by (instance)
            |||, 'Response errors', 'time_series', 2
          ),
          $.addTargetSchema(
            |||
              sum(
                rate(
                  haproxy_frontend_request_errors_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
                )
              ) by (instance)
            |||, 'Request errors'
          ),
          $.addTargetSchema(
            |||
              sum(
                rate(
                  haproxy_backend_redispatch_warnings_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
                )
              ) by (instance)
            |||, 'Backend redispatch', 'time_series', 2
          ),
          $.addTargetSchema(
            |||
              sum(
                rate(
                  haproxy_backend_retry_warnings_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
                )
              ) by (instance)
            |||, 'Backend retry', 'time_series', 2
          ),
          $.addTargetSchema(
            |||
              sum(
                rate(
                  haproxy_frontend_requests_denied_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
                )
              ) by (instance)
            |||, 'Requests denied', 'time_series', 2
          ),
          $.addTargetSchema(
            |||
              sum(
                haproxy_backend_current_queue{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}
              ) by (instance)
            |||, 'Backend Queued', 'time_series', 2
          ),
        ]
      )
      .addSeriesOverride([
        {
          alias: '/.*Response.*/',
          transform: 'negative-Y',
        },
        {
          alias: '/.*Backend.*/',
          transform: 'negative-Y',
        },
      ]),
      RgwOverviewPanel(
        'Total number of connections',
        '',
        'short',
        'short',
        |||
          sum(
            rate(
              haproxy_frontend_connections_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
            )
          ) by (instance)
        |||,
        'Front',
        10,
        12,
        5,
        12,
        '$datasource',
        true,
        true,
        true,
        true,
        true,
        true
      )
      .addTargets(
        [
          $.addTargetSchema(
            |||
              sum(
                rate(
                  haproxy_backend_connection_attempts_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
                )
              ) by (instance)
            |||, 'Back'
          ),
          $.addTargetSchema(
            |||
              sum(
                rate(
                  haproxy_backend_connection_errors_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
                )
              ) by (instance)
            |||, 'Back errors'
          ),
        ]
      )
      .addSeriesOverride([
        {
          alias: '/.*Back.*/',
          transform: 'negative-Y',
        },
      ]),
      RgwOverviewPanel(
        'Current total of incoming / outgoing bytes',
        '',
        'short',
        'short',
        |||
          sum(
            rate(
              haproxy_frontend_bytes_in_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
            ) * 8
          ) by (instance)
        |||,
        'IN Front',
        15,
        12,
        6,
        12,
        '$datasource',
        true,
        true,
        true,
        true,
        true,
        true
      )
      .addTargets(
        [
          $.addTargetSchema(
            |||
              sum(
                rate(
                  haproxy_frontend_bytes_out_total{proxy=~"frontend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
                ) * 8
              ) by (instance)
            |||, 'OUT Front', 'time_series', 2
          ),
          $.addTargetSchema(
            |||
              sum(
                rate(
                  haproxy_backend_bytes_in_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
                ) * 8
              ) by (instance)
            |||, 'IN Back', 'time_series', 2
          ),
          $.addTargetSchema(
            |||
              sum(
                rate(
                  haproxy_backend_bytes_out_total{proxy=~"backend", job=~"$job_haproxy", instance=~"$ingress_service"}[$__rate_interval]
                ) * 8
              ) by (instance)
            |||, 'OUT Back', 'time_series', 2
          ),
        ]
      )
      .addSeriesOverride([
        {
          alias: '/.*OUT.*/',
          transform: 'negative-Y',
        },
      ]),
    ]),
  'radosgw-detail.json':
    local RgwDetailsPanel(
      aliasColors,
      title,
      description,
      formatY1,
      formatY2,
      expr1,
      expr2,
      legendFormat1,
      legendFormat2,
      x,
      y,
      w,
      h
    ) =
      $.graphPanelSchema(
        aliasColors,
        title,
        description,
        'null',
        false,
        formatY1,
        formatY2,
        null,
        null,
        0,
        1,
        '$datasource'
      )
      .addTargets(
        [$.addTargetSchema(expr1, legendFormat1), $.addTargetSchema(expr2, legendFormat2)]
      ) + { gridPos: { x: x, y: y, w: w, h: h } };
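    // RgwDetailsPanel is the two-query variant of RgwOverviewPanel above:
    // expr1/expr2 become a pair of targets on one graph, and aliasColors lets
    // a caller pin stable colors to specific legend entries (see the HTTP
    // Request Breakdown panel below).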

    $.dashboardSchema(
      'RGW Instance Detail',
      '',
      'x5ARzZtmk',
      'now-1h',
      '30s',
      16,
      $._config.dashboardTags + ['overview'],
      ''
    )
    .addAnnotation(
      $.addAnnotationSchema(
        1,
        '-- Grafana --',
        true,
        true,
        'rgba(0, 211, 255, 1)',
        'Annotations & Alerts',
        'dashboard'
      )
    )
    .addRequired(
      type='grafana', id='grafana', name='Grafana', version='5.0.0'
    )
    .addRequired(
      type='panel',
      id='grafana-piechart-panel',
      name='Pie Chart',
      version='1.3.3'
    )
    .addRequired(
      type='panel', id='graph', name='Graph', version='5.0.0'
    )
    .addTemplate(
      g.template.datasource('datasource', 'prometheus', 'default', label='Data Source')
    )
    .addTemplate(
      $.addClusterTemplate()
    )
    .addTemplate(
      $.addJobTemplate()
    )
    .addTemplate(
      $.addTemplateSchema(
        'rgw_servers',
        '$datasource',
        'label_values(ceph_rgw_metadata{%(matchers)s}, ceph_daemon)' % $.matchers(),
        1,
        true,
        1,
        '',
        ''
      )
    )
    .addPanels([
      $.addRowSchema(false, true, 'RGW Host Detail : $rgw_servers') + { gridPos: { x: 0, y: 0, w: 24, h: 1 } },
      RgwDetailsPanel(
        {},
        '$rgw_servers GET/PUT Latencies',
        '',
        's',
        'short',
        |||
          sum by (instance_id) (
            rate(ceph_rgw_get_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
            rate(ceph_rgw_get_initial_lat_count{%(matchers)s}[$__rate_interval])
          ) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
        ||| % $.matchers(),
        |||
          sum by (instance_id) (
            rate(ceph_rgw_put_initial_lat_sum{%(matchers)s}[$__rate_interval]) /
            rate(ceph_rgw_put_initial_lat_count{%(matchers)s}[$__rate_interval])
          ) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
        ||| % $.matchers(),
        'GET {{ceph_daemon}}',
        'PUT {{ceph_daemon}}',
        0,
        1,
        6,
        8
      ),
      RgwDetailsPanel(
        {},
        'Bandwidth by HTTP Operation',
        '',
        'bytes',
        'short',
        |||
          rate(ceph_rgw_get_b{%(matchers)s}[$__rate_interval]) *
            on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
        ||| % $.matchers(),
        |||
          rate(ceph_rgw_put_b{%(matchers)s}[$__rate_interval]) *
            on (instance_id) group_left (ceph_daemon)
            ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
        ||| % $.matchers(),
        'GETs {{ceph_daemon}}',
        'PUTs {{ceph_daemon}}',
        6,
        1,
        7,
        8
      ),
      RgwDetailsPanel(
        {
          GETs: '#7eb26d',
          Other: '#447ebc',
          PUTs: '#eab839',
          Requests: '#3f2b5b',
          'Requests Failed': '#bf1b00',
        },
        'HTTP Request Breakdown',
        '',
        'short',
        'short',
        |||
          rate(ceph_rgw_failed_req{%(matchers)s}[$__rate_interval]) *
            on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
        ||| % $.matchers(),
        |||
          rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) *
            on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
        ||| % $.matchers(),
        'Requests Failed {{ceph_daemon}}',
        'GETs {{ceph_daemon}}',
        13,
        1,
        7,
        8
      )
      .addTargets(
        [
          $.addTargetSchema(
            |||
              rate(ceph_rgw_put{%(matchers)s}[$__rate_interval]) *
                on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
            ||| % $.matchers(),
            'PUTs {{ceph_daemon}}'
          ),
          $.addTargetSchema(
            |||
              (
                rate(ceph_rgw_req{%(matchers)s}[$__rate_interval]) -
                (
                  rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) +
                  rate(ceph_rgw_put{%(matchers)s}[$__rate_interval])
                )
              ) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
            ||| % $.matchers(),
            'Other {{ceph_daemon}}'
          ),
        ]
      ),
      $.simplePieChart(
        {
          GETs: '#7eb26d',
          'Other (HEAD,POST,DELETE)': '#447ebc',
          PUTs: '#eab839',
          Requests: '#3f2b5b',
          Failures: '#bf1b00',
        }, '', 'Workload Breakdown'
      )
      .addTarget($.addTargetSchema(
        |||
          rate(ceph_rgw_failed_req{%(matchers)s}[$__rate_interval]) *
            on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
        ||| % $.matchers(),
        'Failures {{ceph_daemon}}'
      ))
      .addTarget($.addTargetSchema(
        |||
          rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) *
            on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
        ||| % $.matchers(),
        'GETs {{ceph_daemon}}'
      ))
      .addTarget($.addTargetSchema(
        |||
          rate(ceph_rgw_put{%(matchers)s}[$__rate_interval]) *
            on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
        ||| % $.matchers(),
        'PUTs {{ceph_daemon}}'
      ))
      .addTarget($.addTargetSchema(
        |||
          (
            rate(ceph_rgw_req{%(matchers)s}[$__rate_interval]) -
            (
              rate(ceph_rgw_get{%(matchers)s}[$__rate_interval]) +
              rate(ceph_rgw_put{%(matchers)s}[$__rate_interval])
            )
          ) * on (instance_id) group_left (ceph_daemon)
            ceph_rgw_metadata{%(matchers)s, ceph_daemon=~"$rgw_servers"}
        ||| % $.matchers(),
        'Other (DELETE,LIST) {{ceph_daemon}}'
      )) + { gridPos: { x: 20, y: 1, w: 4, h: 8 } },
    ]),
}