// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2019 Red Hat, Inc.
 * (C) 2019 SUSE Linux LLC
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */
#include "rgw_dmclock_scheduler_ctx.h"

namespace rgw::dmclock {

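// Seed the per-client dmclock parameters from the current configuration.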
ClientConfig::ClientConfig(CephContext *cct)
{
  update(cct->_conf);
}

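// Lookup functor handed to the dmclock queue: return the QoS parameters
// (reservation/weight/limit) for the given client class.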
ClientInfo* ClientConfig::operator()(client_id client)
{
  return &clients[static_cast<size_t>(client)];
}

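// Config observer interface: the config keys whose changes we track.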
const char** ClientConfig::get_tracked_conf_keys() const
{
  static const char* keys[] = {
    "rgw_dmclock_admin_res",
    "rgw_dmclock_admin_wgt",
    "rgw_dmclock_admin_lim",
    "rgw_dmclock_auth_res",
    "rgw_dmclock_auth_wgt",
    "rgw_dmclock_auth_lim",
    "rgw_dmclock_data_res",
    "rgw_dmclock_data_wgt",
    "rgw_dmclock_data_lim",
    "rgw_dmclock_metadata_res",
    "rgw_dmclock_metadata_wgt",
    "rgw_dmclock_metadata_lim",
    "rgw_max_concurrent_requests",
    nullptr
  };
  return keys;
}

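// Rebuild the client info table from the rgw_dmclock_* config values.
// The static_asserts pin each client_id to its slot in the vector, so
// operator() can index by client_id directly.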
void ClientConfig::update(const ConfigProxy& conf)
{
  clients.clear();
  static_assert(0 == static_cast<int>(client_id::admin));
  clients.emplace_back(conf.get_val<double>("rgw_dmclock_admin_res"),
                       conf.get_val<double>("rgw_dmclock_admin_wgt"),
                       conf.get_val<double>("rgw_dmclock_admin_lim"));
  static_assert(1 == static_cast<int>(client_id::auth));
  clients.emplace_back(conf.get_val<double>("rgw_dmclock_auth_res"),
                       conf.get_val<double>("rgw_dmclock_auth_wgt"),
                       conf.get_val<double>("rgw_dmclock_auth_lim"));
  static_assert(2 == static_cast<int>(client_id::data));
  clients.emplace_back(conf.get_val<double>("rgw_dmclock_data_res"),
                       conf.get_val<double>("rgw_dmclock_data_wgt"),
                       conf.get_val<double>("rgw_dmclock_data_lim"));
  static_assert(3 == static_cast<int>(client_id::metadata));
  clients.emplace_back(conf.get_val<double>("rgw_dmclock_metadata_res"),
                       conf.get_val<double>("rgw_dmclock_metadata_wgt"),
                       conf.get_val<double>("rgw_dmclock_metadata_lim"));
}

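// Config observer interface: any tracked key change triggers a full reload.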
void ClientConfig::handle_conf_change(const ConfigProxy& conf,
                                      const std::set<std::string>& changed)
{
  update(conf);
}

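// Build one perf counter set per client class, plus a final set (indexed
// by client_id::count) for the scheduler's request throttler.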
ClientCounters::ClientCounters(CephContext *cct)
{
  clients[static_cast<size_t>(client_id::admin)] =
      queue_counters::build(cct, "dmclock-admin");
  clients[static_cast<size_t>(client_id::auth)] =
      queue_counters::build(cct, "dmclock-auth");
  clients[static_cast<size_t>(client_id::data)] =
      queue_counters::build(cct, "dmclock-data");
  clients[static_cast<size_t>(client_id::metadata)] =
      queue_counters::build(cct, "dmclock-metadata");
  clients[static_cast<size_t>(client_id::count)] =
      throttle_counters::build(cct, "dmclock-scheduler");
}

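// Accumulate one request and its cost into the per-client running sums.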
void inc(ClientSums& sums, client_id client, Cost cost)
{
  auto& sum = sums[static_cast<size_t>(client)];
  sum.count++;
  sum.cost += cost;
}

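// Apply a batch of canceled requests to the queue counters: remove them
// from the queue length/cost gauges and record them as cancellations.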
void on_cancel(PerfCounters *c, const ClientSum& sum)
{
  if (sum.count) {
    c->dec(queue_counters::l_qlen, sum.count);
    c->inc(queue_counters::l_cancel, sum.count);
  }
  if (sum.cost) {
    c->dec(queue_counters::l_cost, sum.cost);
    c->inc(queue_counters::l_cancel_cost, sum.cost);
  }
}

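// Apply a batch of dequeued requests to the queue counters, split by how
// dmclock satisfied them: under reservation (rsum) or by priority/weight
// (psum).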
void on_process(PerfCounters* c, const ClientSum& rsum, const ClientSum& psum)
{
  if (rsum.count) {
    c->inc(queue_counters::l_res, rsum.count);
  }
  if (rsum.cost) {
    c->inc(queue_counters::l_res_cost, rsum.cost);
  }
  if (psum.count) {
    c->inc(queue_counters::l_prio, psum.count);
  }
  if (psum.cost) {
    c->inc(queue_counters::l_prio_cost, psum.cost);
  }
  if (rsum.count + psum.count) {
    c->dec(queue_counters::l_qlen, rsum.count + psum.count);
  }
  if (rsum.cost + psum.cost) {
    c->dec(queue_counters::l_cost, rsum.cost + psum.cost);
  }
}
} // namespace rgw::dmclock

namespace queue_counters {

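// Create and register the perf counters for one dmclock queue. Returns an
// empty ref when throttler perf counters are disabled in the config.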
PerfCountersRef build(CephContext *cct, const std::string& name)
{
  if (!cct->_conf->throttler_perf_counter) {
    return {};
  }

  PerfCountersBuilder b(cct, name, l_first, l_last);
  b.add_u64(l_qlen, "qlen", "Queue size");
  b.add_u64(l_cost, "cost", "Cost of queued requests");
  b.add_u64_counter(l_res, "res", "Requests satisfied by reservation");
  b.add_u64_counter(l_res_cost, "res_cost", "Cost satisfied by reservation");
  b.add_u64_counter(l_prio, "prio", "Requests satisfied by priority");
  b.add_u64_counter(l_prio_cost, "prio_cost", "Cost satisfied by priority");
  b.add_u64_counter(l_limit, "limit", "Requests rejected by limit");
  b.add_u64_counter(l_limit_cost, "limit_cost", "Cost rejected by limit");
  b.add_u64_counter(l_cancel, "cancel", "Cancels");
  b.add_u64_counter(l_cancel_cost, "cancel_cost", "Canceled cost");
  b.add_time_avg(l_res_latency, "res latency", "Reservation latency");
  b.add_time_avg(l_prio_latency, "prio latency", "Priority latency");

  auto logger = PerfCountersRef{ b.create_perf_counters(), cct };
  cct->get_perfcounters_collection()->add(logger.get());
  return logger;
}

} // namespace queue_counters

namespace throttle_counters {

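// Create and register the perf counters for the scheduler-wide throttler.
// As above, returns an empty ref when throttler perf counters are disabled.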
PerfCountersRef build(CephContext *cct, const std::string& name)
{
  if (!cct->_conf->throttler_perf_counter) {
    return {};
  }

  PerfCountersBuilder b(cct, name, l_first, l_last);
  b.add_u64(l_throttle, "throttle", "Requests throttled");
  b.add_u64(l_outstanding, "outstanding", "Outstanding Requests");

  auto logger = PerfCountersRef{ b.create_perf_counters(), cct };
  cct->get_perfcounters_collection()->add(logger.get());
  return logger;
}

} // namespace throttle_counters