// ceph/src/rgw/rgw_dmclock_scheduler_ctx.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2019 Red Hat, Inc.
 * (C) 2019 SUSE Linux LLC
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 */

#include "rgw_dmclock_scheduler_ctx.h"
16 namespace rgw::dmclock
{
18 ClientConfig::ClientConfig(CephContext
*cct
)
23 ClientInfo
* ClientConfig::operator()(client_id client
)
25 return &clients
[static_cast<size_t>(client
)];
28 const char** ClientConfig::get_tracked_conf_keys() const
30 static const char* keys
[] = {
31 "rgw_dmclock_admin_res",
32 "rgw_dmclock_admin_wgt",
33 "rgw_dmclock_admin_lim",
34 "rgw_dmclock_auth_res",
35 "rgw_dmclock_auth_wgt",
36 "rgw_dmclock_auth_lim",
37 "rgw_dmclock_data_res",
38 "rgw_dmclock_data_wgt",
39 "rgw_dmclock_data_lim",
40 "rgw_dmclock_metadata_res",
41 "rgw_dmclock_metadata_wgt",
42 "rgw_dmclock_metadata_lim",
43 "rgw_max_concurrent_requests",
49 void ClientConfig::update(const ConfigProxy
& conf
)
52 static_assert(0 == static_cast<int>(client_id::admin
));
53 clients
.emplace_back(conf
.get_val
<double>("rgw_dmclock_admin_res"),
54 conf
.get_val
<double>("rgw_dmclock_admin_wgt"),
55 conf
.get_val
<double>("rgw_dmclock_admin_lim"));
56 static_assert(1 == static_cast<int>(client_id::auth
));
57 clients
.emplace_back(conf
.get_val
<double>("rgw_dmclock_auth_res"),
58 conf
.get_val
<double>("rgw_dmclock_auth_wgt"),
59 conf
.get_val
<double>("rgw_dmclock_auth_lim"));
60 static_assert(2 == static_cast<int>(client_id::data
));
61 clients
.emplace_back(conf
.get_val
<double>("rgw_dmclock_data_res"),
62 conf
.get_val
<double>("rgw_dmclock_data_wgt"),
63 conf
.get_val
<double>("rgw_dmclock_data_lim"));
64 static_assert(3 == static_cast<int>(client_id::metadata
));
65 clients
.emplace_back(conf
.get_val
<double>("rgw_dmclock_metadata_res"),
66 conf
.get_val
<double>("rgw_dmclock_metadata_wgt"),
67 conf
.get_val
<double>("rgw_dmclock_metadata_lim"));
70 void ClientConfig::handle_conf_change(const ConfigProxy
& conf
,
71 const std::set
<std::string
>& changed
)
76 ClientCounters::ClientCounters(CephContext
*cct
)
78 clients
[static_cast<size_t>(client_id::admin
)] =
79 queue_counters::build(cct
, "dmclock-admin");
80 clients
[static_cast<size_t>(client_id::auth
)] =
81 queue_counters::build(cct
, "dmclock-auth");
82 clients
[static_cast<size_t>(client_id::data
)] =
83 queue_counters::build(cct
, "dmclock-data");
84 clients
[static_cast<size_t>(client_id::metadata
)] =
85 queue_counters::build(cct
, "dmclock-metadata");
86 clients
[static_cast<size_t>(client_id::count
)] =
87 throttle_counters::build(cct
, "dmclock-scheduler");
90 void inc(ClientSums
& sums
, client_id client
, Cost cost
)
92 auto& sum
= sums
[static_cast<size_t>(client
)];
97 void on_cancel(PerfCounters
*c
, const ClientSum
& sum
)
100 c
->dec(queue_counters::l_qlen
, sum
.count
);
101 c
->inc(queue_counters::l_cancel
, sum
.count
);
104 c
->dec(queue_counters::l_cost
, sum
.cost
);
105 c
->inc(queue_counters::l_cancel_cost
, sum
.cost
);
109 void on_process(PerfCounters
* c
, const ClientSum
& rsum
, const ClientSum
& psum
)
112 c
->inc(queue_counters::l_res
, rsum
.count
);
115 c
->inc(queue_counters::l_res_cost
, rsum
.cost
);
118 c
->inc(queue_counters::l_prio
, psum
.count
);
121 c
->inc(queue_counters::l_prio_cost
, psum
.cost
);
123 if (rsum
.count
+ psum
.count
) {
124 c
->dec(queue_counters::l_qlen
, rsum
.count
+ psum
.count
);
126 if (rsum
.cost
+ psum
.cost
) {
127 c
->dec(queue_counters::l_cost
, rsum
.cost
+ psum
.cost
);
130 } // namespace rgw::dmclock
132 namespace queue_counters
{
134 PerfCountersRef
build(CephContext
*cct
, const std::string
& name
)
136 if (!cct
->_conf
->throttler_perf_counter
) {
140 PerfCountersBuilder
b(cct
, name
, l_first
, l_last
);
141 b
.add_u64(l_qlen
, "qlen", "Queue size");
142 b
.add_u64(l_cost
, "cost", "Cost of queued requests");
143 b
.add_u64_counter(l_res
, "res", "Requests satisfied by reservation");
144 b
.add_u64_counter(l_res_cost
, "res_cost", "Cost satisfied by reservation");
145 b
.add_u64_counter(l_prio
, "prio", "Requests satisfied by priority");
146 b
.add_u64_counter(l_prio_cost
, "prio_cost", "Cost satisfied by priority");
147 b
.add_u64_counter(l_limit
, "limit", "Requests rejected by limit");
148 b
.add_u64_counter(l_limit_cost
, "limit_cost", "Cost rejected by limit");
149 b
.add_u64_counter(l_cancel
, "cancel", "Cancels");
150 b
.add_u64_counter(l_cancel_cost
, "cancel_cost", "Canceled cost");
151 b
.add_time_avg(l_res_latency
, "res latency", "Reservation latency");
152 b
.add_time_avg(l_prio_latency
, "prio latency", "Priority latency");
154 auto logger
= PerfCountersRef
{ b
.create_perf_counters(), cct
};
155 cct
->get_perfcounters_collection()->add(logger
.get());
159 } // namespace queue_counters
161 namespace throttle_counters
{
163 PerfCountersRef
build(CephContext
*cct
, const std::string
& name
)
165 if (!cct
->_conf
->throttler_perf_counter
) {
169 PerfCountersBuilder
b(cct
, name
, l_first
, l_last
);
170 b
.add_u64(l_throttle
, "throttle", "Requests throttled");
172 auto logger
= PerfCountersRef
{ b
.create_perf_counters(), cct
};
173 cct
->get_perfcounters_collection()->add(logger
.get());
177 } // namespace throttle_counters