]> git.proxmox.com Git - ceph.git/blob - ceph/src/tools/rbd_mirror/Throttler.cc
import 15.2.0 Octopus source
[ceph.git] / ceph / src / tools / rbd_mirror / Throttler.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2016 SUSE LINUX GmbH
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14
15 #include "Throttler.h"
16 #include "common/Formatter.h"
17 #include "common/debug.h"
18 #include "common/errno.h"
19 #include "librbd/Utils.h"
20
21 #define dout_context g_ceph_context
22 #define dout_subsys ceph_subsys_rbd_mirror
23 #undef dout_prefix
24 #define dout_prefix *_dout << "rbd::mirror::Throttler:: " << this \
25 << " " << __func__ << ": "
26
27 namespace rbd {
28 namespace mirror {
29
// Constructs a throttler whose concurrency limit is driven by the config
// option named by config_key (a value of 0 disables throttling entirely —
// see start_op).  Registers itself as a config observer so later changes
// to the option are applied at runtime via handle_conf_change().
template <typename I>
Throttler<I>::Throttler(CephContext *cct, const std::string &config_key)
  : m_cct(cct), m_config_key(config_key),
    // null-terminated key list returned by get_tracked_conf_keys()
    m_config_keys{m_config_key.c_str(), nullptr},
    m_lock(ceph::make_mutex(
      librbd::util::unique_lock_name("rbd::mirror::Throttler", this))),
    // seed the limit from the current config value
    m_max_concurrent_ops(cct->_conf.get_val<uint64_t>(m_config_key)) {
  dout(20) << m_config_key << "=" << m_max_concurrent_ops << dendl;
  m_cct->_conf.add_observer(this);
}
40
41 template <typename I>
42 Throttler<I>::~Throttler() {
43 m_cct->_conf.remove_observer(this);
44
45 std::lock_guard locker{m_lock};
46 ceph_assert(m_inflight_ops.empty());
47 ceph_assert(m_queue.empty());
48 }
49
50 template <typename I>
51 void Throttler<I>::start_op(const std::string &ns,
52 const std::string &id_,
53 Context *on_start) {
54 Id id{ns, id_};
55
56 dout(20) << "id=" << id << dendl;
57
58 int r = 0;
59 {
60 std::lock_guard locker{m_lock};
61
62 if (m_inflight_ops.count(id) > 0) {
63 dout(20) << "duplicate for already started op " << id << dendl;
64 } else if (m_queued_ops.count(id) > 0) {
65 dout(20) << "duplicate for already queued op " << id << dendl;
66 std::swap(m_queued_ops[id], on_start);
67 r = -ENOENT;
68 } else if (m_max_concurrent_ops == 0 ||
69 m_inflight_ops.size() < m_max_concurrent_ops) {
70 ceph_assert(m_queue.empty());
71 m_inflight_ops.insert(id);
72 dout(20) << "ready to start op for " << id << " ["
73 << m_inflight_ops.size() << "/" << m_max_concurrent_ops << "]"
74 << dendl;
75 } else {
76 m_queue.push_back(id);
77 std::swap(m_queued_ops[id], on_start);
78 dout(20) << "op for " << id << " has been queued" << dendl;
79 }
80 }
81
82 if (on_start != nullptr) {
83 on_start->complete(r);
84 }
85 }
86
87 template <typename I>
88 bool Throttler<I>::cancel_op(const std::string &ns,
89 const std::string &id_) {
90 Id id{ns, id_};
91
92 dout(20) << "id=" << id << dendl;
93
94 Context *on_start = nullptr;
95 {
96 std::lock_guard locker{m_lock};
97 auto it = m_queued_ops.find(id);
98 if (it != m_queued_ops.end()) {
99 dout(20) << "canceled queued op for " << id << dendl;
100 m_queue.remove(id);
101 on_start = it->second;
102 m_queued_ops.erase(it);
103 }
104 }
105
106 if (on_start == nullptr) {
107 return false;
108 }
109
110 on_start->complete(-ECANCELED);
111 return true;
112 }
113
114 template <typename I>
115 void Throttler<I>::finish_op(const std::string &ns,
116 const std::string &id_) {
117 Id id{ns, id_};
118
119 dout(20) << "id=" << id << dendl;
120
121 if (cancel_op(ns, id_)) {
122 return;
123 }
124
125 Context *on_start = nullptr;
126 {
127 std::lock_guard locker{m_lock};
128
129 m_inflight_ops.erase(id);
130
131 if (m_inflight_ops.size() < m_max_concurrent_ops && !m_queue.empty()) {
132 auto id = m_queue.front();
133 auto it = m_queued_ops.find(id);
134 ceph_assert(it != m_queued_ops.end());
135 m_inflight_ops.insert(id);
136 dout(20) << "ready to start op for " << id << " ["
137 << m_inflight_ops.size() << "/" << m_max_concurrent_ops << "]"
138 << dendl;
139 on_start = it->second;
140 m_queued_ops.erase(it);
141 m_queue.pop_front();
142 }
143 }
144
145 if (on_start != nullptr) {
146 on_start->complete(0);
147 }
148 }
149
150 template <typename I>
151 void Throttler<I>::drain(const std::string &ns, int r) {
152 dout(20) << "ns=" << ns << dendl;
153
154 std::map<Id, Context *> queued_ops;
155 {
156 std::lock_guard locker{m_lock};
157 for (auto it = m_queued_ops.begin(); it != m_queued_ops.end(); ) {
158 if (it->first.first == ns) {
159 queued_ops[it->first] = it->second;
160 m_queue.remove(it->first);
161 it = m_queued_ops.erase(it);
162 } else {
163 it++;
164 }
165 }
166 for (auto it = m_inflight_ops.begin(); it != m_inflight_ops.end(); ) {
167 if (it->first == ns) {
168 dout(20) << "inflight_op " << *it << dendl;
169 it = m_inflight_ops.erase(it);
170 } else {
171 it++;
172 }
173 }
174 }
175
176 for (auto &it : queued_ops) {
177 dout(20) << "queued_op " << it.first << dendl;
178 it.second->complete(r);
179 }
180 }
181
182 template <typename I>
183 void Throttler<I>::set_max_concurrent_ops(uint32_t max) {
184 dout(20) << "max=" << max << dendl;
185
186 std::list<Context *> ops;
187 {
188 std::lock_guard locker{m_lock};
189 m_max_concurrent_ops = max;
190
191 // Start waiting ops in the case of available free slots
192 while ((m_max_concurrent_ops == 0 ||
193 m_inflight_ops.size() < m_max_concurrent_ops) &&
194 !m_queue.empty()) {
195 auto id = m_queue.front();
196 m_inflight_ops.insert(id);
197 dout(20) << "ready to start op for " << id << " ["
198 << m_inflight_ops.size() << "/" << m_max_concurrent_ops << "]"
199 << dendl;
200 auto it = m_queued_ops.find(id);
201 ceph_assert(it != m_queued_ops.end());
202 ops.push_back(it->second);
203 m_queued_ops.erase(it);
204 m_queue.pop_front();
205 }
206 }
207
208 for (const auto& ctx : ops) {
209 ctx->complete(0);
210 }
211 }
212
// Dumps the throttler's counters into f for status output: the configured
// limit, the number of inflight ops and the number of queued ops.  The
// dump order is part of the emitted format, so it must not change.
template <typename I>
void Throttler<I>::print_status(ceph::Formatter *f) {
  dout(20) << dendl;

  std::lock_guard locker{m_lock};

  f->dump_int("max_parallel_requests", m_max_concurrent_ops);
  f->dump_int("running_requests", m_inflight_ops.size());
  f->dump_int("waiting_requests", m_queue.size());
}
223
// Config-observer hook: returns the null-terminated array of config keys
// this throttler tracks (just m_config_key, built in the constructor).
template <typename I>
const char** Throttler<I>::get_tracked_conf_keys() const {
  return m_config_keys;
}
228
229 template <typename I>
230 void Throttler<I>::handle_conf_change(const ConfigProxy& conf,
231 const set<string> &changed) {
232 if (changed.count(m_config_key)) {
233 set_max_concurrent_ops(conf.get_val<uint64_t>(m_config_key));
234 }
235 }
236
237 } // namespace mirror
238 } // namespace rbd
239
240 template class rbd::mirror::Throttler<librbd::ImageCtx>;