]> git.proxmox.com Git - ceph.git/blob - ceph/src/common/lockdep.cc
import quincy beta 17.1.0
[ceph.git] / ceph / src / common / lockdep.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2008-2011 New Dream Network
7 *
8 * This is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License version 2.1, as published by the Free Software
11 * Foundation. See file COPYING.
12 *
13 */
14 #include "lockdep.h"
15 #include <bitset>
16 #include "common/ceph_context.h"
17 #include "common/dout.h"
18 #include "common/valgrind.h"
19
/******* Constants **********/
// Log through the registered CephContext on the "lockdep" subsystem at
// verbosity v.  Safe only after lockdep_register_ceph_context().
#define lockdep_dout(v) lsubdout(g_lockdep_ceph_ctx, lockdep, v)
// Number of innermost stack frames to skip when capturing a backtrace
// (presumably the backtrace ctor + the lockdep frame itself -- confirm).
#define BACKTRACE_SKIP 2
23
24 /******* Globals **********/
bool g_lockdep;

// RAII guard: a static instance of this struct disables lockdep when
// static destructors run at module teardown, so late lock operations
// during shutdown stop being tracked.
struct lockdep_stopper_t {
  ~lockdep_stopper_t() {
    g_lockdep = false;
  }
};
32
33
static pthread_mutex_t lockdep_mutex = PTHREAD_MUTEX_INITIALIZER;  // guards all lockdep state below
static CephContext *g_lockdep_ceph_ctx = NULL;  // context used for logging/config; NULL = not registered
static lockdep_stopper_t lockdep_stopper;       // static dtor disables lockdep at exit
static ceph::unordered_map<std::string, int> lock_ids;  // lock name -> id
static std::map<int, std::string> lock_names;           // lock id -> name
static std::map<int, int> lock_refs;                    // lock id -> registration refcount
static constexpr size_t MAX_LOCKS = 128 * 1024; // increase me as needed
static std::bitset<MAX_LOCKS> free_ids; // bit set = free
// per-thread table of currently held locks: id -> backtrace captured at
// acquisition time, or NULL when backtrace collection is off
static ceph::unordered_map<pthread_t, std::map<int,ceph::BackTrace*> > held;
static constexpr size_t NR_LOCKS = 4096; // the initial number of locks
static std::vector<std::bitset<MAX_LOCKS>> follows(NR_LOCKS); // follows[a][b] means b taken after a
static std::vector<std::map<int,ceph::BackTrace *>> follows_bt(NR_LOCKS); // backtrace for each recorded edge
// upper bound of lock id
// NOTE(review): not static, so visible outside this TU -- confirm intended
unsigned current_maxid;
int last_freed_id = -1;       // most recently freed id for quick reuse; -1 = none
static bool free_ids_inited;  // free_ids has been initialized once per process
50
51 static bool lockdep_force_backtrace()
52 {
53 return (g_lockdep_ceph_ctx != NULL &&
54 g_lockdep_ceph_ctx->_conf->lockdep_force_backtrace);
55 }
56
57 /******* Functions **********/
/******* Functions **********/
// Enable lockdep and attach the CephContext used for logging.  Only the
// first registration takes effect; further calls are no-ops until the
// registered context is unregistered again.
void lockdep_register_ceph_context(CephContext *cct)
{
  static_assert((MAX_LOCKS > 0) && (MAX_LOCKS % 8 == 0),
                "lockdep's MAX_LOCKS needs to be divisible by 8 to operate correctly.");
  pthread_mutex_lock(&lockdep_mutex);
  if (g_lockdep_ceph_ctx == NULL) {
    // these globals are read without the mutex elsewhere; tell valgrind
    // the race is benign
    ANNOTATE_BENIGN_RACE_SIZED(&g_lockdep_ceph_ctx, sizeof(g_lockdep_ceph_ctx),
                               "lockdep cct");
    ANNOTATE_BENIGN_RACE_SIZED(&g_lockdep, sizeof(g_lockdep),
                               "lockdep enabled");
    g_lockdep = true;
    g_lockdep_ceph_ctx = cct;
    lockdep_dout(1) << "lockdep start" << dendl;
    if (!free_ids_inited) {
      free_ids_inited = true;
      // mark every id as free (bit set = free); done only once per
      // process so ids held by still-registered locks survive a
      // stop/start cycle
      free_ids.set();
    }
  }
  pthread_mutex_unlock(&lockdep_mutex);
}
79
80 void lockdep_unregister_ceph_context(CephContext *cct)
81 {
82 pthread_mutex_lock(&lockdep_mutex);
83 if (cct == g_lockdep_ceph_ctx) {
84 lockdep_dout(1) << "lockdep stop" << dendl;
85 // this cct is going away; shut it down!
86 g_lockdep = false;
87 g_lockdep_ceph_ctx = NULL;
88
89 // blow away all of our state, too, in case it starts up again.
90 for (unsigned i = 0; i < current_maxid; ++i) {
91 for (unsigned j = 0; j < current_maxid; ++j) {
92 delete follows_bt[i][j];
93 }
94 }
95
96 held.clear();
97 lock_names.clear();
98 lock_ids.clear();
99 std::for_each(follows.begin(), std::next(follows.begin(), current_maxid),
100 [](auto& follow) { follow.reset(); });
101 std::for_each(follows_bt.begin(), std::next(follows_bt.begin(), current_maxid),
102 [](auto& follow_bt) { follow_bt = {}; });
103 }
104 pthread_mutex_unlock(&lockdep_mutex);
105 }
106
107 int lockdep_dump_locks()
108 {
109 pthread_mutex_lock(&lockdep_mutex);
110 if (!g_lockdep)
111 goto out;
112
113 for (auto p = held.begin(); p != held.end(); ++p) {
114 lockdep_dout(0) << "--- thread " << p->first << " ---" << dendl;
115 for (auto q = p->second.begin();
116 q != p->second.end();
117 ++q) {
118 lockdep_dout(0) << " * " << lock_names[q->first] << "\n";
119 if (q->second)
120 *_dout << *(q->second);
121 *_dout << dendl;
122 }
123 }
124 out:
125 pthread_mutex_unlock(&lockdep_mutex);
126 return 0;
127 }
128
129 int lockdep_get_free_id(void)
130 {
131 // if there's id known to be freed lately, reuse it
132 if (last_freed_id >= 0 &&
133 free_ids.test(last_freed_id)) {
134 int tmp = last_freed_id;
135 last_freed_id = -1;
136 free_ids.reset(tmp);
137 lockdep_dout(1) << "lockdep reusing last freed id " << tmp << dendl;
138 return tmp;
139 }
140
141 // walk through entire array and locate nonzero char, then find
142 // actual bit.
143 for (size_t i = 0; i < free_ids.size(); ++i) {
144 if (free_ids.test(i)) {
145 free_ids.reset(i);
146 return i;
147 }
148 }
149
150 // not found
151 lockdep_dout(0) << "failing miserably..." << dendl;
152 return -1;
153 }
154
// Look up (or allocate) the id for lock `name` and bump its refcount.
// Returns -1 when lockdep is disabled; aborts when the id pool is
// exhausted.  Caller must hold lockdep_mutex.
static int _lockdep_register(const char *name)
{
  int id = -1;

  if (!g_lockdep)
    return id;
  ceph::unordered_map<std::string, int>::iterator p = lock_ids.find(name);
  if (p == lock_ids.end()) {
    id = lockdep_get_free_id();
    if (id < 0) {
      // out of ids: dump every registered lock, then die
      lockdep_dout(0) << "ERROR OUT OF IDS .. have 0"
                      << " max " << MAX_LOCKS << dendl;
      for (auto& p : lock_names) {
        lockdep_dout(0) << " lock " << p.first << " " << p.second << dendl;
      }
      ceph_abort();
    }
    if (current_maxid <= (unsigned)id) {
      current_maxid = (unsigned)id + 1;
      // grow the dependency tables lazily, one slot ahead of the
      // highest id in use
      if (current_maxid == follows.size()) {
        follows.resize(current_maxid + 1);
        follows_bt.resize(current_maxid + 1);
      }
    }
    lock_ids[name] = id;
    lock_names[id] = name;
    lockdep_dout(10) << "registered '" << name << "' as " << id << dendl;
  } else {
    // already known; just reuse its id
    id = p->second;
    lockdep_dout(20) << "had '" << name << "' as " << id << dendl;
  }

  ++lock_refs[id];

  return id;
}
191
192 int lockdep_register(const char *name)
193 {
194 int id;
195
196 pthread_mutex_lock(&lockdep_mutex);
197 id = _lockdep_register(name);
198 pthread_mutex_unlock(&lockdep_mutex);
199 return id;
200 }
201
// Drop one reference to lock `id`.  When the last reference goes away,
// erase the lock's name/id mappings and every dependency edge touching
// it, and return the id to the free pool.
void lockdep_unregister(int id)
{
  if (id < 0) {
    // was never registered (lockdep disabled at registration time)
    return;
  }

  pthread_mutex_lock(&lockdep_mutex);

  std::string name;
  auto p = lock_names.find(id);
  if (p == lock_names.end())
    name = "unknown";
  else
    name = p->second;

  int &refs = lock_refs[id];
  if (--refs == 0) {
    if (p != lock_names.end()) {
      // reset dependency ordering
      follows[id].reset();
      for (unsigned i=0; i<current_maxid; ++i) {
        // drop edges in both directions: id -> i and i -> id
        delete follows_bt[id][i];
        follows_bt[id][i] = NULL;

        delete follows_bt[i][id];
        follows_bt[i][id] = NULL;
        follows[i].reset(id);
      }

      lockdep_dout(10) << "unregistered '" << name << "' from " << id << dendl;
      lock_ids.erase(p->second);
      lock_names.erase(id);
    }
    lock_refs.erase(id);
    // make the id reusable, and remember it for the fast path in
    // lockdep_get_free_id()
    free_ids.set(id);
    last_freed_id = id;
  } else if (g_lockdep) {
    lockdep_dout(20) << "have " << refs << " of '" << name << "' " <<
      "from " << id << dendl;
  }
  pthread_mutex_unlock(&lockdep_mutex);
}
244
245
// does b follow a?  I.e. is there already a direct or transitive
// dependency edge a -> ... -> b in the graph?  On success, logs each
// edge of the discovered chain as the recursion unwinds.  Caller must
// hold lockdep_mutex.
static bool does_follow(int a, int b)
{
  if (follows[a].test(b)) {
    // direct edge a -> b
    lockdep_dout(0) << "\n";
    *_dout << "------------------------------------" << "\n";
    *_dout << "existing dependency " << lock_names[a] << " (" << a << ") -> "
           << lock_names[b] << " (" << b << ") at:\n";
    if (follows_bt[a][b]) {
      follows_bt[a][b]->print(*_dout);
    }
    *_dout << dendl;
    return true;
  }

  // recurse through each successor of a, looking for a path to b
  for (unsigned i=0; i<current_maxid; i++) {
    if (follows[a].test(i) &&
        does_follow(i, b)) {
      lockdep_dout(0) << "existing intermediate dependency " << lock_names[a]
                      << " (" << a << ") -> " << lock_names[i] << " (" << i << ") at:\n";
      if (follows_bt[a][i]) {
        follows_bt[a][i]->print(*_dout);
      }
      *_dout << dendl;
      return true;
    }
  }

  return false;
}
276
// Record that the calling thread is about to take lock `id` (`name`).
// Checks the new acquisition against every lock the thread already
// holds: aborts on a non-recursive re-lock and on any ordering cycle;
// otherwise records the new held-lock -> id dependency edges.
// Returns the (possibly freshly registered) lock id.
int lockdep_will_lock(const char *name, int id, bool force_backtrace,
                      bool recursive)
{
  pthread_t p = pthread_self();

  pthread_mutex_lock(&lockdep_mutex);
  if (!g_lockdep) {
    pthread_mutex_unlock(&lockdep_mutex);
    return id;
  }

  if (id < 0)
    id = _lockdep_register(name);

  lockdep_dout(20) << "_will_lock " << name << " (" << id << ")" << dendl;

  // check dependency graph
  auto& m = held[p];
  for (auto p = m.begin(); p != m.end(); ++p) {
    if (p->first == id) {
      // this thread already holds the lock
      if (!recursive) {
        lockdep_dout(0) << "\n";
        *_dout << "recursive lock of " << name << " (" << id << ")\n";
        auto bt = new ceph::ClibBackTrace(BACKTRACE_SKIP);
        bt->print(*_dout);
        if (p->second) {
          *_dout << "\npreviously locked at\n";
          p->second->print(*_dout);
        }
        delete bt;
        *_dout << dendl;
        ceph_abort();
      }
    } else if (!follows[p->first].test(id)) {
      // new dependency

      // did we just create a cycle?
      if (does_follow(id, p->first)) {
        auto bt = new ceph::ClibBackTrace(BACKTRACE_SKIP);
        lockdep_dout(0) << "new dependency " << lock_names[p->first]
                << " (" << p->first << ") -> " << name << " (" << id << ")"
                << " creates a cycle at\n";
        bt->print(*_dout);
        *_dout << dendl;

        lockdep_dout(0) << "btw, i am holding these locks:" << dendl;
        for (auto q = m.begin(); q != m.end(); ++q) {
          lockdep_dout(0) << " " << lock_names[q->first] << " (" << q->first << ")" << dendl;
          if (q->second) {
            lockdep_dout(0) << " ";
            q->second->print(*_dout);
            *_dout << dendl;
          }
        }

        lockdep_dout(0) << "\n" << dendl;

        // don't add this dependency, or we'll get a cycle in the graph, and
        // does_follow() won't terminate.

        ceph_abort(); // actually, we should just die here.
      } else {
        // safe: record the edge held-lock -> id, optionally with a
        // backtrace of where the edge was created
        ceph::BackTrace* bt = NULL;
        if (force_backtrace || lockdep_force_backtrace()) {
          bt = new ceph::ClibBackTrace(BACKTRACE_SKIP);
        }
        follows[p->first].set(id);
        follows_bt[p->first][id] = bt;
        lockdep_dout(10) << lock_names[p->first] << " -> " << name << " at" << dendl;
        //bt->print(*_dout);
      }
    }
  }
  pthread_mutex_unlock(&lockdep_mutex);
  return id;
}
353
354 int lockdep_locked(const char *name, int id, bool force_backtrace)
355 {
356 pthread_t p = pthread_self();
357
358 pthread_mutex_lock(&lockdep_mutex);
359 if (!g_lockdep)
360 goto out;
361 if (id < 0)
362 id = _lockdep_register(name);
363
364 lockdep_dout(20) << "_locked " << name << dendl;
365 if (force_backtrace || lockdep_force_backtrace())
366 held[p][id] = new ceph::ClibBackTrace(BACKTRACE_SKIP);
367 else
368 held[p][id] = 0;
369 out:
370 pthread_mutex_unlock(&lockdep_mutex);
371 return id;
372 }
373
374 int lockdep_will_unlock(const char *name, int id)
375 {
376 pthread_t p = pthread_self();
377
378 if (id < 0) {
379 //id = lockdep_register(name);
380 ceph_assert(id == -1);
381 return id;
382 }
383
384 pthread_mutex_lock(&lockdep_mutex);
385 if (!g_lockdep)
386 goto out;
387 lockdep_dout(20) << "_will_unlock " << name << dendl;
388
389 // don't assert.. lockdep may be enabled at any point in time
390 //assert(held.count(p));
391 //assert(held[p].count(id));
392
393 delete held[p][id];
394 held[p].erase(id);
395 out:
396 pthread_mutex_unlock(&lockdep_mutex);
397 return id;
398 }
399
400