]>
Commit | Line | Data |
---|---|---|
1a4d82fc JJ |
1 | //===-- sanitizer_deadlock_detector1.cc -----------------------------------===// |
2 | // | |
3 | // The LLVM Compiler Infrastructure | |
4 | // | |
5 | // This file is distributed under the University of Illinois Open Source | |
6 | // License. See LICENSE.TXT for details. | |
7 | // | |
8 | //===----------------------------------------------------------------------===// | |
9 | // | |
10 | // Deadlock detector implementation based on NxN adjacency bit matrix. | |
11 | // | |
12 | //===----------------------------------------------------------------------===// | |
13 | ||
14 | #include "sanitizer_deadlock_detector_interface.h" | |
15 | #include "sanitizer_deadlock_detector.h" | |
16 | #include "sanitizer_allocator_internal.h" | |
17 | #include "sanitizer_placement_new.h" | |
18 | #include "sanitizer_mutex.h" | |
19 | ||
20 | #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1 | |
21 | ||
22 | namespace __sanitizer { | |
23 | ||
// Bit vector used for the NxN lock-graph adjacency matrix rows.
typedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.
25 | ||
// Per-OS-thread state. Detector v1 keeps none: CreatePhysicalThread below
// returns nullptr, so this struct is intentionally empty.
struct DDPhysicalThread {
};
28 | ||
// Per-logical-thread state of the deadlock detector.
struct DDLogicalThread {
  u64 ctx;                       // Opaque client context from CreateLogicalThread.
  DeadlockDetectorTLS<DDBV> dd;  // Thread-local view of the lock graph epoch.
  DDReport rep;                  // Storage for the most recent deadlock report.
  bool report_pending;           // True when rep holds a not-yet-fetched report.
};
35 | ||
// Deadlock detector v1: implements the DDetector interface on top of
// DeadlockDetector<DDBV> (NxN adjacency bit matrix of lock-order edges).
struct DD : public DDetector {
  SpinMutex mtx;              // Serializes mutations of the global graph 'dd'.
  DeadlockDetector<DDBV> dd;  // The global lock-order graph.
  DDFlags flags;              // Copy of the client-provided flags.

  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread() override;
  void DestroyPhysicalThread(DDPhysicalThread *pt) override;

  DDLogicalThread *CreateLogicalThread(u64 ctx) override;
  void DestroyLogicalThread(DDLogicalThread *lt) override;

  void MutexInit(DDCallback *cb, DDMutex *m) override;
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock) override;
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexDestroy(DDCallback *cb, DDMutex *m) override;

  DDReport *GetReport(DDCallback *cb) override;

  // Internal helpers (callers hold 'mtx').
  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);
};
61 | ||
62 | DDetector *DDetector::Create(const DDFlags *flags) { | |
63 | (void)flags; | |
64 | void *mem = MmapOrDie(sizeof(DD), "deadlock detector"); | |
65 | return new(mem) DD(flags); | |
66 | } | |
67 | ||
// Copies the client-provided flags and resets the lock graph to empty.
DD::DD(const DDFlags *flags)
    : flags(*flags) {
  dd.clear();
}
72 | ||
// v1 keeps no per-physical-thread state, so no object is allocated.
DDPhysicalThread* DD::CreatePhysicalThread() {
  return nullptr;
}
76 | ||
// Nothing to free: CreatePhysicalThread returned nullptr.
void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}
79 | ||
80 | DDLogicalThread* DD::CreateLogicalThread(u64 ctx) { | |
81 | DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt)); | |
82 | lt->ctx = ctx; | |
83 | lt->dd.clear(); | |
84 | lt->report_pending = false; | |
85 | return lt; | |
86 | } | |
87 | ||
// Tears down logical-thread state: explicit destructor call first (the
// object was built by hand in CreateLogicalThread), then free the raw
// InternalAlloc'ed memory.
void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}
92 | ||
93 | void DD::MutexInit(DDCallback *cb, DDMutex *m) { | |
94 | m->id = 0; | |
95 | m->stk = cb->Unwind(); | |
96 | } | |
97 | ||
98 | void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) { | |
99 | if (!dd.nodeBelongsToCurrentEpoch(m->id)) | |
100 | m->id = dd.newNode(reinterpret_cast<uptr>(m)); | |
101 | dd.ensureCurrentEpoch(<->dd); | |
102 | } | |
103 | ||
104 | void DD::MutexBeforeLock(DDCallback *cb, | |
105 | DDMutex *m, bool wlock) { | |
106 | DDLogicalThread *lt = cb->lt; | |
107 | if (lt->dd.empty()) return; // This will be the first lock held by lt. | |
108 | if (dd.hasAllEdges(<->dd, m->id)) return; // We already have all edges. | |
109 | SpinMutexLock lk(&mtx); | |
110 | MutexEnsureID(lt, m); | |
111 | if (dd.isHeld(<->dd, m->id)) | |
112 | return; // FIXME: allow this only for recursive locks. | |
113 | if (dd.onLockBefore(<->dd, m->id)) { | |
114 | // Actually add this edge now so that we have all the stack traces. | |
115 | dd.addEdges(<->dd, m->id, cb->Unwind(), cb->UniqueTid()); | |
116 | ReportDeadlock(cb, m); | |
117 | } | |
118 | } | |
119 | ||
120 | void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) { | |
121 | DDLogicalThread *lt = cb->lt; | |
5bcae85e | 122 | uptr path[20]; |
1a4d82fc | 123 | uptr len = dd.findPathToLock(<->dd, m->id, path, ARRAY_SIZE(path)); |
5bcae85e SL |
124 | if (len == 0U) { |
125 | // A cycle of 20+ locks? Well, that's a bit odd... | |
126 | Printf("WARNING: too long mutex cycle found\n"); | |
127 | return; | |
128 | } | |
1a4d82fc JJ |
129 | CHECK_EQ(m->id, path[0]); |
130 | lt->report_pending = true; | |
5bcae85e | 131 | len = Min<uptr>(len, DDReport::kMaxLoopSize); |
1a4d82fc JJ |
132 | DDReport *rep = <->rep; |
133 | rep->n = len; | |
134 | for (uptr i = 0; i < len; i++) { | |
135 | uptr from = path[i]; | |
136 | uptr to = path[(i + 1) % len]; | |
137 | DDMutex *m0 = (DDMutex*)dd.getData(from); | |
138 | DDMutex *m1 = (DDMutex*)dd.getData(to); | |
139 | ||
140 | u32 stk_from = -1U, stk_to = -1U; | |
141 | int unique_tid = 0; | |
142 | dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid); | |
143 | // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to, | |
144 | // unique_tid); | |
145 | rep->loop[i].thr_ctx = unique_tid; | |
146 | rep->loop[i].mtx_ctx0 = m0->ctx; | |
147 | rep->loop[i].mtx_ctx1 = m1->ctx; | |
148 | rep->loop[i].stk[0] = stk_to; | |
149 | rep->loop[i].stk[1] = stk_from; | |
150 | } | |
151 | } | |
152 | ||
153 | void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) { | |
154 | DDLogicalThread *lt = cb->lt; | |
155 | u32 stk = 0; | |
156 | if (flags.second_deadlock_stack) | |
157 | stk = cb->Unwind(); | |
158 | // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk); | |
159 | if (dd.onFirstLock(<->dd, m->id, stk)) | |
160 | return; | |
161 | if (dd.onLockFast(<->dd, m->id, stk)) | |
162 | return; | |
163 | ||
164 | SpinMutexLock lk(&mtx); | |
165 | MutexEnsureID(lt, m); | |
166 | if (wlock) // Only a recursive rlock may be held. | |
167 | CHECK(!dd.isHeld(<->dd, m->id)); | |
168 | if (!trylock) | |
169 | dd.addEdges(<->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid()); | |
170 | dd.onLockAfter(<->dd, m->id, stk); | |
171 | } | |
172 | ||
173 | void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) { | |
174 | // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id); | |
175 | dd.onUnlock(&cb->lt->dd, m->id); | |
176 | } | |
177 | ||
178 | void DD::MutexDestroy(DDCallback *cb, | |
179 | DDMutex *m) { | |
180 | if (!m->id) return; | |
181 | SpinMutexLock lk(&mtx); | |
182 | if (dd.nodeBelongsToCurrentEpoch(m->id)) | |
183 | dd.removeNode(m->id); | |
184 | m->id = 0; | |
185 | } | |
186 | ||
187 | DDReport *DD::GetReport(DDCallback *cb) { | |
188 | if (!cb->lt->report_pending) | |
92a42be0 | 189 | return nullptr; |
1a4d82fc JJ |
190 | cb->lt->report_pending = false; |
191 | return &cb->lt->rep; | |
192 | } | |
193 | ||
92a42be0 SL |
194 | } // namespace __sanitizer |
195 | #endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1 |