//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//
14 #include "sanitizer_deadlock_detector_interface.h"
15 #include "sanitizer_deadlock_detector.h"
16 #include "sanitizer_allocator_internal.h"
17 #include "sanitizer_placement_new.h"
18 #include "sanitizer_mutex.h"
20 #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
22 namespace __sanitizer
{
24 typedef TwoLevelBitVector
<> DDBV
; // DeadlockDetector's bit vector.
26 struct DDPhysicalThread
{
29 struct DDLogicalThread
{
31 DeadlockDetectorTLS
<DDBV
> dd
;
36 struct DD
: public DDetector
{
38 DeadlockDetector
<DDBV
> dd
;
41 explicit DD(const DDFlags
*flags
);
43 DDPhysicalThread
*CreatePhysicalThread() override
;
44 void DestroyPhysicalThread(DDPhysicalThread
*pt
) override
;
46 DDLogicalThread
*CreateLogicalThread(u64 ctx
) override
;
47 void DestroyLogicalThread(DDLogicalThread
*lt
) override
;
49 void MutexInit(DDCallback
*cb
, DDMutex
*m
) override
;
50 void MutexBeforeLock(DDCallback
*cb
, DDMutex
*m
, bool wlock
) override
;
51 void MutexAfterLock(DDCallback
*cb
, DDMutex
*m
, bool wlock
,
52 bool trylock
) override
;
53 void MutexBeforeUnlock(DDCallback
*cb
, DDMutex
*m
, bool wlock
) override
;
54 void MutexDestroy(DDCallback
*cb
, DDMutex
*m
) override
;
56 DDReport
*GetReport(DDCallback
*cb
) override
;
58 void MutexEnsureID(DDLogicalThread
*lt
, DDMutex
*m
);
59 void ReportDeadlock(DDCallback
*cb
, DDMutex
*m
);
62 DDetector
*DDetector::Create(const DDFlags
*flags
) {
64 void *mem
= MmapOrDie(sizeof(DD
), "deadlock detector");
65 return new(mem
) DD(flags
);
68 DD::DD(const DDFlags
*flags
)
73 DDPhysicalThread
* DD::CreatePhysicalThread() {
77 void DD::DestroyPhysicalThread(DDPhysicalThread
*pt
) {
80 DDLogicalThread
* DD::CreateLogicalThread(u64 ctx
) {
81 DDLogicalThread
*lt
= (DDLogicalThread
*)InternalAlloc(sizeof(*lt
));
84 lt
->report_pending
= false;
88 void DD::DestroyLogicalThread(DDLogicalThread
*lt
) {
89 lt
->~DDLogicalThread();
93 void DD::MutexInit(DDCallback
*cb
, DDMutex
*m
) {
95 m
->stk
= cb
->Unwind();
98 void DD::MutexEnsureID(DDLogicalThread
*lt
, DDMutex
*m
) {
99 if (!dd
.nodeBelongsToCurrentEpoch(m
->id
))
100 m
->id
= dd
.newNode(reinterpret_cast<uptr
>(m
));
101 dd
.ensureCurrentEpoch(<
->dd
);
104 void DD::MutexBeforeLock(DDCallback
*cb
,
105 DDMutex
*m
, bool wlock
) {
106 DDLogicalThread
*lt
= cb
->lt
;
107 if (lt
->dd
.empty()) return; // This will be the first lock held by lt.
108 if (dd
.hasAllEdges(<
->dd
, m
->id
)) return; // We already have all edges.
109 SpinMutexLock
lk(&mtx
);
110 MutexEnsureID(lt
, m
);
111 if (dd
.isHeld(<
->dd
, m
->id
))
112 return; // FIXME: allow this only for recursive locks.
113 if (dd
.onLockBefore(<
->dd
, m
->id
)) {
114 // Actually add this edge now so that we have all the stack traces.
115 dd
.addEdges(<
->dd
, m
->id
, cb
->Unwind(), cb
->UniqueTid());
116 ReportDeadlock(cb
, m
);
120 void DD::ReportDeadlock(DDCallback
*cb
, DDMutex
*m
) {
121 DDLogicalThread
*lt
= cb
->lt
;
123 uptr len
= dd
.findPathToLock(<
->dd
, m
->id
, path
, ARRAY_SIZE(path
));
125 // A cycle of 20+ locks? Well, that's a bit odd...
126 Printf("WARNING: too long mutex cycle found\n");
129 CHECK_EQ(m
->id
, path
[0]);
130 lt
->report_pending
= true;
131 len
= Min
<uptr
>(len
, DDReport::kMaxLoopSize
);
132 DDReport
*rep
= <
->rep
;
134 for (uptr i
= 0; i
< len
; i
++) {
136 uptr to
= path
[(i
+ 1) % len
];
137 DDMutex
*m0
= (DDMutex
*)dd
.getData(from
);
138 DDMutex
*m1
= (DDMutex
*)dd
.getData(to
);
140 u32 stk_from
= -1U, stk_to
= -1U;
142 dd
.findEdge(from
, to
, &stk_from
, &stk_to
, &unique_tid
);
143 // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
145 rep
->loop
[i
].thr_ctx
= unique_tid
;
146 rep
->loop
[i
].mtx_ctx0
= m0
->ctx
;
147 rep
->loop
[i
].mtx_ctx1
= m1
->ctx
;
148 rep
->loop
[i
].stk
[0] = stk_to
;
149 rep
->loop
[i
].stk
[1] = stk_from
;
153 void DD::MutexAfterLock(DDCallback
*cb
, DDMutex
*m
, bool wlock
, bool trylock
) {
154 DDLogicalThread
*lt
= cb
->lt
;
156 if (flags
.second_deadlock_stack
)
158 // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
159 if (dd
.onFirstLock(<
->dd
, m
->id
, stk
))
161 if (dd
.onLockFast(<
->dd
, m
->id
, stk
))
164 SpinMutexLock
lk(&mtx
);
165 MutexEnsureID(lt
, m
);
166 if (wlock
) // Only a recursive rlock may be held.
167 CHECK(!dd
.isHeld(<
->dd
, m
->id
));
169 dd
.addEdges(<
->dd
, m
->id
, stk
? stk
: cb
->Unwind(), cb
->UniqueTid());
170 dd
.onLockAfter(<
->dd
, m
->id
, stk
);
173 void DD::MutexBeforeUnlock(DDCallback
*cb
, DDMutex
*m
, bool wlock
) {
174 // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
175 dd
.onUnlock(&cb
->lt
->dd
, m
->id
);
178 void DD::MutexDestroy(DDCallback
*cb
,
181 SpinMutexLock
lk(&mtx
);
182 if (dd
.nodeBelongsToCurrentEpoch(m
->id
))
183 dd
.removeNode(m
->id
);
187 DDReport
*DD::GetReport(DDCallback
*cb
) {
188 if (!cb
->lt
->report_pending
)
190 cb
->lt
->report_pending
= false;
194 } // namespace __sanitizer
195 #endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1