//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

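// Adapts ThreadState to the DDCallback interface expected by the common
// deadlock detector: it supplies the per-physical-thread (pt) and
// per-logical-thread (lt) detector state and lets the detector capture
// stacks and thread ids.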
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

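// Attaches deadlock detector state to a SyncVar and records the mutex id
// so the detector can refer back to this mutex in reports.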
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

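// Reports a mutex API misuse (double lock, bad unlock, ...) for the mutex
// identified by mid at address addr.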
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

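// Destruction must report a still-locked mutex before SyncVar::Reset wipes
// the state, and then imitates a write to the mutex memory to catch
// unlock-vs-destroy races.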
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if (s->is_linker_init) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u32 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

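// Write lock acquisition. Acquiring the lock synchronizes with both prior
// write unlocks (s->clock) and prior read unlocks (s->read_clock),
// establishing happens-before with everything done under the mutex.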
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->is_broken) {
    s->is_broken = true;
    report_double_lock = true;
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
    Callback cb(thr, pc);
    if (!try_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

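// Shared (reader) lock. Readers synchronize only with prior write unlocks
// (s->clock); read unlocks are accumulated separately in s->read_clock, so
// concurrent readers do not synchronize with each other.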
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    if (!trylock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

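// Read unlock: releases the reader's clock into s->read_clock, which a
// subsequent writer acquires in MutexLock.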
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

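// Used when the caller does not know whether it holds the mutex in read or
// write mode (e.g. pthread_rwlock_unlock): the mode is inferred from the
// recorded owner_tid.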
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  u64 mid = s->GetId();
  s->mtx.Unlock();
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}

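// Generic acquire on an arbitrary address: imports the vector clock that was
// previously released at addr, if any. Together with Release/ReleaseStore
// this implements happens-before edges for atomics and annotations.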
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
  if (!s)
    return;
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

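// Helper for AcquireGlobal: merges the current epoch of a running thread (or
// the final epoch of a finished one) into the acquiring thread's clock.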
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

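// AfterSleep snapshots every thread's epoch into last_sleep_clock. Reports
// use this to flag racing accesses that are ordered only by a sleep
// ("as if synchronized via sleep").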
#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

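// The *Impl helpers below perform the raw vector clock operations; callers
// hold the corresponding SyncVar lock. thr->clock.set(...) first refreshes
// the thread's own component with its current epoch.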
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

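// Converts a DDReport (a cycle of mutex acquisitions found by the deadlock
// detector) into a TSan report with the involved threads, mutexes, and
// lock-site stacks.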
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan