1 //===-- tsan_interceptors.cc ----------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
11 //
12 // FIXME: move as many interceptors as possible into
13 // sanitizer_common/sanitizer_common_interceptors.inc
14 //===----------------------------------------------------------------------===//
15
16 #include "sanitizer_common/sanitizer_atomic.h"
17 #include "sanitizer_common/sanitizer_libc.h"
18 #include "sanitizer_common/sanitizer_linux.h"
19 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
20 #include "sanitizer_common/sanitizer_placement_new.h"
21 #include "sanitizer_common/sanitizer_stacktrace.h"
22 #include "sanitizer_common/sanitizer_tls_get_addr.h"
23 #include "interception/interception.h"
24 #include "tsan_interceptors.h"
25 #include "tsan_interface.h"
26 #include "tsan_platform.h"
27 #include "tsan_suppressions.h"
28 #include "tsan_rtl.h"
29 #include "tsan_mman.h"
30 #include "tsan_fd.h"
31
32 #if SANITIZER_POSIX
33 #include "sanitizer_common/sanitizer_posix.h"
34 #endif
35
36 using namespace __tsan; // NOLINT
37
38 #if SANITIZER_FREEBSD || SANITIZER_MAC
39 #define __errno_location __error
40 #define stdout __stdoutp
41 #define stderr __stderrp
42 #endif
43
44 #if SANITIZER_ANDROID
45 #define __errno_location __errno
46 #define mallopt(a, b)
47 #endif
48
49 #if SANITIZER_LINUX || SANITIZER_FREEBSD
50 #define PTHREAD_CREATE_DETACHED 1
51 #elif SANITIZER_MAC
52 #define PTHREAD_CREATE_DETACHED 2
53 #endif
54
55
56 #ifdef __mips__
57 const int kSigCount = 129;
58 #else
59 const int kSigCount = 65;
60 #endif
61
62 struct my_siginfo_t {
63 // The size is determined by looking at the sizeof of the real siginfo_t on linux.
64 u64 opaque[128 / sizeof(u64)];
65 };
66
67 #ifdef __mips__
68 struct ucontext_t {
69 u64 opaque[768 / sizeof(u64) + 1];
70 };
71 #else
72 struct ucontext_t {
73 // The size is determined by looking at the sizeof of the real ucontext_t on linux.
74 u64 opaque[936 / sizeof(u64) + 1];
75 };
76 #endif
77
78 #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
79 #define PTHREAD_ABI_BASE "GLIBC_2.3.2"
80 #elif defined(__aarch64__) || SANITIZER_PPC64V2
81 #define PTHREAD_ABI_BASE "GLIBC_2.17"
82 #endif
83
84 extern "C" int pthread_attr_init(void *attr);
85 extern "C" int pthread_attr_destroy(void *attr);
86 DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
87 extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
88 extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
89 extern "C" int pthread_setspecific(unsigned key, const void *v);
90 DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
91 DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
92 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
93 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
94 extern "C" void *pthread_self();
95 extern "C" void _exit(int status);
96 extern "C" int *__errno_location();
97 extern "C" int fileno_unlocked(void *stream);
98 extern "C" int dirfd(void *dirp);
99 #if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
100 extern "C" int mallopt(int param, int value);
101 #endif
102 extern __sanitizer_FILE *stdout, *stderr;
103 #if !SANITIZER_FREEBSD && !SANITIZER_MAC
104 const int PTHREAD_MUTEX_RECURSIVE = 1;
105 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
106 #else
107 const int PTHREAD_MUTEX_RECURSIVE = 2;
108 const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
109 #endif
110 const int EINVAL = 22;
111 const int EBUSY = 16;
112 const int EOWNERDEAD = 130;
113 #if !SANITIZER_FREEBSD && !SANITIZER_MAC
114 const int EPOLL_CTL_ADD = 1;
115 #endif
116 const int SIGILL = 4;
117 const int SIGABRT = 6;
118 const int SIGFPE = 8;
119 const int SIGSEGV = 11;
120 const int SIGPIPE = 13;
121 const int SIGTERM = 15;
122 #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC
123 const int SIGBUS = 10;
124 const int SIGSYS = 12;
125 #else
126 const int SIGBUS = 7;
127 const int SIGSYS = 31;
128 #endif
129 void *const MAP_FAILED = (void*)-1;
130 #if !SANITIZER_MAC
131 const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
132 #endif
133 const int MAP_FIXED = 0x10;
134 typedef long long_t; // NOLINT
135
136 // From /usr/include/unistd.h
137 # define F_ULOCK 0 /* Unlock a previously locked region. */
138 # define F_LOCK 1 /* Lock a region for exclusive use. */
139 # define F_TLOCK 2 /* Test and lock a region for exclusive use. */
140 # define F_TEST 3 /* Test a region for other processes locks. */
141
142 #define errno (*__errno_location())
143
144 typedef void (*sighandler_t)(int sig);
145 typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx);
146
147 #if SANITIZER_ANDROID
148 struct sigaction_t {
149 u32 sa_flags;
150 union {
151 sighandler_t sa_handler;
152 sigactionhandler_t sa_sigaction;
153 };
154 __sanitizer_sigset_t sa_mask;
155 void (*sa_restorer)();
156 };
157 #else
158 struct sigaction_t {
159 #ifdef __mips__
160 u32 sa_flags;
161 #endif
162 union {
163 sighandler_t sa_handler;
164 sigactionhandler_t sa_sigaction;
165 };
166 #if SANITIZER_FREEBSD
167 int sa_flags;
168 __sanitizer_sigset_t sa_mask;
169 #elif SANITIZER_MAC
170 __sanitizer_sigset_t sa_mask;
171 int sa_flags;
172 #else
173 __sanitizer_sigset_t sa_mask;
174 #ifndef __mips__
175 int sa_flags;
176 #endif
177 void (*sa_restorer)();
178 #endif
179 };
180 #endif
181
182 const sighandler_t SIG_DFL = (sighandler_t)0;
183 const sighandler_t SIG_IGN = (sighandler_t)1;
184 const sighandler_t SIG_ERR = (sighandler_t)-1;
185 #if SANITIZER_FREEBSD || SANITIZER_MAC
186 const int SA_SIGINFO = 0x40;
187 const int SIG_SETMASK = 3;
188 #elif defined(__mips__)
189 const int SA_SIGINFO = 8;
190 const int SIG_SETMASK = 3;
191 #else
192 const int SA_SIGINFO = 4;
193 const int SIG_SETMASK = 2;
194 #endif
195
196 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
197 (!cur_thread()->is_inited)
198
199 static sigaction_t sigactions[kSigCount];
200
201 namespace __tsan {
202 struct SignalDesc {
203 bool armed;
204 bool sigaction;
205 my_siginfo_t siginfo;
206 ucontext_t ctx;
207 };
208
209 struct ThreadSignalContext {
210 int int_signal_send;
211 atomic_uintptr_t in_blocking_func;
212 atomic_uintptr_t have_pending_signals;
213 SignalDesc pending_signals[kSigCount];
214 // emptyset and oldset are too big for stack.
215 __sanitizer_sigset_t emptyset;
216 __sanitizer_sigset_t oldset;
217 };
218
219 // The object is 64-byte aligned, because we want hot data to be located in
220 // a single cache line if possible (it's accessed in every interceptor).
221 static ALIGNED(64) char libignore_placeholder[sizeof(LibIgnore)];
222 static LibIgnore *libignore() {
223 return reinterpret_cast<LibIgnore*>(&libignore_placeholder[0]);
224 }
225
226 void InitializeLibIgnore() {
227 const SuppressionContext &supp = *Suppressions();
228 const uptr n = supp.SuppressionCount();
229 for (uptr i = 0; i < n; i++) {
230 const Suppression *s = supp.SuppressionAt(i);
231 if (0 == internal_strcmp(s->type, kSuppressionLib))
232 libignore()->AddIgnoredLibrary(s->templ);
233 }
234 if (flags()->ignore_noninstrumented_modules)
235 libignore()->IgnoreNoninstrumentedModules(true);
236 libignore()->OnLibraryLoaded(0);
237 }
238
239 } // namespace __tsan
240
241 static ThreadSignalContext *SigCtx(ThreadState *thr) {
242 ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
243 if (ctx == 0 && !thr->is_dead) {
244 ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
245 MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
246 thr->signal_ctx = ctx;
247 }
248 return ctx;
249 }
250
251 #if !SANITIZER_MAC
252 static unsigned g_thread_finalize_key;
253 #endif
254
255 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
256 uptr pc)
257 : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
258 Initialize(thr);
259 if (!thr_->is_inited) return;
260 if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
261 DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
262 ignoring_ =
263 !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
264 libignore()->IsIgnored(pc, &in_ignored_lib_));
265 EnableIgnores();
266 }
267
268 ScopedInterceptor::~ScopedInterceptor() {
269 if (!thr_->is_inited) return;
270 DisableIgnores();
271 if (!thr_->ignore_interceptors) {
272 ProcessPendingSignals(thr_);
273 FuncExit(thr_);
274 CheckNoLocks(thr_);
275 }
276 }
277
278 void ScopedInterceptor::EnableIgnores() {
279 if (ignoring_) {
280 ThreadIgnoreBegin(thr_, pc_);
281 if (in_ignored_lib_) {
282 DCHECK(!thr_->in_ignored_lib);
283 thr_->in_ignored_lib = true;
284 }
285 }
286 }
287
288 void ScopedInterceptor::DisableIgnores() {
289 if (ignoring_) {
290 ThreadIgnoreEnd(thr_, pc_);
291 if (in_ignored_lib_) {
292 DCHECK(thr_->in_ignored_lib);
293 thr_->in_ignored_lib = false;
294 }
295 }
296 }
297
298 #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
299 #if SANITIZER_FREEBSD
300 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
301 #else
302 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
303 #endif
304
305 #define READ_STRING_OF_LEN(thr, pc, s, len, n) \
306 MemoryAccessRange((thr), (pc), (uptr)(s), \
307 common_flags()->strict_string_checks ? (len) + 1 : (n), false)
308
309 #define READ_STRING(thr, pc, s, n) \
310 READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
311
312 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
313
314 struct BlockingCall {
315 explicit BlockingCall(ThreadState *thr)
316 : thr(thr)
317 , ctx(SigCtx(thr)) {
318 for (;;) {
319 atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
320 if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
321 break;
322 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
323 ProcessPendingSignals(thr);
324 }
325 // When we are in a "blocking call", we process signals asynchronously
326 // (right when they arrive). In this context we do not expect to be
327 // executing any user/runtime code. The known interceptor sequence when
328 // this is not true is: pthread_join -> munmap(stack). It's fine
329 // to ignore munmap in this case -- we handle stack shadow separately.
330 thr->ignore_interceptors++;
331 }
332
333 ~BlockingCall() {
334 thr->ignore_interceptors--;
335 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
336 }
337
338 ThreadState *thr;
339 ThreadSignalContext *ctx;
340 };
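// Editorial sketch (illustrative, not part of the runtime): a call such as
//   BLOCK_REAL(sleep)(sec)
// expands to
//   (BlockingCall(thr), REAL(sleep))(sec)
// i.e. the temporary BlockingCall first flushes any already-pending signals,
// then marks the thread as being inside a blocking call (in_blocking_func = 1)
// for the duration of REAL(sleep), and clears the flag in its destructor.
// See the sleep/usleep/nanosleep interceptors right below for the actual uses.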
341
342 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
343 SCOPED_TSAN_INTERCEPTOR(sleep, sec);
344 unsigned res = BLOCK_REAL(sleep)(sec);
345 AfterSleep(thr, pc);
346 return res;
347 }
348
349 TSAN_INTERCEPTOR(int, usleep, long_t usec) {
350 SCOPED_TSAN_INTERCEPTOR(usleep, usec);
351 int res = BLOCK_REAL(usleep)(usec);
352 AfterSleep(thr, pc);
353 return res;
354 }
355
356 TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
357 SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
358 int res = BLOCK_REAL(nanosleep)(req, rem);
359 AfterSleep(thr, pc);
360 return res;
361 }
362
363 // The sole reason tsan wraps atexit callbacks is to establish synchronization
364 // between callback setup and callback execution.
365 struct AtExitCtx {
366 void (*f)();
367 void *arg;
368 };
369
370 static void at_exit_wrapper(void *arg) {
371 ThreadState *thr = cur_thread();
372 uptr pc = 0;
373 Acquire(thr, pc, (uptr)arg);
374 AtExitCtx *ctx = (AtExitCtx*)arg;
375 ((void(*)(void *arg))ctx->f)(ctx->arg);
376 InternalFree(ctx);
377 }
378
379 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
380 void *arg, void *dso);
381
382 #if !SANITIZER_ANDROID
383 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
384 if (cur_thread()->in_symbolizer)
385 return 0;
386 // We want to setup the atexit callback even if we are in ignored lib
387 // or after fork.
388 SCOPED_INTERCEPTOR_RAW(atexit, f);
389 return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
390 }
391 #endif
392
393 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
394 if (cur_thread()->in_symbolizer)
395 return 0;
396 SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
397 return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
398 }
399
400 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
401 void *arg, void *dso) {
402 AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
403 ctx->f = f;
404 ctx->arg = arg;
405 Release(thr, pc, (uptr)ctx);
406 // Memory allocation in __cxa_atexit will race with free during exit,
407 // because we do not see the synchronization around the atexit callback list.
408 ThreadIgnoreBegin(thr, pc);
409 int res = REAL(__cxa_atexit)(at_exit_wrapper, ctx, dso);
410 ThreadIgnoreEnd(thr, pc);
411 return res;
412 }
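// Editorial sketch of the synchronization established above (illustrative
// only): registration and execution of an atexit callback are ordered through
// the heap-allocated AtExitCtx, roughly
//
//   registering thread                      exiting thread
//   ------------------                      --------------
//   ctx->f = f; ctx->arg = arg;
//   Release(thr, pc, (uptr)ctx);
//   REAL(__cxa_atexit)(at_exit_wrapper,
//                      ctx, dso);           at_exit_wrapper(ctx):
//                                             Acquire(thr, pc, (uptr)ctx);
//                                             ctx->f(ctx->arg);
//
// so, in the race detector's model, everything written before the atexit()/
// __cxa_atexit() call is visible to the callback when it runs at exit.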
413
414 #if !SANITIZER_MAC
415 static void on_exit_wrapper(int status, void *arg) {
416 ThreadState *thr = cur_thread();
417 uptr pc = 0;
418 Acquire(thr, pc, (uptr)arg);
419 AtExitCtx *ctx = (AtExitCtx*)arg;
420 ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
421 InternalFree(ctx);
422 }
423
424 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
425 if (cur_thread()->in_symbolizer)
426 return 0;
427 SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
428 AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
429 ctx->f = (void(*)())f;
430 ctx->arg = arg;
431 Release(thr, pc, (uptr)ctx);
432 // Memory allocation in __cxa_atexit will race with free during exit,
433 // because we do not see the synchronization around the atexit callback list.
434 ThreadIgnoreBegin(thr, pc);
435 int res = REAL(on_exit)(on_exit_wrapper, ctx);
436 ThreadIgnoreEnd(thr, pc);
437 return res;
438 }
439 #endif
440
441 // Cleanup old bufs.
442 static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
443 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
444 JmpBuf *buf = &thr->jmp_bufs[i];
445 if (buf->sp <= sp) {
446 uptr sz = thr->jmp_bufs.Size();
447 internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
448 thr->jmp_bufs.PopBack();
449 i--;
450 }
451 }
452 }
453
454 static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
455 if (!thr->is_inited) // called from libc guts during bootstrap
456 return;
457 // Cleanup old bufs.
458 JmpBufGarbageCollect(thr, sp);
459 // Remember the buf.
460 JmpBuf *buf = thr->jmp_bufs.PushBack();
461 buf->sp = sp;
462 buf->mangled_sp = mangled_sp;
463 buf->shadow_stack_pos = thr->shadow_stack_pos;
464 ThreadSignalContext *sctx = SigCtx(thr);
465 buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
466 buf->in_blocking_func = sctx ?
467 atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
468 false;
469 buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
470 memory_order_relaxed);
471 }
472
473 static void LongJmp(ThreadState *thr, uptr *env) {
474 #ifdef __powerpc__
475 uptr mangled_sp = env[0];
476 #elif SANITIZER_FREEBSD || SANITIZER_MAC
477 uptr mangled_sp = env[2];
478 #elif defined(SANITIZER_LINUX)
479 # ifdef __aarch64__
480 uptr mangled_sp = env[13];
481 # elif defined(__mips64)
482 uptr mangled_sp = env[1];
483 # else
484 uptr mangled_sp = env[6];
485 # endif
486 #endif
487 // Find the saved buf by mangled_sp.
488 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
489 JmpBuf *buf = &thr->jmp_bufs[i];
490 if (buf->mangled_sp == mangled_sp) {
491 CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
492 // Unwind the stack.
493 while (thr->shadow_stack_pos > buf->shadow_stack_pos)
494 FuncExit(thr);
495 ThreadSignalContext *sctx = SigCtx(thr);
496 if (sctx) {
497 sctx->int_signal_send = buf->int_signal_send;
498 atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
499 memory_order_relaxed);
500 }
501 atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
502 memory_order_relaxed);
503 JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
504 return;
505 }
506 }
507 Printf("ThreadSanitizer: can't find longjmp buf\n");
508 CHECK(0);
509 }
510
511 // FIXME: put everything below into a common extern "C" block?
512 extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
513 SetJmp(cur_thread(), sp, mangled_sp);
514 }
515
516 #if SANITIZER_MAC
517 TSAN_INTERCEPTOR(int, setjmp, void *env);
518 TSAN_INTERCEPTOR(int, _setjmp, void *env);
519 TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
520 #else // SANITIZER_MAC
521 // Not called. Merely to satisfy TSAN_INTERCEPT().
522 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
523 int __interceptor_setjmp(void *env);
524 extern "C" int __interceptor_setjmp(void *env) {
525 CHECK(0);
526 return 0;
527 }
528
529 // FIXME: any reason to have a separate declaration?
530 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
531 int __interceptor__setjmp(void *env);
532 extern "C" int __interceptor__setjmp(void *env) {
533 CHECK(0);
534 return 0;
535 }
536
537 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
538 int __interceptor_sigsetjmp(void *env);
539 extern "C" int __interceptor_sigsetjmp(void *env) {
540 CHECK(0);
541 return 0;
542 }
543
544 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
545 int __interceptor___sigsetjmp(void *env);
546 extern "C" int __interceptor___sigsetjmp(void *env) {
547 CHECK(0);
548 return 0;
549 }
550
551 extern "C" int setjmp(void *env);
552 extern "C" int _setjmp(void *env);
553 extern "C" int sigsetjmp(void *env);
554 extern "C" int __sigsetjmp(void *env);
555 DEFINE_REAL(int, setjmp, void *env)
556 DEFINE_REAL(int, _setjmp, void *env)
557 DEFINE_REAL(int, sigsetjmp, void *env)
558 DEFINE_REAL(int, __sigsetjmp, void *env)
559 #endif // SANITIZER_MAC
560
561 TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) {
562 // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
563 // bad things will happen. We will jump over ScopedInterceptor dtor and can
564 // leave thr->in_ignored_lib set.
565 {
566 SCOPED_INTERCEPTOR_RAW(longjmp, env, val);
567 }
568 LongJmp(cur_thread(), env);
569 REAL(longjmp)(env, val);
570 }
571
572 TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
573 {
574 SCOPED_INTERCEPTOR_RAW(siglongjmp, env, val);
575 }
576 LongJmp(cur_thread(), env);
577 REAL(siglongjmp)(env, val);
578 }
579
580 #if !SANITIZER_MAC
581 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
582 if (cur_thread()->in_symbolizer)
583 return InternalAlloc(size);
584 void *p = 0;
585 {
586 SCOPED_INTERCEPTOR_RAW(malloc, size);
587 p = user_alloc(thr, pc, size);
588 }
589 invoke_malloc_hook(p, size);
590 return p;
591 }
592
593 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
594 SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
595 return user_alloc(thr, pc, sz, align);
596 }
597
598 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
599 if (cur_thread()->in_symbolizer)
600 return InternalCalloc(size, n);
601 void *p = 0;
602 {
603 SCOPED_INTERCEPTOR_RAW(calloc, size, n);
604 p = user_calloc(thr, pc, size, n);
605 }
606 invoke_malloc_hook(p, n * size);
607 return p;
608 }
609
610 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
611 if (cur_thread()->in_symbolizer)
612 return InternalRealloc(p, size);
613 if (p)
614 invoke_free_hook(p);
615 {
616 SCOPED_INTERCEPTOR_RAW(realloc, p, size);
617 p = user_realloc(thr, pc, p, size);
618 }
619 invoke_malloc_hook(p, size);
620 return p;
621 }
622
623 TSAN_INTERCEPTOR(void, free, void *p) {
624 if (p == 0)
625 return;
626 if (cur_thread()->in_symbolizer)
627 return InternalFree(p);
628 invoke_free_hook(p);
629 SCOPED_INTERCEPTOR_RAW(free, p);
630 user_free(thr, pc, p);
631 }
632
633 TSAN_INTERCEPTOR(void, cfree, void *p) {
634 if (p == 0)
635 return;
636 if (cur_thread()->in_symbolizer)
637 return InternalFree(p);
638 invoke_free_hook(p);
639 SCOPED_INTERCEPTOR_RAW(cfree, p);
640 user_free(thr, pc, p);
641 }
642
643 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
644 SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
645 return user_alloc_usable_size(p);
646 }
647 #endif
648
649 TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT
650 SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT
651 uptr srclen = internal_strlen(src);
652 MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
653 MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
654 return REAL(strcpy)(dst, src); // NOLINT
655 }
656
657 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
658 SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
659 uptr srclen = internal_strnlen(src, n);
660 MemoryAccessRange(thr, pc, (uptr)dst, n, true);
661 MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
662 return REAL(strncpy)(dst, src, n);
663 }
664
665 TSAN_INTERCEPTOR(char*, strdup, const char *str) {
666 SCOPED_TSAN_INTERCEPTOR(strdup, str);
667 // strdup will call malloc, so no instrumentation is required here.
668 return REAL(strdup)(str);
669 }
670
671 static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
672 if (*addr) {
673 if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
674 if (flags & MAP_FIXED) {
675 errno = EINVAL;
676 return false;
677 } else {
678 *addr = 0;
679 }
680 }
681 }
682 return true;
683 }
684
685 TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
686 int fd, OFF_T off) {
687 SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off);
688 if (!fix_mmap_addr(&addr, sz, flags))
689 return MAP_FAILED;
690 void *res = REAL(mmap)(addr, sz, prot, flags, fd, off);
691 if (res != MAP_FAILED) {
692 if (fd > 0)
693 FdAccess(thr, pc, fd);
694
695 if (thr->ignore_reads_and_writes == 0)
696 MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
697 else
698 MemoryResetRange(thr, pc, (uptr)res, sz);
699 }
700 return res;
701 }
702
703 #if SANITIZER_LINUX
704 TSAN_INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags,
705 int fd, OFF64_T off) {
706 SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off);
707 if (!fix_mmap_addr(&addr, sz, flags))
708 return MAP_FAILED;
709 void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off);
710 if (res != MAP_FAILED) {
711 if (fd > 0)
712 FdAccess(thr, pc, fd);
713
714 if (thr->ignore_reads_and_writes == 0)
715 MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
716 else
717 MemoryResetRange(thr, pc, (uptr)res, sz);
718 }
719 return res;
720 }
721 #define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64)
722 #else
723 #define TSAN_MAYBE_INTERCEPT_MMAP64
724 #endif
725
726 TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
727 SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
728 if (sz != 0) {
729 // If sz == 0, munmap returns EINVAL and does not unmap any memory.
730 DontNeedShadowFor((uptr)addr, sz);
731 ScopedGlobalProcessor sgp;
732 ctx->metamap.ResetRange(thr->proc(), (uptr)addr, (uptr)sz);
733 }
734 int res = REAL(munmap)(addr, sz);
735 return res;
736 }
737
738 #if SANITIZER_LINUX
739 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
740 SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
741 return user_alloc(thr, pc, sz, align);
742 }
743 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
744 #else
745 #define TSAN_MAYBE_INTERCEPT_MEMALIGN
746 #endif
747
748 #if !SANITIZER_MAC
749 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
750 SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
751 return user_alloc(thr, pc, sz, align);
752 }
753
754 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
755 SCOPED_INTERCEPTOR_RAW(valloc, sz);
756 return user_alloc(thr, pc, sz, GetPageSizeCached());
757 }
758 #endif
759
760 #if SANITIZER_LINUX
761 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
762 SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
763 sz = RoundUp(sz, GetPageSizeCached());
764 return user_alloc(thr, pc, sz, GetPageSizeCached());
765 }
766 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
767 #else
768 #define TSAN_MAYBE_INTERCEPT_PVALLOC
769 #endif
770
771 #if !SANITIZER_MAC
772 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
773 SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
774 *memptr = user_alloc(thr, pc, sz, align);
775 return 0;
776 }
777 #endif
778
779 // __cxa_guard_acquire and friends need to be intercepted in a special way -
780 // regular interceptors would break statically-linked libstdc++. The Linux
781 // interceptors are deliberately defined as weak functions (so that they don't
782 // cause link errors when the user defines them as well), and they silently
783 // auto-disable themselves when such a symbol is already present in the
784 // binary. If we link libstdc++ statically, it brings its own
785 // __cxa_guard_acquire, which silently replaces our interceptor. That's why on
786 // Linux we simply export these interceptors with INTERFACE_ATTRIBUTE.
787 // On OS X, we don't support static linking, so we just use a regular
788 // interceptor.
789 #if SANITIZER_MAC
790 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
791 #else
792 #define STDCXX_INTERCEPTOR(rettype, name, ...) \
793 extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
794 #endif
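// Editorial sketch (illustrative; "Widget" and "w_guard" are hypothetical
// names): these entry points are what the compiler emits for a thread-safe
// function-local static, roughly
//
//   static Widget &instance() {
//     static Widget w;                        // lowered approximately to:
//     // if (__cxa_guard_acquire(&w_guard)) {
//     //   new (&w) Widget();
//     //   __cxa_guard_release(&w_guard);     // __cxa_guard_abort on throw
//     // }
//     return w;
//   }
//
// The Release in __cxa_guard_release below pairs with the Acquire in
// __cxa_guard_acquire, so the initialization of w happens-before every later
// use that observes the guard as already initialized.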
795
796 // Used in thread-safe function static initialization.
797 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
798 SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
799 for (;;) {
800 u32 cmp = atomic_load(g, memory_order_acquire);
801 if (cmp == 0) {
802 if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
803 return 1;
804 } else if (cmp == 1) {
805 Acquire(thr, pc, (uptr)g);
806 return 0;
807 } else {
808 internal_sched_yield();
809 }
810 }
811 }
812
813 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
814 SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
815 Release(thr, pc, (uptr)g);
816 atomic_store(g, 1, memory_order_release);
817 }
818
819 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
820 SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
821 atomic_store(g, 0, memory_order_relaxed);
822 }
823
824 namespace __tsan {
825 void DestroyThreadState() {
826 ThreadState *thr = cur_thread();
827 Processor *proc = thr->proc();
828 ThreadFinish(thr);
829 ProcUnwire(proc, thr);
830 ProcDestroy(proc);
831 ThreadSignalContext *sctx = thr->signal_ctx;
832 if (sctx) {
833 thr->signal_ctx = 0;
834 UnmapOrDie(sctx, sizeof(*sctx));
835 }
836 DTLS_Destroy();
837 cur_thread_finalize();
838 }
839 } // namespace __tsan
840
841 #if !SANITIZER_MAC
842 static void thread_finalize(void *v) {
843 uptr iter = (uptr)v;
844 if (iter > 1) {
845 if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
846 Printf("ThreadSanitizer: failed to set thread key\n");
847 Die();
848 }
849 return;
850 }
851 DestroyThreadState();
852 }
853 #endif
854
855
856 struct ThreadParam {
857 void* (*callback)(void *arg);
858 void *param;
859 atomic_uintptr_t tid;
860 };
861
862 extern "C" void *__tsan_thread_start_func(void *arg) {
863 ThreadParam *p = (ThreadParam*)arg;
864 void* (*callback)(void *arg) = p->callback;
865 void *param = p->param;
866 int tid = 0;
867 {
868 ThreadState *thr = cur_thread();
869 // Thread-local state is not initialized yet.
870 ScopedIgnoreInterceptors ignore;
871 #if !SANITIZER_MAC
872 ThreadIgnoreBegin(thr, 0);
873 if (pthread_setspecific(g_thread_finalize_key,
874 (void *)GetPthreadDestructorIterations())) {
875 Printf("ThreadSanitizer: failed to set thread key\n");
876 Die();
877 }
878 ThreadIgnoreEnd(thr, 0);
879 #endif
880 while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
881 internal_sched_yield();
882 Processor *proc = ProcCreate();
883 ProcWire(proc, thr);
884 ThreadStart(thr, tid, GetTid());
885 atomic_store(&p->tid, 0, memory_order_release);
886 }
887 void *res = callback(param);
888 // Prevent the callback from being tail called,
889 // it mixes up stack traces.
890 volatile int foo = 42;
891 foo++;
892 return res;
893 }
894
895 TSAN_INTERCEPTOR(int, pthread_create,
896 void *th, void *attr, void *(*callback)(void*), void * param) {
897 SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
898 if (ctx->after_multithreaded_fork) {
899 if (flags()->die_after_fork) {
900 Report("ThreadSanitizer: starting new threads after multi-threaded "
901 "fork is not supported. Dying (set die_after_fork=0 to override)\n");
902 Die();
903 } else {
904 VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
905 "fork is not supported (pid %d). Continuing because of "
906 "die_after_fork=0, but you are on your own\n", internal_getpid());
907 }
908 }
909 __sanitizer_pthread_attr_t myattr;
910 if (attr == 0) {
911 pthread_attr_init(&myattr);
912 attr = &myattr;
913 }
914 int detached = 0;
915 REAL(pthread_attr_getdetachstate)(attr, &detached);
916 AdjustStackSize(attr);
917
918 ThreadParam p;
919 p.callback = callback;
920 p.param = param;
921 atomic_store(&p.tid, 0, memory_order_relaxed);
922 int res = -1;
923 {
924 // Otherwise we see false positives in pthread stack manipulation.
925 ScopedIgnoreInterceptors ignore;
926 ThreadIgnoreBegin(thr, pc);
927 res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
928 ThreadIgnoreEnd(thr, pc);
929 }
930 if (res == 0) {
931 int tid = ThreadCreate(thr, pc, *(uptr*)th,
932 detached == PTHREAD_CREATE_DETACHED);
933 CHECK_NE(tid, 0);
934 // Synchronization on p.tid serves two purposes:
935 // 1. ThreadCreate must finish before the new thread starts.
936 // Otherwise the new thread can call pthread_detach, but the pthread_t
937 // identifier is not yet registered in ThreadRegistry by ThreadCreate.
938 // 2. ThreadStart must finish before this thread continues.
939 // Otherwise, this thread can call pthread_detach and reset thr->sync
940 // before the new thread has had a chance to acquire from it in ThreadStart.
941 atomic_store(&p.tid, tid, memory_order_release);
942 while (atomic_load(&p.tid, memory_order_acquire) != 0)
943 internal_sched_yield();
944 }
945 if (attr == &myattr)
946 pthread_attr_destroy(&myattr);
947 return res;
948 }
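// Editorial sketch of the p.tid handshake above (illustrative only):
//
//   creator (pthread_create)               child (__tsan_thread_start_func)
//   ------------------------               --------------------------------
//   REAL(pthread_create)(..., &p)
//   tid = ThreadCreate(...)                spins while p.tid == 0 (acquire)
//   store(p.tid, tid, release)    ------>  observes tid, ThreadStart(thr, tid)
//   spins while p.tid != 0 (acquire)       store(p.tid, 0, release)
//   pthread_create returns        <------
//
// The two release/acquire pairs give exactly the two guarantees listed in the
// comment above: ThreadCreate() finishes before the child starts running, and
// ThreadStart() finishes before pthread_create() returns to the caller.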
949
950 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
951 SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
952 int tid = ThreadTid(thr, pc, (uptr)th);
953 ThreadIgnoreBegin(thr, pc);
954 int res = BLOCK_REAL(pthread_join)(th, ret);
955 ThreadIgnoreEnd(thr, pc);
956 if (res == 0) {
957 ThreadJoin(thr, pc, tid);
958 }
959 return res;
960 }
961
962 DEFINE_REAL_PTHREAD_FUNCTIONS
963
964 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
965 SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
966 int tid = ThreadTid(thr, pc, (uptr)th);
967 int res = REAL(pthread_detach)(th);
968 if (res == 0) {
969 ThreadDetach(thr, pc, tid);
970 }
971 return res;
972 }
973
974 // Problem:
975 // The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2),
976 // and pthread_cond_t has a different size in the two versions.
977 // If we call the new REAL functions for an old pthread_cond_t, they will
978 // corrupt memory after the pthread_cond_t (the old cond is smaller).
979 // If we call the old REAL functions for a new pthread_cond_t, we will lose
980 // some functionality (e.g. the old functions do not support waiting against
981 // CLOCK_REALTIME).
982 // Proper handling would require having 2 versions of the interceptors as
983 // well. But this is messy, and in particular requires linker scripts when the
984 // sanitizer runtime is linked into a shared library.
985 // Instead we assume we don't have dynamic libraries built against the old
986 // pthread (2.2.5 dates back to 2002), and provide the legacy_pthread_cond
987 // flag that allows working with old libraries (but this mode does not support
988 // some features, e.g. pthread_condattr_getpshared).
989 static void *init_cond(void *c, bool force = false) {
990 // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
991 // So we allocate additional memory on the side large enough to hold
992 // any pthread_cond_t object. We always call the new REAL functions, but
993 // pass the aux object to them.
994 // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes the
995 // first word of pthread_cond_t to zero.
996 // This is all relevant only on Linux.
997 if (!common_flags()->legacy_pthread_cond)
998 return c;
999 atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1000 uptr cond = atomic_load(p, memory_order_acquire);
1001 if (!force && cond != 0)
1002 return (void*)cond;
1003 void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1004 internal_memset(newcond, 0, pthread_cond_t_sz);
1005 if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1006 memory_order_acq_rel))
1007 return newcond;
1008 WRAP(free)(newcond);
1009 return (void*)cond;
1010 }
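// Editorial sketch (illustrative only) of the aux-object scheme implemented
// by init_cond() above when legacy_pthread_cond=1: the first word of the
// user's pthread_cond_t is reused as an atomic pointer to a heap-allocated,
// full-sized condition variable, e.g.
//
//   pthread_cond_t c = PTHREAD_COND_INITIALIZER;  // first word is 0
//   pthread_cond_wait(&c, &m);  // interceptor: init_cond(&c) lazily mallocs a
//                               // pthread_cond_t_sz-byte object, CASes its
//                               // address into the first word and passes it
//                               // to REAL(pthread_cond_wait)
//
// All REAL() calls then operate on the aux object rather than on the
// (possibly old and smaller) user-provided storage; pthread_cond_destroy
// frees the aux object and zeroes the pointer again.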
1011
1012 struct CondMutexUnlockCtx {
1013 ScopedInterceptor *si;
1014 ThreadState *thr;
1015 uptr pc;
1016 void *m;
1017 };
1018
1019 static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
1020 // The pthread_cond_wait interceptor has enabled async signal delivery
1021 // (see BlockingCall below). Disable async signals since we are running
1022 // tsan code. Also, the ScopedInterceptor and BlockingCall destructors won't
1023 // run since the thread is cancelled, so we have to execute them manually
1024 // (the thread can still run some user code due to pthread_cleanup_push).
1025 ThreadSignalContext *ctx = SigCtx(arg->thr);
1026 CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
1027 atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
1028 MutexLock(arg->thr, arg->pc, (uptr)arg->m);
1029 // Undo BlockingCall ctor effects.
1030 arg->thr->ignore_interceptors--;
1031 arg->si->~ScopedInterceptor();
1032 }
1033
1034 INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1035 void *cond = init_cond(c, true);
1036 SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1037 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1038 return REAL(pthread_cond_init)(cond, a);
1039 }
1040
1041 static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
1042 int (*fn)(void *c, void *m, void *abstime), void *c,
1043 void *m, void *t) {
1044 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1045 MutexUnlock(thr, pc, (uptr)m);
1046 CondMutexUnlockCtx arg = {si, thr, pc, m};
1047 int res = 0;
1048 // This ensures that we handle mutex lock even in case of pthread_cancel.
1049 // See test/tsan/cond_cancel.cc.
1050 {
1051 // Enable signal delivery while the thread is blocked.
1052 BlockingCall bc(thr);
1053 res = call_pthread_cancel_with_cleanup(
1054 fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
1055 }
1056 if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1057 MutexLock(thr, pc, (uptr)m);
1058 return res;
1059 }
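// Editorial sketch of the flow in cond_wait() above (illustrative only):
//
//   MutexUnlock(thr, pc, (uptr)m);   // model the unlock performed by the wait
//   {
//     BlockingCall bc(thr);          // allow async signal delivery
//     call_pthread_cancel_with_cleanup(fn, c, m, t, cond_mutex_unlock, &arg);
//     // If the thread is cancelled inside fn, cond_mutex_unlock() re-locks m
//     // in the model and manually undoes the BlockingCall/ScopedInterceptor
//     // state, because their destructors will not run.
//   }
//   if (res == errno_EOWNERDEAD) MutexRepair(...);  // robust mutex recovered
//   MutexLock(thr, pc, (uptr)m);     // normal path: the wait re-acquired m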
1060
1061 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1062 void *cond = init_cond(c);
1063 SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1064 return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
1065 pthread_cond_wait),
1066 cond, m, 0);
1067 }
1068
1069 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1070 void *cond = init_cond(c);
1071 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1072 return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
1073 abstime);
1074 }
1075
1076 #if SANITIZER_MAC
1077 INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1078 void *reltime) {
1079 void *cond = init_cond(c);
1080 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1081 return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
1082 m, reltime);
1083 }
1084 #endif
1085
1086 INTERCEPTOR(int, pthread_cond_signal, void *c) {
1087 void *cond = init_cond(c);
1088 SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1089 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1090 return REAL(pthread_cond_signal)(cond);
1091 }
1092
1093 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1094 void *cond = init_cond(c);
1095 SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1096 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1097 return REAL(pthread_cond_broadcast)(cond);
1098 }
1099
1100 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1101 void *cond = init_cond(c);
1102 SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1103 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1104 int res = REAL(pthread_cond_destroy)(cond);
1105 if (common_flags()->legacy_pthread_cond) {
1106 // Free our aux cond and zero the pointer to not leave dangling pointers.
1107 WRAP(free)(cond);
1108 atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1109 }
1110 return res;
1111 }
1112
1113 TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1114 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1115 int res = REAL(pthread_mutex_init)(m, a);
1116 if (res == 0) {
1117 bool recursive = false;
1118 if (a) {
1119 int type = 0;
1120 if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1121 recursive = (type == PTHREAD_MUTEX_RECURSIVE
1122 || type == PTHREAD_MUTEX_RECURSIVE_NP);
1123 }
1124 MutexCreate(thr, pc, (uptr)m, false, recursive, false);
1125 }
1126 return res;
1127 }
1128
1129 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1130 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1131 int res = REAL(pthread_mutex_destroy)(m);
1132 if (res == 0 || res == EBUSY) {
1133 MutexDestroy(thr, pc, (uptr)m);
1134 }
1135 return res;
1136 }
1137
1138 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1139 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1140 int res = REAL(pthread_mutex_trylock)(m);
1141 if (res == EOWNERDEAD)
1142 MutexRepair(thr, pc, (uptr)m);
1143 if (res == 0 || res == EOWNERDEAD)
1144 MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
1145 return res;
1146 }
1147
1148 #if !SANITIZER_MAC
1149 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1150 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1151 int res = REAL(pthread_mutex_timedlock)(m, abstime);
1152 if (res == 0) {
1153 MutexLock(thr, pc, (uptr)m);
1154 }
1155 return res;
1156 }
1157 #endif
1158
1159 #if !SANITIZER_MAC
1160 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1161 SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1162 int res = REAL(pthread_spin_init)(m, pshared);
1163 if (res == 0) {
1164 MutexCreate(thr, pc, (uptr)m, false, false, false);
1165 }
1166 return res;
1167 }
1168
1169 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1170 SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1171 int res = REAL(pthread_spin_destroy)(m);
1172 if (res == 0) {
1173 MutexDestroy(thr, pc, (uptr)m);
1174 }
1175 return res;
1176 }
1177
1178 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1179 SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1180 int res = REAL(pthread_spin_lock)(m);
1181 if (res == 0) {
1182 MutexLock(thr, pc, (uptr)m);
1183 }
1184 return res;
1185 }
1186
1187 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1188 SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1189 int res = REAL(pthread_spin_trylock)(m);
1190 if (res == 0) {
1191 MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
1192 }
1193 return res;
1194 }
1195
1196 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1197 SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1198 MutexUnlock(thr, pc, (uptr)m);
1199 int res = REAL(pthread_spin_unlock)(m);
1200 return res;
1201 }
1202 #endif
1203
1204 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1205 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1206 int res = REAL(pthread_rwlock_init)(m, a);
1207 if (res == 0) {
1208 MutexCreate(thr, pc, (uptr)m, true, false, false);
1209 }
1210 return res;
1211 }
1212
1213 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1214 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1215 int res = REAL(pthread_rwlock_destroy)(m);
1216 if (res == 0) {
1217 MutexDestroy(thr, pc, (uptr)m);
1218 }
1219 return res;
1220 }
1221
1222 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1223 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1224 int res = REAL(pthread_rwlock_rdlock)(m);
1225 if (res == 0) {
1226 MutexReadLock(thr, pc, (uptr)m);
1227 }
1228 return res;
1229 }
1230
1231 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1232 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1233 int res = REAL(pthread_rwlock_tryrdlock)(m);
1234 if (res == 0) {
1235 MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true);
1236 }
1237 return res;
1238 }
1239
1240 #if !SANITIZER_MAC
1241 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1242 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1243 int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1244 if (res == 0) {
1245 MutexReadLock(thr, pc, (uptr)m);
1246 }
1247 return res;
1248 }
1249 #endif
1250
1251 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1252 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1253 int res = REAL(pthread_rwlock_wrlock)(m);
1254 if (res == 0) {
1255 MutexLock(thr, pc, (uptr)m);
1256 }
1257 return res;
1258 }
1259
1260 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1261 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1262 int res = REAL(pthread_rwlock_trywrlock)(m);
1263 if (res == 0) {
1264 MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
1265 }
1266 return res;
1267 }
1268
1269 #if !SANITIZER_MAC
1270 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1271 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1272 int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1273 if (res == 0) {
1274 MutexLock(thr, pc, (uptr)m);
1275 }
1276 return res;
1277 }
1278 #endif
1279
1280 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1281 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1282 MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1283 int res = REAL(pthread_rwlock_unlock)(m);
1284 return res;
1285 }
1286
1287 #if !SANITIZER_MAC
1288 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1289 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1290 MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
1291 int res = REAL(pthread_barrier_init)(b, a, count);
1292 return res;
1293 }
1294
1295 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1296 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1297 MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
1298 int res = REAL(pthread_barrier_destroy)(b);
1299 return res;
1300 }
1301
1302 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1303 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1304 Release(thr, pc, (uptr)b);
1305 MemoryRead(thr, pc, (uptr)b, kSizeLog1);
1306 int res = REAL(pthread_barrier_wait)(b);
1307 MemoryRead(thr, pc, (uptr)b, kSizeLog1);
1308 if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1309 Acquire(thr, pc, (uptr)b);
1310 }
1311 return res;
1312 }
1313 #endif
1314
1315 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1316 SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1317 if (o == 0 || f == 0)
1318 return EINVAL;
1319 atomic_uint32_t *a;
1320 if (!SANITIZER_MAC)
1321 a = static_cast<atomic_uint32_t*>(o);
1322 else // On OS X, pthread_once_t has a header with a long-sized signature.
1323 a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1324 u32 v = atomic_load(a, memory_order_acquire);
1325 if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
1326 memory_order_relaxed)) {
1327 (*f)();
1328 if (!thr->in_ignored_lib)
1329 Release(thr, pc, (uptr)o);
1330 atomic_store(a, 2, memory_order_release);
1331 } else {
1332 while (v != 2) {
1333 internal_sched_yield();
1334 v = atomic_load(a, memory_order_acquire);
1335 }
1336 if (!thr->in_ignored_lib)
1337 Acquire(thr, pc, (uptr)o);
1338 }
1339 return 0;
1340 }
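// Editorial sketch (illustrative only) of the state machine used by the
// pthread_once interceptor above. The first word of pthread_once_t (or, on
// OS X, the word after its long-sized signature header) is treated as an
// atomic with three states:
//
//   0  not started:  the winner CASes 0 -> 1 and runs f()
//   1  in progress:  other callers spin with internal_sched_yield()
//   2  done:         stored with release after Release(thr, pc, (uptr)o); a
//                    caller that observes 2 with acquire does
//                    Acquire(thr, pc, (uptr)o), so f()'s writes happen-before
//                    every return from pthread_once().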
1341
1342 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1343 TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1344 SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1345 if (fd > 0)
1346 FdAccess(thr, pc, fd);
1347 return REAL(__fxstat)(version, fd, buf);
1348 }
1349 #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
1350 #else
1351 #define TSAN_MAYBE_INTERCEPT___FXSTAT
1352 #endif
1353
1354 TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1355 #if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID
1356 SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1357 if (fd > 0)
1358 FdAccess(thr, pc, fd);
1359 return REAL(fstat)(fd, buf);
1360 #else
1361 SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
1362 if (fd > 0)
1363 FdAccess(thr, pc, fd);
1364 return REAL(__fxstat)(0, fd, buf);
1365 #endif
1366 }
1367
1368 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1369 TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1370 SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1371 if (fd > 0)
1372 FdAccess(thr, pc, fd);
1373 return REAL(__fxstat64)(version, fd, buf);
1374 }
1375 #define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
1376 #else
1377 #define TSAN_MAYBE_INTERCEPT___FXSTAT64
1378 #endif
1379
1380 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1381 TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1382 SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
1383 if (fd > 0)
1384 FdAccess(thr, pc, fd);
1385 return REAL(__fxstat64)(0, fd, buf);
1386 }
1387 #define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1388 #else
1389 #define TSAN_MAYBE_INTERCEPT_FSTAT64
1390 #endif
1391
1392 TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
1393 SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
1394 READ_STRING(thr, pc, name, 0);
1395 int fd = REAL(open)(name, flags, mode);
1396 if (fd >= 0)
1397 FdFileCreate(thr, pc, fd);
1398 return fd;
1399 }
1400
1401 #if SANITIZER_LINUX
1402 TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
1403 SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
1404 READ_STRING(thr, pc, name, 0);
1405 int fd = REAL(open64)(name, flags, mode);
1406 if (fd >= 0)
1407 FdFileCreate(thr, pc, fd);
1408 return fd;
1409 }
1410 #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1411 #else
1412 #define TSAN_MAYBE_INTERCEPT_OPEN64
1413 #endif
1414
1415 TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1416 SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1417 READ_STRING(thr, pc, name, 0);
1418 int fd = REAL(creat)(name, mode);
1419 if (fd >= 0)
1420 FdFileCreate(thr, pc, fd);
1421 return fd;
1422 }
1423
1424 #if SANITIZER_LINUX
1425 TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1426 SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1427 READ_STRING(thr, pc, name, 0);
1428 int fd = REAL(creat64)(name, mode);
1429 if (fd >= 0)
1430 FdFileCreate(thr, pc, fd);
1431 return fd;
1432 }
1433 #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1434 #else
1435 #define TSAN_MAYBE_INTERCEPT_CREAT64
1436 #endif
1437
1438 TSAN_INTERCEPTOR(int, dup, int oldfd) {
1439 SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1440 int newfd = REAL(dup)(oldfd);
1441 if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1442 FdDup(thr, pc, oldfd, newfd, true);
1443 return newfd;
1444 }
1445
1446 TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1447 SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1448 int newfd2 = REAL(dup2)(oldfd, newfd);
1449 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1450 FdDup(thr, pc, oldfd, newfd2, false);
1451 return newfd2;
1452 }
1453
1454 #if !SANITIZER_MAC
1455 TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1456 SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1457 int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1458 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1459 FdDup(thr, pc, oldfd, newfd2, false);
1460 return newfd2;
1461 }
1462 #endif
1463
1464 #if SANITIZER_LINUX
1465 TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1466 SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1467 int fd = REAL(eventfd)(initval, flags);
1468 if (fd >= 0)
1469 FdEventCreate(thr, pc, fd);
1470 return fd;
1471 }
1472 #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1473 #else
1474 #define TSAN_MAYBE_INTERCEPT_EVENTFD
1475 #endif
1476
1477 #if SANITIZER_LINUX
1478 TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1479 SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
1480 if (fd >= 0)
1481 FdClose(thr, pc, fd);
1482 fd = REAL(signalfd)(fd, mask, flags);
1483 if (fd >= 0)
1484 FdSignalCreate(thr, pc, fd);
1485 return fd;
1486 }
1487 #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1488 #else
1489 #define TSAN_MAYBE_INTERCEPT_SIGNALFD
1490 #endif
1491
1492 #if SANITIZER_LINUX
1493 TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1494 SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1495 int fd = REAL(inotify_init)(fake);
1496 if (fd >= 0)
1497 FdInotifyCreate(thr, pc, fd);
1498 return fd;
1499 }
1500 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1501 #else
1502 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1503 #endif
1504
1505 #if SANITIZER_LINUX
1506 TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1507 SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1508 int fd = REAL(inotify_init1)(flags);
1509 if (fd >= 0)
1510 FdInotifyCreate(thr, pc, fd);
1511 return fd;
1512 }
1513 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1514 #else
1515 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1516 #endif
1517
1518 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1519 SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1520 int fd = REAL(socket)(domain, type, protocol);
1521 if (fd >= 0)
1522 FdSocketCreate(thr, pc, fd);
1523 return fd;
1524 }
1525
1526 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1527 SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1528 int res = REAL(socketpair)(domain, type, protocol, fd);
1529 if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1530 FdPipeCreate(thr, pc, fd[0], fd[1]);
1531 return res;
1532 }
1533
1534 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1535 SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1536 FdSocketConnecting(thr, pc, fd);
1537 int res = REAL(connect)(fd, addr, addrlen);
1538 if (res == 0 && fd >= 0)
1539 FdSocketConnect(thr, pc, fd);
1540 return res;
1541 }
1542
1543 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1544 SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1545 int res = REAL(bind)(fd, addr, addrlen);
1546 if (fd > 0 && res == 0)
1547 FdAccess(thr, pc, fd);
1548 return res;
1549 }
1550
1551 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1552 SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1553 int res = REAL(listen)(fd, backlog);
1554 if (fd > 0 && res == 0)
1555 FdAccess(thr, pc, fd);
1556 return res;
1557 }
1558
1559 TSAN_INTERCEPTOR(int, close, int fd) {
1560 SCOPED_TSAN_INTERCEPTOR(close, fd);
1561 if (fd >= 0)
1562 FdClose(thr, pc, fd);
1563 return REAL(close)(fd);
1564 }
1565
1566 #if SANITIZER_LINUX
1567 TSAN_INTERCEPTOR(int, __close, int fd) {
1568 SCOPED_TSAN_INTERCEPTOR(__close, fd);
1569 if (fd >= 0)
1570 FdClose(thr, pc, fd);
1571 return REAL(__close)(fd);
1572 }
1573 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1574 #else
1575 #define TSAN_MAYBE_INTERCEPT___CLOSE
1576 #endif
1577
1578 // glibc guts
1579 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1580 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1581 SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
1582 int fds[64];
1583 int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1584 for (int i = 0; i < cnt; i++) {
1585 if (fds[i] > 0)
1586 FdClose(thr, pc, fds[i]);
1587 }
1588 REAL(__res_iclose)(state, free_addr);
1589 }
1590 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1591 #else
1592 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1593 #endif
1594
1595 TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1596 SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1597 int res = REAL(pipe)(pipefd);
1598 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1599 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1600 return res;
1601 }
1602
1603 #if !SANITIZER_MAC
1604 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1605 SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1606 int res = REAL(pipe2)(pipefd, flags);
1607 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1608 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1609 return res;
1610 }
1611 #endif
1612
1613 TSAN_INTERCEPTOR(int, unlink, char *path) {
1614 SCOPED_TSAN_INTERCEPTOR(unlink, path);
1615 Release(thr, pc, File2addr(path));
1616 int res = REAL(unlink)(path);
1617 return res;
1618 }
1619
1620 TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1621 SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1622 void *res = REAL(tmpfile)(fake);
1623 if (res) {
1624 int fd = fileno_unlocked(res);
1625 if (fd >= 0)
1626 FdFileCreate(thr, pc, fd);
1627 }
1628 return res;
1629 }
1630
1631 #if SANITIZER_LINUX
1632 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1633 SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1634 void *res = REAL(tmpfile64)(fake);
1635 if (res) {
1636 int fd = fileno_unlocked(res);
1637 if (fd >= 0)
1638 FdFileCreate(thr, pc, fd);
1639 }
1640 return res;
1641 }
1642 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1643 #else
1644 #define TSAN_MAYBE_INTERCEPT_TMPFILE64
1645 #endif
1646
1647 TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
1648 // libc file streams can call user-supplied functions, see fopencookie.
1649 {
1650 SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f);
1651 MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true);
1652 }
1653 return REAL(fread)(ptr, size, nmemb, f);
1654 }
1655
1656 TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
1657 // libc file streams can call user-supplied functions, see fopencookie.
1658 {
1659 SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f);
1660 MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false);
1661 }
1662 return REAL(fwrite)(p, size, nmemb, f);
1663 }
1664
1665 static void FlushStreams() {
1666 // Flushing all the streams here may freeze the process if a child thread is
1667 // performing file stream operations at the same time.
1668 REAL(fflush)(stdout);
1669 REAL(fflush)(stderr);
1670 }
1671
1672 TSAN_INTERCEPTOR(void, abort, int fake) {
1673 SCOPED_TSAN_INTERCEPTOR(abort, fake);
1674 FlushStreams();
1675 REAL(abort)(fake);
1676 }
1677
1678 TSAN_INTERCEPTOR(int, puts, const char *s) {
1679 SCOPED_TSAN_INTERCEPTOR(puts, s);
1680 MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false);
1681 return REAL(puts)(s);
1682 }
1683
1684 TSAN_INTERCEPTOR(int, rmdir, char *path) {
1685 SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1686 Release(thr, pc, Dir2addr(path));
1687 int res = REAL(rmdir)(path);
1688 return res;
1689 }
1690
1691 TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1692 SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
1693 if (dirp) {
1694 int fd = dirfd(dirp);
1695 FdClose(thr, pc, fd);
1696 }
1697 return REAL(closedir)(dirp);
1698 }
1699
1700 #if SANITIZER_LINUX
1701 TSAN_INTERCEPTOR(int, epoll_create, int size) {
1702 SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1703 int fd = REAL(epoll_create)(size);
1704 if (fd >= 0)
1705 FdPollCreate(thr, pc, fd);
1706 return fd;
1707 }
1708
1709 TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1710 SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1711 int fd = REAL(epoll_create1)(flags);
1712 if (fd >= 0)
1713 FdPollCreate(thr, pc, fd);
1714 return fd;
1715 }
1716
1717 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1718 SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1719 if (epfd >= 0)
1720 FdAccess(thr, pc, epfd);
1721 if (epfd >= 0 && fd >= 0)
1722 FdAccess(thr, pc, fd);
1723 if (op == EPOLL_CTL_ADD && epfd >= 0)
1724 FdRelease(thr, pc, epfd);
1725 int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1726 return res;
1727 }
1728
1729 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1730 SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1731 if (epfd >= 0)
1732 FdAccess(thr, pc, epfd);
1733 int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1734 if (res > 0 && epfd >= 0)
1735 FdAcquire(thr, pc, epfd);
1736 return res;
1737 }
1738
1739 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1740 void *sigmask) {
1741 SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
1742 if (epfd >= 0)
1743 FdAccess(thr, pc, epfd);
1744 int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
1745 if (res > 0 && epfd >= 0)
1746 FdAcquire(thr, pc, epfd);
1747 return res;
1748 }
1749
1750 #define TSAN_MAYBE_INTERCEPT_EPOLL \
1751 TSAN_INTERCEPT(epoll_create); \
1752 TSAN_INTERCEPT(epoll_create1); \
1753 TSAN_INTERCEPT(epoll_ctl); \
1754 TSAN_INTERCEPT(epoll_wait); \
1755 TSAN_INTERCEPT(epoll_pwait)
1756 #else
1757 #define TSAN_MAYBE_INTERCEPT_EPOLL
1758 #endif
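// Illustrative sketch (hypothetical user code, not compiled) of the
// happens-before edge the interceptors above establish: epoll_ctl(EPOLL_CTL_ADD)
// does FdRelease(epfd) and a successful epoll_wait() does FdAcquire(epfd), so
// the plain write to g_data in producer() is ordered before the read in
// consumer(). Names (g_data, producer, consumer) are made up.
#if 0
#include <sys/epoll.h>
#include <unistd.h>
static int g_data;
static void producer(int epfd, int pipe_rd, int pipe_wr) {
  g_data = 42;                                   // plain, non-atomic write
  struct epoll_event ev = {};
  ev.events = EPOLLIN;
  epoll_ctl(epfd, EPOLL_CTL_ADD, pipe_rd, &ev);  // FdRelease(epfd)
  write(pipe_wr, "x", 1);                        // make the fd readable
}
static void consumer(int epfd) {
  struct epoll_event ev;
  if (epoll_wait(epfd, &ev, 1, -1) > 0)          // FdAcquire(epfd) on success
    (void)g_data;                                // ordered after producer's write
}
#endif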
1759
1760 // The following functions are intercepted merely to process pending signals.
1761 // If the program blocks signal X, we must deliver the signal before the function
1762 // returns. Similarly, if the program unblocks a signal (or returns from
1763 // sigsuspend), it's better to deliver the signal straight away.
1764 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
1765 SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
1766 return REAL(sigsuspend)(mask);
1767 }
1768
1769 TSAN_INTERCEPTOR(int, sigblock, int mask) {
1770 SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
1771 return REAL(sigblock)(mask);
1772 }
1773
1774 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
1775 SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
1776 return REAL(sigsetmask)(mask);
1777 }
1778
1779 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
1780 __sanitizer_sigset_t *oldset) {
1781 SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
1782 return REAL(pthread_sigmask)(how, set, oldset);
1783 }
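// Illustrative sketch (hypothetical user code, not compiled) of the pattern the
// comment above refers to: per POSIX, sigsuspend() returns only after the
// handler has run, so if the runtime has internally deferred a pending signal
// it must be delivered no later than the return of the intercepted call.
// Names (on_usr1, wait_for_usr1) are made up.
#if 0
#include <signal.h>
#include <pthread.h>
static volatile sig_atomic_t g_got_usr1;
static void on_usr1(int) { g_got_usr1 = 1; }
static void wait_for_usr1() {
  sigset_t block, old, empty;
  sigemptyset(&block);
  sigaddset(&block, SIGUSR1);
  pthread_sigmask(SIG_BLOCK, &block, &old);  // SIGUSR1 is now blocked
  signal(SIGUSR1, on_usr1);
  sigemptyset(&empty);
  while (!g_got_usr1)
    sigsuspend(&empty);                      // handler runs before this returns
  pthread_sigmask(SIG_SETMASK, &old, nullptr);
}
#endif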
1784
1785 namespace __tsan {
1786
1787 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
1788 bool sigact, int sig, my_siginfo_t *info, void *uctx) {
1789 if (acquire)
1790 Acquire(thr, 0, (uptr)&sigactions[sig]);
1791 // Signals are generally asynchronous, so if we receive a signal while
1792 // ignores are enabled we should disable ignores. This is critical for sync
1793 // and interceptors, because otherwise we can miss synchronization and report
1794 // false races.
1795 int ignore_reads_and_writes = thr->ignore_reads_and_writes;
1796 int ignore_interceptors = thr->ignore_interceptors;
1797 int ignore_sync = thr->ignore_sync;
1798 if (!ctx->after_multithreaded_fork) {
1799 thr->ignore_reads_and_writes = 0;
1800 thr->fast_state.ClearIgnoreBit();
1801 thr->ignore_interceptors = 0;
1802 thr->ignore_sync = 0;
1803 }
1804 // Ensure that the handler does not spoil errno.
1805 const int saved_errno = errno;
1806 errno = 99;
1807 // This code races with sigaction. Be careful to not read sa_sigaction twice.
1808 // Also need to remember pc for reporting before the call,
1809 // because the handler can reset it.
1810 volatile uptr pc = sigact ?
1811 (uptr)sigactions[sig].sa_sigaction :
1812 (uptr)sigactions[sig].sa_handler;
1813 if (pc != (uptr)SIG_DFL && pc != (uptr)SIG_IGN) {
1814 if (sigact)
1815 ((sigactionhandler_t)pc)(sig, info, uctx);
1816 else
1817 ((sighandler_t)pc)(sig);
1818 }
1819 if (!ctx->after_multithreaded_fork) {
1820 thr->ignore_reads_and_writes = ignore_reads_and_writes;
1821 if (ignore_reads_and_writes)
1822 thr->fast_state.SetIgnoreBit();
1823 thr->ignore_interceptors = ignore_interceptors;
1824 thr->ignore_sync = ignore_sync;
1825 }
1826 // We do not detect errno spoiling for SIGTERM,
1827 // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
1828 // and tsan would report a false positive in such a case.
1829 // It's difficult to properly detect this situation (the reraise),
1830 // because in the async signal processing case (when the handler is called
1831 // directly from rtl_generic_sighandler) we have not yet received the reraised
1832 // signal; and it looks too fragile to intercept all the ways to reraise a signal.
1833 if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
1834 VarSizeStackTrace stack;
1835 // StackTrace::GetNextInstructionPc(pc) is used because a return address is
1836 // expected; OutputReport() will undo this.
1837 ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
1838 ThreadRegistryLock l(ctx->thread_registry);
1839 ScopedReport rep(ReportTypeErrnoInSignal);
1840 if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
1841 rep.AddStack(stack, true);
1842 OutputReport(thr, rep);
1843 }
1844 }
1845 errno = saved_errno;
1846 }
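// Illustrative sketch (hypothetical user code, not compiled) of the bug class
// the sentinel check above (errno = 99 before the call) detects and reports as
// ReportTypeErrnoInSignal: an async signal handler that clobbers errno without
// restoring it. Names (bad_handler, good_handler) are made up.
#if 0
#include <errno.h>
#include <unistd.h>
static void bad_handler(int) {
  write(-1, "oops\n", 5);      // fails and overwrites errno; never restored
}
static void good_handler(int) {
  int saved = errno;           // conventional fix: save ...
  write(-1, "oops\n", 5);
  errno = saved;               // ... and restore errno around the handler body
}
#endif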
1847
1848 void ProcessPendingSignals(ThreadState *thr) {
1849 ThreadSignalContext *sctx = SigCtx(thr);
1850 if (sctx == 0 ||
1851 atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
1852 return;
1853 atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
1854 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
1855 internal_sigfillset(&sctx->emptyset);
1856 int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
1857 CHECK_EQ(res, 0);
1858 for (int sig = 0; sig < kSigCount; sig++) {
1859 SignalDesc *signal = &sctx->pending_signals[sig];
1860 if (signal->armed) {
1861 signal->armed = false;
1862 CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
1863 &signal->siginfo, &signal->ctx);
1864 }
1865 }
1866 res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
1867 CHECK_EQ(res, 0);
1868 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
1869 }
1870
1871 } // namespace __tsan
1872
1873 static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
1874 return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
1875 sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
1876 // If we are sending a signal to ourselves, we must process it now.
1877 (sctx && sig == sctx->int_signal_send);
1878 }
1879
1880 void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
1881 my_siginfo_t *info, void *ctx) {
1882 ThreadState *thr = cur_thread();
1883 ThreadSignalContext *sctx = SigCtx(thr);
1884 if (sig < 0 || sig >= kSigCount) {
1885 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
1886 return;
1887 }
1888 // Don't mess with synchronous signals.
1889 const bool sync = is_sync_signal(sctx, sig);
1890 if (sync ||
1891 // If we are in a blocking function, we can safely process it now
1892 // (but check if we are in a recursive interceptor,
1893 // e.g. pthread_join()->munmap()).
1894 (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
1895 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
1896 if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
1897 atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
1898 CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
1899 atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
1900 } else {
1901 // Be very conservative about when we do acquire in this case.
1902 // It's unsafe to do acquire in async handlers, because ThreadState
1903 // can be in an inconsistent state.
1904 // SIGSYS looks relatively safe -- it's synchronous and can actually
1905 // need some global state.
1906 bool acq = (sig == SIGSYS);
1907 CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
1908 }
1909 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
1910 return;
1911 }
1912
1913 if (sctx == 0)
1914 return;
1915 SignalDesc *signal = &sctx->pending_signals[sig];
1916 if (signal->armed == false) {
1917 signal->armed = true;
1918 signal->sigaction = sigact;
1919 if (info)
1920 internal_memcpy(&signal->siginfo, info, sizeof(*info));
1921 if (ctx)
1922 internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
1923 atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
1924 }
1925 }
1926
1927 static void rtl_sighandler(int sig) {
1928 rtl_generic_sighandler(false, sig, 0, 0);
1929 }
1930
1931 static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) {
1932 rtl_generic_sighandler(true, sig, info, ctx);
1933 }
1934
1935 TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
1936 // Note: if we call REAL(sigaction) directly for any reason without proxying
1937 // the signal handler through rtl_sigaction, very bad things will happen.
1938 // The handler will run synchronously and corrupt tsan per-thread state.
1939 SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
1940 if (old)
1941 internal_memcpy(old, &sigactions[sig], sizeof(*old));
1942 if (act == 0)
1943 return 0;
1944 // Copy act into sigactions[sig].
1945 // Can't use struct copy, because the compiler can emit a call to memcpy.
1946 // Can't use internal_memcpy, because it copies byte-by-byte,
1947 // and a signal handler may read sa_handler concurrently. So it could read
1948 // some bytes from the old value and some bytes from the new value.
1949 // Use volatile to prevent insertion of memcpy.
1950 sigactions[sig].sa_handler = *(volatile sighandler_t*)&act->sa_handler;
1951 sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags;
1952 internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
1953 sizeof(sigactions[sig].sa_mask));
1954 #if !SANITIZER_FREEBSD && !SANITIZER_MAC
1955 sigactions[sig].sa_restorer = act->sa_restorer;
1956 #endif
1957 sigaction_t newact;
1958 internal_memcpy(&newact, act, sizeof(newact));
1959 internal_sigfillset(&newact.sa_mask);
1960 if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL) {
1961 if (newact.sa_flags & SA_SIGINFO)
1962 newact.sa_sigaction = rtl_sigaction;
1963 else
1964 newact.sa_handler = rtl_sighandler;
1965 }
1966 ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
1967 int res = REAL(sigaction)(sig, &newact, 0);
1968 return res;
1969 }
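// Worked illustration (hypothetical, not compiled) of the torn-copy hazard the
// volatile accesses above avoid: copying a handler pointer byte-by-byte can be
// observed half-updated by a concurrently delivered signal, whereas a single
// pointer-sized load/store cannot. Names (handler_t, g_handler, install_*) are
// made up.
#if 0
typedef void (*handler_t)(int);
static handler_t g_handler;
static void install_bytewise(handler_t h) {
  unsigned char *dst = (unsigned char *)&g_handler;
  unsigned char *src = (unsigned char *)&h;
  for (unsigned i = 0; i < sizeof(h); i++)  // a signal arriving mid-loop could
    dst[i] = src[i];                        // see a mix of old and new bytes
}
static void install_whole(handler_t h) {
  g_handler = *(volatile handler_t *)&h;    // one pointer-sized load/store,
}                                           // mirroring the sa_handler copy above
#endif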
1970
1971 TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
1972 sigaction_t act;
1973 act.sa_handler = h;
1974 internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
1975 act.sa_flags = 0;
1976 sigaction_t old;
1977 int res = sigaction(sig, &act, &old);
1978 if (res)
1979 return SIG_ERR;
1980 return old.sa_handler;
1981 }
1982
1983 TSAN_INTERCEPTOR(int, raise, int sig) {
1984 SCOPED_TSAN_INTERCEPTOR(raise, sig);
1985 ThreadSignalContext *sctx = SigCtx(thr);
1986 CHECK_NE(sctx, 0);
1987 int prev = sctx->int_signal_send;
1988 sctx->int_signal_send = sig;
1989 int res = REAL(raise)(sig);
1990 CHECK_EQ(sctx->int_signal_send, sig);
1991 sctx->int_signal_send = prev;
1992 return res;
1993 }
1994
1995 TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
1996 SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
1997 ThreadSignalContext *sctx = SigCtx(thr);
1998 CHECK_NE(sctx, 0);
1999 int prev = sctx->int_signal_send;
2000 if (pid == (int)internal_getpid()) {
2001 sctx->int_signal_send = sig;
2002 }
2003 int res = REAL(kill)(pid, sig);
2004 if (pid == (int)internal_getpid()) {
2005 CHECK_EQ(sctx->int_signal_send, sig);
2006 sctx->int_signal_send = prev;
2007 }
2008 return res;
2009 }
2010
2011 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2012 SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2013 ThreadSignalContext *sctx = SigCtx(thr);
2014 CHECK_NE(sctx, 0);
2015 int prev = sctx->int_signal_send;
2016 if (tid == pthread_self()) {
2017 sctx->int_signal_send = sig;
2018 }
2019 int res = REAL(pthread_kill)(tid, sig);
2020 if (tid == pthread_self()) {
2021 CHECK_EQ(sctx->int_signal_send, sig);
2022 sctx->int_signal_send = prev;
2023 }
2024 return res;
2025 }
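// Illustrative sketch (hypothetical user code, not compiled): raise() to the
// current thread must return only after the handler has executed. Recording the
// signal in int_signal_send above lets is_sync_signal() (defined above) treat
// the self-sent signal as synchronous, so it is processed immediately instead
// of being deferred. Names (g_ran, handler, self_signal) are made up.
#if 0
#include <signal.h>
#include <assert.h>
static volatile sig_atomic_t g_ran;
static void handler(int) { g_ran = 1; }
static void self_signal() {
  signal(SIGUSR2, handler);
  raise(SIGUSR2);       // handler must have run before raise() returns
  assert(g_ran == 1);
}
#endif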
2026
2027 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2028 SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2029 // It's intercepted merely to process pending signals.
2030 return REAL(gettimeofday)(tv, tz);
2031 }
2032
2033 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2034 void *hints, void *rv) {
2035 SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2036 // We miss atomic synchronization in getaddrinfo,
2037 // and can report a false race between malloc and free
2038 // inside of getaddrinfo. So ignore memory accesses.
2039 ThreadIgnoreBegin(thr, pc);
2040 int res = REAL(getaddrinfo)(node, service, hints, rv);
2041 ThreadIgnoreEnd(thr, pc);
2042 return res;
2043 }
2044
2045 TSAN_INTERCEPTOR(int, fork, int fake) {
2046 if (cur_thread()->in_symbolizer)
2047 return REAL(fork)(fake);
2048 SCOPED_INTERCEPTOR_RAW(fork, fake);
2049 ForkBefore(thr, pc);
2050 int pid;
2051 {
2052 // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
2053 // we'll assert in CheckNoLocks() unless we ignore interceptors.
2054 ScopedIgnoreInterceptors ignore;
2055 pid = REAL(fork)(fake);
2056 }
2057 if (pid == 0) {
2058 // child
2059 ForkChildAfter(thr, pc);
2060 FdOnFork(thr, pc);
2061 } else if (pid > 0) {
2062 // parent
2063 ForkParentAfter(thr, pc);
2064 } else {
2065 // error
2066 ForkParentAfter(thr, pc);
2067 }
2068 return pid;
2069 }
2070
2071 TSAN_INTERCEPTOR(int, vfork, int fake) {
2072 // Some programs (e.g. openjdk) call close for all file descriptors
2073 // in the child process. Under tsan this is a problem, because the
2074 // address space is shared, so the parent process also thinks that
2075 // the descriptors are closed (while they are actually not).
2076 // This leads to false positives due to missed synchronization.
2077 // Strictly speaking, this is undefined behavior, because the vfork child is
2078 // not allowed to call any functions other than exec/exit. But this is what
2079 // openjdk does, so we want to handle it.
2080 // We could disable interceptors in the child process. But it's not possible
2081 // to simply intercept and wrap vfork, because the vfork child is not allowed
2082 // to return from the function that calls vfork, and that's exactly what
2083 // we would do. So this would require some assembly trickery as well.
2084 // Instead we simply turn vfork into fork.
2085 return WRAP(fork)(fake);
2086 }
2087
2088 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2089 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2090 void *data);
2091 struct dl_iterate_phdr_data {
2092 ThreadState *thr;
2093 uptr pc;
2094 dl_iterate_phdr_cb_t cb;
2095 void *data;
2096 };
2097
2098 static bool IsAppNotRodata(uptr addr) {
2099 return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
2100 }
2101
2102 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2103 void *data) {
2104 dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2105 // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2106 // accessible in the dl_iterate_phdr callback. But we don't see synchronization
2107 // inside of the dynamic linker, so we "unpoison" it here in order to not
2108 // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough,
2109 // because some libc functions call __libc_dlopen.
2110 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2111 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2112 internal_strlen(info->dlpi_name));
2113 int res = cbdata->cb(info, size, cbdata->data);
2114 // Perform the check one more time in case info->dlpi_name was overwritten
2115 // by user callback.
2116 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2117 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2118 internal_strlen(info->dlpi_name));
2119 return res;
2120 }
2121
2122 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2123 SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2124 dl_iterate_phdr_data cbdata;
2125 cbdata.thr = thr;
2126 cbdata.pc = pc;
2127 cbdata.cb = cb;
2128 cbdata.data = data;
2129 int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2130 return res;
2131 }
2132 #endif
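// Illustrative sketch (hypothetical user code, not compiled) of the situation
// the shadow resets above handle: one thread walks the link map and reads
// info->dlpi_name, which points into dynamic-linker-internal memory, while
// another thread dlopens/dlcloses libraries; tsan cannot see the linker's
// internal locking. Names (count_objects, walker, loader) and the library name
// are made up; dl_iterate_phdr is a glibc extension (needs _GNU_SOURCE).
#if 0
#include <link.h>
#include <dlfcn.h>
#include <string.h>
static int count_objects(struct dl_phdr_info *info, size_t, void *data) {
  if (info->dlpi_name)
    (void)strlen(info->dlpi_name);  // reads linker-owned memory
  ++*(int *)data;
  return 0;                         // keep iterating
}
static void walker() {
  int n = 0;
  dl_iterate_phdr(count_objects, &n);
}
static void loader() {
  if (void *h = dlopen("libexample.so", RTLD_NOW))
    dlclose(h);
}
#endif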
2133
2134 static int OnExit(ThreadState *thr) {
2135 int status = Finalize(thr);
2136 FlushStreams();
2137 return status;
2138 }
2139
2140 struct TsanInterceptorContext {
2141 ThreadState *thr;
2142 const uptr caller_pc;
2143 const uptr pc;
2144 };
2145
2146 #if !SANITIZER_MAC
2147 static void HandleRecvmsg(ThreadState *thr, uptr pc,
2148 __sanitizer_msghdr *msg) {
2149 int fds[64];
2150 int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2151 for (int i = 0; i < cnt; i++)
2152 FdEventCreate(thr, pc, fds[i]);
2153 }
2154 #endif
2155
2156 #include "sanitizer_common/sanitizer_platform_interceptors.h"
2157 // Causes interceptor recursion (getaddrinfo() and fopen())
2158 #undef SANITIZER_INTERCEPT_GETADDRINFO
2159 // These interceptors do not seem to be strictly necessary for tsan.
2160 // But we see cases where the interceptors consume 70% of execution time.
2161 // Memory blocks passed to fgetgrent_r are "written to" by tsan several times.
2162 // First, there is some recursion (getgrnam_r calls fgetgrent_r), and each
2163 // function "writes to" the buffer. Then, the same memory is "written to"
2164 // twice, first as buf and then as pwbufp (both of them refer to the same
2165 // addresses).
2166 #undef SANITIZER_INTERCEPT_GETPWENT
2167 #undef SANITIZER_INTERCEPT_GETPWENT_R
2168 #undef SANITIZER_INTERCEPT_FGETPWENT
2169 #undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
2170 #undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
2171 // We define our own.
2172 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
2173 #define NEED_TLS_GET_ADDR
2174 #endif
2175 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2176
2177 #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
2178 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
2179 INTERCEPT_FUNCTION_VER(name, ver)
2180
2181 #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
2182 MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
2183 ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
2184 true)
2185
2186 #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
2187 MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
2188 ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
2189 false)
2190
2191 #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
2192 SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
2193 TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
2194 ctx = (void *)&_ctx; \
2195 (void) ctx;
2196
2197 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2198 SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
2199 TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
2200 ctx = (void *)&_ctx; \
2201 (void) ctx;
2202
2203 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2204 Acquire(thr, pc, File2addr(path)); \
2205 if (file) { \
2206 int fd = fileno_unlocked(file); \
2207 if (fd >= 0) FdFileCreate(thr, pc, fd); \
2208 }
2209
2210 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2211 if (file) { \
2212 int fd = fileno_unlocked(file); \
2213 if (fd >= 0) FdClose(thr, pc, fd); \
2214 }
2215
2216 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2217 libignore()->OnLibraryLoaded(filename)
2218
2219 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2220 libignore()->OnLibraryUnloaded()
2221
2222 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2223 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2224
2225 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2226 Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2227
2228 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2229 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2230
2231 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2232 FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2233
2234 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2235 FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2236
2237 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2238 FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2239
2240 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2241 FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2242
2243 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2244 ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2245
2246 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
2247 __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
2248
2249 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2250
2251 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2252 OnExit(((TsanInterceptorContext *) ctx)->thr)
2253
2254 #define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) \
2255 MutexLock(((TsanInterceptorContext *)ctx)->thr, \
2256 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2257
2258 #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
2259 MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
2260 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2261
2262 #define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
2263 MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
2264 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2265
2266 #define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
2267 MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
2268 ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2269
2270 #if !SANITIZER_MAC
2271 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2272 HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2273 ((TsanInterceptorContext *)ctx)->pc, msg)
2274 #endif
2275
2276 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
2277 if (TsanThread *t = GetCurrentThread()) { \
2278 *begin = t->tls_begin(); \
2279 *end = t->tls_end(); \
2280 } else { \
2281 *begin = *end = 0; \
2282 }
2283
2284 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2285 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2286
2287 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2288 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2289
2290 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2291
2292 #define TSAN_SYSCALL() \
2293 ThreadState *thr = cur_thread(); \
2294 if (thr->ignore_interceptors) \
2295 return; \
2296 ScopedSyscall scoped_syscall(thr) \
2297 /**/
2298
2299 struct ScopedSyscall {
2300 ThreadState *thr;
2301
2302 explicit ScopedSyscall(ThreadState *thr)
2303 : thr(thr) {
2304 Initialize(thr);
2305 }
2306
2307 ~ScopedSyscall() {
2308 ProcessPendingSignals(thr);
2309 }
2310 };
2311
2312 #if !SANITIZER_FREEBSD && !SANITIZER_MAC
2313 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2314 TSAN_SYSCALL();
2315 MemoryAccessRange(thr, pc, p, s, write);
2316 }
2317
2318 static void syscall_acquire(uptr pc, uptr addr) {
2319 TSAN_SYSCALL();
2320 Acquire(thr, pc, addr);
2321 DPrintf("syscall_acquire(%p)\n", addr);
2322 }
2323
2324 static void syscall_release(uptr pc, uptr addr) {
2325 TSAN_SYSCALL();
2326 DPrintf("syscall_release(%p)\n", addr);
2327 Release(thr, pc, addr);
2328 }
2329
2330 static void syscall_fd_close(uptr pc, int fd) {
2331 TSAN_SYSCALL();
2332 FdClose(thr, pc, fd);
2333 }
2334
2335 static USED void syscall_fd_acquire(uptr pc, int fd) {
2336 TSAN_SYSCALL();
2337 FdAcquire(thr, pc, fd);
2338 DPrintf("syscall_fd_acquire(%d)\n", fd);
2339 }
2340
2341 static USED void syscall_fd_release(uptr pc, int fd) {
2342 TSAN_SYSCALL();
2343 DPrintf("syscall_fd_release(%d)\n", fd);
2344 FdRelease(thr, pc, fd);
2345 }
2346
2347 static void syscall_pre_fork(uptr pc) {
2348 TSAN_SYSCALL();
2349 ForkBefore(thr, pc);
2350 }
2351
2352 static void syscall_post_fork(uptr pc, int pid) {
2353 TSAN_SYSCALL();
2354 if (pid == 0) {
2355 // child
2356 ForkChildAfter(thr, pc);
2357 FdOnFork(thr, pc);
2358 } else if (pid > 0) {
2359 // parent
2360 ForkParentAfter(thr, pc);
2361 } else {
2362 // error
2363 ForkParentAfter(thr, pc);
2364 }
2365 }
2366 #endif
2367
2368 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2369 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2370
2371 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2372 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2373
2374 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2375 do { \
2376 (void)(p); \
2377 (void)(s); \
2378 } while (false)
2379
2380 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2381 do { \
2382 (void)(p); \
2383 (void)(s); \
2384 } while (false)
2385
2386 #define COMMON_SYSCALL_ACQUIRE(addr) \
2387 syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2388
2389 #define COMMON_SYSCALL_RELEASE(addr) \
2390 syscall_release(GET_CALLER_PC(), (uptr)(addr))
2391
2392 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2393
2394 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2395
2396 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2397
2398 #define COMMON_SYSCALL_PRE_FORK() \
2399 syscall_pre_fork(GET_CALLER_PC())
2400
2401 #define COMMON_SYSCALL_POST_FORK(res) \
2402 syscall_post_fork(GET_CALLER_PC(), res)
2403
2404 #include "sanitizer_common/sanitizer_common_syscalls.inc"
2405
2406 #ifdef NEED_TLS_GET_ADDR
2407 // Define our own interceptor instead of sanitizer_common's for three reasons:
2408 // 1. It must not process pending signals.
2409 //    Signal handlers may contain MOVDQA instructions (see below).
2410 // 2. It must be as simple as possible so that it does not contain MOVDQA.
2411 // 3. The sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE,
2412 //    which is empty for tsan (meant only for msan).
2413 // Note: __tls_get_addr can be called with a mis-aligned stack due to:
2414 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2415 // So the interceptor must work with a mis-aligned stack; in particular, it
2416 // must not execute MOVDQA with stack addresses.
2417 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2418 void *res = REAL(__tls_get_addr)(arg);
2419 ThreadState *thr = cur_thread();
2420 if (!thr)
2421 return res;
2422 DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr, thr->tls_size);
2423 if (!dtv)
2424 return res;
2425 // New DTLS block has been allocated.
2426 MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2427 return res;
2428 }
2429 #endif
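// Illustrative sketch (hypothetical user code, not compiled): the first access
// to a __thread variable of a dlopen'ed module from a given thread goes through
// __tls_get_addr, which may lazily allocate a new DTLS block; the shadow reset
// above clears stale state left over from the memory's previous use. The
// library name and plugin_touch_tls are made up.
#if 0
#include <dlfcn.h>
static void touch_plugin_tls() {
  if (void *h = dlopen("libplugin.so", RTLD_NOW)) {
    // A function in the plugin that reads one of its own __thread variables
    // will trigger __tls_get_addr on the first access from this thread.
    if (void (*fn)() = (void (*)())dlsym(h, "plugin_touch_tls"))
      fn();
    dlclose(h);
  }
}
#endif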
2430
2431 namespace __tsan {
2432
2433 static void finalize(void *arg) {
2434 ThreadState *thr = cur_thread();
2435 int status = Finalize(thr);
2436 // Make sure the output is not lost.
2437 FlushStreams();
2438 if (status)
2439 Die();
2440 }
2441
2442 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2443 static void unreachable() {
2444 Report("FATAL: ThreadSanitizer: unreachable called\n");
2445 Die();
2446 }
2447 #endif
2448
2449 void InitializeInterceptors() {
2450 #if !SANITIZER_MAC
2451 // We need to set it up early, because functions like dlsym() can call it.
2452 REAL(memset) = internal_memset;
2453 REAL(memcpy) = internal_memcpy;
2454 #endif
2455
2456 // Instruct libc malloc to consume less memory.
2457 #if SANITIZER_LINUX
2458 mallopt(1, 0); // M_MXFAST
2459 mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
2460 #endif
2461
2462 InitializeCommonInterceptors();
2463
2464 #if !SANITIZER_MAC
2465 // We cannot use TSAN_INTERCEPT to get the setjmp address,
2466 // because it does &setjmp, and setjmp is not present in some versions of libc.
2467 using __interception::GetRealFunctionAddress;
2468 GetRealFunctionAddress("setjmp", (uptr*)&REAL(setjmp), 0, 0);
2469 GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
2470 GetRealFunctionAddress("sigsetjmp", (uptr*)&REAL(sigsetjmp), 0, 0);
2471 GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
2472 #endif
2473
2474 TSAN_INTERCEPT(longjmp);
2475 TSAN_INTERCEPT(siglongjmp);
2476
2477 TSAN_INTERCEPT(malloc);
2478 TSAN_INTERCEPT(__libc_memalign);
2479 TSAN_INTERCEPT(calloc);
2480 TSAN_INTERCEPT(realloc);
2481 TSAN_INTERCEPT(free);
2482 TSAN_INTERCEPT(cfree);
2483 TSAN_INTERCEPT(mmap);
2484 TSAN_MAYBE_INTERCEPT_MMAP64;
2485 TSAN_INTERCEPT(munmap);
2486 TSAN_MAYBE_INTERCEPT_MEMALIGN;
2487 TSAN_INTERCEPT(valloc);
2488 TSAN_MAYBE_INTERCEPT_PVALLOC;
2489 TSAN_INTERCEPT(posix_memalign);
2490
2491 TSAN_INTERCEPT(strcpy); // NOLINT
2492 TSAN_INTERCEPT(strncpy);
2493 TSAN_INTERCEPT(strdup);
2494
2495 TSAN_INTERCEPT(pthread_create);
2496 TSAN_INTERCEPT(pthread_join);
2497 TSAN_INTERCEPT(pthread_detach);
2498
2499 TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2500 TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2501 TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2502 TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2503 TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2504 TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2505
2506 TSAN_INTERCEPT(pthread_mutex_init);
2507 TSAN_INTERCEPT(pthread_mutex_destroy);
2508 TSAN_INTERCEPT(pthread_mutex_trylock);
2509 TSAN_INTERCEPT(pthread_mutex_timedlock);
2510
2511 TSAN_INTERCEPT(pthread_spin_init);
2512 TSAN_INTERCEPT(pthread_spin_destroy);
2513 TSAN_INTERCEPT(pthread_spin_lock);
2514 TSAN_INTERCEPT(pthread_spin_trylock);
2515 TSAN_INTERCEPT(pthread_spin_unlock);
2516
2517 TSAN_INTERCEPT(pthread_rwlock_init);
2518 TSAN_INTERCEPT(pthread_rwlock_destroy);
2519 TSAN_INTERCEPT(pthread_rwlock_rdlock);
2520 TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
2521 TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
2522 TSAN_INTERCEPT(pthread_rwlock_wrlock);
2523 TSAN_INTERCEPT(pthread_rwlock_trywrlock);
2524 TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
2525 TSAN_INTERCEPT(pthread_rwlock_unlock);
2526
2527 TSAN_INTERCEPT(pthread_barrier_init);
2528 TSAN_INTERCEPT(pthread_barrier_destroy);
2529 TSAN_INTERCEPT(pthread_barrier_wait);
2530
2531 TSAN_INTERCEPT(pthread_once);
2532
2533 TSAN_INTERCEPT(fstat);
2534 TSAN_MAYBE_INTERCEPT___FXSTAT;
2535 TSAN_MAYBE_INTERCEPT_FSTAT64;
2536 TSAN_MAYBE_INTERCEPT___FXSTAT64;
2537 TSAN_INTERCEPT(open);
2538 TSAN_MAYBE_INTERCEPT_OPEN64;
2539 TSAN_INTERCEPT(creat);
2540 TSAN_MAYBE_INTERCEPT_CREAT64;
2541 TSAN_INTERCEPT(dup);
2542 TSAN_INTERCEPT(dup2);
2543 TSAN_INTERCEPT(dup3);
2544 TSAN_MAYBE_INTERCEPT_EVENTFD;
2545 TSAN_MAYBE_INTERCEPT_SIGNALFD;
2546 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
2547 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
2548 TSAN_INTERCEPT(socket);
2549 TSAN_INTERCEPT(socketpair);
2550 TSAN_INTERCEPT(connect);
2551 TSAN_INTERCEPT(bind);
2552 TSAN_INTERCEPT(listen);
2553 TSAN_MAYBE_INTERCEPT_EPOLL;
2554 TSAN_INTERCEPT(close);
2555 TSAN_MAYBE_INTERCEPT___CLOSE;
2556 TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
2557 TSAN_INTERCEPT(pipe);
2558 TSAN_INTERCEPT(pipe2);
2559
2560 TSAN_INTERCEPT(unlink);
2561 TSAN_INTERCEPT(tmpfile);
2562 TSAN_MAYBE_INTERCEPT_TMPFILE64;
2563 TSAN_INTERCEPT(fread);
2564 TSAN_INTERCEPT(fwrite);
2565 TSAN_INTERCEPT(abort);
2566 TSAN_INTERCEPT(puts);
2567 TSAN_INTERCEPT(rmdir);
2568 TSAN_INTERCEPT(closedir);
2569
2570 TSAN_INTERCEPT(sigaction);
2571 TSAN_INTERCEPT(signal);
2572 TSAN_INTERCEPT(sigsuspend);
2573 TSAN_INTERCEPT(sigblock);
2574 TSAN_INTERCEPT(sigsetmask);
2575 TSAN_INTERCEPT(pthread_sigmask);
2576 TSAN_INTERCEPT(raise);
2577 TSAN_INTERCEPT(kill);
2578 TSAN_INTERCEPT(pthread_kill);
2579 TSAN_INTERCEPT(sleep);
2580 TSAN_INTERCEPT(usleep);
2581 TSAN_INTERCEPT(nanosleep);
2582 TSAN_INTERCEPT(gettimeofday);
2583 TSAN_INTERCEPT(getaddrinfo);
2584
2585 TSAN_INTERCEPT(fork);
2586 TSAN_INTERCEPT(vfork);
2587 #if !SANITIZER_ANDROID
2588 TSAN_INTERCEPT(dl_iterate_phdr);
2589 #endif
2590 TSAN_INTERCEPT(on_exit);
2591 TSAN_INTERCEPT(__cxa_atexit);
2592 TSAN_INTERCEPT(_exit);
2593
2594 #ifdef NEED_TLS_GET_ADDR
2595 TSAN_INTERCEPT(__tls_get_addr);
2596 #endif
2597
2598 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2599 // Need to set it up, because interceptors check that the function is resolved.
2600 // But atexit is emitted directly into the module, so it can't be resolved.
2601 REAL(atexit) = (int(*)(void(*)()))unreachable;
2602 #endif
2603
2604 if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
2605 Printf("ThreadSanitizer: failed to setup atexit callback\n");
2606 Die();
2607 }
2608
2609 #if !SANITIZER_MAC
2610 if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
2611 Printf("ThreadSanitizer: failed to create thread key\n");
2612 Die();
2613 }
2614 #endif
2615
2616 FdInit();
2617 }
2618
2619 } // namespace __tsan
2620
2621 // Invisible barrier for tests.
2622 // There were several unsuccessful iterations of this functionality:
2623 // 1. Initially it was implemented in user code using
2624 //    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
2625 //    MacOS. Futexes are linux-specific, so they don't help here either.
2626 // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
2627 //    "as-if synchronized via sleep" messages in reports, which failed some
2628 //    output tests.
2629 // 3. Then we switched to atomics+sched_yield. But this produced tons of
2630 //    tsan-visible events, which led to "failed to restore stack trace" failures.
2631 // Note that the no_sanitize_thread attribute does not turn off atomic
2632 // interception, so attaching it to a function defined in user code does not help.
2633 // That's why we now have what we have.
2634 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2635 void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
2636 if (count >= (1 << 8)) {
2637 Printf("barrier_init: count is too large (%d)\n", count);
2638 Die();
2639 }
2640 // The 8 LSBs hold the thread count; the remaining bits count the entered threads.
2641 *barrier = count;
2642 }
2643
2644 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2645 void __tsan_testonly_barrier_wait(u64 *barrier) {
2646 unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
2647 unsigned old_epoch = (old >> 8) / (old & 0xff);
2648 for (;;) {
2649 unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
2650 unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
2651 if (cur_epoch != old_epoch)
2652 return;
2653 internal_sched_yield();
2654 }
2655 }
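// Worked example (hypothetical, not compiled) of the packing used above and of
// the intended test-side usage. With count = 4 the word starts at 4; each
// waiter adds 1 << 8 = 256, so after all four threads arrive the high bits hold
// 4 and the epoch (high bits / count) advances from 0 to 1, releasing the
// spinners. Names (g_barrier, test_thread, run_test) are made up.
#if 0
static u64 g_barrier;
static void test_thread() {
  // ... first phase of the test ...
  __tsan_testonly_barrier_wait(&g_barrier);     // all threads rendezvous here
  // ... second phase, ordered after every thread's first phase ...
}
static void run_test() {
  __tsan_testonly_barrier_init(&g_barrier, 4);  // 4 participating threads
  // spawn 4 threads running test_thread(), then join them
}
#endif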