//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

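// Raw, statically allocated storage for the global Context and, in C++ mode,
// for the current thread's ThreadState. Both are set up with placement new
// (the context in Initialize() below, the thread state during thread startup)
// so that no static constructors are needed.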
#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

static Context *ctx;
Context *CTX() {
  return ctx;
}

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_mtx(MutexTypeThreads, StatMtxThreads)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

ThreadContext::ThreadContext(int tid)
  : tid(tid)
  , unique_id()
  , user_id()
  , thr()
  , status(ThreadStatusInvalid)
  , detached()
  , reuse_count()
  , epoch0()
  , epoch1()
  , dead_info()
  , dead_next() {
}

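// Formats a one-line summary of shadow, thread and sync-object memory
// consumption; MemoryProfileThread() below writes one such line per second.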
static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
  uptr shadow = GetShadowMemoryConsumption();

  int nthread = 0;
  int nlivethread = 0;
  uptr threadmem = 0;
  {
    Lock l(&ctx->thread_mtx);
    for (unsigned i = 0; i < kMaxTid; i++) {
      ThreadContext *tctx = ctx->threads[i];
      if (tctx == 0)
        continue;
      nthread += 1;
      threadmem += sizeof(ThreadContext);
      if (tctx->status != ThreadStatusRunning)
        continue;
      nlivethread += 1;
      threadmem += sizeof(ThreadState);
    }
  }

  uptr nsync = 0;
  uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync);

  internal_snprintf(buf, buf_size, "%d: shadow=%zuMB"
                                   " thread=%zuMB(total=%d/live=%d)"
                                   " sync=%zuMB(cnt=%zu)\n",
      num,
      shadow >> 20,
      threadmem >> 20, nthread, nlivethread,
      syncmem >> 20, nsync);
}

static void MemoryProfileThread(void *arg) {
  ScopedInRtl in_rtl;
  fd_t fd = (fd_t)(uptr)arg;
  for (int i = 0; ; i++) {
    InternalScopedBuffer<char> buf(4096);
    WriteMemoryProfile(buf.data(), buf.size(), i);
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    SleepForSeconds(1);
  }
}

static void InitializeMemoryProfile() {
  if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
    return;
  InternalScopedBuffer<char> filename(4096);
  internal_snprintf(filename.data(), filename.size(), "%s.%d",
      flags()->profile_memory, GetPid());
  fd_t fd = internal_open(filename.data(), true);
  if (fd == kInvalidFd) {
    TsanPrintf("Failed to open memory profile file '%s'\n", &filename[0]);
    Die();
  }
  internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
}

static void MemoryFlushThread(void *arg) {
  ScopedInRtl in_rtl;
  for (int i = 0; ; i++) {
    SleepForMillis(flags()->flush_memory_ms);
    FlushShadowMemory();
  }
}

static void InitializeMemoryFlush() {
  if (flags()->flush_memory_ms == 0)
    return;
  if (flags()->flush_memory_ms < 100)
    flags()->flush_memory_ms = 100;
  internal_start_thread(&MemoryFlushThread, 0);
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
  InitializeShadowMemory();
  ctx->dead_list_size = 0;
  ctx->dead_list_head = 0;
  ctx->dead_list_tail = 0;
  InitializeFlags(&ctx->flags, env);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    InitializeExternalSymbolizer(external_symbolizer);
  }
#endif
  InitializeMemoryProfile();
  InitializeMemoryFlush();

  if (ctx->flags.verbosity)
    TsanPrintf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
               GetPid());

  // Initialize thread 0.
  ctx->thread_seq = 0;
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid);
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    TsanPrintf("ThreadSanitizer is suspended at startup (pid %d)."
               " Call __tsan_resume().\n",
               GetPid());
    while (__tsan_resumed == 0);
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
    TsanPrintf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
  }

  if (ctx->nmissed_expected) {
    failed = true;
    TsanPrintf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
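// Returns a stack depot id for the current shadow stack. If pc is non-zero,
// it is temporarily pushed so that the stored stack includes the current
// program counter as its top frame.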
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif

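// Called when execution crosses into a new trace part (see TraceAddEvent):
// records the starting epoch and the current stack in the part header so
// that events in this part can be recovered during race reporting.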
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Lock l(&thr->trace.mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % kTraceParts;
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  thr->nomalloc--;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

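// Writes the pending shadow value *s into the slot *sp and zeroes *s, so
// the value is stored into at most one slot and the caller can tell from
// store_word == 0 that no further store is needed.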
ALWAYS_INLINE
static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

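// Small predicates used by tsan_update_shadow_word_inl.h to classify the
// old shadow value relative to the current access.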
static inline bool BothReads(Shadow s, int kAccessIsWrite) {
  return !kAccessIsWrite && !s.is_write();
}

static inline bool OldIsRWStronger(Shadow old, int kAccessIsWrite) {
  return old.is_write() || !kAccessIsWrite;
}

static inline bool OldIsRWWeaker(Shadow old, int kAccessIsWrite) {
  return !old.is_write() || kAccessIsWrite;
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.tid()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, FastState fast_state,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This can potentially live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered larger and
  // smaller sizes as well; that allowed replacing some 'candidates' with
  // 'same' or 'replace', but I think it's just not worth it
  // (performance- and complexity-wise).

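  // tsan_update_shadow_word_inl.h is textually included once per shadow
  // slot to fully unroll the scan; on a detected race it jumps to the
  // RACE label below.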
  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and have already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
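  // Target of the 'goto RACE' in tsan_update_shadow_word_inl.h.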
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: tsan::OnMemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    TsanPrintf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    TsanPrintf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, fast_state,
      shadow_mem, cur);
}

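// Writes val into the first shadow slot of every shadow cell in
// [addr, addr + size) and zeroes the remaining slots of each cell.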
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  (void)thr;
  (void)pc;
  // Some programs mmap hundreds of GBs but actually use only a small part,
  // so it's better to report a false positive on that memory
  // than to hang here senselessly.
  const uptr kMaxResetSize = 4ull*1024*1024*1024;
  if (size > kMaxResetSize)
    size = kMaxResetSize;
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  u64 *p = (u64*)MemToShadow(addr);
  CHECK(IsShadowMem((uptr)p));
  CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
  // FIXME: may overwrite a part outside the region.
  for (uptr i = 0; i < size * kShadowCnt / kShadowCell;) {
    p[i++] = val;
    for (uptr j = 1; j < kShadowCnt; j++)
      p[i++] = 0;
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

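// Reports the free as a write to the whole range and then marks the range
// as freed, so that a later access to it conflicts with this shadow value
// and gets reported.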
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryAccessRange(thr, pc, addr, size, true);
  Shadow s(thr->fast_state);
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which would presumably be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
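  // In Go mode the shadow stack is dynamically allocated,
  // so grow it on demand when it fills up.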
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

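// Implements the begin/end ignore annotations: maintains a nesting counter
// and mirrors it into the fast-path ignore bit checked in MemoryAccess().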
void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

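// Empty functions whose names encode the build configuration; presumably
// referenced from code built with the same settings so that a configuration
// mismatch between objects surfaces as a link-time error.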
#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif