]> git.proxmox.com Git - rustc.git/blame - src/compiler-rt/lib/tsan/tests/rtl/tsan_test_util_posix.cc
New upstream version 1.19.0+dfsg1
[rustc.git] / src / compiler-rt / lib / tsan / tests / rtl / tsan_test_util_posix.cc
CommitLineData
3157f602 1//===-- tsan_test_util_posix.cc -------------------------------------------===//
1a4d82fc
JJ
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of ThreadSanitizer (TSan), a race detector.
11//
3157f602 12// Test utils, Linux, FreeBSD and Darwin implementation.
1a4d82fc
JJ
13//===----------------------------------------------------------------------===//
14
15#include "sanitizer_common/sanitizer_atomic.h"
16#include "tsan_interface.h"
7cac9316 17#include "tsan_posix_util.h"
1a4d82fc
JJ
18#include "tsan_test_util.h"
19#include "tsan_report.h"
20
21#include "gtest/gtest.h"
22
23#include <assert.h>
24#include <pthread.h>
25#include <stdio.h>
26#include <stdint.h>
27#include <string.h>
28#include <unistd.h>
29#include <errno.h>
30
31using namespace __tsan; // NOLINT
32
// Per-thread report-expectation bookkeeping. Set up by
// ScopedThread::Impl::HandleEvent before each event and consulted by
// __tsan::OnReport when the runtime produces a report.
static __thread bool expect_report;             // current event expects a report
static __thread bool expect_report_reported;    // OnReport saw a matching report
static __thread ReportType expect_report_type;  // kind of report expected
36
1a4d82fc
JJ
// Trivial thread entry point used by TestMutexBeforeInit to exercise
// pthread_create/pthread_join before __tsan_init() has run.
// Ignores its argument and produces no result.
static void *BeforeInitThread(void *param) {
  static_cast<void>(param);  // intentionally unused
  return 0;
}
41
// No-op atexit() handler; registered by TestMutexBeforeInit purely to
// exercise the atexit machinery before __tsan_init().
static void AtExit() {
}
44
// Verifies that the intercepted pthread entry points are usable before
// __tsan_init() runs (interceptors can be reached very early, e.g. from
// other global initializers). Exercises a statically initialized mutex,
// thread creation/join, and atexit registration.
void TestMutexBeforeInit() {
  // Mutexes must be usable before __tsan_init();
  pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
  __interceptor_pthread_mutex_lock(&mtx);
  __interceptor_pthread_mutex_unlock(&mtx);
  __interceptor_pthread_mutex_destroy(&mtx);
  // Thread create/join must also work pre-init.
  pthread_t thr;
  __interceptor_pthread_create(&thr, 0, BeforeInitThread, 0);
  __interceptor_pthread_join(thr, 0);
  atexit(AtExit);
}
56
namespace __tsan {
// Hook invoked by the tsan runtime for every report it generates.
// Returns true to tell the runtime the report was consumed by the test
// harness; false lets the runtime handle/print it. Fails the current
// gtest when the report does not match what the running test expects
// (tracked in the thread-local expect_report* variables above).
bool OnReport(const ReportDesc *rep, bool suppressed) {
  if (expect_report) {
    if (rep->typ != expect_report_type) {
      printf("Expected report of type %d, got type %d\n",
             (int)expect_report_type, (int)rep->typ);
      EXPECT_TRUE(false) << "Wrong report type";
      return false;
    }
  } else {
    // No report was expected for the current event at all.
    EXPECT_TRUE(false) << "Unexpected report";
    return false;
  }
  expect_report_reported = true;
  return true;
}
}  // namespace __tsan
74
// Hands out unique fake addresses for MemLoc objects. Addresses come from
// a process-wide bump counter seeded with the address of a real static so
// they lie in plausible memory; each request advances the counter by the
// rounded-up size plus one extra kAlign of slack, so consecutive locations
// never overlap even with a non-zero offset.
// offset_from_aligned (must be < kAlign) deliberately misaligns the
// returned address for misaligned-access tests.
// NOTE(review): alignment is relative to the seed &foo, which is not
// itself guaranteed to be kAlign-aligned — confirm if absolute 16-byte
// alignment is ever relied upon.
static void* allocate_addr(int size, int offset_from_aligned = 0) {
  static uintptr_t foo;
  static atomic_uintptr_t uniq = {(uintptr_t)&foo};  // Some real address.
  const int kAlign = 16;
  CHECK(offset_from_aligned < kAlign);
  // Round up to kAlign and add a full kAlign of padding.
  size = (size + 2 * kAlign) & ~(kAlign - 1);
  uintptr_t addr = atomic_fetch_add(&uniq, size, memory_order_relaxed);
  return (void*)(addr + offset_from_aligned);
}
84
// Creates a fresh 16-byte fake memory location; offset_from_aligned
// shifts the address off alignment for misaligned-access tests.
MemLoc::MemLoc(int offset_from_aligned)
  : loc_(allocate_addr(16, offset_from_aligned)) {
}
88
// Addresses from allocate_addr() are never recycled, so there is
// nothing to release here.
MemLoc::~MemLoc() {
}
91
// Records the mutex flavor only; the underlying pthread primitive is
// created later by Init() or StaticInit().
Mutex::Mutex(Type type)
  : alive_()
  , type_(type) {
}
96
// Tests must call Destroy() explicitly before the Mutex goes out of scope.
Mutex::~Mutex() {
  CHECK(!alive_);
}
100
// Initializes the pthread primitive matching type_ through the tsan
// interceptors. The #ifndef __APPLE__ block splices the Spin branch into
// the if/else chain only on platforms that provide pthread spinlocks
// (Darwin does not).
void Mutex::Init() {
  CHECK(!alive_);
  alive_ = true;
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_init((pthread_mutex_t*)mtx_, 0), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_init((pthread_spinlock_t*)mtx_, 0), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_init((pthread_rwlock_t*)mtx_, 0), 0);
  else
    CHECK(0);
}
115
// Emulates static initialization: copies the PTHREAD_MUTEX_INITIALIZER
// image into the storage directly, bypassing the interceptors, so tests
// can check that statically initialized mutexes work. Only Normal
// mutexes have such an initializer here.
void Mutex::StaticInit() {
  CHECK(!alive_);
  CHECK(type_ == Normal);
  alive_ = true;
  pthread_mutex_t tmp = PTHREAD_MUTEX_INITIALIZER;
  memcpy(mtx_, &tmp, sizeof(tmp));
}
123
// Destroys the underlying primitive for type_. Mirrors Init(): the Spin
// branch exists only where pthread spinlocks are available.
void Mutex::Destroy() {
  CHECK(alive_);
  alive_ = false;
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_destroy((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_destroy((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_destroy((pthread_rwlock_t*)mtx_), 0);
}
136
// Acquires the mutex exclusively (RW mutexes take the write lock).
// The Spin branch is spliced in only on platforms with pthread spinlocks.
void Mutex::Lock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_lock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_lock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_wrlock((pthread_rwlock_t*)mtx_), 0);
}
148
// Attempts a non-blocking exclusive acquisition; returns true iff the
// lock was obtained. On Darwin a Spin mutex falls through to the final
// `return false` since the Spin branch is compiled out.
bool Mutex::TryLock() {
  CHECK(alive_);
  if (type_ == Normal)
    return __interceptor_pthread_mutex_trylock((pthread_mutex_t*)mtx_) == 0;
#ifndef __APPLE__
  else if (type_ == Spin)
    return pthread_spin_trylock((pthread_spinlock_t*)mtx_) == 0;
#endif
  else if (type_ == RW)
    return __interceptor_pthread_rwlock_trywrlock((pthread_rwlock_t*)mtx_) == 0;
  return false;
}
161
// Releases a lock taken by Lock()/TryLock(). RW mutexes release through
// pthread_rwlock_unlock, which handles both read and write ownership.
void Mutex::Unlock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_unlock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_unlock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}
173
174void Mutex::ReadLock() {
175 CHECK(alive_);
176 CHECK(type_ == RW);
3157f602 177 CHECK_EQ(__interceptor_pthread_rwlock_rdlock((pthread_rwlock_t*)mtx_), 0);
1a4d82fc
JJ
178}
179
180bool Mutex::TryReadLock() {
181 CHECK(alive_);
182 CHECK(type_ == RW);
3157f602 183 return __interceptor_pthread_rwlock_tryrdlock((pthread_rwlock_t*)mtx_) == 0;
1a4d82fc
JJ
184}
185
186void Mutex::ReadUnlock() {
187 CHECK(alive_);
188 CHECK(type_ == RW);
3157f602 189 CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
1a4d82fc
JJ
190}
191
// A single command sent from the test driver to a ScopedThread, plus
// in/out fields describing the expected outcome of handling it.
struct Event {
  enum Type {
    SHUTDOWN,
    READ,
    WRITE,
    VPTR_UPDATE,
    CALL,
    RETURN,
    MUTEX_CREATE,
    MUTEX_DESTROY,
    MUTEX_LOCK,
    MUTEX_TRYLOCK,
    MUTEX_UNLOCK,
    MUTEX_READLOCK,
    MUTEX_TRYREADLOCK,
    MUTEX_READUNLOCK,
    MEMCPY,
    MEMSET
  };
  Type type;
  void *ptr;   // primary operand: access address, Mutex*, call pc, memcpy dst
  uptr arg;    // access size, new vptr value, memcpy src, or memset value
  uptr arg2;   // byte count for MEMCPY/MEMSET
  bool res;    // out: result of MUTEX_TRYLOCK/MUTEX_TRYREADLOCK
  bool expect_report;      // handling this event must produce a tsan report
  ReportType report_type;  // expected kind of report when expect_report is set

  Event(Type type, const void *ptr = 0, uptr arg = 0, uptr arg2 = 0)
    : type(type)
    , ptr(const_cast<void*>(ptr))
    , arg(arg)
    , arg2(arg2)
    , res()
    , expect_report()
    , report_type() {
  }

  // Marks this event as one that must trigger a report of the given type.
  void ExpectReport(ReportType type) {
    expect_report = true;
    report_type = type;
  }
};
234
// Hidden state of a ScopedThread: the worker pthread plus a one-slot
// mailbox through which events are handed to it.
struct ScopedThread::Impl {
  pthread_t thread;
  bool main;       // if true, events run inline on the caller's thread
  bool detached;   // worker was created/marked detached (no join on shutdown)
  atomic_uintptr_t event; // Event*; 0 means the mailbox slot is free

  static void *ScopedThreadCallback(void *arg);
  void send(Event *ev);
  void HandleEvent(Event *ev);
};
245
// Executes one event on the current thread, translating it into the
// corresponding __tsan_* / interceptor call. Sets up the thread-local
// expect_report bookkeeping consumed by __tsan::OnReport, and fails the
// current gtest if an expected report did not materialize.
void ScopedThread::Impl::HandleEvent(Event *ev) {
  CHECK_EQ(expect_report, false);
  expect_report = ev->expect_report;
  expect_report_reported = false;
  expect_report_type = ev->report_type;
  switch (ev->type) {
  case Event::READ:
  case Event::WRITE: {
    // Select the __tsan_readN/__tsan_writeN entry point for the size.
    void (*tsan_mop)(void *addr) = 0;
    if (ev->type == Event::READ) {
      switch (ev->arg /*size*/) {
      case 1: tsan_mop = __tsan_read1; break;
      case 2: tsan_mop = __tsan_read2; break;
      case 4: tsan_mop = __tsan_read4; break;
      case 8: tsan_mop = __tsan_read8; break;
      case 16: tsan_mop = __tsan_read16; break;
      }
    } else {
      switch (ev->arg /*size*/) {
      case 1: tsan_mop = __tsan_write1; break;
      case 2: tsan_mop = __tsan_write2; break;
      case 4: tsan_mop = __tsan_write4; break;
      case 8: tsan_mop = __tsan_write8; break;
      case 16: tsan_mop = __tsan_write16; break;
      }
    }
    CHECK_NE(tsan_mop, 0);
    // Seed errno with an exotic value to detect if the memory-access
    // entry point clobbers it; the value must exist on each platform.
#if defined(__FreeBSD__) || defined(__APPLE__)
    const int ErrCode = ESOCKTNOSUPPORT;
#else
    const int ErrCode = ECHRNG;
#endif
    errno = ErrCode;
    tsan_mop(ev->ptr);
    CHECK_EQ(ErrCode, errno); // In no case must errno be changed.
    break;
  }
  case Event::VPTR_UPDATE:
    __tsan_vptr_update((void**)ev->ptr, (void*)ev->arg);
    break;
  case Event::CALL:
    __tsan_func_entry((void*)((uptr)ev->ptr));
    break;
  case Event::RETURN:
    __tsan_func_exit();
    break;
  case Event::MUTEX_CREATE:
    static_cast<Mutex*>(ev->ptr)->Init();
    break;
  case Event::MUTEX_DESTROY:
    static_cast<Mutex*>(ev->ptr)->Destroy();
    break;
  case Event::MUTEX_LOCK:
    static_cast<Mutex*>(ev->ptr)->Lock();
    break;
  case Event::MUTEX_TRYLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryLock();
    break;
  case Event::MUTEX_UNLOCK:
    static_cast<Mutex*>(ev->ptr)->Unlock();
    break;
  case Event::MUTEX_READLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadLock();
    break;
  case Event::MUTEX_TRYREADLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryReadLock();
    break;
  case Event::MUTEX_READUNLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadUnlock();
    break;
  case Event::MEMCPY:
    __interceptor_memcpy(ev->ptr, (void*)ev->arg, ev->arg2);
    break;
  case Event::MEMSET:
    __interceptor_memset(ev->ptr, ev->arg, ev->arg2);
    break;
  default: CHECK(0);
  }
  // A test that expected a race must have seen OnReport fire.
  if (expect_report && !expect_report_reported) {
    printf("Missed expected report of type %d\n", (int)ev->report_type);
    EXPECT_TRUE(false) << "Missed expected race";
  }
  expect_report = false;
}
330
// Worker thread loop: spin-polls the single-slot mailbox, executes each
// incoming event, and acknowledges completion by resetting the slot to 0
// (release) so the sender's acquire-load observes it. A SHUTDOWN event is
// acknowledged and terminates the loop.
void *ScopedThread::Impl::ScopedThreadCallback(void *arg) {
  __tsan_func_entry(__builtin_return_address(0));
  Impl *impl = (Impl*)arg;
  for (;;) {
    Event* ev = (Event*)atomic_load(&impl->event, memory_order_acquire);
    if (ev == 0) {
      sched_yield();  // mailbox empty; yield and re-poll
      continue;
    }
    if (ev->type == Event::SHUTDOWN) {
      atomic_store(&impl->event, 0, memory_order_release);
      break;
    }
    impl->HandleEvent(ev);
    atomic_store(&impl->event, 0, memory_order_release);
  }
  __tsan_func_exit();
  return 0;
}
350
// Delivers an event to this thread and blocks until it has been handled.
// The main pseudo-thread executes events inline; for worker threads the
// Event* is published into the mailbox with release semantics and the
// caller spin-waits until the worker resets the slot to 0.
void ScopedThread::Impl::send(Event *e) {
  if (main) {
    HandleEvent(e);
  } else {
    // The previous event must already have been consumed.
    CHECK_EQ(atomic_load(&event, memory_order_relaxed), 0);
    atomic_store(&event, (uintptr_t)e, memory_order_release);
    while (atomic_load(&event, memory_order_acquire) != 0)
      sched_yield();
  }
}
361
362ScopedThread::ScopedThread(bool detached, bool main) {
363 impl_ = new Impl;
364 impl_->main = main;
365 impl_->detached = detached;
366 atomic_store(&impl_->event, 0, memory_order_relaxed);
367 if (!main) {
368 pthread_attr_t attr;
369 pthread_attr_init(&attr);
3157f602
XL
370 pthread_attr_setdetachstate(
371 &attr, detached ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE);
1a4d82fc 372 pthread_attr_setstacksize(&attr, 64*1024);
3157f602 373 __interceptor_pthread_create(&impl_->thread, &attr,
1a4d82fc
JJ
374 ScopedThread::Impl::ScopedThreadCallback, impl_);
375 }
376}
377
// Shuts the worker down: sends SHUTDOWN (which the worker acknowledges
// before exiting its loop) and joins unless the thread is detached.
ScopedThread::~ScopedThread() {
  if (!impl_->main) {
    Event event(Event::SHUTDOWN);
    impl_->send(&event);
    if (!impl_->detached)
      __interceptor_pthread_join(impl_->thread, 0);
  }
  delete impl_;
}
387
// Detaches an existing joinable worker through the interceptor, so tsan
// observes the detach. Only valid once, and never on the main pseudo-thread.
void ScopedThread::Detach() {
  CHECK(!impl_->main);
  CHECK(!impl_->detached);
  impl_->detached = true;
  __interceptor_pthread_detach(impl_->thread);
}
394
395void ScopedThread::Access(void *addr, bool is_write,
396 int size, bool expect_race) {
397 Event event(is_write ? Event::WRITE : Event::READ, addr, size);
398 if (expect_race)
399 event.ExpectReport(ReportTypeRace);
400 impl_->send(&event);
401}
402
403void ScopedThread::VptrUpdate(const MemLoc &vptr,
404 const MemLoc &new_val,
405 bool expect_race) {
406 Event event(Event::VPTR_UPDATE, vptr.loc(), (uptr)new_val.loc());
407 if (expect_race)
408 event.ExpectReport(ReportTypeRace);
409 impl_->send(&event);
410}
411
412void ScopedThread::Call(void(*pc)()) {
413 Event event(Event::CALL, (void*)((uintptr_t)pc));
414 impl_->send(&event);
415}
416
417void ScopedThread::Return() {
418 Event event(Event::RETURN);
419 impl_->send(&event);
420}
421
422void ScopedThread::Create(const Mutex &m) {
423 Event event(Event::MUTEX_CREATE, &m);
424 impl_->send(&event);
425}
426
427void ScopedThread::Destroy(const Mutex &m) {
428 Event event(Event::MUTEX_DESTROY, &m);
429 impl_->send(&event);
430}
431
432void ScopedThread::Lock(const Mutex &m) {
433 Event event(Event::MUTEX_LOCK, &m);
434 impl_->send(&event);
435}
436
437bool ScopedThread::TryLock(const Mutex &m) {
438 Event event(Event::MUTEX_TRYLOCK, &m);
439 impl_->send(&event);
440 return event.res;
441}
442
443void ScopedThread::Unlock(const Mutex &m) {
444 Event event(Event::MUTEX_UNLOCK, &m);
445 impl_->send(&event);
446}
447
448void ScopedThread::ReadLock(const Mutex &m) {
449 Event event(Event::MUTEX_READLOCK, &m);
450 impl_->send(&event);
451}
452
453bool ScopedThread::TryReadLock(const Mutex &m) {
454 Event event(Event::MUTEX_TRYREADLOCK, &m);
455 impl_->send(&event);
456 return event.res;
457}
458
459void ScopedThread::ReadUnlock(const Mutex &m) {
460 Event event(Event::MUTEX_READUNLOCK, &m);
461 impl_->send(&event);
462}
463
464void ScopedThread::Memcpy(void *dst, const void *src, int size,
465 bool expect_race) {
466 Event event(Event::MEMCPY, dst, (uptr)src, size);
467 if (expect_race)
468 event.ExpectReport(ReportTypeRace);
469 impl_->send(&event);
470}
471
472void ScopedThread::Memset(void *dst, int val, int size,
473 bool expect_race) {
474 Event event(Event::MEMSET, dst, val, size);
475 if (expect_race)
476 event.ExpectReport(ReportTypeRace);
477 impl_->send(&event);
478}