//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up to date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan; // NOLINT

#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
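
// Informative summary of the predicates above: loads accept
// relaxed/consume/acquire/seq_cst and stores accept relaxed/release/seq_cst,
// while the Acquire/Release/AcqRel predicates classify which side(s) of the
// synchronization an operation participates in (RMWs may use mo_acq_rel,
// which is both).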

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand,
  // so emulate it with a compare-and-swap loop.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;  // Swap succeeded; return the previous value.
    cmp = cur;  // Lost the race; retry with the freshly observed value.
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under the tsan internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif
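
// Note (informative): the lock-based 128-bit helpers above return the value
// observed before the update, mirroring the __sync builtins they replace,
// but they are atomic only with respect to each other, per the assumption
// stated above.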

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

#ifndef SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}
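
// Note that acquire loads take the SyncVar lock in read mode (the `false`
// argument above), so concurrent acquiring readers do not serialize against
// each other; stores and RMWs take the lock in write mode instead.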

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}
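
// For non-relaxed orders the SyncVar write lock is held across F(a, v), so
// the clock transfer and the atomic update appear as a single operation to
// other instrumented accesses; relaxed RMWs bypass the SyncVar entirely.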

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  // The CAS failed: report the actual value back to the caller,
  // matching compare_exchange semantics.
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#ifndef SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#ifndef SANITIZER_GO

// C/C++

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};
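
// Informative example (not part of this file's logic): for instrumented
// C/C++ code such as
//   std::atomic<int> x;
//   int v = x.load(std::memory_order_acquire);
// the compiler emits a call equivalent to
//   a32 v = __tsan_atomic32_load((const volatile a32 *)&x, mo_acquire);
// which runs through SCOPED_ATOMIC(Load, a, mo) below.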

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  // Dummy address: SCOPED_ATOMIC references `a` for stats and logging.
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #ifndef SANITIZER_GO

// Go

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/
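
// The Go entry points below receive their arguments packed into a single
// buffer `a` (layout derived from the casts in each function): the target
// address is at a+0, operands follow at a+8 (and a+12 or a+16 for the CAS
// exchange value), and the result (loaded value, old value, or CAS success
// flag) is written back into the buffer after the operands.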

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #ifndef SANITIZER_GO