//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

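// The func_* helpers below implement each atomic read-modify-write operation
// in terms of the compiler's __sync_* builtins. They are plugged into
// AtomicRMW<T, F>() further down and are also called directly when race
// detection bookkeeping is skipped.
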
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand,
  // so emulate it with a compare-and-swap loop.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}

static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

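// Note: the morder constants are expected to match the __ATOMIC_* / C++11
// memory_order values passed in by the compiler, so to_mo() is a straight
// one-to-one translation into sanitizer_common's memory_order.
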
template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

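// The NoTsanAtomic* variants (this one and those below) perform only the raw
// atomic operation, with no synchronization or shadow-memory bookkeeping.
// They serve as the fast path and as the fallback when the thread ignores
// interceptors.
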
#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

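// AtomicLoad: for acquire (and stronger) loads, acquire the sync clock
// attached to the address so the load synchronizes with earlier release
// stores; weaker loads take the fast path and only record the memory access.
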
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

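// AtomicStore: a release (or seq_cst) store publishes the current thread's
// clock via ReleaseStoreImpl() so that later acquire loads of the same
// address synchronize with it; weaker stores take the fast path.
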
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

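// AtomicRMW is parameterized by the raw operation F (func_add, func_xchg,
// etc.) and layers the acquire/release handling required by the requested
// memory order on top of it.
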
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

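// AtomicCAS returns true if the exchange succeeded; on failure the observed
// value is written back to *c, matching the C/C++ compare_exchange
// interface. The failure order fmo is currently ignored.
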
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

// Interface functions follow.

#if !SANITIZER_GO

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

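// SCOPED_ATOMIC is expanded at the top of every __tsan_atomic* entry point:
// it captures the caller PC, optionally upgrades the order to seq_cst
// (force_seq_cst_atomics), falls back to the NoTsanAtomic* variant when the
// thread ignores interceptors, bumps statistics, and opens a function scope
// via ScopedAtomic (below) so the atomic shows up in reports.
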
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
            : size == 2 ? StatAtomic2
            : size == 4 ? StatAtomic4
            : size == 8 ? StatAtomic8
            :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
            : mo == mo_consume ? StatAtomicConsume
            : mo == mo_acquire ? StatAtomicAcquire
            : mo == mo_release ? StatAtomicRelease
            : mo == mo_acq_rel ? StatAtomicAcq_Rel
            :                    StatAtomicSeq_Cst);
}

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

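// For reference (a rough sketch, not generated by this file): under
// -fsanitize=thread the compiler lowers a C++ `x.load(memory_order_acquire)`
// on a 4-byte atomic into a call along the lines of
//   a32 v = __tsan_atomic32_load((const volatile a32 *)&x, mo_acquire);
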
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

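// The *_compare_exchange_weak entry points below reuse the same AtomicCAS
// implementation, so under TSan a "weak" CAS never fails spuriously. As a
// rough sketch, `x.compare_exchange_strong(exp, val, memory_order_acq_rel)`
// on a 4-byte atomic lowers to something like
//   __tsan_atomic32_compare_exchange_strong(&x, &exp, val, mo_acq_rel,
//                                           mo_acquire);
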
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}

#else  // #if !SANITIZER_GO

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

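// In the Go build the runtime passes a single argument buffer `a`:
// the target address is read from offset 0, operands from the following
// slots, and the result is stored back into the buffer, as the casts in
// the functions below show.
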
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
#endif  // #if !SANITIZER_GO