//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

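// Test-and-set spin lock suitable for linker initialization: a
// zero-initialized state_ is the unlocked state, so global instances need
// no constructor (Init() resets the lock explicitly).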
class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0 &&
          atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};
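
// Illustrative usage sketch (not part of the original header; names are
// hypothetical). TryLock() is a single atomic exchange with acquire
// ordering, and Unlock() publishes the critical section with a release
// store, so plain loads/stores are safe between them:
//
//   static StaticSpinMutex stats_mutex;  // zero-initialized == unlocked
//
//   void BumpCounter(uptr *counter) {
//     stats_mutex.Lock();    // spins in LockSlow() under contention
//     *counter += 1;         // protected by the mutex
//     stats_mutex.Unlock();
//   }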

class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};
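
// SpinMutex adds a constructor for dynamically created objects; prefer
// StaticSpinMutex for globals so they need no static initializer. The
// private, unimplemented copy operations make the type non-copyable.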

class BlockingMutex {
 public:
#if SANITIZER_WINDOWS
  // Windows does not currently support LinkerInitialized
  explicit BlockingMutex(LinkerInitialized);
#else
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_(0) {}
#endif
  BlockingMutex();
  void Lock();
  void Unlock();
  void CheckLocked();
 private:
  uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
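
// Illustrative sketch (names are hypothetical; LINKER_INITIALIZED is
// assumed to come from sanitizer_internal_defs.h). A global BlockingMutex
// is declared with the LinkerInitialized constructor so it is usable before
// static constructors run; unlike the spin mutexes, it parks the thread in
// the OS instead of spinning:
//
//   static BlockingMutex report_mutex(LINKER_INITIALIZED);
//
//   void LockedReport() {
//     BlockingMutexLock l(&report_mutex);  // typedef defined below
//     // ... critical section ...
//   }                                      // Unlock() in the destructor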

// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator=(const RWMutex&);
};
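
// State encoding, derived from the enum above: bit 0 (kWriteLock) marks a
// writer, and the higher bits count readers in units of kReadLock. For
// example, three concurrent readers make state_ == 3 * kReadLock == 6; a
// writer makes state_ == kWriteLock == 1, and Lock() only succeeds by
// swinging the whole word from kUnlocked (0).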

template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
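
// Illustrative use of the scoped typedefs (not part of the original header;
// names are hypothetical). The RAII guards release the mutex on every path
// out of the scope, including early returns:
//
//   RWMutex registry_mutex;
//
//   bool RegistryContains(uptr key, uptr *entries, uptr n) {
//     RWMutexReadLock l(&registry_mutex);  // ReadLock() now,
//     for (uptr i = 0; i < n; i++)         // ReadUnlock() in the dtor
//       if (entries[i] == key)
//         return true;                     // guard unlocks here too
//     return false;
//   }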

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H