]>
Commit | Line | Data |
---|---|---|
7c673cae | 1 | // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. |
11fdf7f2 TL |
2 | // This source code is licensed under both the GPLv2 (found in the |
3 | // COPYING file in the root directory) and Apache 2.0 License | |
4 | // (found in the LICENSE.Apache file in the root directory). | |
7c673cae FG |
5 | // |
6 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |
7 | // Use of this source code is governed by a BSD-style license that can be | |
8 | // found in the LICENSE file. See the AUTHORS file for names of contributors. | |
9 | ||
10 | #include "util/thread_local.h" | |
1e59de90 | 11 | |
7c673cae FG |
12 | #include <stdlib.h> |
13 | ||
1e59de90 TL |
14 | #include "port/likely.h" |
15 | #include "util/mutexlock.h" | |
16 | ||
f67539c2 | 17 | namespace ROCKSDB_NAMESPACE { |
7c673cae FG |
18 | |
// One thread-local slot: the value a single (thread, ThreadLocalPtr
// instance) pair stores. Wrapped in a struct so a vector of slots can be
// resized even though std::atomic itself is not copyable.
struct Entry {
  Entry() : ptr(nullptr) {}
  // Copying transfers the raw pointer value. Relaxed ordering suffices
  // here: copies happen only when the entries vector grows, which this
  // file performs under the global mutex (see Reset/Swap below).
  Entry(const Entry& other) : ptr(other.ptr.load(std::memory_order_relaxed)) {}
  std::atomic<void*> ptr;
};

class StaticMeta;
26 | ||
27 | // This is the structure that is declared as "thread_local" storage. | |
28 | // The vector keep list of atomic pointer for all instances for "current" | |
29 | // thread. The vector is indexed by an Id that is unique in process and | |
30 | // associated with one ThreadLocalPtr instance. The Id is assigned by a | |
31 | // global StaticMeta singleton. So if we instantiated 3 ThreadLocalPtr | |
32 | // instances, each thread will have a ThreadData with a vector of size 3: | |
33 | // --------------------------------------------------- | |
20effc67 | 34 | // | | instance 1 | instance 2 | instance 3 | |
7c673cae FG |
35 | // --------------------------------------------------- |
36 | // | thread 1 | void* | void* | void* | <- ThreadData | |
37 | // --------------------------------------------------- | |
38 | // | thread 2 | void* | void* | void* | <- ThreadData | |
39 | // --------------------------------------------------- | |
40 | // | thread 3 | void* | void* | void* | <- ThreadData | |
41 | // --------------------------------------------------- | |
42 | struct ThreadData { | |
11fdf7f2 | 43 | explicit ThreadData(ThreadLocalPtr::StaticMeta* _inst) |
1e59de90 | 44 | : entries(), next(nullptr), prev(nullptr), inst(_inst) {} |
7c673cae FG |
45 | std::vector<Entry> entries; |
46 | ThreadData* next; | |
47 | ThreadData* prev; | |
48 | ThreadLocalPtr::StaticMeta* inst; | |
49 | }; | |
50 | ||
class ThreadLocalPtr::StaticMeta {
 public:
  StaticMeta();

  // Return the next available Id
  uint32_t GetId();
  // Return the next available Id without claiming it
  uint32_t PeekId() const;
  // Return the given Id back to the free pool. This also triggers
  // UnrefHandler for associated pointer value (if not NULL) for all threads.
  void ReclaimId(uint32_t id);

  // Return the pointer value for the given id for the current thread.
  void* Get(uint32_t id) const;
  // Reset the pointer value for the given id for the current thread.
  void Reset(uint32_t id, void* ptr);
  // Atomically swap the supplied ptr and return the previous value
  void* Swap(uint32_t id, void* ptr);
  // Atomically compare and swap the provided value only if it equals
  // to expected value.
  bool CompareAndSwap(uint32_t id, void* ptr, void*& expected);
  // Reset all thread local data to replacement, and return non-nullptr
  // data for all existing threads
  void Scrape(uint32_t id, autovector<void*>* ptrs, void* const replacement);
  // Update res by applying func on each thread-local value. Holds a lock that
  // prevents unref handler from running during this call, but clients must
  // still provide external synchronization since the owning thread can
  // access the values without internal locking, e.g., via Get() and Reset().
  void Fold(uint32_t id, FoldFunc func, void* res);

  // Register the UnrefHandler for id
  void SetHandler(uint32_t id, UnrefHandler handler);

  // Protects inst, next_instance_id_, free_instance_ids_, head_, and
  // ThreadData.entries.
  //
  // Note that here we prefer a function-static variable over the usual
  // global static variable. The reason is that C++ destroys static
  // variables in the reverse order of their construction, but guarantees
  // no particular construction order when global static variables are
  // defined in different files; function-static variables are initialized
  // when their function is first called. As a result, the construction
  // order of function-static variables can be controlled by properly
  // sequencing their first function calls.
  //
  // For instance, the following function contains such a function-static
  // variable. We place a dummy call to this function inside Env::Default()
  // to pin down its construction order.
  static port::Mutex* Mutex();

  // Returns the member mutex of the current StaticMeta. In general,
  // Mutex() should be used instead of this one. However, in case where
  // the static variable inside Instance() goes out of scope, MemberMutex()
  // should be used. One example is OnThreadExit() function.
  port::Mutex* MemberMutex() { return &mutex_; }

 private:
  // Get UnrefHandler for id with acquiring mutex
  // REQUIRES: mutex locked
  UnrefHandler GetHandler(uint32_t id);

  // Triggered before a thread terminates
  static void OnThreadExit(void* ptr);

  // Add current thread's ThreadData to the global chain
  // REQUIRES: mutex locked
  void AddThreadData(ThreadData* d);

  // Remove current thread's ThreadData from the global chain
  // REQUIRES: mutex locked
  void RemoveThreadData(ThreadData* d);

  // Return the calling thread's ThreadData, creating and registering it on
  // first use (see the definition below).
  static ThreadData* GetThreadLocal();

  // The next id to hand out when free_instance_ids_ is empty.
  uint32_t next_instance_id_;
  // Used to recycle Ids in case ThreadLocalPtr is instantiated and destroyed
  // frequently. This also prevents it from blowing up the vector space.
  autovector<uint32_t> free_instance_ids_;
  // Chain all thread local structures together. This is necessary since
  // when one ThreadLocalPtr gets destroyed, we need to loop over each
  // thread's version of pointer corresponding to that instance and
  // call UnrefHandler for it.
  ThreadData head_;

  // Maps an instance id to the UnrefHandler registered for it (if any).
  std::unordered_map<uint32_t, UnrefHandler> handler_map_;

  // The private mutex. Developers should always use Mutex() instead of
  // using this variable directly.
  port::Mutex mutex_;
  // Thread local storage
  static thread_local ThreadData* tls_;

  // Used to make thread exit trigger possible if !defined(OS_MACOSX).
  // Otherwise, used to retrieve thread data.
  pthread_key_t pthread_key_;
};
149 | ||
1e59de90 | 150 | thread_local ThreadData* ThreadLocalPtr::StaticMeta::tls_ = nullptr; |
7c673cae FG |
151 | |
152 | // Windows doesn't support a per-thread destructor with its | |
153 | // TLS primitives. So, we build it manually by inserting a | |
154 | // function to be called on each thread's exit. | |
155 | // See http://www.codeproject.com/Articles/8113/Thread-Local-Storage-The-C-Way | |
156 | // and http://www.nynaeve.net/?p=183 | |
157 | // | |
// really we do this to have a clear conscience, since using TLS with
// thread pools is iffy -- although OK within a request. But otherwise,
// threads have no identity in their modern use.
162 | ||
// This runs on Windows only, called from the System Loader.
#ifdef OS_WIN

// The Windows cleanup routine is invoked by the System Loader with a
// different signature, so we cannot directly hook up the original
// OnThreadExit, which is a private member. Instead, the StaticMeta class
// shares the address of that function with us so we can invoke it here.
namespace wintlscleanup {

// This is set to OnThreadExit in the StaticMeta singleton constructor.
UnrefHandler thread_local_inclass_routine = nullptr;
// The TLS key, also shared with us by the StaticMeta constructor.
pthread_key_t thread_local_key = pthread_key_t(-1);

// Static callback function invoked by the loader on each thread
// termination.
void NTAPI WinOnThreadExit(PVOID module, DWORD reason, PVOID reserved) {
  // We decided to punt on PROCESS_EXIT
  if (DLL_THREAD_DETACH == reason) {
    // Only act once StaticMeta has published both the key and the routine.
    if (thread_local_key != pthread_key_t(-1) &&
        thread_local_inclass_routine != nullptr) {
      void* tls = TlsGetValue(thread_local_key);
      if (tls != nullptr) {
        thread_local_inclass_routine(tls);
      }
    }
  }
}

}  // namespace wintlscleanup

// extern "C" suppresses C++ name mangling so we know the symbol name for the
// linker /INCLUDE:symbol pragmas below.
extern "C" {

#ifdef _MSC_VER
// The linker must not discard thread_callback_on_exit. (We force a reference
// to this variable with a linker /include:symbol pragma to ensure that.) If
// this variable is discarded, the OnThreadExit function will never be called.
#ifndef _X86_

// .CRT section is merged with .rdata on x64 so it must be constant data.
#pragma const_seg(".CRT$XLB")
// When defining a const variable, it must have external linkage to be sure
// the linker doesn't discard it.
extern const PIMAGE_TLS_CALLBACK p_thread_callback_on_exit;
const PIMAGE_TLS_CALLBACK p_thread_callback_on_exit =
    wintlscleanup::WinOnThreadExit;
// Reset the default section.
#pragma const_seg()

#pragma comment(linker, "/include:_tls_used")
#pragma comment(linker, "/include:p_thread_callback_on_exit")

#else  // _X86_

#pragma data_seg(".CRT$XLB")
PIMAGE_TLS_CALLBACK p_thread_callback_on_exit = wintlscleanup::WinOnThreadExit;
// Reset the default section.
#pragma data_seg()

#pragma comment(linker, "/INCLUDE:__tls_used")
#pragma comment(linker, "/INCLUDE:_p_thread_callback_on_exit")

#endif  // _X86_

#else
// https://github.com/couchbase/gperftools/blob/master/src/windows/port.cc
BOOL WINAPI DllMain(HINSTANCE h, DWORD dwReason, PVOID pv) {
  if (dwReason == DLL_THREAD_DETACH)
    wintlscleanup::WinOnThreadExit(h, dwReason, pv);
  return TRUE;
}
#endif
}  // extern "C"

#endif  // OS_WIN
239 | ||
240 | void ThreadLocalPtr::InitSingletons() { ThreadLocalPtr::Instance(); } | |
241 | ||
ThreadLocalPtr::StaticMeta* ThreadLocalPtr::Instance() {
  // Here we prefer a function-static variable to a global static variable,
  // as a function-static variable is initialized when the function is first
  // called. As a result, we can properly control its construction order by
  // properly preparing its first function call.
  //
  // Note that here we decide to make "inst" a static pointer w/o deleting
  // it at the end instead of a static variable. This is to avoid the following
  // destruction order disaster that happens when a child thread using
  // ThreadLocalPtr dies AFTER the main thread dies: When a child thread
  // happens to use ThreadLocalPtr, it will try to delete its thread-local
  // data in OnThreadExit when the child thread dies. However, OnThreadExit
  // depends on the following variable. As a result, if the main thread dies
  // before any child thread that happens to use ThreadLocalPtr dies, then
  // the destruction of the following variable will go first, then
  // OnThreadExit, therefore causing invalid access.
  //
  // The above problem can be solved by using thread_local to store tls_.
  // thread_local supports dynamic construction and destruction of
  // non-primitive typed variables. As a result, we can guarantee the
  // destruction order even when the main thread dies before any child
  // threads.
  static ThreadLocalPtr::StaticMeta* inst = new ThreadLocalPtr::StaticMeta();
  return inst;
}
267 | ||
268 | port::Mutex* ThreadLocalPtr::StaticMeta::Mutex() { return &Instance()->mutex_; } | |
269 | ||
270 | void ThreadLocalPtr::StaticMeta::OnThreadExit(void* ptr) { | |
271 | auto* tls = static_cast<ThreadData*>(ptr); | |
272 | assert(tls != nullptr); | |
273 | ||
274 | // Use the cached StaticMeta::Instance() instead of directly calling | |
275 | // the variable inside StaticMeta::Instance() might already go out of | |
276 | // scope here in case this OnThreadExit is called after the main thread | |
277 | // dies. | |
278 | auto* inst = tls->inst; | |
279 | pthread_setspecific(inst->pthread_key_, nullptr); | |
280 | ||
281 | MutexLock l(inst->MemberMutex()); | |
282 | inst->RemoveThreadData(tls); | |
283 | // Unref stored pointers of current thread from all instances | |
284 | uint32_t id = 0; | |
285 | for (auto& e : tls->entries) { | |
286 | void* raw = e.ptr.load(); | |
287 | if (raw != nullptr) { | |
288 | auto unref = inst->GetHandler(id); | |
289 | if (unref != nullptr) { | |
290 | unref(raw); | |
291 | } | |
292 | } | |
293 | ++id; | |
294 | } | |
295 | // Delete thread local structure no matter if it is Mac platform | |
296 | delete tls; | |
297 | } | |
298 | ||
11fdf7f2 | 299 | ThreadLocalPtr::StaticMeta::StaticMeta() |
1e59de90 | 300 | : next_instance_id_(0), head_(this), pthread_key_(0) { |
7c673cae FG |
301 | if (pthread_key_create(&pthread_key_, &OnThreadExit) != 0) { |
302 | abort(); | |
303 | } | |
304 | ||
305 | // OnThreadExit is not getting called on the main thread. | |
306 | // Call through the static destructor mechanism to avoid memory leak. | |
307 | // | |
308 | // Caveats: ~A() will be invoked _after_ ~StaticMeta for the global | |
309 | // singleton (destructors are invoked in reverse order of constructor | |
310 | // _completion_); the latter must not mutate internal members. This | |
311 | // cleanup mechanism inherently relies on use-after-release of the | |
312 | // StaticMeta, and is brittle with respect to compiler-specific handling | |
313 | // of memory backing destructed statically-scoped objects. Perhaps | |
314 | // registering with atexit(3) would be more robust. | |
315 | // | |
316 | // This is not required on Windows. | |
317 | #if !defined(OS_WIN) | |
318 | static struct A { | |
319 | ~A() { | |
7c673cae FG |
320 | if (tls_) { |
321 | OnThreadExit(tls_); | |
322 | } | |
323 | } | |
324 | } a; | |
325 | #endif // !defined(OS_WIN) | |
326 | ||
327 | head_.next = &head_; | |
328 | head_.prev = &head_; | |
329 | ||
330 | #ifdef OS_WIN | |
331 | // Share with Windows its cleanup routine and the key | |
332 | wintlscleanup::thread_local_inclass_routine = OnThreadExit; | |
333 | wintlscleanup::thread_local_key = pthread_key_; | |
334 | #endif | |
335 | } | |
336 | ||
337 | void ThreadLocalPtr::StaticMeta::AddThreadData(ThreadData* d) { | |
338 | Mutex()->AssertHeld(); | |
339 | d->next = &head_; | |
340 | d->prev = head_.prev; | |
341 | head_.prev->next = d; | |
342 | head_.prev = d; | |
343 | } | |
344 | ||
1e59de90 | 345 | void ThreadLocalPtr::StaticMeta::RemoveThreadData(ThreadData* d) { |
7c673cae FG |
346 | Mutex()->AssertHeld(); |
347 | d->next->prev = d->prev; | |
348 | d->prev->next = d->next; | |
349 | d->next = d->prev = d; | |
350 | } | |
351 | ||
// Return the calling thread's ThreadData, lazily creating and registering
// it on first use. Aborts the process if the pthread TLS slot cannot be
// set, after rolling back the registration.
ThreadData* ThreadLocalPtr::StaticMeta::GetThreadLocal() {
  if (UNLIKELY(tls_ == nullptr)) {
    auto* inst = Instance();
    tls_ = new ThreadData(inst);
    {
      // Register it in the global chain; needs to be done before thread exit
      // handler registration
      MutexLock l(Mutex());
      inst->AddThreadData(tls_);
    }
    // Even if it is not OS_MACOSX, we need to register a value for
    // pthread_key_ so that its exit handler will be triggered.
    if (pthread_setspecific(inst->pthread_key_, tls_) != 0) {
      {
        // Roll back the chain registration above before bailing out.
        MutexLock l(Mutex());
        inst->RemoveThreadData(tls_);
      }
      delete tls_;
      abort();
    }
  }
  return tls_;
}
375 | ||
376 | void* ThreadLocalPtr::StaticMeta::Get(uint32_t id) const { | |
377 | auto* tls = GetThreadLocal(); | |
378 | if (UNLIKELY(id >= tls->entries.size())) { | |
379 | return nullptr; | |
380 | } | |
381 | return tls->entries[id].ptr.load(std::memory_order_acquire); | |
382 | } | |
383 | ||
384 | void ThreadLocalPtr::StaticMeta::Reset(uint32_t id, void* ptr) { | |
385 | auto* tls = GetThreadLocal(); | |
386 | if (UNLIKELY(id >= tls->entries.size())) { | |
387 | // Need mutex to protect entries access within ReclaimId | |
388 | MutexLock l(Mutex()); | |
389 | tls->entries.resize(id + 1); | |
390 | } | |
391 | tls->entries[id].ptr.store(ptr, std::memory_order_release); | |
392 | } | |
393 | ||
394 | void* ThreadLocalPtr::StaticMeta::Swap(uint32_t id, void* ptr) { | |
395 | auto* tls = GetThreadLocal(); | |
396 | if (UNLIKELY(id >= tls->entries.size())) { | |
397 | // Need mutex to protect entries access within ReclaimId | |
398 | MutexLock l(Mutex()); | |
399 | tls->entries.resize(id + 1); | |
400 | } | |
401 | return tls->entries[id].ptr.exchange(ptr, std::memory_order_acquire); | |
402 | } | |
403 | ||
404 | bool ThreadLocalPtr::StaticMeta::CompareAndSwap(uint32_t id, void* ptr, | |
1e59de90 | 405 | void*& expected) { |
7c673cae FG |
406 | auto* tls = GetThreadLocal(); |
407 | if (UNLIKELY(id >= tls->entries.size())) { | |
408 | // Need mutex to protect entries access within ReclaimId | |
409 | MutexLock l(Mutex()); | |
410 | tls->entries.resize(id + 1); | |
411 | } | |
412 | return tls->entries[id].ptr.compare_exchange_strong( | |
413 | expected, ptr, std::memory_order_release, std::memory_order_relaxed); | |
414 | } | |
415 | ||
416 | void ThreadLocalPtr::StaticMeta::Scrape(uint32_t id, autovector<void*>* ptrs, | |
1e59de90 | 417 | void* const replacement) { |
7c673cae FG |
418 | MutexLock l(Mutex()); |
419 | for (ThreadData* t = head_.next; t != &head_; t = t->next) { | |
420 | if (id < t->entries.size()) { | |
421 | void* ptr = | |
422 | t->entries[id].ptr.exchange(replacement, std::memory_order_acquire); | |
423 | if (ptr != nullptr) { | |
424 | ptrs->push_back(ptr); | |
425 | } | |
426 | } | |
427 | } | |
428 | } | |
429 | ||
430 | void ThreadLocalPtr::StaticMeta::Fold(uint32_t id, FoldFunc func, void* res) { | |
431 | MutexLock l(Mutex()); | |
432 | for (ThreadData* t = head_.next; t != &head_; t = t->next) { | |
433 | if (id < t->entries.size()) { | |
434 | void* ptr = t->entries[id].ptr.load(); | |
435 | if (ptr != nullptr) { | |
436 | func(ptr, res); | |
437 | } | |
438 | } | |
439 | } | |
440 | } | |
441 | ||
1e59de90 | 442 | uint32_t ThreadLocalPtr::TEST_PeekId() { return Instance()->PeekId(); } |
7c673cae FG |
443 | |
444 | void ThreadLocalPtr::StaticMeta::SetHandler(uint32_t id, UnrefHandler handler) { | |
445 | MutexLock l(Mutex()); | |
446 | handler_map_[id] = handler; | |
447 | } | |
448 | ||
449 | UnrefHandler ThreadLocalPtr::StaticMeta::GetHandler(uint32_t id) { | |
450 | Mutex()->AssertHeld(); | |
451 | auto iter = handler_map_.find(id); | |
452 | if (iter == handler_map_.end()) { | |
453 | return nullptr; | |
454 | } | |
455 | return iter->second; | |
456 | } | |
457 | ||
458 | uint32_t ThreadLocalPtr::StaticMeta::GetId() { | |
459 | MutexLock l(Mutex()); | |
460 | if (free_instance_ids_.empty()) { | |
461 | return next_instance_id_++; | |
462 | } | |
463 | ||
464 | uint32_t id = free_instance_ids_.back(); | |
465 | free_instance_ids_.pop_back(); | |
466 | return id; | |
467 | } | |
468 | ||
469 | uint32_t ThreadLocalPtr::StaticMeta::PeekId() const { | |
470 | MutexLock l(Mutex()); | |
471 | if (!free_instance_ids_.empty()) { | |
472 | return free_instance_ids_.back(); | |
473 | } | |
474 | return next_instance_id_; | |
475 | } | |
476 | ||
477 | void ThreadLocalPtr::StaticMeta::ReclaimId(uint32_t id) { | |
478 | // This id is not used, go through all thread local data and release | |
479 | // corresponding value | |
480 | MutexLock l(Mutex()); | |
481 | auto unref = GetHandler(id); | |
482 | for (ThreadData* t = head_.next; t != &head_; t = t->next) { | |
483 | if (id < t->entries.size()) { | |
484 | void* ptr = t->entries[id].ptr.exchange(nullptr); | |
485 | if (ptr != nullptr && unref != nullptr) { | |
486 | unref(ptr); | |
487 | } | |
488 | } | |
489 | } | |
490 | handler_map_[id] = nullptr; | |
491 | free_instance_ids_.push_back(id); | |
492 | } | |
493 | ||
494 | ThreadLocalPtr::ThreadLocalPtr(UnrefHandler handler) | |
495 | : id_(Instance()->GetId()) { | |
496 | if (handler != nullptr) { | |
497 | Instance()->SetHandler(id_, handler); | |
498 | } | |
499 | } | |
500 | ||
1e59de90 | 501 | ThreadLocalPtr::~ThreadLocalPtr() { Instance()->ReclaimId(id_); } |
7c673cae | 502 | |
1e59de90 | 503 | void* ThreadLocalPtr::Get() const { return Instance()->Get(id_); } |
7c673cae | 504 | |
1e59de90 | 505 | void ThreadLocalPtr::Reset(void* ptr) { Instance()->Reset(id_, ptr); } |
7c673cae | 506 | |
1e59de90 | 507 | void* ThreadLocalPtr::Swap(void* ptr) { return Instance()->Swap(id_, ptr); } |
7c673cae FG |
508 | |
509 | bool ThreadLocalPtr::CompareAndSwap(void* ptr, void*& expected) { | |
510 | return Instance()->CompareAndSwap(id_, ptr, expected); | |
511 | } | |
512 | ||
513 | void ThreadLocalPtr::Scrape(autovector<void*>* ptrs, void* const replacement) { | |
514 | Instance()->Scrape(id_, ptrs, replacement); | |
515 | } | |
516 | ||
517 | void ThreadLocalPtr::Fold(FoldFunc func, void* res) { | |
518 | Instance()->Fold(id_, func, res); | |
519 | } | |
520 | ||
f67539c2 | 521 | } // namespace ROCKSDB_NAMESPACE |