use std::cell::UnsafeCell;
use std::collections::HashMap;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::sync::{LockResult, PoisonError, TryLockError, TryLockResult};
use std::sync::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use std::thread::{self, ThreadId};

use crate::sync::once_lock::OnceLock;
use crate::CachePadded;
/// The number of shards per sharded lock. Must be a power of two.
const NUM_SHARDS: usize = 8;
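
// A minimal sketch (test-only, not part of the upstream crate) of why `NUM_SHARDS` must be a
// power of two: shard selection in `read` and `try_read` uses the bitmask
// `index & (NUM_SHARDS - 1)`, which only agrees with `index % NUM_SHARDS` for power-of-two
// shard counts. The module and helper names are hypothetical.
#[cfg(test)]
mod shard_index_sketch {
    use super::NUM_SHARDS;

    /// Maps a thread index to a shard index, exactly like the read methods do.
    fn shard_index_for(thread_index: usize) -> usize {
        thread_index & (NUM_SHARDS - 1)
    }

    #[test]
    fn mask_matches_modulo() {
        assert!(NUM_SHARDS.is_power_of_two());
        for i in 0..4 * NUM_SHARDS {
            assert_eq!(shard_index_for(i), i % NUM_SHARDS);
        }
    }
}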

/// A shard containing a single reader-writer lock.
struct Shard {
    /// The inner reader-writer lock.
    lock: RwLock<()>,

    /// The write-guard keeping this shard locked.
    ///
    /// Write operations will lock each shard and store the guard here. These guards get dropped at
    /// the same time the big guard is dropped.
    write_guard: UnsafeCell<Option<RwLockWriteGuard<'static, ()>>>,
}

/// A sharded reader-writer lock.
///
/// This lock is equivalent to [`RwLock`], except read operations are faster and write operations
/// are slower.
///
/// A `ShardedLock` is internally made of a list of *shards*, each being a [`RwLock`] occupying a
/// single cache line. Read operations will pick one of the shards depending on the current thread
/// and lock it. Write operations need to lock all shards in succession.
///
/// By splitting the lock into shards, concurrent read operations will in most cases choose
/// different shards and thus update different cache lines, which is good for scalability. However,
/// write operations need to do more work and are therefore slower than usual.
///
/// The priority policy of the lock is dependent on the underlying operating system's
/// implementation, and this type does not guarantee that any particular policy will be used.
///
/// # Poisoning
///
/// A `ShardedLock`, like [`RwLock`], will become poisoned on a panic. Note that it may only be
/// poisoned if a panic occurs while a write operation is in progress. If a panic occurs in any
/// read operation, the lock will not be poisoned.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::ShardedLock;
///
/// let lock = ShardedLock::new(5);
///
/// // Any number of read locks can be held at once.
/// {
///     let r1 = lock.read().unwrap();
///     let r2 = lock.read().unwrap();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // Read locks are dropped at this point.
///
/// // However, only one write lock may be held.
/// {
///     let mut w = lock.write().unwrap();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // Write lock is dropped here.
/// ```
///
/// [`RwLock`]: std::sync::RwLock
pub struct ShardedLock<T: ?Sized> {
    /// A list of locks protecting the internal data.
    shards: Box<[CachePadded<Shard>]>,

    /// The internal data.
    value: UnsafeCell<T>,
}

unsafe impl<T: ?Sized + Send> Send for ShardedLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for ShardedLock<T> {}

impl<T: ?Sized> UnwindSafe for ShardedLock<T> {}
impl<T: ?Sized> RefUnwindSafe for ShardedLock<T> {}
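
// A minimal sketch (test-only, not part of the upstream crate) checking the auto-trait bounds
// promised by the `unsafe impl`s above: the lock is `Send`/`Sync` under the same bounds as an
// `RwLock<T>` would be. The module name is hypothetical.
#[cfg(test)]
mod auto_trait_sketch {
    use super::ShardedLock;

    fn assert_send<T: Send>() {}
    fn assert_sync<T: Sync>() {}

    #[test]
    fn send_and_sync_hold_for_sendable_data() {
        // `Vec<u8>` is `Send + Sync`, so both bounds are satisfied.
        assert_send::<ShardedLock<Vec<u8>>>();
        assert_sync::<ShardedLock<Vec<u8>>>();
    }
}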

impl<T> ShardedLock<T> {
    /// Creates a new sharded reader-writer lock.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::sync::ShardedLock;
    ///
    /// let lock = ShardedLock::new(5);
    /// ```
    pub fn new(value: T) -> ShardedLock<T> {
        ShardedLock {
            shards: (0..NUM_SHARDS)
                .map(|_| {
                    CachePadded::new(Shard {
                        lock: RwLock::new(()),
                        write_guard: UnsafeCell::new(None),
                    })
                })
                .collect::<Box<[_]>>(),
            value: UnsafeCell::new(value),
        }
    }

    /// Consumes this lock, returning the underlying data.
    ///
    /// # Errors
    ///
    /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
    /// operation panics.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::sync::ShardedLock;
    ///
    /// let lock = ShardedLock::new(String::new());
    /// {
    ///     let mut s = lock.write().unwrap();
    ///     *s = "modified".to_owned();
    /// }
    /// assert_eq!(lock.into_inner().unwrap(), "modified");
    /// ```
    pub fn into_inner(self) -> LockResult<T> {
        let is_poisoned = self.is_poisoned();
        let inner = self.value.into_inner();

        if is_poisoned {
            Err(PoisonError::new(inner))
        } else {
            Ok(inner)
        }
    }
}

impl<T: ?Sized> ShardedLock<T> {
    /// Returns `true` if the lock is poisoned.
    ///
    /// If another thread can still access the lock, it may become poisoned at any time. A `false`
    /// result should not be trusted without additional synchronization.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::sync::ShardedLock;
    /// use std::sync::Arc;
    /// use std::thread;
    ///
    /// let lock = Arc::new(ShardedLock::new(0));
    /// let c_lock = lock.clone();
    ///
    /// let _ = thread::spawn(move || {
    ///     let _lock = c_lock.write().unwrap();
    ///     panic!(); // the lock gets poisoned
    /// }).join();
    /// assert_eq!(lock.is_poisoned(), true);
    /// ```
    pub fn is_poisoned(&self) -> bool {
        // A write operation locks every shard, so a poisoning panic is visible in shard 0.
        self.shards[0].lock.is_poisoned()
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the lock mutably, no actual locking needs to take place.
    ///
    /// # Errors
    ///
    /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
    /// operation panics.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::sync::ShardedLock;
    ///
    /// let mut lock = ShardedLock::new(0);
    /// *lock.get_mut().unwrap() = 10;
    /// assert_eq!(*lock.read().unwrap(), 10);
    /// ```
    pub fn get_mut(&mut self) -> LockResult<&mut T> {
        let is_poisoned = self.is_poisoned();
        let inner = unsafe { &mut *self.value.get() };

        if is_poisoned {
            Err(PoisonError::new(inner))
        } else {
            Ok(inner)
        }
    }

    /// Attempts to acquire this lock with shared read access.
    ///
    /// If the access could not be granted at this time, an error is returned. Otherwise, a guard
    /// is returned which will release the shared access when it is dropped. This method does not
    /// provide any guarantees with respect to the ordering of whether contentious readers or
    /// writers will acquire the lock first.
    ///
    /// # Errors
    ///
    /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
    /// operation panics.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::sync::ShardedLock;
    ///
    /// let lock = ShardedLock::new(1);
    ///
    /// match lock.try_read() {
    ///     Ok(n) => assert_eq!(*n, 1),
    ///     Err(_) => unreachable!(),
    /// };
    /// ```
    pub fn try_read(&self) -> TryLockResult<ShardedLockReadGuard<'_, T>> {
        // Take the current thread index and map it to a shard index. Thread indices will tend to
        // distribute shards among threads equally, thus reducing contention due to read-locking.
        let current_index = current_index().unwrap_or(0);
        let shard_index = current_index & (self.shards.len() - 1);

        match self.shards[shard_index].lock.try_read() {
            Ok(guard) => Ok(ShardedLockReadGuard {
                lock: self,
                _guard: guard,
                _marker: PhantomData,
            }),
            Err(TryLockError::Poisoned(err)) => {
                let guard = ShardedLockReadGuard {
                    lock: self,
                    _guard: err.into_inner(),
                    _marker: PhantomData,
                };
                Err(TryLockError::Poisoned(PoisonError::new(guard)))
            }
            Err(TryLockError::WouldBlock) => Err(TryLockError::WouldBlock),
        }
    }

    /// Locks with shared read access, blocking the current thread until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which hold the lock.
    /// There may be other readers currently inside the lock when this method returns. This method
    /// does not provide any guarantees with respect to the ordering of whether contentious readers
    /// or writers will acquire the lock first.
    ///
    /// Returns a guard which will release the shared access when dropped.
    ///
    /// # Errors
    ///
    /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
    /// operation panics.
    ///
    /// # Panics
    ///
    /// This method might panic when called if the lock is already held by the current thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::sync::ShardedLock;
    /// use std::sync::Arc;
    /// use std::thread;
    ///
    /// let lock = Arc::new(ShardedLock::new(1));
    /// let c_lock = lock.clone();
    ///
    /// let n = lock.read().unwrap();
    /// assert_eq!(*n, 1);
    ///
    /// thread::spawn(move || {
    ///     let r = c_lock.read();
    ///     assert!(r.is_ok());
    /// }).join().unwrap();
    /// ```
    pub fn read(&self) -> LockResult<ShardedLockReadGuard<'_, T>> {
        // Take the current thread index and map it to a shard index. Thread indices will tend to
        // distribute shards among threads equally, thus reducing contention due to read-locking.
        let current_index = current_index().unwrap_or(0);
        let shard_index = current_index & (self.shards.len() - 1);

        match self.shards[shard_index].lock.read() {
            Ok(guard) => Ok(ShardedLockReadGuard {
                lock: self,
                _guard: guard,
                _marker: PhantomData,
            }),
            Err(err) => Err(PoisonError::new(ShardedLockReadGuard {
                lock: self,
                _guard: err.into_inner(),
                _marker: PhantomData,
            })),
        }
    }

    /// Attempts to acquire this lock with exclusive write access.
    ///
    /// If the access could not be granted at this time, an error is returned. Otherwise, a guard
    /// is returned which will release the exclusive access when it is dropped. This method does
    /// not provide any guarantees with respect to the ordering of whether contentious readers or
    /// writers will acquire the lock first.
    ///
    /// # Errors
    ///
    /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
    /// operation panics.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::sync::ShardedLock;
    ///
    /// let lock = ShardedLock::new(1);
    ///
    /// let n = lock.read().unwrap();
    /// assert_eq!(*n, 1);
    ///
    /// assert!(lock.try_write().is_err());
    /// ```
    pub fn try_write(&self) -> TryLockResult<ShardedLockWriteGuard<'_, T>> {
        let mut poisoned = false;
        let mut blocked = None;

        // Write-lock each shard in succession.
        for (i, shard) in self.shards.iter().enumerate() {
            let guard = match shard.lock.try_write() {
                Ok(guard) => guard,
                Err(TryLockError::Poisoned(err)) => {
                    poisoned = true;
                    err.into_inner()
                }
                Err(TryLockError::WouldBlock) => {
                    blocked = Some(i);
                    break;
                }
            };

            // Store the guard into the shard.
            unsafe {
                // Erase the guard's lifetime so it can be stored inside the shard it borrows
                // from; it is always dropped before the shard is.
                let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard);
                let dest: *mut _ = shard.write_guard.get();
                *dest = Some(guard);
            }
        }

        if let Some(i) = blocked {
            // Unlock the shards in reverse order of locking.
            for shard in self.shards[0..i].iter().rev() {
                unsafe {
                    let dest: *mut _ = shard.write_guard.get();
                    let guard = (*dest).take();
                    drop(guard);
                }
            }
            Err(TryLockError::WouldBlock)
        } else if poisoned {
            let guard = ShardedLockWriteGuard {
                lock: self,
                _marker: PhantomData,
            };
            Err(TryLockError::Poisoned(PoisonError::new(guard)))
        } else {
            Ok(ShardedLockWriteGuard {
                lock: self,
                _marker: PhantomData,
            })
        }
    }

    /// Locks with exclusive write access, blocking the current thread until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no other readers or writers which hold
    /// the lock. This method does not provide any guarantees with respect to the ordering of
    /// whether contentious readers or writers will acquire the lock first.
    ///
    /// Returns a guard which will release the exclusive access when dropped.
    ///
    /// # Errors
    ///
    /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
    /// operation panics.
    ///
    /// # Panics
    ///
    /// This method might panic when called if the lock is already held by the current thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::sync::ShardedLock;
    ///
    /// let lock = ShardedLock::new(1);
    ///
    /// let mut n = lock.write().unwrap();
    /// *n = 2;
    ///
    /// assert!(lock.try_read().is_err());
    /// ```
    pub fn write(&self) -> LockResult<ShardedLockWriteGuard<'_, T>> {
        let mut poisoned = false;

        // Write-lock each shard in succession.
        for shard in self.shards.iter() {
            let guard = match shard.lock.write() {
                Ok(guard) => guard,
                Err(err) => {
                    poisoned = true;
                    err.into_inner()
                }
            };

            // Store the guard into the shard.
            unsafe {
                let guard: RwLockWriteGuard<'_, ()> = guard;
                // Erase the guard's lifetime so it can be stored inside the shard it borrows
                // from; it is always dropped before the shard is.
                let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard);
                let dest: *mut _ = shard.write_guard.get();
                *dest = Some(guard);
            }
        }

        if poisoned {
            Err(PoisonError::new(ShardedLockWriteGuard {
                lock: self,
                _marker: PhantomData,
            }))
        } else {
            Ok(ShardedLockWriteGuard {
                lock: self,
                _marker: PhantomData,
            })
        }
    }
}
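
// A minimal sketch (test-only, not part of the upstream crate) of the `try_write` rollback
// implemented above: while any reader holds a shard, `try_write` backs out and releases every
// shard it already locked, so a later attempt can still succeed. The module name is hypothetical.
#[cfg(test)]
mod try_write_sketch {
    use super::ShardedLock;

    #[test]
    fn try_write_backs_out_cleanly() {
        let lock = ShardedLock::new(0);

        // A read guard write-blocks one shard, so locking all shards must fail.
        let r = lock.read().unwrap();
        assert!(lock.try_write().is_err());
        drop(r);

        // The failed attempt released the shards it had grabbed, so this succeeds.
        *lock.try_write().unwrap() += 1;
        assert_eq!(*lock.read().unwrap(), 1);
    }
}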

impl<T: ?Sized + fmt::Debug> fmt::Debug for ShardedLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.try_read() {
            Ok(guard) => f
                .debug_struct("ShardedLock")
                .field("data", &&*guard)
                .finish(),
            Err(TryLockError::Poisoned(err)) => f
                .debug_struct("ShardedLock")
                .field("data", &&**err.get_ref())
                .finish(),
            Err(TryLockError::WouldBlock) => {
                struct LockedPlaceholder;
                impl fmt::Debug for LockedPlaceholder {
                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                        f.write_str("<locked>")
                    }
                }
                f.debug_struct("ShardedLock")
                    .field("data", &LockedPlaceholder)
                    .finish()
            }
        }
    }
}
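
// A minimal sketch (test-only, not part of the upstream crate) of the `Debug` fallback above:
// while a write guard is held, `try_read` would block, so the data is rendered as the
// `<locked>` placeholder instead. The module name is hypothetical.
#[cfg(test)]
mod debug_sketch {
    use super::ShardedLock;

    #[test]
    fn debug_shows_locked_placeholder() {
        let lock = ShardedLock::new(5);
        let _w = lock.write().unwrap();
        assert_eq!(format!("{:?}", lock), "ShardedLock { data: <locked> }");
    }
}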

impl<T: Default> Default for ShardedLock<T> {
    fn default() -> ShardedLock<T> {
        ShardedLock::new(Default::default())
    }
}

impl<T> From<T> for ShardedLock<T> {
    fn from(t: T) -> Self {
        ShardedLock::new(t)
    }
}

/// A guard used to release the shared read access of a [`ShardedLock`] when dropped.
#[clippy::has_significant_drop]
pub struct ShardedLockReadGuard<'a, T: ?Sized> {
    lock: &'a ShardedLock<T>,
    _guard: RwLockReadGuard<'a, ()>,
    // Makes the guard behave like `RwLockReadGuard<'a, T>` with respect to variance and
    // auto traits.
    _marker: PhantomData<RwLockReadGuard<'a, T>>,
}

unsafe impl<T: ?Sized + Sync> Sync for ShardedLockReadGuard<'_, T> {}

impl<T: ?Sized> Deref for ShardedLockReadGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.lock.value.get() }
    }
}

impl<T: fmt::Debug> fmt::Debug for ShardedLockReadGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ShardedLockReadGuard")
            .field("lock", &self.lock)
            .finish()
    }
}

impl<T: ?Sized + fmt::Display> fmt::Display for ShardedLockReadGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

/// A guard used to release the exclusive write access of a [`ShardedLock`] when dropped.
#[clippy::has_significant_drop]
pub struct ShardedLockWriteGuard<'a, T: ?Sized> {
    lock: &'a ShardedLock<T>,
    // Makes the guard behave like `RwLockWriteGuard<'a, T>` with respect to variance and
    // auto traits.
    _marker: PhantomData<RwLockWriteGuard<'a, T>>,
}

unsafe impl<T: ?Sized + Sync> Sync for ShardedLockWriteGuard<'_, T> {}

impl<T: ?Sized> Drop for ShardedLockWriteGuard<'_, T> {
    fn drop(&mut self) {
        // Unlock the shards in reverse order of locking.
        for shard in self.lock.shards.iter().rev() {
            unsafe {
                let dest: *mut _ = shard.write_guard.get();
                let guard = (*dest).take();
                drop(guard);
            }
        }
    }
}

impl<T: fmt::Debug> fmt::Debug for ShardedLockWriteGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ShardedLockWriteGuard")
            .field("lock", &self.lock)
            .finish()
    }
}

impl<T: ?Sized + fmt::Display> fmt::Display for ShardedLockWriteGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

impl<T: ?Sized> Deref for ShardedLockWriteGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.lock.value.get() }
    }
}

impl<T: ?Sized> DerefMut for ShardedLockWriteGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.lock.value.get() }
    }
}

/// Returns a `usize` that identifies the current thread.
///
/// Each thread is associated with an 'index'. While there are no particular guarantees, indices
/// usually tend to be consecutive numbers between 0 and the number of running threads.
///
/// Since this function accesses TLS, `None` might be returned if the current thread's TLS is
/// tearing down.
fn current_index() -> Option<usize> {
    REGISTRATION.try_with(|reg| reg.index).ok()
}
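
// A minimal sketch (test-only, not part of the upstream crate) of the behavior documented
// above: a freshly spawned thread registers itself lazily and therefore observes `Some` index.
// The module name is hypothetical.
#[cfg(test)]
mod current_index_sketch {
    use super::current_index;
    use std::thread;

    #[test]
    fn spawned_threads_get_an_index() {
        let index = thread::spawn(current_index).join().unwrap();
        assert!(index.is_some());
    }
}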

/// The global registry keeping track of registered threads and indices.
struct ThreadIndices {
    /// Mapping from `ThreadId` to thread index.
    mapping: HashMap<ThreadId, usize>,

    /// A list of free indices.
    free_list: Vec<usize>,

    /// The next index to allocate if the free list is empty.
    next_index: usize,
}

fn thread_indices() -> &'static Mutex<ThreadIndices> {
    static THREAD_INDICES: OnceLock<Mutex<ThreadIndices>> = OnceLock::new();
    fn init() -> Mutex<ThreadIndices> {
        Mutex::new(ThreadIndices {
            mapping: HashMap::new(),
            free_list: Vec::new(),
            next_index: 0,
        })
    }
    THREAD_INDICES.get_or_init(init)
}

/// A registration of a thread with an index.
///
/// When dropped, unregisters the thread and frees the reserved index.
struct Registration {
    index: usize,
    thread_id: ThreadId,
}

impl Drop for Registration {
    fn drop(&mut self) {
        let mut indices = thread_indices().lock().unwrap();
        indices.mapping.remove(&self.thread_id);
        indices.free_list.push(self.index);
    }
}

thread_local! {
    static REGISTRATION: Registration = {
        let thread_id = thread::current().id();
        let mut indices = thread_indices().lock().unwrap();

        // Reuse a freed index if one is available; otherwise allocate the next one.
        let index = match indices.free_list.pop() {
            Some(i) => i,
            None => {
                let i = indices.next_index;
                indices.next_index += 1;
                i
            }
        };
        indices.mapping.insert(thread_id, index);

        Registration { index, thread_id }
    };
}
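
// A minimal sketch (test-only, not part of the upstream crate) of the intended read-mostly
// usage: many threads take read guards concurrently, each landing on a shard chosen from its
// thread index. The module name is hypothetical.
#[cfg(test)]
mod usage_sketch {
    use super::ShardedLock;
    use std::sync::Arc;
    use std::thread;

    #[test]
    fn concurrent_readers() {
        let lock = Arc::new(ShardedLock::new(vec![1, 2, 3]));
        let handles: Vec<_> = (0..8)
            .map(|_| {
                let lock = Arc::clone(&lock);
                thread::spawn(move || lock.read().unwrap().len())
            })
            .collect();
        for handle in handles {
            assert_eq!(handle.join().unwrap(), 3);
        }
    }
}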