// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![stable(feature = "rust1", since = "1.0.0")]

//! Threadsafe reference-counted boxes (the `Arc<T>` type).
//!
//! The `Arc<T>` type provides shared ownership of an immutable value.
//! Destruction is deterministic, and will occur as soon as the last owner is
//! gone. It is marked as `Send` because it uses atomic reference counting.
//!
//! If you do not need thread-safety, and just need shared ownership, consider
//! the [`Rc<T>` type](../rc/struct.Rc.html). It is the same as `Arc<T>`, but
//! does not use atomics, making it both thread-unsafe as well as significantly
//! faster when updating the reference count.
//!
//! The `downgrade` method can be used to create a non-owning `Weak<T>` pointer
//! to the box. A `Weak<T>` pointer can be upgraded to an `Arc<T>` pointer, but
//! will return `None` if the value has already been dropped.
//!
//! For example, a tree with parent pointers can be represented by putting the
//! nodes behind strong `Arc<T>` pointers, and then storing the parent pointers
//! as `Weak<T>` pointers.
//!
//! # Examples
//!
//! Sharing some immutable data between threads:
//!
//! ```no_run
//! use std::sync::Arc;
//! use std::thread;
//!
//! let five = Arc::new(5);
//!
//! for _ in 0..10 {
//!     let five = five.clone();
//!
//!     thread::spawn(move || {
//!         println!("{:?}", five);
//!     });
//! }
//! ```
//!
//! Sharing mutable data safely between threads with a `Mutex`:
//!
//! ```no_run
//! use std::sync::{Arc, Mutex};
//! use std::thread;
//!
//! let five = Arc::new(Mutex::new(5));
//!
//! for _ in 0..10 {
//!     let five = five.clone();
//!
//!     thread::spawn(move || {
//!         let mut number = five.lock().unwrap();
//!
//!         *number += 1;
//!
//!         println!("{}", *number); // prints the new value (6 through 15 across the threads)
//!     });
//! }
//! ```
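//!
//! Observing a value through a `Weak<T>` pointer, which on its own does not
//! keep the value alive (a brief illustration of `downgrade` and `upgrade`):
//!
//! ```
//! use std::sync::Arc;
//!
//! let five = Arc::new(5);
//! let weak_five = Arc::downgrade(&five);
//!
//! // While a strong pointer is alive, the weak pointer can be upgraded...
//! assert_eq!(weak_five.upgrade().map(|v| *v), Some(5));
//!
//! // ...but once the last strong pointer is dropped, upgrading fails.
//! drop(five);
//! assert!(weak_five.upgrade().is_none());
//! ```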
71 | ||
c34b1796 AL |
72 | use boxed::Box; |
73 | ||
e9174d1e SL |
74 | use core::sync::atomic; |
75 | use core::sync::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst}; | |
76 | use core::borrow; | |
85aaf69f | 77 | use core::fmt; |
c34b1796 | 78 | use core::cmp::Ordering; |
62682a34 | 79 | use core::mem::{align_of_val, size_of_val}; |
92a42be0 | 80 | use core::intrinsics::abort; |
1a4d82fc | 81 | use core::mem; |
92a42be0 SL |
82 | use core::ops::Deref; |
83 | #[cfg(not(stage0))] | |
84 | use core::ops::CoerceUnsized; | |
b039eaaf | 85 | use core::ptr::{self, Shared}; |
92a42be0 | 86 | #[cfg(not(stage0))] |
62682a34 | 87 | use core::marker::Unsize; |
1a4d82fc | 88 | use core::hash::{Hash, Hasher}; |
c1a9b12d | 89 | use core::{usize, isize}; |
92a42be0 | 90 | use core::convert::From; |
1a4d82fc JJ |
91 | use heap::deallocate; |
92 | ||
c1a9b12d SL |
93 | const MAX_REFCOUNT: usize = (isize::MAX) as usize; |
94 | ||
/// An atomically reference counted wrapper for shared state.
///
/// # Examples
///
/// In this example, a large vector is shared between several threads.
/// With simple pipes, without `Arc`, a copy would have to be made for each
/// thread.
///
/// When you clone an `Arc<T>`, it will create another pointer to the data and
/// increase the reference counter.
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// fn main() {
///     let numbers: Vec<_> = (0..100u32).collect();
///     let shared_numbers = Arc::new(numbers);
///
///     for _ in 0..10 {
///         let child_numbers = shared_numbers.clone();
///
///         thread::spawn(move || {
///             let local_numbers = &child_numbers[..];
///
///             // Work with the local numbers
///         });
///     }
/// }
/// ```
#[unsafe_no_drop_flag]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
    // FIXME #12808: strange name to try to avoid interfering with
    // field accesses of the contained type via Deref
    _ptr: Shared<ArcInner<T>>,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

// remove cfg after new snapshot
#[cfg(not(stage0))]
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}

/// A weak pointer to an `Arc`.
///
/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
/// used to break cycles between `Arc` pointers.
#[unsafe_no_drop_flag]
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
    // FIXME #12808: strange name to try to avoid interfering with
    // field accesses of the contained type via Deref
    _ptr: Shared<ArcInner<T>>,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}

// remove cfg after new snapshot
#[cfg(not(stage0))]
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = box ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data: data,
        };
        Arc { _ptr: unsafe { Shared::new(Box::into_raw(x)) } }
    }

    /// Unwraps the contained value if the `Arc<T>` has only one strong reference.
    /// This will succeed even if there are outstanding weak references.
    ///
    /// Otherwise, an `Err` is returned with the same `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = x.clone();
    /// assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        // See `drop` for why all these atomics are like this
        if this.inner().strong.compare_and_swap(1, 0, Release) != 1 {
            return Err(this);
        }

        atomic::fence(Acquire);

        unsafe {
            let ptr = *this._ptr;
            let elem = ptr::read(&(*ptr).data);

            // Make a weak pointer to clean up the implicit strong-weak reference
            let _weak = Weak { _ptr: this._ptr };
            mem::forget(this);

            Ok(elem)
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Downgrades the `Arc<T>` to a `Weak<T>` reference.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn downgrade(this: &Self) -> Weak<T> {
        loop {
            // This Relaxed is OK because we're checking the value in the CAS
            // below.
            let cur = this.inner().weak.load(Relaxed);

            // check if the weak counter is currently "locked"; if so, spin.
            if cur == usize::MAX {
                continue;
            }

            // NOTE: this code currently ignores the possibility of overflow
            // into usize::MAX; in general both Rc and Arc need to be adjusted
            // to deal with overflow.

            // Unlike with Clone(), we need this to be an Acquire read to
            // synchronize with the write coming from `is_unique`, so that the
            // events prior to that write happen before this read.
            if this.inner().weak.compare_and_swap(cur, cur + 1, Acquire) == cur {
                return Weak { _ptr: this._ptr };
            }
        }
    }

    /// Get the number of weak references to this value.
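    ///
    /// This does not include the implicit weak reference held collectively by
    /// the strong pointers.
    ///
    /// # Examples
    ///
    /// An illustrative example; the count is inherently racy if other threads
    /// hold clones, and the `arc_counts` feature is unstable:
    ///
    /// ```
    /// #![feature(arc_counts)]
    ///
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _weak_five = Arc::downgrade(&five);
    ///
    /// assert_eq!(Arc::weak_count(&five), 1);
    /// ```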
    #[inline]
    #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy",
               issue = "28356")]
    pub fn weak_count(this: &Self) -> usize {
        this.inner().weak.load(SeqCst) - 1
    }

    /// Get the number of strong references to this value.
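    ///
    /// # Examples
    ///
    /// An illustrative example; as with `weak_count`, the result is racy in
    /// the presence of other threads, and `arc_counts` is an unstable feature:
    ///
    /// ```
    /// #![feature(arc_counts)]
    ///
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _also_five = five.clone();
    ///
    /// assert_eq!(Arc::strong_count(&five), 2);
    /// ```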
    #[inline]
    #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy",
               issue = "28356")]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().strong.load(SeqCst)
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &**self._ptr }
    }

    // Non-inlined part of `drop`.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        let ptr = *self._ptr;

        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        ptr::drop_in_place(&mut (*ptr).data);

        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes a clone of the `Arc<T>`.
    ///
    /// This increases the strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five.clone();
    /// ```
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().strong.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        Arc { _ptr: self._ptr }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Clone> Arc<T> {
    #[unstable(feature = "arc_make_unique", reason = "renamed to Arc::make_mut",
               issue = "27718")]
    #[rustc_deprecated(since = "1.4.0", reason = "renamed to Arc::make_mut")]
    pub fn make_unique(this: &mut Self) -> &mut T {
        Arc::make_mut(this)
    }

    /// Make a mutable reference into the given `Arc<T>` by cloning the inner
    /// data if the `Arc<T>` doesn't have exactly one strong reference and no
    /// weak references.
    ///
    /// This is also referred to as copy-on-write.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut data = Arc::new(5);
    ///
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// let mut other_data = data.clone();      // Won't clone inner data
    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
    ///
    /// // Note: data and other_data now point to different numbers
    /// assert_eq!(*data, 8);
    /// assert_eq!(*other_data, 12);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn make_mut(this: &mut Self) -> &mut T {
        // Note that we hold both a strong reference and a weak reference.
        // Thus, releasing our strong reference only will not, by itself, cause
        // the memory to be deallocated.
        //
        // Use Acquire to ensure that we see any writes to `weak` that happen
        // before release writes (i.e., decrements) to `strong`. Since we hold a
        // weak count, there's no chance the ArcInner itself could be
        // deallocated.
        if this.inner().strong.compare_and_swap(1, 0, Acquire) != 1 {
            // Another strong pointer exists; clone
            *this = Arc::new((**this).clone());
        } else if this.inner().weak.load(Relaxed) != 1 {
            // Relaxed suffices in the above because this is fundamentally an
            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.

            // We removed the last strong ref, but there are additional weak
            // refs remaining. We'll move the contents to a new Arc, and
            // invalidate the other weak refs.

            // Note that it is not possible for the read of `weak` to yield
            // usize::MAX (i.e., locked), since the weak count can only be
            // locked by a thread with a strong reference.

            // Materialize our own implicit weak pointer, so that it can clean
            // up the ArcInner as needed.
            let weak = Weak { _ptr: this._ptr };

            // mark the data itself as already deallocated
            unsafe {
                // there is no data race in the implicit write caused by `read`
                // here (due to zeroing) because data is no longer accessed by
                // other threads (due to there being no more strong refs at this
                // point).
                let mut swap = Arc::new(ptr::read(&(**weak._ptr).data));
                mem::swap(this, &mut swap);
                mem::forget(swap);
            }
        } else {
            // We were the sole reference of either kind; bump back up the
            // strong ref count.
            this.inner().strong.store(1, Release);
        }

        // As with `get_mut()`, the unsafety is ok because our reference was
        // either unique to begin with, or became one upon cloning the contents.
        unsafe {
            let inner = &mut **this._ptr;
            &mut inner.data
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference to the contained value if the `Arc<T>` has
    /// one strong reference and no weak references.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = x.clone();
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            unsafe {
                let inner = &mut **this._ptr;
                Some(&mut inner.data)
            }
        } else {
            None
        }
    }

    /// Determine whether this is the unique reference (including weak refs) to
    /// the underlying data.
    ///
    /// Note that this requires locking the weak ref count.
    fn is_unique(&mut self) -> bool {
        // lock the weak pointer count if we appear to be the sole weak pointer
        // holder.
        //
        // The acquire label here ensures a happens-before relationship with any
        // writes to `strong` prior to decrements of the `weak` count (via drop,
        // which uses Release).
        if self.inner().weak.compare_and_swap(1, usize::MAX, Acquire) == 1 {
            // Due to the previous acquire read, this will observe any writes to
            // `strong` that were due to upgrading weak pointers; only strong
            // clones remain, which require that the strong count is > 1 anyway.
            let unique = self.inner().strong.load(Relaxed) == 1;

            // The release write here synchronizes with a read in `downgrade`,
            // effectively preventing the above read of `strong` from happening
            // after the write.
            self.inner().weak.store(1, Release); // release the lock
            unique
        } else {
            false
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for Arc<T> {
    /// Drops the `Arc<T>`.
    ///
    /// This will decrement the strong reference count. If the strong reference
    /// count becomes zero and the only other references are `Weak<T>` ones,
    /// the inner value is `drop`ped.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// {
    ///     let five = Arc::new(5);
    ///
    ///     // stuff
    ///
    ///     drop(five); // explicit drop
    /// }
    /// {
    ///     let five = Arc::new(5);
    ///
    ///     // stuff
    ///
    /// } // implicit drop
    /// ```
    #[unsafe_destructor_blind_to_params]
    #[inline]
    fn drop(&mut self) {
        // This structure has #[unsafe_no_drop_flag], so this drop glue may run
        // more than once (but it is guaranteed to be zeroed after the first if
        // it's run more than once)
        let ptr = *self._ptr;
        // if ptr.is_null() { return }
        if ptr as *mut u8 as usize == 0 || ptr as *mut u8 as usize == mem::POST_DROP_USIZE {
            return;
        }

        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, Release) != 1 {
            return;
        }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}

impl<T: ?Sized> Weak<T> {
    /// Upgrades a weak reference to a strong reference.
    ///
    /// Upgrades the `Weak<T>` reference to an `Arc<T>`, if possible.
    ///
    /// Returns `None` if there were no strong references and the data was
    /// destroyed.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add because once the count hits 0 it must never be above 0.
        let inner = self.inner();
        loop {
            // Relaxed load because any write of 0 that we can observe
            // leaves the field in a permanently zero state (so a
            // "stale" read of 0 is fine), and any other value is
            // confirmed via the CAS below.
            let n = inner.strong.load(Relaxed);
            if n == 0 {
                return None;
            }

            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
            if n > MAX_REFCOUNT {
                unsafe { abort(); }
            }

            // Relaxed is valid for the same reason it is on Arc's Clone impl
            let old = inner.strong.compare_and_swap(n, n + 1, Relaxed);
            if old == n {
                return Some(Arc { _ptr: self._ptr });
            }
        }
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // See comments above for why this is "safe"
        unsafe { &**self._ptr }
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
    /// Makes a clone of the `Weak<T>`.
    ///
    /// This increases the weak reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let weak_five = Arc::downgrade(&Arc::new(5));
    ///
    /// weak_five.clone();
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T> {
        // See comments in Arc::clone() for why this is relaxed. This can use a
        // fetch_add (ignoring the lock) because the weak count is only locked
        // when there are *no other* weak pointers in existence. (So we can't
        // be running this code in that case).
        let old_size = self.inner().weak.fetch_add(1, Relaxed);

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        return Weak { _ptr: self._ptr };
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for Weak<T> {
    /// Drops the `Weak<T>`.
    ///
    /// This will decrement the weak reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// {
    ///     let five = Arc::new(5);
    ///     let weak_five = Arc::downgrade(&five);
    ///
    ///     // stuff
    ///
    ///     drop(weak_five); // explicit drop
    /// }
    /// {
    ///     let five = Arc::new(5);
    ///     let weak_five = Arc::downgrade(&five);
    ///
    ///     // stuff
    ///
    /// } // implicit drop
    /// ```
    fn drop(&mut self) {
        let ptr = *self._ptr;

        // see comments above for why this check is here
        if ptr as *mut u8 as usize == 0 || ptr as *mut u8 as usize == mem::POST_DROP_USIZE {
            return;
        }

        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings.
        //
        // It's not necessary to check for the locked state here, because the
        // weak count can only be locked if there was precisely one weak ref,
        // meaning that drop could only subsequently run ON that remaining weak
        // ref, which can only happen after the lock is released.
        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            unsafe { deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) }
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    /// Equality for two `Arc<T>`s.
    ///
    /// Two `Arc<T>`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five == Arc::new(5);
    /// ```
    fn eq(&self, other: &Arc<T>) -> bool {
        *(*self) == *(*other)
    }

    /// Inequality for two `Arc<T>`s.
    ///
    /// Two `Arc<T>`s are unequal if their inner values are unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five != Arc::new(5);
    /// ```
    fn ne(&self, other: &Arc<T>) -> bool {
        *(*self) != *(*other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    /// Partial comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five.partial_cmp(&Arc::new(5));
    /// ```
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five < Arc::new(5);
    /// ```
    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    /// 'Less-than or equal to' comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five <= Arc::new(5);
    /// ```
    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    /// Greater-than comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five > Arc::new(5);
    /// ```
    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    /// 'Greater-than or equal to' comparison for two `Arc<T>`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// five >= Arc::new(5);
    /// ```
    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Arc<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&*self._ptr, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
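    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
    ///
    /// # Examples
    ///
    /// A brief illustration:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<i32> = Default::default();
    /// assert_eq!(*x, 0);
    /// ```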
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Arc<T> {
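    /// Wraps the value in a new `Arc<T>`, equivalent to `Arc::new(t)`.
    ///
    /// # Examples
    ///
    /// A brief illustration of the conversion:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::from(5);
    /// assert_eq!(*five, 5);
    /// ```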
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

#[cfg(test)]
mod tests {
    use std::clone::Clone;
    use std::sync::mpsc::channel;
    use std::mem::drop;
    use std::ops::Drop;
    use std::option::Option;
    use std::option::Option::{Some, None};
    use std::sync::atomic;
    use std::sync::atomic::Ordering::{Acquire, SeqCst};
    use std::thread;
    use std::vec::Vec;
    use super::{Arc, Weak};
    use std::sync::Mutex;
    use std::convert::From;

    struct Canary(*mut atomic::AtomicUsize);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                match *self {
                    Canary(c) => {
                        (*c).fetch_add(1, SeqCst);
                    }
                }
            }
        }
    }

    #[test]
    fn manually_share_arc() {
        let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let arc_v = Arc::new(v);

        let (tx, rx) = channel();

        let _t = thread::spawn(move || {
            let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
            assert_eq!((*arc_v)[3], 4);
        });

        tx.send(arc_v.clone()).unwrap();

        assert_eq!((*arc_v)[2], 3);
        assert_eq!((*arc_v)[4], 5);
    }

    #[test]
    fn test_arc_get_mut() {
        let mut x = Arc::new(3);
        *Arc::get_mut(&mut x).unwrap() = 4;
        assert_eq!(*x, 4);
        let y = x.clone();
        assert!(Arc::get_mut(&mut x).is_none());
        drop(y);
        assert!(Arc::get_mut(&mut x).is_some());
        let _w = Arc::downgrade(&x);
        assert!(Arc::get_mut(&mut x).is_none());
    }

    #[test]
    fn try_unwrap() {
        let x = Arc::new(3);
        assert_eq!(Arc::try_unwrap(x), Ok(3));
        let x = Arc::new(4);
        let _y = x.clone();
        assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
        let x = Arc::new(5);
        let _w = Arc::downgrade(&x);
        assert_eq!(Arc::try_unwrap(x), Ok(5));
    }

    #[test]
    fn test_cowarc_clone_make_mut() {
        let mut cow0 = Arc::new(75);
        let mut cow1 = cow0.clone();
        let mut cow2 = cow1.clone();

        assert!(75 == *Arc::make_mut(&mut cow0));
        assert!(75 == *Arc::make_mut(&mut cow1));
        assert!(75 == *Arc::make_mut(&mut cow2));

        *Arc::make_mut(&mut cow0) += 1;
        *Arc::make_mut(&mut cow1) += 2;
        *Arc::make_mut(&mut cow2) += 3;

        assert!(76 == *cow0);
        assert!(77 == *cow1);
        assert!(78 == *cow2);

        // none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
    }

    #[test]
    fn test_cowarc_clone_unique2() {
        let mut cow0 = Arc::new(75);
        let cow1 = cow0.clone();
        let cow2 = cow1.clone();

        assert!(75 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        *Arc::make_mut(&mut cow0) += 1;
        assert!(76 == *cow0);
        assert!(75 == *cow1);
        assert!(75 == *cow2);

        // cow1 and cow2 should share the same contents
        // cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 == *cow2);
    }

    #[test]
    fn test_cowarc_clone_weak() {
        let mut cow0 = Arc::new(75);
        let cow1_weak = Arc::downgrade(&cow0);

        assert!(75 == *cow0);
        assert!(75 == *cow1_weak.upgrade().unwrap());

        *Arc::make_mut(&mut cow0) += 1;

        assert!(76 == *cow0);
        assert!(cow1_weak.upgrade().is_none());
    }

    #[test]
    fn test_live() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        assert!(y.upgrade().is_some());
    }

    #[test]
    fn test_dead() {
        let x = Arc::new(5);
        let y = Arc::downgrade(&x);
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn weak_self_cyclic() {
        struct Cycle {
            x: Mutex<Option<Weak<Cycle>>>,
        }

        let a = Arc::new(Cycle { x: Mutex::new(None) });
        let b = Arc::downgrade(&a.clone());
        *a.x.lock().unwrap() = Some(b);

        // hopefully we don't double-free (or leak)...
    }

    #[test]
    fn drop_arc() {
        let mut canary = atomic::AtomicUsize::new(0);
        let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        drop(x);
        assert!(canary.load(Acquire) == 1);
    }

    #[test]
    fn drop_arc_weak() {
        let mut canary = atomic::AtomicUsize::new(0);
        let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
        let arc_weak = Arc::downgrade(&arc);
        assert!(canary.load(Acquire) == 0);
        drop(arc);
        assert!(canary.load(Acquire) == 1);
        drop(arc_weak);
    }

    #[test]
    fn test_strong_count() {
        let a = Arc::new(0u32);
        assert!(Arc::strong_count(&a) == 1);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        let b = w.upgrade().expect("");
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&a) == 2);
        drop(w);
        drop(a);
        assert!(Arc::strong_count(&b) == 1);
        let c = b.clone();
        assert!(Arc::strong_count(&b) == 2);
        assert!(Arc::strong_count(&c) == 2);
    }

    #[test]
    fn test_weak_count() {
        let a = Arc::new(0u32);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let w = Arc::downgrade(&a);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 1);
        let x = w.clone();
        assert!(Arc::weak_count(&a) == 2);
        drop(w);
        drop(x);
        assert!(Arc::strong_count(&a) == 1);
        assert!(Arc::weak_count(&a) == 0);
        let c = a.clone();
        assert!(Arc::strong_count(&a) == 2);
        assert!(Arc::weak_count(&a) == 0);
        let d = Arc::downgrade(&c);
        assert!(Arc::weak_count(&c) == 1);
        assert!(Arc::strong_count(&c) == 2);

        drop(a);
        drop(c);
        drop(d);
    }

    #[test]
    fn show_arc() {
        let a = Arc::new(5u32);
        assert_eq!(format!("{:?}", a), "5");
    }

    // Make sure deriving works with Arc<T>
    #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
    struct Foo {
        inner: Arc<i32>,
    }

    #[test]
    fn test_unsized() {
        let x: Arc<[i32]> = Arc::new([1, 2, 3]);
        assert_eq!(format!("{:?}", x), "[1, 2, 3]");
        let y = Arc::downgrade(&x.clone());
        drop(x);
        assert!(y.upgrade().is_none());
    }

    #[test]
    fn test_from_owned() {
        let foo = 123;
        let foo_arc = Arc::from(foo);
        assert!(123 == *foo_arc);
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsRef<T> for Arc<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}