#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][arc] documentation for more details.
//!
//! [arc]: struct.Arc.html

use core::any::Any;
use core::array::LengthAtMost32;
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
use core::borrow;
use core::fmt;
use core::cmp::{self, Ordering};
use core::iter;
use core::intrinsics::abort;
use core::mem::{self, align_of, align_of_val, size_of_val};
use core::ops::{Deref, Receiver, CoerceUnsized, DispatchFromDyn};
use core::pin::Pin;
use core::ptr::{self, NonNull};
use core::marker::{Unpin, Unsize, PhantomData};
use core::hash::{Hash, Hasher};
use core::{isize, usize};
use core::convert::{From, TryFrom};
use core::slice::{self, from_raw_parts_mut};

use crate::alloc::{Global, Alloc, Layout, box_free, handle_alloc_error};
use crate::boxed::Box;
use crate::rc::is_dangling;
use crate::string::String;
use crate::vec::Vec;

#[cfg(test)]
mod tests;

/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references).
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new `Arc` instance, which points to the same value on the heap as the
/// source `Arc`, while increasing a reference count. When the last `Arc`
/// pointer to a given value is destroyed, the pointed-to value is also
/// destroyed.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you need to mutate through an `Arc`, use
/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
/// types.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted values between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// `Arc<`[`RefCell<T>`]`>`. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
/// [`Send`], `Arc<`[`RefCell<T>`]`>` would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
///
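/// For example, a counter shared across threads can be wrapped in an
/// `Arc<Mutex<_>>` (a minimal sketch of the pairing described above):
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
///
/// let handles: Vec<_> = (0..4).map(|_| {
///     let counter = Arc::clone(&counter);
///     thread::spawn(move || {
///         *counter.lock().unwrap() += 1;
///     })
/// }).collect();
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```
///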
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`][weak] pointer. A [`Weak`][weak] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value has already been
/// dropped.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`][weak] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`][weak]
/// pointers from children back to their parents.
///
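/// A minimal sketch of that parent/child pattern (the `Node` type here is
/// illustrative, not part of the library):
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     parent: Mutex<Weak<Node>>,
///     children: Mutex<Vec<Arc<Node>>>,
/// }
///
/// let parent = Arc::new(Node {
///     parent: Mutex::new(Weak::new()),
///     children: Mutex::new(Vec::new()),
/// });
/// let child = Arc::new(Node {
///     parent: Mutex::new(Arc::downgrade(&parent)),
///     children: Mutex::new(Vec::new()),
/// });
/// parent.children.lock().unwrap().push(Arc::clone(&child));
///
/// // The child can reach its parent, but the `Weak` pointer does not
/// // keep the parent alive, so no reference cycle is formed.
/// assert!(child.parent.lock().unwrap().upgrade().is_some());
/// ```
///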
/// # Cloning references
///
/// Creating a new reference from an existing reference counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][arc] and [`Weak<T>`][weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using function-like syntax:
///
/// ```
/// use std::sync::Arc;
/// let my_arc = Arc::new(());
///
/// Arc::downgrade(&my_arc);
/// ```
///
/// [`Weak<T>`][weak] does not auto-dereference to `T`, because the value may have
/// already been destroyed.
///
/// [arc]: struct.Arc.html
/// [weak]: struct.Weak.html
/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: ../../std/sync/atomic/index.html
/// [`Send`]: ../../std/marker/trait.Send.html
/// [`Sync`]: ../../std/marker/trait.Sync.html
/// [deref]: ../../std/ops/trait.Deref.html
/// [downgrade]: struct.Arc.html#method.downgrade
/// [upgrade]: struct.Weak.html#method.upgrade
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`RefCell<T>`]: ../../std/cell/struct.RefCell.html
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: #method.clone
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
// Note that we **do not** run these tests here. The windows builders get super
// unhappy if a thread outlives the main thread and then exits at the same time
// (something deadlocks) so we just avoid this entirely by not running these
// tests.
/// ```no_run
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{:?}", five);
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: ../../std/sync/atomic/struct.AtomicUsize.html
///
/// ```no_run
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::SeqCst);
///         println!("{:?}", v);
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: ../../std/rc/index.html#examples
#[cfg_attr(not(test), lang = "arc")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<T>,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}

#[unstable(feature = "dispatch_from_dyn", issue = "0")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}

impl<T: ?Sized> Arc<T> {
    fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        Self {
            ptr,
            phantom: PhantomData,
        }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        Self::from_inner(NonNull::new_unchecked(ptr))
    }
}

/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed value. The value is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the inner value from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present and may return [`None`]
/// when [`upgrade`]d.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the value
/// within [`Arc`] without extending its lifetime. It is also used to prevent
/// circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
///
/// [`Arc`]: struct.Arc.html
/// [`Arc::downgrade`]: struct.Arc.html#method.downgrade
/// [`upgrade`]: struct.Weak.html#method.upgrade
/// [`Option`]: ../../std/option/enum.Option.html
/// [`None`]: ../../std/option/enum.Option.html#variant.None
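///
/// A minimal sketch of obtaining and using a `Weak`:
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new("hello".to_owned());
/// let weak = Arc::downgrade(&strong);
///
/// assert!(weak.upgrade().is_some());
/// drop(strong);
/// assert!(weak.upgrade().is_none());
/// ```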
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because RcBox has alignment at least 2.
    ptr: NonNull<ArcInner<T>>,
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}

#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "0")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = box ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        };
        Self::from_inner(Box::into_raw_non_null(x))
    }

    /// Constructs a new `Arc` with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
            ))
        }
    }

    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
    /// `data` will be pinned in memory and unable to be moved.
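    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::pin(5);
    ///
    /// // A `Pin<Arc<T>>` still dereferences to `T`.
    /// assert_eq!(*five, 5);
    /// ```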
    #[stable(feature = "pin", since = "1.33.0")]
    pub fn pin(data: T) -> Pin<Arc<T>> {
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }

    /// Returns the contained value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`][result] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// [result]: ../../std/result/enum.Result.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        // See `drop` for why all these atomics are like this
        if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() {
            return Err(this);
        }

        atomic::fence(Acquire);

        unsafe {
            let elem = ptr::read(&this.ptr.as_ref().data);

            // Make a weak pointer to clean up the implicit strong-weak reference
            let _weak = Weak { ptr: this.ptr };
            mem::forget(this);

            Ok(elem)
        }
    }
}

impl<T> Arc<[T]> {
    /// Constructs a new reference-counted slice with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// let values = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
    ///
    ///     values.assume_init()
    /// };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_slice(len))
        }
    }
}

impl<T> Arc<mem::MaybeUninit<T>> {
    /// Converts to `Arc<T>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: ../../std/mem/union.MaybeUninit.html#method.assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<T> {
        Arc::from_inner(mem::ManuallyDrop::new(self).ptr.cast())
    }
}

impl<T> Arc<[mem::MaybeUninit<T>]> {
    /// Converts to `Arc<[T]>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: ../../std/mem/union.MaybeUninit.html#method.assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(new_uninit)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// let values = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
    ///
    ///     values.assume_init()
    /// };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[unstable(feature = "new_uninit", issue = "63291")]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<[T]> {
        Arc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _)
    }
}

impl<T: ?Sized> Arc<T> {
    /// Consumes the `Arc`, returning the wrapped pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
    /// [`Arc::from_raw`][from_raw].
    ///
    /// [from_raw]: struct.Arc.html#method.from_raw
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    /// assert_eq!(unsafe { &*x_ptr }, "hello");
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub fn into_raw(this: Self) -> *const T {
        let ptr: *const T = &*this;
        mem::forget(this);
        ptr
    }

    /// Constructs an `Arc` from a raw pointer.
    ///
    /// The raw pointer must have been previously returned by a call to a
    /// [`Arc::into_raw`][into_raw].
    ///
    /// This function is unsafe because improper use may lead to memory problems. For example, a
    /// double-free may occur if the function is called twice on the same raw pointer.
    ///
    /// [into_raw]: struct.Arc.html#method.into_raw
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw(x_ptr);
    ///     assert_eq!(&*x, "hello");
    ///
    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
    /// }
    ///
    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
    /// ```
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        let offset = data_offset(ptr);

        // Reverse the offset to find the original ArcInner.
        let fake_ptr = ptr as *mut ArcInner<T>;
        let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));

        Self::from_ptr(arc_ptr)
    }

    /// Consumes the `Arc`, returning the wrapped pointer as `NonNull<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(rc_into_raw_non_null)]
    ///
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let ptr = Arc::into_raw_non_null(x);
    /// let deref = unsafe { ptr.as_ref() };
    /// assert_eq!(deref, "hello");
    /// ```
    #[unstable(feature = "rc_into_raw_non_null", issue = "47336")]
    #[inline]
    pub fn into_raw_non_null(this: Self) -> NonNull<T> {
        // safe because Arc guarantees its pointer is non-null
        unsafe { NonNull::new_unchecked(Arc::into_raw(this) as *mut _) }
    }

    /// Creates a new [`Weak`][weak] pointer to this value.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn downgrade(this: &Self) -> Weak<T> {
        // This Relaxed is OK because we're checking the value in the CAS
        // below.
        let mut cur = this.inner().weak.load(Relaxed);

        loop {
            // check if the weak counter is currently "locked"; if so, spin.
            if cur == usize::MAX {
                cur = this.inner().weak.load(Relaxed);
                continue;
            }

            // NOTE: this code currently ignores the possibility of overflow
            // into usize::MAX; in general both Rc and Arc need to be adjusted
            // to deal with overflow.

            // Unlike with Clone(), we need this to be an Acquire read to
            // synchronize with the write coming from `is_unique`, so that the
            // events prior to that write happen before this read.
            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
                Ok(_) => {
                    // Make sure we do not create a dangling Weak
                    debug_assert!(!is_dangling(this.ptr));
                    return Weak { ptr: this.ptr };
                }
                Err(old) => cur = old,
            }
        }
    }

    /// Gets the number of [`Weak`][weak] pointers to this value.
    ///
    /// [weak]: struct.Weak.html
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the weak count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _weak_five = Arc::downgrade(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` or `Weak` between threads.
    /// assert_eq!(1, Arc::weak_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn weak_count(this: &Self) -> usize {
        let cnt = this.inner().weak.load(SeqCst);
        // If the weak count is currently locked, the value of the
        // count was 0 just before taking the lock.
        if cnt == usize::MAX { 0 } else { cnt - 1 }
    }

    /// Gets the number of strong (`Arc`) pointers to this value.
    ///
    /// # Safety
    ///
    /// This method by itself is safe, but using it correctly requires extra care.
    /// Another thread can change the strong count at any time,
    /// including potentially between calling this method and acting on the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let _also_five = Arc::clone(&five);
    ///
    /// // This assertion is deterministic because we haven't shared
    /// // the `Arc` between threads.
    /// assert_eq!(2, Arc::strong_count(&five));
    /// ```
    #[inline]
    #[stable(feature = "arc_counts", since = "1.15.0")]
    pub fn strong_count(this: &Self) -> usize {
        this.inner().strong.load(SeqCst)
    }

    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { self.ptr.as_ref() }
    }

    // Non-inlined part of `drop`.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        // Destroy the data at this time, even though we may not free the box
        // allocation itself (there may still be weak pointers lying around).
        ptr::drop_in_place(&mut self.ptr.as_mut().data);

        if self.inner().weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
        }
    }

    #[inline]
    #[stable(feature = "ptr_eq", since = "1.17.0")]
    /// Returns `true` if the two `Arc`s point to the same value (not
    /// just values that compare as equal).
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let same_five = Arc::clone(&five);
    /// let other_five = Arc::new(5);
    ///
    /// assert!(Arc::ptr_eq(&five, &same_five));
    /// assert!(!Arc::ptr_eq(&five, &other_five));
    /// ```
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.ptr.as_ptr() == other.ptr.as_ptr()
    }
}

impl<T: ?Sized> Arc<T> {
    /// Allocates an `ArcInner<T>` with sufficient space for
    /// a possibly-unsized value where the value has the layout provided.
    ///
    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return back a (potentially fat)-pointer for the `ArcInner<T>`.
    unsafe fn allocate_for_layout(
        value_layout: Layout,
        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>
    ) -> *mut ArcInner<T> {
        // Calculate layout using the given value layout.
        // Previously, layout was calculated on the expression
        // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
        // reference (see #54908).
        let layout = Layout::new::<ArcInner<()>>()
            .extend(value_layout).unwrap().0
            .pad_to_align().unwrap();

        let mem = Global.alloc(layout)
            .unwrap_or_else(|_| handle_alloc_error(layout));

        // Initialize the ArcInner
        let inner = mem_to_arcinner(mem.as_ptr());
        debug_assert_eq!(Layout::for_value(&*inner), layout);

        ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
        ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));

        inner
    }

    /// Allocates an `ArcInner<T>` with sufficient space for an unsized value.
    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
        // Allocate for the `ArcInner<T>` using the given value.
        Self::allocate_for_layout(
            Layout::for_value(&*ptr),
            |mem| set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>,
        )
    }

    fn from_box(v: Box<T>) -> Arc<T> {
        unsafe {
            let box_unique = Box::into_unique(v);
            let bptr = box_unique.as_ptr();

            let value_size = size_of_val(&*bptr);
            let ptr = Self::allocate_for_ptr(bptr);

            // Copy value as bytes
            ptr::copy_nonoverlapping(
                bptr as *const T as *const u8,
                &mut (*ptr).data as *mut _ as *mut u8,
                value_size);

            // Free the allocation without dropping its contents
            box_free(box_unique);

            Self::from_ptr(ptr)
        }
    }
}

impl<T> Arc<[T]> {
    /// Allocates an `ArcInner<[T]>` with the given length.
    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
        Self::allocate_for_layout(
            Layout::array::<T>(len).unwrap(),
            |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
        )
    }
}

/// Sets the data pointer of a `?Sized` raw pointer.
///
/// For a slice/trait object, this sets the `data` field and leaves the rest
/// unchanged. For a sized raw pointer, this simply sets the pointer.
unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
    ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
    ptr
}

impl<T> Arc<[T]> {
    /// Copy elements from slice into newly allocated Arc<[T]>
    ///
    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
        let ptr = Self::allocate_for_slice(v.len());

        ptr::copy_nonoverlapping(
            v.as_ptr(),
            &mut (*ptr).data as *mut [T] as *mut T,
            v.len());

        Self::from_ptr(ptr)
    }

    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
    ///
    /// Behavior is undefined should the size be wrong.
    unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
        // Panic guard while cloning T elements.
        // In the event of a panic, elements that have been written
        // into the new ArcInner will be dropped, then the memory freed.
        struct Guard<T> {
            mem: NonNull<u8>,
            elems: *mut T,
            layout: Layout,
            n_elems: usize,
        }

        impl<T> Drop for Guard<T> {
            fn drop(&mut self) {
                unsafe {
                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
                    ptr::drop_in_place(slice);

                    Global.dealloc(self.mem.cast(), self.layout);
                }
            }
        }

        let ptr = Self::allocate_for_slice(len);

        let mem = ptr as *mut _ as *mut u8;
        let layout = Layout::for_value(&*ptr);

        // Pointer to first element
        let elems = &mut (*ptr).data as *mut [T] as *mut T;

        let mut guard = Guard {
            mem: NonNull::new_unchecked(mem),
            elems,
            layout,
            n_elems: 0,
        };

        for (i, item) in iter.enumerate() {
            ptr::write(elems.add(i), item);
            guard.n_elems += 1;
        }

        // All clear. Forget the guard so it doesn't free the new ArcInner.
        mem::forget(guard);

        Self::from_ptr(ptr)
    }
}

/// Specialization trait used for `From<&[T]>`.
trait ArcFromSlice<T> {
    fn from_slice(slice: &[T]) -> Self;
}

impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    default fn from_slice(v: &[T]) -> Self {
        unsafe {
            Self::from_iter_exact(v.iter().cloned(), v.len())
        }
    }
}

impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
    #[inline]
    fn from_slice(v: &[T]) -> Self {
        unsafe { Arc::copy_from_slice(v) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes a clone of the `Arc` pointer.
    ///
    /// This creates another pointer to the same inner value, increasing the
    /// strong reference count.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let _ = Arc::clone(&five);
    /// ```
    #[inline]
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().strong.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after-free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        Self::from_inner(self.ptr)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

#[unstable(feature = "receiver_trait", issue = "0")]
impl<T: ?Sized> Receiver for Arc<T> {}

impl<T: Clone> Arc<T> {
    /// Makes a mutable reference into the given `Arc`.
    ///
    /// If there are other `Arc` or [`Weak`][weak] pointers to the same value,
    /// then `make_mut` will invoke [`clone`][clone] on the inner value to
    /// ensure unique ownership. This is also referred to as clone-on-write.
    ///
    /// See also [`get_mut`][get_mut], which will fail rather than cloning.
    ///
    /// [weak]: struct.Weak.html
    /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
    /// [get_mut]: struct.Arc.html#method.get_mut
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut data = Arc::new(5);
    ///
    /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
    /// *Arc::make_mut(&mut data) += 1; // Clones inner data
    /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
    /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything
    ///
    /// // Now `data` and `other_data` point to different values.
    /// assert_eq!(*data, 8);
    /// assert_eq!(*other_data, 12);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn make_mut(this: &mut Self) -> &mut T {
        // Note that we hold both a strong reference and a weak reference.
        // Thus, releasing our strong reference only will not, by itself, cause
        // the memory to be deallocated.
        //
        // Use Acquire to ensure that we see any writes to `weak` that happen
        // before release writes (i.e., decrements) to `strong`. Since we hold a
        // weak count, there's no chance the ArcInner itself could be
        // deallocated.
        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
            // Another strong pointer exists; clone
            *this = Arc::new((**this).clone());
        } else if this.inner().weak.load(Relaxed) != 1 {
            // Relaxed suffices in the above because this is fundamentally an
            // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.

            // We removed the last strong ref, but there are additional weak
            // refs remaining. We'll move the contents to a new Arc, and
            // invalidate the other weak refs.

            // Note that it is not possible for the read of `weak` to yield
            // usize::MAX (i.e., locked), since the weak count can only be
            // locked by a thread with a strong reference.

            // Materialize our own implicit weak pointer, so that it can clean
            // up the ArcInner as needed.
            let weak = Weak { ptr: this.ptr };

            // mark the data itself as already deallocated
            unsafe {
                // there is no data race in the implicit write caused by `read`
                // here (due to zeroing) because data is no longer accessed by
                // other threads (due to there being no more strong refs at this
                // point).
                let mut swap = Arc::new(ptr::read(&weak.ptr.as_ref().data));
                mem::swap(this, &mut swap);
                mem::forget(swap);
            }
        } else {
            // We were the sole reference of either kind; bump back up the
            // strong ref count.
            this.inner().strong.store(1, Release);
        }

        // As with `get_mut()`, the unsafety is ok because our reference was
        // either unique to begin with, or became one upon cloning the contents.
        unsafe {
            &mut this.ptr.as_mut().data
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Returns a mutable reference to the inner value, if there are
    /// no other `Arc` or [`Weak`][weak] pointers to the same value.
    ///
    /// Returns [`None`][option] otherwise, because it is not safe to
    /// mutate a shared value.
    ///
    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
    /// the inner value when it's shared.
    ///
    /// [weak]: struct.Weak.html
    /// [option]: ../../std/option/enum.Option.html
    /// [make_mut]: struct.Arc.html#method.make_mut
    /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(3);
    /// *Arc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    ///
    /// let _y = Arc::clone(&x);
    /// assert!(Arc::get_mut(&mut x).is_none());
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            unsafe {
                Some(Arc::get_mut_unchecked(this))
            }
        } else {
            None
        }
    }

    /// Returns a mutable reference to the inner value,
    /// without any check.
    ///
    /// See also [`get_mut`], which is safe and does appropriate checks.
    ///
    /// [`get_mut`]: struct.Arc.html#method.get_mut
    ///
    /// # Safety
    ///
    /// Any other `Arc` or [`Weak`] pointers to the same value must not be dereferenced
    /// for the duration of the returned borrow.
    /// This is trivially the case if no such pointers exist,
    /// for example immediately after `Arc::new`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut x = Arc::new(String::new());
    /// unsafe {
    ///     Arc::get_mut_unchecked(&mut x).push_str("foo")
    /// }
    /// assert_eq!(*x, "foo");
    /// ```
    #[inline]
    #[unstable(feature = "get_mut_unchecked", issue = "63292")]
    pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
        &mut this.ptr.as_mut().data
    }

    /// Determine whether this is the unique reference (including weak refs) to
    /// the underlying data.
    ///
    /// Note that this requires locking the weak ref count.
    fn is_unique(&mut self) -> bool {
        // lock the weak pointer count if we appear to be the sole weak pointer
        // holder.
        //
        // The acquire label here ensures a happens-before relationship with any
        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
        if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
            // counter in `drop` -- the only access that happens when any but the last reference
            // is being dropped.
            let unique = self.inner().strong.load(Acquire) == 1;

            // The release write here synchronizes with a read in `downgrade`,
            // effectively preventing the above read of `strong` from happening
            // after the write.
            self.inner().weak.store(1, Release); // release the lock
            unique
        } else {
            false
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
    /// Drops the `Arc`.
    ///
    /// This will decrement the strong reference count. If the strong reference
    /// count reaches zero then the only other references (if any) are
    /// [`Weak`], so we `drop` the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo = Arc::new(Foo);
    /// let foo2 = Arc::clone(&foo);
    ///
    /// drop(foo); // Doesn't print anything
    /// drop(foo2); // Prints "dropped!"
    /// ```
    ///
    /// [`Weak`]: ../../std/sync/struct.Weak.html
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        if self.inner().strong.fetch_sub(1, Release) != 1 {
            return;
        }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // In particular, while the contents of an Arc are usually immutable, it's
        // possible to have interior writes to something like a Mutex<T>. Since a
        // Mutex is not acquired when it is deleted, we can't rely on its
        // synchronization logic to make writes in thread A visible to a destructor
        // running in thread B.
        //
        // Also note that the Acquire fence here could probably be replaced with an
        // Acquire load, which could improve performance in highly-contended
        // situations. See [2].
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: (https://github.com/rust-lang/rust/pull/41714)
        atomic::fence(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}

impl Arc<dyn Any + Send + Sync> {
    #[inline]
    #[stable(feature = "rc_downcast", since = "1.29.0")]
    /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::any::Any;
    /// use std::sync::Arc;
    ///
    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
    ///     if let Ok(string) = value.downcast::<String>() {
    ///         println!("String ({}): {}", string.len(), string);
    ///     }
    /// }
    ///
    /// fn main() {
    ///     let my_string = "Hello World".to_string();
    ///     print_if_string(Arc::new(my_string));
    ///     print_if_string(Arc::new(0i8));
    /// }
    /// ```
    pub fn downcast<T>(self) -> Result<Arc<T>, Self>
    where
        T: Any + Send + Sync + 'static,
    {
        if (*self).is::<T>() {
            let ptr = self.ptr.cast::<ArcInner<T>>();
            mem::forget(self);
            Ok(Arc::from_inner(ptr))
        } else {
            Err(self)
        }
    }
}

impl<T> Weak<T> {
    /// Constructs a new `Weak<T>`, without allocating any memory.
    /// Calling [`upgrade`] on the return value always gives [`None`].
    ///
    /// [`upgrade`]: struct.Weak.html#method.upgrade
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Weak::new();
    /// assert!(empty.upgrade().is_none());
    /// ```
    #[stable(feature = "downgraded_weak", since = "1.10.0")]
    pub fn new() -> Weak<T> {
        Weak {
            ptr: NonNull::new(usize::MAX as *mut ArcInner<T>).expect("MAX is not 0"),
        }
    }

    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
    ///
    /// It is up to the caller to ensure that the object is still alive when accessing it through
    /// the pointer.
    ///
    /// The pointer may be [`null`] or be dangling in case the object has already been destroyed.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(weak_into_raw)]
    ///
    /// use std::sync::Arc;
    /// use std::ptr;
    ///
    /// let strong = Arc::new("hello".to_owned());
    /// let weak = Arc::downgrade(&strong);
    /// // Both point to the same object
    /// assert!(ptr::eq(&*strong, weak.as_raw()));
    /// // The strong here keeps it alive, so we can still access the object.
    /// assert_eq!("hello", unsafe { &*weak.as_raw() });
    ///
    /// drop(strong);
    /// // But not any more. We can do weak.as_raw(), but accessing the pointer would lead to
    /// // undefined behaviour.
    /// // assert_eq!("hello", unsafe { &*weak.as_raw() });
    /// ```
    ///
    /// [`null`]: ../../std/ptr/fn.null.html
    #[unstable(feature = "weak_into_raw", issue = "60728")]
    pub fn as_raw(&self) -> *const T {
        match self.inner() {
            None => ptr::null(),
            Some(inner) => {
                let offset = data_offset_sized::<T>();
                let ptr = inner as *const ArcInner<T>;
                // Note: while the pointer we create may already point to a dropped value, the
                // allocation still lives (it must hold the weak count as long as we are alive).
                // Therefore, the offset is OK to do, it won't get out of the allocation.
                let ptr = unsafe { (ptr as *const u8).offset(offset) };
                ptr as *const T
            }
        }
    }

    /// Consumes the `Weak<T>` and turns it into a raw pointer.
    ///
    /// This converts the weak pointer into a raw pointer, preserving the original weak count. It
    /// can be turned back into the `Weak<T>` with [`from_raw`].
    ///
    /// The same restrictions of accessing the target of the pointer as with
    /// [`as_raw`] apply.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(weak_into_raw)]
    ///
    /// use std::sync::{Arc, Weak};
    ///
    /// let strong = Arc::new("hello".to_owned());
    /// let weak = Arc::downgrade(&strong);
    /// let raw = weak.into_raw();
    ///
    /// assert_eq!(1, Arc::weak_count(&strong));
    /// assert_eq!("hello", unsafe { &*raw });
    ///
    /// drop(unsafe { Weak::from_raw(raw) });
    /// assert_eq!(0, Arc::weak_count(&strong));
    /// ```
    ///
    /// [`from_raw`]: struct.Weak.html#method.from_raw
    /// [`as_raw`]: struct.Weak.html#method.as_raw
    #[unstable(feature = "weak_into_raw", issue = "60728")]
    pub fn into_raw(self) -> *const T {
        let result = self.as_raw();
        mem::forget(self);
        result
    }

    /// Converts a raw pointer previously created by [`into_raw`] back into
    /// `Weak<T>`.
    ///
    /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
    ///
    /// It takes ownership of one weak count. In case a [`null`] is passed, a dangling [`Weak`] is
    /// returned.
    ///
    /// # Safety
    ///
    /// The pointer must represent one valid weak count. In other words, it must point to `T` which
    /// is or *was* managed by an [`Arc`] and the weak count of that [`Arc`] must not have reached
    /// 0. It is allowed for the strong count to be 0.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(weak_into_raw)]
    ///
    /// use std::sync::{Arc, Weak};
    ///
    /// let strong = Arc::new("hello".to_owned());
    ///
    /// let raw_1 = Arc::downgrade(&strong).into_raw();
    /// let raw_2 = Arc::downgrade(&strong).into_raw();
    ///
    /// assert_eq!(2, Arc::weak_count(&strong));
    ///
    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
    /// assert_eq!(1, Arc::weak_count(&strong));
    ///
    /// drop(strong);
    ///
    /// // Decrement the last weak count.
    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
    /// ```
    ///
    /// [`null`]: ../../std/ptr/fn.null.html
    /// [`into_raw`]: struct.Weak.html#method.into_raw
    /// [`upgrade`]: struct.Weak.html#method.upgrade
    /// [`Weak`]: struct.Weak.html
    /// [`Arc`]: struct.Arc.html
    #[unstable(feature = "weak_into_raw", issue = "60728")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        if ptr.is_null() {
            Self::new()
        } else {
            // See Arc::from_raw for details
            let offset = data_offset(ptr);
            let fake_ptr = ptr as *mut ArcInner<T>;
            let ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
            Weak {
                ptr: NonNull::new(ptr).expect("Invalid pointer passed to from_raw"),
            }
        }
    }
}

impl<T: ?Sized> Weak<T> {
    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], extending
    /// the lifetime of the value if successful.
    ///
    /// Returns [`None`] if the value has since been dropped.
    ///
    /// [`Arc`]: struct.Arc.html
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// assert!(strong_five.is_some());
    ///
    /// // Destroy all strong pointers.
    /// drop(strong_five);
    /// drop(five);
    ///
    /// assert!(weak_five.upgrade().is_none());
    /// ```
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn upgrade(&self) -> Option<Arc<T>> {
        // We use a CAS loop to increment the strong count instead of a
        // fetch_add because once the count hits 0 it must never be above 0.
        let inner = self.inner()?;

        // Relaxed load because any write of 0 that we can observe
        // leaves the field in a permanently zero state (so a
        // "stale" read of 0 is fine), and any other value is
        // confirmed via the CAS below.
        let mut n = inner.strong.load(Relaxed);

        loop {
            if n == 0 {
                return None;
            }

            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
            if n > MAX_REFCOUNT {
                unsafe {
                    abort();
                }
            }

            // Relaxed is valid for the same reason it is on Arc's Clone impl
            match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
                Ok(_) => return Some(Arc::from_inner(self.ptr)), // null checked above
                Err(old) => n = old,
            }
        }
    }

    /// Gets the number of strong (`Arc`) pointers pointing to this value.
    ///
    /// If `self` was created using [`Weak::new`], this will return 0.
    ///
    /// [`Weak::new`]: #method.new
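    ///
    /// A minimal usage sketch (this method is unstable, hence the feature
    /// gate):
    ///
    /// ```
    /// #![feature(weak_counts)]
    ///
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// drop(five);
    /// assert_eq!(0, weak_five.strong_count());
    /// ```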
    #[unstable(feature = "weak_counts", issue = "57977")]
    pub fn strong_count(&self) -> usize {
        if let Some(inner) = self.inner() {
            inner.strong.load(SeqCst)
        } else {
            0
        }
    }

    /// Gets an approximation of the number of `Weak` pointers pointing to this
    /// value.
    ///
    /// If `self` was created using [`Weak::new`], this will return `None`. If
    /// not, the returned count is at least 1, since `self` still points to
    /// the value.
    ///
    /// # Accuracy
    ///
    /// Due to implementation details, the returned value can be off by 1 in
    /// either direction when other threads are manipulating any `Arc`s or
    /// `Weak`s pointing to the same value.
    ///
    /// [`Weak::new`]: #method.new
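    ///
    /// A minimal usage sketch (again behind the unstable feature gate):
    ///
    /// ```
    /// #![feature(weak_counts)]
    ///
    /// use std::sync::{Arc, Weak};
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(Some(1), weak_five.weak_count());
    ///
    /// let _also_weak = Weak::clone(&weak_five);
    /// assert_eq!(Some(2), weak_five.weak_count());
    ///
    /// assert_eq!(None, Weak::<i32>::new().weak_count());
    /// ```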
    #[unstable(feature = "weak_counts", issue = "57977")]
    pub fn weak_count(&self) -> Option<usize> {
        // Due to the implicit weak pointer added when any strong pointers are
        // around, we cannot implement `weak_count` correctly since it
        // necessarily requires accessing the strong count and weak count in an
        // unsynchronized fashion. So this version is a bit racy.
        self.inner().map(|inner| {
            let strong = inner.strong.load(SeqCst);
            let weak = inner.weak.load(SeqCst);
            if strong == 0 {
                // If the last `Arc` has *just* been dropped, it might not yet
                // have removed the implicit weak count, so the value we get
                // here might be 1 too high.
                weak
            } else {
                // As long as there's still at least 1 `Arc` around, subtract
                // the implicit weak pointer.
                // Note that the last `Arc` might get dropped between the 2
                // loads we do above, removing the implicit weak pointer. This
                // means that the value might be 1 too low here. In order to not
                // return 0 here (which would happen if we're the only weak
                // pointer), we guard against that specifically.
                cmp::max(1, weak - 1)
            }
        })
    }

    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
    /// (i.e., when this `Weak` was created by `Weak::new`).
    #[inline]
    fn inner(&self) -> Option<&ArcInner<T>> {
        if is_dangling(self.ptr) {
            None
        } else {
            Some(unsafe { self.ptr.as_ref() })
        }
    }

    /// Returns `true` if the two `Weak`s point to the same value (not just
    /// values that compare as equal), or if both don't point to any value
    /// (because they were created with `Weak::new()`).
    ///
    /// # Notes
    ///
    /// Since this compares pointers it means that two `Weak`s created with
    /// `Weak::new()` will compare equal to each other, even though they don't
    /// point to any value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let first_rc = Arc::new(5);
    /// let first = Arc::downgrade(&first_rc);
    /// let second = Arc::downgrade(&first_rc);
    ///
    /// assert!(first.ptr_eq(&second));
    ///
    /// let third_rc = Arc::new(5);
    /// let third = Arc::downgrade(&third_rc);
    ///
    /// assert!(!first.ptr_eq(&third));
    /// ```
    ///
    /// Comparing `Weak::new`.
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let first = Weak::new();
    /// let second = Weak::new();
    /// assert!(first.ptr_eq(&second));
    ///
    /// let third_rc = Arc::new(());
    /// let third = Arc::downgrade(&third_rc);
    /// assert!(!first.ptr_eq(&third));
    /// ```
    #[inline]
    #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
    pub fn ptr_eq(&self, other: &Self) -> bool {
        self.ptr.as_ptr() == other.ptr.as_ptr()
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
    /// Makes a clone of the `Weak` pointer that points to the same value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let weak_five = Arc::downgrade(&Arc::new(5));
    ///
    /// let _ = Weak::clone(&weak_five);
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T> {
        let inner = if let Some(inner) = self.inner() {
            inner
        } else {
            return Weak { ptr: self.ptr };
        };
        // See comments in Arc::clone() for why this is relaxed. This can use a
        // fetch_add (ignoring the lock) because the weak count is only locked
        // when there are *no other* weak pointers in existence. (So we can't
        // be running this code in that case).
        let old_size = inner.weak.fetch_add(1, Relaxed);

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            unsafe {
                abort();
            }
        }

        return Weak { ptr: self.ptr };
    }
}

#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
    /// Constructs a new `Weak<T>`, without allocating memory.
    /// Calling [`upgrade`] on the return value always
    /// gives [`None`].
    ///
    /// [`None`]: ../../std/option/enum.Option.html#variant.None
    /// [`upgrade`]: ../../std/sync/struct.Weak.html#method.upgrade
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Default::default();
    /// assert!(empty.upgrade().is_none());
    /// ```
    fn default() -> Weak<T> {
        Weak::new()
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized> Drop for Weak<T> {
    /// Drops the `Weak` pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo = Arc::new(Foo);
    /// let weak_foo = Arc::downgrade(&foo);
    /// let other_weak_foo = Weak::clone(&weak_foo);
    ///
    /// drop(weak_foo); // Doesn't print anything
    /// drop(foo); // Prints "dropped!"
    ///
    /// assert!(other_weak_foo.upgrade().is_none());
    /// ```
    fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time
        // to deallocate the data entirely. See the discussion in Arc::drop()
        // about the memory orderings.
        //
        // It's not necessary to check for the locked state here, because the
        // weak count can only be locked if there was precisely one weak ref,
        // meaning that drop could only subsequently run ON that remaining
        // weak ref, which can only happen after the lock is released.
        let inner = if let Some(inner) = self.inner() {
            inner
        } else {
            return;
        };

        if inner.weak.fetch_sub(1, Release) == 1 {
            atomic::fence(Acquire);
            unsafe {
                Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()))
            }
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
trait ArcEqIdent<T: ?Sized + PartialEq> {
    fn eq(&self, other: &Arc<T>) -> bool;
    fn ne(&self, other: &Arc<T>) -> bool;
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
    #[inline]
    default fn eq(&self, other: &Arc<T>) -> bool {
        **self == **other
    }
    #[inline]
    default fn ne(&self, other: &Arc<T>) -> bool {
        **self != **other
    }
}

/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
/// store large values that are slow to clone but also expensive to compare for equality, so the
/// pointer check is more likely to pay for itself. It's also more likely for two `Arc` clones to
/// point to the same value than for two `&T`s to do so.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> {
    #[inline]
    fn eq(&self, other: &Arc<T>) -> bool {
        Arc::ptr_eq(self, other) || **self == **other
    }

    #[inline]
    fn ne(&self, other: &Arc<T>) -> bool {
        !Arc::ptr_eq(self, other) && **self != **other
    }
}
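
// Illustrative sketch (not part of the library itself): with the specialized
// `Eq` path above, comparing two clones of the same `Arc` is resolved by
// pointer identity alone and never needs to walk the (possibly large) value:
//
//     let a = Arc::new(vec![0u8; 1 << 20]);
//     let b = Arc::clone(&a);
//     assert!(a == b); // short-circuits via `Arc::ptr_eq`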

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    /// Equality for two `Arc`s.
    ///
    /// Two `Arc`s are equal if their inner values are equal.
    ///
    /// If `T` also implements `Eq`, two `Arc`s that point to the same value are
    /// always equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five == Arc::new(5));
    /// ```
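    ///
    /// The `Eq` requirement on that shortcut matters: `f32` is only
    /// `PartialEq`, so two `Arc`s pointing to the same `NaN` still compare
    /// unequal. A small sketch of that edge case:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let nan = Arc::new(std::f32::NAN);
    /// assert!(nan != nan.clone());
    /// ```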
    #[inline]
    fn eq(&self, other: &Arc<T>) -> bool {
        ArcEqIdent::eq(self, other)
    }

    /// Inequality for two `Arc`s.
    ///
    /// Two `Arc`s are unequal if their inner values are unequal.
    ///
    /// If `T` also implements `Eq`, two `Arc`s that point to the same value are
    /// never unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five != Arc::new(6));
    /// ```
    #[inline]
    fn ne(&self, other: &Arc<T>) -> bool {
        ArcEqIdent::ne(self, other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    /// Partial comparison for two `Arc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
    /// ```
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five < Arc::new(6));
    /// ```
    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    /// 'Less than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five <= Arc::new(5));
    /// ```
    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    /// Greater-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five > Arc::new(4));
    /// ```
    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    /// 'Greater than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five >= Arc::new(5));
    /// ```
    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord> Ord for Arc<T> {
    /// Comparison for two `Arc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
    /// ```
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> Eq for Arc<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(&**self as *const T), f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<i32> = Default::default();
    /// assert_eq!(*x, 0);
    /// ```
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash> Hash for Arc<T> {
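    /// Hashes the inner value; an `Arc<T>` hashes identically to the `T` it
    /// wraps, since this simply delegates to `T`'s `Hash` implementation.
    ///
    /// A small illustrative sketch:
    ///
    /// ```
    /// use std::collections::hash_map::DefaultHasher;
    /// use std::hash::{Hash, Hasher};
    /// use std::sync::Arc;
    ///
    /// fn hash_of<T: Hash>(t: &T) -> u64 {
    ///     let mut hasher = DefaultHasher::new();
    ///     t.hash(&mut hasher);
    ///     hasher.finish()
    /// }
    ///
    /// assert_eq!(hash_of(&Arc::new(7)), hash_of(&7));
    /// ```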
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Arc<T> {
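    /// Converts a `T` into an `Arc<T>` by moving the value into a new
    /// allocation (this simply delegates to `Arc::new`).
    ///
    /// A small usage sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = 5;
    /// let arc: Arc<i32> = Arc::from(x);
    /// assert_eq!(5, *arc);
    /// ```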
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: Clone> From<&[T]> for Arc<[T]> {
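    /// Allocates a reference-counted slice and clones `v`'s items into it.
    ///
    /// A small usage sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let original: &[i32] = &[1, 2, 3];
    /// let shared: Arc<[i32]> = Arc::from(original);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```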
    #[inline]
    fn from(v: &[T]) -> Arc<[T]> {
        <Self as ArcFromSlice<T>>::from_slice(v)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<&str> for Arc<str> {
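    /// Allocates a reference-counted `str` and copies `v` into it.
    ///
    /// A small usage sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let shared: Arc<str> = Arc::from("eggplant");
    /// assert_eq!("eggplant", &shared[..]);
    /// ```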
    #[inline]
    fn from(v: &str) -> Arc<str> {
        let arc = Arc::<[u8]>::from(v.as_bytes());
        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<String> for Arc<str> {
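    /// Allocates a reference-counted `str` and copies `v` into it.
    ///
    /// A small usage sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let unique: String = "eggplant".to_owned();
    /// let shared: Arc<str> = Arc::from(unique);
    /// assert_eq!("eggplant", &shared[..]);
    /// ```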
    #[inline]
    fn from(v: String) -> Arc<str> {
        Arc::from(&v[..])
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: ?Sized> From<Box<T>> for Arc<T> {
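    /// Moves a boxed value into a new reference-counted allocation.
    ///
    /// A small usage sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let unique: Box<str> = Box::from("eggplant");
    /// let shared: Arc<str> = Arc::from(unique);
    /// assert_eq!("eggplant", &shared[..]);
    /// ```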
    #[inline]
    fn from(v: Box<T>) -> Arc<T> {
        Arc::from_box(v)
    }
}

#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T> From<Vec<T>> for Arc<[T]> {
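    /// Allocates a reference-counted slice and moves `v`'s items into it.
    ///
    /// A small usage sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let unique: Vec<i32> = vec![1, 2, 3];
    /// let shared: Arc<[i32]> = Arc::from(unique);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```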
    #[inline]
    fn from(mut v: Vec<T>) -> Arc<[T]> {
        unsafe {
            let arc = Arc::copy_from_slice(&v);

            // Allow the Vec to free its memory, but not destroy its contents.
            v.set_len(0);

            arc
        }
    }
}

#[unstable(feature = "boxed_slice_try_from", issue = "0")]
impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]>
where
    [T; N]: LengthAtMost32,
{
    type Error = Arc<[T]>;

    fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
        if boxed_slice.len() == N {
            Ok(unsafe { Arc::from_raw(Arc::into_raw(boxed_slice) as *mut [T; N]) })
        } else {
            Err(boxed_slice)
        }
    }
}
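
// Illustrative sketch (not a doctest, since the impl above is unstable): the
// conversion reuses the existing allocation when the length matches, and
// hands the original `Arc<[T]>` back as the error otherwise.
//
//     use std::convert::TryFrom;
//
//     let slice: Arc<[i32]> = Arc::from(vec![1, 2, 3]);
//     let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
//     assert_eq!(*array, [1, 2, 3]);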

#[stable(feature = "shared_from_iter", since = "1.37.0")]
impl<T> iter::FromIterator<T> for Arc<[T]> {
    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
    ///
    /// # Performance characteristics
    ///
    /// ## The general case
    ///
    /// In the general case, collecting into `Arc<[T]>` is done by first
    /// collecting into a `Vec<T>`. That is, when writing the following:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// this behaves as if we wrote:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// This will allocate as many times as needed for constructing the `Vec<T>`
    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
    ///
    /// ## Iterators of known length
    ///
    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
    /// a single allocation will be made for the `Arc<[T]>`. For example:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
    /// ```
    fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
        ArcFromIter::from_iter(iter.into_iter())
    }
}

/// Specialization trait used for collecting into `Arc<[T]>`.
trait ArcFromIter<T, I> {
    fn from_iter(iter: I) -> Self;
}

impl<T, I: Iterator<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
    default fn from_iter(iter: I) -> Self {
        iter.collect::<Vec<T>>().into()
    }
}

impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
    default fn from_iter(iter: I) -> Self {
        // This is the case for a `TrustedLen` iterator.
        let (low, high) = iter.size_hint();
        if let Some(high) = high {
            debug_assert_eq!(
                low, high,
                "TrustedLen iterator's size hint is not exact: {:?}",
                (low, high)
            );

            unsafe {
                // SAFETY: We need to ensure that the iterator has an exact
                // length, and we have: `TrustedLen` guarantees the size hint
                // is exact, and we've checked that the upper bound is `Some`.
                Arc::from_iter_exact(iter, low)
            }
        } else {
            // Fall back to normal implementation.
            iter.collect::<Vec<T>>().into()
        }
    }
}

impl<'a, T: 'a + Clone> ArcFromIter<&'a T, slice::Iter<'a, T>> for Arc<[T]> {
    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
        // Delegate to `impl<T: Clone> From<&[T]> for Arc<[T]>`.
        //
        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
        // which is even more performant.
        //
        // In the fall-back case we have `T: Clone`. This is still better
        // than the `TrustedLen` implementation as slices have a known length
        // and so we get to avoid calling `size_hint` and avoid the branching.
        iter.as_slice().into()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized> AsRef<T> for Arc<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized> Unpin for Arc<T> {}

/// Computes the offset of the data field within `ArcInner`.
unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
    // Align the unsized value to the end of the `ArcInner`.
    // Because it is `?Sized`, it will always be the last field in memory.
    data_offset_align(align_of_val(&*ptr))
}

/// Computes the offset of the data field within `ArcInner`.
///
/// Unlike [`data_offset`], this doesn't need the pointer, but it works only on `T: Sized`.
fn data_offset_sized<T>() -> isize {
    data_offset_align(align_of::<T>())
}

#[inline]
fn data_offset_align(align: usize) -> isize {
    let layout = Layout::new::<ArcInner<()>>();
2143 }