use core::intrinsics::abort;
use core::iter;
use core::marker::{PhantomData, Unpin, Unsize};
-use core::mem::{self, align_of_val, size_of_val};
+use core::mem::{self, align_of_val_raw, size_of_val};
use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
use core::pin::Pin;
use core::ptr::{self, NonNull};
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
-use crate::alloc::{box_free, handle_alloc_error, AllocError, Allocator, Global, Layout};
+use crate::alloc::{
+ box_free, handle_alloc_error, AllocError, Allocator, Global, Layout, WriteCloneIntoRaw,
+};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::rc::is_dangling;
// reference into a strong reference.
unsafe {
let inner = init_ptr.as_ptr();
- ptr::write(&raw mut (*inner).data, data);
+ ptr::write(ptr::addr_of_mut!((*inner).data), data);
// The above write to the data field must be visible to any threads which
// observe a non-zero strong count. Therefore we need at least "Release" ordering
unsafe { Pin::new_unchecked(Arc::new(data)) }
}
+ /// Constructs a new `Arc<T>`, returning an error if allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::try_new(5)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
+ // Start the weak pointer count as 1 which is the weak pointer that's
+ // held by all the strong pointers (kinda), see std/rc.rs for more info
+ let x: Box<_> = Box::try_new(ArcInner {
+ strong: atomic::AtomicUsize::new(1),
+ weak: atomic::AtomicUsize::new(1),
+ data,
+ })?;
+ Ok(Self::from_inner(Box::leak(x).into()))
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents, returning an error
+ /// if allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit, allocator_api)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut five = Arc::<u32>::try_new_uninit()?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
+ unsafe {
+ Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate(layout),
+ |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ )?))
+ }
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes, returning an error if allocation fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit, allocator_api)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let zero = Arc::<u32>::try_new_zeroed()?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
+ unsafe {
+ Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ )?))
+ }
+ }
/// Returns the inner value, if the `Arc` has exactly one strong reference.
///
/// Otherwise, an [`Err`] is returned with the same `Arc` that was
// SAFETY: This cannot go through Deref::deref or RcBoxPtr::inner because
// this is required to retain raw/mut provenance such that e.g. `get_mut` can
// write through the pointer after the Rc is recovered through `from_raw`.
- unsafe { &raw const (*ptr).data }
+ unsafe { ptr::addr_of_mut!((*ptr).data) }
}
/// Constructs an `Arc<T>` from a raw pointer.
let offset = data_offset(ptr);
// Reverse the offset to find the original ArcInner.
- let fake_ptr = ptr as *mut ArcInner<T>;
- let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
+ let arc_ptr = (ptr as *mut ArcInner<T>).set_ptr_value((ptr as *mut u8).offset(-offset));
Self::from_ptr(arc_ptr)
}
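+ // `set_ptr_value` (also used in `allocate_for_ptr` and `Weak::from_raw`)
+ // returns a pointer with the given address but the original pointer's
+ // metadata (slice length or vtable). A minimal sketch, assuming the unstable
+ // `set_ptr_value` and `slice_ptr_len` APIs:
+ //
+ //     let p: *mut [u8] = &mut [1u8, 2, 3] as *mut [u8]; // fat pointer, len 3
+ //     let q = p.set_ptr_value(ptr::null_mut());         // new address, len 3
+ //     assert_eq!(q.len(), 3);
+ //
+ // This supersedes the hand-rolled `set_data_ptr` helper removed below.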
match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
Ok(_) => {
// Make sure we do not create a dangling Weak
- debug_assert!(!is_dangling(this.ptr));
+ debug_assert!(!is_dangling(this.ptr.as_ptr()));
return Weak { ptr: this.ptr };
}
Err(old) => cur = old,
/// # Examples
///
/// ```
- /// #![feature(arc_mutate_strong_count)]
- ///
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// unsafe {
/// let ptr = Arc::into_raw(five);
- /// Arc::incr_strong_count(ptr);
+ /// Arc::increment_strong_count(ptr);
///
/// // This assertion is deterministic because we haven't shared
/// // the `Arc` between threads.
/// let five = Arc::from_raw(ptr);
/// assert_eq!(2, Arc::strong_count(&five));
/// }
/// ```
#[inline]
- #[unstable(feature = "arc_mutate_strong_count", issue = "71983")]
- pub unsafe fn incr_strong_count(ptr: *const T) {
+ #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
+ pub unsafe fn increment_strong_count(ptr: *const T) {
// Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
let arc = unsafe { mem::ManuallyDrop::new(Arc::<T>::from_raw(ptr)) };
// Now increase refcount, but don't drop new refcount either
/// # Examples
///
/// ```
- /// #![feature(arc_mutate_strong_count)]
- ///
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
///
/// unsafe {
/// let ptr = Arc::into_raw(five);
- /// Arc::incr_strong_count(ptr);
+ /// Arc::increment_strong_count(ptr);
///
/// // Those assertions are deterministic because we haven't shared
/// // the `Arc` between threads.
/// let five = Arc::from_raw(ptr);
/// assert_eq!(2, Arc::strong_count(&five));
- /// Arc::decr_strong_count(ptr);
+ /// Arc::decrement_strong_count(ptr);
/// assert_eq!(1, Arc::strong_count(&five));
/// }
/// ```
#[inline]
- #[unstable(feature = "arc_mutate_strong_count", issue = "71983")]
- pub unsafe fn decr_strong_count(ptr: *const T) {
+ #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
+ pub unsafe fn decrement_strong_count(ptr: *const T) {
unsafe { mem::drop(Arc::from_raw(ptr)) };
}
// `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
// reference (see #54908).
let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ unsafe {
+ Arc::try_allocate_for_layout(value_layout, allocate, mem_to_arcinner)
+ .unwrap_or_else(|_| handle_alloc_error(layout))
+ }
+ }
+
+ /// Allocates an `ArcInner<T>` with sufficient space for
+ /// a possibly-unsized inner value where the value has the layout provided,
+ /// returning an error if allocation fails.
+ ///
+ /// The function `mem_to_arcinner` is called with the data pointer
+ /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
+ unsafe fn try_allocate_for_layout(
+ value_layout: Layout,
+ allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
+ mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
+ ) -> Result<*mut ArcInner<T>, AllocError> {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
+ // reference (see #54908).
+ let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
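+ // As a worked example (an implementation detail, not a guarantee): on a
+ // typical 64-bit target, `ArcInner<()>` is the two `AtomicUsize` counters
+ // (size 16, align 8). Extending with `value_layout` for a `u32` places the
+ // value at offset 16, and `pad_to_align` rounds the total size up to 24.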
- let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
+ let ptr = allocate(layout)?;
// Initialize the ArcInner
let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));
}
- inner
+ Ok(inner)
}
/// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
Self::allocate_for_layout(
Layout::for_value(&*ptr),
|layout| Global.allocate(layout),
- |mem| set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>,
+ |mem| (ptr as *mut ArcInner<T>).set_ptr_value(mem) as *mut ArcInner<T>,
)
}
}
)
}
}
}
-/// Sets the data pointer of a `?Sized` raw pointer.
-///
-/// For a slice/trait object, this sets the `data` field and leaves the rest
-/// unchanged. For a sized raw pointer, this simply sets the pointer.
-unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
- unsafe {
- ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
- }
- ptr
-}
-
impl<T> Arc<[T]> {
/// Copy elements from slice into newly allocated Arc<\[T\]>
///
/// Unsafe because the caller must either take ownership or bind `T: Copy`.
// weak count, there's no chance the ArcInner itself could be
// deallocated.
if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
- // Another strong pointer exists; clone
- *this = Arc::new((**this).clone());
+ // Another strong pointer exists, so we must clone.
+ // Pre-allocate memory to allow writing the cloned value directly.
+ let mut arc = Self::new_uninit();
+ unsafe {
+ let data = Arc::get_mut_unchecked(&mut arc);
+ (**this).write_clone_into_raw(data.as_mut_ptr());
+ *this = arc.assume_init();
+ }
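+ // `WriteCloneIntoRaw` (imported from `crate::alloc`) clones a value
+ // directly into uninitialized memory: roughly `target.write(self.clone())`
+ // in the generic case, with a `T: Copy` specialization that does
+ // `target.copy_from_nonoverlapping(self, 1)`. Allocating before cloning
+ // lets the optimizer construct the clone in place rather than move it.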
} else if this.inner().weak.load(Relaxed) != 1 {
// Relaxed suffices in the above because this is fundamentally an
// optimization: we are always racing with weak pointers being
// Materialize our own implicit weak pointer, so that it can clean
// up the ArcInner as needed.
- let weak = Weak { ptr: this.ptr };
+ let _weak = Weak { ptr: this.ptr };
- // mark the data itself as already deallocated
+ // Can just steal the data, all that's left is Weaks
+ let mut arc = Self::new_uninit();
unsafe {
- // there is no data race in the implicit write caused by `read`
- // here (due to zeroing) because data is no longer accessed by
- // other threads (due to there being no more strong refs at this
- // point).
- let mut swap = Arc::new(ptr::read(&weak.ptr.as_ref().data));
- mem::swap(this, &mut swap);
- mem::forget(swap);
+ let data = Arc::get_mut_unchecked(&mut arc);
+ data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1);
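+ // `ptr::write` overwrites `*this` without dropping the old `Arc`:
+ // its strong count is already zero, and the `_weak` materialized
+ // above deallocates the old `ArcInner` once every weak ref is gone.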
+ ptr::write(this, arc.assume_init());
}
} else {
// We were the sole reference of either kind; bump back up the
strong: &'a atomic::AtomicUsize,
}
-impl<T> Weak<T> {
+impl<T: ?Sized> Weak<T> {
/// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
///
/// The pointer is valid only if there are some strong references. The pointer may be dangling,
pub fn as_ptr(&self) -> *const T {
let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
- // SAFETY: we must offset the pointer manually, and said pointer may be
- // a dangling weak (usize::MAX) if T is sized. data_offset is safe to call,
- // because we know that a pointer to unsized T was derived from a real
- // unsized T, as dangling weaks are only created for sized T. wrapping_offset
- // is used so that we can use the same code path for the non-dangling
- // unsized case and the potentially dangling sized case.
- unsafe {
- let offset = data_offset(ptr as *mut T);
- set_data_ptr(ptr as *mut T, (ptr as *mut u8).wrapping_offset(offset))
+ if is_dangling(ptr) {
+ // If the pointer is dangling, we return the sentinel directly. This cannot be
+ // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
+ ptr as *const T
+ } else {
+ // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
+ // The payload may be dropped at this point, and we have to maintain provenance,
+ // so use raw pointer manipulation.
+ unsafe { ptr::addr_of_mut!((*ptr).data) }
}
}
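+ // For reference, `is_dangling` (defined in `rc.rs`) now takes a raw pointer
+ // instead of a `NonNull`; a sketch of the check it performs:
+ //
+ //     pub(crate) fn is_dangling<T: ?Sized>(ptr: *mut T) -> bool {
+ //         (ptr as *mut ()) as usize == usize::MAX
+ //     }
+ //
+ // `Weak::new` uses `usize::MAX` as its sentinel address, which can never be
+ // the address of a live `ArcInner` allocation.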
/// [`forget`]: std::mem::forget
#[stable(feature = "weak_into_raw", since = "1.45.0")]
pub unsafe fn from_raw(ptr: *const T) -> Self {
- // SAFETY: data_offset is safe to call, because this pointer originates from a Weak.
// See Weak::as_ptr for context on how the input pointer is derived.
- let offset = unsafe { data_offset(ptr) };
- // Reverse the offset to find the original ArcInner.
- // SAFETY: we use wrapping_offset here because the pointer may be dangling (but only if T: Sized)
- let ptr = unsafe {
- set_data_ptr(ptr as *mut ArcInner<T>, (ptr as *mut u8).wrapping_offset(-offset))
+ let ptr = if is_dangling(ptr as *mut T) {
+ // This is a dangling Weak.
+ ptr as *mut ArcInner<T>
+ } else {
+ // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
+ // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
+ let offset = unsafe { data_offset(ptr) };
+ // Thus, we reverse the offset to get the whole ArcInner.
+ // SAFETY: the pointer originated from a Weak, so this offset is safe.
+ unsafe { (ptr as *mut ArcInner<T>).set_ptr_value((ptr as *mut u8).offset(-offset)) }
};
// SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
- unsafe { Weak { ptr: NonNull::new_unchecked(ptr) } }
+ Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } }
}
}
/// (i.e., when this `Weak` was created by `Weak::new`).
#[inline]
fn inner(&self) -> Option<WeakInner<'_>> {
- if is_dangling(self.ptr) {
+ if is_dangling(self.ptr.as_ptr()) {
None
} else {
// We are careful to *not* create a reference covering the "data" field, as
if inner.weak.fetch_sub(1, Release) == 1 {
acquire!(inner.weak);
- unsafe { Global.deallocate(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())) }
+ unsafe { Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr())) }
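+ // (`Layout::for_value_raw` derives the layout from pointer metadata alone,
+ // so no reference to the already-dropped payload is materialized.)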
}
}
}
#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized> Unpin for Arc<T> {}
-/// Get the offset within an `ArcInner` for
-/// a payload of type described by a pointer.
+/// Get the offset within an `ArcInner` for the payload behind a pointer.
///
/// # Safety
///
-/// This has the same safety requirements as `align_of_val_raw`. In effect:
-///
-/// - This function is safe for any argument if `T` is sized, and
-/// - if `T` is unsized, the pointer must have appropriate pointer metadata
-/// acquired from the real instance that you are getting this offset for.
+/// The pointer must point to (and have valid metadata for) a previously
+/// valid instance of T, but the T is allowed to be dropped.
unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
- // Align the unsized value to the end of the `ArcInner`.
- // Because it is `?Sized`, it will always be the last field in memory.
- // Note: This is a detail of the current implementation of the compiler,
- // and is not a guaranteed language detail. Do not rely on it outside of std.
- unsafe { data_offset_align(align_of_val(&*ptr)) }
+ // Align the unsized value to the end of the ArcInner.
+ // Because ArcInner is repr(C), its data field will always be the last field in memory.
+ // SAFETY: since the only unsized types possible are slices, trait objects,
+ // and extern types, the input safety requirement is currently enough to
+ // satisfy the requirements of align_of_val_raw; this is an implementation
+ // detail of the language that may not be relied upon outside of std.
+ unsafe { data_offset_align(align_of_val_raw(ptr)) }
}
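+ // A concrete illustration of the offset (again an implementation detail): on
+ // a typical 64-bit target the two counters occupy bytes 0..16, so for a `u32`
+ // payload `data_offset` returns 16; higher-alignment payloads are pushed to
+ // the next multiple of their alignment past 16.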
#[inline]