use core::iter::{FromIterator, Iterator};
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ops::{Deref, DerefMut};
use core::ptr::{self, NonNull};
use core::{cmp, fmt, hash, isize, slice, usize};
use alloc::{
    borrow::{Borrow, BorrowMut},
    boxed::Box,
    string::String,
    vec,
    vec::Vec,
};
use crate::buf::{IntoIter, UninitSlice};
use crate::bytes::Vtable;
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::{Buf, BufMut, Bytes};
/// A unique reference to a contiguous slice of memory.
///
/// `BytesMut` represents a unique view into a potentially shared memory region.
/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
/// mutate the memory.
///
/// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
/// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
/// same `buf` overlaps with its slice. That guarantee means that a write lock
/// is not required.
///
/// # Growth
///
/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
/// necessary. However, explicitly reserving the required space up-front before
/// a series of inserts will be more efficient.
///
/// # Examples
///
/// ```
/// use bytes::{BytesMut, BufMut};
///
/// let mut buf = BytesMut::with_capacity(64);
///
/// buf.put_u8(b'h');
/// buf.put_u8(b'e');
/// buf.put(&b"llo"[..]);
///
/// assert_eq!(&buf[..], b"hello");
///
/// // Freeze the buffer so that it can be shared
/// let a = buf.freeze();
///
/// // This does not allocate, instead `b` points to the same memory.
/// let b = a.clone();
///
/// assert_eq!(&a[..], b"hello");
/// assert_eq!(&b[..], b"hello");
/// ```
pub struct BytesMut {
    ptr: NonNull<u8>,
    len: usize,
    cap: usize,
    data: *mut Shared,
}
// Thread-safe reference-counted container for the shared storage. This is
// mostly the same as `core::sync::Arc` but without the weak counter. The ref
// counting fns are based on the ones found in `std`.
//
// The main reason to use `Shared` instead of `core::sync::Arc` is that it ends
// up making the overall code simpler and easier to reason about. This is due to
// some of the logic around setting `Inner::arc` and other ways the `arc` field
// is used. Using `Arc` ended up requiring a number of funky transmutes and
// other shenanigans to make it work.
struct Shared {
    vec: Vec<u8>,
    original_capacity_repr: usize,
    ref_count: AtomicUsize,
}
// Buffer storage strategy flags.
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

// The max original capacity value. Any `Bytes` allocated with a greater initial
// capacity will default to this.
const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
// The original capacity algorithm will not take effect unless the originally
// allocated capacity was at least 1kb in size.
const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
// The original capacity is stored in powers of 2 starting at 1kb to a max of
// 64kb. Representing it as such requires only 3 bits of storage.
const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
const ORIGINAL_CAPACITY_OFFSET: usize = 2;

// When the storage is in the `Vec` representation, the pointer can be advanced
// at most this value, because the storage available to track the offset is
// `usize` minus the number of KIND bits and ORIGINAL_CAPACITY bits.
const VEC_POS_OFFSET: usize = 5;
const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
const NOT_VEC_POS_MASK: usize = 0b11111;
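
// Illustrative sketch (added for clarity, not an upstream comment): in the
// `KIND_VEC` representation, `data` is not a real pointer but a packed
// bitfield. On a 64-bit target it is assumed to be laid out as:
//
//   bit  0      kind flag (0b1 = KIND_VEC; a real `Shared` pointer has 0b0)
//   bits 2-4    original capacity repr (see ORIGINAL_CAPACITY_MASK)
//   bits 5-63   vec position, i.e. how far `ptr` has advanced into the Vec
//
// `get_vec_pos` therefore reads `data >> VEC_POS_OFFSET`, and `set_vec_pos`
// keeps the low five bits (NOT_VEC_POS_MASK) intact while replacing the rest.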
#[cfg(target_pointer_width = "64")]
const PTR_WIDTH: usize = 64;
#[cfg(target_pointer_width = "32")]
const PTR_WIDTH: usize = 32;
/*
 *
 * ===== BytesMut =====
 *
 */

impl BytesMut {
    /// Creates a new `BytesMut` with the specified capacity.
    ///
    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
    /// without reallocating.
    ///
    /// It is important to note that this function does not specify the length
    /// of the returned `BytesMut`, but only the capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::with_capacity(64);
    ///
    /// // `bytes` contains no data, even though there is capacity
    /// assert_eq!(bytes.len(), 0);
    ///
    /// bytes.put(&b"hello world"[..]);
    ///
    /// assert_eq!(&bytes[..], b"hello world");
    /// ```
    #[inline]
    pub fn with_capacity(capacity: usize) -> BytesMut {
        BytesMut::from_vec(Vec::with_capacity(capacity))
    }
    /// Creates a new `BytesMut` with default capacity.
    ///
    /// The resulting object has a length of 0 and unspecified capacity.
    /// This function does not allocate.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::new();
    ///
    /// assert_eq!(0, bytes.len());
    ///
    /// bytes.reserve(2);
    /// bytes.put_slice(b"xy");
    ///
    /// assert_eq!(&b"xy"[..], &bytes[..]);
    /// ```
    #[inline]
    pub fn new() -> BytesMut {
        BytesMut::with_capacity(0)
    }
    /// Returns the number of bytes contained in this `BytesMut`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }
    /// Returns true if the `BytesMut` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert!(b.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert_eq!(b.capacity(), 64);
    /// ```
    #[inline]
    pub fn capacity(&self) -> usize {
        self.cap
    }
    /// Converts `self` into an immutable `Bytes`.
    ///
    /// The conversion is zero cost and is used to indicate that the slice
    /// referenced by the handle will no longer be mutated. Once the conversion
    /// is done, the handle can be cloned and shared across threads.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    /// use std::thread;
    ///
    /// let mut b = BytesMut::with_capacity(64);
    /// b.put(&b"hello world"[..]);
    /// let b1 = b.freeze();
    /// let b2 = b1.clone();
    ///
    /// let th = thread::spawn(move || {
    ///     assert_eq!(&b1[..], b"hello world");
    /// });
    ///
    /// assert_eq!(&b2[..], b"hello world");
    /// th.join().unwrap();
    /// ```
    #[inline]
    pub fn freeze(mut self) -> Bytes {
        if self.kind() == KIND_VEC {
            // Just re-use `Bytes` internal Vec vtable
            unsafe {
                let (off, _) = self.get_vec_pos();
                let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
                mem::forget(self);
                let mut b: Bytes = vec.into();
                b.advance(off);
                b
            }
        } else {
            debug_assert_eq!(self.kind(), KIND_ARC);

            let ptr = self.ptr.as_ptr();
            let len = self.len;
            let data = AtomicPtr::new(self.data.cast());
            mem::forget(self);
            unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
        }
    }
    /// Creates a new `BytesMut` of length `len`, with all bytes initialized to
    /// zero.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let zeros = BytesMut::zeroed(42);
    ///
    /// assert_eq!(zeros.len(), 42);
    /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
    /// ```
    pub fn zeroed(len: usize) -> BytesMut {
        BytesMut::from_vec(vec![0; len])
    }
    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned
    /// `BytesMut` contains elements `[at, capacity)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count
    /// and sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_off(5);
    ///
    /// a[0] = b'j';
    /// b[0] = b'!';
    ///
    /// assert_eq!(&a[..], b"jello");
    /// assert_eq!(&b[..], b"!world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > capacity`.
    #[must_use = "consider BytesMut::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> BytesMut {
        assert!(
            at <= self.capacity(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.capacity(),
        );

        unsafe {
            let mut other = self.shallow_clone();
            other.set_start(at);
            self.set_end(at);
            other
        }
    }
    /// Removes the bytes from the current view, returning them in a new
    /// `BytesMut` handle.
    ///
    /// Afterwards, `self` will be empty, but will retain any additional
    /// capacity that it had before the operation. This is identical to
    /// `self.split_to(self.len())`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(1024);
    /// buf.put(&b"hello world"[..]);
    ///
    /// let other = buf.split();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(1013, buf.capacity());
    ///
    /// assert_eq!(other, b"hello world"[..]);
    /// ```
    #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"]
    pub fn split(&mut self) -> BytesMut {
        let len = self.len();
        self.split_to(len)
    }
    /// Splits the buffer into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
    /// contains elements `[0, at)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_to(5);
    ///
    /// a[0] = b'!';
    /// b[0] = b'j';
    ///
    /// assert_eq!(&a[..], b"!world");
    /// assert_eq!(&b[..], b"jello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider BytesMut::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> BytesMut {
        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        unsafe {
            let mut other = self.shallow_clone();
            other.set_end(at);
            self.set_start(at);
            other
        }
    }
    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest.
    ///
    /// If `len` is greater than the buffer's current length, this has no
    /// effect.
    ///
    /// Existing underlying capacity is preserved.
    ///
    /// The [`split_off`] method can emulate `truncate`, but this causes the
    /// excess bytes to be returned instead of dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```
    ///
    /// [`split_off`]: #method.split_off
    pub fn truncate(&mut self, len: usize) {
        if len <= self.len() {
            unsafe {
                self.set_len(len);
            }
        }
    }
    /// Clears the buffer, removing all data. Existing capacity is preserved.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```
    pub fn clear(&mut self) {
        self.truncate(0);
    }
    /// Resizes the buffer so that `len` is equal to `new_len`.
    ///
    /// If `new_len` is greater than `len`, the buffer is extended by the
    /// difference with each additional byte set to `value`. If `new_len` is
    /// less than `len`, the buffer is simply truncated.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::new();
    ///
    /// buf.resize(3, 0x1);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
    ///
    /// buf.resize(2, 0x2);
    /// assert_eq!(&buf[..], &[0x1, 0x1]);
    ///
    /// buf.resize(4, 0x3);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
    /// ```
    pub fn resize(&mut self, new_len: usize, value: u8) {
        let len = self.len();
        if new_len > len {
            let additional = new_len - len;
            self.reserve(additional);
            unsafe {
                let dst = self.chunk_mut().as_mut_ptr();
                ptr::write_bytes(dst, value, additional);
                self.set_len(new_len);
            }
        } else {
            self.truncate(new_len);
        }
    }
    /// Sets the length of the buffer.
    ///
    /// This will explicitly set the size of the buffer without actually
    /// modifying the data, so it is up to the caller to ensure that the data
    /// has been initialized.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut b = BytesMut::from(&b"hello world"[..]);
    ///
    /// unsafe {
    ///     b.set_len(5);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello");
    ///
    /// unsafe {
    ///     b.set_len(11);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello world");
    /// ```
    #[inline]
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len <= self.cap, "set_len out of bounds");
        self.len = len;
    }
    /// Reserves capacity for at least `additional` more bytes to be inserted
    /// into the given `BytesMut`.
    ///
    /// More than `additional` bytes may be reserved in order to avoid frequent
    /// reallocations. A call to `reserve` may result in an allocation.
    ///
    /// Before allocating new buffer space, the function will attempt to reclaim
    /// space in the existing buffer. If the current handle references a view
    /// into a larger original buffer, and all other handles referencing part
    /// of the same original buffer have been dropped, then the current view
    /// can be copied/shifted to the front of the buffer and the handle can take
    /// ownership of the full buffer, provided that the full buffer is large
    /// enough to fit the requested additional capacity.
    ///
    /// This optimization will only happen if shifting the data from the current
    /// view to the front of the buffer is not too expensive in terms of the
    /// (amortized) time required. The precise condition is subject to change;
    /// as of now, the length of the data being shifted needs to be at least as
    /// large as the distance that it's shifted by. If the current view is empty
    /// and the original buffer is large enough to fit the requested additional
    /// capacity, then reallocations will never happen.
    ///
    /// # Examples
    ///
    /// In the following example, a new buffer is allocated.
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello"[..]);
    /// buf.reserve(64);
    /// assert!(buf.capacity() >= 69);
    /// ```
    ///
    /// In the following example, the existing buffer is reclaimed.
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(128);
    /// buf.put(&[0; 64][..]);
    ///
    /// let ptr = buf.as_ptr();
    /// let other = buf.split();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(buf.capacity(), 64);
    ///
    /// drop(other);
    /// buf.reserve(128);
    ///
    /// assert_eq!(buf.capacity(), 128);
    /// assert_eq!(buf.as_ptr(), ptr);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        let len = self.len();
        let rem = self.capacity() - len;

        if additional <= rem {
            // The handle can already store at least `additional` more bytes, so
            // there is no further work needed to be done.
            return;
        }

        self.reserve_inner(additional);
    }
    // In separate function to allow the short-circuits in `reserve` to
    // be inline-able. Significantly helps performance.
    fn reserve_inner(&mut self, additional: usize) {
        let len = self.len();
        let kind = self.kind();

        if kind == KIND_VEC {
            // If there's enough free space before the start of the buffer, then
            // just copy the data backwards and reuse the already-allocated
            // space.
            //
            // Otherwise, since backed by a vector, use `Vec::reserve`.
            //
            // We need to make sure that this optimization does not kill the
            // amortized runtimes of BytesMut's operations.
            unsafe {
                let (off, prev) = self.get_vec_pos();

                // Only reuse space if we can satisfy the requested additional space.
                //
                // Also check if the value of `off` suggests that enough bytes
                // have been read to account for the overhead of shifting all
                // the data (in an amortized analysis).
                // Hence the condition `off >= self.len()`.
                //
                // This condition also already implies that the buffer is going
                // to be (at least) half-empty in the end; so we do not break
                // the (amortized) runtime with future resizes of the underlying
                // `Vec`.
                //
                // [For more details check issue #524, and PR #525.]
                if self.capacity() - self.len() + off >= additional && off >= self.len() {
                    // There's enough space, and it's not too much overhead:
                    // reuse the space!
                    //
                    // Just move the pointer back to the start after copying
                    // data back.
                    let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
                    // Since `off >= self.len()`, the two regions don't overlap.
                    ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
                    self.ptr = vptr(base_ptr);
                    self.set_vec_pos(0, prev);

                    // Length stays constant, but since we moved backwards we
                    // can gain capacity back.
                    self.cap += off;
                } else {
                    // Not enough space, or reusing might be too much overhead:
                    // allocate more space!
                    let mut v =
                        ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
                    v.reserve(additional);

                    // Update the info
                    self.ptr = vptr(v.as_mut_ptr().add(off));
                    self.len = v.len() - off;
                    self.cap = v.capacity() - off;
                }

                return;
            }
        }

        debug_assert_eq!(kind, KIND_ARC);
        let shared: *mut Shared = self.data;

        // Reserving involves abandoning the currently shared buffer and
        // allocating a new vector with the requested capacity.
        //
        // Compute the new capacity
        let mut new_cap = len.checked_add(additional).expect("overflow");

        let original_capacity;
        let original_capacity_repr;

        unsafe {
            original_capacity_repr = (*shared).original_capacity_repr;
            original_capacity = original_capacity_from_repr(original_capacity_repr);

            // First, try to reclaim the buffer. This is possible if the current
            // handle is the only outstanding handle pointing to the buffer.
            if (*shared).is_unique() {
                // This is the only handle to the buffer. It can be reclaimed.
                // However, before doing the work of copying data, check to make
                // sure that the vector has enough capacity.
                let v = &mut (*shared).vec;

                let v_capacity = v.capacity();
                let ptr = v.as_mut_ptr();

                let offset = offset_from(self.ptr.as_ptr(), ptr);

                // Compare the condition in the `kind == KIND_VEC` case above
                // for more details.
                if v_capacity >= new_cap + offset {
                    self.cap = new_cap;
                    // no copy is necessary
                } else if v_capacity >= new_cap && offset >= len {
                    // The capacity is sufficient, and copying is not too much
                    // overhead: reclaim the buffer!
                    //
                    // `offset >= len` means: no overlap
                    ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);

                    self.ptr = vptr(ptr);
                    self.cap = v.capacity();
                } else {
                    // calculate offset
                    let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);

                    // new_cap is calculated in terms of `BytesMut`, not the underlying
                    // `Vec`, so it does not take the offset into account.
                    //
                    // Thus we have to manually add it here.
                    new_cap = new_cap.checked_add(off).expect("overflow");

                    // The vector capacity is not sufficient. The reserve request is
                    // asking for more than the initial buffer capacity. Allocate more
                    // than requested if `new_cap` is not much bigger than the current
                    // capacity.
                    //
                    // In some situations (e.g. if `reserve_exact` was used), the
                    // buffer capacity could be below `original_capacity`, so do a
                    // check.
                    let double = v.capacity().checked_shl(1).unwrap_or(new_cap);

                    new_cap = cmp::max(double, new_cap);

                    // No space - allocate more
                    //
                    // The length field of `Shared::vec` is not used by the `BytesMut`;
                    // instead we use the `len` field in the `BytesMut` itself. However,
                    // when calling `reserve`, it doesn't guarantee that data stored in
                    // the unused capacity of the vector is copied over to the new
                    // allocation, so we need to ensure that we don't have any data we
                    // care about in the unused capacity before calling `reserve`.
                    debug_assert!(off + len <= v.capacity());
                    v.set_len(off + len);
                    v.reserve(new_cap - v.len());

                    // Update the info
                    self.ptr = vptr(v.as_mut_ptr().add(off));
                    self.cap = v.capacity() - off;
                }

                return;
            }
        }

        new_cap = cmp::max(new_cap, original_capacity);

        // Create a new vector to store the data
        let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));

        // Copy the bytes
        v.extend_from_slice(self.as_ref());

        // Release the shared handle. This must be done *after* the bytes are
        // copied.
        unsafe { release_shared(shared) };

        // Update self
        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
        self.data = invalid_ptr(data);
        self.ptr = vptr(v.as_mut_ptr());
        self.len = v.len();
        self.cap = v.capacity();
    }
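
    // Worked example of the reclaim condition in `reserve_inner` (added for
    // clarity, not an upstream comment): take a `KIND_VEC` handle where 40
    // bytes were already consumed (`off == 40`), 30 bytes remain
    // (`self.len() == 30`), and 10 bytes of tail capacity are free. A call to
    // `reserve(40)` finds `10 + 40 >= 40` and `40 >= 30`, so the 30 live bytes
    // are copied back to the start of the allocation and the handle regains
    // the 40-byte prefix as capacity instead of reallocating.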
    /// Appends given bytes to this `BytesMut`.
    ///
    /// If this `BytesMut` object does not have enough capacity, it is resized
    /// first.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(0);
    /// buf.extend_from_slice(b"aaabbb");
    /// buf.extend_from_slice(b"cccddd");
    ///
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    #[inline]
    pub fn extend_from_slice(&mut self, extend: &[u8]) {
        let cnt = extend.len();
        self.reserve(cnt);

        unsafe {
            let dst = self.spare_capacity_mut();
            // Reserved above
            debug_assert!(dst.len() >= cnt);

            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
        }

        unsafe {
            self.advance_mut(cnt);
        }
    }
    /// Absorbs a `BytesMut` that was previously split off.
    ///
    /// If the two `BytesMut` objects were previously contiguous and not mutated
    /// in a way that causes re-allocation, i.e., if `other` was created by
    /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
    /// that just decreases a reference count and sets a few indices.
    /// Otherwise this method degenerates to
    /// `self.extend_from_slice(other.as_ref())`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(64);
    /// buf.extend_from_slice(b"aaabbbcccddd");
    ///
    /// let split = buf.split_off(6);
    /// assert_eq!(b"aaabbb", &buf[..]);
    /// assert_eq!(b"cccddd", &split[..]);
    ///
    /// buf.unsplit(split);
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    pub fn unsplit(&mut self, other: BytesMut) {
        if self.is_empty() {
            *self = other;
            return;
        }

        if let Err(other) = self.try_unsplit(other) {
            self.extend_from_slice(other.as_ref());
        }
    }
    // For now, use a `Vec` to manage the memory for us, but we may want to
    // change that in the future to some alternate allocator strategy.
    //
    // Thus, we don't expose an easy way to construct from a `Vec` since an
    // internal change could make a simple pattern (`BytesMut::from(vec)`)
    // suddenly a lot more expensive.
    #[inline]
    pub(crate) fn from_vec(mut vec: Vec<u8>) -> BytesMut {
        let ptr = vptr(vec.as_mut_ptr());
        let len = vec.len();
        let cap = vec.capacity();
        mem::forget(vec);

        let original_capacity_repr = original_capacity_to_repr(cap);
        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;

        BytesMut {
            ptr,
            len,
            cap,
            data: invalid_ptr(data),
        }
    }
    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
    }

    #[inline]
    fn as_slice_mut(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
    }
    unsafe fn set_start(&mut self, start: usize) {
        // Setting the start to 0 is a no-op, so return early if this is the
        // case.
        if start == 0 {
            return;
        }

        debug_assert!(start <= self.cap, "internal: set_start out of bounds");

        let kind = self.kind();

        if kind == KIND_VEC {
            // Setting the start when in vec representation is a little more
            // complicated. First, we have to track how far ahead the
            // "start" of the byte buffer is from the beginning of the vec. We
            // also have to ensure that we don't exceed the maximum shift.
            let (mut pos, prev) = self.get_vec_pos();
            pos += start;

            if pos <= MAX_VEC_POS {
                self.set_vec_pos(pos, prev);
            } else {
                // The repr must be upgraded to ARC. This will never happen
                // on 64 bit systems and will only happen on 32 bit systems
                // when shifting past 134,217,727 bytes. As such, we don't
                // worry too much about performance here.
                self.promote_to_shared(/*ref_count = */ 1);
            }
        }

        // Updating the start of the view is setting `ptr` to point to the
        // new start and updating the `len` field to reflect the new length
        // of the view.
        self.ptr = vptr(self.ptr.as_ptr().add(start));

        if self.len >= start {
            self.len -= start;
        } else {
            self.len = 0;
        }

        self.cap -= start;
    }
    unsafe fn set_end(&mut self, end: usize) {
        debug_assert_eq!(self.kind(), KIND_ARC);
        assert!(end <= self.cap, "set_end out of bounds");

        self.cap = end;
        self.len = cmp::min(self.len, end);
    }
    fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
        if other.capacity() == 0 {
            return Ok(());
        }

        let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
        if ptr == other.ptr.as_ptr()
            && self.kind() == KIND_ARC
            && other.kind() == KIND_ARC
            && self.data == other.data
        {
            // Contiguous blocks, just combine directly
            self.len += other.len;
            self.cap += other.cap;
            Ok(())
        } else {
            Err(other)
        }
    }
    #[inline]
    fn kind(&self) -> usize {
        self.data as usize & KIND_MASK
    }
    unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);
        debug_assert!(ref_cnt == 1 || ref_cnt == 2);

        let original_capacity_repr =
            (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;

        // The vec offset cannot be concurrently mutated, so there
        // should be no danger reading it.
        let off = (self.data as usize) >> VEC_POS_OFFSET;

        // First, allocate a new `Shared` instance containing the
        // `Vec` fields. It's important to note that `ptr`, `len`,
        // and `cap` cannot be mutated without having `&mut self`.
        // This means that these fields will not be concurrently
        // updated and since the buffer hasn't been promoted to an
        // `Arc`, those three fields still are the components of the
        // vector.
        let shared = Box::new(Shared {
            vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
            original_capacity_repr,
            ref_count: AtomicUsize::new(ref_cnt),
        });

        let shared = Box::into_raw(shared);

        // The pointer should be aligned, so this assert should
        // always succeed.
        debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);

        self.data = shared;
    }
    /// Makes an exact shallow clone of `self`.
    ///
    /// The kind of `self` doesn't matter, but this is unsafe
    /// because the clone will have the same offsets. You must
    /// make sure the value returned to the user doesn't allow
    /// two views into the same range.
    #[inline]
    unsafe fn shallow_clone(&mut self) -> BytesMut {
        if self.kind() == KIND_ARC {
            increment_shared(self.data);
            ptr::read(self)
        } else {
            self.promote_to_shared(/*ref_count = */ 2);
            ptr::read(self)
        }
    }
    #[inline]
    unsafe fn get_vec_pos(&mut self) -> (usize, usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);

        let prev = self.data as usize;
        (prev >> VEC_POS_OFFSET, prev)
    }
    #[inline]
    unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);
        debug_assert!(pos <= MAX_VEC_POS);

        self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK));
    }
    /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`.
    ///
    /// The returned slice can be used to fill the buffer with data (e.g. by
    /// reading from a file) before marking the data as initialized using the
    /// [`set_len`] method.
    ///
    /// [`set_len`]: BytesMut::set_len
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// // Allocate buffer big enough for 10 bytes.
    /// let mut buf = BytesMut::with_capacity(10);
    ///
    /// // Fill in the first 3 elements.
    /// let uninit = buf.spare_capacity_mut();
    /// uninit[0].write(0);
    /// uninit[1].write(1);
    /// uninit[2].write(2);
    ///
    /// // Mark the first 3 bytes of the buffer as being initialized.
    /// unsafe {
    ///     buf.set_len(3);
    /// }
    ///
    /// assert_eq!(&buf[..], &[0, 1, 2]);
    /// ```
    #[inline]
    pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] {
        unsafe {
            let ptr = self.ptr.as_ptr().add(self.len);
            let len = self.cap - self.len;

            slice::from_raw_parts_mut(ptr.cast(), len)
        }
    }
}
impl Drop for BytesMut {
    fn drop(&mut self) {
        let kind = self.kind();

        if kind == KIND_VEC {
            unsafe {
                let (off, _) = self.get_vec_pos();

                // Vector storage, free the vector
                let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
            }
        } else if kind == KIND_ARC {
            unsafe { release_shared(self.data) };
        }
    }
}
impl Buf for BytesMut {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.remaining(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.remaining(),
        );
        unsafe {
            self.set_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
        self.split_to(len).freeze()
    }
}
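
// Note (added for clarity, not an upstream comment): the `copy_to_bytes`
// override above avoids the copy performed by the default
// `Buf::copy_to_bytes` implementation. It splits off the first `len` bytes
// and freezes them, so the bytes are handed over without being copied into a
// fresh allocation.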
unsafe impl BufMut for BytesMut {
    #[inline]
    fn remaining_mut(&self) -> usize {
        usize::MAX - self.len()
    }

    #[inline]
    unsafe fn advance_mut(&mut self, cnt: usize) {
        let new_len = self.len() + cnt;
        assert!(
            new_len <= self.cap,
            "new_len = {}; capacity = {}",
            new_len,
            self.cap
        );
        self.len = new_len;
    }

    #[inline]
    fn chunk_mut(&mut self) -> &mut UninitSlice {
        if self.capacity() == self.len() {
            self.reserve(64);
        }
        self.spare_capacity_mut().into()
    }

    // Specialize these methods so they can skip checking `remaining_mut`
    // and `advance_mut`.

    fn put<T: crate::Buf>(&mut self, mut src: T)
    where
        Self: Sized,
    {
        while src.has_remaining() {
            let s = src.chunk();
            let l = s.len();
            self.extend_from_slice(s);
            src.advance(l);
        }
    }

    fn put_slice(&mut self, src: &[u8]) {
        self.extend_from_slice(src);
    }

    fn put_bytes(&mut self, val: u8, cnt: usize) {
        self.reserve(cnt);
        unsafe {
            let dst = self.spare_capacity_mut();
            // Reserved above
            debug_assert!(dst.len() >= cnt);

            ptr::write_bytes(dst.as_mut_ptr(), val, cnt);

            self.advance_mut(cnt);
        }
    }
}
impl AsRef<[u8]> for BytesMut {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl Deref for BytesMut {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_ref()
    }
}

impl AsMut<[u8]> for BytesMut {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.as_slice_mut()
    }
}

impl DerefMut for BytesMut {
    #[inline]
    fn deref_mut(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}
impl<'a> From<&'a [u8]> for BytesMut {
    fn from(src: &'a [u8]) -> BytesMut {
        BytesMut::from_vec(src.to_vec())
    }
}

impl<'a> From<&'a str> for BytesMut {
    fn from(src: &'a str) -> BytesMut {
        BytesMut::from(src.as_bytes())
    }
}

impl From<BytesMut> for Bytes {
    fn from(src: BytesMut) -> Bytes {
        src.freeze()
    }
}
impl PartialEq for BytesMut {
    fn eq(&self, other: &BytesMut) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for BytesMut {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for BytesMut {
    fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for BytesMut {}

impl Default for BytesMut {
    #[inline]
    fn default() -> BytesMut {
        BytesMut::new()
    }
}
impl hash::Hash for BytesMut {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        let s: &[u8] = self.as_ref();
        s.hash(state);
    }
}

impl Borrow<[u8]> for BytesMut {
    fn borrow(&self) -> &[u8] {
        self.as_ref()
    }
}

impl BorrowMut<[u8]> for BytesMut {
    fn borrow_mut(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}
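
/// A usage sketch (added for illustration, not from the upstream docs): since
/// `BytesMut` implements `fmt::Write`, formatted text can be written directly
/// into the buffer without going through an intermediate `String`:
///
/// ```
/// use bytes::BytesMut;
/// use std::fmt::Write;
///
/// let mut buf = BytesMut::new();
/// write!(buf, "{} + {} = {}", 1, 2, 1 + 2).unwrap();
/// assert_eq!(&buf[..], b"1 + 2 = 3");
/// ```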
impl fmt::Write for BytesMut {
    #[inline]
    fn write_str(&mut self, s: &str) -> fmt::Result {
        if self.remaining_mut() >= s.len() {
            self.put_slice(s.as_bytes());
            Ok(())
        } else {
            Err(fmt::Error)
        }
    }

    #[inline]
    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
        fmt::write(self, args)
    }
}
impl Clone for BytesMut {
    fn clone(&self) -> BytesMut {
        BytesMut::from(&self[..])
    }
}
impl IntoIterator for BytesMut {
    type Item = u8;
    type IntoIter = IntoIter<BytesMut>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a BytesMut {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_ref().iter()
    }
}
impl Extend<u8> for BytesMut {
    fn extend<T>(&mut self, iter: T)
    where
        T: IntoIterator<Item = u8>,
    {
        let iter = iter.into_iter();

        let (lower, _) = iter.size_hint();
        self.reserve(lower);

        // TODO: optimize
        // 1. If self.kind() == KIND_VEC, use Vec::extend
        // 2. Make `reserve` inline-able
        for b in iter {
            self.reserve(1);
            self.put_u8(b);
        }
    }
}

impl<'a> Extend<&'a u8> for BytesMut {
    fn extend<T>(&mut self, iter: T)
    where
        T: IntoIterator<Item = &'a u8>,
    {
        self.extend(iter.into_iter().copied())
    }
}

impl Extend<Bytes> for BytesMut {
    fn extend<T>(&mut self, iter: T)
    where
        T: IntoIterator<Item = Bytes>,
    {
        for bytes in iter {
            self.extend_from_slice(&bytes)
        }
    }
}
impl FromIterator<u8> for BytesMut {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        BytesMut::from_vec(Vec::from_iter(into_iter))
    }
}

impl<'a> FromIterator<&'a u8> for BytesMut {
    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
        BytesMut::from_iter(into_iter.into_iter().copied())
    }
}
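
// Illustrative note (added, not from the upstream source): these impls mean a
// `BytesMut` can be built with `collect`, e.g.
// `let buf: BytesMut = (0u8..4).collect();` yields a buffer containing
// `[0, 1, 2, 3]`.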
/*
 *
 * ===== Inner =====
 *
 */

unsafe fn increment_shared(ptr: *mut Shared) {
    let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);

    if old_size > isize::MAX as usize {
        crate::abort();
    }
}

unsafe fn release_shared(ptr: *mut Shared) {
    // `Shared` storage... follow the drop steps from Arc.
    if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    // This fence is needed to prevent reordering of use of the data and
    // deletion of the data. Because it is marked `Release`, the decreasing
    // of the reference count synchronizes with this `Acquire` fence. This
    // means that use of the data happens before decreasing the reference
    // count, which happens before this fence, which happens before the
    // deletion of the data.
    //
    // As explained in the [Boost documentation][1],
    //
    // > It is important to enforce any possible access to the object in one
    // > thread (through an existing reference) to *happen before* deleting
    // > the object in a different thread. This is achieved by a "release"
    // > operation after dropping a reference (any access to the object
    // > through this reference must obviously happened before), and an
    // > "acquire" operation before deleting the object.
    //
    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
    //
    // Thread sanitizer does not support atomic fences. Use an atomic load
    // instead.
    (*ptr).ref_count.load(Ordering::Acquire);

    // Drop the data
    drop(Box::from_raw(ptr));
}
impl Shared {
    fn is_unique(&self) -> bool {
        // The goal is to check if the current handle is the only handle
        // that currently has access to the buffer. This is done by
        // checking if the `ref_count` is currently 1.
        //
        // The `Acquire` ordering synchronizes with the `Release` as
        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
        // operation guarantees that any mutations done in other threads
        // are ordered before the `ref_count` is decremented. As such,
        // this `Acquire` will guarantee that those mutations are
        // visible to the current thread.
        self.ref_count.load(Ordering::Acquire) == 1
    }
}
fn original_capacity_to_repr(cap: usize) -> usize {
    let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
    cmp::min(
        width,
        MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
    )
}

fn original_capacity_from_repr(repr: usize) -> usize {
    if repr == 0 {
        return 0;
    }

    1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
}
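
// Worked example (added for clarity, not an upstream comment): for a `Vec`
// allocated with 4096 bytes (2^12), `cap >> MIN_ORIGINAL_CAPACITY_WIDTH == 4`,
// which has 61 leading zeros on a 64-bit target, so `width == 64 - 61 == 3`
// and the repr is `min(3, 7) == 3`; decoding gives `1 << (3 + 9) == 4096`
// again. Capacities below 1kb encode to repr 0, which decodes back to 0
// (no original capacity to preserve).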
#[test]
fn test_original_capacity_to_repr() {
    assert_eq!(original_capacity_to_repr(0), 0);

    let max_width = 32;

    for width in 1..(max_width + 1) {
        let cap = 1 << (width - 1);

        let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
            0
        } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
            width - MIN_ORIGINAL_CAPACITY_WIDTH
        } else {
            MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
        };

        assert_eq!(original_capacity_to_repr(cap), expected);

        if width > 1 {
            assert_eq!(original_capacity_to_repr(cap + 1), expected);
        }

        // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
        if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
            assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
            assert_eq!(original_capacity_to_repr(cap + 76), expected);
        } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
            assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
            assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
        }
    }
}

#[test]
fn test_original_capacity_from_repr() {
    assert_eq!(0, original_capacity_from_repr(0));

    let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;

    assert_eq!(min_cap, original_capacity_from_repr(1));
    assert_eq!(min_cap * 2, original_capacity_from_repr(2));
    assert_eq!(min_cap * 4, original_capacity_from_repr(3));
    assert_eq!(min_cap * 8, original_capacity_from_repr(4));
    assert_eq!(min_cap * 16, original_capacity_from_repr(5));
    assert_eq!(min_cap * 32, original_capacity_from_repr(6));
    assert_eq!(min_cap * 64, original_capacity_from_repr(7));
}
unsafe impl Send for BytesMut {}
unsafe impl Sync for BytesMut {}
/*
 *
 * ===== PartialEq / PartialOrd =====
 *
 */

impl PartialEq<[u8]> for BytesMut {
    fn eq(&self, other: &[u8]) -> bool {
        &**self == other
    }
}

impl PartialOrd<[u8]> for BytesMut {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other)
    }
}

impl PartialEq<BytesMut> for [u8] {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for [u8] {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for BytesMut {
    fn eq(&self, other: &str) -> bool {
        &**self == other.as_bytes()
    }
}

impl PartialOrd<str> for BytesMut {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other.as_bytes())
    }
}

impl PartialEq<BytesMut> for str {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for BytesMut {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<Vec<u8>> for BytesMut {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        (**self).partial_cmp(&other[..])
    }
}

impl PartialEq<BytesMut> for Vec<u8> {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for Vec<u8> {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<String> for BytesMut {
    fn eq(&self, other: &String) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<String> for BytesMut {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other.as_bytes())
    }
}

impl PartialEq<BytesMut> for String {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for String {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
where
    BytesMut: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
where
    BytesMut: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(*other)
    }
}

impl PartialEq<BytesMut> for &[u8] {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for &[u8] {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<BytesMut> for &str {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for &str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<BytesMut> for Bytes {
    fn eq(&self, other: &BytesMut) -> bool {
        other[..] == self[..]
    }
}

impl PartialEq<Bytes> for BytesMut {
    fn eq(&self, other: &Bytes) -> bool {
        other[..] == self[..]
    }
}
impl From<BytesMut> for Vec<u8> {
    fn from(mut bytes: BytesMut) -> Self {
        let kind = bytes.kind();

        let mut vec = if kind == KIND_VEC {
            unsafe {
                let (off, _) = bytes.get_vec_pos();
                rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
            }
        } else if kind == KIND_ARC {
            let shared = bytes.data as *mut Shared;

            if unsafe { (*shared).is_unique() } {
                let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());

                unsafe { release_shared(shared) };

                vec
            } else {
                return bytes.deref().to_vec();
            }
        } else {
            return bytes.deref().to_vec();
        };

        let len = bytes.len;

        unsafe {
            ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
            vec.set_len(len);
        }

        mem::forget(bytes);

        vec
    }
}
#[inline]
fn vptr(ptr: *mut u8) -> NonNull<u8> {
    if cfg!(debug_assertions) {
        NonNull::new(ptr).expect("Vec pointer should be non-null")
    } else {
        unsafe { NonNull::new_unchecked(ptr) }
    }
}
/// Returns a dangling pointer with the given address. This is used to store
/// integer data in pointer fields.
///
/// It is equivalent to `addr as *mut T`, but that cast fails on miri when
/// strict provenance checking is enabled, while this formulation does not.
#[inline]
fn invalid_ptr<T>(addr: usize) -> *mut T {
    let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
    debug_assert_eq!(ptr as usize, addr);
    ptr.cast::<T>()
}
/// Precondition: dst >= original
///
/// The following line is equivalent to:
///
/// ```rust,ignore
/// self.ptr.as_ptr().offset_from(ptr) as usize;
/// ```
///
/// But since the minimum supported Rust version is 1.39 and `offset_from`
/// was only stabilized in 1.47, we cannot use it.
#[inline]
fn offset_from(dst: *mut u8, original: *mut u8) -> usize {
    debug_assert!(dst >= original);

    dst as usize - original as usize
}
unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
    let ptr = ptr.offset(-(off as isize));
    len += off;
    cap += off;

    Vec::from_raw_parts(ptr, len, cap)
}
// ===== impl SharedVtable =====

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_v_clone,
    to_vec: shared_v_to_vec,
    drop: shared_v_drop,
};

unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed) as *mut Shared;
    increment_shared(shared);

    let data = AtomicPtr::new(shared as *mut ());
    Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
}

unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let shared: *mut Shared = data.load(Ordering::Relaxed).cast();

    if (*shared).is_unique() {
        let shared = &mut *shared;

        // Drop shared
        let mut vec = mem::replace(&mut shared.vec, Vec::new());
        release_shared(shared);

        // Copy back buffer
        ptr::copy(ptr, vec.as_mut_ptr(), len);
        vec.set_len(len);

        vec
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}

unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(*shared as *mut Shared);
    });
}
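
// Note (added for clarity, not an upstream comment): `freeze` installs
// `SHARED_VTABLE` into the returned `Bytes` via `Bytes::with_vtable`, so the
// frozen handle's `clone`, `to_vec`, and `drop` dispatch to the three
// functions above and keep the `Shared` ref count consistent.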
/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_to(6);
/// }
/// ```
fn _split_to_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_off(6);
/// }
/// ```
fn _split_off_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split();
/// }
/// ```
fn _split_must_use() {}
#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::BytesMut;
    use crate::Bytes;

    #[test]
    fn bytes_mut_cloning_frozen() {
        loom::model(|| {
            let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
            let addr = a.as_ptr() as usize;

            // test the Bytes::clone is Sync by putting it in an Arc
            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}