use core::iter::{FromIterator, Iterator};
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, DerefMut};
use core::ptr::{self, NonNull};
use core::{cmp, fmt, hash, isize, slice, usize};

use alloc::{
    borrow::{Borrow, BorrowMut},
    boxed::Box,
    string::String,
    vec::Vec,
};

use crate::buf::{IntoIter, UninitSlice};
use crate::bytes::Vtable;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use crate::{Buf, BufMut, Bytes};

/// A unique reference to a contiguous slice of memory.
///
/// `BytesMut` represents a unique view into a potentially shared memory region.
/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
/// mutate the memory.
///
/// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
/// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
/// same `buf` overlaps with its slice. That guarantee means that a write lock
/// is not required.
///
/// # Growth
///
/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
/// necessary. However, explicitly reserving the required space up-front before
/// a series of inserts will be more efficient.
///
/// # Examples
///
/// ```
/// use bytes::{BytesMut, BufMut};
///
/// let mut buf = BytesMut::with_capacity(64);
///
/// buf.put_u8(b'h');
/// buf.put_u8(b'e');
/// buf.put(&b"llo"[..]);
///
/// assert_eq!(&buf[..], b"hello");
///
/// // Freeze the buffer so that it can be shared
/// let a = buf.freeze();
///
/// // This does not allocate, instead `b` points to the same memory.
/// let b = a.clone();
///
/// assert_eq!(&a[..], b"hello");
/// assert_eq!(&b[..], b"hello");
/// ```
pub struct BytesMut {
    ptr: NonNull<u8>,
    len: usize,
    cap: usize,
    data: *mut Shared,
}

// Thread-safe reference-counted container for the shared storage. This is
// mostly the same as `alloc::sync::Arc` but without the weak counter. The ref
// counting fns are based on the ones found in `std`.
//
// The main reason to use `Shared` instead of `alloc::sync::Arc` is that it
// ends up making the overall code simpler and easier to reason about. This is
// due to some of the logic around setting `Inner::arc` and other ways the
// `arc` field is used. Using `Arc` ended up requiring a number of funky
// transmutes and other shenanigans to make it work.
struct Shared {
    vec: Vec<u8>,
    original_capacity_repr: usize,
    ref_count: AtomicUsize,
}

// Buffer storage strategy flags.
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

// The max original capacity value. Any `Bytes` allocated with a greater initial
// capacity will default to this.
const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
// The original capacity algorithm will not take effect unless the originally
// allocated capacity was at least 1kb in size.
const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
// The original capacity is stored in powers of 2 starting at 1kb to a max of
// 64kb. Representing it as such requires only 3 bits of storage.
const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
const ORIGINAL_CAPACITY_OFFSET: usize = 2;

// When the storage is in the `Vec` representation, the pointer can be advanced
// by at most this value. This is because the offset must fit in the bits of a
// `usize` that are left over after the KIND and ORIGINAL_CAPACITY bits.
const VEC_POS_OFFSET: usize = 5;
const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
const NOT_VEC_POS_MASK: usize = 0b11111;
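
// When `kind() == KIND_VEC`, the `data` field is not a pointer at all but a
// tagged word, packed as follows (see `get_vec_pos` / `set_vec_pos` below):
//
//   bit 0       kind tag (KIND_VEC == 1)
//   bit 1       unused
//   bits 2..=4  original capacity repr (masked by ORIGINAL_CAPACITY_MASK)
//   bits 5..    offset of `ptr` from the start of the original `Vec`
//
// When `kind() == KIND_ARC`, `data` holds an actual `*mut Shared`. `Shared` is
// at least word-aligned, so bit 0 of such a pointer is always 0, which is what
// lets the low bit double as the kind tag (see `promote_to_shared`).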

#[cfg(target_pointer_width = "64")]
const PTR_WIDTH: usize = 64;
#[cfg(target_pointer_width = "32")]
const PTR_WIDTH: usize = 32;

/*
 *
 * ===== BytesMut =====
 *
 */

impl BytesMut {
    /// Creates a new `BytesMut` with the specified capacity.
    ///
    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
    /// without reallocating.
    ///
    /// It is important to note that this function does not specify the length
    /// of the returned `BytesMut`, but only the capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::with_capacity(64);
    ///
    /// // `bytes` contains no data, even though there is capacity
    /// assert_eq!(bytes.len(), 0);
    ///
    /// bytes.put(&b"hello world"[..]);
    ///
    /// assert_eq!(&bytes[..], b"hello world");
    /// ```
    #[inline]
    pub fn with_capacity(capacity: usize) -> BytesMut {
        BytesMut::from_vec(Vec::with_capacity(capacity))
    }

    /// Creates a new `BytesMut` with default capacity.
    ///
    /// The resulting object has a length of 0 and unspecified capacity.
    /// This function does not allocate.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::new();
    ///
    /// assert_eq!(0, bytes.len());
    ///
    /// bytes.reserve(2);
    /// bytes.put_slice(b"xy");
    ///
    /// assert_eq!(&b"xy"[..], &bytes[..]);
    /// ```
    #[inline]
    pub fn new() -> BytesMut {
        BytesMut::with_capacity(0)
    }

    /// Returns the number of bytes contained in this `BytesMut`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns true if the `BytesMut` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert!(b.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert_eq!(b.capacity(), 64);
    /// ```
    #[inline]
    pub fn capacity(&self) -> usize {
        self.cap
    }

    /// Converts `self` into an immutable `Bytes`.
    ///
    /// The conversion is zero cost and is used to indicate that the slice
    /// referenced by the handle will no longer be mutated. Once the conversion
    /// is done, the handle can be cloned and shared across threads.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    /// use std::thread;
    ///
    /// let mut b = BytesMut::with_capacity(64);
    /// b.put(&b"hello world"[..]);
    /// let b1 = b.freeze();
    /// let b2 = b1.clone();
    ///
    /// let th = thread::spawn(move || {
    ///     assert_eq!(&b1[..], b"hello world");
    /// });
    ///
    /// assert_eq!(&b2[..], b"hello world");
    /// th.join().unwrap();
    /// ```
    #[inline]
    pub fn freeze(mut self) -> Bytes {
        if self.kind() == KIND_VEC {
            // Just re-use `Bytes` internal Vec vtable
            unsafe {
                let (off, _) = self.get_vec_pos();
                let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
                mem::forget(self);
                let mut b: Bytes = vec.into();
                b.advance(off);
                b
            }
        } else {
            debug_assert_eq!(self.kind(), KIND_ARC);

            let ptr = self.ptr.as_ptr();
            let len = self.len;
            let data = AtomicPtr::new(self.data as _);
            mem::forget(self);
            unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
        }
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned
    /// `BytesMut` contains elements `[at, capacity)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count
    /// and sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_off(5);
    ///
    /// a[0] = b'j';
    /// b[0] = b'!';
    ///
    /// assert_eq!(&a[..], b"jello");
    /// assert_eq!(&b[..], b"!world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > capacity`.
    #[must_use = "consider BytesMut::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> BytesMut {
        assert!(
            at <= self.capacity(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.capacity(),
        );
        unsafe {
            let mut other = self.shallow_clone();
            other.set_start(at);
            self.set_end(at);
            other
        }
    }

    /// Removes the bytes from the current view, returning them in a new
    /// `BytesMut` handle.
    ///
    /// Afterwards, `self` will be empty, but will retain any additional
    /// capacity that it had before the operation. This is identical to
    /// `self.split_to(self.len())`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(1024);
    /// buf.put(&b"hello world"[..]);
    ///
    /// let other = buf.split();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(1013, buf.capacity());
    ///
    /// assert_eq!(other, b"hello world"[..]);
    /// ```
    #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"]
    pub fn split(&mut self) -> BytesMut {
        let len = self.len();
        self.split_to(len)
    }

    /// Splits the buffer into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
    /// contains elements `[0, at)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_to(5);
    ///
    /// a[0] = b'!';
    /// b[0] = b'j';
    ///
    /// assert_eq!(&a[..], b"!world");
    /// assert_eq!(&b[..], b"jello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider BytesMut::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> BytesMut {
        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        unsafe {
            let mut other = self.shallow_clone();
            other.set_end(at);
            self.set_start(at);
            other
        }
    }

    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest.
    ///
    /// If `len` is greater than the buffer's current length, this has no
    /// effect.
    ///
    /// Existing underlying capacity is preserved.
    ///
    /// The [`split_off`] method can emulate `truncate`, but this causes the
    /// excess bytes to be returned instead of dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```
    ///
    /// [`split_off`]: #method.split_off
    pub fn truncate(&mut self, len: usize) {
        if len <= self.len() {
            unsafe {
                self.set_len(len);
            }
        }
    }

    /// Clears the buffer, removing all data. Existing capacity is preserved.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    /// Resizes the buffer so that `len` is equal to `new_len`.
    ///
    /// If `new_len` is greater than `len`, the buffer is extended by the
    /// difference with each additional byte set to `value`. If `new_len` is
    /// less than `len`, the buffer is simply truncated.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::new();
    ///
    /// buf.resize(3, 0x1);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
    ///
    /// buf.resize(2, 0x2);
    /// assert_eq!(&buf[..], &[0x1, 0x1]);
    ///
    /// buf.resize(4, 0x3);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
    /// ```
    pub fn resize(&mut self, new_len: usize, value: u8) {
        let len = self.len();
        if new_len > len {
            let additional = new_len - len;
            self.reserve(additional);
            unsafe {
                let dst = self.chunk_mut().as_mut_ptr();
                ptr::write_bytes(dst, value, additional);
                self.set_len(new_len);
            }
        } else {
            self.truncate(new_len);
        }
    }

    /// Sets the length of the buffer.
    ///
    /// This will explicitly set the size of the buffer without actually
    /// modifying the data, so it is up to the caller to ensure that the data
    /// has been initialized.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut b = BytesMut::from(&b"hello world"[..]);
    ///
    /// unsafe {
    ///     b.set_len(5);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello");
    ///
    /// unsafe {
    ///     b.set_len(11);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello world");
    /// ```
    #[inline]
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len <= self.cap, "set_len out of bounds");
        self.len = len;
    }

    /// Reserves capacity for at least `additional` more bytes to be inserted
    /// into the given `BytesMut`.
    ///
    /// More than `additional` bytes may be reserved in order to avoid frequent
    /// reallocations. A call to `reserve` may result in an allocation.
    ///
    /// Before allocating new buffer space, the function will attempt to reclaim
    /// space in the existing buffer. If the current handle references a small
    /// view in the original buffer and all other handles have been dropped,
    /// and the requested capacity is less than or equal to the existing
    /// buffer's capacity, then the current view will be copied to the front of
    /// the buffer and the handle will take ownership of the full buffer.
    ///
    /// # Examples
    ///
    /// In the following example, a new buffer is allocated.
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello"[..]);
    /// buf.reserve(64);
    /// assert!(buf.capacity() >= 69);
    /// ```
    ///
    /// In the following example, the existing buffer is reclaimed.
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(128);
    /// buf.put(&[0; 64][..]);
    ///
    /// let ptr = buf.as_ptr();
    /// let other = buf.split();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(buf.capacity(), 64);
    ///
    /// drop(other);
    /// buf.reserve(128);
    ///
    /// assert_eq!(buf.capacity(), 128);
    /// assert_eq!(buf.as_ptr(), ptr);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        let len = self.len();
        let rem = self.capacity() - len;

        if additional <= rem {
            // The handle can already store at least `additional` more bytes, so
            // there is no further work needed to be done.
            return;
        }

        self.reserve_inner(additional);
    }

    // In a separate function to allow the short-circuits in `reserve` to
    // be inline-able. This significantly helps performance.
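    //
    // At a high level, `reserve_inner` has four outcomes, in order of
    // preference:
    //
    // 1. `KIND_VEC`, and the space freed at the front of the buffer by earlier
    //    `advance`/`split_to` calls (plus the remaining tail capacity) covers
    //    the request: copy the data back to the start of the allocation and
    //    reuse it without allocating.
    // 2. `KIND_VEC` otherwise: rebuild the `Vec` and call `Vec::reserve`.
    // 3. `KIND_ARC`, this is the only outstanding handle, and the shared `Vec`
    //    already has enough capacity: copy the view to the front of the shared
    //    buffer and take ownership of its full capacity.
    // 4. Otherwise: allocate a fresh `Vec`, copy the bytes over, and release
    //    the shared handle.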
    fn reserve_inner(&mut self, additional: usize) {
        let len = self.len();
        let kind = self.kind();

        if kind == KIND_VEC {
            // If there's enough free space before the start of the buffer, then
            // just copy the data backwards and reuse the already-allocated
            // space.
            //
            // Otherwise, since backed by a vector, use `Vec::reserve`
            unsafe {
                let (off, prev) = self.get_vec_pos();

                // Only reuse space if we can satisfy the requested additional space.
                if self.capacity() - self.len() + off >= additional {
                    // There's space - reuse it
                    //
                    // Just move the pointer back to the start after copying
                    // data back.
                    let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
                    ptr::copy(self.ptr.as_ptr(), base_ptr, self.len);
                    self.ptr = vptr(base_ptr);
                    self.set_vec_pos(0, prev);

                    // Length stays constant, but since we moved backwards we
                    // can gain capacity back.
                    self.cap += off;
                } else {
                    // No space - allocate more
                    let mut v =
                        ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
                    v.reserve(additional);

                    // Update the info
                    self.ptr = vptr(v.as_mut_ptr().offset(off as isize));
                    self.len = v.len() - off;
                    self.cap = v.capacity() - off;
                }

                return;
            }
        }

        debug_assert_eq!(kind, KIND_ARC);
        let shared: *mut Shared = self.data as _;

        // Reserving involves abandoning the currently shared buffer and
        // allocating a new vector with the requested capacity.
        //
        // Compute the new capacity
        let mut new_cap = len.checked_add(additional).expect("overflow");

        let original_capacity;
        let original_capacity_repr;

        unsafe {
            original_capacity_repr = (*shared).original_capacity_repr;
            original_capacity = original_capacity_from_repr(original_capacity_repr);

            // First, try to reclaim the buffer. This is possible if the current
            // handle is the only outstanding handle pointing to the buffer.
            if (*shared).is_unique() {
                // This is the only handle to the buffer. It can be reclaimed.
                // However, before doing the work of copying data, check to make
                // sure that the vector has enough capacity.
                let v = &mut (*shared).vec;

                if v.capacity() >= new_cap {
                    // The capacity is sufficient, reclaim the buffer
                    let ptr = v.as_mut_ptr();

                    ptr::copy(self.ptr.as_ptr(), ptr, len);

                    self.ptr = vptr(ptr);
                    self.cap = v.capacity();

                    return;
                }

                // The vector capacity is not sufficient. The reserve request is
                // asking for more than the initial buffer capacity. Allocate more
                // than requested if `new_cap` is not much bigger than the current
                // capacity.
                //
                // There are some situations, using `reserve_exact`, where the
                // buffer capacity could be below `original_capacity`, so do a
                // check.
                let double = v.capacity().checked_shl(1).unwrap_or(new_cap);

                new_cap = cmp::max(cmp::max(double, new_cap), original_capacity);
            } else {
                new_cap = cmp::max(new_cap, original_capacity);
            }
        }

        // Create a new vector to store the data
        let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));

        // Copy the bytes
        v.extend_from_slice(self.as_ref());

        // Release the shared handle. This must be done *after* the bytes are
        // copied.
        unsafe { release_shared(shared) };

        // Update self
        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
        self.data = data as _;
        self.ptr = vptr(v.as_mut_ptr());
        self.len = v.len();
        self.cap = v.capacity();
    }

    /// Appends given bytes to this `BytesMut`.
    ///
    /// If this `BytesMut` object does not have enough capacity, it is resized
    /// first.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(0);
    /// buf.extend_from_slice(b"aaabbb");
    /// buf.extend_from_slice(b"cccddd");
    ///
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    pub fn extend_from_slice(&mut self, extend: &[u8]) {
        let cnt = extend.len();
        self.reserve(cnt);

        unsafe {
            let dst = self.uninit_slice();
            // Reserved above
            debug_assert!(dst.len() >= cnt);

            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
        }

        unsafe {
            self.advance_mut(cnt);
        }
    }

    /// Absorbs a `BytesMut` that was previously split off.
    ///
    /// If the two `BytesMut` objects were previously contiguous, i.e., if
    /// `other` was created by calling `split_off` on this `BytesMut`, then
    /// this is an `O(1)` operation that just decreases a reference
    /// count and sets a few indices. Otherwise this method degenerates to
    /// `self.extend_from_slice(other.as_ref())`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(64);
    /// buf.extend_from_slice(b"aaabbbcccddd");
    ///
    /// let split = buf.split_off(6);
    /// assert_eq!(b"aaabbb", &buf[..]);
    /// assert_eq!(b"cccddd", &split[..]);
    ///
    /// buf.unsplit(split);
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    pub fn unsplit(&mut self, other: BytesMut) {
        if self.is_empty() {
            *self = other;
            return;
        }

        if let Err(other) = self.try_unsplit(other) {
            self.extend_from_slice(other.as_ref());
        }
    }

    // private

    // For now, use a `Vec` to manage the memory for us, but we may want to
    // change that in the future to some alternate allocator strategy.
    //
    // Thus, we don't expose an easy way to construct from a `Vec` since an
    // internal change could make a simple pattern (`BytesMut::from(vec)`)
    // suddenly a lot more expensive.
    #[inline]
    pub(crate) fn from_vec(mut vec: Vec<u8>) -> BytesMut {
        let ptr = vptr(vec.as_mut_ptr());
        let len = vec.len();
        let cap = vec.capacity();
        mem::forget(vec);

        let original_capacity_repr = original_capacity_to_repr(cap);
        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;

        BytesMut {
            ptr,
            len,
            cap,
            data: data as *mut _,
        }
    }

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
    }

    #[inline]
    fn as_slice_mut(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
    }

    unsafe fn set_start(&mut self, start: usize) {
        // Setting the start to 0 is a no-op, so return early if this is the
        // case.
        if start == 0 {
            return;
        }

        debug_assert!(start <= self.cap, "internal: set_start out of bounds");

        let kind = self.kind();

        if kind == KIND_VEC {
            // Setting the start when in vec representation is a little more
            // complicated. First, we have to track how far ahead the
            // "start" of the byte buffer is from the beginning of the vec. We
            // also have to ensure that we don't exceed the maximum shift.
            let (mut pos, prev) = self.get_vec_pos();
            pos += start;

            if pos <= MAX_VEC_POS {
                self.set_vec_pos(pos, prev);
            } else {
                // The repr must be upgraded to ARC. This will never happen
                // on 64 bit systems and will only happen on 32 bit systems
                // when shifting past 134,217,727 bytes. As such, we don't
                // worry too much about performance here.
                self.promote_to_shared(/*ref_count = */ 1);
            }
        }

        // Updating the start of the view is setting `ptr` to point to the
        // new start and updating the `len` field to reflect the new length
        // of the view.
        self.ptr = vptr(self.ptr.as_ptr().offset(start as isize));

        if self.len >= start {
            self.len -= start;
        } else {
            self.len = 0;
        }

        self.cap -= start;
    }

    unsafe fn set_end(&mut self, end: usize) {
        debug_assert_eq!(self.kind(), KIND_ARC);
        assert!(end <= self.cap, "set_end out of bounds");

        self.cap = end;
        self.len = cmp::min(self.len, end);
    }

    fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
        if other.capacity() == 0 {
            return Ok(());
        }

        let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) };
        if ptr == other.ptr.as_ptr()
            && self.kind() == KIND_ARC
            && other.kind() == KIND_ARC
            && self.data == other.data
        {
            // Contiguous blocks, just combine directly
            self.len += other.len;
            self.cap += other.cap;
            Ok(())
        } else {
            Err(other)
        }
    }

    #[inline]
    fn kind(&self) -> usize {
        self.data as usize & KIND_MASK
    }

    unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);
        debug_assert!(ref_cnt == 1 || ref_cnt == 2);

        let original_capacity_repr =
            (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;

        // The vec offset cannot be concurrently mutated, so there
        // should be no danger reading it.
        let off = (self.data as usize) >> VEC_POS_OFFSET;

        // First, allocate a new `Shared` instance containing the
        // `Vec` fields. It's important to note that `ptr`, `len`,
        // and `cap` cannot be mutated without having `&mut self`.
        // This means that these fields will not be concurrently
        // updated and since the buffer hasn't been promoted to an
        // `Arc`, those three fields still are the components of the
        // vector.
        let shared = Box::new(Shared {
            vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
            original_capacity_repr,
            ref_count: AtomicUsize::new(ref_cnt),
        });

        let shared = Box::into_raw(shared);

        // The pointer should be aligned, so this assert should
        // always succeed.
        debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);

        self.data = shared as _;
    }

    /// Makes an exact shallow clone of `self`.
    ///
    /// The kind of `self` doesn't matter, but this is unsafe
    /// because the clone will have the same offsets. You must
    /// ensure that the value returned to the user does not allow
    /// two views into the same range.
    #[inline]
    unsafe fn shallow_clone(&mut self) -> BytesMut {
        if self.kind() == KIND_ARC {
            increment_shared(self.data);
            ptr::read(self)
        } else {
            self.promote_to_shared(/*ref_count = */ 2);
            ptr::read(self)
        }
    }

    #[inline]
    unsafe fn get_vec_pos(&mut self) -> (usize, usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);

        let prev = self.data as usize;
        (prev >> VEC_POS_OFFSET, prev)
    }

    #[inline]
    unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);
        debug_assert!(pos <= MAX_VEC_POS);

        self.data = ((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)) as *mut _;
    }

    #[inline]
    fn uninit_slice(&mut self) -> &mut UninitSlice {
        unsafe {
            let ptr = self.ptr.as_ptr().offset(self.len as isize);
            let len = self.cap - self.len;

            UninitSlice::from_raw_parts_mut(ptr, len)
        }
    }
}

impl Drop for BytesMut {
    fn drop(&mut self) {
        let kind = self.kind();

        if kind == KIND_VEC {
            unsafe {
                let (off, _) = self.get_vec_pos();

                // Vector storage, free the vector
                let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
            }
        } else if kind == KIND_ARC {
            unsafe { release_shared(self.data as _) };
        }
    }
}

impl Buf for BytesMut {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.remaining(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.remaining(),
        );
        unsafe {
            self.set_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
        self.split_to(len).freeze()
    }
}

unsafe impl BufMut for BytesMut {
    #[inline]
    fn remaining_mut(&self) -> usize {
        usize::MAX - self.len()
    }

    #[inline]
    unsafe fn advance_mut(&mut self, cnt: usize) {
        let new_len = self.len() + cnt;
        assert!(
            new_len <= self.cap,
            "new_len = {}; capacity = {}",
            new_len,
            self.cap
        );
        self.len = new_len;
    }

    #[inline]
    fn chunk_mut(&mut self) -> &mut UninitSlice {
        if self.capacity() == self.len() {
            self.reserve(64);
        }
        self.uninit_slice()
    }

    // Specialize these methods so they can skip checking `remaining_mut`
    // and `advance_mut`.

    fn put<T: crate::Buf>(&mut self, mut src: T)
    where
        Self: Sized,
    {
        while src.has_remaining() {
            let s = src.chunk();
            let l = s.len();
            self.extend_from_slice(s);
            src.advance(l);
        }
    }

    fn put_slice(&mut self, src: &[u8]) {
        self.extend_from_slice(src);
    }

    fn put_bytes(&mut self, val: u8, cnt: usize) {
        self.reserve(cnt);
        unsafe {
            let dst = self.uninit_slice();
            // Reserved above
            debug_assert!(dst.len() >= cnt);

            ptr::write_bytes(dst.as_mut_ptr(), val, cnt);

            self.advance_mut(cnt);
        }
    }
}

impl AsRef<[u8]> for BytesMut {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl Deref for BytesMut {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_ref()
    }
}

impl AsMut<[u8]> for BytesMut {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.as_slice_mut()
    }
}

impl DerefMut for BytesMut {
    #[inline]
    fn deref_mut(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}

impl<'a> From<&'a [u8]> for BytesMut {
    fn from(src: &'a [u8]) -> BytesMut {
        BytesMut::from_vec(src.to_vec())
    }
}

impl<'a> From<&'a str> for BytesMut {
    fn from(src: &'a str) -> BytesMut {
        BytesMut::from(src.as_bytes())
    }
}

impl From<BytesMut> for Bytes {
    fn from(src: BytesMut) -> Bytes {
        src.freeze()
    }
}

impl PartialEq for BytesMut {
    fn eq(&self, other: &BytesMut) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for BytesMut {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for BytesMut {
    fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for BytesMut {}

impl Default for BytesMut {
    #[inline]
    fn default() -> BytesMut {
        BytesMut::new()
    }
}

impl hash::Hash for BytesMut {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        let s: &[u8] = self.as_ref();
        s.hash(state);
    }
}

impl Borrow<[u8]> for BytesMut {
    fn borrow(&self) -> &[u8] {
        self.as_ref()
    }
}

impl BorrowMut<[u8]> for BytesMut {
    fn borrow_mut(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}

impl fmt::Write for BytesMut {
    #[inline]
    fn write_str(&mut self, s: &str) -> fmt::Result {
        if self.remaining_mut() >= s.len() {
            self.put_slice(s.as_bytes());
            Ok(())
        } else {
            Err(fmt::Error)
        }
    }

    #[inline]
    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
        fmt::write(self, args)
    }
}

impl Clone for BytesMut {
    fn clone(&self) -> BytesMut {
        BytesMut::from(&self[..])
    }
}

impl IntoIterator for BytesMut {
    type Item = u8;
    type IntoIter = IntoIter<BytesMut>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a BytesMut {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_ref().into_iter()
    }
}

impl Extend<u8> for BytesMut {
    fn extend<T>(&mut self, iter: T)
    where
        T: IntoIterator<Item = u8>,
    {
        let iter = iter.into_iter();

        let (lower, _) = iter.size_hint();
        self.reserve(lower);

        // TODO: optimize
        // 1. If self.kind() == KIND_VEC, use Vec::extend
        // 2. Make `reserve` inline-able
        for b in iter {
            self.reserve(1);
            self.put_u8(b);
        }
    }
}

impl<'a> Extend<&'a u8> for BytesMut {
    fn extend<T>(&mut self, iter: T)
    where
        T: IntoIterator<Item = &'a u8>,
    {
        self.extend(iter.into_iter().map(|b| *b))
    }
}

impl FromIterator<u8> for BytesMut {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        BytesMut::from_vec(Vec::from_iter(into_iter))
    }
}

impl<'a> FromIterator<&'a u8> for BytesMut {
    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
        BytesMut::from_iter(into_iter.into_iter().map(|b| *b))
    }
}

/*
 *
 * ===== Inner =====
 *
 */

unsafe fn increment_shared(ptr: *mut Shared) {
    let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);

    if old_size > isize::MAX as usize {
        crate::abort();
    }
}

unsafe fn release_shared(ptr: *mut Shared) {
    // `Shared` storage... follow the drop steps from Arc.
    if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    // This fence is needed to prevent reordering of use of the data and
    // deletion of the data.  Because it is marked `Release`, the decreasing
    // of the reference count synchronizes with this `Acquire` fence. This
    // means that use of the data happens before decreasing the reference
    // count, which happens before this fence, which happens before the
    // deletion of the data.
    //
    // As explained in the [Boost documentation][1],
    //
    // > It is important to enforce any possible access to the object in one
    // > thread (through an existing reference) to *happen before* deleting
    // > the object in a different thread. This is achieved by a "release"
    // > operation after dropping a reference (any access to the object
    // > through this reference must obviously happened before), and an
    // > "acquire" operation before deleting the object.
    //
    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
    atomic::fence(Ordering::Acquire);

    // Drop the data
    drop(Box::from_raw(ptr));
}

impl Shared {
    fn is_unique(&self) -> bool {
        // The goal is to check if the current handle is the only handle
        // that currently has access to the buffer. This is done by
        // checking if the `ref_count` is currently 1.
        //
        // The `Acquire` ordering synchronizes with the `Release` as
        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
        // operation guarantees that any mutations done in other threads
        // are ordered before the `ref_count` is decremented. As such,
        // this `Acquire` will guarantee that those mutations are
        // visible to the current thread.
        self.ref_count.load(Ordering::Acquire) == 1
    }
}

#[inline]
fn original_capacity_to_repr(cap: usize) -> usize {
    let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
    cmp::min(
        width,
        MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
    )
}

fn original_capacity_from_repr(repr: usize) -> usize {
    if repr == 0 {
        return 0;
    }

    1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
}
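
// For example, with MIN_ORIGINAL_CAPACITY_WIDTH == 10 and
// MAX_ORIGINAL_CAPACITY_WIDTH == 17:
//
//   original_capacity_to_repr(0)       == 0  (anything below 1 KiB maps to 0)
//   original_capacity_to_repr(1024)    == 1
//   original_capacity_to_repr(4096)    == 3
//   original_capacity_to_repr(1 << 20) == 7  (clamped to the 64 KiB bucket)
//
//   original_capacity_from_repr(3) == 4096
//   original_capacity_from_repr(7) == 65536
//
// That is, a round trip through the repr yields the original capacity rounded
// down to a power of two between 1 KiB and 64 KiB, or 0 below 1 KiB.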

/*
#[test]
fn test_original_capacity_to_repr() {
    assert_eq!(original_capacity_to_repr(0), 0);

    let max_width = 32;

    for width in 1..(max_width + 1) {
        let cap = 1 << width - 1;

        let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
            0
        } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
            width - MIN_ORIGINAL_CAPACITY_WIDTH
        } else {
            MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
        };

        assert_eq!(original_capacity_to_repr(cap), expected);

        if width > 1 {
            assert_eq!(original_capacity_to_repr(cap + 1), expected);
        }

        //  MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
        if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
            assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
            assert_eq!(original_capacity_to_repr(cap + 76), expected);
        } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
            assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
            assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
        }
    }
}

#[test]
fn test_original_capacity_from_repr() {
    assert_eq!(0, original_capacity_from_repr(0));

    let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;

    assert_eq!(min_cap, original_capacity_from_repr(1));
    assert_eq!(min_cap * 2, original_capacity_from_repr(2));
    assert_eq!(min_cap * 4, original_capacity_from_repr(3));
    assert_eq!(min_cap * 8, original_capacity_from_repr(4));
    assert_eq!(min_cap * 16, original_capacity_from_repr(5));
    assert_eq!(min_cap * 32, original_capacity_from_repr(6));
    assert_eq!(min_cap * 64, original_capacity_from_repr(7));
}
*/

unsafe impl Send for BytesMut {}
unsafe impl Sync for BytesMut {}

/*
 *
 * ===== PartialEq / PartialOrd =====
 *
 */

impl PartialEq<[u8]> for BytesMut {
    fn eq(&self, other: &[u8]) -> bool {
        &**self == other
    }
}

impl PartialOrd<[u8]> for BytesMut {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other)
    }
}

impl PartialEq<BytesMut> for [u8] {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for [u8] {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for BytesMut {
    fn eq(&self, other: &str) -> bool {
        &**self == other.as_bytes()
    }
}

impl PartialOrd<str> for BytesMut {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other.as_bytes())
    }
}

impl PartialEq<BytesMut> for str {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for BytesMut {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<Vec<u8>> for BytesMut {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        (**self).partial_cmp(&other[..])
    }
}

impl PartialEq<BytesMut> for Vec<u8> {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for Vec<u8> {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<String> for BytesMut {
    fn eq(&self, other: &String) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<String> for BytesMut {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other.as_bytes())
    }
}

impl PartialEq<BytesMut> for String {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for String {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
where
    BytesMut: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
where
    BytesMut: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(*other)
    }
}

impl PartialEq<BytesMut> for &[u8] {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for &[u8] {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<BytesMut> for &str {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for &str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<BytesMut> for Bytes {
    fn eq(&self, other: &BytesMut) -> bool {
        &other[..] == &self[..]
    }
}

impl PartialEq<Bytes> for BytesMut {
    fn eq(&self, other: &Bytes) -> bool {
        &other[..] == &self[..]
    }
}

#[inline]
fn vptr(ptr: *mut u8) -> NonNull<u8> {
    if cfg!(debug_assertions) {
        NonNull::new(ptr).expect("Vec pointer should be non-null")
    } else {
        unsafe { NonNull::new_unchecked(ptr) }
    }
}

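// Reconstructs the original `Vec<u8>` backing a `KIND_VEC` `BytesMut`: walk
// the pointer back by the stored offset `off` and add `off` back to both the
// length and the capacity. Used by `freeze`, `reserve_inner`,
// `promote_to_shared`, and the `Drop` impl.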
unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
    let ptr = ptr.offset(-(off as isize));
    len += off;
    cap += off;

    Vec::from_raw_parts(ptr, len, cap)
}

// ===== impl SharedVtable =====

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_v_clone,
    drop: shared_v_drop,
};

unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed) as *mut Shared;
    increment_shared(shared);

    let data = AtomicPtr::new(shared as _);
    Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
}

unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(*shared as *mut Shared);
    });
}

// compile-fails

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_to(6);
/// }
/// ```
fn _split_to_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_off(6);
/// }
/// ```
fn _split_off_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split();
/// }
/// ```
fn _split_must_use() {}

// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::BytesMut;
    use crate::Bytes;

    #[test]
    fn bytes_mut_cloning_frozen() {
        loom::model(|| {
            let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
            let addr = a.as_ptr() as usize;

            // test the Bytes::clone is Sync by putting it in an Arc
            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}