use core::iter::FromIterator;
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, RangeBounds};
use core::ptr::NonNull;
use core::{cmp, fmt, hash, ptr, slice, usize};

use alloc::{
    alloc::{dealloc, Layout},
    borrow::Borrow,
    boxed::Box,
    string::String,
    vec::Vec,
};

use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::{offset_from, Buf, BytesMut};

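/// A cheaply cloneable and sliceable chunk of contiguous memory.
///
/// `Bytes` is a reference-counted view into an underlying buffer: `ptr` and
/// `len` describe the visible slice, while `data` together with the `vtable`
/// describes how the backing storage is shared and eventually released.
/// Cloning and slicing never copy the bytes; they only adjust the view and
/// the reference count.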
pub struct Bytes {
    ptr: *const u8,
    len: usize,
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
}

pub(crate) struct Vtable {
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
    pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
    pub to_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
    pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}

impl Bytes {
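    /// Creates a new empty `Bytes`.
    ///
    /// This will not allocate and the returned `Bytes` handle will be empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert_eq!(&b[..], b"");
    /// ```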
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    #[cfg(all(loom, test))]
    pub fn new() -> Self {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

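    /// Creates a new `Bytes` from a static slice.
    ///
    /// The returned `Bytes` points directly at the static slice; no
    /// allocation or copying is performed.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from_static(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```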
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    #[cfg(all(loom, test))]
    pub fn from_static(bytes: &'static [u8]) -> Self {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    fn new_empty_with_ptr(ptr: *const u8) -> Self {
        debug_assert!(!ptr.is_null());

        // Detach the pointer's provenance from its original allocation: the
        // resulting `Bytes` is zero-length and must never read through it.
        let ptr = without_provenance(ptr as usize);

        Bytes {
            ptr,
            len: 0,
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

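    /// Creates a new `Bytes` from an owner that exposes its contents via
    /// `AsRef<[u8]>`.
    ///
    /// The owner is moved into a reference-counted allocation and is dropped
    /// once the last `Bytes` handle referencing it is dropped. No bytes are
    /// copied: the returned `Bytes` points directly at the buffer returned by
    /// `owner.as_ref()` at construction time, so that buffer must remain
    /// valid for as long as the owner is alive.
    ///
    /// # Examples
    ///
    /// A minimal sketch using a boxed slice as the owner:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let owner: Box<[u8]> = b"hello".to_vec().into_boxed_slice();
    /// let bytes = Bytes::from_owner(owner);
    /// assert_eq!(&bytes[..], b"hello");
    /// ```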
    pub fn from_owner<T>(owner: T) -> Self
    where
        T: AsRef<[u8]> + Send + 'static,
    {
        let owned = Box::into_raw(Box::new(Owned {
            lifetime: OwnedLifetime {
                ref_cnt: AtomicUsize::new(1),
                drop: owned_box_and_drop::<T>,
            },
            owner,
        }));

        let mut ret = Bytes {
            ptr: NonNull::dangling().as_ptr(),
            len: 0,
            data: AtomicPtr::new(owned.cast()),
            vtable: &OWNED_VTABLE,
        };

        let buf = unsafe { &*owned }.owner.as_ref();
        ret.ptr = buf.as_ptr();
        ret.len = buf.len();

        ret
    }

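    /// Returns the number of bytes contained in this `Bytes`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```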
    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

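    /// Returns true if the `Bytes` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert!(b.is_empty());
    /// ```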
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

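    /// Returns true if this is the only outstanding handle to the underlying
    /// memory.
    ///
    /// Always returns false for `Bytes` created from a static slice or via
    /// `from_owner`, since those representations never report exclusive
    /// ownership.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(vec![1, 2, 3]);
    /// assert!(a.is_unique());
    /// let b = a.clone();
    /// assert!(!a.is_unique());
    /// ```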
    pub fn is_unique(&self) -> bool {
        unsafe { (self.vtable.is_unique)(&self.data) }
    }

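    /// Creates a `Bytes` instance by copying the given slice into newly
    /// allocated storage.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::copy_from_slice(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```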
    pub fn copy_from_slice(data: &[u8]) -> Self {
        data.to_vec().into()
    }

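    /// Returns a slice of self for the provided range.
    ///
    /// This increments the reference count for the underlying memory and
    /// returns a new `Bytes` handle set to the slice; no bytes are copied.
    ///
    /// # Panics
    ///
    /// Panics if the range is out of bounds or if `start > end`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    /// let b = a.slice(2..5);
    /// assert_eq!(&b[..], b"llo");
    /// ```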
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        if end == begin {
            return Bytes::new();
        }

        let mut ret = self.clone();

        ret.len = end - begin;
        ret.ptr = unsafe { ret.ptr.add(begin) };

        ret
    }

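    /// Returns a slice of self that corresponds to the given `subset` slice.
    ///
    /// Unlike `slice`, the subset is identified by pointer range rather than
    /// by index, so `subset` must be a sub-slice obtained from this `Bytes`.
    ///
    /// # Panics
    ///
    /// Panics if `subset` does not lie within the memory of `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let as_slice = bytes.as_ref();
    /// let subset = &as_slice[2..6];
    /// let subslice = bytes.slice_ref(subset);
    /// assert_eq!(&subslice[..], b"2345");
    /// ```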
    pub fn slice_ref(&self, subset: &[u8]) -> Self {
        if subset.is_empty() {
            return Bytes::new();
        }

        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(
            sub_p >= bytes_p,
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
            subset.as_ptr(),
            self.as_ptr(),
        );
        assert!(
            sub_p + sub_len <= bytes_p + bytes_len,
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
            self.as_ptr(),
            bytes_len,
            subset.as_ptr(),
            sub_len,
        );

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

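    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned
    /// `Bytes` contains elements `[at, len)`. This is an O(1) operation that
    /// only adjusts reference counts and pointers.
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off(5);
    ///
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```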
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> Self {
        if at == self.len() {
            return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(at));
        }

        if at == 0 {
            return mem::replace(self, Bytes::new_empty_with_ptr(self.ptr));
        }

        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        self.len = at;

        unsafe { ret.inc_start(at) };

        ret
    }

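    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned
    /// `Bytes` contains elements `[0, at)`. This is an O(1) operation that
    /// only adjusts reference counts and pointers.
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to(5);
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```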
    #[must_use = "consider Bytes::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> Self {
        if at == self.len() {
            let end_ptr = self.ptr.wrapping_add(at);
            return mem::replace(self, Bytes::new_empty_with_ptr(end_ptr));
        }

        if at == 0 {
            return Bytes::new_empty_with_ptr(self.ptr);
        }

        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let mut ret = self.clone();

        unsafe { self.inc_start(at) };

        ret.len = at;
        ret
    }

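    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest. If `len` is greater than or equal to the buffer's current
    /// length, this has no effect.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```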
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            // The Vec "promotable" vtables do not store the capacity and
            // instead reconstruct it from the pointer offset plus `len`, so
            // simply shrinking `len` would lose track of the real capacity.
            // Promote through `split_off` so the capacity gets recorded.
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
            {
                drop(self.split_off(len));
            } else {
                self.len = len;
            }
        }
    }

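    /// Clears the buffer, removing all data.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```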
    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }

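    /// Tries to convert `self` into a `BytesMut` without copying.
    ///
    /// If `self` is the only outstanding handle to the underlying memory,
    /// this succeeds and the buffer is reused; otherwise the original
    /// `Bytes` is returned in `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let bytes = Bytes::from(b"hello".to_vec());
    /// assert!(bytes.try_into_mut().is_ok());
    ///
    /// let shared = Bytes::from(b"hello".to_vec());
    /// let _other = shared.clone();
    /// assert!(shared.try_into_mut().is_err());
    /// ```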
    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
        if self.is_unique() {
            Ok(self.into())
        } else {
            Err(self)
        }
    }

    #[inline]
    pub(crate) unsafe fn with_vtable(
        ptr: *const u8,
        len: usize,
        data: AtomicPtr<()>,
        vtable: &'static Vtable,
    ) -> Bytes {
        Bytes {
            ptr,
            len,
            data,
            vtable,
        }
    }

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    #[inline]
    unsafe fn inc_start(&mut self, by: usize) {
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
        self.len -= by;
        self.ptr = self.ptr.add(by);
    }
}

unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}

impl Drop for Bytes {
    #[inline]
    fn drop(&mut self) {
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}

impl Clone for Bytes {
    #[inline]
    fn clone(&self) -> Bytes {
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}

impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.len(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.len(),
        );

        unsafe {
            self.inc_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> Self {
        self.split_to(len)
    }
}

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        self.as_slice().hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_slice()
    }
}

impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}

impl FromIterator<u8> for Bytes {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        Vec::from_iter(into_iter).into()
    }
}

impl PartialEq for Bytes {
    fn eq(&self, other: &Bytes) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for Bytes {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for Bytes {}

impl PartialEq<[u8]> for Bytes {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_slice() == other
    }
}

impl PartialOrd<[u8]> for Bytes {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other)
    }
}

impl PartialEq<Bytes> for [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for Bytes {
    fn eq(&self, other: &str) -> bool {
        self.as_slice() == other.as_bytes()
    }
}

impl PartialOrd<str> for Bytes {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for Bytes {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<Vec<u8>> for Bytes {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(&other[..])
    }
}

impl PartialEq<Bytes> for Vec<u8> {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for Vec<u8> {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<String> for Bytes {
    fn eq(&self, other: &String) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<String> for Bytes {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for String {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for String {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Bytes> for &[u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &[u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<Bytes> for &str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where
    Bytes: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where
    Bytes: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

impl From<&'static [u8]> for Bytes {
    fn from(slice: &'static [u8]) -> Bytes {
        Bytes::from_static(slice)
    }
}

impl From<&'static str> for Bytes {
    fn from(slice: &'static str) -> Bytes {
        Bytes::from_static(slice.as_bytes())
    }
}

impl From<Vec<u8>> for Bytes {
    fn from(vec: Vec<u8>) -> Bytes {
        let mut vec = ManuallyDrop::new(vec);
        let ptr = vec.as_mut_ptr();
        let len = vec.len();
        let cap = vec.capacity();

        if len == cap {
            let vec = ManuallyDrop::into_inner(vec);
            return Bytes::from(vec.into_boxed_slice());
        }

        let shared = Box::new(Shared {
            buf: ptr,
            cap,
            ref_cnt: AtomicUsize::new(1),
        });

        let shared = Box::into_raw(shared);
        debug_assert!(
            0 == (shared as usize & KIND_MASK),
            "internal: Box<Shared> should have an aligned pointer",
        );
        Bytes {
            ptr,
            len,
            data: AtomicPtr::new(shared as _),
            vtable: &SHARED_VTABLE,
        }
    }
}

impl From<Box<[u8]>> for Bytes {
    fn from(slice: Box<[u8]>) -> Bytes {
        if slice.is_empty() {
            return Bytes::new();
        }

        let len = slice.len();
        let ptr = Box::into_raw(slice) as *mut u8;

        if ptr as usize & 0x1 == 0 {
            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(data.cast()),
                vtable: &PROMOTABLE_EVEN_VTABLE,
            }
        } else {
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(ptr.cast()),
                vtable: &PROMOTABLE_ODD_VTABLE,
            }
        }
    }
}

impl From<Bytes> for BytesMut {
    fn from(bytes: Bytes) -> Self {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

impl From<String> for Bytes {
    fn from(s: String) -> Bytes {
        Bytes::from(s.into_bytes())
    }
}

impl From<Bytes> for Vec<u8> {
    fn from(bytes: Bytes) -> Vec<u8> {
        let bytes = ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

impl fmt::Debug for Vtable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Vtable")
            .field("clone", &(self.clone as *const ()))
            .field("drop", &(self.drop as *const ()))
            .finish()
    }
}

const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    to_vec: static_to_vec,
    to_mut: static_to_mut,
    is_unique: static_is_unique,
    drop: static_drop,
};

unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let slice = slice::from_raw_parts(ptr, len);
    Bytes::from_static(slice)
}

unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    slice.to_vec()
}

unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    let slice = slice::from_raw_parts(ptr, len);
    BytesMut::from(slice)
}

fn static_is_unique(_: &AtomicPtr<()>) -> bool {
    false
}

unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
    // nothing to drop for &'static [u8]
}

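// `from_owner` type-erases the user-provided owner behind these two structs.
// `#[repr(C)]` guarantees that `OwnedLifetime` is the first field of
// `Owned<T>`, so the vtable functions below can treat the `*mut ()` stored in
// `Bytes::data` as a `*mut OwnedLifetime` without knowing `T`.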
#[repr(C)]
struct OwnedLifetime {
    ref_cnt: AtomicUsize,
    drop: unsafe fn(*mut ()),
}

#[repr(C)]
struct Owned<T> {
    lifetime: OwnedLifetime,
    owner: T,
}

unsafe fn owned_box_and_drop<T>(ptr: *mut ()) {
    let b: Box<Owned<T>> = Box::from_raw(ptr as _);
    drop(b);
}

unsafe fn owned_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let owned = data.load(Ordering::Relaxed);
    let ref_cnt = &(*owned.cast::<OwnedLifetime>()).ref_cnt;
    let old_cnt = ref_cnt.fetch_add(1, Ordering::Relaxed);
    if old_cnt > usize::MAX >> 1 {
        crate::abort()
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(owned as _),
        vtable: &OWNED_VTABLE,
    }
}

unsafe fn owned_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    let vec = slice.to_vec();
    owned_drop_impl(data.load(Ordering::Relaxed));
    vec
}

unsafe fn owned_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    BytesMut::from_vec(owned_to_vec(data, ptr, len))
}

unsafe fn owned_is_unique(_data: &AtomicPtr<()>) -> bool {
    false
}

unsafe fn owned_drop_impl(owned: *mut ()) {
    let lifetime = owned.cast::<OwnedLifetime>();
    let ref_cnt = &(*lifetime).ref_cnt;

    let old_cnt = ref_cnt.fetch_sub(1, Ordering::Release);
    debug_assert!(
        old_cnt > 0 && old_cnt <= usize::MAX >> 1,
        "expected non-zero refcount and no underflow"
    );
    if old_cnt != 1 {
        return;
    }
    ref_cnt.load(Ordering::Acquire);

    let drop_fn = &(*lifetime).drop;
    drop_fn(owned)
}

unsafe fn owned_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    let owned = data.load(Ordering::Relaxed);
    owned_drop_impl(owned);
}

static OWNED_VTABLE: Vtable = Vtable {
    clone: owned_clone,
    to_vec: owned_to_vec,
    to_mut: owned_to_mut,
    is_unique: owned_is_unique,
    drop: owned_drop,
};

static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
    clone: promotable_even_clone,
    to_vec: promotable_even_to_vec,
    to_mut: promotable_even_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_even_drop,
};

static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
    clone: promotable_odd_clone,
    to_vec: promotable_odd_to_vec,
    to_mut: promotable_odd_to_mut,
    is_unique: promotable_is_unique,
    drop: promotable_odd_drop,
};

unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
        shallow_clone_vec(data, shared, buf, ptr, len)
    }
}

unsafe fn promotable_to_vec(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> Vec<u8> {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_vec_impl(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);

        let cap = offset_from(ptr, buf) + len;

        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    }
}

unsafe fn promotable_to_mut(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> BytesMut {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_mut_impl(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);
        let off = offset_from(ptr, buf);
        let cap = off + len;
        let v = Vec::from_raw_parts(buf, cap, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    }
}

unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);
            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
            free_boxed_slice(buf, ptr, len);
        }
    });
}

unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared as _, ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
    }
}

unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    promotable_to_mut(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);

            free_boxed_slice(shared.cast(), ptr, len);
        }
    });
}

unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
        ref_cnt == 1
    } else {
        true
    }
}

unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
    let cap = offset_from(offset, buf) + len;
    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
}

struct Shared {
    buf: *mut u8,
    cap: usize,
    ref_cnt: AtomicUsize,
}

impl Drop for Shared {
    fn drop(&mut self) {
        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
    }
}

// Assert that `Shared` has an alignment of at least 2: the promotable vtables
// stash a one-bit tag in the low bit of the `data` pointer, so a
// `Box<Shared>` pointer must never be odd.
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_clone,
    to_vec: shared_to_vec,
    to_mut: shared_to_mut,
    is_unique: shared_is_unique,
    drop: shared_drop,
};

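// The low bit of the promotable `data` pointer tags the current
// representation: `KIND_VEC` means `data` still encodes the original buffer
// pointer, while `KIND_ARC` means it has been promoted to a heap-allocated
// `Shared` header with its own reference count.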
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed);
    shallow_clone_arc(shared as _, ptr, len)
}

unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
    if (*shared)
        .ref_cnt
        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
        .is_ok()
    {
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}

unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
    if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
        let shared = *Box::from_raw(shared);
        let shared = ManuallyDrop::new(shared);
        let buf = shared.buf;
        let cap = shared.cap;

        let off = offset_from(ptr, buf);
        let v = Vec::from_raw_parts(buf, len + off, cap);

        let mut b = BytesMut::from_vec(v);
        b.advance_unchecked(off);
        b
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        BytesMut::from_vec(v)
    }
}

unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
    shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
    let shared = data.load(Ordering::Acquire);
    let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
    ref_cnt == 1
}

unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(shared.cast());
    });
}

unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);

    if old_size > usize::MAX >> 1 {
        crate::abort();
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(shared as _),
        vtable: &SHARED_VTABLE,
    }
}

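// Promote a `KIND_VEC` `Bytes` to the shared representation on first clone:
// allocate a `Shared` header (starting at a reference count of 2, one for the
// existing handle and one for the new clone) and publish it with a CAS. If
// another thread wins the race, discard our allocation without freeing the
// buffer and clone from the `Shared` that is already installed.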
#[cold]
unsafe fn shallow_clone_vec(
    atom: &AtomicPtr<()>,
    ptr: *const (),
    buf: *mut u8,
    offset: *const u8,
    len: usize,
) -> Bytes {
    let shared = Box::new(Shared {
        buf,
        cap: offset_from(offset, buf) + len,
        ref_cnt: AtomicUsize::new(2),
    });

    let shared = Box::into_raw(shared);

    debug_assert!(
        0 == (shared as usize & KIND_MASK),
        "internal: Box<Shared> should have an aligned pointer",
    );

    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
        Ok(actual) => {
            debug_assert!(actual as usize == ptr as usize);
            Bytes {
                ptr: offset,
                len,
                data: AtomicPtr::new(shared as _),
                vtable: &SHARED_VTABLE,
            }
        }
        Err(actual) => {
            let shared = Box::from_raw(shared);
            mem::forget(*shared);

            shallow_clone_arc(actual as _, offset, len)
        }
    }
}

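// Decrement the shared reference count and free the `Shared` allocation when
// it reaches zero. The `Release` decrement paired with the `Acquire` load
// below orders all prior uses of the buffer before its destruction.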
unsafe fn release_shared(ptr: *mut Shared) {
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    (*ptr).ref_cnt.load(Ordering::Acquire);

    drop(Box::from_raw(ptr));
}

#[cfg(miri)]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    let diff = new_addr.wrapping_sub(old_addr);
    ptr.wrapping_add(diff)
}

#[cfg(not(miri))]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    new_addr as *mut u8
}

fn without_provenance(ptr: usize) -> *const u8 {
    core::ptr::null::<u8>().wrapping_add(ptr)
}

fn _split_to_must_use() {}

fn _split_off_must_use() {}

#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::Bytes;
    #[test]
    fn bytes_cloning_vec() {
        loom::model(|| {
            let a = Bytes::from(b"abcdefgh".to_vec());
            let addr = a.as_ptr() as usize;

            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}