From 2424b5361052cacc61950e82e827093925ccb4ba Mon Sep 17 00:00:00 2001 From: HyeonuPark Date: Sat, 20 Aug 2022 18:51:25 +0900 Subject: [PATCH 01/13] change to_vec to into_vec and takes &mut AtomicPtr<()> instead of &AtomicPtr<()> --- src/bytes.rs | 45 +++++++++++++++++++++++++-------------------- src/bytes_mut.rs | 6 +++--- 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 0404a72db..953dca56d 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -110,8 +110,8 @@ pub(crate) struct Vtable { pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, /// fn(data, ptr, len) /// - /// takes `Bytes` to value - pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec, + /// Consumes `Bytes` to return `Vec` + pub into_vec: unsafe fn(&mut AtomicPtr<()>, *const u8, usize) -> Vec, /// fn(data, ptr, len) pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), } @@ -879,8 +879,8 @@ impl From for Bytes { impl From for Vec { fn from(bytes: Bytes) -> Vec { - let bytes = mem::ManuallyDrop::new(bytes); - unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) } + let bytes = &mut *mem::ManuallyDrop::new(bytes); + unsafe { (bytes.vtable.into_vec)(&mut bytes.data, bytes.ptr, bytes.len) } } } @@ -890,6 +890,7 @@ impl fmt::Debug for Vtable { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Vtable") .field("clone", &(self.clone as *const ())) + .field("into_vec", &(self.into_vec as *const ())) .field("drop", &(self.drop as *const ())) .finish() } @@ -899,7 +900,7 @@ impl fmt::Debug for Vtable { const STATIC_VTABLE: Vtable = Vtable { clone: static_clone, - to_vec: static_to_vec, + into_vec: static_into_vec, drop: static_drop, }; @@ -908,7 +909,7 @@ unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { Bytes::from_static(slice) } -unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { +unsafe fn static_into_vec(_: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { let slice = slice::from_raw_parts(ptr, len); slice.to_vec() } @@ -921,13 +922,13 @@ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable { clone: promotable_even_clone, - to_vec: promotable_even_to_vec, + into_vec: promotable_even_into_vec, drop: promotable_even_drop, }; static PROMOTABLE_ODD_VTABLE: Vtable = Vtable { clone: promotable_odd_clone, - to_vec: promotable_odd_to_vec, + into_vec: promotable_odd_into_vec, drop: promotable_odd_drop, }; @@ -944,17 +945,17 @@ unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize } } -unsafe fn promotable_to_vec( - data: &AtomicPtr<()>, +unsafe fn promotable_into_vec( + data: &mut AtomicPtr<()>, ptr: *const u8, len: usize, f: fn(*mut ()) -> *mut u8, ) -> Vec { - let shared = data.load(Ordering::Acquire); + let shared = data.with_mut(|p| *p); let kind = shared as usize & KIND_MASK; if kind == KIND_ARC { - shared_to_vec_impl(shared.cast(), ptr, len) + shared_into_vec_impl(shared.cast(), ptr, len) } else { // If Bytes holds a Vec, then the offset must be 0. 
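For illustration only (not part of the patch): the user-visible path through the reworked `into_vec` is the `From<Bytes> for Vec<u8>` conversion, which can hand the allocation back instead of copying when the `Bytes` is uniquely owned.

    fn bytes_into_vec_demo() {
        let b = bytes::Bytes::from(vec![1u8, 2, 3]);
        // Routed through the vtable's `into_vec` shown above.
        let v: Vec<u8> = Vec::from(b);
        assert_eq!(v, [1, 2, 3]);
    }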
debug_assert_eq!(kind, KIND_VEC); @@ -970,8 +971,12 @@ unsafe fn promotable_to_vec( } } -unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - promotable_to_vec(data, ptr, len, |shared| { +unsafe fn promotable_even_into_vec( + data: &mut AtomicPtr<()>, + ptr: *const u8, + len: usize, +) -> Vec { + promotable_into_vec(data, ptr, len, |shared| { ptr_map(shared.cast(), |addr| addr & !KIND_MASK) }) } @@ -1003,8 +1008,8 @@ unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) } } -unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - promotable_to_vec(data, ptr, len, |shared| shared.cast()) +unsafe fn promotable_odd_into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + promotable_into_vec(data, ptr, len, |shared| shared.cast()) } unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { @@ -1050,7 +1055,7 @@ const _: [(); 0 - mem::align_of::() % 2] = []; // Assert that the alignm static SHARED_VTABLE: Vtable = Vtable { clone: shared_clone, - to_vec: shared_to_vec, + into_vec: shared_into_vec, drop: shared_drop, }; @@ -1063,7 +1068,7 @@ unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Byte shallow_clone_arc(shared as _, ptr, len) } -unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec { +unsafe fn shared_into_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec { // Check that the ref_cnt is 1 (unique). // // If it is unique, then it is set to 0 with AcqRel fence for the same @@ -1092,8 +1097,8 @@ unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> } } -unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len) +unsafe fn shared_into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + shared_into_vec_impl((data.with_mut(|p| *p)).cast(), ptr, len) } unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 450b93279..7673bd0cb 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1705,7 +1705,7 @@ unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) static SHARED_VTABLE: Vtable = Vtable { clone: shared_v_clone, - to_vec: shared_v_to_vec, + into_vec: shared_v_into_vec, drop: shared_v_drop, }; @@ -1717,8 +1717,8 @@ unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> By Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) } -unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - let shared: *mut Shared = data.load(Ordering::Relaxed).cast(); +unsafe fn shared_v_into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + let shared: *mut Shared = (data.with_mut(|p| *p)).cast(); if (*shared).is_unique() { let shared = &mut *shared; From 2439d6fd5dc9aacc8c41d9ffa35dd9c5d058d39b Mon Sep 17 00:00:00 2001 From: HyeonuPark Date: Sat, 20 Aug 2022 19:22:10 +0900 Subject: [PATCH 02/13] add will_truncate to Bytes vtable to prevent special casing promotable ones --- src/bytes.rs | 44 +++++++++++++++++++++++++++++++++++--------- src/bytes_mut.rs | 5 +++++ 2 files changed, 40 insertions(+), 9 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 953dca56d..e5c885b63 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -110,6 +110,11 @@ pub(crate) struct Vtable { pub clone: unsafe 
fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, /// fn(data, ptr, len) /// + /// Called before the `Bytes::truncate` is processed. + /// Useful if the implementation needs some preparation step for it. + pub will_truncate: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), + /// fn(data, ptr, len) + /// /// Consumes `Bytes` to return `Vec` pub into_vec: unsafe fn(&mut AtomicPtr<()>, *const u8, usize) -> Vec, /// fn(data, ptr, len) @@ -455,16 +460,10 @@ impl Bytes { #[inline] pub fn truncate(&mut self, len: usize) { if len < self.len { - // The Vec "promotable" vtables do not store the capacity, - // so we cannot truncate while using this repr. We *have* to - // promote using `split_off` so the capacity can be stored. - if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE - || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE - { - drop(self.split_off(len)); - } else { - self.len = len; + unsafe { + (self.vtable.will_truncate)(&mut self.data, self.ptr, self.len); } + self.len = len; } } @@ -890,6 +889,7 @@ impl fmt::Debug for Vtable { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Vtable") .field("clone", &(self.clone as *const ())) + .field("will_truncate", &(self.will_truncate as *const ())) .field("into_vec", &(self.into_vec as *const ())) .field("drop", &(self.drop as *const ())) .finish() @@ -900,6 +900,7 @@ impl fmt::Debug for Vtable { const STATIC_VTABLE: Vtable = Vtable { clone: static_clone, + will_truncate: static_will_truncate, into_vec: static_into_vec, drop: static_drop, }; @@ -909,6 +910,10 @@ unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { Bytes::from_static(slice) } +unsafe fn static_will_truncate(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { + // nothing to do before truncate for &'static [u8] +} + unsafe fn static_into_vec(_: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { let slice = slice::from_raw_parts(ptr, len); slice.to_vec() @@ -922,12 +927,14 @@ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable { clone: promotable_even_clone, + will_truncate: promotable_even_will_truncate, into_vec: promotable_even_into_vec, drop: promotable_even_drop, }; static PROMOTABLE_ODD_VTABLE: Vtable = Vtable { clone: promotable_odd_clone, + will_truncate: promotable_odd_will_truncate, into_vec: promotable_odd_into_vec, drop: promotable_odd_drop, }; @@ -945,6 +952,13 @@ unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize } } +unsafe fn promotable_even_will_truncate(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + // The Vec "promotable" vtables do not store the capacity, + // so we cannot truncate while using this repr. We *have* to + // promote using `clone` so the capacity can be stored. + drop(promotable_even_clone(&*data, ptr, len)); +} + unsafe fn promotable_into_vec( data: &mut AtomicPtr<()>, ptr: *const u8, @@ -1008,6 +1022,13 @@ unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) } } +unsafe fn promotable_odd_will_truncate(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + // The Vec "promotable" vtables do not store the capacity, + // so we cannot truncate while using this repr. We *have* to + // promote using `clone` so the capacity can be stored. 
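A brief behavioral sketch (illustrative, not part of the patch): `truncate` keeps only the prefix, and for the promotable representation the new `will_truncate` hook first promotes the buffer to the shared representation so the capacity needed to free the allocation later is recorded.

    fn truncate_demo() {
        let mut b = bytes::Bytes::from(vec![1u8, 2, 3, 4]);
        // Internally calls the vtable's `will_truncate` before shortening `len`.
        b.truncate(2);
        assert_eq!(&b[..], &[1u8, 2][..]);
    }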
+ drop(promotable_odd_clone(&*data, ptr, len)); +} + unsafe fn promotable_odd_into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { promotable_into_vec(data, ptr, len, |shared| shared.cast()) } @@ -1055,6 +1076,7 @@ const _: [(); 0 - mem::align_of::() % 2] = []; // Assert that the alignm static SHARED_VTABLE: Vtable = Vtable { clone: shared_clone, + will_truncate: shared_will_truncate, into_vec: shared_into_vec, drop: shared_drop, }; @@ -1068,6 +1090,10 @@ unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Byte shallow_clone_arc(shared as _, ptr, len) } +unsafe fn shared_will_truncate(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { + // nothing to do before truncate for Shared +} + unsafe fn shared_into_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec { // Check that the ref_cnt is 1 (unique). // diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 7673bd0cb..248d3cca3 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1705,6 +1705,7 @@ unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) static SHARED_VTABLE: Vtable = Vtable { clone: shared_v_clone, + will_truncate: shared_v_will_truncate, into_vec: shared_v_into_vec, drop: shared_v_drop, }; @@ -1717,6 +1718,10 @@ unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> By Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) } +unsafe fn shared_v_will_truncate(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { + // nothing to do before truncate for Shared +} + unsafe fn shared_v_into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { let shared: *mut Shared = (data.with_mut(|p| *p)).cast(); From a37efdab732caecf340557dbd85ae8f36125217f Mon Sep 17 00:00:00 2001 From: HyeonuPark Date: Sat, 20 Aug 2022 20:27:01 +0900 Subject: [PATCH 03/13] add `Bytes::with_impl` and related trait --- src/bytes.rs | 48 +++++++++++++++++++++++++++++++++++++++++++++++- src/lib.rs | 2 +- 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index e5c885b63..633b86466 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -104,6 +104,32 @@ pub struct Bytes { data: AtomicPtr<()>, vtable: &'static Vtable, } +/// A trait for underlying implementations for `Bytes` type. +/// +/// All implementations must fulfill the following requirements: +/// - They are cheaply cloneable and thereby shareable between an unlimited amount +/// of components, for example by modifying a reference count. +/// - Instances can be sliced to refer to a subset of the the original buffer. +pub unsafe trait BytesImpl: 'static { + /// Decompose `Self` into parts used by `Bytes`. + fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize); + + /// Returns new `Bytes` based on the current parts. + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes; + + /// Called before the `Bytes::truncate` is processed. + /// Useful if the implementation needs some preparation step for it. + unsafe fn will_truncate(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + // do nothing by default + let _ = (data, ptr, len); + } + + /// Consumes underlying resources and return `Vec` + unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec; + + /// Release underlying resources. 
+ unsafe fn drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize); +} pub(crate) struct Vtable { /// fn(data, ptr, len) @@ -115,7 +141,7 @@ pub(crate) struct Vtable { pub will_truncate: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), /// fn(data, ptr, len) /// - /// Consumes `Bytes` to return `Vec` + /// Consumes `Bytes` and return `Vec` pub into_vec: unsafe fn(&mut AtomicPtr<()>, *const u8, usize) -> Vec, /// fn(data, ptr, len) pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), @@ -183,6 +209,26 @@ impl Bytes { } } + /// Creates a new `Bytes` from `BytesImpl` implementation. + /// + /// Useful if you want to construct `Bytes` from your own buffer implementation. + #[inline] + pub fn with_impl(bytes_impl: T) -> Bytes { + let (data, ptr, len) = BytesImpl::into_bytes_parts(bytes_impl); + + Bytes { + ptr, + len, + data, + vtable: &Vtable { + clone: T::clone, + will_truncate: T::will_truncate, + into_vec: T::into_vec, + drop: T::drop, + }, + } + } + /// Returns the number of bytes contained in this `Bytes`. /// /// # Examples diff --git a/src/lib.rs b/src/lib.rs index af436b316..2a5f7fadb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -88,7 +88,7 @@ mod bytes; mod bytes_mut; mod fmt; mod loom; -pub use crate::bytes::Bytes; +pub use crate::bytes::{Bytes, BytesImpl}; pub use crate::bytes_mut::BytesMut; // Optional Serde support From bdc88ff223c466ce282c9bc3b24a1ad219d58b71 Mon Sep 17 00:00:00 2001 From: HyeonuPark Date: Sat, 20 Aug 2022 21:31:03 +0900 Subject: [PATCH 04/13] migrate existing vtable code to BytesImpl trait --- src/bytes.rs | 336 ++++++++++++++++++++++++----------------------- src/bytes_mut.rs | 75 ++++++----- 2 files changed, 208 insertions(+), 203 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 633b86466..1263ca1f1 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -131,20 +131,20 @@ pub unsafe trait BytesImpl: 'static { unsafe fn drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize); } -pub(crate) struct Vtable { +struct Vtable { /// fn(data, ptr, len) - pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, + clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, /// fn(data, ptr, len) /// /// Called before the `Bytes::truncate` is processed. /// Useful if the implementation needs some preparation step for it. 
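A minimal sketch of how a downstream crate might plug in its own buffer through the new trait, assuming the patch 03 signatures above; `MyStatic` and the demo function are invented for this sketch and mirror the crate's own static vtable.

    use std::sync::atomic::AtomicPtr;
    use bytes::{Bytes, BytesImpl};

    // A toy implementation backed by a borrowed static buffer.
    struct MyStatic(&'static [u8]);

    unsafe impl BytesImpl for MyStatic {
        fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) {
            // No shared state to track, so `data` can simply hold a null pointer.
            (AtomicPtr::new(std::ptr::null_mut()), this.0.as_ptr(), this.0.len())
        }

        unsafe fn clone(_data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
            Bytes::from_static(std::slice::from_raw_parts(ptr, len))
        }

        // `will_truncate` keeps its default no-op body.

        unsafe fn into_vec(_data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
            std::slice::from_raw_parts(ptr, len).to_vec()
        }

        unsafe fn drop(_data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
            // Nothing to release for a &'static [u8].
        }
    }

    fn with_impl_demo() {
        let b = Bytes::with_impl(MyStatic(b"hello"));
        assert_eq!(&b[..], b"hello");
    }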
- pub will_truncate: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), + will_truncate: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), /// fn(data, ptr, len) /// /// Consumes `Bytes` and return `Vec` - pub into_vec: unsafe fn(&mut AtomicPtr<()>, *const u8, usize) -> Vec, + into_vec: unsafe fn(&mut AtomicPtr<()>, *const u8, usize) -> Vec, /// fn(data, ptr, len) - pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), + drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), } impl Bytes { @@ -190,7 +190,14 @@ impl Bytes { /// ``` #[inline] #[cfg(not(all(loom, test)))] - pub const fn from_static(bytes: &'static [u8]) -> Self { + pub const fn from_static(bytes: &'static [u8]) -> Bytes { + const STATIC_VTABLE: Vtable = Vtable { + clone: ::clone, + will_truncate: ::will_truncate, + into_vec: ::into_vec, + drop: ::drop, + }; + Bytes { ptr: bytes.as_ptr(), len: bytes.len(), @@ -200,7 +207,14 @@ impl Bytes { } #[cfg(all(loom, test))] - pub fn from_static(bytes: &'static [u8]) -> Self { + pub fn from_static(bytes: &'static [u8]) -> Bytes { + const STATIC_VTABLE: Vtable = Vtable { + clone: ::clone, + will_truncate: ::will_truncate, + into_vec: ::into_vec, + drop: ::drop, + }; + Bytes { ptr: bytes.as_ptr(), len: bytes.len(), @@ -529,21 +543,6 @@ impl Bytes { self.truncate(0); } - #[inline] - pub(crate) unsafe fn with_vtable( - ptr: *const u8, - len: usize, - data: AtomicPtr<()>, - vtable: &'static Vtable, - ) -> Bytes { - Bytes { - ptr, - len, - data, - vtable, - } - } - // private #[inline] @@ -894,24 +893,10 @@ impl From> for Bytes { return Bytes::new(); } - let len = slice.len(); - let ptr = Box::into_raw(slice) as *mut u8; - - if ptr as usize & 0x1 == 0 { - let data = ptr_map(ptr, |addr| addr | KIND_VEC); - Bytes { - ptr, - len, - data: AtomicPtr::new(data.cast()), - vtable: &PROMOTABLE_EVEN_VTABLE, - } + if slice.as_ptr() as usize & 0x1 == 0 { + Bytes::with_impl(PromotableEvenImpl(Promotable::Owned(slice))) } else { - Bytes { - ptr, - len, - data: AtomicPtr::new(ptr.cast()), - vtable: &PROMOTABLE_ODD_VTABLE, - } + Bytes::with_impl(PromotableOddImpl(Promotable::Owned(slice))) } } } @@ -944,65 +929,97 @@ impl fmt::Debug for Vtable { // ===== impl StaticVtable ===== -const STATIC_VTABLE: Vtable = Vtable { - clone: static_clone, - will_truncate: static_will_truncate, - into_vec: static_into_vec, - drop: static_drop, -}; +struct StaticImpl(&'static [u8]); -unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let slice = slice::from_raw_parts(ptr, len); - Bytes::from_static(slice) -} +unsafe impl BytesImpl for StaticImpl { + fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { + let mut bytes = mem::ManuallyDrop::new(Bytes::from_static(this.0)); + (mem::take(&mut bytes.data), bytes.ptr, bytes.len) + } -unsafe fn static_will_truncate(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { - // nothing to do before truncate for &'static [u8] -} + unsafe fn clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { + let slice = slice::from_raw_parts(ptr, len); + Bytes::from_static(slice) + } -unsafe fn static_into_vec(_: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - let slice = slice::from_raw_parts(ptr, len); - slice.to_vec() -} + unsafe fn into_vec(_: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + let slice = slice::from_raw_parts(ptr, len); + slice.to_vec() + } -unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { - // nothing to drop for &'static [u8] + unsafe fn drop(_: &mut AtomicPtr<()>, _: *const u8, _: 
usize) { + // nothing to drop for &'static [u8] + } } // ===== impl PromotableVtable ===== -static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable { - clone: promotable_even_clone, - will_truncate: promotable_even_will_truncate, - into_vec: promotable_even_into_vec, - drop: promotable_even_drop, -}; +struct PromotableEvenImpl(Promotable); -static PROMOTABLE_ODD_VTABLE: Vtable = Vtable { - clone: promotable_odd_clone, - will_truncate: promotable_odd_will_truncate, - into_vec: promotable_odd_into_vec, - drop: promotable_odd_drop, -}; +struct PromotableOddImpl(Promotable); -unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Acquire); - let kind = shared as usize & KIND_MASK; +enum Promotable { + Owned(Box<[u8]>), + #[allow(dead_code)] + Shared(SharedImpl), +} - if kind == KIND_ARC { - shallow_clone_arc(shared.cast(), ptr, len) - } else { - debug_assert_eq!(kind, KIND_VEC); - let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); - shallow_clone_vec(data, shared, buf, ptr, len) +unsafe impl BytesImpl for PromotableEvenImpl { + fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { + let slice = match this.0 { + Promotable::Owned(slice) => slice, + Promotable::Shared(shared) => return SharedImpl::into_bytes_parts(shared), + }; + + let len = slice.len(); + let ptr = Box::into_raw(slice) as *mut u8; + assert!(ptr as usize & 0x1 == 0); + + let data = ptr_map(ptr, |addr| addr | KIND_VEC); + + (AtomicPtr::new(data.cast()), ptr, len) + } + + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { + let shared = data.load(Ordering::Acquire); + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + shallow_clone_arc(shared.cast(), ptr, len) + } else { + debug_assert_eq!(kind, KIND_VEC); + let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); + shallow_clone_vec(data, shared, buf, ptr, len) + } + } + + unsafe fn will_truncate(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + // The Vec "promotable" vtables do not store the capacity, + // so we cannot truncate while using this repr. We *have* to + // promote using `clone` so the capacity can be stored. + drop(PromotableEvenImpl::clone(&*data, ptr, len)); } -} -unsafe fn promotable_even_will_truncate(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - // The Vec "promotable" vtables do not store the capacity, - // so we cannot truncate while using this repr. We *have* to - // promote using `clone` so the capacity can be stored. 
- drop(promotable_even_clone(&*data, ptr, len)); + unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + promotable_into_vec(data, ptr, len, |shared| { + ptr_map(shared.cast(), |addr| addr & !KIND_MASK) + }) + } + + unsafe fn drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + data.with_mut(|shared| { + let shared = *shared; + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + release_shared(shared.cast()); + } else { + debug_assert_eq!(kind, KIND_VEC); + let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); + free_boxed_slice(buf, ptr, len); + } + }); + } } unsafe fn promotable_into_vec( @@ -1031,67 +1048,57 @@ unsafe fn promotable_into_vec( } } -unsafe fn promotable_even_into_vec( - data: &mut AtomicPtr<()>, - ptr: *const u8, - len: usize, -) -> Vec { - promotable_into_vec(data, ptr, len, |shared| { - ptr_map(shared.cast(), |addr| addr & !KIND_MASK) - }) -} +unsafe impl BytesImpl for PromotableOddImpl { + fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { + let slice = match this.0 { + Promotable::Owned(slice) => slice, + Promotable::Shared(shared) => return SharedImpl::into_bytes_parts(shared), + }; -unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - data.with_mut(|shared| { - let shared = *shared; + let len = slice.len(); + let ptr = Box::into_raw(slice) as *mut u8; + assert!(ptr as usize & 0x1 == 1); + + (AtomicPtr::new(ptr.cast()), ptr, len) + } + + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { + let shared = data.load(Ordering::Acquire); let kind = shared as usize & KIND_MASK; if kind == KIND_ARC { - release_shared(shared.cast()); + shallow_clone_arc(shared as _, ptr, len) } else { debug_assert_eq!(kind, KIND_VEC); - let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); - free_boxed_slice(buf, ptr, len); + shallow_clone_vec(data, shared, shared.cast(), ptr, len) } - }); -} - -unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Acquire); - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - shallow_clone_arc(shared as _, ptr, len) - } else { - debug_assert_eq!(kind, KIND_VEC); - shallow_clone_vec(data, shared, shared.cast(), ptr, len) } -} -unsafe fn promotable_odd_will_truncate(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - // The Vec "promotable" vtables do not store the capacity, - // so we cannot truncate while using this repr. We *have* to - // promote using `clone` so the capacity can be stored. - drop(promotable_odd_clone(&*data, ptr, len)); -} + unsafe fn will_truncate(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + // The Vec "promotable" vtables do not store the capacity, + // so we cannot truncate while using this repr. We *have* to + // promote using `clone` so the capacity can be stored. 
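For orientation, a standalone sketch (not the crate's code) of the low-bit tagging the promotable impls rely on: the `data` pointer's least significant bit records whether it still refers to the original boxed slice (`KIND_VEC`) or to a promoted `Shared` (`KIND_ARC`).

    fn tagging_demo() {
        const KIND_MASK: usize = 0b1;
        const KIND_VEC: usize = 0b1;

        let slice: Box<[u8]> = vec![1u8, 2, 3].into_boxed_slice();
        let addr = slice.as_ptr() as usize;

        // A u8 allocation may start at an even or an odd address, which is why
        // there are separate "even" and "odd" promotable impls.
        if addr & 0x1 == 0 {
            // Even: the low bit is free, so it can carry KIND_VEC and be
            // masked off again before the pointer is used.
            let tagged = addr | KIND_VEC;
            assert_eq!(tagged & KIND_MASK, KIND_VEC);
            assert_eq!(tagged & !KIND_MASK, addr);
        } else {
            // Odd: the address already reads as KIND_VEC, so PromotableOddImpl
            // stores it untouched.
            assert_eq!(addr & KIND_MASK, KIND_VEC);
        }
        // `slice` is still owned here and is dropped normally.
    }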
+ drop(PromotableOddImpl::clone(&*data, ptr, len)); + } -unsafe fn promotable_odd_into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - promotable_into_vec(data, ptr, len, |shared| shared.cast()) -} + unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + promotable_into_vec(data, ptr, len, |shared| shared.cast()) + } -unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - data.with_mut(|shared| { - let shared = *shared; - let kind = shared as usize & KIND_MASK; + unsafe fn drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + data.with_mut(|shared| { + let shared = *shared; + let kind = shared as usize & KIND_MASK; - if kind == KIND_ARC { - release_shared(shared.cast()); - } else { - debug_assert_eq!(kind, KIND_VEC); + if kind == KIND_ARC { + release_shared(shared.cast()); + } else { + debug_assert_eq!(kind, KIND_VEC); - free_boxed_slice(shared.cast(), ptr, len); - } - }); + free_boxed_slice(shared.cast(), ptr, len); + } + }); + } } unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) { @@ -1120,24 +1127,35 @@ impl Drop for Shared { // This flag is set when the LSB is 0. const _: [(); 0 - mem::align_of::() % 2] = []; // Assert that the alignment of `Shared` is divisible by 2. -static SHARED_VTABLE: Vtable = Vtable { - clone: shared_clone, - will_truncate: shared_will_truncate, - into_vec: shared_into_vec, - drop: shared_drop, -}; +struct SharedImpl { + shared: *mut Shared, + offset: *const u8, + len: usize, +} const KIND_ARC: usize = 0b0; const KIND_VEC: usize = 0b1; const KIND_MASK: usize = 0b1; -unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Relaxed); - shallow_clone_arc(shared as _, ptr, len) -} +unsafe impl BytesImpl for SharedImpl { + fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { + (AtomicPtr::new(this.shared.cast()), this.offset, this.len) + } + + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { + let shared = data.load(Ordering::Relaxed); + shallow_clone_arc(shared as _, ptr, len) + } + + unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + shared_into_vec_impl((data.with_mut(|p| *p)).cast(), ptr, len) + } -unsafe fn shared_will_truncate(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { - // nothing to do before truncate for Shared + unsafe fn drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { + data.with_mut(|shared| { + release_shared(shared.cast()); + }); + } } unsafe fn shared_into_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec { @@ -1169,16 +1187,6 @@ unsafe fn shared_into_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) } } -unsafe fn shared_into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - shared_into_vec_impl((data.with_mut(|p| *p)).cast(), ptr, len) -} - -unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { - data.with_mut(|shared| { - release_shared(shared.cast()); - }); -} - unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes { let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed); @@ -1186,12 +1194,11 @@ unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> crate::abort(); } - Bytes { - ptr, + Bytes::with_impl(SharedImpl { + shared, + offset: ptr, len, - data: AtomicPtr::new(shared as _), - vtable: &SHARED_VTABLE, - } + }) } #[cold] @@ -1245,12 
+1252,11 @@ unsafe fn shallow_clone_vec( debug_assert!(actual as usize == ptr as usize); // The upgrade was successful, the new handle can be // returned. - Bytes { - ptr: offset, + Bytes::with_impl(SharedImpl { + shared, + offset, len, - data: AtomicPtr::new(shared as _), - vtable: &SHARED_VTABLE, - } + }) } Err(actual) => { // The upgrade failed, a concurrent clone happened. Release diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 248d3cca3..9749cf8af 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -13,7 +13,6 @@ use alloc::{ }; use crate::buf::{IntoIter, UninitSlice}; -use crate::bytes::Vtable; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; @@ -253,9 +252,9 @@ impl BytesMut { let ptr = self.ptr.as_ptr(); let len = self.len; - let data = AtomicPtr::new(self.data.cast()); + let shared = self.data; mem::forget(self); - unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) } + Bytes::with_impl(SharedImpl { shared, ptr, len }) } } @@ -1703,51 +1702,51 @@ unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) // ===== impl SharedVtable ===== -static SHARED_VTABLE: Vtable = Vtable { - clone: shared_v_clone, - will_truncate: shared_v_will_truncate, - into_vec: shared_v_into_vec, - drop: shared_v_drop, -}; +struct SharedImpl { + shared: *mut Shared, + ptr: *const u8, + len: usize, +} -unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Relaxed) as *mut Shared; - increment_shared(shared); +unsafe impl crate::BytesImpl for SharedImpl { + fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { + (AtomicPtr::new(this.shared.cast()), this.ptr, this.len) + } - let data = AtomicPtr::new(shared as *mut ()); - Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) -} + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { + let shared = data.load(Ordering::Relaxed) as *mut Shared; + increment_shared(shared); -unsafe fn shared_v_will_truncate(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { - // nothing to do before truncate for Shared -} + Bytes::with_impl(SharedImpl { shared, ptr, len }) + } -unsafe fn shared_v_into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - let shared: *mut Shared = (data.with_mut(|p| *p)).cast(); + unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + let shared: *mut Shared = (data.with_mut(|p| *p)).cast(); - if (*shared).is_unique() { - let shared = &mut *shared; + if (*shared).is_unique() { + let shared = &mut *shared; - // Drop shared - let mut vec = mem::replace(&mut shared.vec, Vec::new()); - release_shared(shared); + // Drop shared + let mut vec = mem::replace(&mut shared.vec, Vec::new()); + release_shared(shared); - // Copy back buffer - ptr::copy(ptr, vec.as_mut_ptr(), len); - vec.set_len(len); + // Copy back buffer + ptr::copy(ptr, vec.as_mut_ptr(), len); + vec.set_len(len); - vec - } else { - let v = slice::from_raw_parts(ptr, len).to_vec(); - release_shared(shared); - v + vec + } else { + let v = slice::from_raw_parts(ptr, len).to_vec(); + release_shared(shared); + v + } } -} -unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { - data.with_mut(|shared| { - release_shared(*shared as *mut Shared); - }); + unsafe fn drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { + data.with_mut(|shared| { + release_shared(*shared as *mut Shared); + }); + } } // 
compile-fails From a9a7440f3396b00190a5c9fc03f1f7f7948bae33 Mon Sep 17 00:00:00 2001 From: HyeonuPark Date: Sun, 21 Aug 2022 00:05:58 +0900 Subject: [PATCH 05/13] add `Bytes::downcast_impl` to extract underlying implementation --- src/bytes.rs | 77 ++++++++++++++++++++++++++++++++++++++++++++++-- src/bytes_mut.rs | 8 +++++ 2 files changed, 83 insertions(+), 2 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 1263ca1f1..069b56072 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -1,3 +1,4 @@ +use core::any::TypeId; use core::iter::FromIterator; use core::ops::{Deref, RangeBounds}; use core::{cmp, fmt, hash, mem, ptr, slice, usize}; @@ -114,6 +115,9 @@ pub unsafe trait BytesImpl: 'static { /// Decompose `Self` into parts used by `Bytes`. fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize); + /// Creates itself directly from the raw bytes parts decomposed with `into_bytes_parts`. + unsafe fn from_bytes_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self; + /// Returns new `Bytes` based on the current parts. unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes; @@ -132,6 +136,7 @@ pub unsafe trait BytesImpl: 'static { } struct Vtable { + type_id: fn() -> TypeId, /// fn(data, ptr, len) clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, /// fn(data, ptr, len) @@ -192,6 +197,7 @@ impl Bytes { #[cfg(not(all(loom, test)))] pub const fn from_static(bytes: &'static [u8]) -> Bytes { const STATIC_VTABLE: Vtable = Vtable { + type_id: TypeId::of::, clone: ::clone, will_truncate: ::will_truncate, into_vec: ::into_vec, @@ -209,6 +215,7 @@ impl Bytes { #[cfg(all(loom, test))] pub fn from_static(bytes: &'static [u8]) -> Bytes { const STATIC_VTABLE: Vtable = Vtable { + type_id: TypeId::of::, clone: ::clone, will_truncate: ::will_truncate, into_vec: ::into_vec, @@ -235,6 +242,7 @@ impl Bytes { len, data, vtable: &Vtable { + type_id: TypeId::of::, clone: T::clone, will_truncate: T::will_truncate, into_vec: T::into_vec, @@ -543,6 +551,19 @@ impl Bytes { self.truncate(0); } + /// Downcast this `Bytes` into its underlying implementation. 
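A hedged usage sketch for the downcast added below, assuming the hypothetical `MyStatic` from the earlier sketch is also given the `from_bytes_parts` constructor this patch adds to the trait (mirroring `StaticImpl::from_bytes_parts` further down).

    fn downcast_demo() {
        let b = Bytes::with_impl(MyStatic(b"abc"));
        // The vtable now records `TypeId::of::<MyStatic>()`, so this succeeds...
        let my: MyStatic = b.downcast_impl().expect("impl type matches");
        assert_eq!(my.0, b"abc");

        // ...while a mismatching type returns the original `Bytes` unchanged.
        let other = Bytes::from(vec![1u8, 2, 3]);
        assert!(other.downcast_impl::<MyStatic>().is_err());
    }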
+ #[inline] + pub fn downcast_impl(self) -> Result { + if TypeId::of::() == (self.vtable.type_id)() { + Ok(unsafe { + let this = &mut *mem::ManuallyDrop::new(self); + T::from_bytes_parts(&mut this.data, this.ptr, this.len) + }) + } else { + Err(self) + } + } + // private #[inline] @@ -919,6 +940,7 @@ impl From for Vec { impl fmt::Debug for Vtable { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Vtable") + .field("type_id", &self.type_id) .field("clone", &(self.clone as *const ())) .field("will_truncate", &(self.will_truncate as *const ())) .field("into_vec", &(self.into_vec as *const ())) @@ -934,7 +956,15 @@ struct StaticImpl(&'static [u8]); unsafe impl BytesImpl for StaticImpl { fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { let mut bytes = mem::ManuallyDrop::new(Bytes::from_static(this.0)); - (mem::take(&mut bytes.data), bytes.ptr, bytes.len) + ( + mem::replace(&mut bytes.data, AtomicPtr::default()), + bytes.ptr, + bytes.len, + ) + } + + unsafe fn from_bytes_parts(_data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { + StaticImpl(slice::from_raw_parts(ptr, len)) } unsafe fn clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { @@ -960,7 +990,6 @@ struct PromotableOddImpl(Promotable); enum Promotable { Owned(Box<[u8]>), - #[allow(dead_code)] Shared(SharedImpl), } @@ -980,6 +1009,12 @@ unsafe impl BytesImpl for PromotableEvenImpl { (AtomicPtr::new(data.cast()), ptr, len) } + unsafe fn from_bytes_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { + PromotableEvenImpl(promotable_from_bytes_parts(data, ptr, len, |shared| { + ptr_map(shared.cast(), |addr| addr & !KIND_MASK) + })) + } + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { let shared = data.load(Ordering::Acquire); let kind = shared as usize & KIND_MASK; @@ -1022,6 +1057,30 @@ unsafe impl BytesImpl for PromotableEvenImpl { } } +unsafe fn promotable_from_bytes_parts( + data: &mut AtomicPtr<()>, + ptr: *const u8, + len: usize, + f: fn(*mut ()) -> *mut u8, +) -> Promotable { + let shared = data.with_mut(|p| *p); + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + Promotable::Shared(SharedImpl::from_bytes_parts(data, ptr, len)) + } else { + debug_assert_eq!(kind, KIND_VEC); + + let buf = f(shared); + + let cap = (ptr as usize - buf as usize) + len; + + let vec = Vec::from_raw_parts(buf, cap, cap); + + Promotable::Owned(vec.into_boxed_slice()) + } +} + unsafe fn promotable_into_vec( data: &mut AtomicPtr<()>, ptr: *const u8, @@ -1062,6 +1121,12 @@ unsafe impl BytesImpl for PromotableOddImpl { (AtomicPtr::new(ptr.cast()), ptr, len) } + unsafe fn from_bytes_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { + PromotableOddImpl(promotable_from_bytes_parts(data, ptr, len, |shared| { + shared.cast() + })) + } + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { let shared = data.load(Ordering::Acquire); let kind = shared as usize & KIND_MASK; @@ -1142,6 +1207,14 @@ unsafe impl BytesImpl for SharedImpl { (AtomicPtr::new(this.shared.cast()), this.offset, this.len) } + unsafe fn from_bytes_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { + SharedImpl { + shared: (data.with_mut(|p| *p)).cast(), + offset: ptr, + len, + } + } + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { let shared = data.load(Ordering::Relaxed); shallow_clone_arc(shared as _, ptr, len) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 
9749cf8af..7308cc9f5 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1713,6 +1713,14 @@ unsafe impl crate::BytesImpl for SharedImpl { (AtomicPtr::new(this.shared.cast()), this.ptr, this.len) } + unsafe fn from_bytes_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { + SharedImpl { + shared: (data.with_mut(|p| *p)).cast(), + ptr, + len, + } + } + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { let shared = data.load(Ordering::Relaxed) as *mut Shared; increment_shared(shared); From 7f6dea2981323fc05801ce552450dc1384903c56 Mon Sep 17 00:00:00 2001 From: Rick Richardson Date: Mon, 6 Feb 2023 16:02:44 -0800 Subject: [PATCH 06/13] naming and restructing changes for the trait to vtable design --- src/bytes.rs | 557 +++----------------------------------- src/bytes_mut.rs | 13 +- src/impls/mod.rs | 3 + src/impls/promotable.rs | 298 ++++++++++++++++++++ src/impls/shared.rs | 146 ++++++++++ src/impls/static_buf.rs | 37 +++ src/lib.rs | 5 +- src/shared_buf.rs | 88 ++++++ tests/extern_buf_bytes.rs | 355 ++++++++++++++++++++++++ 9 files changed, 976 insertions(+), 526 deletions(-) create mode 100644 src/impls/mod.rs create mode 100644 src/impls/promotable.rs create mode 100644 src/impls/shared.rs create mode 100644 src/impls/static_buf.rs create mode 100644 src/shared_buf.rs create mode 100644 tests/extern_buf_bytes.rs diff --git a/src/bytes.rs b/src/bytes.rs index 069b56072..76a247f6b 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -3,18 +3,14 @@ use core::iter::FromIterator; use core::ops::{Deref, RangeBounds}; use core::{cmp, fmt, hash, mem, ptr, slice, usize}; -use alloc::{ - alloc::{dealloc, Layout}, - borrow::Borrow, - boxed::Box, - string::String, - vec::Vec, -}; +use alloc::{borrow::Borrow, boxed::Box, string::String, vec::Vec}; use crate::buf::IntoIter; +use crate::impls::*; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; -use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use crate::loom::sync::atomic::AtomicPtr; +use crate::shared_buf::{BufferParts, SharedBuf}; use crate::Buf; /// A cheaply cloneable and sliceable chunk of contiguous memory. @@ -105,45 +101,13 @@ pub struct Bytes { data: AtomicPtr<()>, vtable: &'static Vtable, } -/// A trait for underlying implementations for `Bytes` type. -/// -/// All implementations must fulfill the following requirements: -/// - They are cheaply cloneable and thereby shareable between an unlimited amount -/// of components, for example by modifying a reference count. -/// - Instances can be sliced to refer to a subset of the the original buffer. -pub unsafe trait BytesImpl: 'static { - /// Decompose `Self` into parts used by `Bytes`. - fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize); - - /// Creates itself directly from the raw bytes parts decomposed with `into_bytes_parts`. - unsafe fn from_bytes_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self; - - /// Returns new `Bytes` based on the current parts. - unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes; - - /// Called before the `Bytes::truncate` is processed. - /// Useful if the implementation needs some preparation step for it. - unsafe fn will_truncate(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - // do nothing by default - let _ = (data, ptr, len); - } - - /// Consumes underlying resources and return `Vec` - unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec; - - /// Release underlying resources. 
- unsafe fn drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize); -} struct Vtable { type_id: fn() -> TypeId, /// fn(data, ptr, len) - clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, - /// fn(data, ptr, len) - /// - /// Called before the `Bytes::truncate` is processed. - /// Useful if the implementation needs some preparation step for it. - will_truncate: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), + clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BufferParts, + /// Called during `Bytes::try_resize` and `Bytes::truncate` + try_resize: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), /// fn(data, ptr, len) /// /// Consumes `Bytes` and return `Vec` @@ -197,11 +161,11 @@ impl Bytes { #[cfg(not(all(loom, test)))] pub const fn from_static(bytes: &'static [u8]) -> Bytes { const STATIC_VTABLE: Vtable = Vtable { - type_id: TypeId::of::, - clone: ::clone, - will_truncate: ::will_truncate, - into_vec: ::into_vec, - drop: ::drop, + type_id: TypeId::of::, + clone: ::clone, + try_resize: ::try_resize, + into_vec: ::into_vec, + drop: ::drop, }; Bytes { @@ -215,11 +179,11 @@ impl Bytes { #[cfg(all(loom, test))] pub fn from_static(bytes: &'static [u8]) -> Bytes { const STATIC_VTABLE: Vtable = Vtable { - type_id: TypeId::of::, - clone: ::clone, - will_truncate: ::will_truncate, - into_vec: ::into_vec, - drop: ::drop, + type_id: TypeId::of::, + clone: ::clone, + will_truncate: ::will_truncate, + into_vec: ::into_vec, + drop: ::drop, }; Bytes { @@ -230,12 +194,12 @@ impl Bytes { } } - /// Creates a new `Bytes` from `BytesImpl` implementation. + /// Creates a new `Bytes` from `SharedBuf` implementation. /// /// Useful if you want to construct `Bytes` from your own buffer implementation. #[inline] - pub fn with_impl(bytes_impl: T) -> Bytes { - let (data, ptr, len) = BytesImpl::into_bytes_parts(bytes_impl); + pub fn from_shared_buf(buf_impl: T) -> Bytes { + let (data, ptr, len) = SharedBuf::into_parts(buf_impl); Bytes { ptr, @@ -244,7 +208,7 @@ impl Bytes { vtable: &Vtable { type_id: TypeId::of::, clone: T::clone, - will_truncate: T::will_truncate, + try_resize: T::try_resize, into_vec: T::into_vec, drop: T::drop, }, @@ -529,7 +493,7 @@ impl Bytes { pub fn truncate(&mut self, len: usize) { if len < self.len { unsafe { - (self.vtable.will_truncate)(&mut self.data, self.ptr, self.len); + (self.vtable.try_resize)(&mut self.data, self.ptr, self.len); } self.len = len; } @@ -553,11 +517,11 @@ impl Bytes { /// Downcast this `Bytes` into its underlying implementation. 
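The new `src/shared_buf.rs` is not reproduced in this excerpt. From the way `from_shared_buf`, `into_shared_buf`, and the `BytesMut` shared impl in this patch use it, the renamed trait presumably has roughly the following shape; this is an inferred reconstruction, not the file's actual contents.

    use crate::loom::sync::atomic::AtomicPtr;
    use alloc::vec::Vec;

    /// Presumably an alias for the decomposed handle returned by `clone`.
    pub type BufferParts = (AtomicPtr<()>, *const u8, usize);

    pub unsafe trait SharedBuf: 'static {
        /// Decompose `Self` into the parts stored by `Bytes`.
        fn into_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize);

        /// Rebuild `Self` from parts previously produced by `into_parts`.
        unsafe fn from_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self;

        /// Unlike `BytesImpl::clone`, this returns raw parts; `Clone for Bytes`
        /// reassembles them with the same vtable.
        unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BufferParts;

        /// Replaces `will_truncate`; per the vtable comment it is called during
        /// `Bytes::try_resize` and `Bytes::truncate`.
        unsafe fn try_resize(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize);

        unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8>;

        unsafe fn drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize);
    }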
#[inline] - pub fn downcast_impl(self) -> Result { + pub fn into_shared_buf(self) -> Result { if TypeId::of::() == (self.vtable.type_id)() { Ok(unsafe { let this = &mut *mem::ManuallyDrop::new(self); - T::from_bytes_parts(&mut this.data, this.ptr, this.len) + T::from_parts(&mut this.data, this.ptr, this.len) }) } else { Err(self) @@ -594,7 +558,13 @@ impl Drop for Bytes { impl Clone for Bytes { #[inline] fn clone(&self) -> Bytes { - unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) } + let (data, ptr, len) = unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }; + Bytes { + ptr, + len, + data, + vtable: self.vtable, + } } } @@ -915,9 +885,13 @@ impl From> for Bytes { } if slice.as_ptr() as usize & 0x1 == 0 { - Bytes::with_impl(PromotableEvenImpl(Promotable::Owned(slice))) + Bytes::from_shared_buf(promotable::PromotableEvenImpl( + promotable::Promotable::Owned(slice), + )) } else { - Bytes::with_impl(PromotableOddImpl(Promotable::Owned(slice))) + Bytes::from_shared_buf(promotable::PromotableOddImpl( + promotable::Promotable::Owned(slice), + )) } } } @@ -942,468 +916,13 @@ impl fmt::Debug for Vtable { f.debug_struct("Vtable") .field("type_id", &self.type_id) .field("clone", &(self.clone as *const ())) - .field("will_truncate", &(self.will_truncate as *const ())) + .field("try_resize", &(self.try_resize as *const ())) .field("into_vec", &(self.into_vec as *const ())) .field("drop", &(self.drop as *const ())) .finish() } } -// ===== impl StaticVtable ===== - -struct StaticImpl(&'static [u8]); - -unsafe impl BytesImpl for StaticImpl { - fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { - let mut bytes = mem::ManuallyDrop::new(Bytes::from_static(this.0)); - ( - mem::replace(&mut bytes.data, AtomicPtr::default()), - bytes.ptr, - bytes.len, - ) - } - - unsafe fn from_bytes_parts(_data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { - StaticImpl(slice::from_raw_parts(ptr, len)) - } - - unsafe fn clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let slice = slice::from_raw_parts(ptr, len); - Bytes::from_static(slice) - } - - unsafe fn into_vec(_: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - let slice = slice::from_raw_parts(ptr, len); - slice.to_vec() - } - - unsafe fn drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { - // nothing to drop for &'static [u8] - } -} - -// ===== impl PromotableVtable ===== - -struct PromotableEvenImpl(Promotable); - -struct PromotableOddImpl(Promotable); - -enum Promotable { - Owned(Box<[u8]>), - Shared(SharedImpl), -} - -unsafe impl BytesImpl for PromotableEvenImpl { - fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { - let slice = match this.0 { - Promotable::Owned(slice) => slice, - Promotable::Shared(shared) => return SharedImpl::into_bytes_parts(shared), - }; - - let len = slice.len(); - let ptr = Box::into_raw(slice) as *mut u8; - assert!(ptr as usize & 0x1 == 0); - - let data = ptr_map(ptr, |addr| addr | KIND_VEC); - - (AtomicPtr::new(data.cast()), ptr, len) - } - - unsafe fn from_bytes_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { - PromotableEvenImpl(promotable_from_bytes_parts(data, ptr, len, |shared| { - ptr_map(shared.cast(), |addr| addr & !KIND_MASK) - })) - } - - unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Acquire); - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - shallow_clone_arc(shared.cast(), ptr, len) - } else { - 
debug_assert_eq!(kind, KIND_VEC); - let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); - shallow_clone_vec(data, shared, buf, ptr, len) - } - } - - unsafe fn will_truncate(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - // The Vec "promotable" vtables do not store the capacity, - // so we cannot truncate while using this repr. We *have* to - // promote using `clone` so the capacity can be stored. - drop(PromotableEvenImpl::clone(&*data, ptr, len)); - } - - unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - promotable_into_vec(data, ptr, len, |shared| { - ptr_map(shared.cast(), |addr| addr & !KIND_MASK) - }) - } - - unsafe fn drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - data.with_mut(|shared| { - let shared = *shared; - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - release_shared(shared.cast()); - } else { - debug_assert_eq!(kind, KIND_VEC); - let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); - free_boxed_slice(buf, ptr, len); - } - }); - } -} - -unsafe fn promotable_from_bytes_parts( - data: &mut AtomicPtr<()>, - ptr: *const u8, - len: usize, - f: fn(*mut ()) -> *mut u8, -) -> Promotable { - let shared = data.with_mut(|p| *p); - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - Promotable::Shared(SharedImpl::from_bytes_parts(data, ptr, len)) - } else { - debug_assert_eq!(kind, KIND_VEC); - - let buf = f(shared); - - let cap = (ptr as usize - buf as usize) + len; - - let vec = Vec::from_raw_parts(buf, cap, cap); - - Promotable::Owned(vec.into_boxed_slice()) - } -} - -unsafe fn promotable_into_vec( - data: &mut AtomicPtr<()>, - ptr: *const u8, - len: usize, - f: fn(*mut ()) -> *mut u8, -) -> Vec { - let shared = data.with_mut(|p| *p); - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - shared_into_vec_impl(shared.cast(), ptr, len) - } else { - // If Bytes holds a Vec, then the offset must be 0. - debug_assert_eq!(kind, KIND_VEC); - - let buf = f(shared); - - let cap = (ptr as usize - buf as usize) + len; - - // Copy back buffer - ptr::copy(ptr, buf, len); - - Vec::from_raw_parts(buf, len, cap) - } -} - -unsafe impl BytesImpl for PromotableOddImpl { - fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { - let slice = match this.0 { - Promotable::Owned(slice) => slice, - Promotable::Shared(shared) => return SharedImpl::into_bytes_parts(shared), - }; - - let len = slice.len(); - let ptr = Box::into_raw(slice) as *mut u8; - assert!(ptr as usize & 0x1 == 1); - - (AtomicPtr::new(ptr.cast()), ptr, len) - } - - unsafe fn from_bytes_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { - PromotableOddImpl(promotable_from_bytes_parts(data, ptr, len, |shared| { - shared.cast() - })) - } - - unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Acquire); - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - shallow_clone_arc(shared as _, ptr, len) - } else { - debug_assert_eq!(kind, KIND_VEC); - shallow_clone_vec(data, shared, shared.cast(), ptr, len) - } - } - - unsafe fn will_truncate(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - // The Vec "promotable" vtables do not store the capacity, - // so we cannot truncate while using this repr. We *have* to - // promote using `clone` so the capacity can be stored. 
- drop(PromotableOddImpl::clone(&*data, ptr, len)); - } - - unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - promotable_into_vec(data, ptr, len, |shared| shared.cast()) - } - - unsafe fn drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { - data.with_mut(|shared| { - let shared = *shared; - let kind = shared as usize & KIND_MASK; - - if kind == KIND_ARC { - release_shared(shared.cast()); - } else { - debug_assert_eq!(kind, KIND_VEC); - - free_boxed_slice(shared.cast(), ptr, len); - } - }); - } -} - -unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) { - let cap = (offset as usize - buf as usize) + len; - dealloc(buf, Layout::from_size_align(cap, 1).unwrap()) -} - -// ===== impl SharedVtable ===== - -struct Shared { - // Holds arguments to dealloc upon Drop, but otherwise doesn't use them - buf: *mut u8, - cap: usize, - ref_cnt: AtomicUsize, -} - -impl Drop for Shared { - fn drop(&mut self) { - unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) } - } -} - -// Assert that the alignment of `Shared` is divisible by 2. -// This is a necessary invariant since we depend on allocating `Shared` a -// shared object to implicitly carry the `KIND_ARC` flag in its pointer. -// This flag is set when the LSB is 0. -const _: [(); 0 - mem::align_of::() % 2] = []; // Assert that the alignment of `Shared` is divisible by 2. - -struct SharedImpl { - shared: *mut Shared, - offset: *const u8, - len: usize, -} - -const KIND_ARC: usize = 0b0; -const KIND_VEC: usize = 0b1; -const KIND_MASK: usize = 0b1; - -unsafe impl BytesImpl for SharedImpl { - fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { - (AtomicPtr::new(this.shared.cast()), this.offset, this.len) - } - - unsafe fn from_bytes_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { - SharedImpl { - shared: (data.with_mut(|p| *p)).cast(), - offset: ptr, - len, - } - } - - unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { - let shared = data.load(Ordering::Relaxed); - shallow_clone_arc(shared as _, ptr, len) - } - - unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { - shared_into_vec_impl((data.with_mut(|p| *p)).cast(), ptr, len) - } - - unsafe fn drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { - data.with_mut(|shared| { - release_shared(shared.cast()); - }); - } -} - -unsafe fn shared_into_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec { - // Check that the ref_cnt is 1 (unique). - // - // If it is unique, then it is set to 0 with AcqRel fence for the same - // reason in release_shared. - // - // Otherwise, we take the other branch and call release_shared. 
- if (*shared) - .ref_cnt - .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed) - .is_ok() - { - let buf = (*shared).buf; - let cap = (*shared).cap; - - // Deallocate Shared - drop(Box::from_raw(shared as *mut mem::ManuallyDrop)); - - // Copy back buffer - ptr::copy(ptr, buf, len); - - Vec::from_raw_parts(buf, len, cap) - } else { - let v = slice::from_raw_parts(ptr, len).to_vec(); - release_shared(shared); - v - } -} - -unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes { - let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed); - - if old_size > usize::MAX >> 1 { - crate::abort(); - } - - Bytes::with_impl(SharedImpl { - shared, - offset: ptr, - len, - }) -} - -#[cold] -unsafe fn shallow_clone_vec( - atom: &AtomicPtr<()>, - ptr: *const (), - buf: *mut u8, - offset: *const u8, - len: usize, -) -> Bytes { - // If the buffer is still tracked in a `Vec`. It is time to - // promote the vec to an `Arc`. This could potentially be called - // concurrently, so some care must be taken. - - // First, allocate a new `Shared` instance containing the - // `Vec` fields. It's important to note that `ptr`, `len`, - // and `cap` cannot be mutated without having `&mut self`. - // This means that these fields will not be concurrently - // updated and since the buffer hasn't been promoted to an - // `Arc`, those three fields still are the components of the - // vector. - let shared = Box::new(Shared { - buf, - cap: (offset as usize - buf as usize) + len, - // Initialize refcount to 2. One for this reference, and one - // for the new clone that will be returned from - // `shallow_clone`. - ref_cnt: AtomicUsize::new(2), - }); - - let shared = Box::into_raw(shared); - - // The pointer should be aligned, so this assert should - // always succeed. - debug_assert!( - 0 == (shared as usize & KIND_MASK), - "internal: Box should have an aligned pointer", - ); - - // Try compare & swapping the pointer into the `arc` field. - // `Release` is used synchronize with other threads that - // will load the `arc` field. - // - // If the `compare_exchange` fails, then the thread lost the - // race to promote the buffer to shared. The `Acquire` - // ordering will synchronize with the `compare_exchange` - // that happened in the other thread and the `Shared` - // pointed to by `actual` will be visible. - match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) { - Ok(actual) => { - debug_assert!(actual as usize == ptr as usize); - // The upgrade was successful, the new handle can be - // returned. - Bytes::with_impl(SharedImpl { - shared, - offset, - len, - }) - } - Err(actual) => { - // The upgrade failed, a concurrent clone happened. Release - // the allocation that was made in this thread, it will not - // be needed. - let shared = Box::from_raw(shared); - mem::forget(*shared); - - // Buffer already promoted to shared storage, so increment ref - // count. - shallow_clone_arc(actual as _, offset, len) - } - } -} - -unsafe fn release_shared(ptr: *mut Shared) { - // `Shared` storage... follow the drop steps from Arc. - if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 { - return; - } - - // This fence is needed to prevent reordering of use of the data and - // deletion of the data. Because it is marked `Release`, the decreasing - // of the reference count synchronizes with this `Acquire` fence. 
This - // means that use of the data happens before decreasing the reference - // count, which happens before this fence, which happens before the - // deletion of the data. - // - // As explained in the [Boost documentation][1], - // - // > It is important to enforce any possible access to the object in one - // > thread (through an existing reference) to *happen before* deleting - // > the object in a different thread. This is achieved by a "release" - // > operation after dropping a reference (any access to the object - // > through this reference must obviously happened before), and an - // > "acquire" operation before deleting the object. - // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - // - // Thread sanitizer does not support atomic fences. Use an atomic load - // instead. - (*ptr).ref_cnt.load(Ordering::Acquire); - - // Drop the data - drop(Box::from_raw(ptr)); -} - -// Ideally we would always use this version of `ptr_map` since it is strict -// provenance compatible, but it results in worse codegen. We will however still -// use it on miri because it gives better diagnostics for people who test bytes -// code with miri. -// -// See https://github.com/tokio-rs/bytes/pull/545 for more info. -#[cfg(miri)] -fn ptr_map(ptr: *mut u8, f: F) -> *mut u8 -where - F: FnOnce(usize) -> usize, -{ - let old_addr = ptr as usize; - let new_addr = f(old_addr); - let diff = new_addr.wrapping_sub(old_addr); - ptr.wrapping_add(diff) -} - -#[cfg(not(miri))] -fn ptr_map(ptr: *mut u8, f: F) -> *mut u8 -where - F: FnOnce(usize) -> usize, -{ - let old_addr = ptr as usize; - let new_addr = f(old_addr); - new_addr as *mut u8 -} - // compile-fails /// ```compile_fail diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 7308cc9f5..026228ac8 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -16,6 +16,7 @@ use crate::buf::{IntoIter, UninitSlice}; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use crate::shared_buf::{ SharedBuf, BufferParts }; use crate::{Buf, BufMut, Bytes}; /// A unique reference to a contiguous slice of memory. 
@@ -254,7 +255,7 @@ impl BytesMut { let len = self.len; let shared = self.data; mem::forget(self); - Bytes::with_impl(SharedImpl { shared, ptr, len }) + Bytes::from_shared_buf(SharedImpl { shared, ptr, len }) } } @@ -1708,12 +1709,12 @@ struct SharedImpl { len: usize, } -unsafe impl crate::BytesImpl for SharedImpl { - fn into_bytes_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { +unsafe impl SharedBuf for SharedImpl { + fn into_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { (AtomicPtr::new(this.shared.cast()), this.ptr, this.len) } - unsafe fn from_bytes_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { + unsafe fn from_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { SharedImpl { shared: (data.with_mut(|p| *p)).cast(), ptr, @@ -1721,11 +1722,11 @@ unsafe impl crate::BytesImpl for SharedImpl { } } - unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BufferParts { let shared = data.load(Ordering::Relaxed) as *mut Shared; increment_shared(shared); - Bytes::with_impl(SharedImpl { shared, ptr, len }) + (AtomicPtr::new(shared.cast()), ptr, len) } unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { diff --git a/src/impls/mod.rs b/src/impls/mod.rs new file mode 100644 index 000000000..7e10acd14 --- /dev/null +++ b/src/impls/mod.rs @@ -0,0 +1,3 @@ +pub mod promotable; +pub mod shared; +pub mod static_buf; diff --git a/src/impls/promotable.rs b/src/impls/promotable.rs new file mode 100644 index 000000000..79f220ce3 --- /dev/null +++ b/src/impls/promotable.rs @@ -0,0 +1,298 @@ +use crate::shared_buf::{BufferParts, SharedBuf}; +use alloc::{ + alloc::{dealloc, Layout}, + boxed::Box, + vec::Vec, +}; +use core::{mem, ptr, usize}; + +use super::shared::{self, SharedImpl}; +#[allow(unused)] +use crate::loom::sync::atomic::AtomicMut; +use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +const KIND_ARC: usize = 0b0; +const KIND_VEC: usize = 0b1; +const KIND_MASK: usize = 0b1; + +// ===== impl PromotableVtable ===== + +pub(crate) struct PromotableEvenImpl(pub Promotable); + +pub(crate) struct PromotableOddImpl(pub Promotable); + +pub(crate) enum Promotable { + Owned(Box<[u8]>), + Shared(SharedImpl), +} + +unsafe impl SharedBuf for PromotableEvenImpl { + fn into_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { + let slice = match this.0 { + Promotable::Owned(slice) => slice, + Promotable::Shared(shared) => return SharedImpl::into_parts(shared), + }; + + let len = slice.len(); + let ptr = Box::into_raw(slice) as *mut u8; + assert!(ptr as usize & 0x1 == 0); + + let data = ptr_map(ptr, |addr| addr | KIND_VEC); + + (AtomicPtr::new(data.cast()), ptr, len) + } + + unsafe fn from_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { + PromotableEvenImpl(promotable_from_bytes_parts(data, ptr, len, |shared| { + ptr_map(shared.cast(), |addr| addr & !KIND_MASK) + })) + } + + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BufferParts { + let shared = data.load(Ordering::Acquire); + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + shared::shallow_clone_arc(shared.cast(), ptr, len) + } else { + debug_assert_eq!(kind, KIND_VEC); + let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); + shallow_clone_vec(data, shared, buf, ptr, len) + } + } + + unsafe fn try_resize(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + // The Vec "promotable" vtables do 
not store the capacity, + // so we cannot truncate while using this repr. We *have* to + // promote using `clone` so the capacity can be stored. + drop(PromotableEvenImpl::clone(&*data, ptr, len)); + } + + unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + promotable_into_vec(data, ptr, len, |shared| { + ptr_map(shared.cast(), |addr| addr & !KIND_MASK) + }) + } + + unsafe fn drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + data.with_mut(|shared| { + let shared = *shared; + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + shared::release_shared(shared.cast()); + } else { + debug_assert_eq!(kind, KIND_VEC); + let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); + free_boxed_slice(buf, ptr, len); + } + }); + } +} + +unsafe fn promotable_from_bytes_parts( + data: &mut AtomicPtr<()>, + ptr: *const u8, + len: usize, + f: fn(*mut ()) -> *mut u8, +) -> Promotable { + let shared = data.with_mut(|p| *p); + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + Promotable::Shared(SharedImpl::from_parts(data, ptr, len)) + } else { + debug_assert_eq!(kind, KIND_VEC); + + let buf = f(shared); + + let cap = (ptr as usize - buf as usize) + len; + + let vec = Vec::from_raw_parts(buf, cap, cap); + + Promotable::Owned(vec.into_boxed_slice()) + } +} + +unsafe fn promotable_into_vec( + data: &mut AtomicPtr<()>, + ptr: *const u8, + len: usize, + f: fn(*mut ()) -> *mut u8, +) -> Vec { + let shared = data.with_mut(|p| *p); + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + shared::shared_into_vec_impl(shared.cast(), ptr, len) + } else { + // If Bytes holds a Vec, then the offset must be 0. + debug_assert_eq!(kind, KIND_VEC); + + let buf = f(shared); + + let cap = (ptr as usize - buf as usize) + len; + + // Copy back buffer + ptr::copy(ptr, buf, len); + + Vec::from_raw_parts(buf, len, cap) + } +} + +unsafe impl SharedBuf for PromotableOddImpl { + fn into_parts(this: Self) -> BufferParts { + let slice = match this.0 { + Promotable::Owned(slice) => slice, + Promotable::Shared(shared) => return SharedImpl::into_parts(shared), + }; + + let len = slice.len(); + let ptr = Box::into_raw(slice) as *mut u8; + assert!(ptr as usize & 0x1 == 1); + + (AtomicPtr::new(ptr.cast()), ptr, len) + } + + unsafe fn from_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { + PromotableOddImpl(promotable_from_bytes_parts(data, ptr, len, |shared| { + shared.cast() + })) + } + + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BufferParts { + let shared = data.load(Ordering::Acquire); + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + shared::shallow_clone_arc(shared as _, ptr, len) + } else { + debug_assert_eq!(kind, KIND_VEC); + shallow_clone_vec(data, shared, shared.cast(), ptr, len) + } + } + + unsafe fn try_resize(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + // The Vec "promotable" vtables do not store the capacity, + // so we cannot truncate while using this repr. We *have* to + // promote using `clone` so the capacity can be stored. 
+ drop(PromotableOddImpl::clone(&*data, ptr, len)); + } + + unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + promotable_into_vec(data, ptr, len, |shared| shared.cast()) + } + + unsafe fn drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + data.with_mut(|shared| { + let shared = *shared; + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + shared::release_shared(shared.cast()); + } else { + debug_assert_eq!(kind, KIND_VEC); + + free_boxed_slice(shared.cast(), ptr, len); + } + }); + } +} + +unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) { + let cap = (offset as usize - buf as usize) + len; + dealloc(buf, Layout::from_size_align(cap, 1).unwrap()) +} + +// Ideally we would always use this version of `ptr_map` since it is strict +// provenance compatible, but it results in worse codegen. We will however still +// use it on miri because it gives better diagnostics for people who test bytes +// code with miri. +// +// See https://github.com/tokio-rs/bytes/pull/545 for more info. +#[cfg(miri)] +fn ptr_map(ptr: *mut u8, f: F) -> *mut u8 +where + F: FnOnce(usize) -> usize, +{ + let old_addr = ptr as usize; + let new_addr = f(old_addr); + let diff = new_addr.wrapping_sub(old_addr); + ptr.wrapping_add(diff) +} + +#[cfg(not(miri))] +fn ptr_map(ptr: *mut u8, f: F) -> *mut u8 +where + F: FnOnce(usize) -> usize, +{ + let old_addr = ptr as usize; + let new_addr = f(old_addr); + new_addr as *mut u8 +} + +#[cold] +unsafe fn shallow_clone_vec( + atom: &AtomicPtr<()>, + ptr: *const (), + buf: *mut u8, + offset: *const u8, + len: usize, +) -> BufferParts { + // If the buffer is still tracked in a `Vec`. It is time to + // promote the vec to an `Arc`. This could potentially be called + // concurrently, so some care must be taken. + + // First, allocate a new `Shared` instance containing the + // `Vec` fields. It's important to note that `ptr`, `len`, + // and `cap` cannot be mutated without having `&mut self`. + // This means that these fields will not be concurrently + // updated and since the buffer hasn't been promoted to an + // `Arc`, those three fields still are the components of the + // vector. + let shared = Box::new(shared::Shared { + buf, + cap: (offset as usize - buf as usize) + len, + // Initialize refcount to 2. One for this reference, and one + // for the new clone that will be returned from + // `shallow_clone`. + ref_cnt: AtomicUsize::new(2), + }); + + let shared = Box::into_raw(shared); + + // The pointer should be aligned, so this assert should + // always succeed. + debug_assert!( + 0 == (shared as usize & KIND_MASK), + "internal: Box should have an aligned pointer", + ); + + // Try compare & swapping the pointer into the `arc` field. + // `Release` is used synchronize with other threads that + // will load the `arc` field. + // + // If the `compare_exchange` fails, then the thread lost the + // race to promote the buffer to shared. The `Acquire` + // ordering will synchronize with the `compare_exchange` + // that happened in the other thread and the `Shared` + // pointed to by `actual` will be visible. + match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) { + Ok(actual) => { + debug_assert!(actual as usize == ptr as usize); + // The upgrade was successful, the new handle can be + // returned. + (AtomicPtr::new(shared.cast()), offset, len) + } + Err(actual) => { + // The upgrade failed, a concurrent clone happened. 
Release + // the allocation that was made in this thread, it will not + // be needed. + let shared = Box::from_raw(shared); + mem::forget(*shared); + + // Buffer already promoted to shared storage, so increment ref + // count. + shared::shallow_clone_arc(actual as _, offset, len) + } + } +} diff --git a/src/impls/shared.rs b/src/impls/shared.rs new file mode 100644 index 000000000..c13080838 --- /dev/null +++ b/src/impls/shared.rs @@ -0,0 +1,146 @@ +#[allow(unused)] +use crate::loom::sync::atomic::AtomicMut; +use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use crate::shared_buf::{BufferParts, SharedBuf}; +use alloc::{ + alloc::{dealloc, Layout}, + boxed::Box, + vec::Vec, +}; +use core::{mem, ptr, slice, usize}; + +// ===== impl SharedVtable ===== + +pub(crate) struct Shared { + // Holds arguments to dealloc upon Drop, but otherwise doesn't use them + pub(crate) buf: *mut u8, + pub(crate) cap: usize, + pub(crate) ref_cnt: AtomicUsize, +} + +impl Drop for Shared { + fn drop(&mut self) { + unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) } + } +} + +// Assert that the alignment of `Shared` is divisible by 2. +// This is a necessary invariant since we depend on allocating `Shared` a +// shared object to implicitly carry the `KIND_ARC` flag in its pointer. +// This flag is set when the LSB is 0. +const _: [(); 0 - mem::align_of::() % 2] = []; // Assert that the alignment of `Shared` is divisible by 2. + +pub(crate) struct SharedImpl { + shared: *mut Shared, + offset: *const u8, + len: usize, +} + +unsafe impl SharedBuf for SharedImpl { + fn into_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { + (AtomicPtr::new(this.shared.cast()), this.offset, this.len) + } + + unsafe fn from_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { + SharedImpl { + shared: (data.with_mut(|p| *p)).cast(), + offset: ptr, + len, + } + } + + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BufferParts { + let shared = data.load(Ordering::Relaxed); + shallow_clone_arc(shared as _, ptr, len) + } + + unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + shared_into_vec_impl((data.with_mut(|p| *p)).cast(), ptr, len) + } + + unsafe fn drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { + data.with_mut(|shared| { + release_shared(shared.cast()); + }); + } +} + +pub(crate) unsafe fn shared_into_vec_impl( + shared: *mut Shared, + ptr: *const u8, + len: usize, +) -> Vec { + // Check that the ref_cnt is 1 (unique). + // + // If it is unique, then it is set to 0 with AcqRel fence for the same + // reason in release_shared. + // + // Otherwise, we take the other branch and call release_shared. 
+ if (*shared) + .ref_cnt + .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed) + .is_ok() + { + let buf = (*shared).buf; + let cap = (*shared).cap; + + // Deallocate Shared + drop(Box::from_raw(shared as *mut mem::ManuallyDrop)); + + // Copy back buffer + ptr::copy(ptr, buf, len); + + Vec::from_raw_parts(buf, len, cap) + } else { + let v = slice::from_raw_parts(ptr, len).to_vec(); + release_shared(shared); + v + } +} + +pub(crate) unsafe fn shallow_clone_arc( + shared: *mut Shared, + ptr: *const u8, + len: usize, +) -> BufferParts { + let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed); + + if old_size > usize::MAX >> 1 { + crate::abort(); + } + + let shared = AtomicPtr::new(shared.cast()); + (shared, ptr, len) +} + +pub(crate) unsafe fn release_shared(ptr: *mut Shared) { + // `Shared` storage... follow the drop steps from Arc. + if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 { + return; + } + + // This fence is needed to prevent reordering of use of the data and + // deletion of the data. Because it is marked `Release`, the decreasing + // of the reference count synchronizes with this `Acquire` fence. This + // means that use of the data happens before decreasing the reference + // count, which happens before this fence, which happens before the + // deletion of the data. + // + // As explained in the [Boost documentation][1], + // + // > It is important to enforce any possible access to the object in one + // > thread (through an existing reference) to *happen before* deleting + // > the object in a different thread. This is achieved by a "release" + // > operation after dropping a reference (any access to the object + // > through this reference must obviously happened before), and an + // > "acquire" operation before deleting the object. + // + // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) + // + // Thread sanitizer does not support atomic fences. Use an atomic load + // instead. 
+ (*ptr).ref_cnt.load(Ordering::Acquire); + + // Drop the data + drop(Box::from_raw(ptr)); +} diff --git a/src/impls/static_buf.rs b/src/impls/static_buf.rs new file mode 100644 index 000000000..8558935ab --- /dev/null +++ b/src/impls/static_buf.rs @@ -0,0 +1,37 @@ +#[allow(unused)] +use crate::loom::sync::atomic::AtomicPtr; +use crate::shared_buf::{BufferParts, SharedBuf}; +use alloc::vec::Vec; +use core::{ptr, slice, usize}; +// ===== impl StaticVtable ===== + +pub(crate) struct StaticImpl(&'static [u8]); + +unsafe impl SharedBuf for StaticImpl { + fn into_parts(this: Self) -> BufferParts { + ( + AtomicPtr::new(ptr::null_mut()), + this.0.as_ptr(), + this.0.len(), + ) + } + + unsafe fn from_parts(_data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self { + StaticImpl(slice::from_raw_parts(ptr, len)) + } + + unsafe fn clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BufferParts { + let slice = slice::from_raw_parts(ptr, len); + + (AtomicPtr::new(ptr::null_mut()), slice.as_ptr(), slice.len()) + } + + unsafe fn into_vec(_: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + let slice = slice::from_raw_parts(ptr, len); + slice.to_vec() + } + + unsafe fn drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { + // nothing to drop for &'static [u8] + } +} diff --git a/src/lib.rs b/src/lib.rs index 2a5f7fadb..6c9b52137 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -88,8 +88,11 @@ mod bytes; mod bytes_mut; mod fmt; mod loom; -pub use crate::bytes::{Bytes, BytesImpl}; +mod shared_buf; +mod impls; +pub use crate::bytes::Bytes; pub use crate::bytes_mut::BytesMut; +pub use crate::shared_buf::{ SharedBuf, BufferParts }; // Optional Serde support #[cfg(feature = "serde")] diff --git a/src/shared_buf.rs b/src/shared_buf.rs new file mode 100644 index 000000000..22e4d1086 --- /dev/null +++ b/src/shared_buf.rs @@ -0,0 +1,88 @@ +/// Refcounted Immutable Buffer +#[allow(unused)] +use crate::loom::sync::atomic::AtomicMut; +use crate::loom::sync::atomic::AtomicPtr; +use alloc::vec::Vec; + +/// A type alias for the tuple of: +/// 0. The data pointer referencing the container type used by the Bytes Instance +/// 1. The pointer offset into the buffer. +/// 2. The size of the buffer pointed to by [`ptr`] +pub type BufferParts = (AtomicPtr<()>, *const u8, usize); + +/// A trait that describes the inner shared buffer for [`Bytes`] types. +/// +/// The methods of the trait are all associated functions which are used as function +/// pointers in inner VTable implementation of the various modes of a [`Bytes`] instance. +/// +/// An implementor of this trait must be cheaply clonable, and feature a singular buffer +/// which can be safely sliced in any fashion between the bounds of it's pointer and its `len`. +/// +/// The remaining trait functions all take 3 parameters, which represent the state of the [`Bytes`] +/// instance that invoked the function. +/// The `data` param of each trait function equal the `AtomicPtr<()>` returned by into_parts. +/// The `ptr` param is the offset pointer into Self's buffer currently utilized in the calling [`Bytes`] instance. +/// The `len` param is the length of the slice from `ptr` currently utilized in the calling [`Bytes`] instance. +/// +/// For implementors that leverage refcounting, typically some sort of Wrapper struct +/// will need to act as a proxy between the [`Bytes`] instance and the inner type which does the +/// reference counting and manages its Buffer. This is similar to the implementation of [`Arc`]. 
+///
+/// # Example
+///
+/// [Here is an example implementation](https://github.com/tokio-rs/bytes/blob/master/tests/extern_buf_bytes.rs#L58)
+///
+/// # Safety
+///
+/// This trait deals exclusively with raw pointers. These functions will cause UB if:
+/// * The data pointer is NULL and the implemented functions expect a valid pointer.
+/// * [`ptr`] is NULL or outside of the bounds of an allocated buffer.
+/// * The len exceeds the capacity of the buffer pointed to by [`ptr`] and/or [`data`].
+/// * The drop function deallocates the buffer in a different manner than it was allocated.
+///
+/// [`Arc`]: std::sync::Arc
+pub unsafe trait SharedBuf: 'static {
+    /// Decompose `Self` into parts used by `Bytes`.
+    fn into_parts(this: Self) -> BufferParts;
+
+    /// Creates itself directly from the raw parts decomposed with `into_parts`.
+    ///
+    /// # Safety
+    ///
+    /// The implementation of this function must ensure that `data`, `ptr` and `len` are valid.
+    unsafe fn from_parts(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Self;
+
+    /// (Possibly) increases the reference count, then
+    /// returns the parts necessary to construct a new `Bytes` instance.
+    ///
+    /// # Safety
+    ///
+    /// The implementation of this function must ensure that `data`, `ptr` and `len` are valid.
+    unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BufferParts;
+
+    /// Called before `Bytes::truncate` is processed.
+    /// Useful if the implementation needs some preparation step for it.
+    ///
+    /// # Safety
+    ///
+    /// The implementation of this function must ensure that `data`, `ptr` and `len` are valid.
+    unsafe fn try_resize(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
+        let (_, _, _) = (data, ptr, len);
+    }
+
+    /// Consumes the underlying resources and returns a `Vec<u8>`, usually with an allocation.
+    ///
+    /// # Safety
+    ///
+    /// The implementation of this function must ensure that `data`, `ptr` and `len` are valid.
+    unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8>;
+
+    /// Release underlying resources.
+    /// Decrement a refcount.
+ /// If refcount == 0 then drop or otherwise deallocate any resources allocated by T + /// + /// # Safety + /// + /// The implementation of this function must ensure that data and ptr and len are valid + unsafe fn drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize); +} diff --git a/tests/extern_buf_bytes.rs b/tests/extern_buf_bytes.rs new file mode 100644 index 000000000..8efb860cd --- /dev/null +++ b/tests/extern_buf_bytes.rs @@ -0,0 +1,355 @@ +#![warn(rust_2018_idioms)] + +use bytes::{Buf, BufMut, BufferParts, Bytes, BytesMut, SharedBuf}; + +use std::alloc::{alloc, dealloc, Layout}; +use std::ptr::{self, NonNull}; +use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use std::usize; + +const LONG: &[u8] = b"mary had a little lamb, little lamb, little lamb"; +const SHORT: &[u8] = b"hello world"; + +struct ExternBuf { + ptr: NonNull, + cap: usize, + ref_count: AtomicUsize, +} + +impl ExternBuf { + // We're pretending that this is some sort of exotic allocation/recycling scheme + pub fn from_size(sz: usize) -> Self { + let layout = Layout::from_size_align(sz, 4).unwrap(); + let ptr = NonNull::new(unsafe { alloc(layout) }).unwrap(); + ExternBuf { + ptr, + cap: sz, + ref_count: AtomicUsize::new(1), + } + } + + pub fn into_shared(self) -> ExternBufWrapper { + let b = Box::new(self); + let inner = Box::into_raw(b); + ExternBufWrapper { inner } + } +} + +impl From<&[u8]> for ExternBuf { + fn from(buf: &[u8]) -> Self { + let sz = buf.len(); + let newbuf = ExternBuf::from_size(sz); + unsafe { ptr::copy_nonoverlapping(buf.as_ptr(), newbuf.ptr.as_ptr(), sz) }; + newbuf + } +} + +impl Drop for ExternBuf { + fn drop(&mut self) { + let layout = Layout::from_size_align(self.cap, 4).unwrap(); + unsafe { dealloc(self.ptr.as_mut(), layout) }; + } +} + +struct ExternBufWrapper { + inner: *mut ExternBuf, +} + +unsafe impl SharedBuf for ExternBufWrapper { + fn into_parts(this: Self) -> BufferParts { + unsafe { + ( + AtomicPtr::new(this.inner.cast()), + (*this.inner).ptr.as_ptr(), + (*this.inner).cap, + ) + } + } + + unsafe fn from_parts(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) -> Self { + let inner = data.load(Ordering::Acquire).cast(); + ExternBufWrapper { inner } + } + + unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BufferParts { + let inner: *mut ExternBuf = data.load(Ordering::Acquire).cast(); + let old_size = (*inner).ref_count.fetch_add(1, Ordering::Release); + if old_size > usize::MAX >> 1 { + panic!("wat"); + } + (AtomicPtr::new(inner.cast()), ptr, len) + } + + unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec { + let inner: *mut ExternBuf = (*data.get_mut()).cast(); + if (*inner) + .ref_count + .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed) + .is_ok() + { + let buf = (*inner).ptr; + let cap = (*inner).cap; + + // Deallocate Shared + drop(Box::from_raw( + inner as *mut std::mem::ManuallyDrop, + )); + + // Copy back buffer + ptr::copy(ptr, buf.as_ptr(), len); + + Vec::from_raw_parts(buf.as_ptr(), len, cap) + } else { + let v = std::slice::from_raw_parts(ptr, len).to_vec(); + Self::drop(data, ptr, len); + v + } + } + + unsafe fn drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { + let inner: *mut ExternBuf = (*data.get_mut()).cast(); + if (*inner).ref_count.fetch_sub(1, Ordering::Release) != 1 { + return; + } + (*inner).ref_count.load(Ordering::Acquire); + drop(Box::from_raw(inner)); + } +} + +fn is_sync() {} +fn is_send() {} + +#[test] +fn test_bounds() { + is_sync::(); + is_sync::(); + 
is_send::(); + is_send::(); +} + +#[test] +fn test_layout() { + use std::mem; + + assert_eq!( + mem::size_of::(), + mem::size_of::() * 4, + "Bytes size should be 4 words", + ); + assert_eq!( + mem::size_of::(), + mem::size_of::() * 4, + "BytesMut should be 4 words", + ); + + assert_eq!( + mem::size_of::(), + mem::size_of::>(), + "Bytes should be same size as Option", + ); + + assert_eq!( + mem::size_of::(), + mem::size_of::>(), + "BytesMut should be same size as Option", + ); +} + +#[test] +fn roundtrip() { + let eb = ExternBuf::from(&b"abcdefgh"[..]); + let a = Bytes::from_shared_buf(eb.into_shared()); + let ebw = a.into_shared_buf::().unwrap(); + let a = Bytes::from_shared_buf(ebw); + let ebw2 = a.into_shared_buf::().unwrap(); + let a2 = Bytes::from_shared_buf(ebw2); + assert_eq!(a2, b"abcdefgh"[..]); +} + +#[test] +fn from_slice() { + let eb = ExternBuf::from(&b"abcdefgh"[..]); + let a = Bytes::from_shared_buf(eb.into_shared()); + assert_eq!(a, b"abcdefgh"[..]); + assert_eq!(a, &b"abcdefgh"[..]); + assert_eq!(a, Vec::from(&b"abcdefgh"[..])); + assert_eq!(b"abcdefgh"[..], a); + assert_eq!(&b"abcdefgh"[..], a); + assert_eq!(Vec::from(&b"abcdefgh"[..]), a); + + let eb = ExternBuf::from(&b"abcdefgh"[..]); + let a = Bytes::from_shared_buf(eb.into_shared()); + assert_eq!(a, b"abcdefgh"[..]); + assert_eq!(a, &b"abcdefgh"[..]); + assert_eq!(a, Vec::from(&b"abcdefgh"[..])); + assert_eq!(b"abcdefgh"[..], a); + assert_eq!(&b"abcdefgh"[..], a); + assert_eq!(Vec::from(&b"abcdefgh"[..]), a); +} + +#[test] +fn fmt() { + let a = format!("{:?}", Bytes::from(&b"abcdefg"[..])); + let b = "b\"abcdefg\""; + + assert_eq!(a, b); + + let a = format!("{:?}", BytesMut::from(&b"abcdefg"[..])); + assert_eq!(a, b); +} + +#[test] +fn len() { + let eb = ExternBuf::from(&b"abcdefg"[..]); + let a = Bytes::from_shared_buf(eb.into_shared()); + assert_eq!(a.len(), 7); + + let eb = ExternBuf::from(&b""[..]); + let a = Bytes::from_shared_buf(eb.into_shared()); + assert!(a.is_empty()); +} + +#[test] +fn index() { + let eb = ExternBuf::from(&b"hello world"[..]); + let a = Bytes::from_shared_buf(eb.into_shared()); + assert_eq!(a[0..5], *b"hello"); +} + +#[test] +fn slice() { + let eb = ExternBuf::from(&b"hello world"[..]); + let a = Bytes::from_shared_buf(eb.into_shared()); + + let b = a.slice(3..5); + assert_eq!(b, b"lo"[..]); + + let b = a.slice(0..0); + assert_eq!(b, b""[..]); + + let b = a.slice(3..3); + assert_eq!(b, b""[..]); + + let b = a.slice(a.len()..a.len()); + assert_eq!(b, b""[..]); + + let b = a.slice(..5); + assert_eq!(b, b"hello"[..]); + + let b = a.slice(3..); + assert_eq!(b, b"lo world"[..]); +} + +#[test] +#[should_panic] +fn slice_oob_1() { + let eb = ExternBuf::from(&b"hello world"[..]); + let a = Bytes::from_shared_buf(eb.into_shared()); + a.slice(5..44); +} + +#[test] +#[should_panic] +fn slice_oob_2() { + let eb = ExternBuf::from(&b"hello world"[..]); + let a = Bytes::from_shared_buf(eb.into_shared()); + a.slice(44..49); +} + +#[test] +fn split_off() { + let eb = ExternBuf::from(&b"helloworld"[..]); + let mut hello = Bytes::from_shared_buf(eb.into_shared()); + let world = hello.split_off(5); + + assert_eq!(hello, &b"hello"[..]); + assert_eq!(world, &b"world"[..]); +} + +#[test] +#[should_panic] +fn split_off_oob() { + let eb = ExternBuf::from(&b"helloworld"[..]); + let mut hello = Bytes::from_shared_buf(eb.into_shared()); + let _ = hello.split_off(44); +} + +#[test] +fn split_off_to_loop() { + let s = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; + + for i in 0..(s.len() + 1) { + { + let eb = 
ExternBuf::from(&s[..]); + let mut bytes = Bytes::from_shared_buf(eb.into_shared()); + let off = bytes.split_off(i); + assert_eq!(i, bytes.len()); + let mut sum = Vec::new(); + sum.extend(bytes.iter()); + sum.extend(off.iter()); + assert_eq!(&s[..], &sum[..]); + } + { + let eb = ExternBuf::from(&s[..]); + let mut bytes = Bytes::from_shared_buf(eb.into_shared()); + let off = bytes.split_to(i); + assert_eq!(i, off.len()); + let mut sum = Vec::new(); + sum.extend(off.iter()); + sum.extend(bytes.iter()); + assert_eq!(&s[..], &sum[..]); + } + } +} + +#[test] +fn truncate() { + let s = &b"helloworld"[..]; + let eb = ExternBuf::from(&s[..]); + let mut hello = Bytes::from_shared_buf(eb.into_shared()); + hello.truncate(15); + assert_eq!(hello, s); + hello.truncate(10); + assert_eq!(hello, s); + hello.truncate(5); + assert_eq!(hello, "hello"); +} + +#[test] +// Only run these tests on little endian systems. CI uses qemu for testing +// big endian... and qemu doesn't really support threading all that well. +#[cfg(any(miri, target_endian = "little"))] +fn stress() { + // Tests promoting a buffer from a vec -> shared in a concurrent situation + use std::sync::{Arc, Barrier}; + use std::thread; + + const THREADS: usize = 8; + const ITERS: usize = if cfg!(miri) { 100 } else { 1_000 }; + + for i in 0..ITERS { + let data = [i as u8; 256]; + let eb = ExternBuf::from(&data[..]); + let buf = Arc::new(Bytes::from_shared_buf(eb.into_shared())); + + let barrier = Arc::new(Barrier::new(THREADS)); + let mut joins = Vec::with_capacity(THREADS); + + for _ in 0..THREADS { + let c = barrier.clone(); + let buf = buf.clone(); + + joins.push(thread::spawn(move || { + c.wait(); + let buf: Bytes = (*buf).clone(); + drop(buf); + })); + } + + for th in joins { + th.join().unwrap(); + } + + assert_eq!(*buf, data[..]); + } +} From 675f6fd093e51dfa29dd069d8b8305de8b325573 Mon Sep 17 00:00:00 2001 From: Rick Richardson Date: Mon, 6 Feb 2023 16:22:04 -0800 Subject: [PATCH 07/13] more doc updates --- src/bytes.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 76a247f6b..6b304efea 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -194,9 +194,10 @@ impl Bytes { } } - /// Creates a new `Bytes` from `SharedBuf` implementation. + /// Creates a new `Bytes` instance using an impl of [`SharedBuf`] as the internal buffer. /// - /// Useful if you want to construct `Bytes` from your own buffer implementation. + /// This takes an impl of `SharedBuf`, and wraps it in a `Bytes` instance. + /// This can be reversed with the [`into_shared_buf`] method. #[inline] pub fn from_shared_buf(buf_impl: T) -> Bytes { let (data, ptr, len) = SharedBuf::into_parts(buf_impl); @@ -516,6 +517,14 @@ impl Bytes { } /// Downcast this `Bytes` into its underlying implementation. + /// + /// The target type, T, must match the type that was originally used + /// to construct this `Bytes` instance. A runtime check is used + /// to validate this. + /// + /// On success, T is returned. + /// + /// On failure, self is returned as an `Err` #[inline] pub fn into_shared_buf(self) -> Result { if TypeId::of::() == (self.vtable.type_id)() { From a4c65af53740ceb3b20b7dbc44b6e09df92db05b Mon Sep 17 00:00:00 2001 From: Rick Richardson Date: Mon, 6 Feb 2023 17:46:50 -0800 Subject: [PATCH 08/13] tweaks to make the latest merge work.. 
also tracking down a miri issue --- src/bytes.rs | 20 +++-------- src/impls/shared.rs | 10 ++++++ tests/extern_buf_bytes.rs | 76 +++++++++++++++++++++------------------ 3 files changed, 56 insertions(+), 50 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 6b304efea..1158ec459 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -9,7 +9,7 @@ use crate::buf::IntoIter; use crate::impls::*; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; -use crate::loom::sync::atomic::AtomicPtr; +use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize}; use crate::shared_buf::{BufferParts, SharedBuf}; use crate::Buf; @@ -861,26 +861,14 @@ impl From> for Bytes { return Bytes::from(vec.into_boxed_slice()); } - let shared = Box::new(Shared { + let shared = Box::new(crate::impls::shared::Shared { buf: ptr, cap, ref_cnt: AtomicUsize::new(1), }); mem::forget(vec); - - let shared = Box::into_raw(shared); - // The pointer should be aligned, so this assert should - // always succeed. - debug_assert!( - 0 == (shared as usize & KIND_MASK), - "internal: Box should have an aligned pointer", - ); - Bytes { - ptr, - len, - data: AtomicPtr::new(shared as _), - vtable: &SHARED_VTABLE, - } + let imp = crate::impls::shared::SharedImpl::new(Box::into_raw(shared), ptr, len); + Bytes::from_shared_buf(imp) } } diff --git a/src/impls/shared.rs b/src/impls/shared.rs index c13080838..0203a99fb 100644 --- a/src/impls/shared.rs +++ b/src/impls/shared.rs @@ -36,6 +36,16 @@ pub(crate) struct SharedImpl { len: usize, } +impl SharedImpl { + pub(crate) fn new(shared: *mut Shared, ptr: *const u8, len: usize) -> Self { + SharedImpl { + shared, + offset: ptr, + len + } + } +} + unsafe impl SharedBuf for SharedImpl { fn into_parts(this: Self) -> (AtomicPtr<()>, *const u8, usize) { (AtomicPtr::new(this.shared.cast()), this.offset, this.len) diff --git a/tests/extern_buf_bytes.rs b/tests/extern_buf_bytes.rs index 8efb860cd..989c380a0 100644 --- a/tests/extern_buf_bytes.rs +++ b/tests/extern_buf_bytes.rs @@ -1,15 +1,12 @@ #![warn(rust_2018_idioms)] -use bytes::{Buf, BufMut, BufferParts, Bytes, BytesMut, SharedBuf}; +use bytes::{BufferParts, Bytes, BytesMut, SharedBuf}; use std::alloc::{alloc, dealloc, Layout}; use std::ptr::{self, NonNull}; use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use std::usize; -const LONG: &[u8] = b"mary had a little lamb, little lamb, little lamb"; -const SHORT: &[u8] = b"hello world"; - struct ExternBuf { ptr: NonNull, cap: usize, @@ -21,6 +18,8 @@ impl ExternBuf { pub fn from_size(sz: usize) -> Self { let layout = Layout::from_size_align(sz, 4).unwrap(); let ptr = NonNull::new(unsafe { alloc(layout) }).unwrap(); + let num = ptr.as_ptr() as usize; + println!("Alloc'd {}", num); ExternBuf { ptr, cap: sz, @@ -47,7 +46,11 @@ impl From<&[u8]> for ExternBuf { impl Drop for ExternBuf { fn drop(&mut self) { let layout = Layout::from_size_align(self.cap, 4).unwrap(); - unsafe { dealloc(self.ptr.as_mut(), layout) }; + unsafe { + let num = self.ptr.as_ptr() as usize; + println!("dealloc'ing {}", num); + dealloc(self.ptr.as_mut(), layout); + } } } @@ -90,7 +93,6 @@ unsafe impl SharedBuf for ExternBufWrapper { let buf = (*inner).ptr; let cap = (*inner).cap; - // Deallocate Shared drop(Box::from_raw( inner as *mut std::mem::ManuallyDrop, )); @@ -112,6 +114,10 @@ unsafe impl SharedBuf for ExternBufWrapper { return; } (*inner).ref_count.load(Ordering::Acquire); + println!( + "invoking drop over box::from_raw on {}", + (*inner).ptr.as_ptr() as usize + ); drop(Box::from_raw(inner)); } } @@ -119,6 +125,7 @@ 
unsafe impl SharedBuf for ExternBufWrapper { fn is_sync() {} fn is_send() {} +#[ignore] #[test] fn test_bounds() { is_sync::(); @@ -127,6 +134,7 @@ fn test_bounds() { is_send::(); } +#[ignore] #[test] fn test_layout() { use std::mem; @@ -155,6 +163,7 @@ fn test_layout() { ); } +#[ignore] #[test] fn roundtrip() { let eb = ExternBuf::from(&b"abcdefgh"[..]); @@ -168,36 +177,26 @@ fn roundtrip() { #[test] fn from_slice() { - let eb = ExternBuf::from(&b"abcdefgh"[..]); - let a = Bytes::from_shared_buf(eb.into_shared()); - assert_eq!(a, b"abcdefgh"[..]); - assert_eq!(a, &b"abcdefgh"[..]); - assert_eq!(a, Vec::from(&b"abcdefgh"[..])); - assert_eq!(b"abcdefgh"[..], a); - assert_eq!(&b"abcdefgh"[..], a); - assert_eq!(Vec::from(&b"abcdefgh"[..]), a); - - let eb = ExternBuf::from(&b"abcdefgh"[..]); - let a = Bytes::from_shared_buf(eb.into_shared()); - assert_eq!(a, b"abcdefgh"[..]); - assert_eq!(a, &b"abcdefgh"[..]); - assert_eq!(a, Vec::from(&b"abcdefgh"[..])); - assert_eq!(b"abcdefgh"[..], a); - assert_eq!(&b"abcdefgh"[..], a); - assert_eq!(Vec::from(&b"abcdefgh"[..]), a); -} - -#[test] -fn fmt() { - let a = format!("{:?}", Bytes::from(&b"abcdefg"[..])); - let b = "b\"abcdefg\""; - - assert_eq!(a, b); - - let a = format!("{:?}", BytesMut::from(&b"abcdefg"[..])); - assert_eq!(a, b); + let eb1 = ExternBuf::from(&b"abcdefgh"[..]); + let a1 = Bytes::from_shared_buf(eb1.into_shared()); + assert_eq!(a1, b"abcdefgh"[..]); + assert_eq!(a1, &b"abcdefgh"[..]); + assert_eq!(a1, Vec::from(&b"abcdefgh"[..])); + assert_eq!(b"abcdefgh"[..], a1); + assert_eq!(&b"abcdefgh"[..], a1); + assert_eq!(Vec::from(&b"abcdefgh"[..]), a1); + + let eb2 = ExternBuf::from(&b"abcdefgh"[..]); + let a2 = Bytes::from_shared_buf(eb2.into_shared()); + assert_eq!(a2, b"abcdefgh"[..]); + assert_eq!(a2, &b"abcdefgh"[..]); + assert_eq!(a2, Vec::from(&b"abcdefgh"[..])); + assert_eq!(b"abcdefgh"[..], a2); + assert_eq!(&b"abcdefgh"[..], a2); + assert_eq!(Vec::from(&b"abcdefgh"[..]), a2); } +#[ignore] #[test] fn len() { let eb = ExternBuf::from(&b"abcdefg"[..]); @@ -209,6 +208,7 @@ fn len() { assert!(a.is_empty()); } +#[ignore] #[test] fn index() { let eb = ExternBuf::from(&b"hello world"[..]); @@ -216,6 +216,7 @@ fn index() { assert_eq!(a[0..5], *b"hello"); } +#[ignore] #[test] fn slice() { let eb = ExternBuf::from(&b"hello world"[..]); @@ -240,6 +241,7 @@ fn slice() { assert_eq!(b, b"lo world"[..]); } +#[ignore] #[test] #[should_panic] fn slice_oob_1() { @@ -248,6 +250,7 @@ fn slice_oob_1() { a.slice(5..44); } +#[ignore] #[test] #[should_panic] fn slice_oob_2() { @@ -256,6 +259,7 @@ fn slice_oob_2() { a.slice(44..49); } +#[ignore] #[test] fn split_off() { let eb = ExternBuf::from(&b"helloworld"[..]); @@ -266,6 +270,7 @@ fn split_off() { assert_eq!(world, &b"world"[..]); } +#[ignore] #[test] #[should_panic] fn split_off_oob() { @@ -274,6 +279,7 @@ fn split_off_oob() { let _ = hello.split_off(44); } +#[ignore] #[test] fn split_off_to_loop() { let s = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; @@ -302,6 +308,7 @@ fn split_off_to_loop() { } } +#[ignore] #[test] fn truncate() { let s = &b"helloworld"[..]; @@ -315,6 +322,7 @@ fn truncate() { assert_eq!(hello, "hello"); } +#[ignore] #[test] // Only run these tests on little endian systems. CI uses qemu for testing // big endian... and qemu doesn't really support threading all that well. 
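The `SharedBuf`/`BufferParts` surface introduced above is enough to plug a completely external buffer type into `Bytes`, as the `ExternBuf` test demonstrates. For readers who want something smaller than that test, here is a minimal sketch under the assumption that the trait looks exactly as in `src/shared_buf.rs`; the `ArcVecBuf` type, its always-copy `into_vec` strategy, and the demo function are illustrative choices, not part of the patch series.

```rust
use std::sync::atomic::{AtomicPtr, Ordering};
use std::sync::Arc;

use bytes::{BufferParts, Bytes, SharedBuf};

/// Hypothetical example type: an immutable byte buffer shared through an `Arc<Vec<u8>>`.
struct ArcVecBuf(Arc<Vec<u8>>);

unsafe impl SharedBuf for ArcVecBuf {
    fn into_parts(this: Self) -> BufferParts {
        let ptr = this.0.as_ptr();
        let len = this.0.len();
        // Stash the Arc behind the data word so `clone`, `into_vec` and `drop` can recover it.
        let data = Arc::into_raw(this.0) as *mut ();
        (AtomicPtr::new(data), ptr, len)
    }

    unsafe fn from_parts(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) -> Self {
        // Take over the reference count held by the data word.
        ArcVecBuf(Arc::from_raw(*data.get_mut() as *const Vec<u8>))
    }

    unsafe fn clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BufferParts {
        let raw = data.load(Ordering::Acquire) as *const Vec<u8>;
        // Bump the strong count for the new handle without disturbing our own.
        Arc::increment_strong_count(raw);
        (AtomicPtr::new(raw as *mut ()), ptr, len)
    }

    // `try_resize` keeps the trait's default no-op: the Arc already tracks the capacity.

    unsafe fn into_vec(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
        // Simplest strategy: copy the viewed slice out, then release this handle's count.
        let v = std::slice::from_raw_parts(ptr, len).to_vec();
        Self::drop(data, ptr, len);
        v
    }

    unsafe fn drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
        // Rebuild and drop the Arc; the Vec is freed once the last handle is gone.
        drop(Arc::from_raw(*data.get_mut() as *const Vec<u8>));
    }
}

fn main() {
    let b = Bytes::from_shared_buf(ArcVecBuf(Arc::new(b"hello world".to_vec())));
    assert_eq!(&b[..5], &b"hello"[..]);
}
```

Unlike the promotable vtables, this wrapper never has to special-case truncation because the `Arc` allocation always remembers its own capacity, which is why the default `try_resize` suffices.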
From 8cff46b43c42e9ff86a119ffde71e762c5b030c6 Mon Sep 17 00:00:00 2001 From: Rick Richardson Date: Tue, 7 Feb 2023 08:09:09 -0800 Subject: [PATCH 09/13] formatting --- src/bytes_mut.rs | 2 +- src/impls/shared.rs | 2 +- src/lib.rs | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 026228ac8..b1470d0b0 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -16,7 +16,7 @@ use crate::buf::{IntoIter, UninitSlice}; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; -use crate::shared_buf::{ SharedBuf, BufferParts }; +use crate::shared_buf::{BufferParts, SharedBuf}; use crate::{Buf, BufMut, Bytes}; /// A unique reference to a contiguous slice of memory. diff --git a/src/impls/shared.rs b/src/impls/shared.rs index 0203a99fb..4656743b0 100644 --- a/src/impls/shared.rs +++ b/src/impls/shared.rs @@ -41,7 +41,7 @@ impl SharedImpl { SharedImpl { shared, offset: ptr, - len + len, } } } diff --git a/src/lib.rs b/src/lib.rs index 6c9b52137..18a453404 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -87,12 +87,12 @@ pub use crate::buf::{Buf, BufMut}; mod bytes; mod bytes_mut; mod fmt; +mod impls; mod loom; mod shared_buf; -mod impls; pub use crate::bytes::Bytes; pub use crate::bytes_mut::BytesMut; -pub use crate::shared_buf::{ SharedBuf, BufferParts }; +pub use crate::shared_buf::{BufferParts, SharedBuf}; // Optional Serde support #[cfg(feature = "serde")] From eb526f58719bfba95cc8cae9be46afae999fed9f Mon Sep 17 00:00:00 2001 From: Rick Richardson Date: Tue, 7 Feb 2023 08:13:41 -0800 Subject: [PATCH 10/13] rename for loom-cfg struct --- src/bytes.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bytes.rs b/src/bytes.rs index 1158ec459..de762f2c9 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -181,7 +181,7 @@ impl Bytes { const STATIC_VTABLE: Vtable = Vtable { type_id: TypeId::of::, clone: ::clone, - will_truncate: ::will_truncate, + try_resize: ::try_resize, into_vec: ::into_vec, drop: ::drop, }; From 7cc348470faf14576515b6dd179aef0e278cb1e5 Mon Sep 17 00:00:00 2001 From: Rick Richardson Date: Wed, 8 Feb 2023 14:55:00 -0800 Subject: [PATCH 11/13] fixing miri errors --- tests/extern_buf_bytes.rs | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/tests/extern_buf_bytes.rs b/tests/extern_buf_bytes.rs index 989c380a0..605e59770 100644 --- a/tests/extern_buf_bytes.rs +++ b/tests/extern_buf_bytes.rs @@ -16,7 +16,7 @@ struct ExternBuf { impl ExternBuf { // We're pretending that this is some sort of exotic allocation/recycling scheme pub fn from_size(sz: usize) -> Self { - let layout = Layout::from_size_align(sz, 4).unwrap(); + let layout = Layout::from_size_align(sz, 1).unwrap(); let ptr = NonNull::new(unsafe { alloc(layout) }).unwrap(); let num = ptr.as_ptr() as usize; println!("Alloc'd {}", num); @@ -45,11 +45,11 @@ impl From<&[u8]> for ExternBuf { impl Drop for ExternBuf { fn drop(&mut self) { - let layout = Layout::from_size_align(self.cap, 4).unwrap(); + let layout = Layout::from_size_align(self.cap, 1).unwrap(); unsafe { let num = self.ptr.as_ptr() as usize; println!("dealloc'ing {}", num); - dealloc(self.ptr.as_mut(), layout); + dealloc(self.ptr.as_ptr(), layout); } } } @@ -109,7 +109,7 @@ unsafe impl SharedBuf for ExternBufWrapper { } unsafe fn drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { - let inner: *mut ExternBuf = (*data.get_mut()).cast(); + let inner: *mut ExternBuf = 
(data.get_mut()).cast(); if (*inner).ref_count.fetch_sub(1, Ordering::Release) != 1 { return; } @@ -175,6 +175,27 @@ fn roundtrip() { assert_eq!(a2, b"abcdefgh"[..]); } +#[test] +fn to_vec() { + let eb = ExternBuf::from(&b"abcdefgh"[..]); + let a = Bytes::from_shared_buf(eb.into_shared()); + let v = Vec::from(a); + assert_eq!(v, b"abcdefgh"[..]); +} + +#[test] +fn refer_madness() { + let eb = ExternBuf::from(&b"abcdefgh"[..]); + let a = Bytes::from_shared_buf(eb.into_shared()); + let b = a.slice(..); + let c = b.slice(..); + let d = c.slice(..5); + let e = d.slice(1..3); + drop(d); + assert_eq!(e, b"bc"[..]); +} + +#[ignore] #[test] fn from_slice() { let eb1 = ExternBuf::from(&b"abcdefgh"[..]); From e623037ed70b5a312313216b938e503986a72a88 Mon Sep 17 00:00:00 2001 From: Rick Richardson Date: Wed, 8 Feb 2023 15:09:17 -0800 Subject: [PATCH 12/13] tweaking example-buf layout and removing print diag --- tests/extern_buf_bytes.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/tests/extern_buf_bytes.rs b/tests/extern_buf_bytes.rs index 605e59770..28f3c01f6 100644 --- a/tests/extern_buf_bytes.rs +++ b/tests/extern_buf_bytes.rs @@ -16,10 +16,8 @@ struct ExternBuf { impl ExternBuf { // We're pretending that this is some sort of exotic allocation/recycling scheme pub fn from_size(sz: usize) -> Self { - let layout = Layout::from_size_align(sz, 1).unwrap(); + let layout = Layout::array::(sz).unwrap(); let ptr = NonNull::new(unsafe { alloc(layout) }).unwrap(); - let num = ptr.as_ptr() as usize; - println!("Alloc'd {}", num); ExternBuf { ptr, cap: sz, @@ -45,10 +43,8 @@ impl From<&[u8]> for ExternBuf { impl Drop for ExternBuf { fn drop(&mut self) { - let layout = Layout::from_size_align(self.cap, 1).unwrap(); + let layout = Layout::array::(self.cap).unwrap(); unsafe { - let num = self.ptr.as_ptr() as usize; - println!("dealloc'ing {}", num); dealloc(self.ptr.as_ptr(), layout); } } @@ -114,10 +110,6 @@ unsafe impl SharedBuf for ExternBufWrapper { return; } (*inner).ref_count.load(Ordering::Acquire); - println!( - "invoking drop over box::from_raw on {}", - (*inner).ptr.as_ptr() as usize - ); drop(Box::from_raw(inner)); } } From 580869515102a61a0c0f388f8325bad9d98008fc Mon Sep 17 00:00:00 2001 From: Rick Richardson Date: Mon, 6 Mar 2023 10:30:33 -0800 Subject: [PATCH 13/13] un-ignoring tests --- tests/extern_buf_bytes.rs | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/tests/extern_buf_bytes.rs b/tests/extern_buf_bytes.rs index 28f3c01f6..311040103 100644 --- a/tests/extern_buf_bytes.rs +++ b/tests/extern_buf_bytes.rs @@ -117,7 +117,6 @@ unsafe impl SharedBuf for ExternBufWrapper { fn is_sync() {} fn is_send() {} -#[ignore] #[test] fn test_bounds() { is_sync::(); @@ -126,7 +125,6 @@ fn test_bounds() { is_send::(); } -#[ignore] #[test] fn test_layout() { use std::mem; @@ -155,7 +153,6 @@ fn test_layout() { ); } -#[ignore] #[test] fn roundtrip() { let eb = ExternBuf::from(&b"abcdefgh"[..]); @@ -187,7 +184,6 @@ fn refer_madness() { assert_eq!(e, b"bc"[..]); } -#[ignore] #[test] fn from_slice() { let eb1 = ExternBuf::from(&b"abcdefgh"[..]); @@ -209,7 +205,6 @@ fn from_slice() { assert_eq!(Vec::from(&b"abcdefgh"[..]), a2); } -#[ignore] #[test] fn len() { let eb = ExternBuf::from(&b"abcdefg"[..]); @@ -221,7 +216,6 @@ fn len() { assert!(a.is_empty()); } -#[ignore] #[test] fn index() { let eb = ExternBuf::from(&b"hello world"[..]); @@ -229,7 +223,6 @@ fn index() { assert_eq!(a[0..5], *b"hello"); } -#[ignore] #[test] fn slice() { let eb = ExternBuf::from(&b"hello 
world"[..]); @@ -254,7 +247,6 @@ fn slice() { assert_eq!(b, b"lo world"[..]); } -#[ignore] #[test] #[should_panic] fn slice_oob_1() { @@ -263,7 +255,6 @@ fn slice_oob_1() { a.slice(5..44); } -#[ignore] #[test] #[should_panic] fn slice_oob_2() { @@ -272,7 +263,6 @@ fn slice_oob_2() { a.slice(44..49); } -#[ignore] #[test] fn split_off() { let eb = ExternBuf::from(&b"helloworld"[..]); @@ -283,7 +273,6 @@ fn split_off() { assert_eq!(world, &b"world"[..]); } -#[ignore] #[test] #[should_panic] fn split_off_oob() { @@ -292,7 +281,6 @@ fn split_off_oob() { let _ = hello.split_off(44); } -#[ignore] #[test] fn split_off_to_loop() { let s = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; @@ -321,7 +309,6 @@ fn split_off_to_loop() { } } -#[ignore] #[test] fn truncate() { let s = &b"helloworld"[..]; @@ -335,7 +322,6 @@ fn truncate() { assert_eq!(hello, "hello"); } -#[ignore] #[test] // Only run these tests on little endian systems. CI uses qemu for testing // big endian... and qemu doesn't really support threading all that well.