From 7e3e3e9fd4de39b126e8a99960e5cc4c2c3dbeba Mon Sep 17 00:00:00 2001 From: James Munns Date: Sun, 3 Jan 2021 16:43:14 +0100 Subject: [PATCH 01/11] Remove wildly unsafe `as_static` call. --- core/src/bbbuffer.rs | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/core/src/bbbuffer.rs b/core/src/bbbuffer.rs index e2c5c8a..907d418 100644 --- a/core/src/bbbuffer.rs +++ b/core/src/bbbuffer.rs @@ -777,21 +777,6 @@ impl<'a, const N: usize> GrantW<'a, { N }> { self.buf } - /// Sometimes, it's not possible for the lifetimes to check out. For example, - /// if you need to hand this buffer to a function that expects to receive a - /// `&'static mut [u8]`, it is not possible for the inner reference to outlive the - /// grant itself. - /// - /// You MUST guarantee that in no cases, the reference that is returned here outlives - /// the grant itself. Once the grant has been released, referencing the data contained - /// WILL cause undefined behavior. - /// - /// Additionally, you must ensure that a separate reference to this data is not created - /// to this data, e.g. using `DerefMut` or the `buf()` method of this grant. - pub unsafe fn as_static_mut_buf(&mut self) -> &'static mut [u8] { - transmute::<&mut [u8], &'static mut [u8]>(self.buf) - } - #[inline(always)] pub(crate) fn commit_inner(&mut self, used: usize) { let inner = unsafe { &self.bbq.as_ref() }; @@ -916,21 +901,6 @@ impl<'a, const N: usize> GrantR<'a, { N }> { self.buf } - /// Sometimes, it's not possible for the lifetimes to check out. For example, - /// if you need to hand this buffer to a function that expects to receive a - /// `&'static [u8]`, it is not possible for the inner reference to outlive the - /// grant itself. - /// - /// You MUST guarantee that in no cases, the reference that is returned here outlives - /// the grant itself. Once the grant has been released, referencing the data contained - /// WILL cause undefined behavior. 
- /// - /// Additionally, you must ensure that a separate reference to this data is not created - /// to this data, e.g. using `Deref` or the `buf()` method of this grant. - pub unsafe fn as_static_buf(&self) -> &'static [u8] { - transmute::<&[u8], &'static [u8]>(self.buf) - } - #[inline(always)] pub(crate) fn release_inner(&mut self, used: usize) { let inner = unsafe { &self.bbq.as_ref() }; From 6dd711b46f0d64422e62673ad7c005c8ecc4ed79 Mon Sep 17 00:00:00 2001 From: James Munns Date: Mon, 4 Jan 2021 17:39:20 +0100 Subject: [PATCH 02/11] Remove MaybeUninit, as we can now initalize the buffer in a const way --- core/src/bbbuffer.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/core/src/bbbuffer.rs b/core/src/bbbuffer.rs index 907d418..f016663 100644 --- a/core/src/bbbuffer.rs +++ b/core/src/bbbuffer.rs @@ -6,7 +6,7 @@ use core::{ cell::UnsafeCell, cmp::min, marker::PhantomData, - mem::{forget, transmute, MaybeUninit}, + mem::forget, ops::{Deref, DerefMut}, ptr::NonNull, result::Result as CoreResult, @@ -20,7 +20,7 @@ use core::{ /// A backing structure for a BBQueue. Can be used to create either /// a BBQueue or a split Producer/Consumer pair pub struct BBBuffer { - buf: UnsafeCell>, + buf: UnsafeCell<[u8; N]>, /// Where the next byte will be written write: AtomicUsize, @@ -227,7 +227,7 @@ impl<'a, const N: usize> BBBuffer<{ N }> { } } -impl BBBuffer<{ A }> { +impl BBBuffer<{ N }> { /// Create a new constant inner portion of a `BBBuffer`. 
/// /// NOTE: This is only necessary to use when creating a `BBBuffer` at static @@ -247,7 +247,7 @@ impl BBBuffer<{ A }> { pub const fn new() -> Self { Self { // This will not be initialized until we split the buffer - buf: UnsafeCell::new(MaybeUninit::uninit()), + buf: UnsafeCell::new([0u8; N]), /// Owned by the writer write: AtomicUsize::new(0), @@ -394,8 +394,8 @@ impl<'a, const N: usize> Producer<'a, { N }> { // Safe write, only viewed by this task inner.reserve.store(start + sz, Release); - // This is sound, as UnsafeCell, MaybeUninit, and GenericArray - // are all `#[repr(Transparent)] + // This is sound, as UnsafeCell is `#[repr(Transparent)] + // Here we are casting a `*mut [u8; N]` to a `*mut u8` let start_of_buf_ptr = inner.buf.get().cast::(); let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(start as isize), sz) }; @@ -497,8 +497,8 @@ impl<'a, const N: usize> Producer<'a, { N }> { // Safe write, only viewed by this task inner.reserve.store(start + sz, Release); - // This is sound, as UnsafeCell, MaybeUninit, and GenericArray - // are all `#[repr(Transparent)] + // This is sound, as UnsafeCell is `#[repr(Transparent)] + // Here we are casting a `*mut [u8; N]` to a `*mut u8` let start_of_buf_ptr = inner.buf.get().cast::(); let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(start as isize), sz) }; @@ -588,8 +588,8 @@ impl<'a, const N: usize> Consumer<'a, { N }> { return Err(Error::InsufficientSize); } - // This is sound, as UnsafeCell, MaybeUninit, and GenericArray - // are all `#[repr(Transparent)] + // This is sound, as UnsafeCell is `#[repr(Transparent)] + // Here we are casting a `*mut [u8; N]` to a `*mut u8` let start_of_buf_ptr = inner.buf.get().cast::(); let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(read as isize), sz) }; @@ -640,8 +640,8 @@ impl<'a, const N: usize> Consumer<'a, { N }> { return Err(Error::InsufficientSize); } - // This is sound, as UnsafeCell, MaybeUninit, and 
GenericArray - // are all `#[repr(Transparent)] + // This is sound, as UnsafeCell is `#[repr(Transparent)] + // Here we are casting a `*mut [u8; N]` to a `*mut u8` let start_of_buf_ptr = inner.buf.get().cast::(); let grant_slice1 = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(read as isize), sz1) }; From 673ec4759a77f4a8937080ef6b9275727dc07ec3 Mon Sep 17 00:00:00 2001 From: James Munns Date: Mon, 4 Jan 2021 17:50:33 +0100 Subject: [PATCH 03/11] Introduce header type --- core/src/bbbuffer.rs | 191 +++++++++++++++++++++---------------------- 1 file changed, 95 insertions(+), 96 deletions(-) diff --git a/core/src/bbbuffer.rs b/core/src/bbbuffer.rs index f016663..24edc5a 100644 --- a/core/src/bbbuffer.rs +++ b/core/src/bbbuffer.rs @@ -17,11 +17,7 @@ use core::{ }, }; -/// A backing structure for a BBQueue. Can be used to create either -/// a BBQueue or a split Producer/Consumer pair -pub struct BBBuffer { - buf: UnsafeCell<[u8; N]>, - +struct BBHeader { /// Where the next byte will be written write: AtomicUsize, @@ -51,6 +47,13 @@ pub struct BBBuffer { already_split: AtomicBool, } +/// A backing structure for a BBQueue. Can be used to create either +/// a BBQueue or a split Producer/Consumer pair +pub struct BBBuffer { + buf: UnsafeCell<[u8; N]>, + hdr: BBHeader, +} + unsafe impl Sync for BBBuffer<{ A }> {} impl<'a, const N: usize> BBBuffer<{ N }> { @@ -86,17 +89,11 @@ impl<'a, const N: usize> BBBuffer<{ N }> { /// # } /// ``` pub fn try_split(&'a self) -> Result<(Producer<'a, { N }>, Consumer<'a, { N }>)> { - if atomic::swap(&self.already_split, true, AcqRel) { + if atomic::swap(&self.hdr.already_split, true, AcqRel) { return Err(Error::AlreadySplit); } unsafe { - // Explicitly zero the data to avoid undefined behavior. 
- // This is required, because we hand out references to the buffers, - // which mean that creating them as references is technically UB for now - let mu_ptr = self.buf.get(); - (*mu_ptr).as_mut_ptr().write_bytes(0u8, 1); - let nn1 = NonNull::new_unchecked(self as *const _ as *mut _); let nn2 = NonNull::new_unchecked(self as *const _ as *mut _); @@ -183,8 +180,8 @@ impl<'a, const N: usize> BBBuffer<{ N }> { return Err((prod, cons)); } - let wr_in_progress = self.write_in_progress.load(Acquire); - let rd_in_progress = self.read_in_progress.load(Acquire); + let wr_in_progress = self.hdr.write_in_progress.load(Acquire); + let rd_in_progress = self.hdr.read_in_progress.load(Acquire); if wr_in_progress || rd_in_progress { // Can't release, active grant(s) in progress @@ -196,13 +193,13 @@ impl<'a, const N: usize> BBBuffer<{ N }> { drop(cons); // Re-initialize the buffer (not totally needed, but nice to do) - self.write.store(0, Release); - self.read.store(0, Release); - self.reserve.store(0, Release); - self.last.store(0, Release); + self.hdr.write.store(0, Release); + self.hdr.read.store(0, Release); + self.hdr.reserve.store(0, Release); + self.hdr.last.store(0, Release); // Mark the buffer as ready to be split - self.already_split.store(false, Release); + self.hdr.already_split.store(false, Release); Ok(()) } @@ -249,36 +246,38 @@ impl BBBuffer<{ N }> { // This will not be initialized until we split the buffer buf: UnsafeCell::new([0u8; N]), - /// Owned by the writer - write: AtomicUsize::new(0), - - /// Owned by the reader - read: AtomicUsize::new(0), - - /// Cooperatively owned - /// - /// NOTE: This should generally be initialized as size_of::(), however - /// this would prevent the structure from being entirely zero-initialized, - /// and can cause the .data section to be much larger than necessary. 
By - /// forcing the `last` pointer to be zero initially, we place the structure - /// in an "inverted" condition, which will be resolved on the first commited - /// bytes that are written to the structure. - /// - /// When read == last == write, no bytes will be allowed to be read (good), but - /// write grants can be given out (also good). - last: AtomicUsize::new(0), - - /// Owned by the Writer, "private" - reserve: AtomicUsize::new(0), - - /// Owned by the Reader, "private" - read_in_progress: AtomicBool::new(false), - - /// Owned by the Writer, "private" - write_in_progress: AtomicBool::new(false), - - /// We haven't split at the start - already_split: AtomicBool::new(false), + hdr: BBHeader { + /// Owned by the writer + write: AtomicUsize::new(0), + + /// Owned by the reader + read: AtomicUsize::new(0), + + /// Cooperatively owned + /// + /// NOTE: This should generally be initialized as size_of::(), however + /// this would prevent the structure from being entirely zero-initialized, + /// and can cause the .data section to be much larger than necessary. By + /// forcing the `last` pointer to be zero initially, we place the structure + /// in an "inverted" condition, which will be resolved on the first commited + /// bytes that are written to the structure. + /// + /// When read == last == write, no bytes will be allowed to be read (good), but + /// write grants can be given out (also good). 
+ last: AtomicUsize::new(0), + + /// Owned by the Writer, "private" + reserve: AtomicUsize::new(0), + + /// Owned by the Reader, "private" + read_in_progress: AtomicBool::new(false), + + /// Owned by the Writer, "private" + write_in_progress: AtomicBool::new(false), + + /// We haven't split at the start + already_split: AtomicBool::new(false), + } } } } @@ -348,16 +347,16 @@ impl<'a, const N: usize> Producer<'a, { N }> { /// # } /// ``` pub fn grant_exact(&mut self, sz: usize) -> Result> { - let inner = unsafe { &self.bbq.as_ref() }; + let inner = unsafe { self.bbq.as_ref() }; - if atomic::swap(&inner.write_in_progress, true, AcqRel) { + if atomic::swap(&inner.hdr.write_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } // Writer component. Must never write to `read`, // be careful writing to `load` - let write = inner.write.load(Acquire); - let read = inner.read.load(Acquire); + let write = inner.hdr.write.load(Acquire); + let read = inner.hdr.read.load(Acquire); let max = N; let already_inverted = write < read; @@ -367,7 +366,7 @@ impl<'a, const N: usize> Producer<'a, { N }> { write } else { // Inverted, no room is available - inner.write_in_progress.store(false, Release); + inner.hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } else { @@ -385,14 +384,14 @@ impl<'a, const N: usize> Producer<'a, { N }> { 0 } else { // Not invertible, no space - inner.write_in_progress.store(false, Release); + inner.hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } }; // Safe write, only viewed by this task - inner.reserve.store(start + sz, Release); + inner.hdr.reserve.store(start + sz, Release); // This is sound, as UnsafeCell is `#[repr(Transparent)] // Here we are casting a `*mut [u8; N]` to a `*mut u8` @@ -446,16 +445,16 @@ impl<'a, const N: usize> Producer<'a, { N }> { /// # } /// ``` pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { - let inner = unsafe { 
&self.bbq.as_ref() }; + let inner = unsafe { self.bbq.as_ref() }; - if atomic::swap(&inner.write_in_progress, true, AcqRel) { + if atomic::swap(&inner.hdr.write_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } // Writer component. Must never write to `read`, // be careful writing to `load` - let write = inner.write.load(Acquire); - let read = inner.read.load(Acquire); + let write = inner.hdr.write.load(Acquire); + let read = inner.hdr.read.load(Acquire); let max = N; let already_inverted = write < read; @@ -469,7 +468,7 @@ impl<'a, const N: usize> Producer<'a, { N }> { write } else { // Inverted, no room is available - inner.write_in_progress.store(false, Release); + inner.hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } else { @@ -488,14 +487,14 @@ impl<'a, const N: usize> Producer<'a, { N }> { 0 } else { // Not invertible, no space - inner.write_in_progress.store(false, Release); + inner.hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } }; // Safe write, only viewed by this task - inner.reserve.store(start + sz, Release); + inner.hdr.reserve.store(start + sz, Release); // This is sound, as UnsafeCell is `#[repr(Transparent)] // Here we are casting a `*mut [u8; N]` to a `*mut u8` @@ -551,15 +550,15 @@ impl<'a, const N: usize> Consumer<'a, { N }> { /// # } /// ``` pub fn read(&mut self) -> Result> { - let inner = unsafe { &self.bbq.as_ref() }; + let inner = unsafe { self.bbq.as_ref() }; - if atomic::swap(&inner.read_in_progress, true, AcqRel) { + if atomic::swap(&inner.hdr.read_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } - let write = inner.write.load(Acquire); - let last = inner.last.load(Acquire); - let mut read = inner.read.load(Acquire); + let write = inner.hdr.write.load(Acquire); + let last = inner.hdr.last.load(Acquire); + let mut read = inner.hdr.read.load(Acquire); // Resolve the inverted case or end of read if (read == last) && (write < read) 
{ @@ -572,7 +571,7 @@ impl<'a, const N: usize> Consumer<'a, { N }> { // Commit does not check read, but if Grant has started an inversion, // grant could move Last to the prior write position // MOVING READ BACKWARDS! - inner.read.store(0, Release); + inner.hdr.read.store(0, Release); } let sz = if write < read { @@ -584,7 +583,7 @@ impl<'a, const N: usize> Consumer<'a, { N }> { } - read; if sz == 0 { - inner.read_in_progress.store(false, Release); + inner.hdr.read_in_progress.store(false, Release); return Err(Error::InsufficientSize); } @@ -603,15 +602,15 @@ impl<'a, const N: usize> Consumer<'a, { N }> { /// Obtains two disjoint slices, which are each contiguous of committed bytes. /// Combined these contain all previously commited data. pub fn split_read(&mut self) -> Result> { - let inner = unsafe { &self.bbq.as_ref() }; + let inner = unsafe { self.bbq.as_ref() }; - if atomic::swap(&inner.read_in_progress, true, AcqRel) { + if atomic::swap(&inner.hdr.read_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } - let write = inner.write.load(Acquire); - let last = inner.last.load(Acquire); - let mut read = inner.read.load(Acquire); + let write = inner.hdr.write.load(Acquire); + let last = inner.hdr.last.load(Acquire); + let mut read = inner.hdr.read.load(Acquire); // Resolve the inverted case or end of read if (read == last) && (write < read) { @@ -624,7 +623,7 @@ impl<'a, const N: usize> Consumer<'a, { N }> { // Commit does not check read, but if Grant has started an inversion, // grant could move Last to the prior write position // MOVING READ BACKWARDS! 
- inner.read.store(0, Release); + inner.hdr.read.store(0, Release); } let (sz1, sz2) = if write < read { @@ -636,7 +635,7 @@ impl<'a, const N: usize> Consumer<'a, { N }> { }; if sz1 == 0 { - inner.read_in_progress.store(false, Release); + inner.hdr.read_in_progress.store(false, Release); return Err(Error::InsufficientSize); } @@ -779,12 +778,12 @@ impl<'a, const N: usize> GrantW<'a, { N }> { #[inline(always)] pub(crate) fn commit_inner(&mut self, used: usize) { - let inner = unsafe { &self.bbq.as_ref() }; + let inner = unsafe { self.bbq.as_ref() }; // If there is no grant in progress, return early. This // generally means we are dropping the grant within a // wrapper structure - if !inner.write_in_progress.load(Acquire) { + if !inner.hdr.write_in_progress.load(Acquire) { return; } @@ -795,17 +794,17 @@ impl<'a, const N: usize> GrantW<'a, { N }> { let len = self.buf.len(); let used = min(len, used); - let write = inner.write.load(Acquire); - atomic::fetch_sub(&inner.reserve, len - used, AcqRel); + let write = inner.hdr.write.load(Acquire); + atomic::fetch_sub(&inner.hdr.reserve, len - used, AcqRel); let max = N; - let last = inner.last.load(Acquire); - let new_write = inner.reserve.load(Acquire); + let last = inner.hdr.last.load(Acquire); + let new_write = inner.hdr.reserve.load(Acquire); if (new_write < write) && (write != max) { // We have already wrapped, but we are skipping some bytes at the end of the ring. // Mark `last` where the write pointer used to be to hold the line here - inner.last.store(write, Release); + inner.hdr.last.store(write, Release); } else if new_write > last { // We're about to pass the last pointer, which was previously the artificial // end of the ring. 
Now that we've passed it, we can "unlock" the section @@ -814,7 +813,7 @@ impl<'a, const N: usize> GrantW<'a, { N }> { // Since new_write is strictly larger than last, it is safe to move this as // the other thread will still be halted by the (about to be updated) write // value - inner.last.store(max, Release); + inner.hdr.last.store(max, Release); } // else: If new_write == last, either: // * last == max, so no need to write, OR @@ -824,10 +823,10 @@ impl<'a, const N: usize> GrantW<'a, { N }> { // Write must be updated AFTER last, otherwise read could think it was // time to invert early! - inner.write.store(new_write, Release); + inner.hdr.write.store(new_write, Release); // Allow subsequent grants - inner.write_in_progress.store(false, Release); + inner.hdr.write_in_progress.store(false, Release); } /// Configures the amount of bytes to be commited on drop. @@ -903,12 +902,12 @@ impl<'a, const N: usize> GrantR<'a, { N }> { #[inline(always)] pub(crate) fn release_inner(&mut self, used: usize) { - let inner = unsafe { &self.bbq.as_ref() }; + let inner = unsafe { self.bbq.as_ref() }; // If there is no grant in progress, return early. This // generally means we are dropping the grant within a // wrapper structure - if !inner.read_in_progress.load(Acquire) { + if !inner.hdr.read_in_progress.load(Acquire) { return; } @@ -916,9 +915,9 @@ impl<'a, const N: usize> GrantR<'a, { N }> { debug_assert!(used <= self.buf.len()); // This should be fine, purely incrementing - let _ = atomic::fetch_add(&inner.read, used, Release); + let _ = atomic::fetch_add(&inner.hdr.read, used, Release); - inner.read_in_progress.store(false, Release); + inner.hdr.read_in_progress.store(false, Release); } /// Configures the amount of bytes to be released on drop. 
@@ -987,12 +986,12 @@ impl<'a, const N: usize> SplitGrantR<'a, { N }> { #[inline(always)] pub(crate) fn release_inner(&mut self, used: usize) { - let inner = unsafe { &self.bbq.as_ref() }; + let inner = unsafe { self.bbq.as_ref() }; // If there is no grant in progress, return early. This // generally means we are dropping the grant within a // wrapper structure - if !inner.read_in_progress.load(Acquire) { + if !inner.hdr.read_in_progress.load(Acquire) { return; } @@ -1001,13 +1000,13 @@ impl<'a, const N: usize> SplitGrantR<'a, { N }> { if used <= self.buf1.len() { // This should be fine, purely incrementing - let _ = atomic::fetch_add(&inner.read, used, Release); + let _ = atomic::fetch_add(&inner.hdr.read, used, Release); } else { // Also release parts of the second buffer - inner.read.store(used - self.buf1.len(), Release); + inner.hdr.read.store(used - self.buf1.len(), Release); } - inner.read_in_progress.store(false, Release); + inner.hdr.read_in_progress.store(false, Release); } /// Configures the amount of bytes to be released on drop. From 348a3343111fe0c8c5172cc42f1b917c29d69aa8 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sat, 20 Mar 2021 20:35:49 +0100 Subject: [PATCH 04/11] This was already here --- core/src/bbbuffer.rs | 12 ++++++++++-- core/src/lib.rs | 5 +++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/core/src/bbbuffer.rs b/core/src/bbbuffer.rs index 24edc5a..6f46f2e 100644 --- a/core/src/bbbuffer.rs +++ b/core/src/bbbuffer.rs @@ -47,10 +47,18 @@ struct BBHeader { already_split: AtomicBool, } +trait BBGetter: Clone { + fn get_header(&self) -> &BBHeader; + fn get_storage(&self) -> (*mut u8, usize); +} + /// A backing structure for a BBQueue. 
Can be used to create either /// a BBQueue or a split Producer/Consumer pair -pub struct BBBuffer { - buf: UnsafeCell<[u8; N]>, +pub struct BBBuffer +where + STO: BBGetter, +{ + buf: STO, hdr: BBHeader, } diff --git a/core/src/lib.rs b/core/src/lib.rs index b6b1250..a96c947 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -102,8 +102,9 @@ //! most, so they should make no difference to most applications. #![cfg_attr(not(feature = "std"), no_std)] -#![deny(missing_docs)] -#![deny(warnings)] +// AJM: TODO - Restore +// #![deny(missing_docs)] +// #![deny(warnings)] mod bbbuffer; pub use bbbuffer::*; From c7c5478f79eb2dde58e6843090ab96353e974eb8 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sat, 20 Mar 2021 21:06:14 +0100 Subject: [PATCH 05/11] Finish the trait prop --- core/src/bbbuffer.rs | 129 ++++++++++++++++++++++++++++--------------- core/src/framed.rs | 49 ++++++++++------ 2 files changed, 115 insertions(+), 63 deletions(-) diff --git a/core/src/bbbuffer.rs b/core/src/bbbuffer.rs index 6f46f2e..0b16d3c 100644 --- a/core/src/bbbuffer.rs +++ b/core/src/bbbuffer.rs @@ -47,24 +47,24 @@ struct BBHeader { already_split: AtomicBool, } -trait BBGetter: Clone { +pub(crate) trait BBGetter: Clone { fn get_header(&self) -> &BBHeader; fn get_storage(&self) -> (*mut u8, usize); } /// A backing structure for a BBQueue. Can be used to create either /// a BBQueue or a split Producer/Consumer pair -pub struct BBBuffer +pub struct BBBuffer where - STO: BBGetter, + STO: BBGetter, { buf: STO, hdr: BBHeader, } -unsafe impl Sync for BBBuffer<{ A }> {} +unsafe impl Sync for BBBuffer {} -impl<'a, const N: usize> BBBuffer<{ N }> { +impl<'a, STO: BBGetter, const N: usize> BBBuffer { /// Attempt to split the `BBBuffer` into `Consumer` and `Producer` halves to gain access to the /// buffer. If buffer has already been split, an error will be returned. 
/// @@ -96,7 +96,7 @@ impl<'a, const N: usize> BBBuffer<{ N }> { /// # bbqtest(); /// # } /// ``` - pub fn try_split(&'a self) -> Result<(Producer<'a, { N }>, Consumer<'a, { N }>)> { + pub fn try_split(&'a self) -> Result<(Producer<'a, STO, { N }>, Consumer<'a, STO, { N }>)> { if atomic::swap(&self.hdr.already_split, true, AcqRel) { return Err(Error::AlreadySplit); } @@ -132,7 +132,7 @@ impl<'a, const N: usize> BBBuffer<{ N }> { /// section while splitting. pub fn try_split_framed( &'a self, - ) -> Result<(FrameProducer<'a, { N }>, FrameConsumer<'a, { N }>)> { + ) -> Result<(FrameProducer<'a, STO, { N }>, FrameConsumer<'a, STO, { N }>)> { let (producer, consumer) = self.try_split()?; Ok((FrameProducer { producer }, FrameConsumer { consumer })) } @@ -172,9 +172,9 @@ impl<'a, const N: usize> BBBuffer<{ N }> { /// ``` pub fn try_release( &'a self, - prod: Producer<'a, { N }>, - cons: Consumer<'a, { N }>, - ) -> CoreResult<(), (Producer<'a, { N }>, Consumer<'a, { N }>)> { + prod: Producer<'a, STO, { N }>, + cons: Consumer<'a, STO, { N }>, + ) -> CoreResult<(), (Producer<'a, STO, { N }>, Consumer<'a, STO, { N }>)> { // Note: Re-entrancy is not possible because we require ownership // of the producer and consumer, which are not cloneable. We also // can assume the buffer has been split, because @@ -221,9 +221,9 @@ impl<'a, const N: usize> BBBuffer<{ N }> { /// will be returned. 
pub fn try_release_framed( &'a self, - prod: FrameProducer<'a, { N }>, - cons: FrameConsumer<'a, { N }>, - ) -> CoreResult<(), (FrameProducer<'a, { N }>, FrameConsumer<'a, { N }>)> { + prod: FrameProducer<'a, STO, { N }>, + cons: FrameConsumer<'a, STO, { N }>, + ) -> CoreResult<(), (FrameProducer<'a, STO, { N }>, FrameConsumer<'a, STO, { N }>)> { self.try_release(prod.producer, cons.consumer) .map_err(|(producer, consumer)| { // Restore the wrapper types @@ -232,7 +232,10 @@ impl<'a, const N: usize> BBBuffer<{ N }> { } } -impl BBBuffer<{ N }> { +impl BBBuffer +where + STO: BBGetter, +{ /// Create a new constant inner portion of a `BBBuffer`. /// /// NOTE: This is only necessary to use when creating a `BBBuffer` at static @@ -314,14 +317,23 @@ impl BBBuffer<{ N }> { /// /// See [this github issue](https://github.com/jamesmunns/bbqueue/issues/38) for a /// discussion of grant methods that could be added in the future. -pub struct Producer<'a, const N: usize> { - bbq: NonNull>, +pub struct Producer<'a, STO, const N: usize> +where + STO: BBGetter, +{ + bbq: NonNull>, pd: PhantomData<&'a ()>, } -unsafe impl<'a, const N: usize> Send for Producer<'a, { N }> {} +unsafe impl<'a, STO, const N: usize> Send for Producer<'a, STO, { N }> +where + STO: BBGetter, +{} -impl<'a, const N: usize> Producer<'a, { N }> { +impl<'a, STO, const N: usize> Producer<'a, STO, { N }> +where + STO: BBGetter, +{ /// Request a writable, contiguous section of memory of exactly /// `sz` bytes. If the buffer size requested is not available, /// an error will be returned. 
@@ -354,7 +366,7 @@ impl<'a, const N: usize> Producer<'a, { N }> { /// # bbqtest(); /// # } /// ``` - pub fn grant_exact(&mut self, sz: usize) -> Result> { + pub fn grant_exact(&mut self, sz: usize) -> Result> { let inner = unsafe { self.bbq.as_ref() }; if atomic::swap(&inner.hdr.write_in_progress, true, AcqRel) { @@ -452,7 +464,7 @@ impl<'a, const N: usize> Producer<'a, { N }> { /// # bbqtest(); /// # } /// ``` - pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { + pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { let inner = unsafe { self.bbq.as_ref() }; if atomic::swap(&inner.hdr.write_in_progress, true, AcqRel) { @@ -519,14 +531,14 @@ impl<'a, const N: usize> Producer<'a, { N }> { } /// `Consumer` is the primary interface for reading data from a `BBBuffer`. -pub struct Consumer<'a, const N: usize> { - bbq: NonNull>, +pub struct Consumer<'a, STO: BBGetter, const N: usize> { + bbq: NonNull>, pd: PhantomData<&'a ()>, } -unsafe impl<'a, const N: usize> Send for Consumer<'a, { N }> {} +unsafe impl<'a, STO: BBGetter, const N: usize> Send for Consumer<'a, STO, { N }> {} -impl<'a, const N: usize> Consumer<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { /// Obtains a contiguous slice of committed bytes. This slice may not /// contain ALL available bytes, if the writer has wrapped around. The /// remaining bytes will be available after all readable bytes are @@ -557,7 +569,7 @@ impl<'a, const N: usize> Consumer<'a, { N }> { /// # bbqtest(); /// # } /// ``` - pub fn read(&mut self) -> Result> { + pub fn read(&mut self) -> Result> { let inner = unsafe { self.bbq.as_ref() }; if atomic::swap(&inner.hdr.read_in_progress, true, AcqRel) { @@ -609,7 +621,7 @@ impl<'a, const N: usize> Consumer<'a, { N }> { /// Obtains two disjoint slices, which are each contiguous of committed bytes. /// Combined these contain all previously commited data. 
- pub fn split_read(&mut self) -> Result> { + pub fn split_read(&mut self) -> Result> { let inner = unsafe { self.bbq.as_ref() }; if atomic::swap(&inner.hdr.read_in_progress, true, AcqRel) { @@ -663,7 +675,7 @@ impl<'a, const N: usize> Consumer<'a, { N }> { } } -impl BBBuffer<{ N }> { +impl BBBuffer { /// Returns the size of the backing storage. /// /// This is the maximum number of bytes that can be stored in this queue. @@ -700,13 +712,16 @@ impl BBBuffer<{ N }> { /// If the `thumbv6` feature is selected, dropping the grant /// without committing it takes a short critical section, #[derive(Debug, PartialEq)] -pub struct GrantW<'a, const N: usize> { +pub struct GrantW<'a, STO, const N: usize> +where + STO: BBGetter, +{ pub(crate) buf: &'a mut [u8], - bbq: NonNull>, + bbq: NonNull>, pub(crate) to_commit: usize, } -unsafe impl<'a, const N: usize> Send for GrantW<'a, { N }> {} +unsafe impl<'a, STO: BBGetter, const N: usize> Send for GrantW<'a, STO, { N }> {} /// A structure representing a contiguous region of memory that /// may be read from, and potentially "released" (or cleared) @@ -721,9 +736,12 @@ unsafe impl<'a, const N: usize> Send for GrantW<'a, { N }> {} /// If the `thumbv6` feature is selected, dropping the grant /// without releasing it takes a short critical section, #[derive(Debug, PartialEq)] -pub struct GrantR<'a, const N: usize> { +pub struct GrantR<'a, STO, const N: usize> +where + STO: BBGetter, +{ pub(crate) buf: &'a mut [u8], - bbq: NonNull>, + bbq: NonNull>, pub(crate) to_release: usize, } @@ -731,18 +749,21 @@ pub struct GrantR<'a, const N: usize> { /// may be read from, and potentially "released" (or cleared) /// from the queue #[derive(Debug, PartialEq)] -pub struct SplitGrantR<'a, const N: usize> { +pub struct SplitGrantR<'a, STO, const N: usize> +where + STO: BBGetter, +{ pub(crate) buf1: &'a mut [u8], pub(crate) buf2: &'a mut [u8], - bbq: NonNull>, + bbq: NonNull>, pub(crate) to_release: usize, } -unsafe impl<'a, const N: usize> Send for 
GrantR<'a, { N }> {} +unsafe impl<'a, STO: BBGetter, const N: usize> Send for GrantR<'a, STO, { N }> {} -unsafe impl<'a, const N: usize> Send for SplitGrantR<'a, { N }> {} +unsafe impl<'a, STO: BBGetter, const N: usize> Send for SplitGrantR<'a, STO, { N }> {} -impl<'a, const N: usize> GrantW<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> GrantW<'a, STO, { N }> { /// Finalizes a writable grant given by `grant()` or `grant_max()`. /// This makes the data available to be read via `read()`. This consumes /// the grant. @@ -843,7 +864,7 @@ impl<'a, const N: usize> GrantW<'a, { N }> { } } -impl<'a, const N: usize> GrantR<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> GrantR<'a, STO, { N }> { /// Release a sequence of bytes from the buffer, allowing the space /// to be used by later writes. This consumes the grant. /// @@ -934,7 +955,7 @@ impl<'a, const N: usize> GrantR<'a, { N }> { } } -impl<'a, const N: usize> SplitGrantR<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> SplitGrantR<'a, STO, { N }> { /// Release a sequence of bytes from the buffer, allowing the space /// to be used by later writes. This consumes the grant. 
/// @@ -1028,19 +1049,28 @@ impl<'a, const N: usize> SplitGrantR<'a, { N }> { } } -impl<'a, const N: usize> Drop for GrantW<'a, N> { +impl<'a, STO, const N: usize> Drop for GrantW<'a, STO, N> +where + STO: BBGetter, +{ fn drop(&mut self) { self.commit_inner(self.to_commit) } } -impl<'a, const N: usize> Drop for GrantR<'a, N> { +impl<'a, STO, const N: usize> Drop for GrantR<'a, STO, N> +where + STO: BBGetter, +{ fn drop(&mut self) { self.release_inner(self.to_release) } } -impl<'a, const N: usize> Deref for GrantW<'a, N> { +impl<'a, STO, const N: usize> Deref for GrantW<'a, STO, N> +where + STO: BBGetter, +{ type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -1048,13 +1078,19 @@ impl<'a, const N: usize> Deref for GrantW<'a, N> { } } -impl<'a, const N: usize> DerefMut for GrantW<'a, N> { +impl<'a, STO, const N: usize> DerefMut for GrantW<'a, STO, N> +where + STO: BBGetter, +{ fn deref_mut(&mut self) -> &mut [u8] { self.buf } } -impl<'a, const N: usize> Deref for GrantR<'a, N> { +impl<'a, STO, const N: usize> Deref for GrantR<'a, STO, N> +where + STO: BBGetter, +{ type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -1062,7 +1098,10 @@ impl<'a, const N: usize> Deref for GrantR<'a, N> { } } -impl<'a, const N: usize> DerefMut for GrantR<'a, N> { +impl<'a, STO, const N: usize> DerefMut for GrantR<'a, STO, N> +where + STO: BBGetter, +{ fn deref_mut(&mut self) -> &mut [u8] { self.buf } diff --git a/core/src/framed.rs b/core/src/framed.rs index 19f805a..679c7a5 100644 --- a/core/src/framed.rs +++ b/core/src/framed.rs @@ -74,6 +74,7 @@ use crate::{Consumer, GrantR, GrantW, Producer}; use crate::{ vusize::{decode_usize, decoded_len, encode_usize_to_slice, encoded_len}, + bbbuffer::BBGetter, Result, }; @@ -83,16 +84,19 @@ use core::{ }; /// A producer of Framed data -pub struct FrameProducer<'a, const N: usize> { - pub(crate) producer: Producer<'a, N>, +pub struct FrameProducer<'a, STO, const N: usize> +where + STO: BBGetter, +{ + pub(crate) producer: Producer<'a, 
STO, N>, } -impl<'a, const N: usize> FrameProducer<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> FrameProducer<'a, STO, { N }> { /// Receive a grant for a frame with a maximum size of `max_sz` in bytes. /// /// This size does not include the size of the frame header. The exact size /// of the frame can be set on `commit`. - pub fn grant(&mut self, max_sz: usize) -> Result> { + pub fn grant(&mut self, max_sz: usize) -> Result> { let hdr_len = encoded_len(max_sz); Ok(FrameGrantW { grant_w: self.producer.grant_exact(max_sz + hdr_len)?, @@ -102,13 +106,16 @@ impl<'a, const N: usize> FrameProducer<'a, { N }> { } /// A consumer of Framed data -pub struct FrameConsumer<'a, const N: usize> { - pub(crate) consumer: Consumer<'a, N>, +pub struct FrameConsumer<'a, STO, const N: usize> +where + STO: BBGetter, +{ + pub(crate) consumer: Consumer<'a, STO, N>, } -impl<'a, const N: usize> FrameConsumer<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> FrameConsumer<'a, STO, { N }> { /// Obtain the next available frame, if any - pub fn read(&mut self) -> Option> { + pub fn read(&mut self) -> Option> { // Get all available bytes. We never wrap a frame around, // so if a header is available, the whole frame will be. let mut grant_r = self.consumer.read().ok()?; @@ -140,8 +147,11 @@ impl<'a, const N: usize> FrameConsumer<'a, { N }> { /// the contents without first calling `to_commit()`, then no /// frame will be comitted for writing. #[derive(Debug, PartialEq)] -pub struct FrameGrantW<'a, const N: usize> { - grant_w: GrantW<'a, N>, +pub struct FrameGrantW<'a, STO, const N: usize> +where + STO: BBGetter, +{ + grant_w: GrantW<'a, STO, N>, hdr_len: u8, } @@ -150,12 +160,15 @@ pub struct FrameGrantW<'a, const N: usize> { /// NOTE: If the grant is dropped without explicitly releasing /// the contents, then no frame will be released. 
#[derive(Debug, PartialEq)] -pub struct FrameGrantR<'a, const N: usize> { - grant_r: GrantR<'a, N>, +pub struct FrameGrantR<'a, STO, const N: usize> +where + STO: BBGetter, +{ + grant_r: GrantR<'a, STO, N>, hdr_len: u8, } -impl<'a, const N: usize> Deref for FrameGrantW<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> Deref for FrameGrantW<'a, STO, { N }> { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -163,13 +176,13 @@ impl<'a, const N: usize> Deref for FrameGrantW<'a, { N }> { } } -impl<'a, const N: usize> DerefMut for FrameGrantW<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> DerefMut for FrameGrantW<'a, STO, { N }> { fn deref_mut(&mut self) -> &mut [u8] { &mut self.grant_w.buf[self.hdr_len.into()..] } } -impl<'a, const N: usize> Deref for FrameGrantR<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> Deref for FrameGrantR<'a, STO, { N }> { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -177,13 +190,13 @@ impl<'a, const N: usize> Deref for FrameGrantR<'a, { N }> { } } -impl<'a, const N: usize> DerefMut for FrameGrantR<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> DerefMut for FrameGrantR<'a, STO, { N }> { fn deref_mut(&mut self) -> &mut [u8] { &mut self.grant_r.buf[self.hdr_len.into()..] } } -impl<'a, const N: usize> FrameGrantW<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> FrameGrantW<'a, STO, { N }> { /// Commit a frame to make it available to the Consumer half. 
/// /// `used` is the size of the payload, in bytes, not @@ -220,7 +233,7 @@ impl<'a, const N: usize> FrameGrantW<'a, { N }> { } } -impl<'a, const N: usize> FrameGrantR<'a, { N }> { +impl<'a, STO: BBGetter, const N: usize> FrameGrantR<'a, STO, { N }> { /// Release a frame to make the space available for future writing /// /// Note: The full frame is always released From 15e3a1bf3ab8c1f32e092adf26499cb4f7520027 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sat, 20 Mar 2021 21:35:36 +0100 Subject: [PATCH 06/11] Now uses the trait all the way through --- bbqtest/src/benches.rs | 6 +- bbqtest/src/framed.rs | 12 +- bbqtest/src/lib.rs | 24 +- bbqtest/src/multi_thread.rs | 8 +- bbqtest/src/ring_around_the_senders.rs | 4 +- bbqtest/src/single_thread.rs | 4 +- core/src/bbbuffer.rs | 363 ++++++++++++++----------- core/src/framed.rs | 4 +- core/src/lib.rs | 9 +- 9 files changed, 241 insertions(+), 193 deletions(-) diff --git a/bbqtest/src/benches.rs b/bbqtest/src/benches.rs index 181a3c3..e6a4a7f 100644 --- a/bbqtest/src/benches.rs +++ b/bbqtest/src/benches.rs @@ -1,4 +1,4 @@ -use bbqueue::BBBuffer; +use bbqueue::BBQueue; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use std::cmp::min; @@ -17,7 +17,7 @@ pub fn criterion_benchmark(c: &mut Criterion) { c.bench_function("bbq 2048/4096", |bench| bench.iter(|| chunky(&data, 2048))); - let buffy: BBBuffer = BBBuffer::new(); + let buffy: BBQueue = BBQueue::new(); let (mut prod, mut cons) = buffy.try_split().unwrap(); c.bench_function("bbq 8192/65536", |bench| { @@ -196,7 +196,7 @@ pub fn criterion_benchmark(c: &mut Criterion) { use crossbeam_utils::thread; fn chunky(data: &[u8], chunksz: usize) { - let buffy: BBBuffer = BBBuffer::new(); + let buffy: BBQueue = BBQueue::new(); let (mut prod, mut cons) = buffy.try_split().unwrap(); thread::scope(|sc| { diff --git a/bbqtest/src/framed.rs b/bbqtest/src/framed.rs index 10ea8fa..810dd08 100644 --- a/bbqtest/src/framed.rs +++ b/bbqtest/src/framed.rs @@ -1,10 
+1,10 @@ #[cfg(test)] mod tests { - use bbqueue::BBBuffer; + use bbqueue::BBQueue; #[test] fn frame_wrong_size() { - let bb: BBBuffer<256> = BBBuffer::new(); + let bb: BBQueue<256> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); // Create largeish grants @@ -25,7 +25,7 @@ mod tests { #[test] fn full_size() { - let bb: BBBuffer<256> = BBBuffer::new(); + let bb: BBQueue<256> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); let mut ctr = 0; @@ -66,7 +66,7 @@ mod tests { #[test] fn frame_overcommit() { - let bb: BBBuffer<256> = BBBuffer::new(); + let bb: BBQueue<256> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); // Create largeish grants @@ -93,7 +93,7 @@ mod tests { #[test] fn frame_undercommit() { - let bb: BBBuffer<512> = BBBuffer::new(); + let bb: BBQueue<512> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); for _ in 0..100_000 { @@ -132,7 +132,7 @@ mod tests { #[test] fn frame_auto_commit_release() { - let bb: BBBuffer<256> = BBBuffer::new(); + let bb: BBQueue<256> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); for _ in 0..100 { diff --git a/bbqtest/src/lib.rs b/bbqtest/src/lib.rs index aff1c2b..7b1abcf 100644 --- a/bbqtest/src/lib.rs +++ b/bbqtest/src/lib.rs @@ -8,11 +8,11 @@ mod single_thread; #[cfg(test)] mod tests { - use bbqueue::{BBBuffer, Error as BBQError}; + use bbqueue::{BBQueue, Error as BBQError}; #[test] fn deref_deref_mut() { - let bb: BBBuffer<6> = BBBuffer::new(); + let bb: BBQueue<6> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split().unwrap(); let mut wgr = prod.grant_exact(1).unwrap(); @@ -35,8 +35,8 @@ mod tests { #[test] fn static_allocator() { // Check we can make multiple static items... 
- static BBQ1: BBBuffer<6> = BBBuffer::new(); - static BBQ2: BBBuffer<6> = BBBuffer::new(); + static BBQ1: BBQueue<6> = BBQueue::new(); + static BBQ2: BBQueue<6> = BBQueue::new(); let (mut prod1, mut cons1) = BBQ1.try_split().unwrap(); let (mut _prod2, mut cons2) = BBQ2.try_split().unwrap(); @@ -56,8 +56,8 @@ mod tests { #[test] fn release() { // Check we can make multiple static items... - static BBQ1: BBBuffer<6> = BBBuffer::new(); - static BBQ2: BBBuffer<6> = BBBuffer::new(); + static BBQ1: BBQueue<6> = BBQueue::new(); + static BBQ2: BBQueue<6> = BBQueue::new(); let (prod1, cons1) = BBQ1.try_split().unwrap(); let (prod2, cons2) = BBQ2.try_split().unwrap(); @@ -94,7 +94,7 @@ mod tests { #[test] fn direct_usage_sanity() { // Initialize - let bb: BBBuffer<6> = BBBuffer::new(); + let bb: BBQueue<6> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split().unwrap(); assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); @@ -179,7 +179,7 @@ mod tests { #[test] fn zero_sized_grant() { - let bb: BBBuffer<1000> = BBBuffer::new(); + let bb: BBQueue<1000> = BBQueue::new(); let (mut prod, mut _cons) = bb.try_split().unwrap(); let size = 1000; @@ -192,7 +192,7 @@ mod tests { #[test] fn frame_sanity() { - let bb: BBBuffer<1000> = BBBuffer::new(); + let bb: BBQueue<1000> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); // One frame in, one frame out @@ -239,7 +239,7 @@ mod tests { #[test] fn frame_wrap() { - let bb: BBBuffer<22> = BBBuffer::new(); + let bb: BBQueue<22> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); // 10 + 1 used @@ -305,7 +305,7 @@ mod tests { #[test] fn frame_big_little() { - let bb: BBBuffer<65536> = BBBuffer::new(); + let bb: BBQueue<65536> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split_framed().unwrap(); // Create a frame that should take 3 bytes for the header @@ -329,7 +329,7 @@ mod tests { #[test] fn split_sanity_check() { - let bb: BBBuffer<10> = BBBuffer::new(); + let bb: 
BBQueue<10> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split().unwrap(); // Fill buffer diff --git a/bbqtest/src/multi_thread.rs b/bbqtest/src/multi_thread.rs index 33610a8..cec8a9b 100644 --- a/bbqtest/src/multi_thread.rs +++ b/bbqtest/src/multi_thread.rs @@ -1,7 +1,7 @@ #[cfg_attr(not(feature = "verbose"), allow(unused_variables))] #[cfg(test)] mod tests { - use bbqueue::{BBBuffer, Error}; + use bbqueue::{BBQueue, Error}; use rand::prelude::*; use std::thread::spawn; use std::time::{Duration, Instant}; @@ -48,7 +48,7 @@ mod tests { #[cfg(feature = "verbose")] println!("RTX: Running test..."); - static BB: BBBuffer = BBBuffer::new(); + static BB: BBQueue = BBQueue::new(); let (mut tx, mut rx) = BB.try_split().unwrap(); let mut last_tx = Instant::now(); @@ -140,7 +140,7 @@ mod tests { #[test] fn sanity_check() { - static BB: BBBuffer = BBBuffer::new(); + static BB: BBQueue = BBQueue::new(); let (mut tx, mut rx) = BB.try_split().unwrap(); let mut last_tx = Instant::now(); @@ -234,7 +234,7 @@ mod tests { #[test] fn sanity_check_grant_max() { - static BB: BBBuffer = BBBuffer::new(); + static BB: BBQueue = BBQueue::new(); let (mut tx, mut rx) = BB.try_split().unwrap(); #[cfg(feature = "verbose")] diff --git a/bbqtest/src/ring_around_the_senders.rs b/bbqtest/src/ring_around_the_senders.rs index c544609..0cc95fc 100644 --- a/bbqtest/src/ring_around_the_senders.rs +++ b/bbqtest/src/ring_around_the_senders.rs @@ -1,7 +1,7 @@ #[cfg(test)] mod tests { - use bbqueue::{BBBuffer, Consumer, GrantR, GrantW, Producer}; + use bbqueue::{BBQueue, Consumer, GrantR, GrantW, Producer}; enum Potato<'a, const N: usize> { Tx((Producer<'a, N>, u8)), @@ -76,7 +76,7 @@ mod tests { } } - static BB: BBBuffer = BBBuffer::new(); + static BB: BBQueue = BBQueue::new(); use std::sync::mpsc::{channel, Receiver, Sender}; use std::thread::spawn; diff --git a/bbqtest/src/single_thread.rs b/bbqtest/src/single_thread.rs index d4ce82c..1fdc96f 100644 --- a/bbqtest/src/single_thread.rs +++ 
b/bbqtest/src/single_thread.rs @@ -1,10 +1,10 @@ #[cfg(test)] mod tests { - use bbqueue::BBBuffer; + use bbqueue::BBQueue; #[test] fn sanity_check() { - let bb: BBBuffer<6> = BBBuffer::new(); + let bb: BBQueue<6> = BBQueue::new(); let (mut prod, mut cons) = bb.try_split().unwrap(); const ITERS: usize = 100000; diff --git a/core/src/bbbuffer.rs b/core/src/bbbuffer.rs index 0b16d3c..fa561fe 100644 --- a/core/src/bbbuffer.rs +++ b/core/src/bbbuffer.rs @@ -17,7 +17,12 @@ use core::{ }, }; -struct BBHeader { +struct OwnedBBBuffer { + hdr: BBHeader, + storage: UnsafeCell<[u8; N]> +} + +pub struct BBHeader { /// Where the next byte will be written write: AtomicUsize, @@ -47,25 +52,33 @@ struct BBHeader { already_split: AtomicBool, } -pub(crate) trait BBGetter: Clone { +// TODO(AJM): Seal this trait? Unsafe to impl? +// Do I ever need the header XOR the storage? Or should +// they always just travel together? +// -> Yes, they do, but it's probably not worth separating them, instead just handing +// back some combined type. They will probably always be "allocated" together. +// +// Maybe the BBGetter trait can be replaced with AsRef or something? +// Also, this would probably let anyone get access to the header of bbqueue, which +// would be wildly unsafe +pub trait BBGetter: Clone { fn get_header(&self) -> &BBHeader; fn get_storage(&self) -> (*mut u8, usize); } /// A backing structure for a BBQueue. Can be used to create either /// a BBQueue or a split Producer/Consumer pair -pub struct BBBuffer +pub struct BBQueue where STO: BBGetter, { - buf: STO, - hdr: BBHeader, + sto: STO } -unsafe impl Sync for BBBuffer {} +unsafe impl Sync for BBQueue {} -impl<'a, STO: BBGetter, const N: usize> BBBuffer { - /// Attempt to split the `BBBuffer` into `Consumer` and `Producer` halves to gain access to the +impl<'a, STO: BBGetter, const N: usize> BBQueue { + /// Attempt to split the `BBQueue` into `Consumer` and `Producer` halves to gain access to the /// buffer. 
If buffer has already been split, an error will be returned. /// /// NOTE: When splitting, the underlying buffer will be explicitly initialized @@ -80,10 +93,10 @@ impl<'a, STO: BBGetter, const N: usize> BBBuffer { /// ```rust /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (prod, cons) = buffer.try_split().unwrap(); /// /// // Not possible to split twice @@ -97,7 +110,7 @@ impl<'a, STO: BBGetter, const N: usize> BBBuffer { /// # } /// ``` pub fn try_split(&'a self) -> Result<(Producer<'a, STO, { N }>, Consumer<'a, STO, { N }>)> { - if atomic::swap(&self.hdr.already_split, true, AcqRel) { + if atomic::swap(&self.sto.get_header().already_split, true, AcqRel) { return Err(Error::AlreadySplit); } @@ -118,7 +131,7 @@ impl<'a, STO: BBGetter, const N: usize> BBBuffer { } } - /// Attempt to split the `BBBuffer` into `FrameConsumer` and `FrameProducer` halves + /// Attempt to split the `BBQueue` into `FrameConsumer` and `FrameProducer` halves /// to gain access to the buffer. If buffer has already been split, an error /// will be returned. /// @@ -142,16 +155,16 @@ impl<'a, STO: BBGetter, const N: usize> BBBuffer { /// This re-initializes the buffer so it may be split in a different mode at a later /// time. There must be no read or write grants active, or an error will be returned. /// - /// The `Producer` and `Consumer` must be from THIS `BBBuffer`, or an error will + /// The `Producer` and `Consumer` must be from THIS `BBQueue`, or an error will /// be returned. /// /// ```rust /// # // bbqueue test shim! 
/// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (prod, cons) = buffer.try_split().unwrap(); /// /// // Not possible to split twice @@ -188,8 +201,10 @@ impl<'a, STO: BBGetter, const N: usize> BBBuffer { return Err((prod, cons)); } - let wr_in_progress = self.hdr.write_in_progress.load(Acquire); - let rd_in_progress = self.hdr.read_in_progress.load(Acquire); + let hdr = self.sto.get_header(); + + let wr_in_progress = hdr.write_in_progress.load(Acquire); + let rd_in_progress = hdr.read_in_progress.load(Acquire); if wr_in_progress || rd_in_progress { // Can't release, active grant(s) in progress @@ -201,13 +216,13 @@ impl<'a, STO: BBGetter, const N: usize> BBBuffer { drop(cons); // Re-initialize the buffer (not totally needed, but nice to do) - self.hdr.write.store(0, Release); - self.hdr.read.store(0, Release); - self.hdr.reserve.store(0, Release); - self.hdr.last.store(0, Release); + hdr.write.store(0, Release); + hdr.read.store(0, Release); + hdr.reserve.store(0, Release); + hdr.last.store(0, Release); // Mark the buffer as ready to be split - self.hdr.already_split.store(false, Release); + hdr.already_split.store(false, Release); Ok(()) } @@ -217,7 +232,7 @@ impl<'a, STO: BBGetter, const N: usize> BBBuffer { /// This re-initializes the buffer so it may be split in a different mode at a later /// time. There must be no read or write grants active, or an error will be returned. /// - /// The `FrameProducer` and `FrameConsumer` must be from THIS `BBBuffer`, or an error + /// The `FrameProducer` and `FrameConsumer` must be from THIS `BBQueue`, or an error /// will be returned. pub fn try_release_framed( &'a self, @@ -232,68 +247,72 @@ impl<'a, STO: BBGetter, const N: usize> BBBuffer { } } -impl BBBuffer -where - STO: BBGetter, -{ - /// Create a new constant inner portion of a `BBBuffer`. 
- /// - /// NOTE: This is only necessary to use when creating a `BBBuffer` at static - /// scope, and is generally never used directly. This process is necessary to - /// work around current limitations in `const fn`, and will be replaced in - /// the future. - /// - /// ```rust,no_run - /// use bbqueue_ng::BBBuffer; - /// - /// static BUF: BBBuffer<6> = BBBuffer::new(); - /// - /// fn main() { - /// let (prod, cons) = BUF.try_split().unwrap(); - /// } - /// ``` - pub const fn new() -> Self { - Self { - // This will not be initialized until we split the buffer - buf: UnsafeCell::new([0u8; N]), - - hdr: BBHeader { - /// Owned by the writer - write: AtomicUsize::new(0), - - /// Owned by the reader - read: AtomicUsize::new(0), - - /// Cooperatively owned - /// - /// NOTE: This should generally be initialized as size_of::(), however - /// this would prevent the structure from being entirely zero-initialized, - /// and can cause the .data section to be much larger than necessary. By - /// forcing the `last` pointer to be zero initially, we place the structure - /// in an "inverted" condition, which will be resolved on the first commited - /// bytes that are written to the structure. - /// - /// When read == last == write, no bytes will be allowed to be read (good), but - /// write grants can be given out (also good). - last: AtomicUsize::new(0), - - /// Owned by the Writer, "private" - reserve: AtomicUsize::new(0), - - /// Owned by the Reader, "private" - read_in_progress: AtomicBool::new(false), - - /// Owned by the Writer, "private" - write_in_progress: AtomicBool::new(false), - - /// We haven't split at the start - already_split: AtomicBool::new(false), - } - } - } -} -/// `Producer` is the primary interface for pushing data into a `BBBuffer`. +// TODO(AJM): Move this to BBBuffer's constructor +// +// +// impl BBQueue +// where +// STO: BBGetter, +// { +// /// Create a new constant inner portion of a `BBQueue`. 
+// /// +// /// NOTE: This is only necessary to use when creating a `BBQueue` at static +// /// scope, and is generally never used directly. This process is necessary to +// /// work around current limitations in `const fn`, and will be replaced in +// /// the future. +// /// +// /// ```rust,no_run +// /// use bbqueue_ng::BBQueue; +// /// +// /// static BUF: BBQueue<6> = BBQueue::new(); +// /// +// /// fn main() { +// /// let (prod, cons) = BUF.try_split().unwrap(); +// /// } +// /// ``` +// pub const fn new() -> Self { +// Self { +// // This will not be initialized until we split the buffer +// buf: UnsafeCell::new([0u8; N]), + +// hdr: BBHeader { +// /// Owned by the writer +// write: AtomicUsize::new(0), + +// /// Owned by the reader +// read: AtomicUsize::new(0), + +// /// Cooperatively owned +// /// +// /// NOTE: This should generally be initialized as size_of::(), however +// /// this would prevent the structure from being entirely zero-initialized, +// /// and can cause the .data section to be much larger than necessary. By +// /// forcing the `last` pointer to be zero initially, we place the structure +// /// in an "inverted" condition, which will be resolved on the first commited +// /// bytes that are written to the structure. +// /// +// /// When read == last == write, no bytes will be allowed to be read (good), but +// /// write grants can be given out (also good). +// last: AtomicUsize::new(0), + +// /// Owned by the Writer, "private" +// reserve: AtomicUsize::new(0), + +// /// Owned by the Reader, "private" +// read_in_progress: AtomicBool::new(false), + +// /// Owned by the Writer, "private" +// write_in_progress: AtomicBool::new(false), + +// /// We haven't split at the start +// already_split: AtomicBool::new(false), +// } +// } +// } +// } + +/// `Producer` is the primary interface for pushing data into a `BBQueue`. /// There are various methods for obtaining a grant to write to the buffer, with /// different potential tradeoffs. 
As all grants are required to be a contiguous /// range of data, different strategies are sometimes useful when making the decision @@ -321,7 +340,7 @@ pub struct Producer<'a, STO, const N: usize> where STO: BBGetter, { - bbq: NonNull>, + bbq: NonNull>, pd: PhantomData<&'a ()>, } @@ -345,10 +364,10 @@ where /// ```rust /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -367,16 +386,22 @@ where /// # } /// ``` pub fn grant_exact(&mut self, sz: usize) -> Result> { - let inner = unsafe { self.bbq.as_ref() }; + let (hdr, sto) = unsafe { + let bbq = self.bbq.as_ref(); + let hdr = bbq.sto.get_header(); + let sto = bbq.sto.get_storage(); + (hdr, sto) + }; - if atomic::swap(&inner.hdr.write_in_progress, true, AcqRel) { + + if atomic::swap(&hdr.write_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } // Writer component. 
Must never write to `read`, // be careful writing to `load` - let write = inner.hdr.write.load(Acquire); - let read = inner.hdr.read.load(Acquire); + let write = hdr.write.load(Acquire); + let read = hdr.read.load(Acquire); let max = N; let already_inverted = write < read; @@ -386,7 +411,7 @@ where write } else { // Inverted, no room is available - inner.hdr.write_in_progress.store(false, Release); + hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } else { @@ -404,18 +429,18 @@ where 0 } else { // Not invertible, no space - inner.hdr.write_in_progress.store(false, Release); + hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } }; // Safe write, only viewed by this task - inner.hdr.reserve.store(start + sz, Release); + hdr.reserve.store(start + sz, Release); // This is sound, as UnsafeCell is `#[repr(Transparent)] // Here we are casting a `*mut [u8; N]` to a `*mut u8` - let start_of_buf_ptr = inner.buf.get().cast::(); + let start_of_buf_ptr = sto.0; let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(start as isize), sz) }; @@ -436,10 +461,10 @@ where /// ``` /// # // bbqueue test shim! 
/// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, mut cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -465,16 +490,21 @@ where /// # } /// ``` pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { - let inner = unsafe { self.bbq.as_ref() }; + let (hdr, sto) = unsafe { + let bbq = self.bbq.as_ref(); + let hdr = bbq.sto.get_header(); + let sto = bbq.sto.get_storage(); + (hdr, sto) + }; - if atomic::swap(&inner.hdr.write_in_progress, true, AcqRel) { + if atomic::swap(&hdr.write_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } // Writer component. Must never write to `read`, // be careful writing to `load` - let write = inner.hdr.write.load(Acquire); - let read = inner.hdr.read.load(Acquire); + let write = hdr.write.load(Acquire); + let read = hdr.read.load(Acquire); let max = N; let already_inverted = write < read; @@ -488,7 +518,7 @@ where write } else { // Inverted, no room is available - inner.hdr.write_in_progress.store(false, Release); + hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } else { @@ -507,18 +537,18 @@ where 0 } else { // Not invertible, no space - inner.hdr.write_in_progress.store(false, Release); + hdr.write_in_progress.store(false, Release); return Err(Error::InsufficientSize); } } }; // Safe write, only viewed by this task - inner.hdr.reserve.store(start + sz, Release); + hdr.reserve.store(start + sz, Release); // This is sound, as UnsafeCell is `#[repr(Transparent)] // Here we are casting a `*mut [u8; N]` to a `*mut u8` - let start_of_buf_ptr = inner.buf.get().cast::(); + let start_of_buf_ptr = sto.0; let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(start as isize), sz) }; @@ -530,9 +560,9 @@ where } } 
-/// `Consumer` is the primary interface for reading data from a `BBBuffer`. +/// `Consumer` is the primary interface for reading data from a `BBQueue`. pub struct Consumer<'a, STO: BBGetter, const N: usize> { - bbq: NonNull>, + bbq: NonNull>, pd: PhantomData<&'a ()>, } @@ -547,10 +577,10 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { /// ```rust /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, mut cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -570,15 +600,20 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { /// # } /// ``` pub fn read(&mut self) -> Result> { - let inner = unsafe { self.bbq.as_ref() }; + let (hdr, sto) = unsafe { + let bbq = self.bbq.as_ref(); + let hdr = bbq.sto.get_header(); + let sto = bbq.sto.get_storage(); + (hdr, sto) + }; - if atomic::swap(&inner.hdr.read_in_progress, true, AcqRel) { + if atomic::swap(&hdr.read_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } - let write = inner.hdr.write.load(Acquire); - let last = inner.hdr.last.load(Acquire); - let mut read = inner.hdr.read.load(Acquire); + let write = hdr.write.load(Acquire); + let last = hdr.last.load(Acquire); + let mut read = hdr.read.load(Acquire); // Resolve the inverted case or end of read if (read == last) && (write < read) { @@ -591,7 +626,7 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { // Commit does not check read, but if Grant has started an inversion, // grant could move Last to the prior write position // MOVING READ BACKWARDS! 
- inner.hdr.read.store(0, Release); + hdr.read.store(0, Release); } let sz = if write < read { @@ -603,13 +638,11 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { } - read; if sz == 0 { - inner.hdr.read_in_progress.store(false, Release); + hdr.read_in_progress.store(false, Release); return Err(Error::InsufficientSize); } - // This is sound, as UnsafeCell is `#[repr(Transparent)] - // Here we are casting a `*mut [u8; N]` to a `*mut u8` - let start_of_buf_ptr = inner.buf.get().cast::(); + let start_of_buf_ptr = sto.0; let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(read as isize), sz) }; Ok(GrantR { @@ -622,15 +655,20 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { /// Obtains two disjoint slices, which are each contiguous of committed bytes. /// Combined these contain all previously commited data. pub fn split_read(&mut self) -> Result> { - let inner = unsafe { self.bbq.as_ref() }; + let (hdr, sto) = unsafe { + let bbq = self.bbq.as_ref(); + let hdr = bbq.sto.get_header(); + let sto = bbq.sto.get_storage(); + (hdr, sto) + }; - if atomic::swap(&inner.hdr.read_in_progress, true, AcqRel) { + if atomic::swap(&hdr.read_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); } - let write = inner.hdr.write.load(Acquire); - let last = inner.hdr.last.load(Acquire); - let mut read = inner.hdr.read.load(Acquire); + let write = hdr.write.load(Acquire); + let last = hdr.last.load(Acquire); + let mut read = hdr.read.load(Acquire); // Resolve the inverted case or end of read if (read == last) && (write < read) { @@ -643,7 +681,7 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { // Commit does not check read, but if Grant has started an inversion, // grant could move Last to the prior write position // MOVING READ BACKWARDS! 
- inner.hdr.read.store(0, Release); + hdr.read.store(0, Release); } let (sz1, sz2) = if write < read { @@ -655,13 +693,13 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { }; if sz1 == 0 { - inner.hdr.read_in_progress.store(false, Release); + hdr.read_in_progress.store(false, Release); return Err(Error::InsufficientSize); } // This is sound, as UnsafeCell is `#[repr(Transparent)] // Here we are casting a `*mut [u8; N]` to a `*mut u8` - let start_of_buf_ptr = inner.buf.get().cast::(); + let start_of_buf_ptr = sto.0; let grant_slice1 = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(read as isize), sz1) }; let grant_slice2 = unsafe { from_raw_parts_mut(start_of_buf_ptr, sz2) }; @@ -675,7 +713,7 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { } } -impl BBBuffer { +impl BBQueue { /// Returns the size of the backing storage. /// /// This is the maximum number of bytes that can be stored in this queue. @@ -683,10 +721,10 @@ impl BBBuffer { /// ```rust /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// assert_eq!(buffer.capacity(), 6); /// # // bbqueue test shim! /// # } @@ -717,7 +755,7 @@ where STO: BBGetter, { pub(crate) buf: &'a mut [u8], - bbq: NonNull>, + bbq: NonNull>, pub(crate) to_commit: usize, } @@ -741,7 +779,7 @@ where STO: BBGetter, { pub(crate) buf: &'a mut [u8], - bbq: NonNull>, + bbq: NonNull>, pub(crate) to_release: usize, } @@ -755,7 +793,7 @@ where { pub(crate) buf1: &'a mut [u8], pub(crate) buf2: &'a mut [u8], - bbq: NonNull>, + bbq: NonNull>, pub(crate) to_release: usize, } @@ -783,10 +821,10 @@ impl<'a, STO: BBGetter, const N: usize> GrantW<'a, STO, { N }> { /// ```rust /// # // bbqueue test shim! 
/// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, mut cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -807,12 +845,15 @@ impl<'a, STO: BBGetter, const N: usize> GrantW<'a, STO, { N }> { #[inline(always)] pub(crate) fn commit_inner(&mut self, used: usize) { - let inner = unsafe { self.bbq.as_ref() }; + let hdr = unsafe { + let bbq = self.bbq.as_ref(); + bbq.sto.get_header() + }; // If there is no grant in progress, return early. This // generally means we are dropping the grant within a // wrapper structure - if !inner.hdr.write_in_progress.load(Acquire) { + if !hdr.write_in_progress.load(Acquire) { return; } @@ -823,17 +864,17 @@ impl<'a, STO: BBGetter, const N: usize> GrantW<'a, STO, { N }> { let len = self.buf.len(); let used = min(len, used); - let write = inner.hdr.write.load(Acquire); - atomic::fetch_sub(&inner.hdr.reserve, len - used, AcqRel); + let write = hdr.write.load(Acquire); + atomic::fetch_sub(&hdr.reserve, len - used, AcqRel); let max = N; - let last = inner.hdr.last.load(Acquire); - let new_write = inner.hdr.reserve.load(Acquire); + let last = hdr.last.load(Acquire); + let new_write = hdr.reserve.load(Acquire); if (new_write < write) && (write != max) { // We have already wrapped, but we are skipping some bytes at the end of the ring. // Mark `last` where the write pointer used to be to hold the line here - inner.hdr.last.store(write, Release); + hdr.last.store(write, Release); } else if new_write > last { // We're about to pass the last pointer, which was previously the artificial // end of the ring. 
Now that we've passed it, we can "unlock" the section @@ -842,7 +883,7 @@ impl<'a, STO: BBGetter, const N: usize> GrantW<'a, STO, { N }> { // Since new_write is strictly larger than last, it is safe to move this as // the other thread will still be halted by the (about to be updated) write // value - inner.hdr.last.store(max, Release); + hdr.last.store(max, Release); } // else: If new_write == last, either: // * last == max, so no need to write, OR @@ -852,10 +893,10 @@ impl<'a, STO: BBGetter, const N: usize> GrantW<'a, STO, { N }> { // Write must be updated AFTER last, otherwise read could think it was // time to invert early! - inner.hdr.write.store(new_write, Release); + hdr.write.store(new_write, Release); // Allow subsequent grants - inner.hdr.write_in_progress.store(false, Release); + hdr.write_in_progress.store(false, Release); } /// Configures the amount of bytes to be commited on drop. @@ -893,10 +934,10 @@ impl<'a, STO: BBGetter, const N: usize> GrantR<'a, STO, { N }> { /// ``` /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, mut cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -931,12 +972,15 @@ impl<'a, STO: BBGetter, const N: usize> GrantR<'a, STO, { N }> { #[inline(always)] pub(crate) fn release_inner(&mut self, used: usize) { - let inner = unsafe { self.bbq.as_ref() }; + let hdr = unsafe { + let bbq = self.bbq.as_ref(); + bbq.sto.get_header() + }; // If there is no grant in progress, return early. 
This // generally means we are dropping the grant within a // wrapper structure - if !inner.hdr.read_in_progress.load(Acquire) { + if !hdr.read_in_progress.load(Acquire) { return; } @@ -944,9 +988,9 @@ impl<'a, STO: BBGetter, const N: usize> GrantR<'a, STO, { N }> { debug_assert!(used <= self.buf.len()); // This should be fine, purely incrementing - let _ = atomic::fetch_add(&inner.hdr.read, used, Release); + let _ = atomic::fetch_add(&hdr.read, used, Release); - inner.hdr.read_in_progress.store(false, Release); + hdr.read_in_progress.store(false, Release); } /// Configures the amount of bytes to be released on drop. @@ -977,10 +1021,10 @@ impl<'a, STO: BBGetter, const N: usize> SplitGrantR<'a, STO, { N }> { /// ``` /// # // bbqueue test shim! /// # fn bbqtest() { - /// use bbqueue_ng::BBBuffer; + /// use bbqueue_ng::BBQueue; /// /// // Create and split a new buffer of 6 elements - /// let buffer: BBBuffer<6> = BBBuffer::new(); + /// let buffer: BBQueue<6> = BBQueue::new(); /// let (mut prod, mut cons) = buffer.try_split().unwrap(); /// /// // Successfully obtain and commit a grant of four bytes @@ -1015,12 +1059,15 @@ impl<'a, STO: BBGetter, const N: usize> SplitGrantR<'a, STO, { N }> { #[inline(always)] pub(crate) fn release_inner(&mut self, used: usize) { - let inner = unsafe { self.bbq.as_ref() }; + let hdr = unsafe { + let bbq = self.bbq.as_ref(); + bbq.sto.get_header() + }; // If there is no grant in progress, return early. 
This // generally means we are dropping the grant within a // wrapper structure - if !inner.hdr.read_in_progress.load(Acquire) { + if !hdr.read_in_progress.load(Acquire) { return; } @@ -1029,13 +1076,13 @@ impl<'a, STO: BBGetter, const N: usize> SplitGrantR<'a, STO, { N }> { if used <= self.buf1.len() { // This should be fine, purely incrementing - let _ = atomic::fetch_add(&inner.hdr.read, used, Release); + let _ = atomic::fetch_add(&hdr.read, used, Release); } else { // Also release parts of the second buffer - inner.hdr.read.store(used - self.buf1.len(), Release); + hdr.read.store(used - self.buf1.len(), Release); } - inner.hdr.read_in_progress.store(false, Release); + hdr.read_in_progress.store(false, Release); } /// Configures the amount of bytes to be released on drop. diff --git a/core/src/framed.rs b/core/src/framed.rs index 679c7a5..9db4ed8 100644 --- a/core/src/framed.rs +++ b/core/src/framed.rs @@ -11,9 +11,9 @@ //! ```rust //! # // bbqueue test shim! //! # fn bbqtest() { -//! use bbqueue_ng::BBBuffer; +//! use bbqueue_ng::BBQueue; //! -//! let bb: BBBuffer<1000> = BBBuffer::new(); +//! let bb: BBQueue<1000> = BBQueue::new(); //! let (mut prod, mut cons) = bb.try_split_framed().unwrap(); //! //! // One frame in, one frame out diff --git a/core/src/lib.rs b/core/src/lib.rs index a96c947..12476f9 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -26,10 +26,10 @@ //! ## Local usage //! //! ```rust, no_run -//! # use bbqueue_ng::BBBuffer; +//! # use bbqueue_ng::BBQueue; //! # //! // Create a buffer with six elements -//! let bb: BBBuffer<6> = BBBuffer::new(); +//! let bb: BBQueue<6> = BBQueue::new(); //! let (mut prod, mut cons) = bb.try_split().unwrap(); //! //! // Request space for one byte @@ -55,10 +55,10 @@ //! ## Static usage //! //! ```rust, no_run -//! # use bbqueue_ng::BBBuffer; +//! # use bbqueue_ng::BBQueue; //! # //! // Create a buffer with six elements -//! static BB: BBBuffer<6> = BBBuffer::new(); +//! 
static BB: BBQueue<6> = BBQueue::new(); //! //! fn main() { //! // Split the bbqueue into producer and consumer halves. @@ -105,6 +105,7 @@ // AJM: TODO - Restore // #![deny(missing_docs)] // #![deny(warnings)] +#![allow(dead_code, unused_imports)] mod bbbuffer; pub use bbbuffer::*; From 67e06c3ef2f6aee2b148ff3cf81c507f3603d698 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sat, 20 Mar 2021 23:04:50 +0100 Subject: [PATCH 07/11] Close, but needs to move to BORROWING the STO inside of child objects. --- bbqtest/src/lib.rs | 686 +++++++++++++++++++++--------------------- core/src/bbbuffer.rs | 266 ++++++++-------- core/src/framed.rs | 2 +- core/storage-notes.md | 116 +++++++ rust-toolchain | 2 +- 5 files changed, 594 insertions(+), 478 deletions(-) create mode 100644 core/storage-notes.md diff --git a/bbqtest/src/lib.rs b/bbqtest/src/lib.rs index 7b1abcf..a434f58 100644 --- a/bbqtest/src/lib.rs +++ b/bbqtest/src/lib.rs @@ -1,18 +1,20 @@ //! NOTE: this crate is really just a shim for testing //! the other no-std crate. -mod framed; -mod multi_thread; -mod ring_around_the_senders; -mod single_thread; +// mod framed; +// mod multi_thread; +// mod ring_around_the_senders; +// mod single_thread; #[cfg(test)] mod tests { - use bbqueue::{BBQueue, Error as BBQError}; + use bbqueue::{BBQueue, Error as BBQError, OwnedBBBuffer as Obbb}; #[test] fn deref_deref_mut() { - let bb: BBQueue<6> = BBQueue::new(); + let sto = Obbb::new(); + let bb: BBQueue<&Obbb<6>, 6> = BBQueue::new(&sto); + let (mut prod, mut cons) = bb.try_split().unwrap(); let mut wgr = prod.grant_exact(1).unwrap(); @@ -34,9 +36,11 @@ mod tests { #[test] fn static_allocator() { + static STO_1: Obbb<6> = Obbb::new(); + static STO_2: Obbb<6> = Obbb::new(); // Check we can make multiple static items... 
- static BBQ1: BBQueue<6> = BBQueue::new(); - static BBQ2: BBQueue<6> = BBQueue::new(); + static BBQ1: BBQueue<&Obbb<6>, 6> = BBQueue::new(&STO_1); + static BBQ2: BBQueue<&Obbb<6>, 6> = BBQueue::new(&STO_2); let (mut prod1, mut cons1) = BBQ1.try_split().unwrap(); let (mut _prod2, mut cons2) = BBQ2.try_split().unwrap(); @@ -53,346 +57,346 @@ mod tests { assert_eq!(&*rgr1, &[1, 2, 3]); } - #[test] - fn release() { - // Check we can make multiple static items... - static BBQ1: BBQueue<6> = BBQueue::new(); - static BBQ2: BBQueue<6> = BBQueue::new(); - let (prod1, cons1) = BBQ1.try_split().unwrap(); - let (prod2, cons2) = BBQ2.try_split().unwrap(); - - // We cannot release with the wrong prod/cons - let (prod2, cons2) = BBQ1.try_release(prod2, cons2).unwrap_err(); - let (prod1, cons1) = BBQ2.try_release(prod1, cons1).unwrap_err(); - - // We cannot release with the wrong consumer... - let (prod1, cons2) = BBQ1.try_release(prod1, cons2).unwrap_err(); - - // ...or the wrong producer - let (prod2, cons1) = BBQ1.try_release(prod2, cons1).unwrap_err(); - - // We cannot release with a write grant in progress - let mut prod1 = prod1; - let wgr1 = prod1.grant_exact(3).unwrap(); - let (prod1, mut cons1) = BBQ1.try_release(prod1, cons1).unwrap_err(); - - // We cannot release with a read grant in progress - wgr1.commit(3); - let rgr1 = cons1.read().unwrap(); - let (prod1, cons1) = BBQ1.try_release(prod1, cons1).unwrap_err(); - - // But we can when everything is resolved - rgr1.release(3); - assert!(BBQ1.try_release(prod1, cons1).is_ok()); - assert!(BBQ2.try_release(prod2, cons2).is_ok()); - - // And we can re-split on-demand - let _ = BBQ1.try_split().unwrap(); - let _ = BBQ2.try_split().unwrap(); - } - - #[test] - fn direct_usage_sanity() { - // Initialize - let bb: BBQueue<6> = BBQueue::new(); - let (mut prod, mut cons) = bb.try_split().unwrap(); - assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); - - // Initial grant, shouldn't roll over - let mut x = 
prod.grant_exact(4).unwrap(); - - // Still no data available yet - assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); - - // Add full data from grant - x.copy_from_slice(&[1, 2, 3, 4]); - - // Still no data available yet - assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); - - // Commit data - x.commit(4); - - ::std::sync::atomic::fence(std::sync::atomic::Ordering::SeqCst); - - let a = cons.read().unwrap(); - assert_eq!(&*a, &[1, 2, 3, 4]); - - // Release the first two bytes - a.release(2); - - let r = cons.read().unwrap(); - assert_eq!(&*r, &[3, 4]); - r.release(0); + // #[test] + // fn release() { + // // Check we can make multiple static items... + // static BBQ1: BBQueue<6> = BBQueue::new(); + // static BBQ2: BBQueue<6> = BBQueue::new(); + // let (prod1, cons1) = BBQ1.try_split().unwrap(); + // let (prod2, cons2) = BBQ2.try_split().unwrap(); - // Grant two more - let mut x = prod.grant_exact(2).unwrap(); - let r = cons.read().unwrap(); - assert_eq!(&*r, &[3, 4]); - r.release(0); + // // We cannot release with the wrong prod/cons + // let (prod2, cons2) = BBQ1.try_release(prod2, cons2).unwrap_err(); + // let (prod1, cons1) = BBQ2.try_release(prod1, cons1).unwrap_err(); - // Add more data - x.copy_from_slice(&[11, 12]); - let r = cons.read().unwrap(); - assert_eq!(&*r, &[3, 4]); - r.release(0); + // // We cannot release with the wrong consumer... 
+ // let (prod1, cons2) = BBQ1.try_release(prod1, cons2).unwrap_err(); - // Commit - x.commit(2); + // // ...or the wrong producer + // let (prod2, cons1) = BBQ1.try_release(prod2, cons1).unwrap_err(); - let a = cons.read().unwrap(); - assert_eq!(&*a, &[3, 4, 11, 12]); + // // We cannot release with a write grant in progress + // let mut prod1 = prod1; + // let wgr1 = prod1.grant_exact(3).unwrap(); + // let (prod1, mut cons1) = BBQ1.try_release(prod1, cons1).unwrap_err(); - a.release(2); - let r = cons.read().unwrap(); - assert_eq!(&*r, &[11, 12]); - r.release(0); + // // We cannot release with a read grant in progress + // wgr1.commit(3); + // let rgr1 = cons1.read().unwrap(); + // let (prod1, cons1) = BBQ1.try_release(prod1, cons1).unwrap_err(); - let mut x = prod.grant_exact(3).unwrap(); - let r = cons.read().unwrap(); - assert_eq!(&*r, &[11, 12]); - r.release(0); + // // But we can when everything is resolved + // rgr1.release(3); + // assert!(BBQ1.try_release(prod1, cons1).is_ok()); + // assert!(BBQ2.try_release(prod2, cons2).is_ok()); - x.copy_from_slice(&[21, 22, 23]); + // // And we can re-split on-demand + // let _ = BBQ1.try_split().unwrap(); + // let _ = BBQ2.try_split().unwrap(); + // } - let r = cons.read().unwrap(); - assert_eq!(&*r, &[11, 12]); - r.release(0); - x.commit(3); + // #[test] + // fn direct_usage_sanity() { + // // Initialize + // let bb: BBQueue<6> = BBQueue::new(); + // let (mut prod, mut cons) = bb.try_split().unwrap(); + // assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); - let a = cons.read().unwrap(); - - // NOTE: The data we just added isn't available yet, - // since it has wrapped around - assert_eq!(&*a, &[11, 12]); - - a.release(2); - - // And now we can see it - let r = cons.read().unwrap(); - assert_eq!(&*r, &[21, 22, 23]); - r.release(0); - - // Ask for something way too big - assert!(prod.grant_exact(10).is_err()); - } - - #[test] - fn zero_sized_grant() { - let bb: BBQueue<1000> = BBQueue::new(); - let (mut prod, 
mut _cons) = bb.try_split().unwrap(); - - let size = 1000; - let grant = prod.grant_exact(size).unwrap(); - grant.commit(size); - - let grant = prod.grant_exact(0).unwrap(); - grant.commit(0); - } - - #[test] - fn frame_sanity() { - let bb: BBQueue<1000> = BBQueue::new(); - let (mut prod, mut cons) = bb.try_split_framed().unwrap(); - - // One frame in, one frame out - let mut wgrant = prod.grant(128).unwrap(); - assert_eq!(wgrant.len(), 128); - for (idx, i) in wgrant.iter_mut().enumerate() { - *i = idx as u8; - } - wgrant.commit(128); - - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), 128); - for (idx, i) in rgrant.iter().enumerate() { - assert_eq!(*i, idx as u8); - } - rgrant.release(); - - // Three frames in, three frames out - let mut state = 0; - let states = [16usize, 32, 24]; - - for step in &states { - let mut wgrant = prod.grant(*step).unwrap(); - assert_eq!(wgrant.len(), *step); - for (idx, i) in wgrant.iter_mut().enumerate() { - *i = (idx + state) as u8; - } - wgrant.commit(*step); - state += *step; - } - - state = 0; - - for step in &states { - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), *step); - for (idx, i) in rgrant.iter().enumerate() { - assert_eq!(*i, (idx + state) as u8); - } - rgrant.release(); - state += *step; - } - } - - #[test] - fn frame_wrap() { - let bb: BBQueue<22> = BBQueue::new(); - let (mut prod, mut cons) = bb.try_split_framed().unwrap(); - - // 10 + 1 used - let mut wgrant = prod.grant(10).unwrap(); - assert_eq!(wgrant.len(), 10); - for (idx, i) in wgrant.iter_mut().enumerate() { - *i = idx as u8; - } - wgrant.commit(10); - // 1 frame in queue - - // 20 + 2 used (assuming u64 test platform) - let mut wgrant = prod.grant(10).unwrap(); - assert_eq!(wgrant.len(), 10); - for (idx, i) in wgrant.iter_mut().enumerate() { - *i = idx as u8; - } - wgrant.commit(10); - // 2 frames in queue - - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), 10); - for (idx, i) in rgrant.iter().enumerate() { - 
assert_eq!(*i, idx as u8); - } - rgrant.release(); - // 1 frame in queue - - // No more room! - assert!(prod.grant(10).is_err()); - - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), 10); - for (idx, i) in rgrant.iter().enumerate() { - assert_eq!(*i, idx as u8); - } - rgrant.release(); - // 0 frames in queue - - // 10 + 1 used (assuming u64 test platform) - let mut wgrant = prod.grant(10).unwrap(); - assert_eq!(wgrant.len(), 10); - for (idx, i) in wgrant.iter_mut().enumerate() { - *i = idx as u8; - } - wgrant.commit(10); - // 1 frame in queue - - // No more room! - assert!(prod.grant(10).is_err()); - - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), 10); - for (idx, i) in rgrant.iter().enumerate() { - assert_eq!(*i, idx as u8); - } - rgrant.release(); - // 0 frames in queue - - // No more frames! - assert!(cons.read().is_none()); - } - - #[test] - fn frame_big_little() { - let bb: BBQueue<65536> = BBQueue::new(); - let (mut prod, mut cons) = bb.try_split_framed().unwrap(); - - // Create a frame that should take 3 bytes for the header - assert!(prod.grant(65534).is_err()); - - let mut wgrant = prod.grant(65533).unwrap(); - assert_eq!(wgrant.len(), 65533); - for (idx, i) in wgrant.iter_mut().enumerate() { - *i = idx as u8; - } - // Only commit 127 bytes, which fit into a header of 1 byte - wgrant.commit(127); - - let rgrant = cons.read().unwrap(); - assert_eq!(rgrant.len(), 127); - for (idx, i) in rgrant.iter().enumerate() { - assert_eq!(*i, idx as u8); - } - rgrant.release(); - } - - #[test] - fn split_sanity_check() { - let bb: BBQueue<10> = BBQueue::new(); - let (mut prod, mut cons) = bb.try_split().unwrap(); - - // Fill buffer - let mut wgrant = prod.grant_exact(10).unwrap(); - assert_eq!(wgrant.len(), 10); - wgrant.copy_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); - wgrant.commit(10); - - let rgrant = cons.split_read().unwrap(); - assert_eq!(rgrant.combined_len(), 10); - assert_eq!( - rgrant.bufs(), - (&[1, 2, 3, 4, 5, 6, 7, 8, 9, 
10][..], &[][..]) - ); - // Release part of the buffer - rgrant.release(6); - - // Almost fill buffer again => | 11 | 12 | 13 | 14 | 15 | x | 7 | 8 | 9 | 10 | - let mut wgrant = prod.grant_exact(5).unwrap(); - assert_eq!(wgrant.len(), 5); - wgrant.copy_from_slice(&[11, 12, 13, 14, 15]); - wgrant.commit(5); - - let rgrant = cons.split_read().unwrap(); - assert_eq!(rgrant.combined_len(), 9); - assert_eq!( - rgrant.bufs(), - (&[7, 8, 9, 10][..], &[11, 12, 13, 14, 15][..]) - ); - - // Release part of the buffer => | x | x | x | 14 | 15 | x | x | x | x | x | - rgrant.release(7); - - // Check that it is not possible to claim more space than what should be available - assert!(prod.grant_exact(6).is_err()); - - // Fill buffer to the end => | x | x | x | 14 | 15 | 21 | 22 | 23 | 24 | 25 | - let mut wgrant = prod.grant_exact(5).unwrap(); - wgrant.copy_from_slice(&[21, 22, 23, 24, 25]); - wgrant.commit(5); - - let rgrant = cons.split_read().unwrap(); - assert_eq!(rgrant.combined_len(), 7); - assert_eq!(rgrant.bufs(), (&[14, 15, 21, 22, 23, 24, 25][..], &[][..])); - rgrant.release(0); - - // Fill buffer to the end => | 26 | 27 | x | 14 | 15 | 21 | 22 | 23 | 24 | 25 | - let mut wgrant = prod.grant_exact(2).unwrap(); - wgrant.copy_from_slice(&[26, 27]); - wgrant.commit(2); - - // Fill buffer to the end => | x | 27 | x | x | x | x | x | x | x | x | - let rgrant = cons.split_read().unwrap(); - assert_eq!(rgrant.combined_len(), 9); - assert_eq!( - rgrant.bufs(), - (&[14, 15, 21, 22, 23, 24, 25][..], &[26, 27][..]) - ); - rgrant.release(8); - - let rgrant = cons.split_read().unwrap(); - assert_eq!(rgrant.combined_len(), 1); - assert_eq!(rgrant.bufs(), (&[27][..], &[][..])); - rgrant.release(1); - } + // // Initial grant, shouldn't roll over + // let mut x = prod.grant_exact(4).unwrap(); + + // // Still no data available yet + // assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); + + // // Add full data from grant + // x.copy_from_slice(&[1, 2, 3, 4]); + + // // Still no data 
available yet + // assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); + + // // Commit data + // x.commit(4); + + // ::std::sync::atomic::fence(std::sync::atomic::Ordering::SeqCst); + + // let a = cons.read().unwrap(); + // assert_eq!(&*a, &[1, 2, 3, 4]); + + // // Release the first two bytes + // a.release(2); + + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[3, 4]); + // r.release(0); + + // // Grant two more + // let mut x = prod.grant_exact(2).unwrap(); + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[3, 4]); + // r.release(0); + + // // Add more data + // x.copy_from_slice(&[11, 12]); + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[3, 4]); + // r.release(0); + + // // Commit + // x.commit(2); + + // let a = cons.read().unwrap(); + // assert_eq!(&*a, &[3, 4, 11, 12]); + + // a.release(2); + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[11, 12]); + // r.release(0); + + // let mut x = prod.grant_exact(3).unwrap(); + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[11, 12]); + // r.release(0); + + // x.copy_from_slice(&[21, 22, 23]); + + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[11, 12]); + // r.release(0); + // x.commit(3); + + // let a = cons.read().unwrap(); + + // // NOTE: The data we just added isn't available yet, + // // since it has wrapped around + // assert_eq!(&*a, &[11, 12]); + + // a.release(2); + + // // And now we can see it + // let r = cons.read().unwrap(); + // assert_eq!(&*r, &[21, 22, 23]); + // r.release(0); + + // // Ask for something way too big + // assert!(prod.grant_exact(10).is_err()); + // } + + // #[test] + // fn zero_sized_grant() { + // let bb: BBQueue<1000> = BBQueue::new(); + // let (mut prod, mut _cons) = bb.try_split().unwrap(); + + // let size = 1000; + // let grant = prod.grant_exact(size).unwrap(); + // grant.commit(size); + + // let grant = prod.grant_exact(0).unwrap(); + // grant.commit(0); + // } + + // #[test] + // fn frame_sanity() { + // let bb: 
BBQueue<1000> = BBQueue::new(); + // let (mut prod, mut cons) = bb.try_split_framed().unwrap(); + + // // One frame in, one frame out + // let mut wgrant = prod.grant(128).unwrap(); + // assert_eq!(wgrant.len(), 128); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = idx as u8; + // } + // wgrant.commit(128); + + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), 128); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, idx as u8); + // } + // rgrant.release(); + + // // Three frames in, three frames out + // let mut state = 0; + // let states = [16usize, 32, 24]; + + // for step in &states { + // let mut wgrant = prod.grant(*step).unwrap(); + // assert_eq!(wgrant.len(), *step); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = (idx + state) as u8; + // } + // wgrant.commit(*step); + // state += *step; + // } + + // state = 0; + + // for step in &states { + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), *step); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, (idx + state) as u8); + // } + // rgrant.release(); + // state += *step; + // } + // } + + // #[test] + // fn frame_wrap() { + // let bb: BBQueue<22> = BBQueue::new(); + // let (mut prod, mut cons) = bb.try_split_framed().unwrap(); + + // // 10 + 1 used + // let mut wgrant = prod.grant(10).unwrap(); + // assert_eq!(wgrant.len(), 10); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = idx as u8; + // } + // wgrant.commit(10); + // // 1 frame in queue + + // // 20 + 2 used (assuming u64 test platform) + // let mut wgrant = prod.grant(10).unwrap(); + // assert_eq!(wgrant.len(), 10); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = idx as u8; + // } + // wgrant.commit(10); + // // 2 frames in queue + + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), 10); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, idx as u8); + // } + // rgrant.release(); + // 
// 1 frame in queue + + // // No more room! + // assert!(prod.grant(10).is_err()); + + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), 10); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, idx as u8); + // } + // rgrant.release(); + // // 0 frames in queue + + // // 10 + 1 used (assuming u64 test platform) + // let mut wgrant = prod.grant(10).unwrap(); + // assert_eq!(wgrant.len(), 10); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = idx as u8; + // } + // wgrant.commit(10); + // // 1 frame in queue + + // // No more room! + // assert!(prod.grant(10).is_err()); + + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), 10); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, idx as u8); + // } + // rgrant.release(); + // // 0 frames in queue + + // // No more frames! + // assert!(cons.read().is_none()); + // } + + // #[test] + // fn frame_big_little() { + // let bb: BBQueue<65536> = BBQueue::new(); + // let (mut prod, mut cons) = bb.try_split_framed().unwrap(); + + // // Create a frame that should take 3 bytes for the header + // assert!(prod.grant(65534).is_err()); + + // let mut wgrant = prod.grant(65533).unwrap(); + // assert_eq!(wgrant.len(), 65533); + // for (idx, i) in wgrant.iter_mut().enumerate() { + // *i = idx as u8; + // } + // // Only commit 127 bytes, which fit into a header of 1 byte + // wgrant.commit(127); + + // let rgrant = cons.read().unwrap(); + // assert_eq!(rgrant.len(), 127); + // for (idx, i) in rgrant.iter().enumerate() { + // assert_eq!(*i, idx as u8); + // } + // rgrant.release(); + // } + + // #[test] + // fn split_sanity_check() { + // let bb: BBQueue<10> = BBQueue::new(); + // let (mut prod, mut cons) = bb.try_split().unwrap(); + + // // Fill buffer + // let mut wgrant = prod.grant_exact(10).unwrap(); + // assert_eq!(wgrant.len(), 10); + // wgrant.copy_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + // wgrant.commit(10); + + // let rgrant = 
cons.split_read().unwrap(); + // assert_eq!(rgrant.combined_len(), 10); + // assert_eq!( + // rgrant.bufs(), + // (&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10][..], &[][..]) + // ); + // // Release part of the buffer + // rgrant.release(6); + + // // Almost fill buffer again => | 11 | 12 | 13 | 14 | 15 | x | 7 | 8 | 9 | 10 | + // let mut wgrant = prod.grant_exact(5).unwrap(); + // assert_eq!(wgrant.len(), 5); + // wgrant.copy_from_slice(&[11, 12, 13, 14, 15]); + // wgrant.commit(5); + + // let rgrant = cons.split_read().unwrap(); + // assert_eq!(rgrant.combined_len(), 9); + // assert_eq!( + // rgrant.bufs(), + // (&[7, 8, 9, 10][..], &[11, 12, 13, 14, 15][..]) + // ); + + // // Release part of the buffer => | x | x | x | 14 | 15 | x | x | x | x | x | + // rgrant.release(7); + + // // Check that it is not possible to claim more space than what should be available + // assert!(prod.grant_exact(6).is_err()); + + // // Fill buffer to the end => | x | x | x | 14 | 15 | 21 | 22 | 23 | 24 | 25 | + // let mut wgrant = prod.grant_exact(5).unwrap(); + // wgrant.copy_from_slice(&[21, 22, 23, 24, 25]); + // wgrant.commit(5); + + // let rgrant = cons.split_read().unwrap(); + // assert_eq!(rgrant.combined_len(), 7); + // assert_eq!(rgrant.bufs(), (&[14, 15, 21, 22, 23, 24, 25][..], &[][..])); + // rgrant.release(0); + + // // Fill buffer to the end => | 26 | 27 | x | 14 | 15 | 21 | 22 | 23 | 24 | 25 | + // let mut wgrant = prod.grant_exact(2).unwrap(); + // wgrant.copy_from_slice(&[26, 27]); + // wgrant.commit(2); + + // // Fill buffer to the end => | x | 27 | x | x | x | x | x | x | x | x | + // let rgrant = cons.split_read().unwrap(); + // assert_eq!(rgrant.combined_len(), 9); + // assert_eq!( + // rgrant.bufs(), + // (&[14, 15, 21, 22, 23, 24, 25][..], &[26, 27][..]) + // ); + // rgrant.release(8); + + // let rgrant = cons.split_read().unwrap(); + // assert_eq!(rgrant.combined_len(), 1); + // assert_eq!(rgrant.bufs(), (&[27][..], &[][..])); + // rgrant.release(1); + // } } diff --git 
a/core/src/bbbuffer.rs b/core/src/bbbuffer.rs index fa561fe..dd2f782 100644 --- a/core/src/bbbuffer.rs +++ b/core/src/bbbuffer.rs @@ -17,11 +17,14 @@ use core::{ }, }; -struct OwnedBBBuffer { + +pub struct OwnedBBBuffer { hdr: BBHeader, storage: UnsafeCell<[u8; N]> } +unsafe impl Sync for OwnedBBBuffer<{ A }> {} + pub struct BBHeader { /// Where the next byte will be written write: AtomicUsize, @@ -61,21 +64,43 @@ pub struct BBHeader { // Maybe the BBGetter trait can be replaced with AsRef or something? // Also, this would probably let anyone get access to the header of bbqueue, which // would be wildly unsafe -pub trait BBGetter: Clone { - fn get_header(&self) -> &BBHeader; - fn get_storage(&self) -> (*mut u8, usize); +pub(crate) mod sealed { + use crate::bbbuffer::BBHeader; + + pub trait BBGetter: Clone { + fn get_header(&self) -> &BBHeader; + fn get_storage(&self) -> (*mut u8, usize); + } + + impl BBGetter for &crate::bbbuffer::OwnedBBBuffer { + fn get_header(&self) -> &BBHeader { + &self.hdr + } + + fn get_storage(&self) -> (*mut u8, usize) { + let ptr = self.storage.get().cast(); + (ptr, N) + } + } } +use crate::sealed::BBGetter; /// A backing structure for a BBQueue. 
Can be used to create either /// a BBQueue or a split Producer/Consumer pair +// +// NOTE: The BBQueue is generic over ANY type, pub struct BBQueue -where - STO: BBGetter, { sto: STO } -unsafe impl Sync for BBQueue {} +impl<'a, STO, const N: usize> BBQueue { + pub const fn new(storage: STO) -> Self { + Self { + sto: storage, + } + } +} impl<'a, STO: BBGetter, const N: usize> BBQueue { /// Attempt to split the `BBQueue` into `Consumer` and `Producer` halves to gain access to the @@ -114,21 +139,16 @@ impl<'a, STO: BBGetter, const N: usize> BBQueue { return Err(Error::AlreadySplit); } - unsafe { - let nn1 = NonNull::new_unchecked(self as *const _ as *mut _); - let nn2 = NonNull::new_unchecked(self as *const _ as *mut _); - - Ok(( - Producer { - bbq: nn1, - pd: PhantomData, - }, - Consumer { - bbq: nn2, - pd: PhantomData, - }, - )) - } + Ok(( + Producer { + bbq: self.sto.clone(), + pd: PhantomData, + }, + Consumer { + bbq: self.sto.clone(), + pd: PhantomData, + }, + )) } /// Attempt to split the `BBQueue` into `FrameConsumer` and `FrameProducer` halves @@ -193,8 +213,9 @@ impl<'a, STO: BBGetter, const N: usize> BBQueue { // can assume the buffer has been split, because // Are these our producers and consumers? - let our_prod = prod.bbq.as_ptr() as *const Self == self; - let our_cons = cons.bbq.as_ptr() as *const Self == self; + // NOTE: Check the header rather than the data + let our_prod = prod.bbq.get_header() as *const BBHeader == self.sto.get_header() as *const BBHeader; + let our_cons = cons.bbq.get_header() as *const BBHeader == self.sto.get_header() as *const BBHeader; if !(our_prod && our_cons) { // Can't release, not our producer and consumer @@ -247,70 +268,63 @@ impl<'a, STO: BBGetter, const N: usize> BBQueue { } } - -// TODO(AJM): Move this to BBBuffer's constructor -// -// -// impl BBQueue -// where -// STO: BBGetter, -// { -// /// Create a new constant inner portion of a `BBQueue`. 
-// /// -// /// NOTE: This is only necessary to use when creating a `BBQueue` at static -// /// scope, and is generally never used directly. This process is necessary to -// /// work around current limitations in `const fn`, and will be replaced in -// /// the future. -// /// -// /// ```rust,no_run -// /// use bbqueue_ng::BBQueue; -// /// -// /// static BUF: BBQueue<6> = BBQueue::new(); -// /// -// /// fn main() { -// /// let (prod, cons) = BUF.try_split().unwrap(); -// /// } -// /// ``` -// pub const fn new() -> Self { -// Self { -// // This will not be initialized until we split the buffer -// buf: UnsafeCell::new([0u8; N]), - -// hdr: BBHeader { -// /// Owned by the writer -// write: AtomicUsize::new(0), - -// /// Owned by the reader -// read: AtomicUsize::new(0), - -// /// Cooperatively owned -// /// -// /// NOTE: This should generally be initialized as size_of::(), however -// /// this would prevent the structure from being entirely zero-initialized, -// /// and can cause the .data section to be much larger than necessary. By -// /// forcing the `last` pointer to be zero initially, we place the structure -// /// in an "inverted" condition, which will be resolved on the first commited -// /// bytes that are written to the structure. -// /// -// /// When read == last == write, no bytes will be allowed to be read (good), but -// /// write grants can be given out (also good). -// last: AtomicUsize::new(0), - -// /// Owned by the Writer, "private" -// reserve: AtomicUsize::new(0), - -// /// Owned by the Reader, "private" -// read_in_progress: AtomicBool::new(false), - -// /// Owned by the Writer, "private" -// write_in_progress: AtomicBool::new(false), - -// /// We haven't split at the start -// already_split: AtomicBool::new(false), -// } -// } -// } -// } +impl OwnedBBBuffer<{ N }> { + /// Create a new constant inner portion of a `BBQueue`. 
+ /// + /// NOTE: This is only necessary to use when creating a `BBQueue` at static + /// scope, and is generally never used directly. This process is necessary to + /// work around current limitations in `const fn`, and will be replaced in + /// the future. + /// + /// ```rust,no_run + /// use bbqueue_ng::BBQueue; + /// + /// static BUF: BBQueue<6> = BBQueue::new(); + /// + /// fn main() { + /// let (prod, cons) = BUF.try_split().unwrap(); + /// } + /// ``` + pub const fn new() -> Self { + Self { + // This will not be initialized until we split the buffer + storage: UnsafeCell::new([0u8; N]), + + hdr: BBHeader { + /// Owned by the writer + write: AtomicUsize::new(0), + + /// Owned by the reader + read: AtomicUsize::new(0), + + /// Cooperatively owned + /// + /// NOTE: This should generally be initialized as size_of::(), however + /// this would prevent the structure from being entirely zero-initialized, + /// and can cause the .data section to be much larger than necessary. By + /// forcing the `last` pointer to be zero initially, we place the structure + /// in an "inverted" condition, which will be resolved on the first commited + /// bytes that are written to the structure. + /// + /// When read == last == write, no bytes will be allowed to be read (good), but + /// write grants can be given out (also good). + last: AtomicUsize::new(0), + + /// Owned by the Writer, "private" + reserve: AtomicUsize::new(0), + + /// Owned by the Reader, "private" + read_in_progress: AtomicBool::new(false), + + /// Owned by the Writer, "private" + write_in_progress: AtomicBool::new(false), + + /// We haven't split at the start + already_split: AtomicBool::new(false), + } + } + } +} /// `Producer` is the primary interface for pushing data into a `BBQueue`. 
/// There are various methods for obtaining a grant to write to the buffer, with @@ -340,8 +354,8 @@ pub struct Producer<'a, STO, const N: usize> where STO: BBGetter, { - bbq: NonNull>, - pd: PhantomData<&'a ()>, + bbq: STO, + pd: PhantomData<&'a [u8; N]>, } unsafe impl<'a, STO, const N: usize> Send for Producer<'a, STO, { N }> @@ -386,12 +400,8 @@ where /// # } /// ``` pub fn grant_exact(&mut self, sz: usize) -> Result> { - let (hdr, sto) = unsafe { - let bbq = self.bbq.as_ref(); - let hdr = bbq.sto.get_header(); - let sto = bbq.sto.get_storage(); - (hdr, sto) - }; + let hdr = self.bbq.get_header(); + let sto = self.bbq.get_storage(); if atomic::swap(&hdr.write_in_progress, true, AcqRel) { @@ -446,8 +456,9 @@ where Ok(GrantW { buf: grant_slice, - bbq: self.bbq, + bbq: self.bbq.clone(), to_commit: 0, + pd: PhantomData, }) } @@ -490,12 +501,8 @@ where /// # } /// ``` pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { - let (hdr, sto) = unsafe { - let bbq = self.bbq.as_ref(); - let hdr = bbq.sto.get_header(); - let sto = bbq.sto.get_storage(); - (hdr, sto) - }; + let hdr = self.bbq.get_header(); + let sto = self.bbq.get_storage(); if atomic::swap(&hdr.write_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); @@ -554,16 +561,17 @@ where Ok(GrantW { buf: grant_slice, - bbq: self.bbq, + bbq: self.bbq.clone(), to_commit: 0, + pd: PhantomData, }) } } /// `Consumer` is the primary interface for reading data from a `BBQueue`. 
pub struct Consumer<'a, STO: BBGetter, const N: usize> { - bbq: NonNull>, - pd: PhantomData<&'a ()>, + bbq: STO, + pd: PhantomData<&'a [u8; N]>, } unsafe impl<'a, STO: BBGetter, const N: usize> Send for Consumer<'a, STO, { N }> {} @@ -600,12 +608,8 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { /// # } /// ``` pub fn read(&mut self) -> Result> { - let (hdr, sto) = unsafe { - let bbq = self.bbq.as_ref(); - let hdr = bbq.sto.get_header(); - let sto = bbq.sto.get_storage(); - (hdr, sto) - }; + let hdr = self.bbq.get_header(); + let sto = self.bbq.get_storage(); if atomic::swap(&hdr.read_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); @@ -647,20 +651,17 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { Ok(GrantR { buf: grant_slice, - bbq: self.bbq, + bbq: self.bbq.clone(), to_release: 0, + pd: PhantomData, }) } /// Obtains two disjoint slices, which are each contiguous of committed bytes. /// Combined these contain all previously commited data. 
pub fn split_read(&mut self) -> Result> { - let (hdr, sto) = unsafe { - let bbq = self.bbq.as_ref(); - let hdr = bbq.sto.get_header(); - let sto = bbq.sto.get_storage(); - (hdr, sto) - }; + let hdr = self.bbq.get_header(); + let sto = self.bbq.get_storage(); if atomic::swap(&hdr.read_in_progress, true, AcqRel) { return Err(Error::GrantInProgress); @@ -707,8 +708,9 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { Ok(SplitGrantR { buf1: grant_slice1, buf2: grant_slice2, - bbq: self.bbq, + bbq: self.bbq.clone(), to_release: 0, + pd: PhantomData, }) } } @@ -755,8 +757,9 @@ where STO: BBGetter, { pub(crate) buf: &'a mut [u8], - bbq: NonNull>, + bbq: STO, pub(crate) to_commit: usize, + pd: PhantomData<&'a [u8; N]>, } unsafe impl<'a, STO: BBGetter, const N: usize> Send for GrantW<'a, STO, { N }> {} @@ -779,8 +782,9 @@ where STO: BBGetter, { pub(crate) buf: &'a mut [u8], - bbq: NonNull>, + bbq: STO, pub(crate) to_release: usize, + pd: PhantomData<&'a [u8; N]>, } /// A structure representing up to two contiguous regions of memory that @@ -793,8 +797,9 @@ where { pub(crate) buf1: &'a mut [u8], pub(crate) buf2: &'a mut [u8], - bbq: NonNull>, + bbq: STO, pub(crate) to_release: usize, + pd: PhantomData<&'a [u8; N]>, } unsafe impl<'a, STO: BBGetter, const N: usize> Send for GrantR<'a, STO, { N }> {} @@ -845,10 +850,7 @@ impl<'a, STO: BBGetter, const N: usize> GrantW<'a, STO, { N }> { #[inline(always)] pub(crate) fn commit_inner(&mut self, used: usize) { - let hdr = unsafe { - let bbq = self.bbq.as_ref(); - bbq.sto.get_header() - }; + let hdr = self.bbq.get_header(); // If there is no grant in progress, return early. 
This
        // generally means we are dropping the grant within a
@@ -972,10 +974,7 @@ impl<'a, STO: BBGetter, const N: usize> GrantR<'a, STO, { N }> {
 
     #[inline(always)]
     pub(crate) fn release_inner(&mut self, used: usize) {
-        let hdr = unsafe {
-            let bbq = self.bbq.as_ref();
-            bbq.sto.get_header()
-        };
+        let hdr = self.bbq.get_header();
 
         // If there is no grant in progress, return early. This
         // generally means we are dropping the grant within a
@@ -1059,10 +1058,7 @@ impl<'a, STO: BBGetter, const N: usize> SplitGrantR<'a, STO, { N }> {
 
     #[inline(always)]
     pub(crate) fn release_inner(&mut self, used: usize) {
-        let hdr = unsafe {
-            let bbq = self.bbq.as_ref();
-            bbq.sto.get_header()
-        };
+        let hdr = self.bbq.get_header();
 
         // If there is no grant in progress, return early. This
         // generally means we are dropping the grant within a
diff --git a/core/src/framed.rs b/core/src/framed.rs
index 9db4ed8..e695180 100644
--- a/core/src/framed.rs
+++ b/core/src/framed.rs
@@ -74,7 +74,7 @@ use crate::{Consumer, GrantR, GrantW, Producer};
 
 use crate::{
     vusize::{decode_usize, decoded_len, encode_usize_to_slice, encoded_len},
-    bbbuffer::BBGetter,
+    bbbuffer::sealed::BBGetter,
     Result,
 };
 
diff --git a/core/storage-notes.md b/core/storage-notes.md
new file mode 100644
index 0000000..4611d39
--- /dev/null
+++ b/core/storage-notes.md
@@ -0,0 +1,116 @@
+# BBQueue Storage Work
+
+## Use cases
+
+Current: "Embedded Use Case"
+
+* Statically allocate storage
+* BBBuffer lives forever
+* User uses Producer and Consumer
+
+Future:
+
+* Have the STORAGE for BBBuffer be provided separately
+* Allow for uses like:
+    * Statically allocated storage (like now)
+    * Heap Allocation provided storage (Arc, etc.)
+    * User provided storage (probably `unsafe`)
+
+## Sample Code for Use Cases
+
+### Static Buffer
+
+```rust
+static BB_QUEUE: BBQueue> = BBQueue::new(BBBuffer::new());
+
+fn main() {
+    let (prod, cons) = BB_QUEUE.try_split().unwrap();
+    // ...
+}
+```
+
+### Heap Allocation provided storage
+
+Choice A: Simple
+
+```rust
+fn main() {
+    // BBQueue>
+    // Producer>, Consumer>
+    //
+    // Storage is dropped when `prod` and `cons` are BOTH dropped.
+    let (prod, cons) = BBQueue::new_arc::<1024>();
+}
+```
+
+Choice B: Explicit
+
+```rust
+fn main() {
+    // EDIT: This is sub-par, because this would require `arc_queue`,
+    // `prod`, and `storage` to ALL be dropped
+    // before the buffer is dropped.
+    let arc_queue: BBQueue> = BBStorage::new_arc();
+    let (prod, cons) = arc_queue.try_split().unwrap();
+}
+```
+
+### User provided storage
+
+Choice A: Naive
+
+EDIT: Not this. See below
+
+```rust
+static mut UNSAFE_BUFFER: [u8; 1024] = [0u8; 1024];
+
+fn main() {
+    let borrowed = unsafe {
+        // TODO: Make sure BBQueue has lifetime shorter
+        // than `'borrowed` here? In this case it is
+        // 'static, but may not always be.
+        BBStorageBorrowed::new(&mut UNSAFE_BUFFER);
+    };
+    let bbqueue = BBQueue::new(borrowed);
+
+    // NOTE: This is NOT good, because the bound lifetime
+    // of prod and cons will be that of `bbqueue`, which
+    // is probably not suitable (non-'static). In many cases, we want
+    // the producer and consumer to also have `Producer<'static>` lifetime
+    let (prod, cons) = bbqueue.try_split().unwrap();
+}
+```
+
+Choice B: "loadable" storage?
+
+This would require EITHER:
+
+* The BBStorage methods are fallible
+* The split belongs to the BBStorage item
+    * (Could be an inherent or trait method)
+* Loadable storage panics on a split if not loaded
+
+```rust
+static mut UNSAFE_BUFFER: [u8; 1024] = [0u8; 1024];
+static LOADABLE_BORROWED: BBStorageLoadBorrow::new();
+
+fn main() {
+    // This could probably be shortened to a single "store and take header" action.
+    // Done in multiple steps here for clarity.
+ let mut_buf = unsafe { + &mut UNSAFE_BUFFER + }; + let old = LOADABLE_BORROWED.store(); // -> Result> + // Result: Err if already taken + // Option: Some if other buffer already stored + assert_eq!(Ok(None), old); + + let bbqueue = BBQueue::new(LOADABLE_BORROWED.take_header().unwrap()); + + // Here prod and cons are <'static>, because LOADABLE_BORROWED is static. + // BUUUUUT we still probably allow access of BBStorage methods, which would be totally unsafe + // + // EDIT: Okay, sealing the trait DOES prevent outer usage, so we're good on this regard! + let (prod, cons) = bbqueue.try_split().unwrap(); +} +``` diff --git a/rust-toolchain b/rust-toolchain index c089857..65b2df8 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2020-12-28 +beta From 61cce273f211f2c4c5fdca53e686eb87186d80bd Mon Sep 17 00:00:00 2001 From: James Munns Date: Sat, 20 Mar 2021 23:10:35 +0100 Subject: [PATCH 08/11] One step less awkward --- bbqtest/src/lib.rs | 9 +++------ core/src/bbbuffer.rs | 25 +++++++++++++++---------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/bbqtest/src/lib.rs b/bbqtest/src/lib.rs index a434f58..f73eda2 100644 --- a/bbqtest/src/lib.rs +++ b/bbqtest/src/lib.rs @@ -12,8 +12,7 @@ mod tests { #[test] fn deref_deref_mut() { - let sto = Obbb::new(); - let bb: BBQueue<&Obbb<6>, 6> = BBQueue::new(&sto); + let bb: BBQueue, 6> = BBQueue::new(Obbb::new()); let (mut prod, mut cons) = bb.try_split().unwrap(); @@ -36,11 +35,9 @@ mod tests { #[test] fn static_allocator() { - static STO_1: Obbb<6> = Obbb::new(); - static STO_2: Obbb<6> = Obbb::new(); // Check we can make multiple static items... 
- static BBQ1: BBQueue<&Obbb<6>, 6> = BBQueue::new(&STO_1); - static BBQ2: BBQueue<&Obbb<6>, 6> = BBQueue::new(&STO_2); + static BBQ1: BBQueue, 6> = BBQueue::new(Obbb::new()); + static BBQ2: BBQueue, 6> = BBQueue::new(Obbb::new()); let (mut prod1, mut cons1) = BBQ1.try_split().unwrap(); let (mut _prod2, mut cons2) = BBQ2.try_split().unwrap(); diff --git a/core/src/bbbuffer.rs b/core/src/bbbuffer.rs index dd2f782..2d21979 100644 --- a/core/src/bbbuffer.rs +++ b/core/src/bbbuffer.rs @@ -67,12 +67,12 @@ pub struct BBHeader { pub(crate) mod sealed { use crate::bbbuffer::BBHeader; - pub trait BBGetter: Clone { + pub trait BBGetter { fn get_header(&self) -> &BBHeader; fn get_storage(&self) -> (*mut u8, usize); } - impl BBGetter for &crate::bbbuffer::OwnedBBBuffer { + impl BBGetter for crate::bbbuffer::OwnedBBBuffer { fn get_header(&self) -> &BBHeader { &self.hdr } @@ -141,11 +141,11 @@ impl<'a, STO: BBGetter, const N: usize> BBQueue { Ok(( Producer { - bbq: self.sto.clone(), + bbq: &self.sto, pd: PhantomData, }, Consumer { - bbq: self.sto.clone(), + bbq: &self.sto, pd: PhantomData, }, )) @@ -354,7 +354,8 @@ pub struct Producer<'a, STO, const N: usize> where STO: BBGetter, { - bbq: STO, + // TODO: Is 'a the right lifetime? + bbq: &'a STO, pd: PhantomData<&'a [u8; N]>, } @@ -570,7 +571,8 @@ where /// `Consumer` is the primary interface for reading data from a `BBQueue`. pub struct Consumer<'a, STO: BBGetter, const N: usize> { - bbq: STO, + // TODO: Is 'a the right lifetime? + bbq: &'a STO, pd: PhantomData<&'a [u8; N]>, } @@ -708,7 +710,7 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { Ok(SplitGrantR { buf1: grant_slice1, buf2: grant_slice2, - bbq: self.bbq.clone(), + bbq: self.bbq, to_release: 0, pd: PhantomData, }) @@ -757,7 +759,8 @@ where STO: BBGetter, { pub(crate) buf: &'a mut [u8], - bbq: STO, + // TODO: Is 'a the right lifetime? 
+ bbq: &'a STO, pub(crate) to_commit: usize, pd: PhantomData<&'a [u8; N]>, } @@ -782,7 +785,8 @@ where STO: BBGetter, { pub(crate) buf: &'a mut [u8], - bbq: STO, + // TODO: Is 'a the right lifetime? + bbq: &'a STO, pub(crate) to_release: usize, pd: PhantomData<&'a [u8; N]>, } @@ -797,7 +801,8 @@ where { pub(crate) buf1: &'a mut [u8], pub(crate) buf2: &'a mut [u8], - bbq: STO, + // TODO: Is 'a the right lifetime? + bbq: &'a STO, pub(crate) to_release: usize, pd: PhantomData<&'a [u8; N]>, } From 59661485ca054e1dacbead93c683533deccbc7ce Mon Sep 17 00:00:00 2001 From: James Munns Date: Sat, 20 Mar 2021 23:44:08 +0100 Subject: [PATCH 09/11] Remove unnecessary constant. I might need to do something different for Arc storage type. Perhaps AsRef? --- bbqtest/src/lib.rs | 6 +-- core/src/bbbuffer.rs | 93 ++++++++++++++++++++++---------------------- core/src/framed.rs | 36 ++++++++--------- 3 files changed, 68 insertions(+), 67 deletions(-) diff --git a/bbqtest/src/lib.rs b/bbqtest/src/lib.rs index f73eda2..c0f5b32 100644 --- a/bbqtest/src/lib.rs +++ b/bbqtest/src/lib.rs @@ -12,7 +12,7 @@ mod tests { #[test] fn deref_deref_mut() { - let bb: BBQueue, 6> = BBQueue::new(Obbb::new()); + let bb: BBQueue> = BBQueue::new(Obbb::new()); let (mut prod, mut cons) = bb.try_split().unwrap(); @@ -36,8 +36,8 @@ mod tests { #[test] fn static_allocator() { // Check we can make multiple static items... 
- static BBQ1: BBQueue, 6> = BBQueue::new(Obbb::new()); - static BBQ2: BBQueue, 6> = BBQueue::new(Obbb::new()); + static BBQ1: BBQueue> = BBQueue::new(Obbb::new()); + static BBQ2: BBQueue> = BBQueue::new(Obbb::new()); let (mut prod1, mut cons1) = BBQ1.try_split().unwrap(); let (mut _prod2, mut cons2) = BBQ2.try_split().unwrap(); diff --git a/core/src/bbbuffer.rs b/core/src/bbbuffer.rs index 2d21979..1e36817 100644 --- a/core/src/bbbuffer.rs +++ b/core/src/bbbuffer.rs @@ -89,12 +89,12 @@ use crate::sealed::BBGetter; /// a BBQueue or a split Producer/Consumer pair // // NOTE: The BBQueue is generic over ANY type, -pub struct BBQueue +pub struct BBQueue { sto: STO } -impl<'a, STO, const N: usize> BBQueue { +impl<'a, STO> BBQueue { pub const fn new(storage: STO) -> Self { Self { sto: storage, @@ -102,7 +102,7 @@ impl<'a, STO, const N: usize> BBQueue { } } -impl<'a, STO: BBGetter, const N: usize> BBQueue { +impl<'a, STO: BBGetter> BBQueue { /// Attempt to split the `BBQueue` into `Consumer` and `Producer` halves to gain access to the /// buffer. If buffer has already been split, an error will be returned. /// @@ -134,7 +134,7 @@ impl<'a, STO: BBGetter, const N: usize> BBQueue { /// # bbqtest(); /// # } /// ``` - pub fn try_split(&'a self) -> Result<(Producer<'a, STO, { N }>, Consumer<'a, STO, { N }>)> { + pub fn try_split(&'a self) -> Result<(Producer<'a, STO>, Consumer<'a, STO>)> { if atomic::swap(&self.sto.get_header().already_split, true, AcqRel) { return Err(Error::AlreadySplit); } @@ -165,7 +165,7 @@ impl<'a, STO: BBGetter, const N: usize> BBQueue { /// section while splitting. 
pub fn try_split_framed( &'a self, - ) -> Result<(FrameProducer<'a, STO, { N }>, FrameConsumer<'a, STO, { N }>)> { + ) -> Result<(FrameProducer<'a, STO>, FrameConsumer<'a, STO>)> { let (producer, consumer) = self.try_split()?; Ok((FrameProducer { producer }, FrameConsumer { consumer })) } @@ -205,9 +205,9 @@ impl<'a, STO: BBGetter, const N: usize> BBQueue { /// ``` pub fn try_release( &'a self, - prod: Producer<'a, STO, { N }>, - cons: Consumer<'a, STO, { N }>, - ) -> CoreResult<(), (Producer<'a, STO, { N }>, Consumer<'a, STO, { N }>)> { + prod: Producer<'a, STO>, + cons: Consumer<'a, STO>, + ) -> CoreResult<(), (Producer<'a, STO>, Consumer<'a, STO>)> { // Note: Re-entrancy is not possible because we require ownership // of the producer and consumer, which are not cloneable. We also // can assume the buffer has been split, because @@ -257,9 +257,9 @@ impl<'a, STO: BBGetter, const N: usize> BBQueue { /// will be returned. pub fn try_release_framed( &'a self, - prod: FrameProducer<'a, STO, { N }>, - cons: FrameConsumer<'a, STO, { N }>, - ) -> CoreResult<(), (FrameProducer<'a, STO, { N }>, FrameConsumer<'a, STO, { N }>)> { + prod: FrameProducer<'a, STO>, + cons: FrameConsumer<'a, STO>, + ) -> CoreResult<(), (FrameProducer<'a, STO>, FrameConsumer<'a, STO>)> { self.try_release(prod.producer, cons.consumer) .map_err(|(producer, consumer)| { // Restore the wrapper types @@ -350,21 +350,21 @@ impl OwnedBBBuffer<{ N }> { /// /// See [this github issue](https://github.com/jamesmunns/bbqueue/issues/38) for a /// discussion of grant methods that could be added in the future. -pub struct Producer<'a, STO, const N: usize> +pub struct Producer<'a, STO> where STO: BBGetter, { // TODO: Is 'a the right lifetime? 
bbq: &'a STO, - pd: PhantomData<&'a [u8; N]>, + pd: PhantomData<&'a ()>, } -unsafe impl<'a, STO, const N: usize> Send for Producer<'a, STO, { N }> +unsafe impl<'a, STO> Send for Producer<'a, STO> where STO: BBGetter, {} -impl<'a, STO, const N: usize> Producer<'a, STO, { N }> +impl<'a, STO> Producer<'a, STO> where STO: BBGetter, { @@ -400,7 +400,7 @@ where /// # bbqtest(); /// # } /// ``` - pub fn grant_exact(&mut self, sz: usize) -> Result> { + pub fn grant_exact(&mut self, sz: usize) -> Result> { let hdr = self.bbq.get_header(); let sto = self.bbq.get_storage(); @@ -413,7 +413,7 @@ where // be careful writing to `load` let write = hdr.write.load(Acquire); let read = hdr.read.load(Acquire); - let max = N; + let max = sto.1; let already_inverted = write < read; let start = if already_inverted { @@ -501,7 +501,7 @@ where /// # bbqtest(); /// # } /// ``` - pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { + pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { let hdr = self.bbq.get_header(); let sto = self.bbq.get_storage(); @@ -513,7 +513,7 @@ where // be careful writing to `load` let write = hdr.write.load(Acquire); let read = hdr.read.load(Acquire); - let max = N; + let max = sto.1; let already_inverted = write < read; @@ -570,15 +570,15 @@ where } /// `Consumer` is the primary interface for reading data from a `BBQueue`. -pub struct Consumer<'a, STO: BBGetter, const N: usize> { +pub struct Consumer<'a, STO: BBGetter> { // TODO: Is 'a the right lifetime? bbq: &'a STO, - pd: PhantomData<&'a [u8; N]>, + pd: PhantomData<&'a ()>, } -unsafe impl<'a, STO: BBGetter, const N: usize> Send for Consumer<'a, STO, { N }> {} +unsafe impl<'a, STO: BBGetter> Send for Consumer<'a, STO> {} -impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { +impl<'a, STO: BBGetter> Consumer<'a, STO> { /// Obtains a contiguous slice of committed bytes. This slice may not /// contain ALL available bytes, if the writer has wrapped around. 
The /// remaining bytes will be available after all readable bytes are @@ -609,7 +609,7 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { /// # bbqtest(); /// # } /// ``` - pub fn read(&mut self) -> Result> { + pub fn read(&mut self) -> Result> { let hdr = self.bbq.get_header(); let sto = self.bbq.get_storage(); @@ -661,7 +661,7 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { /// Obtains two disjoint slices, which are each contiguous of committed bytes. /// Combined these contain all previously commited data. - pub fn split_read(&mut self) -> Result> { + pub fn split_read(&mut self) -> Result> { let hdr = self.bbq.get_header(); let sto = self.bbq.get_storage(); @@ -717,7 +717,7 @@ impl<'a, STO: BBGetter, const N: usize> Consumer<'a, STO, { N }> { } } -impl BBQueue { +impl BBQueue { /// Returns the size of the backing storage. /// /// This is the maximum number of bytes that can be stored in this queue. @@ -739,7 +739,7 @@ impl BBQueue { /// # } /// ``` pub fn capacity(&self) -> usize { - N + self.sto.get_storage().1 } } @@ -754,7 +754,7 @@ impl BBQueue { /// If the `thumbv6` feature is selected, dropping the grant /// without committing it takes a short critical section, #[derive(Debug, PartialEq)] -pub struct GrantW<'a, STO, const N: usize> +pub struct GrantW<'a, STO> where STO: BBGetter, { @@ -762,10 +762,10 @@ where // TODO: Is 'a the right lifetime? 
bbq: &'a STO, pub(crate) to_commit: usize, - pd: PhantomData<&'a [u8; N]>, + pd: PhantomData<&'a ()>, } -unsafe impl<'a, STO: BBGetter, const N: usize> Send for GrantW<'a, STO, { N }> {} +unsafe impl<'a, STO: BBGetter> Send for GrantW<'a, STO> {} /// A structure representing a contiguous region of memory that /// may be read from, and potentially "released" (or cleared) @@ -780,7 +780,7 @@ unsafe impl<'a, STO: BBGetter, const N: usize> Send for GrantW<'a, STO, { N }> { /// If the `thumbv6` feature is selected, dropping the grant /// without releasing it takes a short critical section, #[derive(Debug, PartialEq)] -pub struct GrantR<'a, STO, const N: usize> +pub struct GrantR<'a, STO> where STO: BBGetter, { @@ -788,14 +788,14 @@ where // TODO: Is 'a the right lifetime? bbq: &'a STO, pub(crate) to_release: usize, - pd: PhantomData<&'a [u8; N]>, + pd: PhantomData<&'a ()>, } /// A structure representing up to two contiguous regions of memory that /// may be read from, and potentially "released" (or cleared) /// from the queue #[derive(Debug, PartialEq)] -pub struct SplitGrantR<'a, STO, const N: usize> +pub struct SplitGrantR<'a, STO> where STO: BBGetter, { @@ -804,14 +804,14 @@ where // TODO: Is 'a the right lifetime? bbq: &'a STO, pub(crate) to_release: usize, - pd: PhantomData<&'a [u8; N]>, + pd: PhantomData<&'a ()>, } -unsafe impl<'a, STO: BBGetter, const N: usize> Send for GrantR<'a, STO, { N }> {} +unsafe impl<'a, STO: BBGetter> Send for GrantR<'a, STO> {} -unsafe impl<'a, STO: BBGetter, const N: usize> Send for SplitGrantR<'a, STO, { N }> {} +unsafe impl<'a, STO: BBGetter> Send for SplitGrantR<'a, STO> {} -impl<'a, STO: BBGetter, const N: usize> GrantW<'a, STO, { N }> { +impl<'a, STO: BBGetter> GrantW<'a, STO> { /// Finalizes a writable grant given by `grant()` or `grant_max()`. /// This makes the data available to be read via `read()`. This consumes /// the grant. 
@@ -856,6 +856,7 @@ impl<'a, STO: BBGetter, const N: usize> GrantW<'a, STO, { N }> { #[inline(always)] pub(crate) fn commit_inner(&mut self, used: usize) { let hdr = self.bbq.get_header(); + let sto = self.bbq.get_storage(); // If there is no grant in progress, return early. This // generally means we are dropping the grant within a @@ -874,7 +875,7 @@ impl<'a, STO: BBGetter, const N: usize> GrantW<'a, STO, { N }> { let write = hdr.write.load(Acquire); atomic::fetch_sub(&hdr.reserve, len - used, AcqRel); - let max = N; + let max = sto.1; let last = hdr.last.load(Acquire); let new_write = hdr.reserve.load(Acquire); @@ -912,7 +913,7 @@ impl<'a, STO: BBGetter, const N: usize> GrantW<'a, STO, { N }> { } } -impl<'a, STO: BBGetter, const N: usize> GrantR<'a, STO, { N }> { +impl<'a, STO: BBGetter> GrantR<'a, STO> { /// Release a sequence of bytes from the buffer, allowing the space /// to be used by later writes. This consumes the grant. /// @@ -1003,7 +1004,7 @@ impl<'a, STO: BBGetter, const N: usize> GrantR<'a, STO, { N }> { } } -impl<'a, STO: BBGetter, const N: usize> SplitGrantR<'a, STO, { N }> { +impl<'a, STO: BBGetter> SplitGrantR<'a, STO> { /// Release a sequence of bytes from the buffer, allowing the space /// to be used by later writes. This consumes the grant. 
/// @@ -1097,7 +1098,7 @@ impl<'a, STO: BBGetter, const N: usize> SplitGrantR<'a, STO, { N }> { } } -impl<'a, STO, const N: usize> Drop for GrantW<'a, STO, N> +impl<'a, STO> Drop for GrantW<'a, STO> where STO: BBGetter, { @@ -1106,7 +1107,7 @@ where } } -impl<'a, STO, const N: usize> Drop for GrantR<'a, STO, N> +impl<'a, STO> Drop for GrantR<'a, STO> where STO: BBGetter, { @@ -1115,7 +1116,7 @@ where } } -impl<'a, STO, const N: usize> Deref for GrantW<'a, STO, N> +impl<'a, STO> Deref for GrantW<'a, STO> where STO: BBGetter, { @@ -1126,7 +1127,7 @@ where } } -impl<'a, STO, const N: usize> DerefMut for GrantW<'a, STO, N> +impl<'a, STO> DerefMut for GrantW<'a, STO> where STO: BBGetter, { @@ -1135,7 +1136,7 @@ where } } -impl<'a, STO, const N: usize> Deref for GrantR<'a, STO, N> +impl<'a, STO> Deref for GrantR<'a, STO> where STO: BBGetter, { @@ -1146,7 +1147,7 @@ where } } -impl<'a, STO, const N: usize> DerefMut for GrantR<'a, STO, N> +impl<'a, STO> DerefMut for GrantR<'a, STO> where STO: BBGetter, { diff --git a/core/src/framed.rs b/core/src/framed.rs index e695180..f1487b8 100644 --- a/core/src/framed.rs +++ b/core/src/framed.rs @@ -84,19 +84,19 @@ use core::{ }; /// A producer of Framed data -pub struct FrameProducer<'a, STO, const N: usize> +pub struct FrameProducer<'a, STO> where STO: BBGetter, { - pub(crate) producer: Producer<'a, STO, N>, + pub(crate) producer: Producer<'a, STO>, } -impl<'a, STO: BBGetter, const N: usize> FrameProducer<'a, STO, { N }> { +impl<'a, STO: BBGetter> FrameProducer<'a, STO> { /// Receive a grant for a frame with a maximum size of `max_sz` in bytes. /// /// This size does not include the size of the frame header. The exact size /// of the frame can be set on `commit`. 
- pub fn grant(&mut self, max_sz: usize) -> Result> { + pub fn grant(&mut self, max_sz: usize) -> Result> { let hdr_len = encoded_len(max_sz); Ok(FrameGrantW { grant_w: self.producer.grant_exact(max_sz + hdr_len)?, @@ -106,16 +106,16 @@ impl<'a, STO: BBGetter, const N: usize> FrameProducer<'a, STO, { N }> { } /// A consumer of Framed data -pub struct FrameConsumer<'a, STO, const N: usize> +pub struct FrameConsumer<'a, STO> where STO: BBGetter, { - pub(crate) consumer: Consumer<'a, STO, N>, + pub(crate) consumer: Consumer<'a, STO>, } -impl<'a, STO: BBGetter, const N: usize> FrameConsumer<'a, STO, { N }> { +impl<'a, STO: BBGetter> FrameConsumer<'a, STO> { /// Obtain the next available frame, if any - pub fn read(&mut self) -> Option> { + pub fn read(&mut self) -> Option> { // Get all available bytes. We never wrap a frame around, // so if a header is available, the whole frame will be. let mut grant_r = self.consumer.read().ok()?; @@ -147,11 +147,11 @@ impl<'a, STO: BBGetter, const N: usize> FrameConsumer<'a, STO, { N }> { /// the contents without first calling `to_commit()`, then no /// frame will be comitted for writing. #[derive(Debug, PartialEq)] -pub struct FrameGrantW<'a, STO, const N: usize> +pub struct FrameGrantW<'a, STO> where STO: BBGetter, { - grant_w: GrantW<'a, STO, N>, + grant_w: GrantW<'a, STO>, hdr_len: u8, } @@ -160,15 +160,15 @@ where /// NOTE: If the grant is dropped without explicitly releasing /// the contents, then no frame will be released. 
#[derive(Debug, PartialEq)] -pub struct FrameGrantR<'a, STO, const N: usize> +pub struct FrameGrantR<'a, STO> where STO: BBGetter, { - grant_r: GrantR<'a, STO, N>, + grant_r: GrantR<'a, STO>, hdr_len: u8, } -impl<'a, STO: BBGetter, const N: usize> Deref for FrameGrantW<'a, STO, { N }> { +impl<'a, STO: BBGetter> Deref for FrameGrantW<'a, STO> { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -176,13 +176,13 @@ impl<'a, STO: BBGetter, const N: usize> Deref for FrameGrantW<'a, STO, { N }> { } } -impl<'a, STO: BBGetter, const N: usize> DerefMut for FrameGrantW<'a, STO, { N }> { +impl<'a, STO: BBGetter> DerefMut for FrameGrantW<'a, STO> { fn deref_mut(&mut self) -> &mut [u8] { &mut self.grant_w.buf[self.hdr_len.into()..] } } -impl<'a, STO: BBGetter, const N: usize> Deref for FrameGrantR<'a, STO, { N }> { +impl<'a, STO: BBGetter> Deref for FrameGrantR<'a, STO> { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -190,13 +190,13 @@ impl<'a, STO: BBGetter, const N: usize> Deref for FrameGrantR<'a, STO, { N }> { } } -impl<'a, STO: BBGetter, const N: usize> DerefMut for FrameGrantR<'a, STO, { N }> { +impl<'a, STO: BBGetter> DerefMut for FrameGrantR<'a, STO> { fn deref_mut(&mut self) -> &mut [u8] { &mut self.grant_r.buf[self.hdr_len.into()..] } } -impl<'a, STO: BBGetter, const N: usize> FrameGrantW<'a, STO, { N }> { +impl<'a, STO: BBGetter> FrameGrantW<'a, STO> { /// Commit a frame to make it available to the Consumer half. 
///
     /// `used` is the size of the payload, in bytes, not
@@ -233,7 +233,7 @@ impl<'a, STO: BBGetter, const N: usize> FrameGrantW<'a, STO, { N }> {
     }
 }
 
-impl<'a, STO: BBGetter, const N: usize> FrameGrantR<'a, STO, { N }> {
+impl<'a, STO: BBGetter> FrameGrantR<'a, STO> {
     /// Release a frame to make the space available for future writing
     ///
     /// Note: The full frame is always released

From 9ffaa73d22303dc53217604de61620318b0f56c9 Mon Sep 17 00:00:00 2001
From: James Munns
Date: Sat, 20 Mar 2021 23:57:55 +0100
Subject: [PATCH 10/11] Update notes, still thinking about Arc

---
 core/Cargo.toml       | 1 +
 core/storage-notes.md | 7 ++++++-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/core/Cargo.toml b/core/Cargo.toml
index 56424a1..43d5976 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -19,6 +19,7 @@ cortex-m = { version = "0.6.0", optional = true }
 
 [features]
 thumbv6 = ["cortex-m"]
+alloc = []
 
 [package.metadata.docs.rs]
 all-features = true
diff --git a/core/storage-notes.md b/core/storage-notes.md
index 4611d39..75d3ebd 100644
--- a/core/storage-notes.md
+++ b/core/storage-notes.md
@@ -21,7 +21,8 @@ Future:
 ### Static Buffer
 
 ```rust
-static BB_QUEUE: BBQueue> = BBQueue::new(BBBuffer::new());
+
+static BB_QUEUE: BBQueue> = BBQueue::new(OwnedBBBuffer::new());
 
 fn main() {
     let (prod, cons) = BB_QUEUE.try_split().unwrap();
@@ -55,6 +56,8 @@ fn main() {
 }
 ```
 
+NOTE: This is not yet possible as of the current state of the repo. I do intend to support it.
+
 ### User provided storage
 
 Choice A: Naive
@@ -114,3 +117,5 @@ fn main() {
     let (prod, cons) = bbqueue.try_split().unwrap();
 }
 ```
+
+NOTE: This is not yet possible as of the current state of the repo. I do intend to support it.

From 6e08107f4a09e8e34168692e68ad0cd382f11ad7 Mon Sep 17 00:00:00 2001
From: James Munns
Date: Sun, 21 Mar 2021 00:45:51 +0100
Subject: [PATCH 11/11] I... am surprised this works?

EDIT: It doesn't.
--- core/src/bbbuffer.rs | 111 +++++++++++++++++++++++++++---------------- core/src/framed.rs | 31 ++++++------ 2 files changed, 86 insertions(+), 56 deletions(-) diff --git a/core/src/bbbuffer.rs b/core/src/bbbuffer.rs index 1e36817..1446901 100644 --- a/core/src/bbbuffer.rs +++ b/core/src/bbbuffer.rs @@ -66,13 +66,19 @@ pub struct BBHeader { // would be wildly unsafe pub(crate) mod sealed { use crate::bbbuffer::BBHeader; + use crate::bbbuffer::OwnedBBBuffer; + + pub trait BBGetter<'a> { + type Duplicate: BBGetter<'a>; - pub trait BBGetter { fn get_header(&self) -> &BBHeader; fn get_storage(&self) -> (*mut u8, usize); + fn duplicate(&self) -> Self::Duplicate; } - impl BBGetter for crate::bbbuffer::OwnedBBBuffer { + impl<'a, const N: usize> BBGetter<'a> for OwnedBBBuffer { + type Duplicate = &'a OwnedBBBuffer; + fn get_header(&self) -> &BBHeader { &self.hdr } @@ -81,6 +87,27 @@ pub(crate) mod sealed { let ptr = self.storage.get().cast(); (ptr, N) } + + fn duplicate(&self) -> Self::Duplicate { + todo!() + } + } + + impl<'a, const N: usize> BBGetter<'a> for &'a OwnedBBBuffer { + type Duplicate = &'a OwnedBBBuffer; + + fn get_header(&self) -> &BBHeader { + &self.hdr + } + + fn get_storage(&self) -> (*mut u8, usize) { + let ptr = self.storage.get().cast(); + (ptr, N) + } + + fn duplicate(&self) -> Self::Duplicate { + todo!() + } } } use crate::sealed::BBGetter; @@ -102,7 +129,7 @@ impl<'a, STO> BBQueue { } } -impl<'a, STO: BBGetter> BBQueue { +impl<'a, STO: BBGetter<'a>> BBQueue { /// Attempt to split the `BBQueue` into `Consumer` and `Producer` halves to gain access to the /// buffer. If buffer has already been split, an error will be returned. 
/// @@ -134,18 +161,18 @@ impl<'a, STO: BBGetter> BBQueue { /// # bbqtest(); /// # } /// ``` - pub fn try_split(&'a self) -> Result<(Producer<'a, STO>, Consumer<'a, STO>)> { + pub fn try_split(&'a self) -> Result<(Producer<'a, STO::Duplicate>, Consumer<'a, STO::Duplicate>)> { if atomic::swap(&self.sto.get_header().already_split, true, AcqRel) { return Err(Error::AlreadySplit); } Ok(( Producer { - bbq: &self.sto, + bbq: self.sto.duplicate(), pd: PhantomData, }, Consumer { - bbq: &self.sto, + bbq: self.sto.duplicate(), pd: PhantomData, }, )) @@ -165,7 +192,7 @@ impl<'a, STO: BBGetter> BBQueue { /// section while splitting. pub fn try_split_framed( &'a self, - ) -> Result<(FrameProducer<'a, STO>, FrameConsumer<'a, STO>)> { + ) -> Result<(FrameProducer<'a, STO::Duplicate>, FrameConsumer<'a, STO::Duplicate>)> { let (producer, consumer) = self.try_split()?; Ok((FrameProducer { producer }, FrameConsumer { consumer })) } @@ -352,21 +379,21 @@ impl OwnedBBBuffer<{ N }> { /// discussion of grant methods that could be added in the future. pub struct Producer<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { // TODO: Is 'a the right lifetime? - bbq: &'a STO, + bbq: STO, pd: PhantomData<&'a ()>, } unsafe impl<'a, STO> Send for Producer<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, {} impl<'a, STO> Producer<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { /// Request a writable, contiguous section of memory of exactly /// `sz` bytes. 
If the buffer size requested is not available, @@ -400,7 +427,7 @@ where /// # bbqtest(); /// # } /// ``` - pub fn grant_exact(&mut self, sz: usize) -> Result> { + pub fn grant_exact(&mut self, sz: usize) -> Result> { let hdr = self.bbq.get_header(); let sto = self.bbq.get_storage(); @@ -457,7 +484,7 @@ where Ok(GrantW { buf: grant_slice, - bbq: self.bbq.clone(), + bbq: self.bbq.duplicate(), to_commit: 0, pd: PhantomData, }) @@ -501,7 +528,7 @@ where /// # bbqtest(); /// # } /// ``` - pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { + pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { let hdr = self.bbq.get_header(); let sto = self.bbq.get_storage(); @@ -562,7 +589,7 @@ where Ok(GrantW { buf: grant_slice, - bbq: self.bbq.clone(), + bbq: self.bbq.duplicate(), to_commit: 0, pd: PhantomData, }) @@ -570,15 +597,15 @@ where } /// `Consumer` is the primary interface for reading data from a `BBQueue`. -pub struct Consumer<'a, STO: BBGetter> { +pub struct Consumer<'a, STO: BBGetter<'a>> { // TODO: Is 'a the right lifetime? - bbq: &'a STO, + bbq: STO, pd: PhantomData<&'a ()>, } -unsafe impl<'a, STO: BBGetter> Send for Consumer<'a, STO> {} +unsafe impl<'a, STO: BBGetter<'a>> Send for Consumer<'a, STO> {} -impl<'a, STO: BBGetter> Consumer<'a, STO> { +impl<'a, STO: BBGetter<'a>> Consumer<'a, STO> { /// Obtains a contiguous slice of committed bytes. This slice may not /// contain ALL available bytes, if the writer has wrapped around. 
The /// remaining bytes will be available after all readable bytes are @@ -609,7 +636,7 @@ impl<'a, STO: BBGetter> Consumer<'a, STO> { /// # bbqtest(); /// # } /// ``` - pub fn read(&mut self) -> Result> { + pub fn read(&mut self) -> Result> { let hdr = self.bbq.get_header(); let sto = self.bbq.get_storage(); @@ -653,7 +680,7 @@ impl<'a, STO: BBGetter> Consumer<'a, STO> { Ok(GrantR { buf: grant_slice, - bbq: self.bbq.clone(), + bbq: self.bbq.duplicate(), to_release: 0, pd: PhantomData, }) @@ -661,7 +688,7 @@ impl<'a, STO: BBGetter> Consumer<'a, STO> { /// Obtains two disjoint slices, which are each contiguous of committed bytes. /// Combined these contain all previously commited data. - pub fn split_read(&mut self) -> Result> { + pub fn split_read(&mut self) -> Result> { let hdr = self.bbq.get_header(); let sto = self.bbq.get_storage(); @@ -710,14 +737,14 @@ impl<'a, STO: BBGetter> Consumer<'a, STO> { Ok(SplitGrantR { buf1: grant_slice1, buf2: grant_slice2, - bbq: self.bbq, + bbq: self.bbq.duplicate(), to_release: 0, pd: PhantomData, }) } } -impl BBQueue { +impl<'a, STO: BBGetter<'a>> BBQueue { /// Returns the size of the backing storage. /// /// This is the maximum number of bytes that can be stored in this queue. @@ -756,16 +783,16 @@ impl BBQueue { #[derive(Debug, PartialEq)] pub struct GrantW<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { pub(crate) buf: &'a mut [u8], // TODO: Is 'a the right lifetime? 
- bbq: &'a STO, + bbq: STO, pub(crate) to_commit: usize, pd: PhantomData<&'a ()>, } -unsafe impl<'a, STO: BBGetter> Send for GrantW<'a, STO> {} +unsafe impl<'a, STO: BBGetter<'a>> Send for GrantW<'a, STO> {} /// A structure representing a contiguous region of memory that /// may be read from, and potentially "released" (or cleared) @@ -782,11 +809,11 @@ unsafe impl<'a, STO: BBGetter> Send for GrantW<'a, STO> {} #[derive(Debug, PartialEq)] pub struct GrantR<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { pub(crate) buf: &'a mut [u8], // TODO: Is 'a the right lifetime? - bbq: &'a STO, + bbq: STO, pub(crate) to_release: usize, pd: PhantomData<&'a ()>, } @@ -797,21 +824,21 @@ where #[derive(Debug, PartialEq)] pub struct SplitGrantR<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { pub(crate) buf1: &'a mut [u8], pub(crate) buf2: &'a mut [u8], // TODO: Is 'a the right lifetime? - bbq: &'a STO, + bbq: STO, pub(crate) to_release: usize, pd: PhantomData<&'a ()>, } -unsafe impl<'a, STO: BBGetter> Send for GrantR<'a, STO> {} +unsafe impl<'a, STO: BBGetter<'a>> Send for GrantR<'a, STO> {} -unsafe impl<'a, STO: BBGetter> Send for SplitGrantR<'a, STO> {} +unsafe impl<'a, STO: BBGetter<'a>> Send for SplitGrantR<'a, STO> {} -impl<'a, STO: BBGetter> GrantW<'a, STO> { +impl<'a, STO: BBGetter<'a>> GrantW<'a, STO> { /// Finalizes a writable grant given by `grant()` or `grant_max()`. /// This makes the data available to be read via `read()`. This consumes /// the grant. @@ -913,7 +940,7 @@ impl<'a, STO: BBGetter> GrantW<'a, STO> { } } -impl<'a, STO: BBGetter> GrantR<'a, STO> { +impl<'a, STO: BBGetter<'a>> GrantR<'a, STO> { /// Release a sequence of bytes from the buffer, allowing the space /// to be used by later writes. This consumes the grant. 
/// @@ -1004,7 +1031,7 @@ impl<'a, STO: BBGetter> GrantR<'a, STO> { } } -impl<'a, STO: BBGetter> SplitGrantR<'a, STO> { +impl<'a, STO: BBGetter<'a>> SplitGrantR<'a, STO> { /// Release a sequence of bytes from the buffer, allowing the space /// to be used by later writes. This consumes the grant. /// @@ -1100,7 +1127,7 @@ impl<'a, STO: BBGetter> SplitGrantR<'a, STO> { impl<'a, STO> Drop for GrantW<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { fn drop(&mut self) { self.commit_inner(self.to_commit) @@ -1109,7 +1136,7 @@ where impl<'a, STO> Drop for GrantR<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { fn drop(&mut self) { self.release_inner(self.to_release) @@ -1118,7 +1145,7 @@ where impl<'a, STO> Deref for GrantW<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { type Target = [u8]; @@ -1129,7 +1156,7 @@ where impl<'a, STO> DerefMut for GrantW<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { fn deref_mut(&mut self) -> &mut [u8] { self.buf @@ -1138,7 +1165,7 @@ where impl<'a, STO> Deref for GrantR<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { type Target = [u8]; @@ -1149,7 +1176,7 @@ where impl<'a, STO> DerefMut for GrantR<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { fn deref_mut(&mut self) -> &mut [u8] { self.buf diff --git a/core/src/framed.rs b/core/src/framed.rs index f1487b8..9417a2c 100644 --- a/core/src/framed.rs +++ b/core/src/framed.rs @@ -86,17 +86,20 @@ use core::{ /// A producer of Framed data pub struct FrameProducer<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { pub(crate) producer: Producer<'a, STO>, } -impl<'a, STO: BBGetter> FrameProducer<'a, STO> { +impl<'a, STO> FrameProducer<'a, STO> +where + STO: BBGetter<'a>, +{ /// Receive a grant for a frame with a maximum size of `max_sz` in bytes. /// /// This size does not include the size of the frame header. The exact size /// of the frame can be set on `commit`. 
- pub fn grant(&mut self, max_sz: usize) -> Result> { + pub fn grant(&mut self, max_sz: usize) -> Result> { let hdr_len = encoded_len(max_sz); Ok(FrameGrantW { grant_w: self.producer.grant_exact(max_sz + hdr_len)?, @@ -108,14 +111,14 @@ impl<'a, STO: BBGetter> FrameProducer<'a, STO> { /// A consumer of Framed data pub struct FrameConsumer<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { pub(crate) consumer: Consumer<'a, STO>, } -impl<'a, STO: BBGetter> FrameConsumer<'a, STO> { +impl<'a, STO: BBGetter<'a>> FrameConsumer<'a, STO> { /// Obtain the next available frame, if any - pub fn read(&mut self) -> Option> { + pub fn read(&mut self) -> Option> { // Get all available bytes. We never wrap a frame around, // so if a header is available, the whole frame will be. let mut grant_r = self.consumer.read().ok()?; @@ -149,7 +152,7 @@ impl<'a, STO: BBGetter> FrameConsumer<'a, STO> { #[derive(Debug, PartialEq)] pub struct FrameGrantW<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { grant_w: GrantW<'a, STO>, hdr_len: u8, @@ -162,13 +165,13 @@ where #[derive(Debug, PartialEq)] pub struct FrameGrantR<'a, STO> where - STO: BBGetter, + STO: BBGetter<'a>, { grant_r: GrantR<'a, STO>, hdr_len: u8, } -impl<'a, STO: BBGetter> Deref for FrameGrantW<'a, STO> { +impl<'a, STO: BBGetter<'a>> Deref for FrameGrantW<'a, STO> { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -176,13 +179,13 @@ impl<'a, STO: BBGetter> Deref for FrameGrantW<'a, STO> { } } -impl<'a, STO: BBGetter> DerefMut for FrameGrantW<'a, STO> { +impl<'a, STO: BBGetter<'a>> DerefMut for FrameGrantW<'a, STO> { fn deref_mut(&mut self) -> &mut [u8] { &mut self.grant_w.buf[self.hdr_len.into()..] 
} } -impl<'a, STO: BBGetter> Deref for FrameGrantR<'a, STO> { +impl<'a, STO: BBGetter<'a>> Deref for FrameGrantR<'a, STO> { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -190,13 +193,13 @@ impl<'a, STO: BBGetter> Deref for FrameGrantR<'a, STO> { } } -impl<'a, STO: BBGetter> DerefMut for FrameGrantR<'a, STO> { +impl<'a, STO: BBGetter<'a>> DerefMut for FrameGrantR<'a, STO> { fn deref_mut(&mut self) -> &mut [u8] { &mut self.grant_r.buf[self.hdr_len.into()..] } } -impl<'a, STO: BBGetter> FrameGrantW<'a, STO> { +impl<'a, STO: BBGetter<'a>> FrameGrantW<'a, STO> { /// Commit a frame to make it available to the Consumer half. /// /// `used` is the size of the payload, in bytes, not @@ -233,7 +236,7 @@ impl<'a, STO: BBGetter> FrameGrantW<'a, STO> { } } -impl<'a, STO: BBGetter> FrameGrantR<'a, STO> { +impl<'a, STO: BBGetter<'a>> FrameGrantR<'a, STO> { /// Release a frame to make the space available for future writing /// /// Note: The full frame is always released