stm32: initial support for alternative ringbuffer implementation

parent 4f08d5bc5f
commit 2b10caafd4
@@ -93,6 +93,8 @@ aligned = "0.4.1"
 
 [dev-dependencies]
 critical-section = { version = "1.1", features = ["std"] }
+proptest = "1.5.0"
+proptest-state-machine = "0.3.0"
 
 [build-dependencies]
 proc-macro2 = "1.0.36"
@@ -763,10 +763,6 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
         self.0.get_remaining_transfers() as _
     }
 
-    fn get_complete_count(&self) -> usize {
-        STATE[self.0.id as usize].complete_count.load(Ordering::Acquire)
-    }
-
     fn reset_complete_count(&mut self) -> usize {
         let state = &STATE[self.0.id as usize];
         #[cfg(not(armv6m))]
@@ -1,668 +0,0 @@
#![cfg_attr(gpdma, allow(unused))]

use core::future::poll_fn;
use core::ops::Range;
use core::sync::atomic::{compiler_fence, Ordering};
use core::task::{Poll, Waker};

use super::word::Word;

/// A "read-only" ring-buffer to be used together with the DMA controller which
/// writes in a circular way, "uncontrolled" to the buffer.
///
/// A snapshot of the ring buffer state can be attained by setting the `ndtr` field
/// to the current register value. `ndtr` describes the current position of the DMA
/// write.
///
/// # Buffer layout
///
/// ```text
/// Without wraparound:                             With wraparound:
///
///  + buf                      +--- NDTR ---+       + buf        +---------- NDTR ----------+
///  |                          |            |       |            |                          |
///  v                          v            v       v            v                          v
/// +-----------------------------------------+     +-----------------------------------------+
/// |oooooooooooXXXXXXXXXXXXXXXXoooooooooooooo|     |XXXXXXXXXXXXXooooooooooooXXXXXXXXXXXXXXXX|
/// +-----------------------------------------+     +-----------------------------------------+
///  ^          ^               ^                    ^            ^           ^
///  |          |               |                    |            |           |
///  +- start --+               |                    +- end ------+           |
///  |                          |                    |                        |
///  +- end --------------------+                    +- start ----------------+
/// ```
pub struct ReadableDmaRingBuffer<'a, W: Word> {
    pub(crate) dma_buf: &'a mut [W],
    start: usize,
}

#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct OverrunError;

pub trait DmaCtrl {
    /// Get the NDTR register value, i.e. the space left in the underlying
    /// buffer until the dma writer wraps.
    fn get_remaining_transfers(&self) -> usize;

    /// Get the transfer completed counter.
    /// This counter is incremented by the dma controller when NDTR is reloaded,
    /// i.e. when the writing wraps.
    fn get_complete_count(&self) -> usize;

    /// Reset the transfer completed counter to 0 and return the value just prior to the reset.
    fn reset_complete_count(&mut self) -> usize;

    /// Set the waker for a running poll_fn
    fn set_waker(&mut self, waker: &Waker);
}

impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        Self { dma_buf, start: 0 }
    }

    /// Reset the ring buffer to its initial state
    pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
        self.start = 0;
        dma.reset_complete_count();
    }

    /// The capacity of the ringbuffer
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// The current position of the ringbuffer
    fn pos(&self, dma: &mut impl DmaCtrl) -> usize {
        self.cap() - dma.get_remaining_transfers()
    }

    /// Read an exact number of elements from the ringbuffer.
    ///
    /// Returns the remaining number of elements available for immediate reading.
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    ///
    /// Async/Wake Behavior:
    /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
    /// and when it wraps around. This means that when called with a buffer of length 'M', when this
    /// ring buffer was created with a buffer of size 'N':
    /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
    pub async fn read_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &mut [W]) -> Result<usize, OverrunError> {
        let mut read_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            compiler_fence(Ordering::SeqCst);

            match self.read(dma, &mut buffer[read_data..buffer_len]) {
                Ok((len, remaining)) => {
                    read_data += len;
                    if read_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    /// Read elements from the ring buffer
    /// Return a tuple of the length read and the length remaining in the buffer
    /// If not all of the elements were read, then there will be some elements in the buffer remaining
    /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    pub fn read(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
        /*
            This algorithm is optimistic: we assume we haven't overrun more than a full buffer and then check
            after we've done our work to see we have. This is because on stm32, an interrupt is not guaranteed
            to fire in the same clock cycle that a register is read, so checking get_complete_count early does
            not yield relevant information.

            Therefore, the only variable we really need to know is ndtr. If the dma has overrun by more than a full
            buffer, we will do a bit more work than we have to, but algorithms should not be optimized for error
            conditions.

            After we've done our work, we confirm that we haven't overrun more than a full buffer, and also that
            the dma has not overrun within the data we could have copied. We check the data we could have copied
            rather than the data we actually copied because it costs nothing and confirms an error condition
            earlier.
        */
        let end = self.pos(dma);
        if self.start == end && dma.get_complete_count() == 0 {
            // No elements are available in the buffer
            Ok((0, self.cap()))
        } else if self.start < end {
            // The available, unread portion in the ring buffer DOES NOT wrap
            // Copy out the elements from the dma buffer
            let len = self.copy_to(buf, self.start..end);

            compiler_fence(Ordering::SeqCst);

            /*
                first, check if the dma has wrapped at all if it's after end
                or more than once if it's before start

                this is in a critical section to try to reduce mushy behavior.
                it's not ideal but it's the best we can do

                then, get the current position of of the dma write and check
                if it's inside data we could have copied
            */
            let (pos, complete_count) = critical_section::with(|_| (self.pos(dma), dma.get_complete_count()));
            if (pos >= self.start && pos < end) || (complete_count > 0 && pos >= end) || complete_count > 1 {
                Err(OverrunError)
            } else {
                self.start = (self.start + len) % self.cap();

                Ok((len, self.cap() - self.start))
            }
        } else if self.start + buf.len() < self.cap() {
            // The available, unread portion in the ring buffer DOES wrap
            // The DMA writer has wrapped since we last read and is currently
            // writing (or the next byte added will be) in the beginning of the ring buffer.

            // The provided read buffer is not large enough to include all elements from the tail of the dma buffer.

            // Copy out from the dma buffer
            let len = self.copy_to(buf, self.start..self.cap());

            compiler_fence(Ordering::SeqCst);

            /*
                first, check if the dma has wrapped around more than once

                then, get the current position of of the dma write and check
                if it's inside data we could have copied
            */
            let pos = self.pos(dma);
            if pos > self.start || pos < end || dma.get_complete_count() > 1 {
                Err(OverrunError)
            } else {
                self.start = (self.start + len) % self.cap();

                Ok((len, self.start + end))
            }
        } else {
            // The available, unread portion in the ring buffer DOES wrap
            // The DMA writer has wrapped since we last read and is currently
            // writing (or the next byte added will be) in the beginning of the ring buffer.

            // The provided read buffer is large enough to include all elements from the tail of the dma buffer,
            // so the next read will not have any unread tail elements in the ring buffer.

            // Copy out from the dma buffer
            let tail = self.copy_to(buf, self.start..self.cap());
            let head = self.copy_to(&mut buf[tail..], 0..end);

            compiler_fence(Ordering::SeqCst);

            /*
                first, check if the dma has wrapped around more than once

                then, get the current position of of the dma write and check
                if it's inside data we could have copied
            */
            let pos = self.pos(dma);
            if pos > self.start || pos < end || dma.reset_complete_count() > 1 {
                Err(OverrunError)
            } else {
                self.start = head;
                Ok((tail + head, self.cap() - self.start))
            }
        }
    }
    /// Copy from the dma buffer at `data_range` into `buf`
    fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize {
        // Limit the number of elements that can be copied
        let length = usize::min(data_range.len(), buf.len());

        // Copy from dma buffer into read buffer
        // We need to do it like this instead of a simple copy_from_slice() because
        // reading from a part of memory that may be simultaneously written to is unsafe
        unsafe {
            let dma_buf = self.dma_buf.as_ptr();

            for i in 0..length {
                buf[i] = core::ptr::read_volatile(dma_buf.offset((data_range.start + i) as isize));
            }
        }

        length
    }
}

pub struct WritableDmaRingBuffer<'a, W: Word> {
    pub(crate) dma_buf: &'a mut [W],
    end: usize,
}

impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        Self { dma_buf, end: 0 }
    }

    /// Reset the ring buffer to its initial state
    pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
        self.end = 0;
        dma.reset_complete_count();
    }

    /// The capacity of the ringbuffer
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// The current position of the ringbuffer
    fn pos(&self, dma: &mut impl DmaCtrl) -> usize {
        self.cap() - dma.get_remaining_transfers()
    }

    /// Write elements directly to the buffer. This must be done before the DMA is started
    /// or after the buffer has been cleared using `clear()`.
    pub fn write_immediate(&mut self, buffer: &[W]) -> Result<(usize, usize), OverrunError> {
        if self.end != 0 {
            return Err(OverrunError);
        }
        let written = self.copy_from(buffer, 0..self.cap());
        self.end = written % self.cap();
        Ok((written, self.cap() - written))
    }

    /// Write an exact number of elements to the ringbuffer.
    pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, OverrunError> {
        let mut written_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            compiler_fence(Ordering::SeqCst);

            match self.write(dma, &buffer[written_data..buffer_len]) {
                Ok((len, remaining)) => {
                    written_data += len;
                    if written_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    /// Write elements from the ring buffer
    /// Return a tuple of the length written and the capacity remaining to be written in the buffer
    pub fn write(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> {
        let start = self.pos(dma);
        if start > self.end {
            // The occupied portion in the ring buffer DOES wrap
            let len = self.copy_from(buf, self.end..start);

            compiler_fence(Ordering::SeqCst);

            // Confirm that the DMA is not inside data we could have written
            let (pos, complete_count) = critical_section::with(|_| (self.pos(dma), dma.get_complete_count()));
            if (pos >= self.end && pos < start) || (complete_count > 0 && pos >= start) || complete_count > 1 {
                Err(OverrunError)
            } else {
                self.end = (self.end + len) % self.cap();

                Ok((len, self.cap() - (start - self.end)))
            }
        } else if start == self.end && dma.get_complete_count() == 0 {
            Ok((0, 0))
        } else if start <= self.end && self.end + buf.len() < self.cap() {
            // The occupied portion in the ring buffer DOES NOT wrap
            // and copying elements into the buffer WILL NOT cause it to

            // Copy into the dma buffer
            let len = self.copy_from(buf, self.end..self.cap());

            compiler_fence(Ordering::SeqCst);

            // Confirm that the DMA is not inside data we could have written
            let pos = self.pos(dma);
            if pos > self.end || pos < start || dma.get_complete_count() > 1 {
                Err(OverrunError)
            } else {
                self.end = (self.end + len) % self.cap();

                Ok((len, self.cap() - (self.end - start)))
            }
        } else {
            // The occupied portion in the ring buffer DOES NOT wrap
            // and copying elements into the buffer WILL cause it to

            let tail = self.copy_from(buf, self.end..self.cap());
            let head = self.copy_from(&buf[tail..], 0..start);

            compiler_fence(Ordering::SeqCst);

            // Confirm that the DMA is not inside data we could have written
            let pos = self.pos(dma);
            if pos > self.end || pos < start || dma.reset_complete_count() > 1 {
                Err(OverrunError)
            } else {
                self.end = head;

                Ok((tail + head, self.cap() - (start - self.end)))
            }
        }
    }
    /// Copy into the dma buffer at `data_range` from `buf`
    fn copy_from(&mut self, buf: &[W], data_range: Range<usize>) -> usize {
        // Limit the number of elements that can be copied
        let length = usize::min(data_range.len(), buf.len());

        // Copy into dma buffer from read buffer
        // We need to do it like this instead of a simple copy_from_slice() because
        // reading from a part of memory that may be simultaneously written to is unsafe
        unsafe {
            let dma_buf = self.dma_buf.as_mut_ptr();

            for i in 0..length {
                core::ptr::write_volatile(dma_buf.offset((data_range.start + i) as isize), buf[i]);
            }
        }

        length
    }
}
#[cfg(test)]
mod tests {
    use core::array;
    use std::{cell, vec};

    use super::*;

    #[allow(dead_code)]
    #[derive(PartialEq, Debug)]
    enum TestCircularTransferRequest {
        GetCompleteCount(usize),
        ResetCompleteCount(usize),
        PositionRequest(usize),
    }

    struct TestCircularTransfer {
        len: usize,
        requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>,
    }

    impl DmaCtrl for TestCircularTransfer {
        fn get_remaining_transfers(&self) -> usize {
            match self.requests.borrow_mut().pop().unwrap() {
                TestCircularTransferRequest::PositionRequest(pos) => {
                    let len = self.len;

                    assert!(len >= pos);

                    len - pos
                }
                _ => unreachable!(),
            }
        }

        fn get_complete_count(&self) -> usize {
            match self.requests.borrow_mut().pop().unwrap() {
                TestCircularTransferRequest::GetCompleteCount(complete_count) => complete_count,
                _ => unreachable!(),
            }
        }

        fn reset_complete_count(&mut self) -> usize {
            match self.requests.get_mut().pop().unwrap() {
                TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count,
                _ => unreachable!(),
            }
        }

        fn set_waker(&mut self, waker: &Waker) {}
    }

    impl TestCircularTransfer {
        pub fn new(len: usize) -> Self {
            Self {
                requests: cell::RefCell::new(vec![]),
                len,
            }
        }

        pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) {
            requests.reverse();
            self.requests.replace(requests);
        }
    }

    #[test]
    fn empty_and_read_not_started() {
        let mut dma_buf = [0u8; 16];
        let ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
    }

    #[test]
    fn can_read() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::PositionRequest(10),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!([0, 1], buf);
        assert_eq!(2, ringbuf.start);

        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(10),
            TestCircularTransferRequest::PositionRequest(12),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!([2, 3], buf);
        assert_eq!(4, ringbuf.start);

        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(12),
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 8];
        assert_eq!(8, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]);
        assert_eq!(12, ringbuf.start);
    }

    #[test]
    fn can_read_with_wrap() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to close to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::PositionRequest(16),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 14];
        assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(14, ringbuf.start);

        /*
            Now, read around the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::ResetCompleteCount(1),
        ]);
        let mut buf = [0; 6];
        assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(4, ringbuf.start);
    }

    #[test]
    fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to close to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::PositionRequest(16),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 14];
        assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(14, ringbuf.start);

        /*
            Now, read to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::ResetCompleteCount(1),
        ]);
        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(0, ringbuf.start);
    }

    #[test]
    fn can_read_when_dma_writer_wraps_once_with_same_ndtr() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to about the middle of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 6];
        assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(6, ringbuf.start);

        /*
            Now, wrap the DMA controller around
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(1),
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(1),
        ]);
        let mut buf = [0; 6];
        assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(12, ringbuf.start);
    }

    #[test]
    fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read a few bytes
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(2),
            TestCircularTransferRequest::PositionRequest(2),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 6];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(2, ringbuf.start);

        /*
            Now, overtake the reader
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(4),
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(1),
        ]);
        let mut buf = [0; 6];
        assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
    }

    #[test]
    fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to close to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::PositionRequest(16),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 14];
        assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(14, ringbuf.start);

        /*
            Now, overtake the reader
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::PositionRequest(10),
            TestCircularTransferRequest::ResetCompleteCount(2),
        ]);
        let mut buf = [0; 6];
        assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
    }
}
							
								
								
									
embassy-stm32/src/dma/ringbuffer/mod.rs (new file, 293 lines)
@@ -0,0 +1,293 @@
#![cfg_attr(gpdma, allow(unused))]

use core::future::poll_fn;
use core::task::{Poll, Waker};

use crate::dma::word::Word;

pub trait DmaCtrl {
    /// Get the NDTR register value, i.e. the space left in the underlying
    /// buffer until the dma writer wraps.
    fn get_remaining_transfers(&self) -> usize;

    /// Reset the transfer completed counter to 0 and return the value just prior to the reset.
    fn reset_complete_count(&mut self) -> usize;

    /// Set the waker for a running poll_fn
    fn set_waker(&mut self, waker: &Waker);
}

#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct OverrunError;

#[derive(Debug, Clone, Copy, Default)]
struct DmaIndex {
    completion_count: usize,
    pos: usize,
}

fn pos(cap: usize, dma: &impl DmaCtrl) -> usize {
    cap - dma.get_remaining_transfers()
}

impl DmaIndex {
    fn reset(&mut self) {
        self.pos = 0;
        self.completion_count = 0;
    }

    fn as_index(&self, cap: usize, offset: usize) -> usize {
        (self.pos + offset) % cap
    }

    fn dma_sync(&mut self, cap: usize, dma: &mut impl DmaCtrl) {
        let fst_pos = pos(cap, dma);
        let fst_count = dma.reset_complete_count();
        let pos = pos(cap, dma);

        let wrap_count = if pos >= fst_pos {
            fst_count
        } else {
            fst_count + dma.reset_complete_count()
        };

        self.pos = pos;
        self.completion_count += wrap_count;
    }

    fn advance(&mut self, cap: usize, steps: usize) {
        let next = self.pos + steps;
        self.completion_count += next / cap;
        self.pos = next % cap;
    }

    fn normalize(lhs: &mut DmaIndex, rhs: &mut DmaIndex) {
        let min_count = lhs.completion_count.min(rhs.completion_count);
        lhs.completion_count -= min_count;
        rhs.completion_count -= min_count;
    }

    fn diff(&mut self, cap: usize, rhs: &mut DmaIndex) -> isize {
        Self::normalize(self, rhs);
        (self.completion_count * cap + self.pos) as isize - (rhs.completion_count * cap + rhs.pos) as isize
    }
}

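For orientation, a standalone sketch (separate from the diff) of the index arithmetic above: `completion_count` counts whole buffer wraps, `pos` is the offset within the buffer, and the distance between two indices is compared against the capacity to detect an overrun. The code mirrors the names in the diff but simplifies `diff` (completion-count normalization is omitted); the 16-element capacity and step counts are illustrative assumptions.

```rust
#[derive(Debug, Clone, Copy, Default)]
struct DmaIndex {
    completion_count: usize,
    pos: usize,
}

impl DmaIndex {
    fn advance(&mut self, cap: usize, steps: usize) {
        let next = self.pos + steps;
        self.completion_count += next / cap; // whole wraps of the buffer
        self.pos = next % cap;               // position within the buffer
    }

    // Signed distance in elements between two indices (simplified).
    fn diff(&self, cap: usize, rhs: &DmaIndex) -> isize {
        (self.completion_count * cap + self.pos) as isize - (rhs.completion_count * cap + rhs.pos) as isize
    }
}

fn main() {
    const CAP: usize = 16;
    let mut write = DmaIndex::default();
    let mut read = DmaIndex::default();

    // The DMA writes 20 words into a 16-word buffer: one full wrap plus 4.
    write.advance(CAP, 20);
    assert_eq!((write.completion_count, write.pos), (1, 4));

    // Nothing read yet: 20 unread words exceed the capacity -> overrun.
    assert!(write.diff(CAP, &read) > CAP as isize);

    // Had the reader consumed 8 words first, only 12 would be pending.
    read.advance(CAP, 8);
    assert_eq!(write.diff(CAP, &read), 12);
}
```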
pub struct ReadableDmaRingBuffer<'a, W: Word> {
    dma_buf: &'a mut [W],
    write_index: DmaIndex,
    read_index: DmaIndex,
}

impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
    /// Construct an empty buffer.
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        Self {
            dma_buf,
            write_index: Default::default(),
            read_index: Default::default(),
        }
    }

    /// Reset the ring buffer to its initial state
    pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
        dma.reset_complete_count();
        self.write_index.reset();
        self.update_dma_index(dma);
        self.read_index = self.write_index;
    }

    /// The capacity of the ringbuffer
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// Read elements from the ring buffer
    /// Return a tuple of the length read and the length remaining in the buffer
    /// If not all of the elements were read, then there will be some elements in the buffer remaining
    /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    pub fn read(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
        let readable = self.margin(dma)?.min(buf.len());
        for i in 0..readable {
            buf[i] = self.read_buf(i);
        }
        let available = self.margin(dma)?;
        self.read_index.advance(self.cap(), readable);
        Ok((readable, available - readable))
    }

    /// Read an exact number of elements from the ringbuffer.
    ///
    /// Returns the remaining number of elements available for immediate reading.
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    ///
    /// Async/Wake Behavior:
    /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
    /// and when it wraps around. This means that when called with a buffer of length 'M', when this
    /// ring buffer was created with a buffer of size 'N':
    /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
    pub async fn read_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &mut [W]) -> Result<usize, OverrunError> {
        let mut read_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            match self.read(dma, &mut buffer[read_data..buffer_len]) {
                Ok((len, remaining)) => {
                    read_data += len;
                    if read_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    fn update_dma_index(&mut self, dma: &mut impl DmaCtrl) {
        self.write_index.dma_sync(self.cap(), dma)
    }

    fn read_buf(&self, offset: usize) -> W {
        unsafe {
            core::ptr::read_volatile(
                self.dma_buf
                    .as_ptr()
                    .offset(self.read_index.as_index(self.cap(), offset) as isize),
            )
        }
    }

    /// Returns available dma samples
    fn margin(&mut self, dma: &mut impl DmaCtrl) -> Result<usize, OverrunError> {
        self.update_dma_index(dma);

        let diff: usize = self
            .write_index
            .diff(self.cap(), &mut self.read_index)
            .try_into()
            .unwrap();

        if diff > self.cap() {
            Err(OverrunError)
        } else {
            Ok(diff)
        }
    }
}

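A hedged sketch of how the reader side is intended to be driven, with a hand-rolled `FakeDma` standing in for a real channel. `FakeDma`, its field names, and the 8-element buffer are assumptions for illustration; in the driver the `DmaCtrl` implementation sits on top of the NDTR register and the transfer-complete interrupt, as the earlier hunk suggests.

```rust
use core::task::Waker;

/// Hypothetical stand-in for a real DMA channel (illustration only).
struct FakeDma {
    cap: usize,   // capacity of the DMA buffer, in words
    pos: usize,   // how far the simulated DMA has written into the buffer
    wraps: usize, // completed wraps since the last reset
}

impl DmaCtrl for FakeDma {
    fn get_remaining_transfers(&self) -> usize {
        self.cap - self.pos // models the NDTR register
    }

    fn reset_complete_count(&mut self) -> usize {
        core::mem::replace(&mut self.wraps, 0)
    }

    fn set_waker(&mut self, _waker: &Waker) {}
}

fn demo() -> Result<(), OverrunError> {
    let mut dma_buf = [0u8; 8];
    let mut ring = ReadableDmaRingBuffer::new(&mut dma_buf);
    let mut dma = FakeDma { cap: 8, pos: 0, wraps: 0 };

    // Pretend the DMA has written 5 words, then ask for up to 4 of them.
    dma.pos = 5;
    let mut out = [0u8; 4];
    let (read, still_pending) = ring.read(&mut dma, &mut out)?;
    assert_eq!(read, 4);          // 4 words copied out
    assert_eq!(still_pending, 1); // 1 of the 5 written words is still unread
    Ok(())
}
```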
pub struct WritableDmaRingBuffer<'a, W: Word> {
    dma_buf: &'a mut [W],
    read_index: DmaIndex,
    write_index: DmaIndex,
}

impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
    /// Construct a ringbuffer filled with the given buffer data.
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        let len = dma_buf.len();
        Self {
            dma_buf,
            read_index: Default::default(),
            write_index: DmaIndex {
                completion_count: 0,
                pos: len,
            },
        }
    }

    /// Reset the ring buffer to its initial state. The buffer after the reset will be full.
    pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
        dma.reset_complete_count();
        self.read_index.reset();
        self.update_dma_index(dma);
        self.write_index = self.read_index;
        self.write_index.advance(self.cap(), self.cap());
    }

    /// Get the capacity of the ringbuffer.
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// Append data to the ring buffer.
    /// Returns a tuple of the data written and the remaining write capacity in the buffer.
    pub fn write(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> {
        let writable = self.margin(dma)?.min(buf.len());
        for i in 0..writable {
            self.write_buf(i, buf[i]);
        }
        let available = self.margin(dma)?;
        self.write_index.advance(self.cap(), writable);
        Ok((writable, available - writable))
    }

    /// Write elements directly to the buffer.
    pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
        for (i, data) in buf.iter().enumerate() {
            self.write_buf(i, *data)
        }
        let written = buf.len().min(self.cap());
        Ok((written, self.cap() - written))
    }

    /// Write an exact number of elements to the ringbuffer.
    pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, OverrunError> {
        let mut written_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            match self.write(dma, &buffer[written_data..buffer_len]) {
                Ok((len, remaining)) => {
                    written_data += len;
                    if written_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    fn update_dma_index(&mut self, dma: &mut impl DmaCtrl) {
        self.read_index.dma_sync(self.cap(), dma);
    }

    fn write_buf(&mut self, offset: usize, value: W) {
        unsafe {
            core::ptr::write_volatile(
                self.dma_buf
                    .as_mut_ptr()
                    .offset(self.write_index.as_index(self.cap(), offset) as isize),
                value,
            )
        }
    }

    fn margin(&mut self, dma: &mut impl DmaCtrl) -> Result<usize, OverrunError> {
        self.update_dma_index(dma);

        let diff = self.write_index.diff(self.cap(), &mut self.read_index);

        if diff < 0 {
            Err(OverrunError)
        } else {
            Ok(self.cap().saturating_sub(diff as usize))
        }
    }
}

#[cfg(test)]
mod tests;
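To make the writer-side margin rule concrete, a small worked example building on the standalone `DmaIndex` sketch shown earlier; the capacity of 8 and the step counts are again illustrative assumptions, not values from the commit.

```rust
// Free space is cap - (write_index - read_index); a negative distance means
// the DMA overtook the writer, which the real code reports as OverrunError.
fn writer_margin(write: &DmaIndex, read: &DmaIndex, cap: usize) -> Result<usize, ()> {
    let diff = write.diff(cap, read);
    if diff < 0 {
        Err(()) // OverrunError in the real code
    } else {
        Ok(cap.saturating_sub(diff as usize))
    }
}

fn main() {
    const CAP: usize = 8;
    // A freshly constructed writer is "full": write_index starts one whole
    // buffer ahead of read_index, so no free space is reported.
    let write = &mut DmaIndex { completion_count: 0, pos: CAP };
    let read = &mut DmaIndex::default();
    assert_eq!(writer_margin(write, read, CAP), Ok(0));

    // Once the DMA has drained 3 words, 3 slots open up for appending.
    read.advance(CAP, 3);
    assert_eq!(writer_margin(write, read, CAP), Ok(3));

    // The writer fills them again and the buffer is full once more.
    write.advance(CAP, 3);
    assert_eq!(writer_margin(write, read, CAP), Ok(0));
}
```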
							
								
								
									
embassy-stm32/src/dma/ringbuffer/tests/mod.rs (new file, 165 lines)
@@ -0,0 +1,165 @@
use std::{cell, vec};

use super::*;

#[allow(dead_code)]
#[derive(PartialEq, Debug)]
enum TestCircularTransferRequest {
    ResetCompleteCount(usize),
    PositionRequest(usize),
}

struct TestCircularTransfer {
    len: usize,
    requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>,
}

impl DmaCtrl for TestCircularTransfer {
    fn get_remaining_transfers(&self) -> usize {
        match self.requests.borrow_mut().pop().unwrap() {
            TestCircularTransferRequest::PositionRequest(pos) => {
                let len = self.len;

                assert!(len >= pos);

                len - pos
            }
            _ => unreachable!(),
        }
    }

    fn reset_complete_count(&mut self) -> usize {
        match self.requests.get_mut().pop().unwrap() {
            TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count,
            _ => unreachable!(),
        }
    }

    fn set_waker(&mut self, _waker: &Waker) {}
}

impl TestCircularTransfer {
    pub fn new(len: usize) -> Self {
        Self {
            requests: cell::RefCell::new(vec![]),
            len,
        }
    }

    pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) {
        requests.reverse();
        self.requests.replace(requests);
    }
}

const CAP: usize = 16;

#[test]
fn dma_index_dma_sync_syncs_position_to_last_read_if_sync_takes_place_on_same_dma_cycle() {
    let mut dma = TestCircularTransfer::new(CAP);
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(4),
        TestCircularTransferRequest::ResetCompleteCount(0),
        TestCircularTransferRequest::PositionRequest(7),
    ]);
    let mut index = DmaIndex::default();
    index.dma_sync(CAP, &mut dma);
    assert_eq!(index.completion_count, 0);
    assert_eq!(index.pos, 7);
}

#[test]
fn dma_index_dma_sync_updates_completion_count_properly_if_sync_takes_place_on_same_dma_cycle() {
    let mut dma = TestCircularTransfer::new(CAP);
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(4),
        TestCircularTransferRequest::ResetCompleteCount(2),
        TestCircularTransferRequest::PositionRequest(7),
    ]);
    let mut index = DmaIndex::default();
    index.completion_count = 1;
    index.dma_sync(CAP, &mut dma);
    assert_eq!(index.completion_count, 3);
    assert_eq!(index.pos, 7);
}

#[test]
fn dma_index_dma_sync_syncs_to_last_position_if_reads_occur_on_different_dma_cycles() {
    let mut dma = TestCircularTransfer::new(CAP);
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(10),
        TestCircularTransferRequest::ResetCompleteCount(1),
        TestCircularTransferRequest::PositionRequest(5),
        TestCircularTransferRequest::ResetCompleteCount(0),
    ]);
    let mut index = DmaIndex::default();
    index.dma_sync(CAP, &mut dma);
    assert_eq!(index.completion_count, 1);
    assert_eq!(index.pos, 5);
}

#[test]
fn dma_index_dma_sync_detects_new_cycle_if_later_position_is_less_than_first_and_first_completion_count_occurs_on_first_cycle(
) {
    let mut dma = TestCircularTransfer::new(CAP);
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(10),
        TestCircularTransferRequest::ResetCompleteCount(1),
        TestCircularTransferRequest::PositionRequest(5),
        TestCircularTransferRequest::ResetCompleteCount(1),
    ]);
    let mut index = DmaIndex::default();
    index.completion_count = 1;
    index.dma_sync(CAP, &mut dma);
    assert_eq!(index.completion_count, 3);
    assert_eq!(index.pos, 5);
}

#[test]
fn dma_index_dma_sync_detects_new_cycle_if_later_position_is_less_than_first_and_first_completion_count_occurs_on_later_cycle(
) {
    let mut dma = TestCircularTransfer::new(CAP);
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(10),
        TestCircularTransferRequest::ResetCompleteCount(2),
        TestCircularTransferRequest::PositionRequest(5),
        TestCircularTransferRequest::ResetCompleteCount(0),
    ]);
    let mut index = DmaIndex::default();
    index.completion_count = 1;
    index.dma_sync(CAP, &mut dma);
    assert_eq!(index.completion_count, 3);
    assert_eq!(index.pos, 5);
}

#[test]
fn dma_index_as_index_returns_index_mod_cap_by_default() {
    let index = DmaIndex::default();
    assert_eq!(index.as_index(CAP, 0), 0);
    assert_eq!(index.as_index(CAP, 1), 1);
    assert_eq!(index.as_index(CAP, 2), 2);
    assert_eq!(index.as_index(CAP, 3), 3);
    assert_eq!(index.as_index(CAP, 4), 4);
    assert_eq!(index.as_index(CAP, CAP), 0);
    assert_eq!(index.as_index(CAP, CAP + 1), 1);
}

#[test]
fn dma_index_advancing_increases_as_index() {
    let mut index = DmaIndex::default();
    assert_eq!(index.as_index(CAP, 0), 0);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 1);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 2);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 3);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 4);
    index.advance(CAP, CAP - 4);
    assert_eq!(index.as_index(CAP, 0), 0);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 1);
}

mod prop_test;
							
								
								
									
embassy-stm32/src/dma/ringbuffer/tests/prop_test/mod.rs (new file, 50 lines)
@@ -0,0 +1,50 @@
use std::task::Waker;

use proptest::prop_oneof;
use proptest::strategy::{self, BoxedStrategy, Strategy as _};
use proptest_state_machine::{prop_state_machine, ReferenceStateMachine, StateMachineTest};

use super::*;

const CAP: usize = 128;

#[derive(Debug, Default)]
struct DmaMock {
    pos: usize,
    wraps: usize,
}

impl DmaMock {
    pub fn advance(&mut self, steps: usize) {
        let next = self.pos + steps;
        self.pos = next % CAP;
        self.wraps += next / CAP;
    }
}

impl DmaCtrl for DmaMock {
    fn get_remaining_transfers(&self) -> usize {
        CAP - self.pos
    }

    fn reset_complete_count(&mut self) -> usize {
        core::mem::replace(&mut self.wraps, 0)
    }

    fn set_waker(&mut self, _waker: &Waker) {}
}

#[derive(Debug, Clone)]
enum Status {
    Available(usize),
    Failed,
}

impl Status {
    pub fn new(capacity: usize) -> Self {
        Self::Available(capacity)
    }
}

mod reader;
mod writer;
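As a quick sanity check on the mock above, this is how `DmaMock` is expected to behave when the simulated DMA runs past the end of the buffer. The sketch assumes it sits alongside the mock in this module; the step counts are arbitrary.

```rust
fn demo_mock() {
    let mut dma = DmaMock::default();
    assert_eq!(dma.get_remaining_transfers(), CAP); // nothing written yet

    // Advancing past the end of the 128-word buffer records one wrap...
    dma.advance(CAP + 5);
    assert_eq!(dma.get_remaining_transfers(), CAP - 5);

    // ...which reset_complete_count hands back exactly once.
    assert_eq!(dma.reset_complete_count(), 1);
    assert_eq!(dma.reset_complete_count(), 0);
}
```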
							
								
								
									
embassy-stm32/src/dma/ringbuffer/tests/prop_test/reader.rs (new file, 122 lines)
@@ -0,0 +1,122 @@
| use core::fmt::Debug; | ||||
| 
 | ||||
| use super::*; | ||||
| 
 | ||||
| #[derive(Debug, Clone)] | ||||
| enum ReaderTransition { | ||||
|     Write(usize), | ||||
|     Clear, | ||||
|     ReadUpTo(usize), | ||||
| } | ||||
| 
 | ||||
| struct ReaderSM; | ||||
| 
 | ||||
| impl ReferenceStateMachine for ReaderSM { | ||||
|     type State = Status; | ||||
|     type Transition = ReaderTransition; | ||||
| 
 | ||||
|     fn init_state() -> BoxedStrategy<Self::State> { | ||||
|         strategy::Just(Status::new(0)).boxed() | ||||
|     } | ||||
| 
 | ||||
|     fn transitions(_state: &Self::State) -> BoxedStrategy<Self::Transition> { | ||||
|         prop_oneof![ | ||||
|             (1..50_usize).prop_map(ReaderTransition::Write), | ||||
|             (1..50_usize).prop_map(ReaderTransition::ReadUpTo), | ||||
|             strategy::Just(ReaderTransition::Clear), | ||||
|         ] | ||||
|         .boxed() | ||||
|     } | ||||
| 
 | ||||
|     fn apply(status: Self::State, transition: &Self::Transition) -> Self::State { | ||||
|         match (status, transition) { | ||||
|             (_, ReaderTransition::Clear) => Status::Available(0), | ||||
|             (Status::Available(x), ReaderTransition::Write(y)) => { | ||||
|                 if x + y > CAP { | ||||
|                     Status::Failed | ||||
|                 } else { | ||||
|                     Status::Available(x + y) | ||||
|                 } | ||||
|             } | ||||
|             (Status::Available(x), ReaderTransition::ReadUpTo(y)) => Status::Available(x.saturating_sub(*y)), | ||||
|             (Status::Failed, _) => Status::Failed, | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| struct ReaderSut { | ||||
|     status: Status, | ||||
|     buffer: *mut [u8], | ||||
|     producer: DmaMock, | ||||
|     consumer: ReadableDmaRingBuffer<'static, u8>, | ||||
| } | ||||
| 
 | ||||
| impl Debug for ReaderSut { | ||||
|     fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { | ||||
|         <DmaMock as Debug>::fmt(&self.producer, f) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| struct ReaderTest; | ||||
| 
 | ||||
| impl StateMachineTest for ReaderTest { | ||||
|     type SystemUnderTest = ReaderSut; | ||||
|     type Reference = ReaderSM; | ||||
| 
 | ||||
|     fn init_test(ref_status: &<Self::Reference as ReferenceStateMachine>::State) -> Self::SystemUnderTest { | ||||
|         let buffer = Box::into_raw(Box::new([0; CAP])); | ||||
|         ReaderSut { | ||||
|             status: ref_status.clone(), | ||||
|             buffer, | ||||
|             producer: DmaMock::default(), | ||||
|             consumer: ReadableDmaRingBuffer::new(unsafe { &mut *buffer }), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn teardown(state: Self::SystemUnderTest) { | ||||
|         unsafe { | ||||
|             let _ = Box::from_raw(state.buffer); | ||||
|         }; | ||||
|     } | ||||
| 
 | ||||
|     fn apply( | ||||
|         mut sut: Self::SystemUnderTest, | ||||
|         ref_state: &<Self::Reference as ReferenceStateMachine>::State, | ||||
|         transition: <Self::Reference as ReferenceStateMachine>::Transition, | ||||
|     ) -> Self::SystemUnderTest { | ||||
|         match transition { | ||||
|             ReaderTransition::Write(x) => sut.producer.advance(x), | ||||
|             ReaderTransition::Clear => { | ||||
|                 sut.consumer.clear(&mut sut.producer); | ||||
|             } | ||||
|             ReaderTransition::ReadUpTo(x) => { | ||||
|                 let status = sut.status; | ||||
|                 let ReaderSut { | ||||
|                     ref mut producer, | ||||
|                     ref mut consumer, | ||||
|                     .. | ||||
|                 } = sut; | ||||
|                 let mut buf = vec![0; x]; | ||||
|                 let res = consumer.read(producer, &mut buf); | ||||
|                 match status { | ||||
|                     Status::Available(n) => { | ||||
|                         let readable = x.min(n); | ||||
| 
 | ||||
|                         assert_eq!(res.unwrap().0, readable); | ||||
|                     } | ||||
|                     Status::Failed => assert!(res.is_err()), | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         ReaderSut { | ||||
|             status: ref_state.clone(), | ||||
|             ..sut | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| prop_state_machine! { | ||||
|     #[test] | ||||
|     fn reader_state_test(sequential 1..20 => ReaderTest); | ||||
| } | ||||
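Read together, the reference machine and the assertions in `apply` pin down one property: `ReadableDmaRingBuffer::read` returns exactly `min(requested, buffered)` elements while the model is `Available`, and an error once the model has entered `Failed` (the DMA has overwritten unread data). Restated as a standalone helper (illustrative only, using the `Status` type from mod.rs above):

    // Illustrative helper, not part of the committed file.
    /// Expected outcome of a read of `requested` elements given the model state:
    /// `Some(len)` for a successful read, `None` when an overrun error is expected.
    fn expected_read_result(model: &Status, requested: usize) -> Option<usize> {
        match model {
            // With `n` unread elements buffered, at most `n` can be returned.
            Status::Available(n) => Some(requested.min(*n)),
            // The DMA lapped the reader, so the next read must report the overrun.
            Status::Failed => None,
        }
    }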
embassy-stm32/src/dma/ringbuffer/tests/prop_test/writer.rs (new file, 121 lines)
| @ -0,0 +1,121 @@ | ||||
| use core::fmt::Debug; | ||||
| 
 | ||||
| use super::*; | ||||
| 
 | ||||
| #[derive(Debug, Clone)] | ||||
| enum WriterTransition { | ||||
|     Read(usize), | ||||
|     WriteUpTo(usize), | ||||
|     Clear, | ||||
| } | ||||
| 
 | ||||
| struct WriterSM; | ||||
| 
 | ||||
| impl ReferenceStateMachine for WriterSM { | ||||
|     type State = Status; | ||||
|     type Transition = WriterTransition; | ||||
| 
 | ||||
|     fn init_state() -> BoxedStrategy<Self::State> { | ||||
|         strategy::Just(Status::new(CAP)).boxed() | ||||
|     } | ||||
| 
 | ||||
|     fn transitions(_state: &Self::State) -> BoxedStrategy<Self::Transition> { | ||||
|         prop_oneof![ | ||||
|             (1..50_usize).prop_map(WriterTransition::Read), | ||||
|             (1..50_usize).prop_map(WriterTransition::WriteUpTo), | ||||
|             strategy::Just(WriterTransition::Clear), | ||||
|         ] | ||||
|         .boxed() | ||||
|     } | ||||
| 
 | ||||
|     fn apply(status: Self::State, transition: &Self::Transition) -> Self::State { | ||||
|         match (status, transition) { | ||||
|             (_, WriterTransition::Clear) => Status::Available(CAP), | ||||
|             (Status::Available(x), WriterTransition::Read(y)) => { | ||||
|                 if x < *y { | ||||
|                     Status::Failed | ||||
|                 } else { | ||||
|                     Status::Available(x - y) | ||||
|                 } | ||||
|             } | ||||
|             (Status::Available(x), WriterTransition::WriteUpTo(y)) => Status::Available((x + *y).min(CAP)), | ||||
|             (Status::Failed, _) => Status::Failed, | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| struct WriterSut { | ||||
|     status: Status, | ||||
|     buffer: *mut [u8], | ||||
|     producer: WritableDmaRingBuffer<'static, u8>, | ||||
|     consumer: DmaMock, | ||||
| } | ||||
| 
 | ||||
| impl Debug for WriterSut { | ||||
|     fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { | ||||
|         <DmaMock as Debug>::fmt(&self.consumer, f) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| struct WriterTest; | ||||
| 
 | ||||
| impl StateMachineTest for WriterTest { | ||||
|     type SystemUnderTest = WriterSut; | ||||
|     type Reference = WriterSM; | ||||
| 
 | ||||
|     fn init_test(ref_status: &<Self::Reference as ReferenceStateMachine>::State) -> Self::SystemUnderTest { | ||||
|         let buffer = Box::into_raw(Box::new([0; CAP])); | ||||
|         WriterSut { | ||||
|             status: ref_status.clone(), | ||||
|             buffer, | ||||
|             producer: WritableDmaRingBuffer::new(unsafe { &mut *buffer }), | ||||
|             consumer: DmaMock::default(), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     fn teardown(state: Self::SystemUnderTest) { | ||||
|         unsafe { | ||||
|             let _ = Box::from_raw(state.buffer); | ||||
|         }; | ||||
|     } | ||||
| 
 | ||||
|     fn apply( | ||||
|         mut sut: Self::SystemUnderTest, | ||||
|         ref_status: &<Self::Reference as ReferenceStateMachine>::State, | ||||
|         transition: <Self::Reference as ReferenceStateMachine>::Transition, | ||||
|     ) -> Self::SystemUnderTest { | ||||
|         match transition { | ||||
|             WriterTransition::Read(x) => sut.consumer.advance(x), | ||||
|             WriterTransition::Clear => { | ||||
|                 sut.producer.clear(&mut sut.consumer); | ||||
|             } | ||||
|             WriterTransition::WriteUpTo(x) => { | ||||
|                 let status = sut.status; | ||||
|                 let WriterSut { | ||||
|                     ref mut producer, | ||||
|                     ref mut consumer, | ||||
|                     .. | ||||
|                 } = sut; | ||||
|                 let mut buf = vec![0; x]; | ||||
|                 let res = producer.write(consumer, &mut buf); | ||||
|                 match status { | ||||
|                     Status::Available(n) => { | ||||
|                         let writable = x.min(CAP - n.min(CAP)); | ||||
|                         assert_eq!(res.unwrap().0, writable); | ||||
|                     } | ||||
|                     Status::Failed => assert!(res.is_err()), | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         WriterSut { | ||||
|             status: ref_status.clone(), | ||||
|             ..sut | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| prop_state_machine! { | ||||
|     #[test] | ||||
|     fn writer_state_test(sequential 1..20 => WriterTest); | ||||
| } | ||||
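The writer test is the mirror image: software fills the buffer through `WritableDmaRingBuffer::write` while the mocked DMA drains it, so the model's `Available(n)` counts elements not yet drained and `CAP - n` is the writable space (zero at start, since the model is initialised to `Available(CAP)`). The corresponding standalone check (illustrative only, using `Status` and `CAP` from mod.rs above):

    // Illustrative helper, not part of the committed file.
    /// Expected outcome of a write of `requested` elements given the model state:
    /// `Some(len)` for a successful write, `None` when an error is expected.
    fn expected_write_result(model: &Status, requested: usize) -> Option<usize> {
        match model {
            // `occupied` elements have not been drained yet, so only the remaining
            // `CAP - occupied` slots can accept new data.
            Status::Available(occupied) => Some(requested.min(CAP - occupied.min(CAP))),
            // The DMA drained more than was ever written; the next write must fail.
            Status::Failed => None,
        }
    }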