Merge pull request #4017 from shilga/SpinlockMutex

embassy-rp: Spinlock mutex implementation
Dario Nieuwenhuis 2025-04-07 23:13:35 +00:00 committed by GitHub
commit a23e971d31
6 changed files with 226 additions and 42 deletions

ci.sh

@@ -343,6 +343,7 @@ rm out/tests/stm32u5a5zj/usart
# As of 2025-02-17 these tests work when run from flash
rm out/tests/pimoroni-pico-plus-2/multicore
rm out/tests/pimoroni-pico-plus-2/gpio_multicore
rm out/tests/pimoroni-pico-plus-2/spinlock_mutex_multicore
# Doesn't work when run from ram on the 2350
rm out/tests/pimoroni-pico-plus-2/flash
# This test passes locally but fails on the HIL, no idea why


@@ -1,6 +1,7 @@
use core::sync::atomic::{AtomicU8, Ordering};
use crate::pac;
use crate::spinlock::Spinlock;
struct RpSpinlockCs;
critical_section::set_impl!(RpSpinlockCs);
@@ -92,46 +93,4 @@ impl RpSpinlockCs {
}
}
pub struct Spinlock<const N: usize>(core::marker::PhantomData<()>)
where
Spinlock<N>: SpinlockValid;
impl<const N: usize> Spinlock<N>
where
Spinlock<N>: SpinlockValid,
{
/// Try to claim the spinlock. Will return `Some(Self)` if the lock is obtained, and `None` if the lock is
/// already in use somewhere else.
pub fn try_claim() -> Option<Self> {
let lock = pac::SIO.spinlock(N).read();
if lock > 0 {
Some(Self(core::marker::PhantomData))
} else {
None
}
}
/// Clear a locked spin-lock.
///
/// # Safety
///
/// Only call this function if you hold the spin-lock.
pub unsafe fn release() {
// Write (any value): release the lock
pac::SIO.spinlock(N).write_value(1);
}
}
impl<const N: usize> Drop for Spinlock<N>
where
Spinlock<N>: SpinlockValid,
{
fn drop(&mut self) {
// This is safe because we own the object, and hence hold the lock.
unsafe { Self::release() }
}
}
pub(crate) type Spinlock31 = Spinlock<31>;
pub trait SpinlockValid {}
impl SpinlockValid for Spinlock<31> {}
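This hunk removes the spinlock wrapper from the critical-section implementation; the type now lives in the new crate::spinlock module and is pulled back in through the use crate::spinlock::Spinlock; import added above. As a minimal, hypothetical sketch (not part of this commit; the function name and loop are illustrative only), the acquire side of a critical section could keep spinning on hardware spinlock 31 like this:

// Hypothetical crate-internal sketch, not part of this commit: spin until
// hardware spinlock 31 (used by the critical-section implementation) is claimed.
fn acquire_spinlock_31() {
    loop {
        if let Some(guard) = crate::spinlock::Spinlock::<31>::try_claim() {
            // Keep the lock held; forgetting the guard prevents Drop from releasing it here.
            core::mem::forget(guard);
            break;
        }
        // The other core (or an interrupt) holds the lock; spin and retry.
        core::hint::spin_loop();
    }
}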


@@ -41,6 +41,8 @@ pub mod rom_data;
#[cfg(feature = "rp2040")]
pub mod rtc;
pub mod spi;
mod spinlock;
pub mod spinlock_mutex;
#[cfg(feature = "time-driver")]
pub mod time_driver;
#[cfg(feature = "_rp235x")]


@@ -0,0 +1,75 @@
use crate::pac;
pub struct Spinlock<const N: usize>(core::marker::PhantomData<()>)
where
Spinlock<N>: SpinlockValid;
impl<const N: usize> Spinlock<N>
where
Spinlock<N>: SpinlockValid,
{
/// Try to claim the spinlock. Will return `Some(Self)` if the lock is obtained, and `None` if the lock is
/// already in use somewhere else.
pub fn try_claim() -> Option<Self> {
let lock = pac::SIO.spinlock(N).read();
if lock > 0 {
Some(Self(core::marker::PhantomData))
} else {
None
}
}
/// Clear a locked spin-lock.
///
/// # Safety
///
/// Only call this function if you hold the spin-lock.
pub unsafe fn release() {
// Write (any value): release the lock
pac::SIO.spinlock(N).write_value(1);
}
}
impl<const N: usize> Drop for Spinlock<N>
where
Spinlock<N>: SpinlockValid,
{
fn drop(&mut self) {
// This is safe because we own the object, and hence hold the lock.
unsafe { Self::release() }
}
}
pub trait SpinlockValid {}
impl SpinlockValid for Spinlock<0> {}
impl SpinlockValid for Spinlock<1> {}
impl SpinlockValid for Spinlock<2> {}
impl SpinlockValid for Spinlock<3> {}
impl SpinlockValid for Spinlock<4> {}
impl SpinlockValid for Spinlock<5> {}
impl SpinlockValid for Spinlock<6> {}
impl SpinlockValid for Spinlock<7> {}
impl SpinlockValid for Spinlock<8> {}
impl SpinlockValid for Spinlock<9> {}
impl SpinlockValid for Spinlock<10> {}
impl SpinlockValid for Spinlock<11> {}
impl SpinlockValid for Spinlock<12> {}
impl SpinlockValid for Spinlock<13> {}
impl SpinlockValid for Spinlock<14> {}
impl SpinlockValid for Spinlock<15> {}
impl SpinlockValid for Spinlock<16> {}
impl SpinlockValid for Spinlock<17> {}
impl SpinlockValid for Spinlock<18> {}
impl SpinlockValid for Spinlock<19> {}
impl SpinlockValid for Spinlock<20> {}
impl SpinlockValid for Spinlock<21> {}
impl SpinlockValid for Spinlock<22> {}
impl SpinlockValid for Spinlock<23> {}
impl SpinlockValid for Spinlock<24> {}
impl SpinlockValid for Spinlock<25> {}
impl SpinlockValid for Spinlock<26> {}
impl SpinlockValid for Spinlock<27> {}
impl SpinlockValid for Spinlock<28> {}
impl SpinlockValid for Spinlock<29> {}
impl SpinlockValid for Spinlock<30> {}
impl SpinlockValid for Spinlock<31> {}
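The spinlock module is declared private in lib.rs (see the hunk above), so this wrapper is only reachable from inside embassy-rp. A brief crate-internal usage sketch, with the spinlock number (3) and the function chosen purely for illustration:

// Hypothetical crate-internal usage sketch, not part of this commit.
use crate::spinlock::Spinlock;

fn try_do_exclusive_work() -> bool {
    match Spinlock::<3>::try_claim() {
        Some(guard) => {
            // ... short critical work while hardware spinlock 3 is held ...
            drop(guard); // dropping the guard releases the spinlock
            true
        }
        // The lock is held elsewhere (other core or an interrupt); do not block.
        None => false,
    }
}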


@@ -0,0 +1,93 @@
//! Mutex implementation utilizing a hardware spinlock
use core::marker::PhantomData;
use core::sync::atomic::Ordering;
use embassy_sync::blocking_mutex::raw::RawMutex;
use crate::spinlock::{Spinlock, SpinlockValid};
/// A mutex that allows borrowing data across executors and interrupts by utilizing a hardware spinlock
///
/// # Safety
///
/// This mutex is safe to share between different executors and interrupts.
pub struct SpinlockRawMutex<const N: usize> {
_phantom: PhantomData<()>,
}
unsafe impl<const N: usize> Send for SpinlockRawMutex<N> {}
unsafe impl<const N: usize> Sync for SpinlockRawMutex<N> {}
impl<const N: usize> SpinlockRawMutex<N> {
/// Create a new `SpinlockRawMutex`.
pub const fn new() -> Self {
Self { _phantom: PhantomData }
}
}
unsafe impl<const N: usize> RawMutex for SpinlockRawMutex<N>
where
Spinlock<N>: SpinlockValid,
{
const INIT: Self = Self::new();
fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
// Store the initial interrupt state in a stack variable
let interrupts_active = cortex_m::register::primask::read().is_active();
// Spin until we get the lock
loop {
// Need to disable interrupts to ensure that we will not deadlock
// if an interrupt or a higher-priority context tries to take the same spinlock after we acquire it
cortex_m::interrupt::disable();
// Ensure the compiler doesn't re-order accesses and violate safety here
core::sync::atomic::compiler_fence(Ordering::SeqCst);
if let Some(lock) = Spinlock::<N>::try_claim() {
// We just acquired the lock.
// Forget the guard so that dropping it does not immediately release the lock
core::mem::forget(lock);
break;
}
// We didn't get the lock, enable interrupts if they were enabled before we started
if interrupts_active {
// safety: interrupts are only re-enabled here if they were enabled before we started
unsafe {
cortex_m::interrupt::enable();
}
}
}
let retval = f();
// Ensure the compiler doesn't re-order accesses and violate safety here
core::sync::atomic::compiler_fence(Ordering::SeqCst);
// Release the spinlock to allow others to lock the mutex again
// safety: this point is only reached after the spinlock was acquired above
unsafe {
Spinlock::<N>::release();
}
// Re-enable interrupts if they were enabled before the mutex was locked
if interrupts_active {
// safety: interrupts are only re-enabled here if they were enabled before the lock was taken
unsafe {
cortex_m::interrupt::enable();
}
}
retval
}
}
pub mod blocking_mutex {
//! Mutex implementation utilizing a hardware spinlock
use embassy_sync::blocking_mutex::Mutex;
use crate::spinlock_mutex::SpinlockRawMutex;
/// A mutex that allows borrowing data across executors and interrupts by utilizing a hardware spinlock.
///
/// # Safety
///
/// This mutex is safe to share between different executors and interrupts.
pub type SpinlockMutex<const N: usize, T> = Mutex<SpinlockRawMutex<N>, T>;
}
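To complement the channel-based multicore test below, here is a hypothetical user-side sketch of the new public API: a blocking SpinlockMutex protecting a Cell counter shared between both cores. The spinlock number (2) and all names are assumptions; a real application should pick a spinlock that nothing else uses (the critical-section implementation, for instance, claims number 31).

// Hypothetical usage sketch, not part of this commit.
use core::cell::Cell;
use embassy_rp::spinlock_mutex::blocking_mutex::SpinlockMutex;

// Hardware spinlock 2 guards this counter across both cores.
static COUNTER: SpinlockMutex<2, Cell<u32>> = SpinlockMutex::new(Cell::new(0));

fn increment() -> u32 {
    // lock() spins on spinlock 2 with interrupts disabled, runs the closure, then releases.
    COUNTER.lock(|c| {
        c.set(c.get() + 1);
        c.get()
    })
}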


@@ -0,0 +1,54 @@
#![no_std]
#![no_main]
#[cfg(feature = "rp2040")]
teleprobe_meta::target!(b"rpi-pico");
#[cfg(feature = "rp235xb")]
teleprobe_meta::target!(b"pimoroni-pico-plus-2");
use defmt::{info, unwrap};
use embassy_executor::Executor;
use embassy_rp::multicore::{spawn_core1, Stack};
use embassy_rp::spinlock_mutex::SpinlockRawMutex;
use embassy_sync::channel::Channel;
use static_cell::StaticCell;
use {defmt_rtt as _, panic_probe as _};
static mut CORE1_STACK: Stack<1024> = Stack::new();
static EXECUTOR0: StaticCell<Executor> = StaticCell::new();
static EXECUTOR1: StaticCell<Executor> = StaticCell::new();
static CHANNEL0: Channel<SpinlockRawMutex<0>, bool, 1> = Channel::new();
static CHANNEL1: Channel<SpinlockRawMutex<1>, bool, 1> = Channel::new();
#[cortex_m_rt::entry]
fn main() -> ! {
let p = embassy_rp::init(Default::default());
spawn_core1(
p.CORE1,
unsafe { &mut *core::ptr::addr_of_mut!(CORE1_STACK) },
move || {
let executor1 = EXECUTOR1.init(Executor::new());
executor1.run(|spawner| unwrap!(spawner.spawn(core1_task())));
},
);
let executor0 = EXECUTOR0.init(Executor::new());
executor0.run(|spawner| unwrap!(spawner.spawn(core0_task())));
}
#[embassy_executor::task]
async fn core0_task() {
info!("CORE0 is running");
let ping = true;
CHANNEL0.send(ping).await;
let pong = CHANNEL1.receive().await;
assert_eq!(ping, pong);
info!("Test OK");
cortex_m::asm::bkpt();
}
#[embassy_executor::task]
async fn core1_task() {
info!("CORE1 is running");
let ping = CHANNEL0.receive().await;
CHANNEL1.send(ping).await;
}