// ostd/sync/spin.rs

// SPDX-License-Identifier: MPL-2.0
2
3use core::{
4    cell::UnsafeCell,
5    fmt,
6    marker::PhantomData,
7    ops::{Deref, DerefMut},
8    sync::atomic::{AtomicBool, Ordering},
9};
10
11use super::{LocalIrqDisabled, PreemptDisabled, guard::SpinGuardian};
12use crate::task::atomic_mode::AsAtomicModeGuard;
13
/// A spin lock.
///
/// # Guard behavior
///
/// The type `G` specifies the guard behavior of the spin lock. While holding the lock,
/// - if `G` is [`PreemptDisabled`], preemption is disabled;
/// - if `G` is [`LocalIrqDisabled`], local IRQs are disabled.
///
/// `G` can also be provided by crates other than ostd,
/// if it behaves similarly to [`PreemptDisabled`] or [`LocalIrqDisabled`].
///
/// The guard behavior can be temporarily upgraded from [`PreemptDisabled`] to
/// [`LocalIrqDisabled`] using the [`disable_irq`] method.
///
/// [`disable_irq`]: Self::disable_irq
// `#[repr(transparent)]` guarantees that all `SpinLock<T, G>` share the layout of
// `SpinLockInner<T>` regardless of `G` (a zero-sized `PhantomData`); `disable_irq`
// relies on this to cast between guard-behavior types.
#[repr(transparent)]
pub struct SpinLock<T: ?Sized, G = PreemptDisabled> {
    phantom: PhantomData<G>,
    /// Only the last field of a struct may have a dynamically sized type.
    /// That's why SpinLockInner is put in the last field.
    inner: SpinLockInner<T>,
}
36
/// The lock state plus the protected value.
struct SpinLockInner<T: ?Sized> {
    /// Whether the lock is currently held (`true` means locked).
    lock: AtomicBool,
    /// The protected value; `UnsafeCell` allows mutation through `&self`
    /// once the lock has been acquired.
    val: UnsafeCell<T>,
}
41
42impl<T, G> SpinLock<T, G> {
43    /// Creates a new spin lock.
44    pub const fn new(val: T) -> Self {
45        let lock_inner = SpinLockInner {
46            lock: AtomicBool::new(false),
47            val: UnsafeCell::new(val),
48        };
49        Self {
50            phantom: PhantomData,
51            inner: lock_inner,
52        }
53    }
54}
55
impl<T: ?Sized> SpinLock<T, PreemptDisabled> {
    /// Converts the guard behavior from disabling preemption to disabling IRQs.
    pub fn disable_irq(&self) -> &SpinLock<T, LocalIrqDisabled> {
        let ptr = self as *const SpinLock<T, PreemptDisabled>;
        let ptr = ptr as *const SpinLock<T, LocalIrqDisabled>;
        // SAFETY:
        // 1. The types `SpinLock<T, PreemptDisabled>`, `SpinLockInner<T>` and `SpinLock<T,
        //    LocalIrqDisabled>` have the same memory layout guaranteed by `#[repr(transparent)]`.
        // 2. The specified memory location can be borrowed as an immutable reference for the
        //    specified lifetime.
        unsafe { &*ptr }
    }
}
69
70impl<T: ?Sized, G: SpinGuardian> SpinLock<T, G> {
71    /// Acquires the spin lock.
72    pub fn lock(&self) -> SpinLockGuard<'_, T, G> {
73        // Notice the guard must be created before acquiring the lock.
74        let inner_guard = G::guard();
75        self.acquire_lock();
76        SpinLockGuard {
77            lock: self,
78            guard: inner_guard,
79        }
80    }
81
82    /// Tries acquiring the spin lock immedidately.
83    pub fn try_lock(&self) -> Option<SpinLockGuard<'_, T, G>> {
84        let inner_guard = G::guard();
85        if self.try_acquire_lock() {
86            let lock_guard = SpinLockGuard {
87                lock: self,
88                guard: inner_guard,
89            };
90            return Some(lock_guard);
91        }
92        None
93    }
94
95    /// Returns a mutable reference to the underlying data.
96    ///
97    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
98    /// already statically guaranteed that access to the data is exclusive.
99    pub fn get_mut(&mut self) -> &mut T {
100        self.inner.val.get_mut()
101    }
102
103    /// Acquires the spin lock, otherwise busy waiting
104    fn acquire_lock(&self) {
105        while !self.try_acquire_lock() {
106            core::hint::spin_loop();
107        }
108    }
109
110    fn try_acquire_lock(&self) -> bool {
111        self.inner
112            .lock
113            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
114            .is_ok()
115    }
116
117    fn release_lock(&self) {
118        self.inner.lock.store(false, Ordering::Release);
119    }
120}
121
impl<T: ?Sized + fmt::Debug, G> fmt::Debug for SpinLock<T, G> {
    /// Formats the lock without acquiring it: this delegates to
    /// `UnsafeCell`'s `Debug` impl, which does not read the protected value.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.inner.val, f)
    }
}
127
// SAFETY: Only a single lock holder is permitted to access the inner data of the
// `SpinLock`. Sending the lock moves the protected value with it, so `T: Send` suffices.
unsafe impl<T: ?Sized + Send, G> Send for SpinLock<T, G> {}
// SAFETY: Sharing the lock lets any thread become the (sole) lock holder and obtain
// `&mut T`, which effectively transfers `T` to that thread. Hence `T: Send` (rather
// than `T: Sync`) is the required bound, mirroring `std::sync::Mutex`.
unsafe impl<T: ?Sized + Send, G> Sync for SpinLock<T, G> {}
131
/// A guard that provides exclusive access to the data protected by a [`SpinLock`].
#[clippy::has_significant_drop]
#[must_use]
pub struct SpinLockGuard<'a, T: ?Sized, G: SpinGuardian> {
    /// The inner guard that enforces `G`'s guard behavior (e.g., keeping
    /// preemption or local IRQs disabled) for as long as the lock is held.
    guard: G::Guard,
    /// The lock being held; used to release it when this guard is dropped.
    lock: &'a SpinLock<T, G>,
}
139
impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for SpinLockGuard<'_, T, G> {
    /// Delegates to the inner `G::Guard`, which backs the atomic-mode claim
    /// while the lock is held.
    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
        self.guard.as_atomic_mode_guard()
    }
}
145
impl<T: ?Sized, G: SpinGuardian> Deref for SpinLockGuard<'_, T, G> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: This guard holds the lock, so no other lock holder can exist;
        // the shared borrow of the protected value is valid for the guard's lifetime.
        unsafe { &*self.lock.inner.val.get() }
    }
}
153
impl<T: ?Sized, G: SpinGuardian> DerefMut for SpinLockGuard<'_, T, G> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: This guard holds the lock, so access is exclusive; holding
        // `&mut self` additionally ensures no other borrow through this guard.
        unsafe { &mut *self.lock.inner.val.get() }
    }
}
159
impl<T: ?Sized, G: SpinGuardian> Drop for SpinLockGuard<'_, T, G> {
    fn drop(&mut self) {
        // Release the lock here; the `guard` field is dropped only after this
        // body runs, so the guard behavior (e.g., disabled preemption/IRQs per
        // `G`) remains in effect for the whole critical section.
        self.lock.release_lock();
    }
}
165
impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for SpinLockGuard<'_, T, G> {
    /// Formats the protected value itself (via `Deref`); holding the guard
    /// makes reading the value sound.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
171
// The guard must not be sent to another thread — presumably because the inner
// `G::Guard` manages CPU-local state (disabled preemption/IRQs) that must be
// restored where it was acquired. NOTE(review): confirm against `SpinGuardian`.
impl<T: ?Sized, G: SpinGuardian> !Send for SpinLockGuard<'_, T, G> {}

// SAFETY: A shared reference to the guard only grants `&T` access to the
// protected value (via `Deref`), so sharing the guard across threads is sound
// as long as `T: Sync`.
unsafe impl<T: ?Sized + Sync, G: SpinGuardian> Sync for SpinLockGuard<'_, T, G> {}