1use core::{
4 cell::UnsafeCell,
5 fmt,
6 marker::PhantomData,
7 ops::{Deref, DerefMut},
8 sync::atomic::{AtomicBool, Ordering},
9};
10
11use super::{LocalIrqDisabled, PreemptDisabled, guard::SpinGuardian};
12use crate::task::atomic_mode::AsAtomicModeGuard;
13
/// A spin lock protecting a value of type `T`.
///
/// `G` selects the guardian behavior active while the lock is held
/// (defaults to [`PreemptDisabled`]; see `disable_irq` for the
/// IRQ-disabling flavor).
//
// `repr(transparent)`: `phantom` is zero-sized, so every `SpinLock<T, G>`
// is layout-identical to `SpinLockInner<T>` regardless of `G`. The pointer
// cast in `disable_irq` relies on this invariant.
#[repr(transparent)]
pub struct SpinLock<T: ?Sized, G = PreemptDisabled> {
    phantom: PhantomData<G>,
    inner: SpinLockInner<T>,
}
36
/// The guardian-independent state of a [`SpinLock`].
struct SpinLockInner<T: ?Sized> {
    // `true` while the lock is held; accessed with acquire/release ordering.
    lock: AtomicBool,
    // The protected value; only accessed while `lock` is held (or via
    // `get_mut` with exclusive access).
    val: UnsafeCell<T>,
}
41
42impl<T, G> SpinLock<T, G> {
43 pub const fn new(val: T) -> Self {
45 let lock_inner = SpinLockInner {
46 lock: AtomicBool::new(false),
47 val: UnsafeCell::new(val),
48 };
49 Self {
50 phantom: PhantomData,
51 inner: lock_inner,
52 }
53 }
54}
55
impl<T: ?Sized> SpinLock<T, PreemptDisabled> {
    /// Converts a reference to this lock into a reference to its
    /// IRQ-disabling flavor, so callers can acquire it with local
    /// IRQs disabled instead of only preemption disabled.
    pub fn disable_irq(&self) -> &SpinLock<T, LocalIrqDisabled> {
        let ptr = self as *const SpinLock<T, PreemptDisabled>;
        let ptr = ptr as *const SpinLock<T, LocalIrqDisabled>;
        // SAFETY: `G` occurs only in `PhantomData` and `SpinLock` is
        // `#[repr(transparent)]` over `SpinLockInner<T>`, so both generic
        // instantiations have identical layout. The returned reference
        // borrows `self`, so it cannot outlive the lock.
        unsafe { &*ptr }
    }
}
69
70impl<T: ?Sized, G: SpinGuardian> SpinLock<T, G> {
71 pub fn lock(&self) -> SpinLockGuard<'_, T, G> {
73 let inner_guard = G::guard();
75 self.acquire_lock();
76 SpinLockGuard {
77 lock: self,
78 guard: inner_guard,
79 }
80 }
81
82 pub fn try_lock(&self) -> Option<SpinLockGuard<'_, T, G>> {
84 let inner_guard = G::guard();
85 if self.try_acquire_lock() {
86 let lock_guard = SpinLockGuard {
87 lock: self,
88 guard: inner_guard,
89 };
90 return Some(lock_guard);
91 }
92 None
93 }
94
95 pub fn get_mut(&mut self) -> &mut T {
100 self.inner.val.get_mut()
101 }
102
103 fn acquire_lock(&self) {
105 while !self.try_acquire_lock() {
106 core::hint::spin_loop();
107 }
108 }
109
110 fn try_acquire_lock(&self) -> bool {
111 self.inner
112 .lock
113 .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
114 .is_ok()
115 }
116
117 fn release_lock(&self) {
118 self.inner.lock.store(false, Ordering::Release);
119 }
120}
121
impl<T: ?Sized + fmt::Debug, G> fmt::Debug for SpinLock<T, G> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Delegates to the `UnsafeCell` wrapper's `Debug` impl.
        // NOTE(review): `UnsafeCell`'s std `Debug` impl redacts its
        // contents (it cannot safely read them), so this prints a
        // placeholder and the `T: Debug` bound is not actually exercised.
        // Printing the value soundly would require taking the lock, which
        // needs `G: SpinGuardian` — confirm the placeholder is intended.
        fmt::Debug::fmt(&self.inner.val, f)
    }
}
127
// SAFETY: moving the lock to another thread moves the protected `T` with
// it, so `T: Send` is the required bound.
unsafe impl<T: ?Sized + Send, G> Send for SpinLock<T, G> {}
// SAFETY: sharing the lock lets any thread obtain (mutable) access to the
// `T` through `lock()`, so — as with `std::sync::Mutex` — `T: Send`, not
// `T: Sync`, is the bound that makes sharing sound.
unsafe impl<T: ?Sized + Send, G> Sync for SpinLock<T, G> {}
131
/// A guard granting exclusive access to the value protected by a
/// [`SpinLock`]; the lock is released when the guard is dropped.
#[clippy::has_significant_drop]
#[must_use]
pub struct SpinLockGuard<'a, T: ?Sized, G: SpinGuardian> {
    // The guardian's guard (e.g. keeping preemption or local IRQs
    // disabled). Fields drop after `Drop::drop` clears the lock bit, so
    // the guarded mode is exited only after the lock is released.
    guard: G::Guard,
    // The lock this guard releases on drop.
    lock: &'a SpinLock<T, G>,
}
139
140impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for SpinLockGuard<'_, T, G> {
141 fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
142 self.guard.as_atomic_mode_guard()
143 }
144}
145
impl<T: ?Sized, G: SpinGuardian> Deref for SpinLockGuard<'_, T, G> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: the guard's existence proves the lock is held, so no
        // other thread can concurrently create a mutable reference to
        // the protected value.
        unsafe { &*self.lock.inner.val.get() }
    }
}
153
impl<T: ?Sized, G: SpinGuardian> DerefMut for SpinLockGuard<'_, T, G> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: the guard's existence proves the lock is held, and the
        // exclusive borrow of the guard ensures this is the only
        // reference to the protected value.
        unsafe { &mut *self.lock.inner.val.get() }
    }
}
159
impl<T: ?Sized, G: SpinGuardian> Drop for SpinLockGuard<'_, T, G> {
    fn drop(&mut self) {
        // Clear the lock bit first; the `guard` field drops afterwards,
        // so the guarded (atomic) mode is exited only after the lock is
        // free for other takers.
        self.lock.release_lock();
    }
}
165
166impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for SpinLockGuard<'_, T, G> {
167 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
168 fmt::Debug::fmt(&**self, f)
169 }
170}
171
// The guard must not move to another thread.
// NOTE(review): presumably because the guardian's state (disabled
// preemption/IRQs) is per-CPU and must be undone on the CPU that entered
// it — confirm against the `SpinGuardian` implementations.
impl<T: ?Sized, G: SpinGuardian> !Send for SpinLockGuard<'_, T, G> {}

// SAFETY: sharing a guard across threads only hands out `&T` (via
// `Deref`; `DerefMut` needs an exclusive borrow), so `T: Sync` is the
// required and sufficient bound.
unsafe impl<T: ?Sized + Sync, G: SpinGuardian> Sync for SpinLockGuard<'_, T, G> {}