ostd/sync/rwlock.rs

// SPDX-License-Identifier: MPL-2.0

use core::{
    cell::UnsafeCell,
    fmt,
    marker::PhantomData,
    ops::{Deref, DerefMut},
    sync::atomic::{
        AtomicUsize,
        Ordering::{AcqRel, Acquire, Relaxed, Release},
    },
};

use super::{
    PreemptDisabled,
    guard::{GuardTransfer, SpinGuardian},
};
use crate::task::atomic_mode::AsAtomicModeGuard;

/// Spin-based Read-write Lock
///
/// # Overview
///
/// This lock allows multiple readers, or at most one writer, to access the
/// protected data at any point in time. The writer of this lock has exclusive
/// access to modify the underlying data, while the readers are allowed shared
/// and read-only access.
///
/// Reading and writing cannot be active simultaneously: while one is in
/// progress, attempts at the other will spin-wait. This makes the lock
/// suitable for scenarios where it is expected to be held for short periods
/// of time and the overhead of context switching is higher than the cost of
/// spinning.
///
/// In addition to traditional read and write locks, this implementation
/// provides an upgradeable read lock (`upread lock`). The `upread lock`
/// can be upgraded to a write lock atomically, which is useful in scenarios
/// where a decision to write is made after reading.
///
/// The type parameter `T` represents the data that this lock is protecting.
/// It is necessary for `T` to satisfy [`Send`] to be shared across tasks and
/// [`Sync`] to permit concurrent access via readers. The [`Deref`] method (and
/// [`DerefMut`] for the writer) is implemented for the RAII guards returned
/// by the locking methods, which allows access to the protected data
/// while the lock is held.
///
/// # Usage
///
/// The lock can be used in scenarios where data needs to be read frequently
/// but written to occasionally.
///
/// Use the `upread lock` in scenarios where a check is performed before
/// modification; this avoids deadlocks and improves efficiency.
///
/// This lock should not be used in scenarios where lock-holding times are
/// long, as that wastes CPU resources on spinning.
///
/// # About Guard
///
/// See the comments of [`SpinLock`].
///
/// # Examples
///
/// ```
/// use ostd::sync::RwLock;
/// let lock = RwLock::new(5);
///
/// // Many read locks can be held at once.
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
///
///     // An upgradeable read lock can share access to the data with read locks.
///     let r3 = lock.upread();
///     assert_eq!(*r3, 5);
///     drop(r1);
///     drop(r2);
///     // The read locks are dropped at this point.
///
///     // An upread lock can only be upgraded successfully after all the
///     // read locks are released; otherwise it will spin-wait.
///     let mut w1 = r3.upgrade();
///     *w1 += 1;
///     assert_eq!(*w1, 6);
/// }   // The upread lock is dropped at this point.
///
/// {
///     // Only one write lock can be held at a time.
///     let mut w2 = lock.write();
///     *w2 += 1;
///     assert_eq!(*w2, 7);
/// }   // The write lock is dropped at this point.
/// ```
///
/// [`SpinLock`]: super::SpinLock
pub struct RwLock<T: ?Sized, Guard = PreemptDisabled> {
    guard: PhantomData<Guard>,
    /// The internal representation of the lock state is as follows:
    /// - **Bit 63:** Writer lock.
    /// - **Bit 62:** Upgradeable reader lock.
    /// - **Bit 61:** Indicates if an upgradeable reader is being upgraded.
    /// - **Bits 60-0:** Reader lock count.
    lock: AtomicUsize,
    val: UnsafeCell<T>,
}

const READER: usize = 1;
const WRITER: usize = 1 << (usize::BITS - 1);
const UPGRADEABLE_READER: usize = 1 << (usize::BITS - 2);
const BEING_UPGRADED: usize = 1 << (usize::BITS - 3);
const MAX_READER: usize = 1 << (usize::BITS - 4);
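
// A minimal compile-time sanity sketch (not part of the lock's algorithm): the
// flag bits above must not overlap with one another or with any reader count
// below `MAX_READER`.
const _: () = assert!(WRITER & (UPGRADEABLE_READER | BEING_UPGRADED) == 0);
const _: () = assert!(UPGRADEABLE_READER & BEING_UPGRADED == 0);
const _: () = assert!((MAX_READER - 1) & (WRITER | UPGRADEABLE_READER | BEING_UPGRADED) == 0);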

impl<T, G> RwLock<T, G> {
    /// Creates a new spin-based read-write lock with an initial value.
    pub const fn new(val: T) -> Self {
        Self {
            guard: PhantomData,
            lock: AtomicUsize::new(0),
            val: UnsafeCell::new(val),
        }
    }
}

impl<T: ?Sized, G: SpinGuardian> RwLock<T, G> {
    /// Acquires a read lock, spin-waiting until it can be acquired.
    ///
    /// The calling thread will spin-wait until there are no writers or
    /// upgrading upreaders present. There is no guarantee for the order
    /// in which other readers or writers waiting simultaneously will
    /// obtain the lock.
    pub fn read(&self) -> RwLockReadGuard<'_, T, G> {
        loop {
            if let Some(readguard) = self.try_read() {
                return readguard;
            } else {
                core::hint::spin_loop();
            }
        }
    }

    /// Acquires a write lock, spin-waiting until it can be acquired.
    ///
    /// The calling thread will spin-wait until there are no other writers,
    /// upreaders or readers present. There is no guarantee for the order
    /// in which other readers or writers waiting simultaneously will
    /// obtain the lock.
    pub fn write(&self) -> RwLockWriteGuard<'_, T, G> {
        loop {
            if let Some(writeguard) = self.try_write() {
                return writeguard;
            } else {
                core::hint::spin_loop();
            }
        }
    }

    /// Acquires an upreader, spin-waiting until it can be acquired.
    ///
    /// The calling thread will spin-wait until there are no other writers
    /// or upreaders. There is no guarantee for the order in which other
    /// readers or writers waiting simultaneously will obtain the lock.
    ///
    /// An upreader does not block new readers until it tries to upgrade.
    /// Before the upgrade method is invoked, an upreader behaves just like
    /// a reader. However, only one upreader can exist at any time, to avoid
    /// deadlock in the upgrade method.
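    ///
    /// # Examples
    ///
    /// A minimal sketch of the sharing behavior described above:
    ///
    /// ```
    /// use ostd::sync::RwLock;
    ///
    /// let lock = RwLock::new(5);
    /// let upreader = lock.upread();
    /// // An upreader shares access to the data with regular readers...
    /// assert_eq!(*lock.read(), 5);
    /// // ...but at most one upreader can exist at a time.
    /// assert!(lock.try_upread().is_none());
    /// drop(upreader);
    /// assert!(lock.try_upread().is_some());
    /// ```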
    pub fn upread(&self) -> RwLockUpgradeableGuard<'_, T, G> {
        loop {
            if let Some(guard) = self.try_upread() {
                return guard;
            } else {
                core::hint::spin_loop();
            }
        }
    }

    /// Attempts to acquire a read lock.
    ///
    /// This function will never spin-wait and will return immediately.
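    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use ostd::sync::RwLock;
    ///
    /// let lock = RwLock::new(5);
    /// let writer = lock.write();
    /// // A read lock cannot be acquired while the write lock is held.
    /// assert!(lock.try_read().is_none());
    /// drop(writer);
    /// assert_eq!(*lock.try_read().unwrap(), 5);
    /// ```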
    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, T, G>> {
        let guard = G::read_guard();
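        // Optimistically add a reader. If a writer, an upgrading upreader, or
        // a reader count overflow is then observed, roll the addition back.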
        let lock = self.lock.fetch_add(READER, Acquire);
        if lock & (WRITER | MAX_READER | BEING_UPGRADED) == 0 {
            Some(RwLockReadGuard { inner: self, guard })
        } else {
            self.lock.fetch_sub(READER, Release);
            None
        }
    }

    /// Attempts to acquire a write lock.
    ///
    /// This function will never spin-wait and will return immediately.
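    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use ostd::sync::RwLock;
    ///
    /// let lock = RwLock::new(5);
    /// let reader = lock.read();
    /// // A write lock cannot be acquired while a read lock is held.
    /// assert!(lock.try_write().is_none());
    /// drop(reader);
    /// assert!(lock.try_write().is_some());
    /// ```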
    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, T, G>> {
        let guard = G::guard();
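        // The write lock can be acquired only when no reader, writer, or
        // upreader is present, i.e., when the lock state is exactly zero.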
        if self
            .lock
            .compare_exchange(0, WRITER, Acquire, Relaxed)
            .is_ok()
        {
            Some(RwLockWriteGuard { inner: self, guard })
        } else {
            None
        }
    }

    /// Attempts to acquire an upread lock.
    ///
    /// This function will never spin-wait and will return immediately.
    pub fn try_upread(&self) -> Option<RwLockUpgradeableGuard<'_, T, G>> {
        let guard = G::guard();
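        // Optimistically set the upgradeable-reader bit. Acquisition succeeds
        // only if no writer or other upreader was present; if only a writer
        // was present, the bit was set spuriously and must be cleared again.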
        let lock = self.lock.fetch_or(UPGRADEABLE_READER, Acquire) & (WRITER | UPGRADEABLE_READER);
        if lock == 0 {
            return Some(RwLockUpgradeableGuard { inner: self, guard });
        } else if lock == WRITER {
            self.lock.fetch_sub(UPGRADEABLE_READER, Release);
        }
        None
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
    /// already statically guaranteed that access to the data is exclusive.
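    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use ostd::sync::RwLock;
    ///
    /// let mut lock = RwLock::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.read(), 10);
    /// ```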
    pub fn get_mut(&mut self) -> &mut T {
        self.val.get_mut()
    }

    /// Returns a raw pointer to the underlying data.
    ///
    /// This method is safe, but it's up to the caller to ensure that access to the data behind it
    /// is still safe.
    pub(super) fn as_ptr(&self) -> *mut T {
        self.val.get()
    }
}

impl<T: ?Sized + fmt::Debug, G> fmt::Debug for RwLock<T, G> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.val, f)
    }
}

/// Because more than one reader can obtain an immutable reference to `T`,
/// `T` must be `Sync` to guarantee safe sharing.
unsafe impl<T: ?Sized + Send, G> Send for RwLock<T, G> {}
unsafe impl<T: ?Sized + Send + Sync, G> Sync for RwLock<T, G> {}

impl<T: ?Sized, G: SpinGuardian> !Send for RwLockWriteGuard<'_, T, G> {}
unsafe impl<T: ?Sized + Sync, G: SpinGuardian> Sync for RwLockWriteGuard<'_, T, G> {}

impl<T: ?Sized, G: SpinGuardian> !Send for RwLockReadGuard<'_, T, G> {}
unsafe impl<T: ?Sized + Sync, G: SpinGuardian> Sync for RwLockReadGuard<'_, T, G> {}

impl<T: ?Sized, G: SpinGuardian> !Send for RwLockUpgradeableGuard<'_, T, G> {}
unsafe impl<T: ?Sized + Sync, G: SpinGuardian> Sync for RwLockUpgradeableGuard<'_, T, G> {}

/// A guard that provides immutable data access.
#[clippy::has_significant_drop]
#[must_use]
pub struct RwLockReadGuard<'a, T: ?Sized, G: SpinGuardian> {
    guard: G::ReadGuard,
    inner: &'a RwLock<T, G>,
}

impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockReadGuard<'_, T, G> {
    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
        self.guard.as_atomic_mode_guard()
    }
}

impl<T: ?Sized, G: SpinGuardian> Deref for RwLockReadGuard<'_, T, G> {
    type Target = T;

    fn deref(&self) -> &T {
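        // SAFETY: While any read guard is alive, the reader count is non-zero,
        // which prevents writers from acquiring the lock, so shared read-only
        // access to the data is safe.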
        unsafe { &*self.inner.val.get() }
    }
}

impl<T: ?Sized, G: SpinGuardian> Drop for RwLockReadGuard<'_, T, G> {
    fn drop(&mut self) {
        self.inner.lock.fetch_sub(READER, Release);
    }
}

impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockReadGuard<'_, T, G> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

/// A guard that provides mutable data access.
pub struct RwLockWriteGuard<'a, T: ?Sized, G: SpinGuardian> {
    guard: G::Guard,
    inner: &'a RwLock<T, G>,
}

impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockWriteGuard<'_, T, G> {
    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
        self.guard.as_atomic_mode_guard()
    }
}

impl<T: ?Sized, G: SpinGuardian> Deref for RwLockWriteGuard<'_, T, G> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.inner.val.get() }
    }
}

impl<T: ?Sized, G: SpinGuardian> DerefMut for RwLockWriteGuard<'_, T, G> {
    fn deref_mut(&mut self) -> &mut Self::Target {
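        // SAFETY: While the write guard is alive, the writer bit is set, which
        // keeps all other readers, upreaders, and writers out, so exclusive
        // mutable access to the data is safe.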
        unsafe { &mut *self.inner.val.get() }
    }
}

impl<T: ?Sized, G: SpinGuardian> Drop for RwLockWriteGuard<'_, T, G> {
    fn drop(&mut self) {
        self.inner.lock.fetch_and(!WRITER, Release);
    }
}

impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockWriteGuard<'_, T, G> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

/// A guard that provides immutable data access but can be atomically
/// upgraded to `RwLockWriteGuard`.
pub struct RwLockUpgradeableGuard<'a, T: ?Sized, G: SpinGuardian> {
    guard: G::Guard,
    inner: &'a RwLock<T, G>,
}

impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockUpgradeableGuard<'_, T, G> {
    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
        self.guard.as_atomic_mode_guard()
    }
}

impl<'a, T: ?Sized, G: SpinGuardian> RwLockUpgradeableGuard<'a, T, G> {
    /// Upgrades this upread guard to a write guard atomically.
    ///
    /// After calling this method, subsequent readers will be blocked
    /// while previous readers remain unaffected. The calling thread
    /// will spin-wait until previous readers finish.
    pub fn upgrade(mut self) -> RwLockWriteGuard<'a, T, G> {
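        // Mark the upgrade as in progress so that no new readers can enter,
        // then spin until the existing readers have released their read locks.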
        self.inner.lock.fetch_or(BEING_UPGRADED, Acquire);
        loop {
            self = match self.try_upgrade() {
                Ok(guard) => return guard,
                Err(e) => e,
            };
        }
    }

    /// Attempts to upgrade this upread guard to a write guard atomically.
    ///
    /// This function will never spin-wait and will return immediately.
    pub fn try_upgrade(mut self) -> Result<RwLockWriteGuard<'a, T, G>, Self> {
        let res = self.inner.lock.compare_exchange(
            UPGRADEABLE_READER | BEING_UPGRADED,
            WRITER | UPGRADEABLE_READER,
            AcqRel,
            Relaxed,
        );
        if res.is_ok() {
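            // Transfer the guardian's guard to the new write guard, then drop
            // the upread guard, which clears the upgradeable-reader bit while
            // the writer bit keeps the lock exclusively held.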
            let inner = self.inner;
            let guard = self.guard.transfer_to();
            drop(self);
            Ok(RwLockWriteGuard { inner, guard })
        } else {
            Err(self)
        }
    }
}

impl<T: ?Sized, G: SpinGuardian> Deref for RwLockUpgradeableGuard<'_, T, G> {
    type Target = T;

    fn deref(&self) -> &T {
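        // SAFETY: While the upread guard is alive, the upgradeable-reader bit
        // is set, which prevents any writer from acquiring the lock, so shared
        // read-only access to the data is safe.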
        unsafe { &*self.inner.val.get() }
    }
}

impl<T: ?Sized, G: SpinGuardian> Drop for RwLockUpgradeableGuard<'_, T, G> {
    fn drop(&mut self) {
        self.inner.lock.fetch_sub(UPGRADEABLE_READER, Release);
    }
}

impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockUpgradeableGuard<'_, T, G> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}