// ostd/sync/rwlock.rs

1// SPDX-License-Identifier: MPL-2.0
2
3use core::{
4    cell::UnsafeCell,
5    fmt,
6    marker::PhantomData,
7    ops::{Deref, DerefMut},
8    sync::atomic::{
9        AtomicUsize,
10        Ordering::{AcqRel, Acquire, Relaxed, Release},
11    },
12};
13
14use super::{
15    PreemptDisabled,
16    guard::{GuardTransfer, SpinGuardian},
17};
18use crate::task::atomic_mode::AsAtomicModeGuard;
19
/// Spin-based Read-write Lock
///
/// # Overview
///
/// This lock allows for multiple readers, or at most one writer to access
/// at any point in time. The writer of this lock has exclusive access to
/// modify the underlying data, while the readers are allowed shared and
/// read-only access.
///
/// The writing and reading portions cannot be active simultaneously, when
/// one portion is in progress, the other portion will spin-wait. This is
/// suitable for scenarios where the lock is expected to be held for short
/// periods of time, and the overhead of context switching is higher than
/// the cost of spinning.
///
/// In addition to traditional read and write locks, this implementation
/// provides the upgradeable read lock (`upread lock`). The `upread lock`
/// can be upgraded to write locks atomically, useful in scenarios
/// where a decision to write is made after reading.
///
/// The type parameter `T` represents the data that this lock is protecting.
/// It is necessary for `T` to satisfy [`Send`] to be shared across tasks and
/// [`Sync`] to permit concurrent access via readers. The [`Deref`] method (and
/// [`DerefMut`] for the writer) is implemented for the RAII guards returned
/// by the locking methods, which allows for the access to the protected data
/// while the lock is held.
///
/// # Usage
///
/// The lock can be used in scenarios where data needs to be read frequently
/// but written to occasionally.
///
/// Use `upread lock` in scenarios where related checking is performed before
/// modification to effectively avoid deadlocks and improve efficiency.
///
/// This lock should not be used in scenarios where lock-holding times are
/// long as it can lead to CPU resource wastage due to spinning.
///
/// # About Guard
///
/// See the comments of [`SpinLock`].
///
/// # Examples
///
/// ```
/// use ostd::sync::RwLock;
///
/// let lock = RwLock::new(5);
///
/// // many read locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
///
///     // Upgradeable read lock can share access to data with read locks
///     let r3 = lock.upread();
///     assert_eq!(*r3, 5);
///     drop(r1);
///     drop(r2);
///     // read locks are dropped at this point
///
///     // An upread lock can only be upgraded successfully after all the
///     // read locks are released, otherwise it will spin-wait.
///     let mut w1 = r3.upgrade();
///     *w1 += 1;
///     assert_eq!(*w1, 6);
/// }   // upread lock is dropped at this point
///
/// {
///     // Only one write lock can be held at a time
///     let mut w2 = lock.write();
///     *w2 += 1;
///     assert_eq!(*w2, 7);
/// }   // write lock is dropped at this point
/// ```
///
/// [`SpinLock`]: super::SpinLock
pub struct RwLock<T: ?Sized, Guard = PreemptDisabled> {
    /// Selects the guardian (a [`SpinGuardian`] implementor) whose guards
    /// are acquired in `try_read`/`try_write`/`try_upread` while the lock
    /// is held; no data is stored here.
    guard: PhantomData<Guard>,
    /// The internal representation of the lock state is as follows
    /// (bit positions assume a 64-bit `usize`; the constants below derive
    /// them from `usize::BITS`):
    /// - **Bit 63:** Writer lock.
    /// - **Bit 62:** Upgradeable reader lock.
    /// - **Bit 61:** Indicates if an upgradeable reader is being upgraded.
    /// - **Bits 60-0:** Reader lock count.
    lock: AtomicUsize,
    /// The protected data; only accessed through the RAII guards
    /// (or `get_mut`/`as_ptr`).
    val: UnsafeCell<T>,
}
109
// Bit-field constants for the `RwLock::lock` word (see the layout notes on
// that field).
const READER: usize = 1; // One reader = one increment of the low count bits.
const WRITER: usize = 1 << (usize::BITS - 1); // Exclusive writer bit (highest bit).
const UPGRADEABLE_READER: usize = 1 << (usize::BITS - 2); // At most one upreader at a time.
const BEING_UPGRADED: usize = 1 << (usize::BITS - 3); // Set while an upreader upgrades; blocks new readers.

/// This bit is reserved as an overflow sentinel.
/// We intentionally cap read guards before counter growth can affect
/// `BEING_UPGRADED` / `UPGRADEABLE_READER` / `WRITER` bits.
/// This is defense-in-depth with no extra runtime cost.
///
/// This follows the same strategy as Rust std's `Arc`,
/// which uses `isize::MAX` as a sentinel to prevent its reference count
/// from overflowing into values that could compromise safety.
///
/// On 64-bit platforms (the only targets Asterinas supports),
/// a counter overflow is not a practical concern:
/// incrementing one-by-one from zero to `MAX_READER` (2^60)
/// would take hundreds of years even at billions of increments per second.
/// Nevertheless, this sentinel provides an extra layer of safety at no runtime cost.
const MAX_READER: usize = 1 << (usize::BITS - 4);
130
131impl<T, G> RwLock<T, G> {
132    /// Creates a new spin-based read-write lock with an initial value.
133    pub const fn new(val: T) -> Self {
134        Self {
135            guard: PhantomData,
136            lock: AtomicUsize::new(0),
137            val: UnsafeCell::new(val),
138        }
139    }
140}
141
impl<T: ?Sized, G: SpinGuardian> RwLock<T, G> {
    /// Acquires a read lock and spin-wait until it can be acquired.
    ///
    /// The calling thread will spin-wait until there are no writers or
    /// upgrading upreaders present. There is no guarantee for the order
    /// in which other readers or writers waiting simultaneously will
    /// obtain the lock.
    pub fn read(&self) -> RwLockReadGuard<'_, T, G> {
        loop {
            if let Some(readguard) = self.try_read() {
                return readguard;
            } else {
                core::hint::spin_loop();
            }
        }
    }

    /// Acquires a write lock and spin-wait until it can be acquired.
    ///
    /// The calling thread will spin-wait until there are no other writers,
    /// upreaders or readers present. There is no guarantee for the order
    /// in which other readers or writers waiting simultaneously will
    /// obtain the lock.
    pub fn write(&self) -> RwLockWriteGuard<'_, T, G> {
        loop {
            if let Some(writeguard) = self.try_write() {
                return writeguard;
            } else {
                core::hint::spin_loop();
            }
        }
    }

    /// Acquires an upreader and spin-wait until it can be acquired.
    ///
    /// The calling thread will spin-wait until there are no other writers,
    /// or upreaders. There is no guarantee for the order in which other
    /// readers or writers waiting simultaneously will obtain the lock.
    ///
    /// Upreader will not block new readers until it tries to upgrade. Upreader
    /// and reader do not differ before invoking the upgrade method. However,
    /// only one upreader can exist at any time to avoid deadlock in the
    /// upgrade method.
    pub fn upread(&self) -> RwLockUpgradeableGuard<'_, T, G> {
        loop {
            if let Some(guard) = self.try_upread() {
                return guard;
            } else {
                core::hint::spin_loop();
            }
        }
    }

    /// Attempts to acquire a read lock.
    ///
    /// This function will never spin-wait and will return immediately.
    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, T, G>> {
        let guard = G::read_guard();
        // Optimistically register as a reader, then inspect the *previous*
        // state. Note that `UPGRADEABLE_READER` alone does not block readers;
        // only a writer, an in-progress upgrade, or the reader-count sentinel
        // forces a rollback.
        let lock = self.lock.fetch_add(READER, Acquire);
        if lock & (WRITER | MAX_READER | BEING_UPGRADED) == 0 {
            Some(RwLockReadGuard { inner: self, guard })
        } else {
            // We failed to acquire the lock; undo the optimistic increment.
            self.lock.fetch_sub(READER, Release);
            None
        }
    }

    /// Attempts to acquire a write lock.
    ///
    /// This function will never spin-wait and will return immediately.
    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, T, G>> {
        let guard = G::guard();
        // A writer may only enter when the lock word is exactly zero:
        // no readers, no upreader, and no other writer.
        if self
            .lock
            .compare_exchange(0, WRITER, Acquire, Relaxed)
            .is_ok()
        {
            Some(RwLockWriteGuard { inner: self, guard })
        } else {
            None
        }
    }

    /// Attempts to acquire an upread lock.
    ///
    /// This function will never spin-wait and will return immediately.
    pub fn try_upread(&self) -> Option<RwLockUpgradeableGuard<'_, T, G>> {
        let guard = G::guard();
        // Set the upreader bit unconditionally, then decide from the *prior*
        // writer/upreader bits whether we actually own it.
        let lock = self.lock.fetch_or(UPGRADEABLE_READER, Acquire) & (WRITER | UPGRADEABLE_READER);
        if lock == 0 {
            // Neither bit was set before: our `fetch_or` made us the sole upreader.
            return Some(RwLockUpgradeableGuard { inner: self, guard });
        } else if lock == WRITER {
            // A writer is active and no upreader existed, so the bit we just
            // set is spurious and must be cleared. In the remaining cases
            // `UPGRADEABLE_READER` was already set by another upreader, so our
            // `fetch_or` changed nothing and the bit must be left alone.
            self.lock.fetch_sub(UPGRADEABLE_READER, Release);
        }
        None
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
    /// already statically guaranteed that access to the data is exclusive.
    pub fn get_mut(&mut self) -> &mut T {
        self.val.get_mut()
    }

    /// Returns a raw pointer to the underlying data.
    ///
    /// This method is safe, but it's up to the caller to ensure that access to the data behind it
    /// is still safe.
    pub(super) fn as_ptr(&self) -> *mut T {
        self.val.get()
    }
}
255
impl<T: ?Sized + fmt::Debug, G> fmt::Debug for RwLock<T, G> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Delegates to `UnsafeCell`'s `Debug` impl, which does not read the
        // protected value (it cannot do so safely without taking the lock).
        // NOTE(review): this renders as `UnsafeCell { .. }` rather than the
        // data; consider formatting via `try_read()` if a data dump is wanted.
        fmt::Debug::fmt(&self.val, f)
    }
}
261
/// Because more than one reader can obtain `T`'s immutable reference at the
/// same time, `T` must be `Sync` for the lock itself to be `Sync`; `T: Send`
/// is required so the lock (and the `T` inside) may move across tasks.
// SAFETY: Sending the lock sends the owned `T`, which is sound for `T: Send`.
unsafe impl<T: ?Sized + Send, G> Send for RwLock<T, G> {}
// SAFETY: Shared access to the lock hands out `&T` to concurrent readers
// (requiring `T: Sync`) and `&mut T` to a single writer (requiring `T: Send`).
unsafe impl<T: ?Sized + Send + Sync, G> Sync for RwLock<T, G> {}

// The guards are `!Send`: presumably the guardian state they hold (e.g.
// disabled preemption) is tied to the current task/CPU and must be released
// where it was acquired -- TODO(review): confirm against `SpinGuardian` docs.
impl<T: ?Sized, G: SpinGuardian> !Send for RwLockWriteGuard<'_, T, G> {}
// SAFETY: A shared `&RwLockWriteGuard` only exposes `&T` (via `Deref`), so
// sharing it across threads is sound when `T: Sync`.
unsafe impl<T: ?Sized + Sync, G: SpinGuardian> Sync for RwLockWriteGuard<'_, T, G> {}

impl<T: ?Sized, G: SpinGuardian> !Send for RwLockReadGuard<'_, T, G> {}
// SAFETY: A shared `&RwLockReadGuard` only exposes `&T`, sound for `T: Sync`.
unsafe impl<T: ?Sized + Sync, G: SpinGuardian> Sync for RwLockReadGuard<'_, T, G> {}

impl<T: ?Sized, G: SpinGuardian> !Send for RwLockUpgradeableGuard<'_, T, G> {}
// SAFETY: A shared `&RwLockUpgradeableGuard` only exposes `&T`, sound for `T: Sync`.
unsafe impl<T: ?Sized + Sync, G: SpinGuardian> Sync for RwLockUpgradeableGuard<'_, T, G> {}
275
/// A guard that provides immutable data access.
#[clippy::has_significant_drop]
#[must_use]
pub struct RwLockReadGuard<'a, T: ?Sized, G: SpinGuardian> {
    // The guardian's guard, acquired in `try_read` and held for the guard's
    // lifetime.
    guard: G::ReadGuard,
    // Back-reference to the lock; used by `Deref` and to decrement the
    // reader count on drop.
    inner: &'a RwLock<T, G>,
}
283
impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockReadGuard<'_, T, G> {
    // Delegates to the inner guardian guard, which witnesses the atomic mode.
    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
        self.guard.as_atomic_mode_guard()
    }
}
289
290impl<T: ?Sized, G: SpinGuardian> Deref for RwLockReadGuard<'_, T, G> {
291    type Target = T;
292
293    fn deref(&self) -> &T {
294        unsafe { &*self.inner.val.get() }
295    }
296}
297
impl<T: ?Sized, G: SpinGuardian> Drop for RwLockReadGuard<'_, T, G> {
    fn drop(&mut self) {
        // Release our reader slot. `Release` ordering makes our reads of the
        // data happen-before a later writer's `Acquire` of the lock.
        self.inner.lock.fetch_sub(READER, Release);
    }
}
303
impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockReadGuard<'_, T, G> {
    // Formats the protected data itself (reached through `Deref`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
309
310/// A guard that provides mutable data access.
311pub struct RwLockWriteGuard<'a, T: ?Sized, G: SpinGuardian> {
312    guard: G::Guard,
313    inner: &'a RwLock<T, G>,
314}
315
impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockWriteGuard<'_, T, G> {
    // Delegates to the inner guardian guard, which witnesses the atomic mode.
    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
        self.guard.as_atomic_mode_guard()
    }
}
321
322impl<T: ?Sized, G: SpinGuardian> Deref for RwLockWriteGuard<'_, T, G> {
323    type Target = T;
324
325    fn deref(&self) -> &T {
326        unsafe { &*self.inner.val.get() }
327    }
328}
329
330impl<T: ?Sized, G: SpinGuardian> DerefMut for RwLockWriteGuard<'_, T, G> {
331    fn deref_mut(&mut self) -> &mut Self::Target {
332        unsafe { &mut *self.inner.val.get() }
333    }
334}
335
impl<T: ?Sized, G: SpinGuardian> Drop for RwLockWriteGuard<'_, T, G> {
    fn drop(&mut self) {
        // Clear only the writer bit instead of storing zero: concurrent
        // failed `try_read`/`try_upread` attempts transiently bump the reader
        // count or set the upreader bit even while the writer is held, and
        // those in-flight bits must not be wiped out here.
        self.inner.lock.fetch_and(!WRITER, Release);
    }
}
341
impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockWriteGuard<'_, T, G> {
    // Formats the protected data itself (reached through `Deref`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
347
348/// A guard that provides immutable data access but can be atomically
349/// upgraded to `RwLockWriteGuard`.
350pub struct RwLockUpgradeableGuard<'a, T: ?Sized, G: SpinGuardian> {
351    guard: G::Guard,
352    inner: &'a RwLock<T, G>,
353}
354
impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockUpgradeableGuard<'_, T, G> {
    // Delegates to the inner guardian guard, which witnesses the atomic mode.
    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
        self.guard.as_atomic_mode_guard()
    }
}
360
impl<'a, T: ?Sized, G: SpinGuardian> RwLockUpgradeableGuard<'a, T, G> {
    /// Upgrades this upread guard to a write guard atomically.
    ///
    /// After calling this method, subsequent readers will be blocked
    /// while previous readers remain unaffected. The calling thread
    /// will spin-wait until previous readers finish.
    pub fn upgrade(mut self) -> RwLockWriteGuard<'a, T, G> {
        // Setting `BEING_UPGRADED` makes new `try_read` attempts fail, so the
        // reader count can only drain while we spin on `try_upgrade` below.
        self.inner.lock.fetch_or(BEING_UPGRADED, Acquire);
        loop {
            self = match self.try_upgrade() {
                Ok(guard) => return guard,
                Err(e) => e,
            };
        }
    }

    /// Attempts to upgrade this upread guard to a write guard atomically.
    ///
    /// This function will never spin-wait and will return immediately.
    ///
    /// This function is not exposed publicly because the `BEING_UPGRADED` bit
    /// is set only in [`Self::upgrade`].
    fn try_upgrade(mut self) -> Result<RwLockWriteGuard<'a, T, G>, Self> {
        // Succeeds only when the state is exactly "one upreader, upgrading,
        // zero readers, no writer". On success the word becomes
        // `WRITER | UPGRADEABLE_READER` (and `BEING_UPGRADED` is cleared).
        let res = self.inner.lock.compare_exchange(
            UPGRADEABLE_READER | BEING_UPGRADED,
            WRITER | UPGRADEABLE_READER,
            AcqRel,
            Relaxed,
        );
        if res.is_ok() {
            let inner = self.inner;
            // Move the guardian guard out so it lives on in the write guard.
            let guard = self.guard.transfer_to();
            // Dropping `self` runs `RwLockUpgradeableGuard::drop`, which
            // subtracts `UPGRADEABLE_READER` and leaves only `WRITER` set.
            drop(self);
            Ok(RwLockWriteGuard { inner, guard })
        } else {
            Err(self)
        }
    }
}
400
401impl<T: ?Sized, G: SpinGuardian> Deref for RwLockUpgradeableGuard<'_, T, G> {
402    type Target = T;
403
404    fn deref(&self) -> &T {
405        unsafe { &*self.inner.val.get() }
406    }
407}
408
impl<T: ?Sized, G: SpinGuardian> Drop for RwLockUpgradeableGuard<'_, T, G> {
    fn drop(&mut self) {
        // Release the upreader bit. `Release` ordering makes our reads of
        // the data happen-before a later writer's `Acquire` of the lock.
        self.inner.lock.fetch_sub(UPGRADEABLE_READER, Release);
    }
}
414
impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockUpgradeableGuard<'_, T, G> {
    // Formats the protected data itself (reached through `Deref`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}