// ostd/sync/rwarc.rs
// SPDX-License-Identifier: MPL-2.0
use vstd::atomic_ghost::*;
use vstd::prelude::*;
use vstd_extra::prelude::*;

use alloc::sync::Arc;
//use core::sync::atomic::{fence, AtomicUsize, Ordering};

use super::{PreemptDisabled, RwLock, RwLockReadGuard, RwLockWriteGuard};

verus! {

/// A reference-counting pointer with read-write capabilities.
///
/// This is essentially `Arc<RwLock<T>>`, so it can provide read-write capabilities through
/// [`RwArc::read`] and [`RwArc::write`].
///
/// In addition, this allows deriving another reference-counting pointer with read-only
/// capabilities ([`RoArc`]) via [`RwArc::clone_ro`].
///
/// The purpose of having this type is to allow lockless (read) access to the underlying data when
/// there is only one [`RwArc`] instance for the particular allocation (note that there can be any
/// number of [`RoArc`] instances for that allocation). See the `RwArc::get` method (currently
/// commented out pending verification) for more details.
pub struct RwArc<T>(Arc<Inner<T>>);

/// A reference-counting pointer with read-only capabilities.
///
/// This type can be created from an existing [`RwArc`] using its [`RwArc::clone_ro`] method. See
/// the type and method documentation for more details.
pub struct RoArc<T>(Arc<Inner<T>>);

struct_with_invariants!{
// The shared allocation behind both `RwArc` and `RoArc` handles.
struct Inner<T> {
    // The protected data.
    data: RwLock<T, PreemptDisabled>,
    // Count of `RwArc` handles to this allocation: initialized to 1 in
    // `RwArc::new` and incremented in `RwArc::clone`; the matching decrement
    // lives in the currently commented-out `Drop` impl below. The `int` type
    // parameter is the ghost counterpart of the physical `usize` value.
    num_rw: AtomicUsize<_,int,_>,
}

// The atomic's invariant: the physical `usize` value always equals its ghost
// `int` counterpart, so proofs can reason about the counter via the ghost value.
closed spec fn wf(&self) -> bool {
  invariant on num_rw with (data) is (v:usize, g:int) {
    v == g
  }
}
}

impl<T> RwArc<T> {
    // Verus type invariant: assumed to hold for every `RwArc` value, so exec
    // methods (e.g. `Clone::clone` below) can recover `wf()` through
    // `use_type_invariant`.
    #[verifier::type_invariant]
    closed spec fn type_inv(self) -> bool {
        self.wf()
    }

    // Well-formedness: the inner lock's own invariant holds, and the atomic
    // counter's ghost invariant (declared in `struct_with_invariants!`) is set up.
    pub closed spec fn wf(self) -> bool {
        &&& self.0.wf()
        &&& self.0.num_rw.well_formed()
    }
}

impl<T> RwArc<T> {
    /// Creates a new `RwArc<T>`.
    pub fn new(data: T) -> Self {
        //let inner = Inner { data: RwLock::new(data), num_rw: AtomicUsize::new(1) };
        let data = RwLock::new(data);
        // The counter starts at 1: this `RwArc` is the only read-write handle.
        // `Ghost(data)` and `Tracked(1int)` supply the invariant parameter and
        // the initial ghost value declared for `num_rw`; both are ghost-only and
        // erased at runtime, so referring to `data` here does not move it.
        let inner = Inner { data, num_rw: AtomicUsize::new(Ghost(data), 1, Tracked(1int)) };
        Self(Arc::new(inner))
    }

    /// Acquires the read lock for immutable access.
    pub fn read(&self) -> RwLockReadGuard<T, PreemptDisabled> {
        self.0.data.read()
    }

    /// Acquires the write lock for mutable access.
    pub fn write(&self) -> RwLockWriteGuard<T, PreemptDisabled> {
        self.0.data.write()
    }

    /*
    /// Returns an immutable reference if no other `RwArc` points to the same allocation.
    ///
    /// This method is cheap because it does not acquire a lock.
    ///
    /// It's still sound because:
    /// - The mutable reference to `self` and the condition ensure that we are exclusively
    ///   accessing the unique `RwArc` instance for the particular allocation.
    /// - There may be any number of [`RoArc`]s pointing to the same allocation, but they may only
    ///   produce immutable references to the underlying data.
    #[verifier::external_body]
    pub fn get(&mut self) -> Option<&T> {
        if self.0.num_rw.load(Ordering::Relaxed) > 1 {
            return None;
        }
        // This will synchronize with `RwArc::drop` to make sure its changes are visible to us.

        fence(Ordering::Acquire);

        let data_ptr = self.0.data.as_ptr();

        // SAFETY: The data is valid. During the lifetime, no one will be able to create a mutable
        // reference to the data, so it's okay to create an immutable reference like the one below.
        Some(unsafe { &*data_ptr })
    }
    */
    /// Clones a [`RoArc`] that points to the same allocation.
    ///
    /// Note that this bumps only the `Arc` reference count, not `num_rw`,
    /// since the new handle is read-only.
    pub fn clone_ro(&self) -> RoArc<T> {
        RoArc(self.0.clone())
    }
}

#[verus_verify]
impl<T> Clone for RwArc<T> {
    // Spec: the clone is (spec-)equal to the original handle.
    #[verus_spec]
    fn clone(&self) -> Self
        returns
            self,
    {
        proof!{
            // Recover `wf()` (see `#[verifier::type_invariant]`) so the ghost
            // invariant on `num_rw` is available for the atomic update below.
            use_type_invariant(self);
        }
        let inner = self.0.clone();
        // Note that overflowing the counter will make it unsound. But not to worry: the above
        // `Arc::clone` must have already aborted the kernel before this happens.
        // inner.num_rw.fetch_add(1, Ordering::Relaxed);
        atomic_with_ghost! {
            inner.num_rw => fetch_add(1);
            ghost g => {
                // TODO(verification): this `assume` encodes the no-overflow
                // argument from the comment above; ideally it would be proved
                // rather than assumed.
                assume(g < usize::MAX);
                g = g + 1;
            }
        };

        Self(inner)
    }
}

/*
// NOTE: disabled together with `RwArc::get`; the `Release` ordering here is
// meant to pair with the `Acquire` fence in `RwArc::get`.
impl<T> Drop for RwArc<T> {
    #[verifier::external_body]
    fn drop(&mut self)
        opens_invariants none
        no_unwind
    {
        self.0.num_rw.fetch_sub(1, Ordering::Release);
    }
}*/

146impl<T: Clone> RwArc<T> {
147    /// Returns the contained value by cloning it.
148    pub fn get_cloned(&self) -> T {
149        let guard = self.read();
150        guard.clone()
151    }
152}
153
154impl<T> RoArc<T> {
155    /// Acquires the read lock for immutable access.
156    pub fn read(&self) -> RwLockReadGuard<T, PreemptDisabled> {
157        self.0.data.read()
158    }
159}
160
} // verus!
#[cfg(ktest)]
mod test {
    use super::*;
    use crate::prelude::*;

    // The original `lockless_get` test exercised `RwArc::get`, which is
    // currently commented out pending verification, so the test would not
    // compile. It is preserved below and should be re-enabled together with
    // `RwArc::get` (and the `Drop` impl it synchronizes with).
    /*
    #[ktest]
    fn lockless_get() {
        let mut rw1 = RwArc::new(1u32);
        assert_eq!(rw1.get(), Some(1).as_ref());

        let _ro = rw1.clone_ro();
        assert_eq!(rw1.get(), Some(1).as_ref());

        let rw2 = rw1.clone();
        assert_eq!(rw1.get(), None);

        drop(rw2);
        assert_eq!(rw1.get(), Some(1).as_ref());
    }
    */

    // Smoke test for the operations that are currently enabled: construction,
    // cloning a read-write handle, deriving a read-only handle, and reading
    // the value through both.
    #[ktest]
    fn clone_and_read() {
        let rw1 = RwArc::new(1u32);
        assert_eq!(rw1.get_cloned(), 1);

        let rw2 = rw1.clone();
        assert_eq!(rw2.get_cloned(), 1);

        let ro = rw1.clone_ro();
        assert_eq!(ro.read().clone(), 1);
    }
}