// ostd/sync/rwarc.rs

1// SPDX-License-Identifier: MPL-2.0
2use vstd::prelude::*;
3use vstd::atomic_ghost::*;
4use vstd_extra::prelude::*;
5
6use alloc::sync::Arc;
7//use core::sync::atomic::{fence, AtomicUsize, Ordering};
8
9
10use super::{PreemptDisabled, RwLock, RwLockReadGuard, RwLockWriteGuard};
11
12verus! {
13
/// A reference-counting pointer with read-write capabilities.
///
/// This is essentially `Arc<RwLock<T>>`, so it can provide read-write capabilities through
/// [`RwArc::read`] and [`RwArc::write`].
///
/// In addition, this allows to derive another reference-counting pointer with read-only
/// capabilities ([`RoArc`]) via [`RwArc::clone_ro`].
///
/// The purpose of having this type is to allow lockless (read) access to the underlying data when
/// there is only one [`RwArc`] instance for the particular allocation (note that there can be any
/// number of [`RoArc`] instances for that allocation). See the `get` method (currently disabled
/// pending verification) for more details.
pub struct RwArc<T>(Arc<Inner<T>>);
27
/// A reference-counting pointer with read-only capabilities.
///
/// This type can be created from an existing [`RwArc`] using its [`RwArc::clone_ro`] method. See
/// the type and method documentation for more details.
///
/// A `RoArc` can only produce immutable references to the underlying data (via [`RoArc::read`]).
pub struct RoArc<T>(Arc<Inner<T>>);
33
struct_with_invariants!{
// Shared state behind both `RwArc` and `RoArc` allocations.
// NOTE: plain `//` comments are used inside this macro on purpose; `///` doc
// comments desugar to attributes the macro may not accept.
struct Inner<T> {
    // The protected data, guarded by a preemption-disabling read-write lock.
    data: RwLock<T, PreemptDisabled>,
    // Count of `RwArc` handles (not `RoArc`s) pointing to this allocation,
    // paired with a ghost `int` mirror for verification.
    num_rw: AtomicUsize<_,int,_>,
}

closed spec fn wf(&self) -> bool {
  // Invariant: the physical counter value always equals its ghost mirror.
  // (`data` is captured as the invariant's constant parameter, though the
  // predicate itself does not currently mention it.)
  invariant on num_rw with (data) is (v:usize, g:int) {
    v == g
  } 
}
}
46
impl<T> RwArc<T> {
    /// Type invariant, automatically maintained by Verus across `RwArc` values:
    /// every `RwArc` is well-formed per [`RwArc::wf`].
    #[verifier::type_invariant]
    closed spec fn type_inv(self) -> bool {
        self.wf()
    }

    /// Well-formedness: the inner lock/counter invariant holds and the ghost
    /// atomic counter is well-formed (its invariant context is set up).
    pub closed spec fn wf(self) -> bool {
        &&& self.0.wf()
        &&& self.0.num_rw.well_formed()
    }
}
58
impl<T> RwArc<T> {
    /// Creates a new `RwArc<T>`.
    pub fn new(data: T) -> Self {
        //let inner = Inner { data: RwLock::new(data), num_rw: AtomicUsize::new(1) };
        let data = RwLock::new(data);
        // The counter starts at 1: this `RwArc` is the sole read-write handle.
        // `Ghost(data)` supplies the invariant's constant parameter and
        // `Tracked(1int)` initializes the ghost mirror equal to the physical
        // value, establishing `Inner::wf`.
        let inner = Inner { data, num_rw: AtomicUsize::new(Ghost(data),1,Tracked(1int)) };
        Self(Arc::new(inner))
    }

    /// Acquires the read lock for immutable access.
    pub fn read(&self) -> RwLockReadGuard<T, PreemptDisabled> {
        self.0.data.read()
    }

    /// Acquires the write lock for mutable access.
    pub fn write(&self) -> RwLockWriteGuard<T, PreemptDisabled> {
        self.0.data.write()
    }

    // NOTE(review): `get` is disabled pending verification; the ktest that
    // exercised it must stay disabled until this (and `Drop`) are restored.
    /*
    /// Returns an immutable reference if no other `RwArc` points to the same allocation.
    ///
    /// This method is cheap because it does not acquire a lock.
    ///
    /// It's still sound because:
    /// - The mutable reference to `self` and the condition ensure that we are exclusively
    ///   accessing the unique `RwArc` instance for the particular allocation.
    /// - There may be any number of [`RoArc`]s pointing to the same allocation, but they may only
    ///   produce immutable references to the underlying data.
    #[verifier::external_body]
    pub fn get(&mut self) -> Option<&T> {
        if self.0.num_rw.load(Ordering::Relaxed) > 1 {
            return None;
        }
        // This will synchronize with `RwArc::drop` to make sure its changes are visible to us.

        fence(Ordering::Acquire);

        let data_ptr = self.0.data.as_ptr();

        // SAFETY: The data is valid. During the lifetime, no one will be able to create a mutable
        // reference to the data, so it's okay to create an immutable reference like the one below.
        Some(unsafe { &*data_ptr })
    }
    */
    /// Clones a [`RoArc`] that points to the same allocation.
    ///
    /// Note: this bumps only the `Arc` refcount, not `num_rw`, since `RoArc`s
    /// are deliberately excluded from the read-write handle count.
    pub fn clone_ro(&self) -> RoArc<T> {
        RoArc(self.0.clone())
    }
}
109
#[verus_verify]
impl<T> Clone for RwArc<T> {
    // Spec: the clone is (spec-)equal to `self`.
    #[verus_spec]
    fn clone(&self) -> Self 
        returns
            self,
    {
        proof!{
            // Bring `self.wf()` into scope so the atomic's invariant can be opened below.
            use_type_invariant(self);
        }
        let inner = self.0.clone();
        // Note that overflowing the counter will make it unsound. But not to worry: the above
        // `Arc::clone` must have already aborted the kernel before this happens.
        // inner.num_rw.fetch_add(1, Ordering::Relaxed);
        atomic_with_ghost! {
            inner.num_rw => fetch_add(1);
            ghost g => {
                // Proof hole: the no-overflow argument above is assumed, not proved.
                // Keeps the ghost mirror in sync with the physical fetch_add.
                assume(g < usize::MAX);
                g = g + 1;
            }
        };

        Self(inner)
    }
}
135
136/*
137impl<T> Drop for RwArc<T> {
138    #[verifier::external_body]
139    fn drop(&mut self)
140        opens_invariants none
141        no_unwind
142    {
143        self.0.num_rw.fetch_sub(1, Ordering::Release);
144    }
145}*/
146
impl<T: Clone> RwArc<T> {
    /// Returns the contained value by cloning it.
    ///
    /// Acquires the read lock for the duration of the clone; the guard is
    /// released when it goes out of scope.
    pub fn get_cloned(&self) -> T {
        let guard = self.read();
        guard.clone()
    }
}
154
impl<T> RoArc<T> {
    /// Acquires the read lock for immutable access.
    ///
    /// This is the only access a `RoArc` grants; there is no `write` counterpart.
    pub fn read(&self) -> RwLockReadGuard<T, PreemptDisabled> {
        self.0.data.read()
    }
}
161
162} // verus!
163
#[cfg(ktest)]
mod test {
    use super::*;
    use crate::prelude::*;

    // TODO: Re-enable this test once `RwArc::get` and the `Drop` implementation
    // (which decrements `num_rw` with Release ordering) are restored. Both are
    // currently commented out above, so this test would fail to compile (no
    // `get` method), and without `Drop` the counter never decreases, so the
    // final assertion after `drop(rw2)` would fail even if it did compile.
    /*
    #[ktest]
    fn lockless_get() {
        let mut rw1 = RwArc::new(1u32);
        assert_eq!(rw1.get(), Some(1).as_ref());

        let _ro = rw1.clone_ro();
        assert_eq!(rw1.get(), Some(1).as_ref());

        let rw2 = rw1.clone();
        assert_eq!(rw1.get(), None);

        drop(rw2);
        assert_eq!(rw1.get(), Some(1).as_ref());
    }
    */
}
183}