// ostd/sync/spin.rs
// SPDX-License-Identifier: MPL-2.0
use vstd::atomic_ghost::*;
use vstd::cell::{self, pcell::*};
use vstd::modes::*;
use vstd::prelude::*;
use vstd_extra::prelude::*;

use alloc::sync::Arc;
use core::{
    cell::UnsafeCell,
    fmt,
    marker::PhantomData,
    ops::{Deref, DerefMut},
    // sync::atomic::{AtomicBool, Ordering},
};

use super::{guard::SpinGuardian, LocalIrqDisabled /*, PreemptDisabled*/};
//use crate::task::atomic_mode::AsAtomicModeGuard;

verus! {
    // Make the `deref_spec` broadcast group's lemmas ambiently available to
    // every proof in this file (used to reason about `Deref`-typed lock handles).
    broadcast use group_deref_spec;
}

/// A spin lock.
///
/// # Guard behavior
///
/// The type `G` specifies the guard behavior of the spin lock. While holding the lock,
/// - if `G` is [`PreemptDisabled`], preemption is disabled;
/// - if `G` is [`LocalIrqDisabled`], local IRQs are disabled.
///
/// The `G` can also be provided by crates other than ostd,
/// if it behaves similarly to [`PreemptDisabled`] or [`LocalIrqDisabled`].
///
/// The guard behavior can be temporarily upgraded from [`PreemptDisabled`] to
/// [`LocalIrqDisabled`] using the [`disable_irq`] method.
///
/// # Verified Properties
/// ## Verification Design
/// To verify the correctness of the spin lock, we use a ghost permission (i.e., not present
/// in executable Rust), and only the owner of the permission can access the protected data
/// in the cell.
/// When [`lock`] or [`try_lock`] succeeds, the ghost permission is transferred to the lock
/// guard and given to the user for accessing the protected data.
/// When the lock guard is dropped, the ghost permission is transferred back to the spin lock.
///
/// For curious readers, details of the permission can be found in
/// [`vstd::cell::pcell::PointsTo`](https://verus-lang.github.io/verus/verusdoc/vstd/cell/pcell/struct.PointsTo.html).
///
/// [`disable_irq`]: Self::disable_irq
/// [`lock`]: Self::lock
/// [`try_lock`]: Self::try_lock
///
/// ## Invariant
/// When the internal `AtomicBool` is `true`, the permission has been transferred to some
/// lock guard and nobody else can acquire the lock; when it is `false`, the permission is
/// held by the spin lock and can be acquired by a user.
///
/// ## Safety
/// There are no data races.
///
/// ## Functional Correctness
/// - At most one user can hold the lock at the same time.
#[repr(transparent)]
#[verus_verify]
//pub struct SpinLock<T: ?Sized, G = PreemptDisabled> {
pub struct SpinLock<T, G> {
    /// Zero-sized marker tying the guard behavior `G` to this lock type.
    phantom: PhantomData<G>,
    /// Only the last field of a struct may have a dynamically sized type.
    /// That's why SpinLockInner is put in the last field.
    inner: SpinLockInner<T>,
}

verus! {

struct_with_invariants! {
// The lock bit plus the protected value.
//
// The ghost state attached to `lock` is an `Option<PointsTo<T>>`: `Some(perm)` while the
// lock is free (the permission is "parked" inside the atomic), `None` while some guard
// holds the permission.
struct SpinLockInner<T> {
    lock: AtomicBool<_,Option<PointsTo<T>>,_>,
    val: PCell<T>, //TODO: Waiting the new PCell that supports ?Sized
    //val: UnsafeCell<T>,
}

// Well-formedness: the atomic invariant linking the boolean lock state to the ghost
// permission for `val`.
closed spec fn wf(self) -> bool {
    invariant on lock with (val) is (v:bool, g:Option<PointsTo<T>>) {
        match g {
            // Permission lent out to a guard => the lock bit must be set.
            None => v == true,
            // Permission parked here => it is for `val`'s cell and the lock bit is clear.
            Some(perm) => perm.id() == val.id() && !v
        }
    }
}
}

impl<T> SpinLockInner<T>
{
    // Registered as a type invariant so `wf()` holds for every reachable value.
    #[verifier::type_invariant]
    closed spec fn type_inv(self) -> bool{
        self.wf()
    }
}

#[verus_verify]
impl<T, G> SpinLock<T, G> {
    /// Creates a new spin lock.
    ///
    /// # Verified Properties
    /// ## Safety
    /// This function is written in safe Rust and there is no undefined behavior.
    /// ## Preconditions
    /// None.
    /// ## Postconditions
    /// - The function will not panic.
    /// - The created spin lock satisfies the invariant.
    pub const fn new(val: T) -> Self
    {
        // `PCell::new` returns the cell together with the tracked permission to access it.
        // (The binding shadows the input `val` with the cell — idiomatic in this file.)
        let (val, Tracked(perm)) = PCell::new(val);
        let lock_inner = SpinLockInner {
            // Start unlocked (`false`) with the permission parked in the atomic's ghost
            // state; `Ghost(val)` fixes the cell that the `wf` invariant refers to.
            lock: AtomicBool::new(Ghost(val),false,Tracked(Some(perm))),
            //val: UnsafeCell::new(val),
            val: val,
        };
        Self {
            phantom: PhantomData,
            inner: lock_inner,
        }
    }
}

// NOTE(review): this empty `verus!{}` invocation looks like a leftover and appears to
// expand to nothing — confirm and remove if so.
verus!{}
impl<T,G> SpinLock<T,G>
{
    /// The id of the underlying `PCell`; used to tie guard permissions to this lock.
    pub closed spec fn cell_id(self) -> cell::CellId {
        self.inner.val.id()
    }

    // The lock's type invariant is exactly the inner struct's invariant.
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool{
        self.inner.type_inv()
    }
}
}

/*
impl<T: ?Sized> SpinLock<T, PreemptDisabled> {
    /// Converts the guard behavior from disabling preemption to disabling IRQs.
    pub fn disable_irq(&self) -> &SpinLock<T, LocalIrqDisabled> {
        let ptr = self as *const SpinLock<T, PreemptDisabled>;
        let ptr = ptr as *const SpinLock<T, LocalIrqDisabled>;
        // SAFETY:
        // 1. The types `SpinLock<T, PreemptDisabled>`, `SpinLockInner<T>` and `SpinLock<T,
        //    IrqDisabled>` have the same memory layout guaranteed by `#[repr(transparent)]`.
        // 2. The specified memory location can be borrowed as an immutable reference for the
        //    specified lifetime.
        unsafe { &*ptr }
    }
}*/

verus! {
//impl<T: ?Sized, G: SpinGuardian> SpinLock<T, G> {
impl<T, G: SpinGuardian> SpinLock<T, G> {

    /// Acquires the spin lock, busy-waiting until it becomes available.
    ///
    /// The returned guard carries the tracked permission for the protected cell.
    #[verus_spec]
    pub fn lock(&self) -> SpinLockGuard<T, G> {
        proof!{ use_type_invariant(self);}
        proof_decl!{
            // Placeholder permission; overwritten by the real one `acquire_lock` returns.
            let tracked mut perm: PointsTo<T> = arbitrary_cell_pointsto();
        }
        // Notice the guard must be created before acquiring the lock.
        let inner_guard = G::guard();
        proof_with! {=> Tracked(perm)}
        self.acquire_lock();
        SpinLockGuard_ {
            lock: self,
            guard: inner_guard,
            v_perm: Tracked(perm),
        }
    }

    /// Acquires the spin lock through an [`Arc`].
    ///
    /// The method is similar to [`lock`], but it doesn't have the requirement
    /// for compile-time checked lifetimes of the lock guard.
    ///
    /// [`lock`]: Self::lock
    #[verus_spec]
    pub fn lock_arc(self: &Arc<Self>) -> ArcSpinLockGuard<T, G> {
        proof!{ use_type_invariant(self);}
        proof_decl!{
            let tracked mut perm: PointsTo<T> = arbitrary_cell_pointsto();
        }
        let inner_guard = G::guard();
        proof_with! {=> Tracked(perm)}
        self.acquire_lock();
        proof!{
            // The permission from `acquire_lock` is for this lock's cell — exactly what the
            // guard's type invariant needs for the cloned `Arc` handle below.
            assert(perm.id() == (*self.clone().deref_spec()).cell_id());
        }
        SpinLockGuard_ {
            lock: self.clone(),
            guard: inner_guard,
            v_perm: Tracked(perm),
        }
    }

    /// Tries acquiring the spin lock immediately; returns `None` on failure.
    #[verus_spec]
    pub fn try_lock(&self) -> Option<SpinLockGuard<T, G>> {
        let inner_guard = G::guard();
        proof_decl!{
            let tracked mut perm: Option<PointsTo<T>> = None;
        }
        // `try_acquire_lock` fills `perm` with `Some(_)` exactly when it returns true.
        if #[verus_spec(with => Tracked(perm))] self.try_acquire_lock() {
            let lock_guard = SpinLockGuard_ {
                lock: self,
                guard: inner_guard,
                v_perm: Tracked(perm.tracked_unwrap()),
            };
            return Some(lock_guard);
        }
        None
    }

    /*
    /// Returns a mutable reference to the underlying data.
    ///
    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
    /// already statically guaranteed that access to the data is exclusive.
    pub fn get_mut(&mut self) -> &mut T {
        self.inner.val.get_mut()
    }*/

    /// Acquires the spin lock, otherwise busy waiting.
    ///
    /// Ghost return: the tracked permission for this lock's cell.
    #[verus_spec(ret =>
        with
            -> perm: Tracked<PointsTo<T>>,
        ensures
            perm@.id() == self.inner.val.id(),
    )]
    #[verifier::exec_allows_no_decreases_clause]
    fn acquire_lock(&self) {
        proof_decl!{
            let tracked mut perm: Option<PointsTo<T>> = None;
        }
        proof!{ use_type_invariant(self);}
        // Spin until the compare-exchange inside `try_acquire_lock` succeeds and hands
        // the parked permission over to us.
        #[verus_spec(
            invariant self.type_inv(),
        )]
        while !#[verus_spec(with => Tracked(perm))]self.try_acquire_lock() {
            core::hint::spin_loop();
        }

        proof_decl!{
            // On loop exit `try_acquire_lock` returned true, so `perm` is `Some`.
            let tracked mut perm = perm.tracked_unwrap();
        }
        // VERUS LIMITATION: Explicit return value to bind the ghost permission return value
        #[verus_spec(with |= Tracked(perm))]
        ()
    }

    /// Tries to acquire the lock with a single compare-exchange (`false -> true`).
    ///
    /// Ghost return: `Some(permission)` for this lock's cell iff the CAS succeeded,
    /// `None` otherwise.
    #[verus_spec(ret =>
        with
            -> perm: Tracked<Option<PointsTo<T>>>,
        ensures
            ret && perm@ is Some && perm@ -> Some_0.id() == self.inner.val.id() || !ret && perm@ is None,
    )]
    fn try_acquire_lock(&self) -> bool {
        /*self.inner
            .lock
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()*/
        proof_decl!{
            let tracked mut perm: Option<PointsTo<T>> = None;
        }
        proof!{ use_type_invariant(self);}
        proof_with!{ |= Tracked(perm)}
        atomic_with_ghost! {
            self.inner.lock => compare_exchange(false, true);
            returning res;
            ghost cell_perm => {
                // On a successful CAS, move the parked permission out of the atomic's
                // ghost state into our local `perm`, leaving `None` behind — as the
                // invariant requires while the lock bit is set.
                if res is Ok {
                    tracked_swap(&mut perm, &mut cell_perm);
                }
            }
        }.is_ok()
    }

    /// An example to show the doc generation with `#[verus_spec]` ghost parameters.
    /// It will be removed after the doc generation function is merged.
    #[verus_spec(ret =>
        with
            ghost_in1: Tracked<int>,
            ghost_in2: Tracked<int>,
        ->
            ghost_out: Ghost<(int,int)>,
        requires
            ghost_in1@ >= 0,
        ensures
            ghost_out@.0 == ghost_in1@,
            ret == 0,
    )]
    pub fn spec_with_example(&self) -> u32 {
        proof_decl!{
            let ghost mut out: (int,int) = (0,0);
        }
        proof!{
            out.0 = ghost_in1@;
            out.1 = ghost_in2@;
        }
        proof_with!{|= Ghost(out)}
        0
    }

    /*
    fn release_lock(&self) {
        self.inner.lock.store(false, Ordering::Release);
    }
    */
}
}

/*
impl<T: ?Sized + fmt::Debug, G> fmt::Debug for SpinLock<T, G> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.inner.val, f)
    }
}

// SAFETY: Only a single lock holder is permitted to access the inner data of Spinlock.
unsafe impl<T: ?Sized + Send, G> Send for SpinLock<T, G> {}
unsafe impl<T: ?Sized + Send, G> Sync for SpinLock<T, G> {}
*/
/// A guard that provides exclusive access to the data protected by a [`SpinLock`].
pub type SpinLockGuard<'a, T, G> = SpinLockGuard_<T, &'a SpinLock<T, G>, G>;
/// A guard that provides exclusive access to the data protected by an `Arc<SpinLock>`.
pub type ArcSpinLockGuard<T, G> = SpinLockGuard_<T, Arc<SpinLock<T, G>>, G>;

/// The guard of a spin lock.
///
/// `R` is the lock handle type: either `&SpinLock<T, G>` or `Arc<SpinLock<T, G>>`
/// (see [`SpinLockGuard`] and [`ArcSpinLockGuard`]).
#[clippy::has_significant_drop]
#[must_use]
#[verifier::reject_recursive_types(T)]
#[verifier::reject_recursive_types(G)]
#[verus_verify]
pub struct SpinLockGuard_<T/*: ?Sized*/, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> {
    /// The inner guard produced by `G::guard()` while acquiring the lock.
    guard: G::Guard,
    /// The handle to the lock this guard was acquired from.
    lock: R,
    v_perm: Tracked<PointsTo<T>>, //Ghost permission for verification
}

verus! {
impl<T, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> SpinLockGuard_<T, R, G>
{
    // Type invariant: the tracked permission carried by the guard is for the cell of
    // the lock the guard's handle points to.
    #[verifier::type_invariant]
    spec fn type_inv(self) -> bool{
        self.lock.deref_spec().cell_id() == self.v_perm@.id()
    }
}
}
/*
impl<T: ?Sized, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> AsAtomicModeGuard
    for SpinLockGuard_<T, R, G>
{
    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
        self.guard.as_atomic_mode_guard()
    }
}

impl<T: ?Sized, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> Deref
    for SpinLockGuard_<T, R, G>
{
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.lock.inner.val.get() }
    }
}

impl<T: ?Sized, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> DerefMut
    for SpinLockGuard_<T, R, G>
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.lock.inner.val.get() }
    }
}

impl<T: ?Sized, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> Drop
    for SpinLockGuard_<T, R, G>
{
    fn drop(&mut self) {
        self.lock.release_lock();
    }
}

impl<T: ?Sized + fmt::Debug, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> fmt::Debug
    for SpinLockGuard_<T, R, G>
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}*/

/*
impl<T: ?Sized, R: Deref<Target = SpinLock<T, G>>, G: SpinGuardian> !Send
    for SpinLockGuard_<T, R, G>
{
}

// SAFETY: `SpinLockGuard_` can be shared between tasks/threads in same CPU.
// As `lock()` is only called when there are no race conditions caused by interrupts.
unsafe impl<T: ?Sized + Sync, R: Deref<Target = SpinLock<T, G>> + Sync, G: SpinGuardian> Sync
    for SpinLockGuard_<T, R, G>
{
}
*/