// File: ostd/sync/rwlock.rs
1// SPDX-License-Identifier: MPL-2.0
2use vstd::atomic_ghost::*;
3use vstd::cell::{self, pcell::*, CellId};
4use vstd::pcm::Loc;
5use vstd::prelude::*;
6use vstd::tokens::frac::{self, Empty, Frac, FracGhost};
7use vstd_extra::resource::ghost_resource::{csum::*, excl::*, tokens::*};
8use vstd_extra::sum::*;
9use vstd_extra::{prelude::*, resource};
10
11use alloc::sync::Arc;
12use core::char::MAX;
13use core::{
14    cell::UnsafeCell,
15    fmt,
16    marker::PhantomData,
17    ops::{Deref, DerefMut},
18    sync::atomic::{
19        // AtomicUsize,
20        Ordering::{AcqRel, Acquire, Relaxed, Release},
21    },
22};
23
24use super::{
25    guard::{GuardTransfer, SpinGuardian},
26    PreemptDisabled,
27};
28
29verus! {
30
/// `MAX_READER` widened to `u64`, the denominator used by the 64-bit fractional
/// ghost resources below.
const MAX_READER_U64: u64 = MAX_READER as u64;

/// Spec-level value of the total number of read-retract fractions:
/// `MAX_READER_MASK + 1 == 2 * MAX_READER`.
spec const V_MAX_READ_RETRACT_FRACS_SPEC: u64 = (MAX_READER_MASK + 1) as u64;

/// Exec-level twin of [`V_MAX_READ_RETRACT_FRACS_SPEC`]; the `ensures` clauses tie
/// the runtime value to the spec constant so both views agree in proofs.
#[verifier::when_used_as_spec(V_MAX_READ_RETRACT_FRACS_SPEC)]
exec const V_MAX_READ_RETRACT_FRACS: u64
    ensures
        V_MAX_READ_RETRACT_FRACS == V_MAX_READ_RETRACT_FRACS_SPEC,
        V_MAX_READ_RETRACT_FRACS == MAX_READER_MASK + 1,
        V_MAX_READ_RETRACT_FRACS < u64::MAX,
{
    // Discharged by bit-level computation; justifies the `as u64` cast below
    // and the `< u64::MAX` postcondition.
    assert(MAX_READER_MASK + 1 < u64::MAX) by (compute_only);
    (MAX_READER_MASK + 1) as u64
}
45
46/// The token reserved in the lock when the write permission is given out.
47type NoPerm<T> = Empty<PointsTo<T>>;
48
49/// Half of the permission for read access, one for `RwLockUpgradeableGuard` and the other for all `RwLockReadGuard`s.
50type HalfPerm<T> = Frac<PointsTo<T>>;
51
52/// The permission for read access can be further split into `MAX_READER` pieces.
53type ReadPerm<T> = (HalfPerm<T>, OneLeftKnowledge<HalfPerm<T>, NoPerm<T>, 3>);
54
/// The ghost resources stored inside the lock atomic. Together with the invariant
/// in [`RwLock::wf`], they tie the runtime bit pattern to the set of outstanding
/// guards and permissions.
tracked struct RwPerms<T> {
    /// This token tracks whether the write permission is given out. If it is `Left`, it stores the knowledge that
    /// there are active readers because the existence of `HalfPerm` resource for read access.
    /// If it is `Right`, we know there is an active writer and there is no active reader, because there is a `NoPerm`
    /// indicating the absence of `PointsTo<T>`.
    core_token: SumResource<HalfPerm<T>, NoPerm<T>, 3>,
    /// The permission to retract a `READER` count. Its total quantity tracks the gap between
    /// the number of `try_read` increments recorded in the lock atomic and the number of active
    /// `RwLockReadGuard`s (created and ongoing creation that will succeed) represented by `read_guard_token`.
    /// It can be split up to `V_MAX_READ_RETRACT_FRACS := 2 * MAX_READER` pieces,
    /// which allows at most `2*MAX_READER - 1` `try_read` attempts that will fail to acquire the lock.
    read_retract_token: TokenResource<V_MAX_READ_RETRACT_FRACS>,
    /// The permission to retract the set of `UPGRADEABLE_READER` bit.
    upread_retract_token: Option<UniqueToken>,
    /// Tracks whether there is a live `RwLockUpgradeableGuard`, also stores half of the permission for read access.
    /// `None` means the token has been handed out to a live upgradeable guard.
    upreader_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>>,
    /// Tracks the number of live `RwLockReadGuard`s. If it is `Left`, it stores the remaining read permissions.
    /// If it is `Right`, it stores an empty token indicating the permission has been given out.
    read_guard_token: Sum<
        FracResource<ReadPerm<T>, MAX_READER_U64>,
        Empty<ReadPerm<T>, MAX_READER_U64>,
    >,
}
78
/// The ghost-resource location identifiers of one `RwLock` instance. These are fixed at
/// construction time and used by the invariant to check that every token belongs
/// to this particular lock.
ghost struct RwId {
    /// Location of `RwPerms::core_token`.
    core_token_id: Loc,
    /// Location of the fractional `PointsTo<T>` permission.
    frac_id: Loc,
    /// Location of `RwPerms::read_retract_token`.
    read_retract_token_id: Loc,
    /// Location of `RwPerms::upread_retract_token`.
    upread_retract_token_id: Loc,
    /// Location of `RwPerms::read_guard_token`.
    read_guard_token_id: Loc,
}
86
/// The number of `try_read` operations recorded in the lock atomic (created and ongoing) can never reach `2*MAX_READER` to avoid overflow.
/// **NOTE**: We *ASSUME* this property always holds without any proof. We believe this is true in practice because:
/// - More than `2^61` `try_read` operations are required to trigger the overflow concurrently, which is absurd in real world scenarios.
/// - If one tries to create a huge number (more than `2*MAX_READER`) of `RwLockReadGuard`s in a loop with `mem::forget`, it will take years and
///   will be prevented by the `MAX_READER` check.
pub closed spec fn no_max_reader_overflow(v: usize) -> bool {
    v & MAX_READER_MASK < MAX_READER_MASK
}
95
struct_with_invariants! {
/// Spin-based Read-write Lock
///
/// # Overview
///
/// This lock allows for multiple readers, or at most one writer to access
/// at any point in time. The writer of this lock has exclusive access to
/// modify the underlying data, while the readers are allowed shared and
/// read-only access.
///
/// The writing and reading portions cannot be active simultaneously, when
/// one portion is in progress, the other portion will spin-wait. This is
/// suitable for scenarios where the lock is expected to be held for short
/// periods of time, and the overhead of context switching is higher than
/// the cost of spinning.
///
/// In addition to traditional read and write locks, this implementation
/// provides the upgradeable read lock (`upread lock`). The `upread lock`
/// can be upgraded to write locks atomically, useful in scenarios
/// where a decision to write is made after reading.
///
/// The type parameter `T` represents the data that this lock is protecting.
/// It is necessary for `T` to satisfy [`Send`] to be shared across tasks and
/// [`Sync`] to permit concurrent access via readers. The [`Deref`] method (and
/// [`DerefMut`] for the writer) is implemented for the RAII guards returned
/// by the locking methods, which allows for the access to the protected data
/// while the lock is held.
///
/// # Usage
/// The lock can be used in scenarios where data needs to be read frequently
/// but written to occasionally.
///
/// Use `upread lock` in scenarios where related checking is performed before
/// modification to effectively avoid deadlocks and improve efficiency.
///
/// This lock should not be used in scenarios where lock-holding times are
/// long as it can lead to CPU resource wastage due to spinning.
///
/// # About Guard
///
/// See the comments of [`SpinLock`].
///
/// # Examples
///
/// ```
/// use ostd::sync::RwLock;
///
/// let lock = RwLock::new(5)
///
/// // many read locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
///
///     // Upgradeable read lock can share access to data with read locks
///     let r3 = lock.upread();
///     assert_eq!(*r3, 5);
///     drop(r1);
///     drop(r2);
///     // read locks are dropped at this point
///
///     // An upread lock can only be upgraded successfully after all the
///     // read locks are released, otherwise it will spin-wait.
///     let mut w1 = r3.upgrade();
///     *w1 += 1;
///     assert_eq!(*w1, 6);
/// }   // upread lock are dropped at this point
///
/// {
///     // Only one write lock can be held at a time
///     let mut w2 = lock.write();
///     *w2 += 1;
///     assert_eq!(*w2, 7);
/// }   // write lock is dropped at this point
/// ```
///
/// [`SpinLock`]: super::SpinLock
pub struct RwLock<T  /* : ?Sized*/ , Guard /* = PreemptDisabled*/ > {
    guard: PhantomData<Guard>,
    /// The internal representation of the lock state is as follows:
    /// - **Bit 63:** Writer lock.
    /// - **Bit 62:** Upgradeable reader lock.
    /// - **Bit 61:** Indicates if an upgradeable reader is being upgraded.
    /// - **Bits 60-0:** Reader lock count.
    lock: AtomicUsize<_, RwPerms<T>,_>,
    // val: UnsafeCell<T>,
    val: PCell<T>,
    v_id: Ghost<RwId>,
}

/// This invariant holds at any time, i.e. not violated during any method execution.
closed spec fn wf(self) -> bool {
    invariant on lock with (val, guard, v_id) is (v: usize, g: RwPerms<T>) {
        // BITS VALUE
        let has_writer_bit: bool = (v & WRITER) != 0;
        let has_upgrade_bit: bool = (v & UPGRADEABLE_READER) != 0;
        let has_max_reader_bit: bool = (v & MAX_READER) != 0;
        // The total number of `try_read` attempts recorded in the lock atomic, including created `RwLockReadGuard`s
        // and those who are trying, no matter whether they will succeed or fail.
        let total_reader_bits: int = (v & MAX_READER_MASK) as int;
        // The clamped value represented in the counter bits. This counts the maximum number of active `RwLockReadGuard`s.
        // NOTE: This does not mean there are actually this number of active `RwLockReadGuard`s. The actual number of successfully
        // created/creating `RwLockReadGuard`s can be smaller than this number, because previously created `RwLockReadGuard`s may be dropped.
        let reader_bits: int = if has_max_reader_bit { MAX_READER as int } else { (v & READER_MASK) as int };

        // ACTUAL NUMBER OF ACTIVE GUARDS.
        // The number is tracked by the ghost resources remaining in the lock.
        // By active, we mean from the perspective of the lock, the permissions for these guards are given out.
        // The actual guards may be still being created, but they must be successfully created finally.
        // This invariant maintains the consistency between the ghost resources and the lock atomic value.

        // Whether there is an active writer
        let active_writer: bool = g.core_token.is_right();
        // Whether there is an active `RwLockUpgradeableGuard` (there can only be 0 or 1).
        let active_upgrade_guard: bool = !active_writer && g.upreader_guard_token is None;
        // The number of active `RwLockReadGuard`s.
        let active_read_guards: int = if g.read_guard_token is Left {
            MAX_READER_U64 - g.read_guard_token.left().frac()
        } else {
            0
        };
        // The first `try_upread` that fails, which has not returned yet.
        let pending_failed_upread_attempt: bool = g.upread_retract_token is None;
        // The number of `try_read` attempts that will fail.
        let failed_reader_attempts: int = V_MAX_READ_RETRACT_FRACS - g.read_retract_token.frac();

        &&& if g.core_token.is_left() {
            g.read_guard_token is Left
        } else {
            &&& g.upreader_guard_token is None
            &&& g.read_guard_token is Right
        }
        // The `UPGRADEABLE_READER` bit is set iff there is an active `RwLockUpgradeableGuard` or a pending failed `try_upread` attempt.
        &&& has_upgrade_bit <==> (active_upgrade_guard || pending_failed_upread_attempt)
        // An active `RwLockUpgradeableGuard` cannot coexist with a pending failed `try_upread` attempt.
        &&& !(active_upgrade_guard && pending_failed_upread_attempt)
        // The `READER` bits count the number of all active read guards and pending failed `try_read` attempts.
        &&& total_reader_bits == active_read_guards + failed_reader_attempts
        // There is an active `RwLockWriteGuard` iff the `WRITER` bit is set.
        &&& active_writer <==> has_writer_bit
        // The number of active `RwLockReadGuard`s is less than or equal to the `READER` bits.
        &&& 0 <= active_read_guards <= reader_bits <= total_reader_bits
        // The core invariant of `RwLock`: there are no simultaneous active writers and readers.
        &&& !(active_writer && (active_read_guards + if active_upgrade_guard { 1int } else { 0 }) > 0)
        // WELL-FORMEDNESS OF THE GHOST RESOURCES: each token is valid and belongs to this lock.
        &&& g.core_token.id() == v_id@.core_token_id
        &&& g.core_token.wf()
        &&& g.core_token.is_left() ==> {
            &&& !g.core_token.is_resource_owner()
            &&& g.core_token.frac() == 1
        }
        &&& g.core_token.is_right() ==> {
            let empty = g.core_token.resource_right();
            &&& empty.id() == v_id@.frac_id
            &&& g.core_token.frac() == 2
            &&& g.core_token.has_resource()
        }
        &&& g.read_retract_token.wf()
        &&& g.read_retract_token.id() == v_id@.read_retract_token_id
        &&& g.upread_retract_token is Some ==>
            {
                let token = g.upread_retract_token->Some_0;
                &&& token.wf()
                &&& token.id() == v_id@.upread_retract_token_id
            }
        &&& g.upreader_guard_token is Some ==> {
            let token = g.upreader_guard_token->Some_0;
            wf_upgradeable_guard_token(v_id@.core_token_id, v_id@.frac_id, val.id(), token)
        }
        &&& match g.read_guard_token {
            Sum::Left(token) => {
                &&& token.wf()
                &&& token.id() == v_id@.read_guard_token_id
                &&& token.not_empty() ==> {
                    let resource = token.resource();
                    let read_half_cell_perm = resource.0;
                    let mode_knowledge = resource.1;
                    &&& mode_knowledge.id() == v_id@.core_token_id
                    &&& read_half_cell_perm.id() == v_id@.frac_id
                    &&& read_half_cell_perm.resource().id() == val.id()
                    &&& read_half_cell_perm.frac() == 1
                }
            },
            Sum::Right(empty) => {
                &&& empty.id() == v_id@.read_guard_token_id
            },
        }

    }
}

}
289
/// The amount one `try_read` adds to the reader count (bits 60-0 of the lock word).
const READER: usize = 1;

/// Bit 63: set while a writer holds the lock.
const WRITER: usize = 1 << (usize::BITS - 1);

/// Bit 62: set while an upgradeable reader holds the lock (or a failed
/// `try_upread` has set it and is about to retract it).
const UPGRADEABLE_READER: usize = 1 << (usize::BITS - 2);

/// Bit 61: set while an upgradeable reader is in the middle of upgrading to a writer.
const BEING_UPGRADED: usize = 1 << (usize::BITS - 3);

/// This bit is reserved as an overflow sentinel.
/// We intentionally cap read guards before counter growth can affect
/// `BEING_UPGRADED` / `UPGRADEABLE_READER` / `WRITER` bits.
/// This is defense-in-depth with no extra runtime cost.
///
/// This follows the same strategy as Rust std's `Arc`,
/// which uses `isize::MAX` as a sentinel to prevent its reference count
/// from overflowing into values that could compromise safety.
///
/// On 64-bit platforms (the only targets Asterinas supports),
/// a counter overflow is not a practical concern:
/// incrementing one-by-one from zero to `MAX_READER` (2^60)
/// would take hundreds of years even at billions of increments per second.
/// Nevertheless, this sentinel provides an extra layer of safety at no runtime cost.
const MAX_READER: usize = 1 << (usize::BITS - 4);

/// Used only in verification. Excluding the `MAX_READER` bit.
const READER_MASK: usize = usize::MAX >> 4;

/// Used only in verification. Including the `MAX_READER` bit.
const MAX_READER_MASK: usize = usize::MAX >> 3;
319
impl<T, G> RwLock<T, G> {
    /// Returns the unique [`CellId`](https://verus-lang.github.io/verus/verusdoc/vstd/cell/struct.CellId.html) of the internal `PCell<T>`.
    pub closed spec fn cell_id(self) -> cell::CellId {
        self.val.id()
    }

    /// Returns the location of this lock's core (reader/writer mode) token.
    pub closed spec fn core_token_id(self) -> Loc {
        self.v_id@.core_token_id
    }

    /// Returns the location of this lock's fractional `PointsTo<T>` permission.
    pub closed spec fn frac_id(self) -> Loc {
        self.v_id@.frac_id
    }

    /// Returns the location of this lock's upread-retract token.
    pub closed spec fn upread_retract_token_id(self) -> Loc {
        self.v_id@.upread_retract_token_id
    }

    /// Returns the location of this lock's read-guard counting token.
    pub closed spec fn read_guard_token_id(self) -> Loc {
        self.v_id@.read_guard_token_id
    }

    /// Encapsulates the invariant described in the *Invariant* section of [`RwLock`].
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool {
        self.wf()
    }
}
348
/// Well-formedness of the token held by an `RwLockUpgradeableGuard`: the token belongs
/// to the lock identified by `core_token_id`, and it carries exactly one half of the
/// fractional permission for the cell identified by `cell_id`.
closed spec fn wf_upgradeable_guard_token<T>(
    core_token_id: Loc,
    frac_id: Loc,
    cell_id: CellId,
    token: OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>,
) -> bool {
    let half_cell_perm = token.resource();
    &&& token.id() == core_token_id
    &&& half_cell_perm.id() == frac_id
    &&& half_cell_perm.resource().id() == cell_id
    &&& token.has_resource()
    &&& half_cell_perm.frac() == 1
    &&& token.wf()
}
363
impl<T, G> RwLock<T, G> {
    /// Creates a new spin-based read-write lock with an initial value.
    ///
    /// All ghost resources are allocated here and stored in the lock atomic,
    /// establishing the invariant of [`RwLock::wf`] in the "unlocked" state:
    /// no writer, no upgradeable reader, zero read guards.
    pub const fn new(val: T) -> Self {
        let (val, Tracked(perm)) = PCell::new(val);

        // Proof code
        proof {
            lemma_consts_properties();
        }
        // Split the full `PointsTo<T>` permission into two fractional halves:
        // one is kept inside the core token, the other backs the read guards.
        let tracked mut frac_perm = Frac::<PointsTo<T>>::new(perm);
        let tracked read_half_cell_perm = frac_perm.split(1int);
        let ghost frac_id = frac_perm.id();
        // The lock starts in the `Left` (reader) state: no writer is active.
        let tracked mut core_token = SumResource::alloc_left(frac_perm);
        // Full quantity of retract fractions: no failed `try_read` is pending.
        let tracked read_retract_token = TokenResource::<V_MAX_READ_RETRACT_FRACS>::alloc(());
        // Present: no failed `try_upread` is pending.
        let tracked upread_retract_token = UniqueToken::alloc(());
        // Present: no `RwLockUpgradeableGuard` is live.
        let tracked upreader_guard_token = core_token.split_one_left_owner();
        let tracked left_token = core_token.split_one_left_knowledge();
        // Full quantity of read fractions: no `RwLockReadGuard` is live.
        let tracked read_guard_token = FracResource::<ReadPerm<T>, MAX_READER_U64>::alloc(
            (read_half_cell_perm, left_token),
        );
        let ghost v_id = RwId {
            frac_id,
            core_token_id: core_token.id(),
            upread_retract_token_id: upread_retract_token.id(),
            read_retract_token_id: read_retract_token.id(),
            read_guard_token_id: read_guard_token.id(),
        };
        let tracked perms = RwPerms {
            core_token,
            read_retract_token,
            upread_retract_token: Some(upread_retract_token),
            upreader_guard_token: Some(upreader_guard_token),
            read_guard_token: Sum::Left(read_guard_token),
        };

        Self {
            guard: PhantomData,
            //lock: AtomicUsize::new(0),
            lock: AtomicUsize::new(Ghost((val, PhantomData, Ghost(v_id))), 0, Tracked(perms)),
            //val: UnsafeCell::new(val),
            val: val,
            v_id: Ghost(v_id),
        }
    }
}
409
#[verus_verify]
impl<T  /*: ?Sized*/ , G: SpinGuardian> RwLock<T, G> {
    /// Acquires a read lock and spin-wait until it can be acquired.
    ///
    /// The calling thread will spin-wait until there are no writers or
    /// upgrading upreaders present. There is no guarantee for the order
    /// in which other readers or writers waiting simultaneously will
    /// obtain the lock.
    #[verifier::exec_allows_no_decreases_clause]
    pub fn read(&self) -> RwLockReadGuard<T, G> {
        loop {
            if let Some(readguard) = self.try_read() {
                return readguard;
            } else {
                core::hint::spin_loop();
            }
        }
    }

    /// Acquires a write lock and spin-wait until it can be acquired.
    ///
    /// The calling thread will spin-wait until there are no other writers,
    /// upreaders or readers present. There is no guarantee for the order
    /// in which other readers or writers waiting simultaneously will
    /// obtain the lock.
    #[verifier::exec_allows_no_decreases_clause]
    pub fn write(&self) -> RwLockWriteGuard<T, G> {
        loop {
            if let Some(writeguard) = self.try_write() {
                return writeguard;
            } else {
                core::hint::spin_loop();
            }
        }
    }

    /// Acquires an upreader and spin-wait until it can be acquired.
    ///
    /// The calling thread will spin-wait until there are no other writers,
    /// or upreaders. There is no guarantee for the order in which other
    /// readers or writers waiting simultaneously will obtain the lock.
    ///
    /// Upreader will not block new readers until it tries to upgrade. Upreader
    /// and reader do not differ before invoking the upgrade method. However,
    /// only one upreader can exist at any time to avoid deadlock in the
    /// upgrade method.
    #[verifier::exec_allows_no_decreases_clause]
    pub fn upread(&self) -> RwLockUpgradeableGuard<T, G> {
        loop {
            if let Some(guard) = self.try_upread() {
                return guard;
            } else {
                core::hint::spin_loop();
            }
        }
    }

    /// Attempts to acquire a read lock.
    ///
    /// This function will never spin-wait and will return immediately.
    #[verus_spec]
    pub fn try_read(&self) -> Option<RwLockReadGuard<T, G>> {
        proof_decl!{
            // On success: one read fraction handed to the new guard.
            let tracked mut read_token: Option<Frac<ReadPerm<T>,MAX_READER_U64>> = None;
            // On failure: one retract fraction, to be returned when we undo the increment.
            let tracked mut retract_read_token: Option<Token<V_MAX_READ_RETRACT_FRACS>> = None;
        }
        proof!{
            use_type_invariant(self);
            lemma_consts_properties();
        }
        let guard = G::read_guard();

        // Unverified original: let lock = self.lock.fetch_add(READER, Acquire);
        let lock =
            atomic_with_ghost!(
            self.lock => fetch_add(READER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                // See `no_max_reader_overflow` for why this assumption is justified.
                assume (no_max_reader_overflow(prev_usize));
                lemma_consts_properties_value(prev_usize);
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                if prev_usize & (WRITER | MAX_READER | BEING_UPGRADED) == 0 {
                    // Acquisition will succeed: split off one read fraction for the guard.
                    let tracked mut tmp = g.read_guard_token.tracked_take_left();
                    read_token = Some(tmp.split_one());
                    g.read_guard_token = Sum::Left(tmp);
                } else {
                    // Acquisition will fail: take a retract fraction to account for
                    // the increment we just made and will soon undo.
                    retract_read_token = Some(g.read_retract_token.split_one());
                }
            }
        );
        if lock & (WRITER | MAX_READER | BEING_UPGRADED) == 0 {
            Some(
                RwLockReadGuard {
                    inner: self,
                    guard,
                    v_token: Tracked(read_token.tracked_unwrap())
                },
            )
        } else {
            // Undo the reader increment.
            // Unverified original: self.lock.fetch_sub(READER, Release);
            atomic_with_ghost!(
                self.lock => fetch_sub(READER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    lemma_consts_properties_value(next_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    // Return the retract fraction taken above.
                    g.read_retract_token.combine(retract_read_token.tracked_unwrap());
                }
            );
            None
        }
    }

    /// Attempts to acquire a write lock.
    ///
    /// This function will never spin-wait and will return immediately.
    #[verus_spec]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<T, G>> {
        proof_decl!{
            // On success: the full `PointsTo<T>` permission for the writer.
            let tracked mut guard_perm: Option<PointsTo<T>> = None;
            // On success: knowledge that the lock is in the writer state.
            let tracked mut guard_token: Option<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>> = None;
        }
        proof!{
            use_type_invariant(self);
            lemma_consts_properties();
        }

        let guard = G::guard();
        // Unverified original:
        // if self
        //     .lock
        //     .compare_exchange(0, WRITER, Acquire, Relaxed)
        //     .is_ok()
        if atomic_with_ghost!(
            self.lock => compare_exchange(0, WRITER);
            update prev -> next;
            returning res;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                if res is Ok {
                    // The lock word was 0: no readers, no upreader, no writer.
                    // Retract the fractional permission for read access.
                    let tracked read_guard_token = g.read_guard_token.tracked_take_left();
                    let tracked (read_resource, read_empty) = read_guard_token.take_resource();
                    g.read_guard_token = Sum::Right(read_empty);
                    let tracked (read_half_cell_perm, left_token) = read_resource;
                    g.core_token.join_one_left_knowledge(left_token);
                    // Retract the fractional permission for upgradeable reader.
                    let tracked upreader_guard_token = g.upreader_guard_token.tracked_take();
                    g.core_token.join_one_left_owner(upreader_guard_token);
                    // Combine the two halves of the permission for read access to get the full permission and give it out.
                    let tracked mut pointsto = g.core_token.take_resource_left();
                    pointsto.combine(read_half_cell_perm);
                    let tracked (pointsto, empty) = pointsto.take_resource();
                    guard_perm = Some(pointsto);
                    // Flip the core token into the writer (`Right`) state.
                    g.core_token.change_to_right(empty);
                    guard_token = Some(g.core_token.split_one_right_knowledge());
                }
            }
        ).is_ok() {
            Some(
                RwLockWriteGuard {
                    inner: self,
                    guard,
                    v_perm: Tracked(guard_perm.tracked_unwrap()),
                    v_token: Tracked(guard_token.tracked_unwrap()),
                },
            )
        } else {
            None
        }
    }

    /// Attempts to acquire an upread lock.
    ///
    /// This function will never spin-wait and will return immediately.
    pub fn try_upread(&self) -> Option<RwLockUpgradeableGuard<T, G>> {
        proof_decl!{
            // On success: the token proving we are the unique upgradeable reader.
            let tracked mut upgrade_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>> = None;
            // On failure against a writer: the token allowing us to retract the bit we set.
            let tracked mut retract_upgrade_token: Option<UniqueToken> = None;
        }
        proof!{
            use_type_invariant(self);
            lemma_consts_properties();
        }
        let guard = G::guard();
        // Unverified original:
        // let lock = self.lock.fetch_or(UPGRADEABLE_READER, Acquire) & (WRITER | UPGRADEABLE_READER);
        let lock =
            atomic_with_ghost!(
            self.lock => fetch_or(UPGRADEABLE_READER);
            update prev -> next;
            ghost g => {
                lemma_consts_properties_value(prev);
                lemma_consts_properties_prev_next(prev, next);
                if prev & (WRITER | UPGRADEABLE_READER) == 0 {
                    // No writer and no other upreader: take the unique upreader token.
                    upgrade_guard_token = Some(g.upreader_guard_token.tracked_take());
                }
                else if prev & (WRITER | UPGRADEABLE_READER) == WRITER {
                    // A writer holds the lock and our `fetch_or` set the bit spuriously:
                    // take the retract token so we may clear the bit below.
                    retract_upgrade_token = Some(g.upread_retract_token.tracked_take());
                }
            }
        )
            & (WRITER | UPGRADEABLE_READER);
        if lock == 0 {
            return Some(
                RwLockUpgradeableGuard {
                    inner: self,
                    guard,
                    v_token: Tracked(upgrade_guard_token.tracked_unwrap())
                },
            );
        } else if lock == WRITER {
            // We set the `UPGRADEABLE_READER` bit while a writer held the lock; clear it.
            // Unverified original: self.lock.fetch_sub(UPGRADEABLE_READER, Release);
            atomic_with_ghost!(
                self.lock => fetch_sub(UPGRADEABLE_READER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    lemma_consts_properties_value(prev_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    if g.upread_retract_token is Some {
                        // The token is unique; having both copies is contradictory.
                        let tracked mut token = retract_upgrade_token.tracked_unwrap();
                        token.validate_with_other(g.upread_retract_token.tracked_borrow());
                    }
                    else{
                        // Return the retract token taken above.
                        g.upread_retract_token= retract_upgrade_token;
                    }
                }
            );
        }
        None
    }
}
647
648/*
649impl<T, G: SpinGuardian> RwLock<T, G> {
650    /// Returns a mutable reference to the underlying data.
651    ///
652    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
653    /// already statically guaranteed that access to the data is exclusive.
654    pub fn get_mut(&mut self) -> &mut T {
655        self.val.get_mut()
656    }
657
658    /// Returns a raw pointer to the underlying data.
659    ///
660    /// This method is safe, but it's up to the caller to ensure that access to the data behind it
661    /// is still safe.
662    pub(super) fn as_ptr(&self) -> *mut T {
663        self.val.get()
664    }
665}*/
666
667/* the trait `core::fmt::Debug` is not implemented for `vstd::cell::pcell::PCell<T>`
668impl<T: ?Sized + fmt::Debug, G> fmt::Debug for RwLock<T, G> {
669    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
670        fmt::Debug::fmt(&self.val, f)
671    }
672}*/
673
/// Because more than one reader can obtain an immutable reference to `T` at the
/// same time, `T` must be `Sync` to guarantee sharing safety.
// SAFETY: Sending the lock transfers ownership of the protected `T`,
// so `T: Send` suffices for the lock itself to be `Send`.
#[verifier::external]
unsafe impl<T: Send, G> Send for RwLock<T, G> {

}

// SAFETY: Shared references to the lock hand out `&T` to concurrent readers
// (requiring `T: Sync`) and may move `T` out via the write guard
// (requiring `T: Send`).
#[verifier::external]
unsafe impl<T: Send + Sync, G> Sync for RwLock<T, G> {

}

// NOTE(review): the guards are `!Send`, presumably because they are tied to the
// per-CPU guardian state (e.g. disabled preemption) of the acquiring context —
// confirm against the `SpinGuardian` implementations.
impl<T  /*: ?Sized*/ , G: SpinGuardian> !Send for RwLockWriteGuard<'_, T, G> {

}

// SAFETY: sharing a write guard only exposes `&T` (via `Deref`), which is safe
// when `T: Sync`.
#[verifier::external]
unsafe impl<T: Sync, G: SpinGuardian> Sync for RwLockWriteGuard<'_, T, G> {

}

impl<T  /*: ?Sized*/ , G: SpinGuardian> !Send for RwLockReadGuard<'_, T, G> {

}

// SAFETY: sharing a read guard only exposes `&T`, which is safe when `T: Sync`.
#[verifier::external]
unsafe impl<T: Sync, G: SpinGuardian> Sync for RwLockReadGuard<'_, T, G> {

}

impl<T  /*: ?Sized*/ , G: SpinGuardian> !Send for RwLockUpgradeableGuard<'_, T, G> {

}

// SAFETY: sharing an upgradeable guard only exposes `&T`, which is safe when `T: Sync`.
#[verifier::external]
unsafe impl<T: Sync, G: SpinGuardian> Sync for RwLockUpgradeableGuard<'_, T, G> {

}
712
/// A guard that provides immutable data access.
#[verifier::reject_recursive_types(T)]
#[verifier::reject_recursive_types(G)]
#[clippy::has_significant_drop]
#[must_use]
#[verus_verify]
pub struct RwLockReadGuard<'a, T /*: ?Sized*/, G: SpinGuardian> {
    /// The guardian's read-side guard (e.g. keeps preemption disabled while held).
    guard: G::ReadGuard,
    /// The lock this guard was acquired from.
    inner: &'a RwLock<T, G>,
    /// One fraction of the lock's read permission; returned on drop.
    v_token: Tracked<Frac<ReadPerm<T>, MAX_READER_U64>>,
}
724
725/*
726impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockReadGuard<'_, T, G> {
727    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
728        self.guard.as_atomic_mode_guard()
729    }
730}
731*/
732
impl<'a, T, G: SpinGuardian> RwLockReadGuard<'a, T, G> {
    /// The guard's invariant: its token is exactly one read fraction of the
    /// originating lock and carries one half-fraction of the cell permission.
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool {
        let resource = self.v_token@.resource();
        let read_half_cell_perm = resource.0;
        let mode_knowledge = resource.1;
        &&& self.inner.core_token_id() == mode_knowledge.id()
        &&& self.inner.frac_id() == read_half_cell_perm.id()
        &&& self.inner.cell_id() == read_half_cell_perm.resource().id()
        &&& self.v_token@.id() == self.inner.read_guard_token_id()
        &&& read_half_cell_perm.frac() == 1
        &&& self.v_token@.frac() == 1
    }

    /// The value stored in the lock.
    pub closed spec fn value(self) -> T {
        *self.v_token@.resource().0.resource().value()
    }

    /// The value stored in the lock. It is an alias of `Self::value`.
    pub open spec fn view(self) -> T {
        self.value()
    }

    /// Borrows the inner value in tracked mode.
    // NOTE: external_body with `unimplemented!()` — this is a proof-only
    // borrowing axiom and is never executed at runtime.
    #[verifier::external_body]
    pub proof fn tracked_borrow(tracked &self) -> (tracked r: &'a T)
        returns
            self.view(),
    {
        unimplemented!()
    }
}
766
impl<T  /*: ?Sized*/ , G: SpinGuardian> Deref for RwLockReadGuard<'_, T, G> {
    type Target = T;

    /// Shared access to the protected value, justified by this guard's
    /// fractional read permission (see `type_inv`).
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T {
        proof!{
            use_type_invariant(self);
        }
        // unsafe { &*self.inner.val.get() }
        // The internal implementation of `PCell<T>::borrow` is exactly unsafe { &(*(*self.ucell).get()) },
        // and here we verify that we have the permission to call `borrow`.
        self.inner.val.borrow(Tracked(self.v_token.borrow().borrow().0.borrow()))
    }
}
781
782/* impl<T: ?Sized, R: Deref<Target = RwLock<T, G>> + Clone, G: SpinGuardian> Drop
783    for RwLockReadGuard_<T, R, G>
784{
785    fn drop(&mut self) {
786        self.inner.lock.fetch_sub(READER, Release);
787    }
788} */
789
#[verus_verify]
impl<T  /*: ?Sized*/ , G: SpinGuardian> RwLockReadGuard<'_, T, G> {
    /// VERUS LIMITATION: We implement `drop` and call it manually because Verus's support for `Drop` is incomplete for now.
    ///
    /// Releases one reader: decrements the reader count in the lock word and
    /// returns this guard's fractional ghost token to the pooled token.
    #[verus_spec]
    fn drop(self) {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        proof_decl! {
            let tracked token = self.v_token.get();
        }
        // self.inner.lock.fetch_sub(READER, Release);
        atomic_with_ghost!(
            self.inner.lock => fetch_sub(READER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                // NOTE(review): `assume` is an unproven obligation — the
                // absence of reader-count overflow should follow from the
                // atomic invariant rather than being assumed here; confirm
                // whether this is a known, deliberate trust gap.
                assume (no_max_reader_overflow(prev_usize));
                lemma_consts_properties_value(next_usize);
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                // A live reader implies the core token is in read ("Left") mode.
                g.core_token.validate_with_one_left_knowledge(&token.borrow().1);
                // Merge this guard's fractional share back into the pooled token.
                let tracked mut tmp = g.read_guard_token.tracked_take_left();
                tmp.combine(token);
                g.read_guard_token = Sum::Left(tmp);
            }
        );
    }
}
821
822/*
823impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockReadGuard<'_, T, G> {
824    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
825        fmt::Debug::fmt(&**self, f)
826    }
827}*/
828
/// A guard that provides mutable data access.
#[verifier::reject_recursive_types(T)]
#[verifier::reject_recursive_types(G)]
pub struct RwLockWriteGuard<'a, T  /*: ?Sized*/ , G: SpinGuardian> {
    /// The guardian's (exclusive) guard.
    guard: G::Guard,
    /// Back-reference to the lock this guard was acquired from.
    inner: &'a RwLock<T, G>,
    /// Ghost permission for verification: the full points-to permission for
    /// the protected cell, justifying exclusive (`&mut T`) access.
    v_perm: Tracked<PointsTo<T>>,
    /// Ghost knowledge that the lock's core token is in write ("Right") mode.
    v_token: Tracked<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>>,
}
839
impl<'a, T, G: SpinGuardian> RwLockWriteGuard<'a, T, G> {
    /// Type invariant: the cell permission and the mode token belong to this
    /// lock's cell and core ghost locations, respectively.
    #[verifier::type_invariant]
    spec fn type_inv(self) -> bool {
        &&& self.inner.cell_id() == self.v_perm@.id()
        &&& self.inner.core_token_id() == self.v_token@.id()
    }

    /// The value stored in the lock.
    pub closed spec fn value(self) -> T {
        *self.v_perm@.value()
    }

    /// The value stored in the lock. It is an alias of `Self::value`.
    pub open spec fn view(self) -> T {
        self.value()
    }

    /// Borrows the inner value in tracked mode.
    ///
    /// NOTE(review): `external_body` stub — trusted, not verified; it promises
    /// a borrow of the guarded value valid for the guard lifetime `'a`.
    #[verifier::external_body]
    pub proof fn tracked_borrow(tracked &self) -> (tracked r: &'a T)
        returns
            self.view(),
    {
        unimplemented!()
    }
}
866
867/*
868impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockWriteGuard<'_, T, G> {
869    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
870        self.guard.as_atomic_mode_guard()
871    }
872}*/
873
impl<T  /*: ?Sized*/ , G: SpinGuardian> Deref for RwLockWriteGuard<'_, T, G> {
    type Target = T;

    /// Shared access to the protected value, justified by this guard's full
    /// points-to permission (see `type_inv`).
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T {
        proof!{
            use_type_invariant(self);
        }
        // unsafe { &*self.inner.val.get() }
        // The internal implementation of `PCell<T>::borrow` is exactly unsafe { &(*(*self.ucell).get()) },
        // and here we verify that we have the permission to call `borrow`.
        self.inner.val.borrow(Tracked(self.v_perm.borrow()))
    }
}
888
#[verus_verify]
impl<T  /*: ?Sized*/ , G: SpinGuardian> DerefMut for RwLockWriteGuard<'_, T, G> {
    /// Exclusive access to the protected value, justified by this guard's
    /// full points-to permission.
    #[verus_spec(ret =>
        ensures
            final(self).view() == *final(ret),
    )]
    fn deref_mut(&mut self) -> &mut Self::Target 
    {
        proof!{
            use_type_invariant(&*self);
        }
        //unsafe { &mut *self.inner.val.get() }
        // `pcell_borrow_mut` is the permission-checked counterpart of the raw
        // mutable borrow above.
        pcell_borrow_mut(&self.inner.val, &mut self.v_perm)
    }
}
904
905/*
906impl<T: ?Sized, G: SpinGuardian> Drop for RwLockWriteGuard<'_, T, G> {
907    fn drop(&mut self) {
908        self.inner.lock.fetch_and(!WRITER, Release);
909    }
910}*/
911
impl<T  /*: ?Sized*/ , G: SpinGuardian> RwLockWriteGuard<'_, T, G> {
    /// VERUS LIMITATION: We implement `drop` and call it manually because Verus's support for `Drop` is incomplete for now.
    ///
    /// Releases the writer: clears the `WRITER` bit and rebuilds the
    /// read-side ghost state (pooled reader token and unique upreader token)
    /// from the returned cell permission.
    pub fn drop(self) {
        proof!{
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        proof_decl! {
            let tracked mut perm = self.v_perm.get();
            let tracked token = self.v_token.get();
        }
        //self.inner.lock.fetch_and(!WRITER, Release);
        atomic_with_ghost!{
            self.inner.lock => fetch_and(!WRITER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                lemma_consts_properties_value(next_usize);
                // The core token must be in write ("Right") mode; fold our
                // knowledge back in and take out the reserved empty resource.
                g.core_token.validate_with_one_right_knowledge(&token);
                g.core_token.join_one_right_knowledge(token);
                let tracked empty = g.core_token.take_resource_right();
                // Re-deposit the full cell permission and split off the half
                // to be shared by future readers.
                let tracked mut full = empty.put_resource(perm);
                let tracked read_half_cell_perm = full.split(1int);
                // Switch the lock back to read ("Left") mode.
                g.core_token.change_to_left(full);
                // Reinstate the unique upgradeable-reader token ...
                let tracked upreader_guard_token = g.core_token.split_one_left_owner();
                g.upreader_guard_token = Some(upreader_guard_token);
                // ... and re-create the pooled token future read guards draw from.
                let tracked left_token = g.core_token.split_one_left_knowledge();
                let tracked read_guard_empty = g.read_guard_token.tracked_take_right();
                let tracked read_guard_token = FracResource::alloc_from_empty(
                    read_guard_empty,
                    (read_half_cell_perm, left_token),
                );
                g.read_guard_token = Sum::Left(read_guard_token);
            }
        };
    }
}
952
953/*
954impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockWriteGuard<'_, T, G> {
955    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
956        fmt::Debug::fmt(&**self, f)
957    }
958}*/
959
/// A guard that provides immutable data access but can be atomically
/// upgraded to `RwLockWriteGuard`.
#[verifier::reject_recursive_types(T)]
#[verifier::reject_recursive_types(G)]
pub struct RwLockUpgradeableGuard<'a, T  /*: ?Sized*/ , G: SpinGuardian> {
    /// The guardian's (exclusive) guard.
    guard: G::Guard,
    /// Back-reference to the lock this guard was acquired from.
    inner: &'a RwLock<T, G>,
    /// Ghost token: the unique upgradeable-reader ownership while the lock is
    /// in read ("Left") mode; it also carries the read permission used by
    /// `Deref`.
    v_token: Tracked<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>>,
}
969
970/*
971impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockUpgradeableGuard<'_, T, G> {
972    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
973        self.guard.as_atomic_mode_guard()
974    }
975}*/
976
impl<'a, T, G: SpinGuardian> RwLockUpgradeableGuard<'a, T, G> {
    /// Type invariant: delegated to `wf_upgradeable_guard_token`, which ties
    /// the token to this lock's core/frac/cell ghost identifiers.
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool {
        wf_upgradeable_guard_token(
            self.inner.core_token_id(),
            self.inner.frac_id(),
            self.inner.cell_id(),
            self.v_token@,
        )
    }

    /// The value stored in the lock.
    pub closed spec fn value(self) -> T {
        *self.v_token@.resource().resource().value()
    }

    /// The value stored in the lock. It is an alias of `Self::value`.
    pub open spec fn view(self) -> T {
        self.value()
    }
}
998
impl<'a, T  /*: ?Sized*/ , G: SpinGuardian> RwLockUpgradeableGuard<'a, T, G> {
    /// Upgrades this upread guard to a write guard atomically.
    ///
    /// After calling this method, subsequent readers will be blocked
    /// while previous readers remain unaffected. The calling thread
    /// will spin-wait until previous readers finish.
    #[verifier::exec_allows_no_decreases_clause]
    pub fn upgrade(  /* mut */ self) -> RwLockWriteGuard<'a, T, G> {
        let mut this = self;
        proof! {
            use_type_invariant(&this);
            use_type_invariant(&this.inner);
            lemma_consts_properties();
        }
        // Announce the pending upgrade by setting the `BEING_UPGRADED` bit.
        // self.inner.lock.fetch_or(BEING_UPGRADED, Acquire);
        atomic_with_ghost!(
            this.inner.lock => fetch_or(BEING_UPGRADED);
            update prev -> next;
            ghost g => {
                lemma_consts_properties_prev_next(prev, next);
            }
        );
        // Spin until the remaining readers leave; termination is not proven
        // (hence `exec_allows_no_decreases_clause`).
        loop {
            // self = match self.try_upgrade() {
            this =
            match this.try_upgrade() {
                Ok(guard) => return guard,
                Err(e) => e,
            };
        }
    }

    /// Attempts to upgrade this upread guard to a write guard atomically.
    ///
    /// This function will never spin-wait and will return immediately.
    ///
    /// This function is not exposed publicly because the `BEING_UPGRADED` bit
    /// is set only in [`Self::upgrade`].
    fn try_upgrade(  /* mut */ self) -> Result<RwLockWriteGuard<'a, T, G>, Self> {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        let mut this = self;
        proof_decl! {
            let tracked mut upread_guard_token = this.v_token.get();
            let tracked mut write_perm: Option<PointsTo<T>> = None;
            let tracked mut err_upread_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>> = None;
            let tracked mut retract_upgrade_token: Option<UniqueToken> = None;
            let tracked mut write_guard_token = None;
        }

        // The CAS succeeds only when the lock word is exactly
        // `UPGRADEABLE_READER | BEING_UPGRADED`, i.e. no writer and no
        // remaining reader bits.
        // let res = self.inner.lock.compare_exchange(
        //     UPGRADEABLE_READER | BEING_UPGRADED,
        //     WRITER | UPGRADEABLE_READER,
        //     AcqRel,
        //     Relaxed,
        // );
        let res =
            atomic_with_ghost!(
            this.inner.lock => compare_exchange(UPGRADEABLE_READER | BEING_UPGRADED, WRITER | UPGRADEABLE_READER);
            update prev -> next;
            returning res;
            ghost g => {
                lemma_consts_properties_prev_next(prev, next);
                if res is Ok {
                    // Fold our unique upreader token back into the core token.
                    g.core_token.validate_with_one_left_owner(&upread_guard_token);
                    if g.upreader_guard_token is Some {
                        upread_guard_token.validate_with_one_left_owner(g.upreader_guard_token.tracked_borrow());
                    }
                    g.core_token.join_one_left_owner(upread_guard_token);
                    // Reclaim the pooled reader token (no read guards remain).
                    let tracked read_guard_token = g.read_guard_token.tracked_take_left();
                    let tracked (read_resource, read_empty) = read_guard_token.take_resource();
                    g.read_guard_token = Sum::Right(read_empty);
                    let tracked (read_half_cell_perm, left_token) = read_resource;
                    g.core_token.join_one_left_knowledge(left_token);
                    // Reassemble the full cell permission from its two halves.
                    let tracked mut pointsto = g.core_token.take_resource_left();
                    pointsto.combine(read_half_cell_perm);
                    let tracked (pointsto, empty) = pointsto.take_resource();
                    write_perm = Some(pointsto);
                    // Switch the core token to write ("Right") mode.
                    g.core_token.change_to_right(empty);
                    write_guard_token = Some(g.core_token.split_one_right_knowledge());
                    retract_upgrade_token = Some(g.upread_retract_token.tracked_take());
                } else {
                    // CAS failed: keep our token and report back via `Err`.
                    err_upread_guard_token = Some(upread_guard_token);
                }
            }
        );
        if res.is_ok() {
            let inner = this.inner;
            let guard = this.guard.transfer_to();
            // Clear `UPGRADEABLE_READER` now that this guard becomes a plain
            // write guard, returning the retract token to the lock.
            // drop(self);
            atomic_with_ghost!(
                inner.lock => fetch_sub(UPGRADEABLE_READER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    lemma_consts_properties_value(prev_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    let tracked mut token = retract_upgrade_token.tracked_unwrap();
                    if g.upread_retract_token is Some {
                        token.validate_with_other(g.upread_retract_token.tracked_borrow());
                    }
                    g.upread_retract_token = Some(token);
                }
            );
            Ok(
                RwLockWriteGuard {
                    inner,
                    guard,
                    v_perm: Tracked(write_perm.tracked_unwrap()),
                    v_token: Tracked(write_guard_token.tracked_unwrap()),
                },
            )
        } else {
            Err(
                RwLockUpgradeableGuard {
                    inner: this.inner,
                    guard: this.guard,
                    v_token: Tracked(err_upread_guard_token.tracked_unwrap()),
                },
            )
        }
    }
}
1126
impl<T  /*: ?Sized*/ , G: SpinGuardian> Deref for RwLockUpgradeableGuard<'_, T, G> {
    type Target = T;

    /// Shared access to the protected value, justified by the read permission
    /// carried inside this guard's upreader token.
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T {
        proof!{
            use_type_invariant(self);
        }
        // unsafe { &*self.inner.val.get() }
        // The internal implementation of `PCell<T>::borrow` is exactly unsafe { &(*(*self.ucell).get()) },
        // and here we verify that we have the permission to call `borrow`.
        self.inner.val.borrow(Tracked(self.v_token.borrow().tracked_borrow().borrow()))
    }
}
1141
1142/*
1143impl<T: ?Sized, G: SpinGuardian> Drop for RwLockUpgradeableGuard<'_, T, G> {
1144    fn drop(&mut self) {
1145        self.inner.lock.fetch_sub(UPGRADEABLE_READER, Release);
1146    }
1147}*/
1148
impl<T  /*: ?Sized*/ , G: SpinGuardian> RwLockUpgradeableGuard<'_, T, G> {
    /// VERUS LIMITATION: We implement `drop` and call it manually because Verus's support for `Drop` is incomplete for now.
    ///
    /// Releases the upgradeable reader: clears `UPGRADEABLE_READER` in the
    /// lock word and hands the unique upreader ghost token back to the lock.
    pub fn drop(self) {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        proof_decl!{
            let tracked guard_token = self.v_token.get();
        }
        //self.inner.lock.fetch_sub(UPGRADEABLE_READER, Release);
        atomic_with_ghost!(
            self.inner.lock => fetch_sub(UPGRADEABLE_READER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                lemma_consts_properties_value(prev_usize);
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                g.core_token.validate_with_one_left_owner(&guard_token);
                if g.upreader_guard_token is Some {
                    // Two "unique" upreader owners cannot coexist; validation
                    // derives a contradiction, so this branch is provably
                    // unreachable.
                    guard_token.validate_with_one_left_owner(g.upreader_guard_token.tracked_borrow());
                    assert(false);
                } else {
                    // Return the unique upreader token to the lock.
                    g.upreader_guard_token= Some(guard_token);
                }
            }
        );
    }
}
1180
1181/*
1182impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockUpgradeableGuard<'_, T, G> {
1183    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1184        fmt::Debug::fmt(&**self, f)
1185    }
1186}
1187*/
1188
/// Bit-level facts about the lock-word constants (`WRITER`,
/// `UPGRADEABLE_READER`, `BEING_UPGRADED`, `READER_MASK`, `MAX_READER_MASK`,
/// `MAX_READER`, `READER`): their exact values, pairwise disjointness of the
/// flag bits, and how each mask projects the composite CAS patterns used in
/// `try_upgrade`. Every fact is discharged by `compute_only`.
proof fn lemma_consts_properties()
    ensures
        0 & WRITER == 0,
        0 & UPGRADEABLE_READER == 0,
        0 & BEING_UPGRADED == 0,
        0 & READER_MASK == 0,
        0 & MAX_READER_MASK == 0,
        0 & MAX_READER == 0,
        0 & READER == 0,
        WRITER == 0x8000_0000_0000_0000,
        UPGRADEABLE_READER == 0x4000_0000_0000_0000,
        BEING_UPGRADED == 0x2000_0000_0000_0000,
        READER_MASK == 0x0FFF_FFFF_FFFF_FFFF,
        MAX_READER_MASK == 0x1FFF_FFFF_FFFF_FFFF,
        MAX_READER == 0x1000_0000_0000_0000,
        WRITER & WRITER == WRITER,
        WRITER & !WRITER == 0,
        WRITER & BEING_UPGRADED == 0,
        WRITER & READER_MASK == 0,
        WRITER & MAX_READER_MASK == 0,
        WRITER & MAX_READER == 0,
        WRITER & UPGRADEABLE_READER == 0,
        BEING_UPGRADED & WRITER == 0,
        BEING_UPGRADED & UPGRADEABLE_READER == 0,
        UPGRADEABLE_READER & BEING_UPGRADED == 0,
        UPGRADEABLE_READER & READER_MASK == 0,
        UPGRADEABLE_READER & MAX_READER_MASK == 0,
        UPGRADEABLE_READER & MAX_READER == 0,
        BEING_UPGRADED & READER_MASK == 0,
        BEING_UPGRADED & MAX_READER_MASK == 0,
        BEING_UPGRADED & MAX_READER == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & WRITER == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & UPGRADEABLE_READER == UPGRADEABLE_READER,
        (UPGRADEABLE_READER | BEING_UPGRADED) & BEING_UPGRADED == BEING_UPGRADED,
        (UPGRADEABLE_READER | BEING_UPGRADED) & READER_MASK == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER_MASK == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER == 0,
        (WRITER | UPGRADEABLE_READER) & WRITER == WRITER,
        (WRITER | UPGRADEABLE_READER) & UPGRADEABLE_READER == UPGRADEABLE_READER,
        (WRITER | UPGRADEABLE_READER) & BEING_UPGRADED == 0,
        (WRITER | UPGRADEABLE_READER) & READER_MASK == 0,
        (WRITER | UPGRADEABLE_READER) & MAX_READER_MASK == 0,
        (WRITER | UPGRADEABLE_READER) & MAX_READER == 0,
{
    assert(0 & WRITER == 0) by (compute_only);
    assert(0 & UPGRADEABLE_READER == 0) by (compute_only);
    assert(0 & BEING_UPGRADED == 0) by (compute_only);
    assert(0 & READER_MASK == 0) by (compute_only);
    assert(0 & MAX_READER == 0) by (compute_only);
    assert(0 & MAX_READER_MASK == 0) by (compute_only);
    assert(0 & READER == 0) by (compute_only);
    assert(WRITER == 0x8000_0000_0000_0000) by (compute_only);
    assert(UPGRADEABLE_READER == 0x4000_0000_0000_0000) by (compute_only);
    assert(BEING_UPGRADED == 0x2000_0000_0000_0000) by (compute_only);
    assert(READER_MASK == 0x0FFF_FFFF_FFFF_FFFF) by (compute_only);
    assert(MAX_READER == 0x1000_0000_0000_0000) by (compute_only);
    assert(MAX_READER_MASK == 0x1FFF_FFFF_FFFF_FFFF) by (compute_only);
    assert(WRITER & WRITER == WRITER) by (compute_only);
    assert(WRITER & !WRITER == 0) by (compute_only);
    assert(WRITER & BEING_UPGRADED == 0) by (compute_only);
    assert(WRITER & READER_MASK == 0) by (compute_only);
    assert(WRITER & MAX_READER_MASK == 0) by (compute_only);
    assert(WRITER & MAX_READER == 0) by (compute_only);
    assert(WRITER & UPGRADEABLE_READER == 0) by (compute_only);
    assert(BEING_UPGRADED & WRITER == 0) by (compute_only);
    assert(BEING_UPGRADED & UPGRADEABLE_READER == 0) by (compute_only);
    assert(UPGRADEABLE_READER & BEING_UPGRADED == 0) by (compute_only);
    assert(UPGRADEABLE_READER & READER_MASK == 0) by (compute_only);
    assert(UPGRADEABLE_READER & MAX_READER_MASK == 0) by (compute_only);
    assert(UPGRADEABLE_READER & MAX_READER == 0) by (compute_only);
    assert(BEING_UPGRADED & READER_MASK == 0) by (compute_only);
    assert(BEING_UPGRADED & MAX_READER_MASK == 0) by (compute_only);
    assert(BEING_UPGRADED & MAX_READER == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & WRITER == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & UPGRADEABLE_READER == UPGRADEABLE_READER)
        by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & BEING_UPGRADED == BEING_UPGRADED)
        by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & READER_MASK == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER_MASK == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & WRITER == WRITER) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & UPGRADEABLE_READER == UPGRADEABLE_READER)
        by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & BEING_UPGRADED == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & READER_MASK == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & MAX_READER_MASK == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & MAX_READER == 0) by (compute_only);
}
1278
/// Derived facts about a single lock-word value `prev`: unpacking composite
/// mask tests into per-flag facts, the no-overflow bound for adding `READER`,
/// and relations between `READER_MASK`, `MAX_READER_MASK`, and `MAX_READER`.
/// Each case is discharged by `bit_vector` under the matching hypothesis.
proof fn lemma_consts_properties_value(prev: usize)
    ensures
        no_max_reader_overflow(prev) ==> prev + READER <= usize::MAX,
        prev & (WRITER | MAX_READER | BEING_UPGRADED) == 0 ==> {
            &&& prev & WRITER == 0
            &&& prev & BEING_UPGRADED == 0
            &&& prev & MAX_READER == 0
        },
        prev & (WRITER | UPGRADEABLE_READER) == 0 ==> {
            &&& prev & WRITER == 0
            &&& prev & UPGRADEABLE_READER == 0
        },
        prev & MAX_READER == 0 ==> prev & READER_MASK == prev & MAX_READER_MASK,
        prev & MAX_READER != 0 ==> prev & MAX_READER_MASK >= MAX_READER,
        prev & (WRITER | UPGRADEABLE_READER) == WRITER ==> {
            &&& prev & UPGRADEABLE_READER == 0
            &&& prev & WRITER == WRITER
        },
        prev & UPGRADEABLE_READER != 0 ==> prev >= UPGRADEABLE_READER,
        prev & UPGRADEABLE_READER == 0 ==> {
            ||| prev & (WRITER | UPGRADEABLE_READER) == 0
            ||| prev & (WRITER | UPGRADEABLE_READER) == WRITER
        },
{
    if no_max_reader_overflow(prev) {
        assert(prev + READER <= usize::MAX) by (bit_vector)
            requires
                prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
    }
    if prev & (WRITER | MAX_READER | BEING_UPGRADED) == 0 {
        assert(prev & WRITER == 0) by (bit_vector)
            requires
                prev & (WRITER | MAX_READER | BEING_UPGRADED) == 0,
        ;
        assert(prev & BEING_UPGRADED == 0) by (bit_vector)
            requires
                prev & (WRITER | MAX_READER | BEING_UPGRADED) == 0,
        ;
        assert(prev & MAX_READER == 0) by (bit_vector)
            requires
                prev & (WRITER | MAX_READER | BEING_UPGRADED) == 0,
        ;
    }
    if prev & (WRITER | UPGRADEABLE_READER) == 0 {
        assert(prev & WRITER == 0) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == 0,
        ;
        assert(prev & UPGRADEABLE_READER == 0) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == 0,
        ;
    }
    if prev & MAX_READER == 0 {
        assert(prev & READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                prev & MAX_READER == 0,
        ;
    }
    if prev & MAX_READER != 0 {
        assert(prev & MAX_READER_MASK >= MAX_READER) by (bit_vector)
            requires
                prev & MAX_READER != 0,
        ;
    }
    if prev & (WRITER | UPGRADEABLE_READER) == WRITER {
        assert(prev & UPGRADEABLE_READER == 0 && prev & WRITER == WRITER) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == WRITER,
        ;
    }
    if prev & UPGRADEABLE_READER != 0 {
        assert(prev >= UPGRADEABLE_READER) by (bit_vector)
            requires
                prev & UPGRADEABLE_READER != 0,
        ;
    }
    if prev & UPGRADEABLE_READER == 0 {
        assert(prev & (WRITER | UPGRADEABLE_READER) == 0 || prev & (WRITER | UPGRADEABLE_READER)
            == WRITER) by (bit_vector)
            requires
                prev & UPGRADEABLE_READER == 0,
        ;
    }
}
1365
1366proof fn lemma_consts_properties_prev_next(prev: usize, next: usize)
1367    ensures
1368        prev & READER_MASK < MAX_READER,
1369        next == prev | UPGRADEABLE_READER ==> {
1370            &&& next & UPGRADEABLE_READER != 0
1371            &&& next & WRITER == prev & WRITER
1372            &&& next & READER_MASK == prev & READER_MASK
1373            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
1374            &&& next & MAX_READER == prev & MAX_READER
1375            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
1376        },
1377        next == prev | BEING_UPGRADED ==> {
1378            &&& next & BEING_UPGRADED != 0
1379            &&& next & WRITER == prev & WRITER
1380            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
1381            &&& next & READER_MASK == prev & READER_MASK
1382            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
1383            &&& next & MAX_READER == prev & MAX_READER
1384        },
1385        next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0 ==> {
1386            &&& next & UPGRADEABLE_READER == 0
1387            &&& next & WRITER == prev & WRITER
1388            &&& next & READER_MASK == prev & READER_MASK
1389            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
1390            &&& next & MAX_READER == prev & MAX_READER
1391            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
1392        },
1393        next == prev - READER && prev & READER_MASK != 0 ==> {
1394            &&& next & READER_MASK == (prev & READER_MASK) - READER
1395            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER
1396            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
1397            &&& next & WRITER == prev & WRITER
1398            &&& next & MAX_READER == prev & MAX_READER
1399            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
1400        },
1401        next == prev - READER && prev & MAX_READER_MASK != 0 ==> {
1402            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER
1403            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
1404            &&& next & WRITER == prev & WRITER
1405            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
1406        },
1407        next == prev + READER && no_max_reader_overflow(prev) ==> {
1408            &&& next & READER_MASK == if (prev & READER_MASK) + READER == MAX_READER {
1409                0
1410            } else {
1411                (prev & READER_MASK) + READER
1412            }
1413            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) + READER
1414            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
1415            &&& next & WRITER == prev & WRITER
1416            &&& next & MAX_READER == if (prev & READER_MASK) + READER == MAX_READER {
1417                MAX_READER
1418            } else {
1419                prev & MAX_READER
1420            }
1421            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
1422        },
1423        next == prev & !WRITER ==> {
1424            &&& next & WRITER == 0
1425            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
            // (Tail of the `ensures` clause — the fn signature and the earlier
            // conjuncts/disjuncts are above this excerpt.)
            &&& next & READER_MASK == prev & READER_MASK
            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
            &&& next & MAX_READER == prev & MAX_READER
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
{
    // Proof body: one `if` per recognized transition of the lock-state word
    // (`prev` -> `next`), each discharging the matching `ensures` case with
    // `by (bit_vector)` sub-proofs.
    //
    // NOTE: a `by (bit_vector)` sub-proof does not inherit facts from the
    // enclosing proof context, which is why every assert below restates the
    // branch condition verbatim in its own `requires`.
    //
    // Unconditional fact: any value masked with READER_MASK is below MAX_READER.
    assert(prev & READER_MASK < MAX_READER) by (bit_vector);
    // Transition: set the UPGRADEABLE_READER bit. OR-ing in a single flag bit
    // leaves every other tracked field (WRITER, reader counts, BEING_UPGRADED)
    // unchanged.
    if next == prev | UPGRADEABLE_READER {
        assert(next & UPGRADEABLE_READER != 0) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
    }
    // Transition: set the BEING_UPGRADED bit. Same shape as above — the flag
    // becomes nonzero and all other fields are preserved.
    if next == prev | BEING_UPGRADED {
        assert(next & BEING_UPGRADED != 0) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
    }
    // Transition: clear the UPGRADEABLE_READER bit by subtraction. Subtraction
    // only clears exactly that bit because the branch condition guarantees the
    // bit was set in `prev` (no borrow into neighboring fields).
    if next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0 {
        assert(next & UPGRADEABLE_READER == 0) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
    }
    // Transition: a reader releases (count decremented by READER) while the
    // narrow READER_MASK count is known nonzero. Both the narrow and the wide
    // count fields drop by READER; all flag bits are preserved.
    if next == prev - READER && prev & READER_MASK != 0 {
        assert(next & READER_MASK == (prev & READER_MASK) - READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
    }
    // Transition: a reader releases under the weaker hypothesis that only the
    // wide MAX_READER_MASK count is nonzero. Correspondingly, only the wide
    // count decrement and flag-bit preservation are proved here — nothing is
    // claimed about READER_MASK or MAX_READER in this branch.
    if next == prev - READER && prev & MAX_READER_MASK != 0 {
        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
    }
    // Transition: a reader acquires (count incremented by READER) while the
    // wide count has headroom (prev & MAX_READER_MASK < MAX_READER_MASK).
    if next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK {
        // The narrow count wraps: when it sits at MAX_READER - READER, adding
        // READER carries out of READER_MASK, leaving the narrow field at 0 ...
        assert(next & READER_MASK == if prev & READER_MASK == MAX_READER - READER {
            0
        } else {
            (prev & READER_MASK) + READER
        }) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) + READER) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        // ... and the carry lands in the MAX_READER bit, which becomes set
        // exactly when the narrow count reaches MAX_READER.
        assert(next & MAX_READER == if (prev & READER_MASK) + READER == MAX_READER {
            MAX_READER
        } else {
            prev & MAX_READER
        }) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
    }
    // Transition: clear the WRITER bit via AND with the complement (`!WRITER`
    // is bitwise NOT). Masking a bit off cannot disturb any other field.
    if next == prev & !WRITER {
        assert(next & WRITER == 0) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
    }
}
1616
1617} // verus!