// ostd/sync/rwlock.rs

1// SPDX-License-Identifier: MPL-2.0
2use vstd::atomic_ghost::*;
3use vstd::cell::{self, pcell::*, CellId};
4use vstd::pcm::Loc;
5use vstd::prelude::*;
6use vstd::tokens::frac::{self, Empty, Frac, FracGhost};
7use vstd_extra::resource::ghost_resource::{csum::*, excl::*, tokens::*};
8use vstd_extra::sum::*;
9use vstd_extra::{prelude::*, resource};
10
11use alloc::sync::Arc;
12use core::char::MAX;
13use core::{
14    cell::UnsafeCell,
15    fmt,
16    marker::PhantomData,
17    ops::{Deref, DerefMut},
18    sync::atomic::{
19        // AtomicUsize,
20        Ordering::{AcqRel, Acquire, Relaxed, Release},
21    },
22};
23
24use super::{
25    guard::{GuardTransfer, SpinGuardian},
26    PreemptDisabled,
27};
28//use crate::task::atomic_mode::AsAtomicModeGuard;
29
30verus! {
31
32axiom fn is_exclusive<T>(tracked value: &mut PointsTo<T>, tracked other: &PointsTo<T>)
33    ensures
34        *value == *old(value),
35        value.id() != other.id(),
36;
37
38const MAX_READER_U64: u64 = MAX_READER as u64;
39
40spec const V_MAX_READ_RETRACT_FRACS_SPEC: u64 = (MAX_READER_MASK + 1) as u64;
41
42#[verifier::when_used_as_spec(V_MAX_READ_RETRACT_FRACS_SPEC)]
43exec const V_MAX_READ_RETRACT_FRACS: u64
44    ensures
45        V_MAX_READ_RETRACT_FRACS == V_MAX_READ_RETRACT_FRACS_SPEC,
46        V_MAX_READ_RETRACT_FRACS == MAX_READER_MASK + 1,
47        V_MAX_READ_RETRACT_FRACS < u64::MAX,
48{
49    assert(MAX_READER_MASK + 1 < u64::MAX) by (compute_only);
50    (MAX_READER_MASK + 1) as u64
51}
52
53/// The token reserved in the lock when the write permission is given out.
54type NoPerm<T> = Empty<PointsTo<T>>;
55
56/// Half of the permission for read access, one for `RwLockUpgradeableGuard` and the other for all `RwLockReadGuard`s.
57type HalfPerm<T> = Frac<PointsTo<T>>;
58
59/// The permission for read access can be further split into `MAX_READER` pieces.
60type ReadPerm<T> = (HalfPerm<T>, OneLeftKnowledge<HalfPerm<T>, NoPerm<T>, 3>);
61
62tracked struct RwPerms<T> {
63    /// This token tracks whether the write permission is given out. If it is `Left`, it stores the knowledge that
64    /// there are active readers because the existence of `HalfPerm` resource for read access.
65    /// If it is `Right`, we know there is an active writer and there is no active reader, because there is a `NoPerm`
66    /// indicating the absence of `PointsTo<T>`.
67    core_token: SumResource<HalfPerm<T>, NoPerm<T>, 3>,
68    /// The permission to retract a `READER` count. Its total quantity tracks the gap between
69    /// the number of `try_read` increments recorded in the lock atomic and the number of active
70    /// `RwLockReadGuard`s (created and ongoing creation that will succeed) represented by `read_guard_token`.
71    /// It can be splited up to `V_MAX_READ_RETRACT_FRACS:= 2 * MAX_READER` pieces,
72    /// which allows at most `2*MAX_READER - 1` `try_read` attempts that will fail to acquire the lock.
73    read_retract_token: TokenResource<V_MAX_READ_RETRACT_FRACS>,
74    /// The permission to retract the set of `UPGRADEABLE_READER` bit.
75    upread_retract_token: Option<UniqueToken>,
76    /// Tracks whether there is a live `RwLockUpgradeableGuard`, also stores half of the permission for read access.
77    upreader_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>>,
78    /// Tracks the number of live `RwLockReadGuard`s. If it is `Left`, it stores the remaining read permissions.
79    /// If it is `Right`, it stores an empty token indicating the permission has been given out.
80    read_guard_token: Sum<
81        FracResource<ReadPerm<T>, MAX_READER_U64>,
82        Empty<ReadPerm<T>, MAX_READER_U64>,
83    >,
84}
85
86ghost struct RwId {
87    core_token_id: Loc,
88    frac_id: Loc,
89    read_retract_token_id: Loc,
90    upread_retract_token_id: Loc,
91    read_guard_token_id: Loc,
92}
93
94/// The number of `try_read` operations recorded in the lock atomic (created and ongoing) can never reach `2*MAX_READER` to avoid overflow.
95/// **NOTE**: We *ASSUME* this property always holds without any proof. We believe this is true in practice because:
96/// - More than `2^61` `try_read` operations are required to trigger the overflow concurrently, which is absurd in real world scenarios.
97/// - If one tries to create a huge number (more than `2*MAX_READER`) of `RwLockReadGuard`s in a loop with `mem::forget`, it will take years and
98/// will be prevented by the `MAX_READER` check.
99pub closed spec fn no_max_reader_overflow(v: usize) -> bool {
100    v & MAX_READER_MASK < MAX_READER_MASK
101}
102
103struct_with_invariants! {
104/// Spin-based Read-write Lock
105///
106/// # Overview
107///
108/// This lock allows for multiple readers, or at most one writer to access
109/// at any point in time. The writer of this lock has exclusive access to
110/// modify the underlying data, while the readers are allowed shared and
111/// read-only access.
112///
113/// The writing and reading portions cannot be active simultaneously, when
114/// one portion is in progress, the other portion will spin-wait. This is
115/// suitable for scenarios where the lock is expected to be held for short
116/// periods of time, and the overhead of context switching is higher than
117/// the cost of spinning.
118///
119/// In addition to traditional read and write locks, this implementation
120/// provides the upgradeable read lock (`upread lock`). The `upread lock`
121/// can be upgraded to write locks atomically, useful in scenarios
122/// where a decision to write is made after reading.
123///
124/// The type parameter `T` represents the data that this lock is protecting.
125/// It is necessary for `T` to satisfy [`Send`] to be shared across tasks and
126/// [`Sync`] to permit concurrent access via readers. The [`Deref`] method (and
127/// [`DerefMut`] for the writer) is implemented for the RAII guards returned
128/// by the locking methods, which allows for the access to the protected data
129/// while the lock is held.
130///
131/// # Usage
132/// The lock can be used in scenarios where data needs to be read frequently
133/// but written to occasionally.
134///
135/// Use `upread lock` in scenarios where related checking is performed before
136/// modification to effectively avoid deadlocks and improve efficiency.
137///
138/// This lock should not be used in scenarios where lock-holding times are
139/// long as it can lead to CPU resource wastage due to spinning.
140///
141/// # About Guard
142///
143/// See the comments of [`SpinLock`].
144///
145/// # Examples
146///
147/// ```
148/// use ostd::sync::RwLock;
149///
150/// let lock = RwLock::new(5)
151///
152/// // many read locks can be held at once
153/// {
154///     let r1 = lock.read();
155///     let r2 = lock.read();
156///     assert_eq!(*r1, 5);
157///     assert_eq!(*r2, 5);
158///
159///     // Upgradeable read lock can share access to data with read locks
160///     let r3 = lock.upread();
161///     assert_eq!(*r3, 5);
162///     drop(r1);
163///     drop(r2);
164///     // read locks are dropped at this point
165///
166///     // An upread lock can only be upgraded successfully after all the
167///     // read locks are released, otherwise it will spin-wait.
168///     let mut w1 = r3.upgrade();
169///     *w1 += 1;
170///     assert_eq!(*w1, 6);
171/// }   // upread lock are dropped at this point
172///
173/// {
174///     // Only one write lock can be held at a time
175///     let mut w2 = lock.write();
176///     *w2 += 1;
177///     assert_eq!(*w2, 7);
178/// }   // write lock is dropped at this point
179/// ```
180///
181/// [`SpinLock`]: super::SpinLock
182pub struct RwLock<T  /* : ?Sized*/ , Guard /* = PreemptDisabled*/ > {
183    guard: PhantomData<Guard>,
184    /// The internal representation of the lock state is as follows:
185    /// - **Bit 63:** Writer lock.
186    /// - **Bit 62:** Upgradeable reader lock.
187    /// - **Bit 61:** Indicates if an upgradeable reader is being upgraded.
188    /// - **Bits 60-0:** Reader lock count.
189    lock: AtomicUsize<_, RwPerms<T>,_>,
190    // val: UnsafeCell<T>,
191    val: PCell<T>,
192    v_id: Ghost<RwId>,
193}
194
195/// This invariant holds at any time, i.e. not violated during any method execution.
196closed spec fn wf(self) -> bool {
197    invariant on lock with (val, guard, v_id) is (v: usize, g: RwPerms<T>) {
198        // BITS VALUE
199        let has_writer_bit: bool = (v & WRITER) != 0;
200        let has_upgrade_bit: bool = (v & UPGRADEABLE_READER) != 0;
201        let has_max_reader_bit: bool = (v & MAX_READER) != 0;
202        // The total number of `try_read` attempts recorded in the lock atomic, including created `RwLockReadGuard`s
203        // and those who are trying, no matter they will succeed or fail.
204        let total_reader_bits: int = (v & MAX_READER_MASK) as int;
205        // The clamped value represented in the counter bits. This counts the maximum number of active `RwLockReadGuard`s.
206        // NOTE: This does not mean there are actually this number of active `RwLockReadGuard`s. The actual number of successfully
207        // created/creating `RwLockReadGuard`s can be smaller than this number, because previously created `RwLockReadGuard`s may be dropped.
208        let reader_bits: int = if has_max_reader_bit { MAX_READER as int } else { (v & READER_MASK) as int };
209
210        // ACTUAL NUMBER OF ACTIVE GUARDS.
211        // The number is tracked by the ghost resources remained in the lock.
212        // By active, we mean from the perspective the lock, the permissions for these guards are given out.
213        // The actual guards may be still being created, but they must be successfully created finally.
214        // This invariant maintains the consistency between the ghost resources and the lock atomic value.
215
216        // Whether there is an active writer
217        let active_writer: bool = g.core_token.is_right();
218        // The number of active `RwLockUpgradeableGuard`, which can only be 0 or 1.
219        let active_upgrade_guard: bool = !active_writer && g.upreader_guard_token is None;
220        // The number of active `RwLockReadGuard`s.
221        let active_read_guards: int = if g.read_guard_token is Left {
222            MAX_READER_U64 - g.read_guard_token.left().frac()
223        } else {
224            0
225        };
226        // The first `try_upread` that fails, which has not returned yet.
227        let pending_failed_upread_attempt: bool = g.upread_retract_token is None;
228        // The number of `try_read` attempts that will fail.
229        let failed_reader_attempts: int = V_MAX_READ_RETRACT_FRACS - g.read_retract_token.frac();
230
231        &&& if g.core_token.is_left() {
232            g.read_guard_token is Left
233        } else {
234            &&& g.upreader_guard_token is None
235            &&& g.read_guard_token is Right
236        }
237        // The `UPGRADEABLE_READER` bit is set iff there is an active `RwLockUpgradeableGuard` or a pending failed `try_upread` attempt.
238        &&& has_upgrade_bit <==> (active_upgrade_guard || pending_failed_upread_attempt)
239        // An active `RwLockUpgradeableGuard` cannot coexist with a pending failed `try_upread` attempt.
240        &&& !(active_upgrade_guard && pending_failed_upread_attempt)
241        // The `READER` bits count the number of all active read guards and pending failed `try_read` attempts.
242        &&& total_reader_bits == active_read_guards + failed_reader_attempts
243        // There is an active `RwLockWriteGuard` iff the `WRITER` bit is set.
244        &&& active_writer <==> has_writer_bit
245        // The number of active `RwLockReadGaurd`s is less than or equal to the `READER` bits.
246        &&& 0 <= active_read_guards <= reader_bits <= total_reader_bits
247        // The core invariant of `RwLock`: there are no simultaneous active writers and readers.
248        &&& !(active_writer && (active_read_guards + if active_upgrade_guard { 1int } else { 0 }) > 0)
249        &&& g.core_token.id() == v_id@.core_token_id
250        &&& g.core_token.wf()
251        &&& g.core_token.is_left() ==> {
252            &&& !g.core_token.is_resource_owner()
253            &&& g.core_token.frac() == 1
254        }
255        &&& g.core_token.is_right() ==> {
256            let empty = g.core_token.resource_right();
257            &&& empty.id() == v_id@.frac_id
258            &&& g.core_token.frac() == 2
259            &&& g.core_token.has_resource()
260        }
261        &&& g.read_retract_token.wf()
262        &&& g.read_retract_token.id() == v_id@.read_retract_token_id
263        &&& g.upread_retract_token is Some ==>
264            {
265                let token = g.upread_retract_token->Some_0;
266                &&& token.wf()
267                &&& token.id() == v_id@.upread_retract_token_id
268            }
269        &&& g.upreader_guard_token is Some ==> {
270            let token = g.upreader_guard_token->Some_0;
271            wf_upgradeable_guard_token(v_id@.core_token_id, v_id@.frac_id, val.id(), token)
272        }
273        &&& match g.read_guard_token {
274            Sum::Left(token) => {
275                &&& token.wf()
276                &&& token.id() == v_id@.read_guard_token_id
277                &&& token.not_empty() ==> {
278                    let resource = token.resource();
279                    let read_half_cell_perm = resource.0;
280                    let mode_knowledge = resource.1;
281                    &&& mode_knowledge.id() == v_id@.core_token_id
282                    &&& read_half_cell_perm.id() == v_id@.frac_id
283                    &&& read_half_cell_perm.resource().id() == val.id()
284                    &&& read_half_cell_perm.frac() == 1
285                }
286            },
287            Sum::Right(empty) => {
288                &&& empty.id() == v_id@.read_guard_token_id
289            },
290        }
291
292    }
293}
294
295}
296
297const READER: usize = 1;
298
299const WRITER: usize = 1 << (usize::BITS - 1);
300
301const UPGRADEABLE_READER: usize = 1 << (usize::BITS - 2);
302
303const BEING_UPGRADED: usize = 1 << (usize::BITS - 3);
304
305/// This bit is reserved as an overflow sentinel.
306/// We intentionally cap read guards before counter growth can affect
307/// `BEING_UPGRADED` / `UPGRADEABLE_READER` / `WRITER` bits.
308/// This is defense-in-depth with no extra runtime cost.
309///
310/// This follows the same strategy as Rust std's `Arc`,
311/// which uses `isize::MAX` as a sentinel to prevent its reference count
312/// from overflowing into values that could compromise safety.
313///
314/// On 64-bit platforms (the only targets Asterinas supports),
315/// a counter overflow is not a practical concern:
316/// incrementing one-by-one from zero to `MAX_READER` (2^60)
317/// would take hundreds of years even at billions of increments per second.
318/// Nevertheless, this sentinel provides an extra layer of safety at no runtime cost.
319const MAX_READER: usize = 1 << (usize::BITS - 4);
320
321/// Used only in verification. Excluding the `MAX_READER` bit.
322const READER_MASK: usize = usize::MAX >> 4;
323
324/// Used only in verification. Including the `MAX_READER` bit.
325const MAX_READER_MASK: usize = usize::MAX >> 3;
326
327impl<T, G> RwLock<T, G> {
328    /// Returns the unique [`CellId`](https://verus-lang.github.io/verus/verusdoc/vstd/cell/struct.CellId.html) of the internal `PCell<T>`.
329    pub closed spec fn cell_id(self) -> cell::CellId {
330        self.val.id()
331    }
332
333    pub closed spec fn core_token_id(self) -> Loc {
334        self.v_id@.core_token_id
335    }
336
337    pub closed spec fn frac_id(self) -> Loc {
338        self.v_id@.frac_id
339    }
340
341    pub closed spec fn upread_retract_token_id(self) -> Loc {
342        self.v_id@.upread_retract_token_id
343    }
344
345    pub closed spec fn read_guard_token_id(self) -> Loc {
346        self.v_id@.read_guard_token_id
347    }
348
349    /// Encapsulates the invariant described in the *Invariant* section of [`RwLock`].
350    #[verifier::type_invariant]
351    pub closed spec fn type_inv(self) -> bool {
352        self.wf()
353    }
354}
355
356closed spec fn wf_upgradeable_guard_token<T>(
357    core_token_id: Loc,
358    frac_id: Loc,
359    cell_id: CellId,
360    token: OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>,
361) -> bool {
362    let half_cell_perm = token.resource();
363    &&& token.id() == core_token_id
364    &&& half_cell_perm.id() == frac_id
365    &&& half_cell_perm.resource().id() == cell_id
366    &&& token.has_resource()
367    &&& half_cell_perm.frac() == 1
368    &&& token.wf()
369}
370
371impl<T, G> RwLock<T, G> {
372    /// Creates a new spin-based read-write lock with an initial value.
373    pub const fn new(val: T) -> Self {
374        let (val, Tracked(perm)) = PCell::new(val);
375
376        // Proof code
377        proof {
378            lemma_consts_properties();
379        }
380        let tracked mut frac_perm = Frac::<PointsTo<T>>::new(perm);
381        let tracked read_half_cell_perm = frac_perm.split(1int);
382        let ghost frac_id = frac_perm.id();
383        let tracked mut core_token = SumResource::alloc_left(frac_perm);
384        let tracked read_retract_token = TokenResource::<V_MAX_READ_RETRACT_FRACS>::alloc(());
385        let tracked upread_retract_token = UniqueToken::alloc(());
386        let tracked upreader_guard_token = core_token.split_one_left_owner();
387        let tracked left_token = core_token.split_one_left_knowledge();
388        let tracked read_guard_token = FracResource::<ReadPerm<T>, MAX_READER_U64>::alloc(
389            (read_half_cell_perm, left_token),
390        );
391        let ghost v_id = RwId {
392            frac_id,
393            core_token_id: core_token.id(),
394            upread_retract_token_id: upread_retract_token.id(),
395            read_retract_token_id: read_retract_token.id(),
396            read_guard_token_id: read_guard_token.id(),
397        };
398        let tracked perms = RwPerms {
399            core_token,
400            read_retract_token,
401            upread_retract_token: Some(upread_retract_token),
402            upreader_guard_token: Some(upreader_guard_token),
403            read_guard_token: Sum::Left(read_guard_token),
404        };
405
406        Self {
407            guard: PhantomData,
408            //lock: AtomicUsize::new(0),
409            lock: AtomicUsize::new(Ghost((val, PhantomData, Ghost(v_id))), 0, Tracked(perms)),
410            //val: UnsafeCell::new(val),
411            val: val,
412            v_id: Ghost(v_id),
413        }
414    }
415}
416
417impl<T  /*: ?Sized*/ , G: SpinGuardian> RwLock<T, G> {
418    /// Acquires a read lock and spin-wait until it can be acquired.
419    ///
420    /// The calling thread will spin-wait until there are no writers or
421    /// upgrading upreaders present. There is no guarantee for the order
422    /// in which other readers or writers waiting simultaneously will
423    /// obtain the lock.
424    #[verifier::exec_allows_no_decreases_clause]
425    pub fn read(&self) -> RwLockReadGuard<T, G> {
426        loop {
427            if let Some(readguard) = self.try_read() {
428                return readguard;
429            } else {
430                core::hint::spin_loop();
431            }
432        }
433    }
434
435    /// Acquires a write lock and spin-wait until it can be acquired.
436    ///
437    /// The calling thread will spin-wait until there are no other writers,
438    /// upreaders or readers present. There is no guarantee for the order
439    /// in which other readers or writers waiting simultaneously will
440    /// obtain the lock.
441    #[verifier::exec_allows_no_decreases_clause]
442    pub fn write(&self) -> RwLockWriteGuard<T, G> {
443        loop {
444            if let Some(writeguard) = self.try_write() {
445                return writeguard;
446            } else {
447                core::hint::spin_loop();
448            }
449        }
450    }
451
452    /// Acquires an upreader and spin-wait until it can be acquired.
453    ///
454    /// The calling thread will spin-wait until there are no other writers,
455    /// or upreaders. There is no guarantee for the order in which other
456    /// readers or writers waiting simultaneously will obtain the lock.
457    ///
458    /// Upreader will not block new readers until it tries to upgrade. Upreader
459    /// and reader do not differ before invoking the upgrade method. However,
460    /// only one upreader can exist at any time to avoid deadlock in the
461    /// upgrade method.
462    #[verifier::exec_allows_no_decreases_clause]
463    pub fn upread(&self) -> RwLockUpgradeableGuard<T, G> {
464        loop {
465            if let Some(guard) = self.try_upread() {
466                return guard;
467            } else {
468                core::hint::spin_loop();
469            }
470        }
471    }
472
473    /// Attempts to acquire a read lock.
474    ///
475    /// This function will never spin-wait and will return immediately.
476    pub fn try_read(&self) -> Option<RwLockReadGuard<T, G>> {
477        proof_decl!{
478            let tracked mut read_token: Option<Frac<ReadPerm<T>,MAX_READER_U64>> = None;
479            let tracked mut retract_read_token: Option<Token<V_MAX_READ_RETRACT_FRACS>> = None;
480        }
481        proof!{
482            use_type_invariant(self);
483            lemma_consts_properties();
484        }
485        let guard = G::read_guard();
486
487        // let lock = self.lock.fetch_add(READER, Acquire);
488        let lock =
489            atomic_with_ghost!(
490            self.lock => fetch_add(READER);
491            update prev -> next;
492            ghost g => {
493                let prev_usize = prev as usize;
494                let next_usize = next as usize;
495                assume (no_max_reader_overflow(prev_usize));
496                lemma_consts_properties_value(prev_usize);
497                lemma_consts_properties_prev_next(prev_usize, next_usize);
498                if prev_usize & (WRITER | MAX_READER | BEING_UPGRADED) == 0 {
499                    let tracked mut tmp = g.read_guard_token.tracked_take_left();
500                    read_token = Some(tmp.split_one());
501                    g.read_guard_token = Sum::Left(tmp);
502                } else {
503                    retract_read_token = Some(g.read_retract_token.split_one());
504                }
505            }
506        );
507        if lock & (WRITER | MAX_READER | BEING_UPGRADED) == 0 {
508            Some(
509                RwLockReadGuard {
510                    inner: self,
511                    guard,
512                    v_token: Tracked(read_token.tracked_unwrap()),
513                },
514            )
515        } else {
516            // self.lock.fetch_sub(READER, Release);
517            atomic_with_ghost!(
518                self.lock => fetch_sub(READER);
519                update prev -> next;
520                ghost g => {
521                    let prev_usize = prev as usize;
522                    let next_usize = next as usize;
523                    lemma_consts_properties_value(next_usize);
524                    lemma_consts_properties_prev_next(prev_usize, next_usize);
525                    g.read_retract_token.combine(retract_read_token.tracked_unwrap());
526                }
527            );
528            None
529        }
530    }
531
532    /// Attempts to acquire a write lock.
533    ///
534    /// This function will never spin-wait and will return immediately.
535    pub fn try_write(&self) -> Option<RwLockWriteGuard<T, G>> {
536        proof_decl!{
537            let tracked mut guard_perm: Option<PointsTo<T>> = None;
538            let tracked mut guard_token: Option<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>> = None;
539        }
540        proof!{
541            use_type_invariant(self);
542            lemma_consts_properties();
543        }
544
545        let guard = G::guard();
546        // if self
547        //     .lock
548        //     .compare_exchange(0, WRITER, Acquire, Relaxed)
549        //     .is_ok()
550        if atomic_with_ghost!(
551            self.lock => compare_exchange(0, WRITER);
552            update prev -> next;
553            returning res;
554            ghost g => {
555                let prev_usize = prev as usize;
556                let next_usize = next as usize;
557                if res is Ok {
558                    // Retract the fractional permission for read access.
559                    let tracked read_guard_token = g.read_guard_token.tracked_take_left();
560                    let tracked (read_resource, read_empty) = read_guard_token.take_resource();
561                    g.read_guard_token = Sum::Right(read_empty);
562                    let tracked (read_half_cell_perm, left_token) = read_resource;
563                    g.core_token.join_one_left_knowledge(left_token);
564                    // Retract the fractional permission for upgradeable reader.
565                    let tracked upreader_guard_token = g.upreader_guard_token.tracked_take();
566                    g.core_token.join_one_left_owner(upreader_guard_token);
567                    // Combine the two halves of the permission for read access to get the full permission and give it out.
568                    let tracked mut pointsto = g.core_token.take_resource_left();
569                    pointsto.combine(read_half_cell_perm);
570                    let tracked (pointsto, empty) = pointsto.take_resource();
571                    guard_perm = Some(pointsto);
572                    g.core_token.change_to_right(empty);
573                    guard_token = Some(g.core_token.split_one_right_knowledge());
574                }
575            }
576        ).is_ok() {
577            Some(
578                RwLockWriteGuard {
579                    inner: self,
580                    guard,
581                    v_perm: Tracked(guard_perm.tracked_unwrap()),
582                    v_token: Tracked(guard_token.tracked_unwrap()),
583                },
584            )
585        } else {
586            None
587        }
588    }
589
590    /// Attempts to acquire an upread lock.
591    ///
592    /// This function will never spin-wait and will return immediately.
593    pub fn try_upread(&self) -> Option<RwLockUpgradeableGuard<T, G>> {
594        proof_decl!{
595            let tracked mut upgrade_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>> = None;
596            let tracked mut retract_upgrade_token: Option<UniqueToken> = None;
597        }
598        proof!{
599            use_type_invariant(self);
600            lemma_consts_properties();
601        }
602        let guard = G::guard();
603        // let lock = self.lock.fetch_or(UPGRADEABLE_READER, Acquire) & (WRITER | UPGRADEABLE_READER);
604        let lock =
605            atomic_with_ghost!(
606            self.lock => fetch_or(UPGRADEABLE_READER);
607            update prev -> next;
608            ghost g => {
609                lemma_consts_properties_value(prev);
610                lemma_consts_properties_prev_next(prev, next);
611                if prev & (WRITER | UPGRADEABLE_READER) == 0 {
612                    upgrade_guard_token = Some(g.upreader_guard_token.tracked_take());
613                }
614                else if prev & (WRITER | UPGRADEABLE_READER) == WRITER {
615                    retract_upgrade_token = Some(g.upread_retract_token.tracked_take());
616                }
617            }
618        )
619            & (WRITER | UPGRADEABLE_READER);
620        if lock == 0 {
621            return Some(
622                RwLockUpgradeableGuard {
623                    inner: self,
624                    guard,
625                    v_token: Tracked(upgrade_guard_token.tracked_unwrap()),
626                },
627            );
628        } else if lock == WRITER {
629            // self.lock.fetch_sub(UPGRADEABLE_READER, Release);
630            atomic_with_ghost!(
631                self.lock => fetch_sub(UPGRADEABLE_READER);
632                update prev -> next;
633                ghost g => {
634                    let prev_usize = prev as usize;
635                    let next_usize = next as usize;
636                    lemma_consts_properties_value(prev_usize);
637                    lemma_consts_properties_prev_next(prev_usize, next_usize);
638                    if g.upread_retract_token is Some {
639                        let tracked mut token = retract_upgrade_token.tracked_unwrap();
640                        token.validate_with_other(g.upread_retract_token.tracked_borrow());
641                    }
642                    else{
643                        g.upread_retract_token= retract_upgrade_token;
644                    }
645                }
646            );
647        }
648        None
649    }
650}
651
652/*
653impl<T, G: SpinGuardian> RwLock<T, G> {
654    /// Returns a mutable reference to the underlying data.
655    ///
656    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
657    /// already statically guaranteed that access to the data is exclusive.
658    pub fn get_mut(&mut self) -> &mut T {
659        self.val.get_mut()
660    }
661
662    /// Returns a raw pointer to the underlying data.
663    ///
664    /// This method is safe, but it's up to the caller to ensure that access to the data behind it
665    /// is still safe.
666    pub(super) fn as_ptr(&self) -> *mut T {
667        self.val.get()
668    }
669}*/
670/* the trait `core::fmt::Debug` is not implemented for `vstd::cell::pcell::PCell<T>`
671impl<T: ?Sized + fmt::Debug, G> fmt::Debug for RwLock<T, G> {
672    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
673        fmt::Debug::fmt(&self.val, f)
674    }
675}*/
676/// Because there can be more than one readers to get the T's immutable ref,
677/// so T must be Sync to guarantee the sharing safety.
678#[verifier::external]
679unsafe impl<T: Send, G> Send for RwLock<T, G> {}
680
681#[verifier::external]
682unsafe impl<T: Send + Sync, G> Sync for RwLock<T, G> {}
683
684impl<T /*: ?Sized*/, G: SpinGuardian> !Send for RwLockWriteGuard<'_, T, G> {}
685#[verifier::external]
686unsafe impl<T: Sync, G: SpinGuardian> Sync for RwLockWriteGuard<'_, T, G> {}
687
688impl<T /*: ?Sized*/, G: SpinGuardian> !Send for RwLockReadGuard<'_, T, G> {}
689#[verifier::external]
690unsafe impl<T: Sync, G: SpinGuardian> Sync for RwLockReadGuard<'_, T, G> {}
691
692impl<T /*: ?Sized*/, G: SpinGuardian> !Send for RwLockUpgradeableGuard<'_, T, G> {}
693#[verifier::external]
694unsafe impl<T: Sync, G: SpinGuardian> Sync for RwLockUpgradeableGuard<'_, T, G> {}
695
696/// A guard that provides immutable data access.
697#[verifier::reject_recursive_types(T)]
698#[verifier::reject_recursive_types(G)]
699#[clippy::has_significant_drop]
700#[must_use]
701pub struct RwLockReadGuard<'a, T /*: ?Sized*/, G: SpinGuardian> {
702    guard: G::ReadGuard,
703    inner: &'a RwLock<T, G>,
704    v_token: Tracked<Frac<ReadPerm<T>, MAX_READER_U64>>,
705}
706
707/*
708impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockReadGuard<'_, T, G> {
709    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
710        self.guard.as_atomic_mode_guard()
711    }
712}
713*/
714
impl<'a, T, G: SpinGuardian> RwLockReadGuard<'a, T, G> {
    /// Well-formedness of a live read guard: the guard's fractional token is
    /// tied to this lock's ghost locations and holds exactly one share of
    /// each fractional resource it contains.
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool {
        let resource = self.v_token@.resource();
        let read_half_cell_perm = resource.0;
        let mode_knowledge = resource.1;
        // The token's components agree with the lock's ghost-state ids.
        &&& self.inner.core_token_id() == mode_knowledge.id()
        &&& self.inner.frac_id() == read_half_cell_perm.id()
        &&& self.inner.cell_id() == read_half_cell_perm.resource().id()
        &&& self.v_token@.id() == self.inner.read_guard_token_id()
        // Exactly one share of each fractional resource is held.
        &&& read_half_cell_perm.frac() == 1
        &&& self.v_token@.frac() == 1
    }

    /// The value stored in the lock.
    pub closed spec fn value(self) -> T {
        *self.v_token@.resource().0.resource().value()
    }

    /// The value stored in the lock. It is an alias of `Self::value`.
    pub open spec fn view(self) -> T {
        self.value()
    }
}
739
impl<T  /*: ?Sized*/ , G: SpinGuardian> Deref for RwLockReadGuard<'_, T, G> {
    type Target = T;

    /// Borrows the protected value; the guard's fractional cell permission
    /// justifies the `PCell::borrow` call.
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T
    {
        proof!{
            use_type_invariant(self);
        }
        // unsafe { &*self.inner.val.get() }
        // The internal implementation of `PCell<T>::borrow` is exactly unsafe { &(*(*self.ucell).get()) },
        // and here we verify that we have the permission to call `borrow`.
        self.inner.val.borrow(Tracked(self.v_token.borrow().borrow().0.borrow()))
    }
}
755/* impl<T: ?Sized, R: Deref<Target = RwLock<T, G>> + Clone, G: SpinGuardian> Drop
756    for RwLockReadGuard_<T, R, G>
757{
758    fn drop(&mut self) {
759        self.inner.lock.fetch_sub(READER, Release);
760    }
761} */
762
impl<T  /*: ?Sized*/ , G: SpinGuardian> RwLockReadGuard<'_, T, G> {
    /// VERUS LIMITATION: We implement `drop` and call it manually because Verus's support for `Drop` is incomplete for now.
    ///
    /// Releases the read lock: decrements the reader count and merges this
    /// guard's fractional ghost token back into the lock's pooled token.
    fn drop(self) {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        let Tracked(token) = self.v_token;
        // self.inner.lock.fetch_sub(READER, Release);
        atomic_with_ghost!(
            self.inner.lock => fetch_sub(READER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                // NOTE(review): unproven `assume` — a gap in the proof.
                // TODO: derive this from the atomic's invariant instead (a
                // live read guard should imply the reader count is in range).
                assume (no_max_reader_overflow(prev_usize));
                lemma_consts_properties_value(next_usize);
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                // Validate our mode knowledge against the core token, then
                // return this guard's reader fraction to the pooled token.
                g.core_token.validate_with_one_left_knowledge(&token.borrow().1);
                let tracked mut tmp = g.read_guard_token.tracked_take_left();
                tmp.combine(token);
                g.read_guard_token = Sum::Left(tmp);
            }
        );
    }
}
790
791/*
792impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockReadGuard<'_, T, G> {
793    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
794        fmt::Debug::fmt(&**self, f)
795    }
796}*/
/// A guard that provides mutable data access.
///
/// Holds the full `PointsTo` cell permission, so the holder has exclusive
/// access to the protected value until the guard is dropped.
#[verifier::reject_recursive_types(T)]
#[verifier::reject_recursive_types(G)]
pub struct RwLockWriteGuard<'a, T /*: ?Sized*/, G: SpinGuardian> {
    // Guardian-specific (write) guard.
    guard: G::Guard,
    // The lock this guard was taken from; used for data access and release.
    inner: &'a RwLock<T, G>,
    /// Ghost permission for verification
    v_perm: Tracked<PointsTo<T>>,
    // Ghost knowledge that the lock's core token is in write (right) mode.
    v_token: Tracked<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>>,
}
807
impl<'a, T, G: SpinGuardian> RwLockWriteGuard<'a, T, G> {
    /// Well-formedness of a live write guard: its cell permission and mode
    /// knowledge are tied to this lock's ghost locations.
    #[verifier::type_invariant]
    spec fn type_inv(self) -> bool {
        &&& self.inner.cell_id() == self.v_perm@.id()
        &&& self.inner.core_token_id() == self.v_token@.id()
    }

    /// The value stored in the lock.
    pub closed spec fn value(self) -> T {
        *self.v_perm@.value()
    }

    /// The value stored in the lock. It is an alias of `Self::value`.
    pub open spec fn view(self) -> T {
        self.value()
    }
}
825
826/*
827impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockWriteGuard<'_, T, G> {
828    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
829        self.guard.as_atomic_mode_guard()
830    }
831}*/
832
impl<T  /*: ?Sized*/ , G: SpinGuardian> Deref for RwLockWriteGuard<'_, T, G> {
    type Target = T;

    /// Borrows the protected value; the guard's full cell permission
    /// justifies the `PCell::borrow` call.
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T
    {
        proof!{
            use_type_invariant(self);
        }
        // unsafe { &*self.inner.val.get() }
        // The internal implementation of `PCell<T>::borrow` is exactly unsafe { &(*(*self.ucell).get()) },
        // and here we verify that we have the permission to call `borrow`.
        self.inner.val.borrow(Tracked(self.v_perm.borrow()))
    }
}
848
849/*
850impl<T: ?Sized, G: SpinGuardian> DerefMut for RwLockWriteGuard<'_, T, G> {
851    fn deref_mut(&mut self) -> &mut Self::Target {
852        unsafe { &mut *self.inner.val.get() }
853    }
854}
855
856impl<T: ?Sized, G: SpinGuardian> Drop for RwLockWriteGuard<'_, T, G> {
857    fn drop(&mut self) {
858        self.inner.lock.fetch_and(!WRITER, Release);
859    }
860}*/
861
impl<T  /*: ?Sized*/ , G: SpinGuardian> RwLockWriteGuard<'_, T, G> {
    /// VERUS LIMITATION: We implement `drop` and call it manually because Verus's support for `Drop` is incomplete for now.
    ///
    /// Releases the write lock: clears the `WRITER` bit and rebuilds the
    /// read-mode ghost state, splitting the returned cell permission back
    /// into the halves handed out to future readers and upgradeable readers.
    pub fn drop(self) {
        proof!{
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        let Tracked(mut perm) = self.v_perm;
        let Tracked(token) = self.v_token;
        //self.inner.lock.fetch_and(!WRITER, Release);
        atomic_with_ghost!{
            self.inner.lock => fetch_and(!WRITER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                lemma_consts_properties_value(next_usize);
                // Validate, then merge this guard's write-mode knowledge back
                // into the core token.
                g.core_token.validate_with_one_right_knowledge(&token);
                g.core_token.join_one_right_knowledge(token);
                // Put the full cell permission back and split off the half
                // that backs the read guards.
                let tracked empty = g.core_token.take_resource_right();
                let tracked mut full = empty.put_resource(perm);
                let tracked read_half_cell_perm = full.split(1int);
                // Flip the core token back to read (left) mode and reissue
                // the upgradeable-reader owner token and the pooled
                // read-guard token.
                g.core_token.change_to_left(full);
                let tracked upreader_guard_token = g.core_token.split_one_left_owner();
                g.upreader_guard_token = Some(upreader_guard_token);
                let tracked left_token = g.core_token.split_one_left_knowledge();
                let tracked read_guard_empty = g.read_guard_token.tracked_take_right();
                let tracked read_guard_token = FracResource::alloc_from_empty(
                    read_guard_empty,
                    (read_half_cell_perm, left_token),
                );
                g.read_guard_token = Sum::Left(read_guard_token);
            }
        };
    }
}
900/*
901impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockWriteGuard<'_, T, G> {
902    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
903        fmt::Debug::fmt(&**self, f)
904    }
905}*/
/// A guard that provides immutable data access but can be atomically
/// upgraded to `RwLockWriteGuard`.
#[verifier::reject_recursive_types(T)]
#[verifier::reject_recursive_types(G)]
pub struct RwLockUpgradeableGuard<'a, T /*: ?Sized*/, G: SpinGuardian> {
    // Guardian-specific (write-class) guard; upgrading transfers it to the
    // resulting write guard.
    guard: G::Guard,
    // The lock this guard was taken from; used for data access and release.
    inner: &'a RwLock<T, G>,
    // Ghost token: the unique "left owner", i.e. the single upgradeable
    // reader's share of the read-mode permission.
    v_token: Tracked<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>>,
}
915/*
916impl<T: ?Sized, G: SpinGuardian> AsAtomicModeGuard for RwLockUpgradeableGuard<'_, T, G> {
917    fn as_atomic_mode_guard(&self) -> &dyn crate::task::atomic_mode::InAtomicMode {
918        self.guard.as_atomic_mode_guard()
919    }
920}*/
921
impl<'a, T, G: SpinGuardian> RwLockUpgradeableGuard<'a, T, G> {
    /// Well-formedness of a live upread guard, delegated to the shared
    /// predicate over the lock's ghost-state ids and the owner token.
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool {
        wf_upgradeable_guard_token(
            self.inner.core_token_id(),
            self.inner.frac_id(),
            self.inner.cell_id(),
            self.v_token@,
        )
    }

    /// The value stored in the lock.
    pub closed spec fn value(self) -> T {
        *self.v_token@.resource().resource().value()
    }

    /// The value stored in the lock. It is an alias of `Self::value`.
    pub open spec fn view(self) -> T {
        self.value()
    }
}
943
impl<'a, T  /*: ?Sized*/ , G: SpinGuardian> RwLockUpgradeableGuard<'a, T, G> {
    /// Upgrades this upread guard to a write guard atomically.
    ///
    /// After calling this method, subsequent readers will be blocked
    /// while previous readers remain unaffected. The calling thread
    /// will spin-wait until previous readers finish.
    #[verifier::exec_allows_no_decreases_clause]
    pub fn upgrade(  /* mut */ self) -> RwLockWriteGuard<'a, T, G> {
        let mut this = self;
        proof! {
            use_type_invariant(&this);
            use_type_invariant(&this.inner);
            lemma_consts_properties();
        }
        // Announce the upgrade so that new readers are turned away while we
        // wait for existing ones to drain.
        // self.inner.lock.fetch_or(BEING_UPGRADED, Acquire);
        atomic_with_ghost!(
            this.inner.lock => fetch_or(BEING_UPGRADED);
            update prev -> next;
            ghost g => {
                lemma_consts_properties_prev_next(prev, next);
            }
        );
        // Spin until the reader count reaches zero and the CAS in
        // `try_upgrade` succeeds. No decreases clause: termination depends on
        // other threads releasing their read guards.
        loop {
            // self = match self.try_upgrade() {
            this =
            match this.try_upgrade() {
                Ok(guard) => return guard,
                Err(e) => e,
            };
        }
    }

    /// Attempts to upgrade this upread guard to a write guard atomically.
    ///
    /// This function will never spin-wait and will return immediately.
    ///
    /// This function is not exposed publicly because the `BEING_UPGRADED` bit
    /// is set only in [`Self::upgrade`].
    fn try_upgrade(  /* mut */ self) -> Result<RwLockWriteGuard<'a, T, G>, Self> {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        let mut this = self;
        let Tracked(upread_guard_token) = this.v_token;
        // Ghost out-parameters filled in by the CAS's ghost block below:
        // either the write-guard resources (success) or the original owner
        // token handed back (failure).
        proof_decl! {
            let tracked mut write_perm: Option<PointsTo<T>> = None;
            let tracked mut err_upread_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>> = None;
            let tracked mut retract_upgrade_token: Option<UniqueToken> = None;
            let tracked mut write_guard_token = None;
        }

        // The CAS succeeds only when the lock word is exactly
        // UPGRADEABLE_READER | BEING_UPGRADED, i.e. no writer and no readers
        // remain; it then installs WRITER while keeping UPGRADEABLE_READER
        // (cleared by the fetch_sub below).
        // let res = self.inner.lock.compare_exchange(
        //     UPGRADEABLE_READER | BEING_UPGRADED,
        //     WRITER | UPGRADEABLE_READER,
        //     AcqRel,
        //     Relaxed,
        // );
        let res =
            atomic_with_ghost!(
            this.inner.lock => compare_exchange(UPGRADEABLE_READER | BEING_UPGRADED, WRITER | UPGRADEABLE_READER);
            update prev -> next;
            returning res;
            ghost g => {
                lemma_consts_properties_prev_next(prev, next);
                if res is Ok {
                    // Return our owner token; the lock cannot already hold
                    // one (two left owners cannot coexist).
                    g.core_token.validate_with_one_left_owner(&upread_guard_token);
                    if g.upreader_guard_token is Some {
                        upread_guard_token.validate_with_one_left_owner(g.upreader_guard_token.tracked_borrow());
                    }
                    g.core_token.join_one_left_owner(upread_guard_token);
                    // Reclaim the readers' half of the cell permission (no
                    // readers remain), reassemble the full PointsTo, and
                    // switch the core token to write (right) mode.
                    let tracked read_guard_token = g.read_guard_token.tracked_take_left();
                    let tracked (read_resource, read_empty) = read_guard_token.take_resource();
                    g.read_guard_token = Sum::Right(read_empty);
                    let tracked (read_half_cell_perm, left_token) = read_resource;
                    g.core_token.join_one_left_knowledge(left_token);
                    let tracked mut pointsto = g.core_token.take_resource_left();
                    pointsto.combine(read_half_cell_perm);
                    let tracked (pointsto, empty) = pointsto.take_resource();
                    write_perm = Some(pointsto);
                    g.core_token.change_to_right(empty);
                    write_guard_token = Some(g.core_token.split_one_right_knowledge());
                    // Take the unique token needed to later clear
                    // UPGRADEABLE_READER exactly once.
                    retract_upgrade_token = Some(g.upread_retract_token.tracked_take());
                } else {
                    // CAS failed: keep our owner token and stay upgradeable.
                    err_upread_guard_token = Some(upread_guard_token);
                }
            }
        );
        if res.is_ok() {
            let inner = this.inner;
            let guard = this.guard.transfer_to();
            // Clear the UPGRADEABLE_READER bit left set by the CAS, returning
            // the unique retract token to the lock's ghost state.
            // drop(self);
            atomic_with_ghost!(
                inner.lock => fetch_sub(UPGRADEABLE_READER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    lemma_consts_properties_value(prev_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    let tracked mut token = retract_upgrade_token.tracked_unwrap();
                    if g.upread_retract_token is Some {
                        token.validate_with_other(g.upread_retract_token.tracked_borrow());
                    }
                    g.upread_retract_token = Some(token);
                }
            );
            Ok(
                RwLockWriteGuard {
                    inner,
                    guard,
                    v_perm: Tracked(write_perm.tracked_unwrap()),
                    v_token: Tracked(write_guard_token.tracked_unwrap()),
                },
            )
        } else {
            Err(
                RwLockUpgradeableGuard {
                    inner: this.inner,
                    guard: this.guard,
                    v_token: Tracked(err_upread_guard_token.tracked_unwrap()),
                },
            )
        }
    }
}
1071
impl<T  /*: ?Sized*/ , G: SpinGuardian> Deref for RwLockUpgradeableGuard<'_, T, G> {
    type Target = T;

    /// Borrows the protected value; the owner token's cell permission
    /// justifies the `PCell::borrow` call.
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T
    {
        proof!{
            use_type_invariant(self);
        }
        // unsafe { &*self.inner.val.get() }
        // The internal implementation of `PCell<T>::borrow` is exactly unsafe { &(*(*self.ucell).get()) },
        // and here we verify that we have the permission to call `borrow`.
        self.inner.val.borrow(Tracked(self.v_token.borrow().tracked_borrow().borrow()))
    }
}
1087
1088/*
1089impl<T: ?Sized, G: SpinGuardian> Drop for RwLockUpgradeableGuard<'_, T, G> {
1090    fn drop(&mut self) {
1091        self.inner.lock.fetch_sub(UPGRADEABLE_READER, Release);
1092    }
1093}*/
1094
impl<T  /*: ?Sized*/ , G: SpinGuardian> RwLockUpgradeableGuard<'_, T, G> {
    /// VERUS LIMITATION: We implement `drop` and call it manually because Verus's support for `Drop` is incomplete for now.
    ///
    /// Releases the upread lock: clears the `UPGRADEABLE_READER` bit and
    /// returns this guard's unique owner token to the lock's ghost state.
    pub fn drop(self) {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        let Tracked(guard_token) = self.v_token;
        //self.inner.lock.fetch_sub(UPGRADEABLE_READER, Release);
        atomic_with_ghost!(
            self.inner.lock => fetch_sub(UPGRADEABLE_READER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                lemma_consts_properties_value(prev_usize);
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                g.core_token.validate_with_one_left_owner(&guard_token);
                if g.upreader_guard_token is Some {
                    // Two left-owner tokens cannot coexist, so validating
                    // ours against the stored one yields a contradiction:
                    // this branch is provably unreachable.
                    guard_token.validate_with_one_left_owner(g.upreader_guard_token.tracked_borrow());
                    assert(false);
                } else {
                    g.upreader_guard_token= Some(guard_token);
                }
            }
        );
    }
}
1124
1125/*
1126impl<T: ?Sized + fmt::Debug, G: SpinGuardian> fmt::Debug for RwLockUpgradeableGuard<'_, T, G> {
1127    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1128        fmt::Debug::fmt(&**self, f)
1129    }
1130}
1131*/
1132
/// Establishes the basic bit-level relationships between the lock-word
/// constants (`WRITER`, `UPGRADEABLE_READER`, `BEING_UPGRADED`, `READER`,
/// `MAX_READER`, and the reader-count masks): their concrete values, the
/// disjointness of the flag bits, and how the two combined patterns used by
/// `upgrade`/`try_upgrade` decompose. Every postcondition is discharged by a
/// matching `compute_only` assertion in the body.
proof fn lemma_consts_properties()
    ensures
        0 & WRITER == 0,
        0 & UPGRADEABLE_READER == 0,
        0 & BEING_UPGRADED == 0,
        0 & READER_MASK == 0,
        0 & MAX_READER_MASK == 0,
        0 & MAX_READER == 0,
        0 & READER == 0,
        WRITER == 0x8000_0000_0000_0000,
        UPGRADEABLE_READER == 0x4000_0000_0000_0000,
        BEING_UPGRADED == 0x2000_0000_0000_0000,
        READER_MASK == 0x0FFF_FFFF_FFFF_FFFF,
        MAX_READER_MASK == 0x1FFF_FFFF_FFFF_FFFF,
        MAX_READER == 0x1000_0000_0000_0000,
        WRITER & WRITER == WRITER,
        WRITER & !WRITER == 0,
        WRITER & BEING_UPGRADED == 0,
        WRITER & READER_MASK == 0,
        WRITER & MAX_READER_MASK == 0,
        WRITER & MAX_READER == 0,
        WRITER & UPGRADEABLE_READER == 0,
        BEING_UPGRADED & WRITER == 0,
        BEING_UPGRADED & UPGRADEABLE_READER == 0,
        UPGRADEABLE_READER & BEING_UPGRADED == 0,
        UPGRADEABLE_READER & READER_MASK == 0,
        UPGRADEABLE_READER & MAX_READER_MASK == 0,
        UPGRADEABLE_READER & MAX_READER == 0,
        BEING_UPGRADED & READER_MASK == 0,
        BEING_UPGRADED & MAX_READER_MASK == 0,
        BEING_UPGRADED & MAX_READER == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & WRITER == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & UPGRADEABLE_READER == UPGRADEABLE_READER,
        (UPGRADEABLE_READER | BEING_UPGRADED) & BEING_UPGRADED == BEING_UPGRADED,
        (UPGRADEABLE_READER | BEING_UPGRADED) & READER_MASK == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER_MASK == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER == 0,
        (WRITER | UPGRADEABLE_READER) & WRITER == WRITER,
        (WRITER | UPGRADEABLE_READER) & UPGRADEABLE_READER == UPGRADEABLE_READER,
        (WRITER | UPGRADEABLE_READER) & BEING_UPGRADED == 0,
        (WRITER | UPGRADEABLE_READER) & READER_MASK == 0,
        (WRITER | UPGRADEABLE_READER) & MAX_READER_MASK == 0,
        (WRITER | UPGRADEABLE_READER) & MAX_READER == 0,
{
    // One `compute_only` assertion per postcondition, in the same order
    // (except that the MAX_READER / MAX_READER_MASK pairs are swapped).
    assert(0 & WRITER == 0) by (compute_only);
    assert(0 & UPGRADEABLE_READER == 0) by (compute_only);
    assert(0 & BEING_UPGRADED == 0) by (compute_only);
    assert(0 & READER_MASK == 0) by (compute_only);
    assert(0 & MAX_READER == 0) by (compute_only);
    assert(0 & MAX_READER_MASK == 0) by (compute_only);
    assert(0 & READER == 0) by (compute_only);
    assert(WRITER == 0x8000_0000_0000_0000) by (compute_only);
    assert(UPGRADEABLE_READER == 0x4000_0000_0000_0000) by (compute_only);
    assert(BEING_UPGRADED == 0x2000_0000_0000_0000) by (compute_only);
    assert(READER_MASK == 0x0FFF_FFFF_FFFF_FFFF) by (compute_only);
    assert(MAX_READER == 0x1000_0000_0000_0000) by (compute_only);
    assert(MAX_READER_MASK == 0x1FFF_FFFF_FFFF_FFFF) by (compute_only);
    assert(WRITER & WRITER == WRITER) by (compute_only);
    assert(WRITER & !WRITER == 0) by (compute_only);
    assert(WRITER & BEING_UPGRADED == 0) by (compute_only);
    assert(WRITER & READER_MASK == 0) by (compute_only);
    assert(WRITER & MAX_READER_MASK == 0) by (compute_only);
    assert(WRITER & MAX_READER == 0) by (compute_only);
    assert(WRITER & UPGRADEABLE_READER == 0) by (compute_only);
    assert(BEING_UPGRADED & WRITER == 0) by (compute_only);
    assert(BEING_UPGRADED & UPGRADEABLE_READER == 0) by (compute_only);
    assert(UPGRADEABLE_READER & BEING_UPGRADED == 0) by (compute_only);
    assert(UPGRADEABLE_READER & READER_MASK == 0) by (compute_only);
    assert(UPGRADEABLE_READER & MAX_READER_MASK == 0) by (compute_only);
    assert(UPGRADEABLE_READER & MAX_READER == 0) by (compute_only);
    assert(BEING_UPGRADED & READER_MASK == 0) by (compute_only);
    assert(BEING_UPGRADED & MAX_READER_MASK == 0) by (compute_only);
    assert(BEING_UPGRADED & MAX_READER == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & WRITER == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & UPGRADEABLE_READER == UPGRADEABLE_READER)
        by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & BEING_UPGRADED == BEING_UPGRADED)
        by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & READER_MASK == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER_MASK == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & WRITER == WRITER) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & UPGRADEABLE_READER == UPGRADEABLE_READER)
        by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & BEING_UPGRADED == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & READER_MASK == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & MAX_READER_MASK == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & MAX_READER == 0) by (compute_only);
}
1222
/// Pointwise facts about a single lock-word value `prev`: decomposes tests
/// against combined bit masks into per-flag facts and bounds (e.g. that
/// `no_max_reader_overflow` rules out overflow when adding `READER`). Each
/// postcondition is proved by a `bit_vector` assertion under the matching
/// hypothesis.
proof fn lemma_consts_properties_value(prev: usize)
    ensures
        no_max_reader_overflow(prev) ==> prev + READER <= usize::MAX,
        prev & (WRITER | MAX_READER | BEING_UPGRADED) == 0 ==> {
            &&& prev & WRITER == 0
            &&& prev & BEING_UPGRADED == 0
            &&& prev & MAX_READER == 0
        },
        prev & (WRITER | UPGRADEABLE_READER) == 0 ==> {
            &&& prev & WRITER == 0
            &&& prev & UPGRADEABLE_READER == 0
        },
        prev & MAX_READER == 0 ==> prev & READER_MASK == prev & MAX_READER_MASK,
        prev & MAX_READER != 0 ==> prev & MAX_READER_MASK >= MAX_READER,
        prev & (WRITER | UPGRADEABLE_READER) == WRITER ==> {
            &&& prev & UPGRADEABLE_READER == 0
            &&& prev & WRITER == WRITER
        },
        prev & UPGRADEABLE_READER != 0 ==> prev >= UPGRADEABLE_READER,
        prev & UPGRADEABLE_READER == 0 ==> {
            ||| prev & (WRITER | UPGRADEABLE_READER) == 0
            ||| prev & (WRITER | UPGRADEABLE_READER) == WRITER
        },
    // Each branch below discharges one postcondition under its hypothesis;
    // the `requires` on every `by (bit_vector)` restates that hypothesis
    // because bit-vector assertions do not see the surrounding context.
{
    if no_max_reader_overflow(prev) {
        assert(prev + READER <= usize::MAX) by (bit_vector)
            requires
                prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
    }
    if prev & (WRITER | MAX_READER | BEING_UPGRADED) == 0 {
        assert(prev & WRITER == 0) by (bit_vector)
            requires
                prev & (WRITER | MAX_READER | BEING_UPGRADED) == 0,
        ;
        assert(prev & BEING_UPGRADED == 0) by (bit_vector)
            requires
                prev & (WRITER | MAX_READER | BEING_UPGRADED) == 0,
        ;
        assert(prev & MAX_READER == 0) by (bit_vector)
            requires
                prev & (WRITER | MAX_READER | BEING_UPGRADED) == 0,
        ;
    }
    if prev & (WRITER | UPGRADEABLE_READER) == 0 {
        assert(prev & WRITER == 0) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == 0,
        ;
        assert(prev & UPGRADEABLE_READER == 0) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == 0,
        ;
    }
    if prev & MAX_READER == 0 {
        assert(prev & READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                prev & MAX_READER == 0,
        ;
    }
    if prev & MAX_READER != 0 {
        assert(prev & MAX_READER_MASK >= MAX_READER) by (bit_vector)
            requires
                prev & MAX_READER != 0,
        ;
    }
    if prev & (WRITER | UPGRADEABLE_READER) == WRITER {
        assert(prev & UPGRADEABLE_READER == 0 && prev & WRITER == WRITER) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == WRITER,
        ;
    }
    if prev & UPGRADEABLE_READER != 0 {
        assert(prev >= UPGRADEABLE_READER) by (bit_vector)
            requires
                prev & UPGRADEABLE_READER != 0,
        ;
    }
    if prev & UPGRADEABLE_READER == 0 {
        assert(prev & (WRITER | UPGRADEABLE_READER) == 0 || prev & (WRITER | UPGRADEABLE_READER)
            == WRITER) by (bit_vector)
            requires
                prev & UPGRADEABLE_READER == 0,
        ;
    }
}
1309
1310proof fn lemma_consts_properties_prev_next(prev: usize, next: usize)
1311    ensures
1312        prev & READER_MASK < MAX_READER,
1313        next == prev | UPGRADEABLE_READER ==> {
1314            &&& next & UPGRADEABLE_READER != 0
1315            &&& next & WRITER == prev & WRITER
1316            &&& next & READER_MASK == prev & READER_MASK
1317            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
1318            &&& next & MAX_READER == prev & MAX_READER
1319            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
1320        },
1321        next == prev | BEING_UPGRADED ==> {
1322            &&& next & BEING_UPGRADED != 0
1323            &&& next & WRITER == prev & WRITER
1324            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
1325            &&& next & READER_MASK == prev & READER_MASK
1326            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
1327            &&& next & MAX_READER == prev & MAX_READER
1328        },
1329        next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0 ==> {
1330            &&& next & UPGRADEABLE_READER == 0
1331            &&& next & WRITER == prev & WRITER
1332            &&& next & READER_MASK == prev & READER_MASK
1333            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
1334            &&& next & MAX_READER == prev & MAX_READER
1335            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
1336        },
1337        next == prev - READER && prev & READER_MASK != 0 ==> {
1338            &&& next & READER_MASK == (prev & READER_MASK) - READER
1339            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER
1340            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
1341            &&& next & WRITER == prev & WRITER
1342            &&& next & MAX_READER == prev & MAX_READER
1343            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
1344        },
1345        next == prev - READER && prev & MAX_READER_MASK != 0 ==> {
1346            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER
1347            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
1348            &&& next & WRITER == prev & WRITER
1349            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
1350        },
1351        next == prev + READER && no_max_reader_overflow(prev) ==> {
1352            &&& next & READER_MASK == if (prev & READER_MASK) + READER == MAX_READER {
1353                0
1354            } else {
1355                (prev & READER_MASK) + READER
1356            }
1357            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) + READER
1358            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
1359            &&& next & WRITER == prev & WRITER
1360            &&& next & MAX_READER == if (prev & READER_MASK) + READER == MAX_READER {
1361                MAX_READER
1362            } else {
1363                prev & MAX_READER
1364            }
1365            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
1366        },
1367        next == prev & !WRITER ==> {
1368            &&& next & WRITER == 0
1369            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
1370            &&& next & READER_MASK == prev & READER_MASK
1371            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
1372            &&& next & MAX_READER == prev & MAX_READER
1373            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
1374        },
1375{
1376    assert(prev & READER_MASK < MAX_READER) by (bit_vector);
1377    if next == prev | UPGRADEABLE_READER {
1378        assert(next & UPGRADEABLE_READER != 0) by (bit_vector)
1379            requires
1380                next == prev | UPGRADEABLE_READER,
1381        ;
1382        assert(next & WRITER == prev & WRITER) by (bit_vector)
1383            requires
1384                next == prev | UPGRADEABLE_READER,
1385        ;
1386        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
1387            requires
1388                next == prev | UPGRADEABLE_READER,
1389        ;
1390        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
1391            requires
1392                next == prev | UPGRADEABLE_READER,
1393        ;
1394        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
1395            requires
1396                next == prev | UPGRADEABLE_READER,
1397        ;
1398        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
1399            requires
1400                next == prev | UPGRADEABLE_READER,
1401        ;
1402    }
1403    if next == prev | BEING_UPGRADED {
1404        assert(next & BEING_UPGRADED != 0) by (bit_vector)
1405            requires
1406                next == prev | BEING_UPGRADED,
1407        ;
1408        assert(next & WRITER == prev & WRITER) by (bit_vector)
1409            requires
1410                next == prev | BEING_UPGRADED,
1411        ;
1412        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
1413            requires
1414                next == prev | BEING_UPGRADED,
1415        ;
1416        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
1417            requires
1418                next == prev | BEING_UPGRADED,
1419        ;
1420        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
1421            requires
1422                next == prev | BEING_UPGRADED,
1423        ;
1424        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
1425            requires
1426                next == prev | BEING_UPGRADED,
1427        ;
1428    }
1429    if next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0 {
1430        assert(next & UPGRADEABLE_READER == 0) by (bit_vector)
1431            requires
1432                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
1433        ;
1434        assert(next & WRITER == prev & WRITER) by (bit_vector)
1435            requires
1436                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
1437        ;
1438        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
1439            requires
1440                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
1441        ;
1442        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
1443            requires
1444                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
1445        ;
1446        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
1447            requires
1448                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
1449        ;
1450        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
1451            requires
1452                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
1453        ;
1454    }
1455    if next == prev - READER && prev & READER_MASK != 0 {
1456        assert(next & READER_MASK == (prev & READER_MASK) - READER) by (bit_vector)
1457            requires
1458                next == prev - READER && prev & READER_MASK != 0,
1459        ;
1460        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER) by (bit_vector)
1461            requires
1462                next == prev - READER && prev & READER_MASK != 0,
1463        ;
1464        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
1465            requires
1466                next == prev - READER && prev & READER_MASK != 0,
1467        ;
1468        assert(next & WRITER == prev & WRITER) by (bit_vector)
1469            requires
1470                next == prev - READER && prev & READER_MASK != 0,
1471        ;
1472        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
1473            requires
1474                next == prev - READER && prev & READER_MASK != 0,
1475        ;
1476        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
1477            requires
1478                next == prev - READER && prev & READER_MASK != 0,
1479        ;
1480    }
1481    if next == prev - READER && prev & MAX_READER_MASK != 0 {
1482        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER) by (bit_vector)
1483            requires
1484                next == prev - READER && prev & MAX_READER_MASK != 0,
1485        ;
1486        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
1487            requires
1488                next == prev - READER && prev & MAX_READER_MASK != 0,
1489        ;
1490        assert(next & WRITER == prev & WRITER) by (bit_vector)
1491            requires
1492                next == prev - READER && prev & MAX_READER_MASK != 0,
1493        ;
1494        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
1495            requires
1496                next == prev - READER && prev & MAX_READER_MASK != 0,
1497        ;
1498    }
1499    if next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK {
1500        assert(next & READER_MASK == if prev & READER_MASK == MAX_READER - READER {
1501            0
1502        } else {
1503            (prev & READER_MASK) + READER
1504        }) by (bit_vector)
1505            requires
1506                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
1507        ;
1508        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) + READER) by (bit_vector)
1509            requires
1510                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
1511        ;
1512        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
1513            requires
1514                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
1515        ;
1516        assert(next & WRITER == prev & WRITER) by (bit_vector)
1517            requires
1518                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
1519        ;
1520        assert(next & MAX_READER == if (prev & READER_MASK) + READER == MAX_READER {
1521            MAX_READER
1522        } else {
1523            prev & MAX_READER
1524        }) by (bit_vector)
1525            requires
1526                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
1527        ;
1528        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
1529            requires
1530                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
1531        ;
1532    }
1533    if next == prev & !WRITER {
1534        assert(next & WRITER == 0) by (bit_vector)
1535            requires
1536                next == prev & !WRITER,
1537        ;
1538        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
1539            requires
1540                next == prev & !WRITER,
1541        ;
1542        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
1543            requires
1544                next == prev & !WRITER,
1545        ;
1546        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
1547            requires
1548                next == prev & !WRITER,
1549        ;
1550        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
1551            requires
1552                next == prev & !WRITER,
1553        ;
1554        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
1555            requires
1556                next == prev & !WRITER,
1557        ;
1558    }
1559}
1560
1561} // verus!