// File: ostd/sync/rwmutex.rs (web-viewer navigation residue removed during review)

1// SPDX-License-Identifier: MPL-2.0
2use vstd::atomic_ghost::*;
3use vstd::cell::{self, pcell::*, CellId};
4use vstd::pcm::Loc;
5use vstd::prelude::*;
6use vstd::tokens::frac::{Empty, Frac};
7use vstd_extra::auxiliary::pcell_borrow_mut;
8use vstd_extra::resource::ghost_resource::{csum::*, excl::*, tokens::*};
9use vstd_extra::sum::*;
10
11use core::{
12    cell::UnsafeCell,
13    ops::{Deref, DerefMut},
14    sync::atomic::{
15        // AtomicUsize,
16        Ordering::{AcqRel, Acquire, Relaxed, Release},
17    },
18};
19
20use super::WaitQueue;
21
22verus! {
23
// `MAX_READER` widened to `u64`, used as the fraction bound of the pooled
// reader token (`FracResource<ReadPerm<T>, MAX_READER_U64>` below).
const MAX_READER_U64: u64 = MAX_READER as u64;

// Number of read-retraction fractions: one more than the largest value the
// reader-count field (including the sentinel bit) can hold.
spec const V_MAX_READ_RETRACT_FRACS_SPEC: u64 = (MAX_READER_MASK + 1) as u64;

// Executable twin of `V_MAX_READ_RETRACT_FRACS_SPEC`; the `ensures` clauses pin
// its exact value and that it stays strictly below `u64::MAX`.
#[verifier::when_used_as_spec(V_MAX_READ_RETRACT_FRACS_SPEC)]
exec const V_MAX_READ_RETRACT_FRACS: u64
    ensures
        V_MAX_READ_RETRACT_FRACS == V_MAX_READ_RETRACT_FRACS_SPEC,
        V_MAX_READ_RETRACT_FRACS == MAX_READER_MASK + 1,
        V_MAX_READ_RETRACT_FRACS < u64::MAX,
{
    // Discharged by bit-level computation; no overflow in the cast below.
    assert(MAX_READER_MASK + 1 < u64::MAX) by (compute_only);
    (MAX_READER_MASK + 1) as u64
}
38
// An empty (resource-less) placeholder for the cell permission.
type NoPerm<T> = Empty<PointsTo<T>>;

// A fractional share of the `PCell` permission; a full `PointsTo` is split in
// halves in `RwMutex::new` (one half for the reader pool, one in the core token).
type HalfPerm<T> = Frac<PointsTo<T>>;

// What each reader's token carries: a half cell permission plus knowledge that
// the core sum resource is currently in its "left" (non-writer) mode.
type ReadPerm<T> = (HalfPerm<T>, OneLeftKnowledge<HalfPerm<T>, NoPerm<T>, 3>);
44
// The tracked ghost state stored alongside the atomic `lock` word; the
// invariant in `RwMutex::wf` ties each field to the physical bits of the word.
tracked struct RwPerms<T> {
    // Sum resource: "left" while readers/upreaders may exist (holds half the
    // cell permission), "right" while a writer holds the lock (see `wf`).
    core_token: SumResource<HalfPerm<T>, NoPerm<T>, 3>,
    // Fraction pool accounting for optimistic `fetch_add(READER)` increments
    // that failed and have not yet been rolled back.
    read_retract_token: TokenResource<V_MAX_READ_RETRACT_FRACS>,
    // Unique token; taken (`None`) while a failed `try_upread` attempt still
    // has its `UPGRADEABLE_READER` bit set spuriously.
    upread_retract_token: Option<UniqueToken>,
    // The upreader slot; taken (`None`) while an upgradeable guard is out.
    upreader_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>>,
    // Reader pool: `Left` holds the pooled fractional reader token; `Right`
    // (empty) while a writer holds the lock.
    read_guard_token: Sum<
        FracResource<ReadPerm<T>, MAX_READER_U64>,
        Empty<ReadPerm<T>, MAX_READER_U64>,
    >,
}
55
// Ghost locations of every resource in `RwPerms`, recorded at construction
// time (`RwMutex::new`) so the invariant can tie the tokens together.
ghost struct RwId {
    core_token_id: Loc,
    frac_id: Loc,
    read_retract_token_id: Loc,
    upread_retract_token_id: Loc,
    read_guard_token_id: Loc,
}
63
#[verifier::reject_recursive_types(T)]
struct_with_invariants! {
/// A mutex that provides data access to either one writer or many readers.
///
/// # Overview
///
/// This mutex allows for multiple readers, or at most one writer to access
/// at any point in time. The writer of this mutex has exclusive access to
/// modify the underlying data, while the readers are allowed shared and
/// read-only access.
///
/// The writing and reading portions cannot be active simultaneously, when
/// one portion is in progress, the other portion will sleep. This is
/// suitable for scenarios where the mutex is expected to be held for a
/// period of time, which can avoid wasting CPU resources.
///
/// This implementation provides the upgradeable read mutex (`upread mutex`).
/// The `upread mutex` can be upgraded to write mutex atomically, useful in
/// scenarios where a decision to write is made after reading.
///
/// The type parameter `T` represents the data that this mutex is protecting.
/// It is necessary for `T` to satisfy [`Send`] to be shared across tasks and
/// [`Sync`] to permit concurrent access via readers. The [`Deref`] method (and
/// [`DerefMut`] for the writer) is implemented for the RAII guards returned
/// by the locking methods, which allows for the access to the protected data
/// while the mutex is held.
///
/// # Usage
///
/// The mutex can be used in scenarios where data needs to be read frequently
/// but written to occasionally.
///
/// Use `upread mutex` in scenarios where related checking is performed before
/// modification to effectively avoid deadlocks and improve efficiency.
///
/// # Safety
///
/// Avoid using `RwMutex` in an interrupt context, as it may result in sleeping
/// and never being awakened.
///
/// # Examples
///
/// ```
/// use ostd::sync::RwMutex;
///
/// let mutex = RwMutex::new(5);
///
/// // many read mutexes can be held at once
/// {
///     let r1 = mutex.read();
///     let r2 = mutex.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
///
///     // Upgradeable read mutex can share access to data with read mutexes
///     let r3 = mutex.upread();
///     assert_eq!(*r3, 5);
///     drop(r1);
///     drop(r2);
///     // read mutexes are dropped at this point
///
///     // An upread mutex can only be upgraded successfully after all the
///     // read mutexes are released, otherwise it will spin-wait.
///     let mut w1 = r3.upgrade();
///     *w1 += 1;
///     assert_eq!(*w1, 6);
/// }   // upread mutex is dropped at this point
///
/// {
///     // Only one write mutex can be held at a time
///     let mut w2 = mutex.write();
///     *w2 += 1;
///     assert_eq!(*w2, 7);
/// }   // write mutex is dropped at this point
/// ```
pub struct RwMutex<T /*: ?Sized*/> {
    /// The internal representation of the mutex state is as follows:
    /// - **Bit 63:** Writer mutex.
    /// - **Bit 62:** Upgradeable reader mutex.
    /// - **Bit 61:** Indicates if an upgradeable reader is being upgraded.
    /// - **Bits 60-0:** Reader mutex count.
    lock: AtomicUsize<_, RwPerms<T>, _>,
    /// Threads that fail to acquire the mutex will sleep on this waitqueue.
    queue: WaitQueue,
    // val: UnsafeCell<T>,
    val: PCell<T>,
    /// Ghost ids tying the atomic's ghost state to the cell and tokens.
    v_id: Ghost<RwId>,
}

// The atomic invariant: relates the physical state word `v` to the ghost
// permissions `g` at every instant the lock word is not being updated.
closed spec fn wf(self) -> bool {
    invariant on lock with (val, v_id) is (v: usize, g: RwPerms<T>) {
        // --- Decode the physical state word `v` ---
        let has_writer_bit: bool = (v & WRITER) != 0;
        let has_upgrade_bit: bool = (v & UPGRADEABLE_READER) != 0;
        let has_max_reader_bit: bool = (v & MAX_READER) != 0;
        let total_reader_bits: int = (v & MAX_READER_MASK) as int;
        let reader_bits: int = if has_max_reader_bit {
            MAX_READER as int
        } else {
            (v & READER_MASK) as int
        };

        // --- Decode the ghost state `g` ---
        let active_writer: bool = g.core_token.is_right();
        let active_upgrade_guard: bool = !active_writer && g.upreader_guard_token is None;
        // Each outstanding reader guard holds one fraction taken from the pool.
        let active_read_guards: int = if g.read_guard_token is Left {
            MAX_READER_U64 - g.read_guard_token.left().frac()
        } else {
            0
        };
        let pending_failed_upread_attempt: bool = g.upread_retract_token is None;
        // Failed `try_read` attempts that incremented the count but have not
        // yet rolled it back.
        let failed_reader_attempts: int = V_MAX_READ_RETRACT_FRACS - g.read_retract_token.frac();

        // --- Relate physical bits to ghost tokens ---
        &&& if g.core_token.is_left() {
            g.read_guard_token is Left
        } else {
            &&& g.upreader_guard_token is None
            &&& g.read_guard_token is Right
        }
        &&& has_upgrade_bit <==> (active_upgrade_guard || pending_failed_upread_attempt)
        &&& !(active_upgrade_guard && pending_failed_upread_attempt)
        &&& total_reader_bits == active_read_guards + failed_reader_attempts
        &&& active_writer <==> has_writer_bit
        &&& 0 <= active_read_guards <= reader_bits <= total_reader_bits
        // Mutual exclusion: a writer coexists with no readers and no upreader.
        &&& !(active_writer && (active_read_guards + if active_upgrade_guard { 1int } else { 0 }) > 0)
        // --- Well-formedness of each token and its ghost location ---
        &&& g.core_token.id() == v_id@.core_token_id
        &&& g.core_token.wf()
        &&& g.core_token.is_left() ==> {
            &&& !g.core_token.is_resource_owner()
            &&& g.core_token.frac() == 1
        }
        &&& g.core_token.is_right() ==> {
            let empty = g.core_token.resource_right();
            &&& empty.id() == v_id@.frac_id
            &&& g.core_token.frac() == 2
            &&& g.core_token.has_resource()
        }
        &&& g.read_retract_token.wf()
        &&& g.read_retract_token.id() == v_id@.read_retract_token_id
        &&& g.upread_retract_token is Some ==> {
            let token = g.upread_retract_token->Some_0;
            &&& token.wf()
            &&& token.id() == v_id@.upread_retract_token_id
        }
        &&& g.upreader_guard_token is Some ==> {
            let token = g.upreader_guard_token->Some_0;
            wf_upgradeable_guard_token(v_id@.core_token_id, v_id@.frac_id, val.id(), token)
        }
        &&& match g.read_guard_token {
            Sum::Left(token) => {
                &&& token.wf()
                &&& token.id() == v_id@.read_guard_token_id
                &&& token.not_empty() ==> {
                    let resource = token.resource();
                    let read_half_cell_perm = resource.0;
                    let mode_knowledge = resource.1;
                    &&& mode_knowledge.id() == v_id@.core_token_id
                    &&& read_half_cell_perm.id() == v_id@.frac_id
                    &&& read_half_cell_perm.resource().id() == val.id()
                    &&& read_half_cell_perm.frac() == 1
                }
            },
            Sum::Right(empty) => {
                &&& empty.id() == v_id@.read_guard_token_id
            },
        }
    }
}
}
231
/// State-word increment contributed by each active (or attempting) reader.
const READER: usize = 1;

/// Top bit: set while a writer holds the lock.
const WRITER: usize = 1 << (usize::BITS - 1);

/// Second-highest bit: set while an upgradeable reader exists.
const UPGRADEABLE_READER: usize = 1 << (usize::BITS - 2);

/// Third-highest bit: set while an upgradeable reader is being upgraded.
const BEING_UPGRADED: usize = 1 << (usize::BITS - 3);

/// This bit is reserved as an overflow sentinel.
/// For more details, see comments on the `MAX_READER` constant
/// in the [`super::rwlock`] module.
const MAX_READER: usize = 1 << (usize::BITS - 4);

/// Mask of the reader-count bits, excluding the sentinel bit.
const READER_MASK: usize = usize::MAX >> 4;

/// Mask of the reader-count bits, including the sentinel bit.
const MAX_READER_MASK: usize = usize::MAX >> 3;
248
/// Holds when the reader-count field (including the sentinel bit) of `v` is not
/// saturated, i.e. one more `READER` increment cannot carry into the flag bits.
pub closed spec fn no_max_reader_overflow(v: usize) -> bool {
    v & MAX_READER_MASK < MAX_READER_MASK
}
252
// Closed spec accessors exposing the ghost ids to guard type invariants.
impl<T> RwMutex<T> {
    /// Ghost id of the protected `PCell`.
    pub closed spec fn cell_id(self) -> cell::CellId {
        self.val.id()
    }

    /// Ghost location of the core sum resource.
    pub closed spec fn core_token_id(self) -> Loc {
        self.v_id@.core_token_id
    }

    /// Ghost location of the fractional cell permission.
    pub closed spec fn frac_id(self) -> Loc {
        self.v_id@.frac_id
    }

    /// Ghost location of the unique upread-retraction token.
    pub closed spec fn upread_retract_token_id(self) -> Loc {
        self.v_id@.upread_retract_token_id
    }

    /// Ghost location of the pooled reader token.
    pub closed spec fn read_guard_token_id(self) -> Loc {
        self.v_id@.read_guard_token_id
    }

    /// Type invariant: the atomic invariant `wf` holds for every `RwMutex`.
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool {
        self.wf()
    }

    /// Well-formedness of the embedded wait queue.
    pub closed spec fn queue_inv(self) -> bool {
        self.queue.type_inv()
    }
}
283
/// Well-formedness of the token held by an upgradeable-reader guard: it is a
/// "one left owner" of the core sum resource at `core_token_id` and carries a
/// half fraction of the cell permission for the cell at `cell_id`.
closed spec fn wf_upgradeable_guard_token<T>(
    core_token_id: Loc,
    frac_id: Loc,
    cell_id: CellId,
    token: OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>,
) -> bool {
    let half_cell_perm = token.resource();
    &&& token.id() == core_token_id
    &&& half_cell_perm.id() == frac_id
    &&& half_cell_perm.resource().id() == cell_id
    &&& token.has_resource()
    &&& half_cell_perm.frac() == 1
    &&& token.wf()
}
298
impl<T> RwMutex<T> {
    /// Creates a new read-write mutex with an initial value.
    pub const fn new(val: T) -> Self {
        let (val, Tracked(perm)) = PCell::new(val);

        proof {
            lemma_consts_properties();
        }
        // Split the cell permission in half: one half goes into the pooled
        // reader token, the other stays inside the core sum resource (left mode).
        let tracked mut frac_perm = Frac::<PointsTo<T>>::new(perm);
        let tracked read_half_cell_perm = frac_perm.split(1int);
        let ghost frac_id = frac_perm.id();
        let tracked mut core_token = SumResource::alloc_left(frac_perm);
        // Fresh retraction pools for failed read/upread attempts.
        let tracked read_retract_token = TokenResource::<V_MAX_READ_RETRACT_FRACS>::alloc(());
        let tracked upread_retract_token = UniqueToken::alloc(());
        // Carve the upreader slot and mode knowledge out of the core token.
        let tracked upreader_guard_token = core_token.split_one_left_owner();
        let tracked left_token = core_token.split_one_left_knowledge();
        let tracked read_guard_token = FracResource::<ReadPerm<T>, MAX_READER_U64>::alloc(
            (read_half_cell_perm, left_token),
        );
        // Record every ghost location so the atomic invariant can tie the
        // tokens together (see `RwId` and `wf`).
        let ghost v_id = RwId {
            frac_id,
            core_token_id: core_token.id(),
            upread_retract_token_id: upread_retract_token.id(),
            read_retract_token_id: read_retract_token.id(),
            read_guard_token_id: read_guard_token.id(),
        };
        let tracked perms = RwPerms {
            core_token,
            read_retract_token,
            upread_retract_token: Some(upread_retract_token),
            upreader_guard_token: Some(upreader_guard_token),
            read_guard_token: Sum::Left(read_guard_token),
        };

        Self {
            // val: UnsafeCell::new(val),
            val,
            // State word starts at 0: no writer, no upreader, no readers.
            lock: AtomicUsize::new(Ghost((val, Ghost(v_id))), 0, Tracked(perms)),
            queue: WaitQueue::new(),
            v_id: Ghost(v_id),
        }
    }
}
342
#[verus_verify]
impl<T /*: ?Sized*/> RwMutex<T> {
    /// Acquires a read mutex and sleep until it can be acquired.
    ///
    /// The calling thread will sleep until there are no writers or upgrading
    /// upreaders present. The implementation of [`WaitQueue`] guarantees the
    /// order in which other concurrent readers or writers waiting simultaneously
    /// will acquire the mutex.
    #[track_caller]
    pub fn read(&self) -> RwMutexReadGuard<'_, T> {
        self.queue.wait_until(|| self.try_read())
    }

    /// Acquires a write mutex and sleep until it can be acquired.
    ///
    /// The calling thread will sleep until there are no writers, upreaders,
    /// or readers present. The implementation of [`WaitQueue`] guarantees the
    /// order in which other concurrent readers or writers waiting simultaneously
    /// will acquire the mutex.
    #[track_caller]
    pub fn write(&self) -> RwMutexWriteGuard<'_, T> {
        self.queue.wait_until(|| self.try_write())
    }

    /// Acquires a upread mutex and sleep until it can be acquired.
    ///
    /// The calling thread will sleep until there are no writers or upreaders present.
    /// The implementation of [`WaitQueue`] guarantees the order in which other concurrent
    /// readers or writers waiting simultaneously will acquire the mutex.
    ///
    /// Upreader will not block new readers until it tries to upgrade. Upreader
    /// and reader do not differ before invoking the upgrade method. However,
    /// only one upreader can exist at any time to avoid deadlock in the
    /// upgrade method.
    #[track_caller]
    pub fn upread(&self) -> RwMutexUpgradeableGuard<'_, T> {
        self.queue.wait_until(|| self.try_upread())
    }

    /// Attempts to acquire a read mutex.
    ///
    /// This function will never sleep and will return immediately.
    #[verus_spec]
    pub fn try_read(&self) -> Option<RwMutexReadGuard<'_, T>> {
        // Ghost state surviving across the atomic blocks: on success we keep a
        // per-reader fractional token; on failure we keep a retraction token
        // that is merged back when the optimistic increment is rolled back.
        proof_decl! {
            let tracked mut read_token: Option<Frac<ReadPerm<T>, MAX_READER_U64>> = None;
            let tracked mut retract_read_token: Option<Token<V_MAX_READ_RETRACT_FRACS>> = None;
        }
        proof! {
            use_type_invariant(self);
            lemma_consts_properties();
        }

        // Optimistically bump the reader count, then inspect the previous value
        // for a writer, an in-progress upgrade, or the overflow sentinel.
        let lock =
            atomic_with_ghost!(
            self.lock => fetch_add(READER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                // NOTE(review): `assume` is an unverified hypothesis (the reader
                // count never saturates the sentinel) — TODO: discharge it.
                assume(no_max_reader_overflow(prev_usize));
                lemma_consts_properties_value(prev_usize);
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                if prev_usize & (WRITER | BEING_UPGRADED | MAX_READER) == 0 {
                    // Success: split one reader fraction out of the pooled token.
                    let tracked mut tmp = g.read_guard_token.tracked_take_left();
                    read_token = Some(tmp.split_one());
                    g.read_guard_token = Sum::Left(tmp);
                } else {
                    // Failure: account for the spurious increment until undone.
                    retract_read_token = Some(g.read_retract_token.split_one());
                }
            }
        );

        if lock & (WRITER | BEING_UPGRADED | MAX_READER) == 0 {
            Some(
                RwMutexReadGuard {
                    inner: self,
                    v_token: Tracked(read_token.tracked_unwrap())
            })
        } else {
            // Roll back the optimistic increment; return the retraction token.
            atomic_with_ghost!(
                self.lock => fetch_sub(READER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    lemma_consts_properties_value(next_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    g.read_retract_token.combine(retract_read_token.tracked_unwrap());
                }
            );
            None
        }
    }

    /// Attempts to acquire a write mutex.
    ///
    /// This function will never sleep and will return immediately.
    pub fn try_write(&self) -> Option<RwMutexWriteGuard<'_, T>> {
        // On success these hold the full cell permission and the writer-mode
        // knowledge that are moved into the returned guard.
        proof_decl! {
            let tracked mut guard_perm: Option<PointsTo<T>> = None;
            let tracked mut guard_token: Option<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>> = None;
        }
        proof! {
            use_type_invariant(self);
            lemma_consts_properties();
        }

        // A writer may only enter when the whole state word is zero
        // (no writer, no upreader, no readers, no pending retractions).
        if atomic_with_ghost!(
            self.lock => compare_exchange(0, WRITER);
            update prev -> next;
            returning res;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                if res is Ok {
                    // Collect the outstanding half-permissions (reader pool and
                    // upreader slot), reassemble the full `PointsTo`, and flip
                    // the core token into the writer ("right") mode.
                    let tracked read_guard_token = g.read_guard_token.tracked_take_left();
                    let tracked (read_resource, read_empty) = read_guard_token.take_resource();
                    g.read_guard_token = Sum::Right(read_empty);
                    let tracked (read_half_cell_perm, left_token) = read_resource;
                    g.core_token.join_one_left_knowledge(left_token);
                    let tracked upreader_guard_token = g.upreader_guard_token.tracked_take();
                    g.core_token.join_one_left_owner(upreader_guard_token);
                    let tracked mut pointsto = g.core_token.take_resource_left();
                    pointsto.combine(read_half_cell_perm);
                    let tracked (pointsto, empty) = pointsto.take_resource();
                    guard_perm = Some(pointsto);
                    g.core_token.change_to_right(empty);
                    guard_token = Some(g.core_token.split_one_right_knowledge());
                } else {
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                }
            }
        ).is_ok() {
            Some(
                RwMutexWriteGuard { inner: self , v_perm: Tracked(guard_perm.tracked_unwrap()), v_token: Tracked(guard_token.tracked_unwrap())}
            )
        } else {
            None
        }
    }

    /// Attempts to acquire a upread mutex.
    ///
    /// This function will never sleep and will return immediately.
    pub fn try_upread(&self) -> Option<RwMutexUpgradeableGuard<'_, T>> {
        proof_decl! {
            let tracked mut upgrade_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>> = None;
            let tracked mut retract_upgrade_token: Option<UniqueToken> = None;
        }
        proof! {
            use_type_invariant(self);
            lemma_consts_properties();
        }

        // Set the upreader bit unconditionally; decide from the previous value
        // whether we actually acquired the slot.
        let lock =
            atomic_with_ghost!(
            self.lock => fetch_or(UPGRADEABLE_READER);
            update prev -> next;
            ghost g => {
                lemma_consts_properties_value(prev);
                lemma_consts_properties_prev_next(prev, next);
                if prev & (WRITER | UPGRADEABLE_READER) == 0 {
                    // Acquired: take ownership of the upreader slot.
                    upgrade_guard_token = Some(g.upreader_guard_token.tracked_take());
                } else if prev & (WRITER | UPGRADEABLE_READER) == WRITER {
                    // A writer holds the lock: our `fetch_or` set the bit
                    // spuriously; take the retraction token to undo it below.
                    retract_upgrade_token = Some(g.upread_retract_token.tracked_take());
                }
                // Otherwise the bit was already set by another upreader and the
                // `fetch_or` was a no-op; nothing to undo.
            }
        )
            & (WRITER | UPGRADEABLE_READER);

        if lock == 0 {
            return Some(
                RwMutexUpgradeableGuard { inner: self, v_token: Tracked(upgrade_guard_token.tracked_unwrap()) }
            );
        } else if lock == WRITER {
            // Undo the spuriously-set upreader bit.
            atomic_with_ghost!(
                self.lock => fetch_sub(UPGRADEABLE_READER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    lemma_consts_properties_value(prev_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    if g.upread_retract_token is Some {
                        // Two copies of the unique token cannot coexist;
                        // `validate_with_other` derives the contradiction.
                        let tracked mut token = retract_upgrade_token.tracked_unwrap();
                        token.validate_with_other(g.upread_retract_token.tracked_borrow());
                    } else {
                        g.upread_retract_token = retract_upgrade_token;
                    }
                }
            );
        }
        None
    }

    /* /// Returns a mutable reference to the underlying data.
    ///
    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
    /// already statically guaranteed that access to the data is exclusive.
    pub fn get_mut(&mut self) -> &mut T {
        self.val.get_mut()
    } */

}
546
547/* impl<T: /*: ?Sized +*/ fmt::Debug> fmt::Debug for RwMutex<T> {
548    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
549        fmt::Debug::fmt(&self.val, f)
550    }
551} */
552
/// Because there can be more than one readers to get the T's immutable ref,
/// so T must be Sync to guarantee the sharing safety.
#[verifier::external]
unsafe impl<T:   /*: ?Sized +*/ Send> Send for RwMutex<T> {

}

#[verifier::external]
unsafe impl<T:   /*: ?Sized +*/ Send + Sync> Sync for RwMutex<T> {

}

// The guards are `!Send`: presumably so that a guard cannot migrate to (and be
// released from) a different thread than the one that acquired it —
// TODO(review): confirm this rationale against the rest of the crate.
impl<T  /*: ?Sized*/ > !Send for RwMutexWriteGuard<'_, T> {

}

#[verifier::external]
unsafe impl<T:   /*: ?Sized +*/ Sync> Sync for RwMutexWriteGuard<'_, T> {

}

impl<T  /*: ?Sized*/ > !Send for RwMutexReadGuard<'_, T> {

}

#[verifier::external]
unsafe impl<T:   /*: ?Sized +*/ Sync> Sync for RwMutexReadGuard<'_, T> {

}

impl<T  /*: ?Sized*/ > !Send for RwMutexUpgradeableGuard<'_, T> {

}

#[verifier::external]
unsafe impl<T:   /*: ?Sized +*/ Sync> Sync for RwMutexUpgradeableGuard<'_, T> {

}
591
/// A guard that provides immutable data access.
///
/// Carries one fraction of the pooled reader token, which in turn carries a
/// half cell permission used by `Deref` to borrow the protected value.
#[verifier::reject_recursive_types(T)]
#[clippy::has_significant_drop]
#[must_use]
pub struct RwMutexReadGuard<'a, T  /*: ?Sized*/ > {
    inner: &'a RwMutex<T>,
    v_token: Tracked<Frac<ReadPerm<T>, MAX_READER_U64>>,
}
600
impl<'a, T> RwMutexReadGuard<'a, T> {
    /// Ties the guard's fractional token to its mutex's ghost ids: the token
    /// is exactly one reader fraction, holding a half cell permission for the
    /// mutex's cell plus left-mode knowledge of the core token.
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool {
        let resource = self.v_token@.resource();
        let read_half_cell_perm = resource.0;
        let mode_knowledge = resource.1;
        &&& self.inner.core_token_id() == mode_knowledge.id()
        &&& self.inner.frac_id() == read_half_cell_perm.id()
        &&& self.inner.cell_id() == read_half_cell_perm.resource().id()
        &&& self.v_token@.id() == self.inner.read_guard_token_id()
        &&& read_half_cell_perm.frac() == 1
        &&& self.v_token@.frac() == 1
    }

    /// The protected value, as witnessed by the guard's cell permission.
    pub closed spec fn value(self) -> T {
        *self.v_token@.resource().0.resource().value()
    }

    /// Abstract view of the guard: the protected value.
    pub open spec fn view(self) -> T {
        self.value()
    }
}
623
impl<T  /*: ?Sized*/ > Deref for RwMutexReadGuard<'_, T> {
    type Target = T;

    /// Returns a shared reference to the protected data.
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T {
        proof! {
            use_type_invariant(self);
        }
        // unsafe { &*self.inner.val.get() }
        // Borrow the cell through this reader's half cell permission
        // (the `.0` component of the guard's token resource).
        self.inner.val.borrow(Tracked(self.v_token.borrow().borrow().0.borrow()))
    }
}
636
impl<T  /* : ?Sized */ > RwMutexReadGuard<'_, T> {
    /// Releases the read lock.
    ///
    /// NOTE(review): this is an inherent `drop(self)` method, not a `Drop`
    /// impl — it must be invoked explicitly; confirm callers do so.
    fn drop(self) {
        // When there are no readers, wake up a waiting writer.
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        proof_decl! {
            let tracked token = self.v_token.get();
        }
        if atomic_with_ghost!(
            self.inner.lock => fetch_sub(READER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                // NOTE(review): unverified hypothesis that the reader count has
                // not overflowed — TODO: discharge this `assume`.
                assume(no_max_reader_overflow(prev_usize));
                lemma_consts_properties_value(next_usize);
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                // Return this reader's fraction to the pooled token.
                g.core_token.validate_with_one_left_knowledge(&token.borrow().1);
                let tracked mut tmp = g.read_guard_token.tracked_take_left();
                tmp.combine(token);
                g.read_guard_token = Sum::Left(tmp);
            }
        )
            == READER {
            // Previous value was exactly `READER`: we were the last reader.
            self.inner.queue.wake_one();
        }
    }
}
668
/// A guard that provides mutable data access.
///
/// Holds the full `PointsTo` for the protected cell plus the writer-mode
/// ("right") knowledge of the core sum resource.
#[clippy::has_significant_drop]
#[must_use]
#[verifier::reject_recursive_types(T)]
pub struct RwMutexWriteGuard<'a, T  /*: ?Sized*/ > {
    inner: &'a RwMutex<T>,
    v_perm: Tracked<PointsTo<T>>,
    v_token: Tracked<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>>,
}
678
impl<'a, T> RwMutexWriteGuard<'a, T> {
    /// Ties the guard's permission and token to its mutex's ghost ids.
    #[verifier::type_invariant]
    spec fn type_inv(self) -> bool {
        &&& self.inner.cell_id() == self.v_perm@.id()
        &&& self.inner.core_token_id() == self.v_token@.id()
    }

    /// The protected value, as witnessed by the guard's full cell permission.
    pub closed spec fn value(self) -> T {
        *self.v_perm@.value()
    }

    /// Abstract view of the guard: the protected value.
    pub open spec fn view(self) -> T {
        self.value()
    }
}
694
impl<T  /*: ?Sized*/ > Deref for RwMutexWriteGuard<'_, T> {
    type Target = T;

    /// Returns a shared reference to the protected data.
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T {
        proof! {
            use_type_invariant(self);
        }
        // unsafe { &*self.inner.val.get() }
        // Borrow the cell through the writer's full cell permission.
        self.inner.val.borrow(Tracked(self.v_perm.borrow()))
    }
}
707
impl<'a, T  /*: ?Sized*/ > RwMutexWriteGuard<'a, T> {
    /// Atomically downgrades a write guard to an upgradeable reader guard.
    ///
    /// This method always succeeds because the lock is exclusively held by the writer.
    #[verifier::exec_allows_no_decreases_clause]
    pub fn downgrade(self) -> RwMutexUpgradeableGuard<'a, T> {
        // Retry until `try_downgrade` succeeds; on failure it returns the
        // (reconstructed) write guard so the loop can try again.
        let mut this = self;
        loop {
            this =
            match this.try_downgrade() {
                Ok(guard) => return guard,
                Err(e) => e,
            };
        }
    }

    /// This is not exposed as a public method to prevent intermediate lock states from affecting the
    /// downgrade process.
    fn try_downgrade(self) -> Result<RwMutexUpgradeableGuard<'a, T>, Self> {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        // `perm`/`token` are consumed on success; the `err_*` slots hand them
        // back so the write guard can be rebuilt on failure.
        proof_decl! {
            let tracked perm = self.v_perm.get();
            let tracked token = self.v_token.get();
            let tracked mut upgrade_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>> = None;
            let tracked mut err_perm: Option<PointsTo<T>> = None;
            let tracked mut err_write_guard_token: Option<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>> = None;
        }
        let inner = self.inner;
        // Step 1: atomically swap the WRITER bit for the UPGRADEABLE_READER bit.
        let res =
            atomic_with_ghost!(
            self.inner.lock => compare_exchange(WRITER, UPGRADEABLE_READER);
            update prev -> next;
            returning res;
            ghost g => {
                lemma_consts_properties_prev_next(prev, next);
                if res is Ok {
                    // Fold the writer tokens back in, restore the full cell
                    // permission, split off a half for the reader pool, and
                    // take the upreader slot for the returned guard.
                    g.core_token.validate_with_one_right_knowledge(&token);
                    g.core_token.join_one_right_knowledge(token);
                    let tracked empty = g.core_token.take_resource_right();
                    let tracked mut full = empty.put_resource(perm);
                    let tracked read_half_cell_perm = full.split(1int);
                    g.core_token.change_to_left(full);
                    upgrade_guard_token = Some(g.core_token.split_one_left_owner());
                    let tracked left_token = g.core_token.split_one_left_knowledge();
                    let tracked read_guard_empty = g.read_guard_token.tracked_take_right();
                    let tracked read_guard_token = FracResource::alloc_from_empty(
                        read_guard_empty,
                        (read_half_cell_perm, left_token),
                    );
                    g.read_guard_token = Sum::Left(read_guard_token);
                } else {
                    // CAS failed; return the tokens so the guard can be rebuilt.
                    err_perm = Some(perm);
                    err_write_guard_token = Some(token);
                }
            }
        );

        if res.is_ok() {
            // drop(self);
            // NOTE(review): the successful CAS above already replaced WRITER
            // with UPGRADEABLE_READER, so this `fetch_and(!WRITER)` looks like
            // a no-op on the state word; it is presumably here to re-open the
            // invariant and validate the upreader slot token — confirm against
            // the verification design.
            atomic_with_ghost! {
                self.inner.lock => fetch_and(!WRITER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    let tracked mut guard_token = upgrade_guard_token.tracked_unwrap();
                    g.core_token.validate_with_one_left_owner(&guard_token);
                    if g.upreader_guard_token is Some {
                        // We hold the unique upreader owner, so the slot cannot
                        // also be occupied — derive the contradiction.
                        guard_token.validate_with_one_left_owner(
                            g.upreader_guard_token.tracked_borrow(),
                        );
                        assert(false);
                    }
                    upgrade_guard_token = Some(guard_token);
                    lemma_consts_properties_value(prev_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    lemma_consts_properties_value(next_usize);
                }
            };
            self.inner.queue.wake_all();
            Ok(
                RwMutexUpgradeableGuard { inner, v_token: Tracked(upgrade_guard_token.tracked_unwrap()) }
            )
        } else {
            Err(
                RwMutexWriteGuard { inner, v_perm: Tracked(err_perm.tracked_unwrap()), v_token: Tracked(err_write_guard_token.tracked_unwrap()) }
            )
        }
    }

    /// Releases the write lock.
    ///
    /// NOTE(review): this is an inherent `drop(self)` method, not a `Drop`
    /// impl — it must be invoked explicitly; confirm callers do so.
    pub fn drop(self) {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        proof_decl! {
            let tracked perm = self.v_perm.get();
            let tracked token = self.v_token.get();
        }
        // Clear the writer bit; in ghost state, re-split the full permission
        // into the upreader slot and the reader pool (mirroring `RwMutex::new`).
        atomic_with_ghost! {
            self.inner.lock => fetch_and(!WRITER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                lemma_consts_properties_value(next_usize);
                g.core_token.validate_with_one_right_knowledge(&token);
                g.core_token.join_one_right_knowledge(token);
                let tracked empty = g.core_token.take_resource_right();
                let tracked mut full = empty.put_resource(perm);
                let tracked read_half_cell_perm = full.split(1int);
                g.core_token.change_to_left(full);
                let tracked upreader_guard_token = g.core_token.split_one_left_owner();
                g.upreader_guard_token = Some(upreader_guard_token);
                let tracked left_token = g.core_token.split_one_left_knowledge();
                let tracked read_guard_empty = g.read_guard_token.tracked_take_right();
                let tracked read_guard_token = FracResource::alloc_from_empty(
                    read_guard_empty,
                    (read_half_cell_perm, left_token),
                );
                g.read_guard_token = Sum::Left(read_guard_token);
            }
        };
        // When the current writer releases, wake up all the sleeping threads.
        // All awakened threads may include readers and writers.
        // Thanks to the `wait_until` method, either all readers
        // continue to execute or one writer continues to execute.
        self.inner.queue.wake_all();
    }
}
844
#[verus_verify]
impl<T /*: ?Sized*/ > DerefMut for RwMutexWriteGuard<'_, T> {
    /// Grants mutable access to the protected value.
    ///
    /// Verified replacement for the raw-pointer deref: the guard's tracked
    /// `PointsTo` permission (`v_perm`) justifies borrowing the `PCell`
    /// mutably via `pcell_borrow_mut`.
    #[verus_spec(ret =>
        ensures
            final(self).view() == *final(ret),
    )]
    fn deref_mut(&mut self) -> (ret: &mut Self::Target) 
    {
        proof! {
            use_type_invariant(&*self);
        }
        //unsafe { &mut *self.inner.val.get() }
        pcell_borrow_mut(&self.inner.val, &mut self.v_perm)
    }
}
860
/// A guard that provides immutable data access but can be atomically
/// upgraded to [`RwMutexWriteGuard`].
#[verifier::reject_recursive_types(T)]
pub struct RwMutexUpgradeableGuard<'a, T  /*: ?Sized*/ > {
    // The mutex this guard was taken from.
    inner: &'a RwMutex<T>,
    // Ghost owner token certifying this thread holds the (unique)
    // upgradeable-reader position; it carries a borrowable half permission
    // for the protected cell (see `value`/`deref`).
    v_token: Tracked<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>>,
}
868
impl<'a, T  /*: ?Sized*/ > RwMutexUpgradeableGuard<'a, T> {
    /// Type invariant: the guard's token is well-formed with respect to the
    /// mutex's ghost identifiers (core token, fraction, and cell ids).
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool {
        wf_upgradeable_guard_token(
            self.inner.core_token_id(),
            self.inner.frac_id(),
            self.inner.cell_id(),
            self.v_token@,
        )
    }

    /// The protected value, as witnessed by the permission carried
    /// inside the guard's token.
    pub closed spec fn value(self) -> T {
        *self.v_token@.resource().resource().value()
    }

    /// Public view of the guard: the protected value.
    pub open spec fn view(self) -> T {
        self.value()
    }
}
888
#[verus_verify]
impl<'a, T> RwMutexUpgradeableGuard<'a, T> {
    /// Upgrades this upread guard to a write guard atomically.
    ///
    /// After calling this method, subsequent readers will be blocked
    /// while previous readers remain unaffected.
    ///
    /// The calling thread will not sleep, but spin to wait for the existing
    /// reader to be released. There are two main reasons.
    /// - First, it needs to sleep in an extra waiting queue and needs extra wake-up logic and overhead.
    /// - Second, upgrading method usually requires a high response time (because the mutex is being used now).
    #[verifier::exec_allows_no_decreases_clause]
    pub fn upgrade(self) -> RwMutexWriteGuard<'a, T> {
        let mut this = self;
        proof! {
            use_type_invariant(&this);
            use_type_invariant(&this.inner);
            lemma_consts_properties();
        }
        // Announce the upgrade so new readers are turned away; existing
        // readers drain on their own.
        atomic_with_ghost!(
            this.inner.lock => fetch_or(BEING_UPGRADED);
            update prev -> next;
            ghost g => {
                lemma_consts_properties_prev_next(prev, next);
            }
        );
        // Spin until the compare-exchange in `try_upgrade` succeeds.
        loop {
            this =
            match this.try_upgrade() {
                Ok(guard) => return guard,
                Err(e) => e,
            };
        }
    }

    /// Attempts to upgrade this upread guard to a write guard atomically.
    ///
    /// This function will return immediately.
    ///
    /// This function is not exposed publicly because the `BEING_UPGRADED` bit
    /// is set only in [`Self::upgrade`].
    #[verus_spec]
    fn try_upgrade(self) -> Result<RwMutexWriteGuard<'a, T>, Self> {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        proof_decl! {
            // Tokens/permissions moved between the guard and the lock's ghost
            // state depending on whether the CAS below succeeds.
            let tracked upread_guard_token = self.v_token.get();
            let tracked mut write_perm: Option<PointsTo<T>> = None;
            let tracked mut err_upread_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>> = None;
            let tracked mut retract_upgrade_token: Option<UniqueToken> = None;
            let tracked mut write_guard_token: Option<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>> = None;
        }

        // Succeeds only when this upgradeable reader (with the upgrade bit
        // set) is the sole holder, i.e. all plain readers have left.
        let res =
            atomic_with_ghost!(
            self.inner.lock => compare_exchange(UPGRADEABLE_READER | BEING_UPGRADED, WRITER | UPGRADEABLE_READER);
            update prev -> next;
            returning res;
            ghost g => {
                lemma_consts_properties_prev_next(prev, next);
                if res is Ok {
                    // Return our owner token, reclaim the reader-side
                    // resources, and reassemble the full cell permission.
                    g.core_token.validate_with_one_left_owner(&upread_guard_token);
                    if g.upreader_guard_token is Some {
                        upread_guard_token.validate_with_one_left_owner(g.upreader_guard_token.tracked_borrow());
                    }
                    g.core_token.join_one_left_owner(upread_guard_token);
                    let tracked read_guard_token = g.read_guard_token.tracked_take_left();
                    let tracked (read_resource, read_empty) = read_guard_token.take_resource();
                    g.read_guard_token = Sum::Right(read_empty);
                    let tracked (read_half_cell_perm, left_token) = read_resource;
                    g.core_token.join_one_left_knowledge(left_token);
                    let tracked mut pointsto = g.core_token.take_resource_left();
                    pointsto.combine(read_half_cell_perm);
                    let tracked (pointsto, empty) = pointsto.take_resource();
                    write_perm = Some(pointsto);
                    // Flip the core token to the write-locked (right) side.
                    g.core_token.change_to_right(empty);
                    write_guard_token = Some(g.core_token.split_one_right_knowledge());
                    retract_upgrade_token = Some(g.upread_retract_token.tracked_take());
                } else {
                    // CAS failed: keep the token and stay an upread guard.
                    err_upread_guard_token = Some(upread_guard_token);
                }
            }
        );

        if res.is_ok() {
            let inner = self.inner;
            // Drop the UPGRADEABLE_READER bit now that we hold WRITER.
            atomic_with_ghost!(
                inner.lock => fetch_sub(UPGRADEABLE_READER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    lemma_consts_properties_value(prev_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    // Return the unique retract token to the invariant.
                    let tracked mut token = retract_upgrade_token.tracked_unwrap();
                    if g.upread_retract_token is Some {
                        token.validate_with_other(g.upread_retract_token.tracked_borrow());
                    }
                    g.upread_retract_token = Some(token);
                }
            );
            Ok(
                RwMutexWriteGuard { inner, v_perm: Tracked(write_perm.tracked_unwrap()), v_token: Tracked(write_guard_token.tracked_unwrap()) }
            )
        } else {
            Err(
                RwMutexUpgradeableGuard { inner: self.inner, v_token: Tracked(err_upread_guard_token.tracked_unwrap()) }
            )
        }
    }

    /// Releases the upgradeable-reader lock.
    ///
    /// Subtracts the `UPGRADEABLE_READER` bit and stores the guard's owner
    /// token back into the lock's ghost state. If this holder was the last
    /// one (previous value was exactly `UPGRADEABLE_READER`), wakes all
    /// sleeping threads.
    #[verus_spec]
    pub fn drop(self) {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        proof_decl! {
            let tracked guard_token = self.v_token.get();
        }
        let res = atomic_with_ghost!(
            self.inner.lock => fetch_sub(UPGRADEABLE_READER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                lemma_consts_properties_value(prev_usize);
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                g.core_token.validate_with_one_left_owner(&guard_token);
                if g.upreader_guard_token is Some {
                    // Two owner tokens cannot coexist; this branch is
                    // provably unreachable.
                    guard_token.validate_with_one_left_owner(g.upreader_guard_token.tracked_borrow());
                    assert(false);
                } else {
                    g.upreader_guard_token = Some(guard_token);
                }
            }
        );
        if res == UPGRADEABLE_READER {
            self.inner.queue.wake_all();
        }
    }
}
1035
impl<T  /*: ?Sized*/ > Deref for RwMutexUpgradeableGuard<'_, T> {
    type Target = T;

    /// Grants immutable access to the protected value.
    ///
    /// Verified replacement for the raw-pointer deref: the half permission
    /// carried inside the guard's owner token justifies borrowing the cell.
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T {
        proof! {
            use_type_invariant(self);
        }
        // unsafe { &*self.inner.val.get() }
        self.inner.val.borrow(Tracked(self.v_token.borrow().tracked_borrow().borrow()))
    }
}
1048
/// Establishes the basic bit-level facts about the lock-word constants
/// (`WRITER`, `UPGRADEABLE_READER`, `BEING_UPGRADED`, `READER`, and the
/// reader masks): their concrete values, their pairwise disjointness, and
/// how the two compound masks used by compare-exchange interact with each
/// bit field. Every fact is discharged by `compute_only`, so callers invoke
/// this lemma once instead of re-proving constant arithmetic at each use.
proof fn lemma_consts_properties()
    ensures
        0 & WRITER == 0,
        0 & UPGRADEABLE_READER == 0,
        0 & BEING_UPGRADED == 0,
        0 & READER_MASK == 0,
        0 & MAX_READER_MASK == 0,
        0 & MAX_READER == 0,
        0 & READER == 0,
        WRITER == 0x8000_0000_0000_0000,
        UPGRADEABLE_READER == 0x4000_0000_0000_0000,
        BEING_UPGRADED == 0x2000_0000_0000_0000,
        READER_MASK == 0x0FFF_FFFF_FFFF_FFFF,
        MAX_READER_MASK == 0x1FFF_FFFF_FFFF_FFFF,
        MAX_READER == 0x1000_0000_0000_0000,
        WRITER & WRITER == WRITER,
        WRITER & !WRITER == 0,
        WRITER & BEING_UPGRADED == 0,
        WRITER & READER_MASK == 0,
        WRITER & MAX_READER_MASK == 0,
        WRITER & MAX_READER == 0,
        WRITER & UPGRADEABLE_READER == 0,
        BEING_UPGRADED & WRITER == 0,
        BEING_UPGRADED & UPGRADEABLE_READER == 0,
        UPGRADEABLE_READER & BEING_UPGRADED == 0,
        UPGRADEABLE_READER & READER_MASK == 0,
        UPGRADEABLE_READER & MAX_READER_MASK == 0,
        UPGRADEABLE_READER & MAX_READER == 0,
        BEING_UPGRADED & READER_MASK == 0,
        BEING_UPGRADED & MAX_READER_MASK == 0,
        BEING_UPGRADED & MAX_READER == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & WRITER == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & UPGRADEABLE_READER == UPGRADEABLE_READER,
        (UPGRADEABLE_READER | BEING_UPGRADED) & BEING_UPGRADED == BEING_UPGRADED,
        (UPGRADEABLE_READER | BEING_UPGRADED) & READER_MASK == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER_MASK == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER == 0,
        (WRITER | UPGRADEABLE_READER) & WRITER == WRITER,
        (WRITER | UPGRADEABLE_READER) & UPGRADEABLE_READER == UPGRADEABLE_READER,
        (WRITER | UPGRADEABLE_READER) & BEING_UPGRADED == 0,
        (WRITER | UPGRADEABLE_READER) & READER_MASK == 0,
        (WRITER | UPGRADEABLE_READER) & MAX_READER_MASK == 0,
        (WRITER | UPGRADEABLE_READER) & MAX_READER == 0,
{
    // Each postcondition is proven by its own `compute_only` assertion.
    assert(0 & WRITER == 0) by (compute_only);
    assert(0 & UPGRADEABLE_READER == 0) by (compute_only);
    assert(0 & BEING_UPGRADED == 0) by (compute_only);
    assert(0 & READER_MASK == 0) by (compute_only);
    assert(0 & MAX_READER == 0) by (compute_only);
    assert(0 & MAX_READER_MASK == 0) by (compute_only);
    assert(0 & READER == 0) by (compute_only);
    assert(WRITER == 0x8000_0000_0000_0000) by (compute_only);
    assert(UPGRADEABLE_READER == 0x4000_0000_0000_0000) by (compute_only);
    assert(BEING_UPGRADED == 0x2000_0000_0000_0000) by (compute_only);
    assert(READER_MASK == 0x0FFF_FFFF_FFFF_FFFF) by (compute_only);
    assert(MAX_READER == 0x1000_0000_0000_0000) by (compute_only);
    assert(MAX_READER_MASK == 0x1FFF_FFFF_FFFF_FFFF) by (compute_only);
    assert(WRITER & WRITER == WRITER) by (compute_only);
    assert(WRITER & !WRITER == 0) by (compute_only);
    assert(WRITER & BEING_UPGRADED == 0) by (compute_only);
    assert(WRITER & READER_MASK == 0) by (compute_only);
    assert(WRITER & MAX_READER_MASK == 0) by (compute_only);
    assert(WRITER & MAX_READER == 0) by (compute_only);
    assert(WRITER & UPGRADEABLE_READER == 0) by (compute_only);
    assert(BEING_UPGRADED & WRITER == 0) by (compute_only);
    assert(BEING_UPGRADED & UPGRADEABLE_READER == 0) by (compute_only);
    assert(UPGRADEABLE_READER & BEING_UPGRADED == 0) by (compute_only);
    assert(UPGRADEABLE_READER & READER_MASK == 0) by (compute_only);
    assert(UPGRADEABLE_READER & MAX_READER_MASK == 0) by (compute_only);
    assert(UPGRADEABLE_READER & MAX_READER == 0) by (compute_only);
    assert(BEING_UPGRADED & READER_MASK == 0) by (compute_only);
    assert(BEING_UPGRADED & MAX_READER_MASK == 0) by (compute_only);
    assert(BEING_UPGRADED & MAX_READER == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & WRITER == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & UPGRADEABLE_READER == UPGRADEABLE_READER)
        by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & BEING_UPGRADED == BEING_UPGRADED)
        by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & READER_MASK == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER_MASK == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & WRITER == WRITER) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & UPGRADEABLE_READER == UPGRADEABLE_READER)
        by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & BEING_UPGRADED == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & READER_MASK == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & MAX_READER_MASK == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & MAX_READER == 0) by (compute_only);
}
1138
/// Facts about a single observed lock-word value `prev`: how testing the
/// compound masks decomposes into per-bit tests, and relations between the
/// reader count fields. Each case is discharged by the `bit_vector` solver.
/// (NOTE(review): `no_max_reader_overflow(prev)` is defined elsewhere; the
/// proof below treats it as `prev & MAX_READER_MASK < MAX_READER_MASK` —
/// confirm against its definition.)
proof fn lemma_consts_properties_value(prev: usize)
    ensures
        no_max_reader_overflow(prev) ==> prev + READER <= usize::MAX,
        prev & (WRITER | BEING_UPGRADED | MAX_READER) == 0 ==> {
            &&& prev & WRITER == 0
            &&& prev & BEING_UPGRADED == 0
            &&& prev & MAX_READER == 0
        },
        prev & (WRITER | UPGRADEABLE_READER) == 0 ==> {
            &&& prev & WRITER == 0
            &&& prev & UPGRADEABLE_READER == 0
        },
        prev & MAX_READER == 0 ==> prev & READER_MASK == prev & MAX_READER_MASK,
        prev & MAX_READER != 0 ==> prev & MAX_READER_MASK >= MAX_READER,
        prev & (WRITER | UPGRADEABLE_READER) == WRITER ==> {
            &&& prev & UPGRADEABLE_READER == 0
            &&& prev & WRITER == WRITER
        },
        prev & UPGRADEABLE_READER != 0 ==> prev >= UPGRADEABLE_READER,
        prev & UPGRADEABLE_READER == 0 ==> {
            ||| prev & (WRITER | UPGRADEABLE_READER) == 0
            ||| prev & (WRITER | UPGRADEABLE_READER) == WRITER
        },
{
    // Incrementing the reader count cannot overflow the word.
    if no_max_reader_overflow(prev) {
        assert(prev + READER <= usize::MAX) by (bit_vector)
            requires
                prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
    }
    // A clear compound mask means each constituent bit is clear.
    if prev & (WRITER | BEING_UPGRADED | MAX_READER) == 0 {
        assert(prev & WRITER == 0) by (bit_vector)
            requires
                prev & (WRITER | BEING_UPGRADED | MAX_READER) == 0,
        ;
        assert(prev & BEING_UPGRADED == 0) by (bit_vector)
            requires
                prev & (WRITER | BEING_UPGRADED | MAX_READER) == 0,
        ;
        assert(prev & MAX_READER == 0) by (bit_vector)
            requires
                prev & (WRITER | BEING_UPGRADED | MAX_READER) == 0,
        ;
    }
    if prev & (WRITER | UPGRADEABLE_READER) == 0 {
        assert(prev & WRITER == 0) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == 0,
        ;
        assert(prev & UPGRADEABLE_READER == 0) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == 0,
        ;
    }
    // With the MAX_READER bit clear the two reader masks agree.
    if prev & MAX_READER == 0 {
        assert(prev & READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                prev & MAX_READER == 0,
        ;
    }
    if prev & MAX_READER != 0 {
        assert(prev & MAX_READER_MASK >= MAX_READER) by (bit_vector)
            requires
                prev & MAX_READER != 0,
        ;
    }
    if prev & (WRITER | UPGRADEABLE_READER) == WRITER {
        assert(prev & UPGRADEABLE_READER == 0 && prev & WRITER == WRITER) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == WRITER,
        ;
    }
    if prev & UPGRADEABLE_READER != 0 {
        assert(prev >= UPGRADEABLE_READER) by (bit_vector)
            requires
                prev & UPGRADEABLE_READER != 0,
        ;
    }
    // With UPGRADEABLE_READER clear, the compound test can only see WRITER.
    if prev & UPGRADEABLE_READER == 0 {
        assert(prev & (WRITER | UPGRADEABLE_READER) == 0 || prev & (WRITER | UPGRADEABLE_READER)
            == WRITER) by (bit_vector)
            requires
                prev & UPGRADEABLE_READER == 0,
        ;
    }
}
1225
/// Characterizes how each atomic transition used by the lock (`fetch_or`,
/// `fetch_sub`, `fetch_add` of `READER`, `fetch_and(!WRITER)`, and the
/// upgrade compare-exchange) affects every bit field of the lock word:
/// given `next` expressed in terms of `prev`, it pins down which fields
/// change and which are preserved. All cases are discharged by the
/// `bit_vector` solver, one assertion per preserved/changed field.
proof fn lemma_consts_properties_prev_next(prev: usize, next: usize)
    ensures
        prev & READER_MASK < MAX_READER,
        next == UPGRADEABLE_READER && prev == WRITER ==> {
            &&& next & WRITER == 0
            &&& next & UPGRADEABLE_READER == UPGRADEABLE_READER
            &&& next & READER_MASK == 0
            &&& next & MAX_READER_MASK == 0
            &&& next & MAX_READER == 0
            &&& next & BEING_UPGRADED == 0
        },
        next == prev | UPGRADEABLE_READER ==> {
            &&& next & UPGRADEABLE_READER != 0
            &&& next & WRITER == prev & WRITER
            &&& next & READER_MASK == prev & READER_MASK
            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
            &&& next & MAX_READER == prev & MAX_READER
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
        next == prev | BEING_UPGRADED ==> {
            &&& next & BEING_UPGRADED != 0
            &&& next & WRITER == prev & WRITER
            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
            &&& next & READER_MASK == prev & READER_MASK
            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
            &&& next & MAX_READER == prev & MAX_READER
        },
        next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0 ==> {
            &&& next & UPGRADEABLE_READER == 0
            &&& next & WRITER == prev & WRITER
            &&& next & READER_MASK == prev & READER_MASK
            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
            &&& next & MAX_READER == prev & MAX_READER
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
        next == prev - READER && prev & READER_MASK != 0 ==> {
            &&& next & READER_MASK == (prev & READER_MASK) - READER
            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER
            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
            &&& next & WRITER == prev & WRITER
            &&& next & MAX_READER == prev & MAX_READER
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
        next == prev - READER && prev & MAX_READER_MASK != 0 ==> {
            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER
            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
            &&& next & WRITER == prev & WRITER
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
        next == prev + READER && no_max_reader_overflow(prev) ==> {
            &&& next & READER_MASK == if (prev & READER_MASK) + READER == MAX_READER {
                0
            } else {
                (prev & READER_MASK) + READER
            }
            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) + READER
            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
            &&& next & WRITER == prev & WRITER
            &&& next & MAX_READER == if (prev & READER_MASK) + READER == MAX_READER {
                MAX_READER
            } else {
                prev & MAX_READER
            }
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
        next == prev & !WRITER ==> {
            &&& next & WRITER == 0
            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
            &&& next & READER_MASK == prev & READER_MASK
            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
            &&& next & MAX_READER == prev & MAX_READER
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
{
    // READER_MASK excludes the MAX_READER bit, so it is always below it.
    assert(prev & READER_MASK < MAX_READER) by (bit_vector);
    // Case: write guard downgraded to upgradeable reader (CAS WRITER -> UPGRADEABLE_READER).
    if next == UPGRADEABLE_READER && prev == WRITER {
        assert(next & WRITER == 0) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
        assert(next & UPGRADEABLE_READER == UPGRADEABLE_READER) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
        assert(next & READER_MASK == 0) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
        assert(next & MAX_READER_MASK == 0) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
        assert(next & MAX_READER == 0) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
        assert(next & BEING_UPGRADED == 0) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
    }
    // Case: fetch_or(UPGRADEABLE_READER) — only that bit may change.
    if next == prev | UPGRADEABLE_READER {
        assert(next & UPGRADEABLE_READER != 0) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
    }
    // Case: fetch_or(BEING_UPGRADED) — only that bit may change.
    if next == prev | BEING_UPGRADED {
        assert(next & BEING_UPGRADED != 0) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
    }
    // Case: fetch_sub(UPGRADEABLE_READER) with the bit known set — clears
    // exactly that bit, no borrow into other fields.
    if next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0 {
        assert(next & UPGRADEABLE_READER == 0) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
    }
    // Case: fetch_sub(READER) with a nonzero reader count — decrements both
    // reader fields, other bits untouched.
    if next == prev - READER && prev & READER_MASK != 0 {
        assert(next & READER_MASK == (prev & READER_MASK) - READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
    }
    // Weaker variant of the previous case, hypothesized on the wide mask.
    if next == prev - READER && prev & MAX_READER_MASK != 0 {
        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
    }
    // Case: fetch_add(READER) below the saturation bound — the increment may
    // carry from READER_MASK into the MAX_READER bit, handled by the ifs.
    if next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK {
        assert(next & READER_MASK == if prev & READER_MASK == MAX_READER - READER {
            0
        } else {
            (prev & READER_MASK) + READER
        }) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) + READER) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & MAX_READER == if (prev & READER_MASK) + READER == MAX_READER {
            MAX_READER
        } else {
            prev & MAX_READER
        }) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
    }
    // Case: fetch_and(!WRITER) — clears WRITER, preserves everything else.
    if next == prev & !WRITER {
        assert(next & WRITER == 0) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
    }
}
1510
1511} // verus!