ostd/sync/
rwmutex.rs

1// SPDX-License-Identifier: MPL-2.0
2
3use vstd::atomic_ghost::*;
4use vstd::cell::{self, pcell::*, CellId};
5use vstd::pcm::Loc;
6use vstd::prelude::*;
7use vstd::tokens::frac::{Empty, Frac};
8use vstd_extra::resource::ghost_resource::{csum::*, excl::*, tokens::*};
9use vstd_extra::sum::*;
10
11use core::{
12    cell::UnsafeCell,
13    ops::{Deref, DerefMut},
14    sync::atomic::{
15        // AtomicUsize,
16        Ordering::{AcqRel, Acquire, Relaxed, Release},
17    },
18};
19
20use super::WaitQueue;
21
22verus! {
23
/// `MAX_READER` widened to `u64`, used as the total fraction count of the
/// read-guard fractional resource (`FracResource<ReadPerm<T>, MAX_READER_U64>`).
const MAX_READER_U64: u64 = MAX_READER as u64;

/// Spec-level value of the read-retract token pool size: one token per
/// possible in-flight failed read attempt (`MAX_READER_MASK + 1` of them).
spec const V_MAX_READ_RETRACT_FRACS_SPEC: u64 = (MAX_READER_MASK + 1) as u64;

/// Exec-level twin of [`V_MAX_READ_RETRACT_FRACS_SPEC`]; the `ensures`
/// clauses pin the two to the same value and rule out `u64` overflow.
#[verifier::when_used_as_spec(V_MAX_READ_RETRACT_FRACS_SPEC)]
exec const V_MAX_READ_RETRACT_FRACS: u64
    ensures
        V_MAX_READ_RETRACT_FRACS == V_MAX_READ_RETRACT_FRACS_SPEC,
        V_MAX_READ_RETRACT_FRACS == MAX_READER_MASK + 1,
        V_MAX_READ_RETRACT_FRACS < u64::MAX,
{
    // Discharged by bit-level computation; MAX_READER_MASK is usize::MAX >> 3.
    assert(MAX_READER_MASK + 1 < u64::MAX) by (compute_only);
    (MAX_READER_MASK + 1) as u64
}
38
/// Zero-knowledge placeholder for the cell permission (held by no one).
type NoPerm<T> = Empty<PointsTo<T>>;
/// A fractional share of the `PCell` permission; readers hold fractions.
type HalfPerm<T> = Frac<PointsTo<T>>;
/// What each active reader carries: a fractional cell permission plus the
/// knowledge that the core sum-resource is in its "left" (non-writer) state.
type ReadPerm<T> = (HalfPerm<T>, OneLeftKnowledge<HalfPerm<T>, NoPerm<T>, 3>);
42
/// The tracked ghost state stored inside the `AtomicUsize` lock word.
/// The `wf` invariant below ties each field to the bits of the lock value.
tracked struct RwPerms<T> {
    // Left = unlocked/reader mode (holds the cell permission fraction);
    // Right = writer mode (permission has been taken out by the writer).
    core_token: SumResource<HalfPerm<T>, NoPerm<T>, 3>,
    // Pool of tokens; each missing token accounts for one failed read
    // attempt whose `fetch_add(READER)` has not yet been retracted.
    read_retract_token: TokenResource<V_MAX_READ_RETRACT_FRACS>,
    // `None` while a failed upread attempt has set UPGRADEABLE_READER and
    // has not yet retracted it.
    upread_retract_token: Option<UniqueToken>,
    // `None` while an upgradeable reader guard is outstanding.
    upreader_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>>,
    // Left = reader mode: fractional resource whose missing fractions count
    // the active read guards; Right = writer mode: empty placeholder.
    read_guard_token: Sum<
        FracResource<ReadPerm<T>, MAX_READER_U64>,
        Empty<ReadPerm<T>, MAX_READER_U64>,
    >,
}
53
/// Ghost identifiers tying the guards' tracked tokens back to the resources
/// stored in a particular `RwMutex` instance.
ghost struct RwId {
    core_token_id: Loc,
    frac_id: Loc,
    read_retract_token_id: Loc,
    upread_retract_token_id: Loc,
    read_guard_token_id: Loc,
}
61
#[verifier::reject_recursive_types(T)]
struct_with_invariants! {
/// A mutex that provides data access to either one writer or many readers.
///
/// # Overview
///
/// This mutex allows for multiple readers, or at most one writer to access
/// at any point in time. The writer of this mutex has exclusive access to
/// modify the underlying data, while the readers are allowed shared and
/// read-only access.
///
/// The writing and reading portions cannot be active simultaneously, when
/// one portion is in progress, the other portion will sleep. This is
/// suitable for scenarios where the mutex is expected to be held for a
/// period of time, which can avoid wasting CPU resources.
///
/// This implementation provides the upgradeable read mutex (`upread mutex`).
/// The `upread mutex` can be upgraded to write mutex atomically, useful in
/// scenarios where a decision to write is made after reading.
///
/// The type parameter `T` represents the data that this mutex is protecting.
/// It is necessary for `T` to satisfy [`Send`] to be shared across tasks and
/// [`Sync`] to permit concurrent access via readers. The [`Deref`] method (and
/// [`DerefMut`] for the writer) is implemented for the RAII guards returned
/// by the locking methods, which allows for the access to the protected data
/// while the mutex is held.
///
/// # Usage
///
/// The mutex can be used in scenarios where data needs to be read frequently
/// but written to occasionally.
///
/// Use `upread mutex` in scenarios where related checking is performed before
/// modification to effectively avoid deadlocks and improve efficiency.
///
/// # Safety
///
/// Avoid using `RwMutex` in an interrupt context, as it may result in sleeping
/// and never being awakened.
///
/// # Examples
///
/// ```
/// use ostd::sync::RwMutex;
///
/// let mutex = RwMutex::new(5);
///
/// // many read mutexes can be held at once
/// {
///     let r1 = mutex.read();
///     let r2 = mutex.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
///
///     // Upgradeable read mutex can share access to data with read mutexes
///     let r3 = mutex.upread();
///     assert_eq!(*r3, 5);
///     drop(r1);
///     drop(r2);
///     // read mutexes are dropped at this point
///
///     // An upread mutex can only be upgraded successfully after all the
///     // read mutexes are released, otherwise it will spin-wait.
///     let mut w1 = r3.upgrade();
///     *w1 += 1;
///     assert_eq!(*w1, 6);
/// }   // upread mutexes are dropped at this point
///
/// {
///     // Only one write mutex can be held at a time
///     let mut w2 = mutex.write();
///     *w2 += 1;
///     assert_eq!(*w2, 7);
/// }   // write mutex is dropped at this point
/// ```
pub struct RwMutex<T /*: ?Sized*/> {
    /// The internal representation of the mutex state is as follows:
    /// - **Bit 63:** Writer mutex.
    /// - **Bit 62:** Upgradeable reader mutex.
    /// - **Bit 61:** Indicates if an upgradeable reader is being upgraded.
    /// - **Bits 60-0:** Reader mutex count.
    ///
    /// (Bit positions above assume a 64-bit `usize`; the constants below are
    /// actually defined relative to `usize::BITS`.)
    lock: AtomicUsize<_, RwPerms<T>, _>,
    /// Threads that fail to acquire the mutex will sleep on this waitqueue.
    queue: WaitQueue,
    // val: UnsafeCell<T>,
    val: PCell<T>,
    // Ghost-only identifiers linking guards back to this mutex's resources.
    v_id: Ghost<RwId>,
}

// The atomic invariant: relates the concrete lock word `v` to the ghost
// permissions `g` at every instant.
closed spec fn wf(self) -> bool {
    invariant on lock with (val, v_id) is (v: usize, g: RwPerms<T>) {
        // --- Quantities decoded from the lock word ---
        let has_writer_bit: bool = (v & WRITER) != 0;
        let has_upgrade_bit: bool = (v & UPGRADEABLE_READER) != 0;
        let has_max_reader_bit: bool = (v & MAX_READER) != 0;
        let total_reader_bits: int = (v & MAX_READER_MASK) as int;
        // Reader count saturates at MAX_READER once the sentinel bit is set.
        let reader_bits: int = if has_max_reader_bit {
            MAX_READER as int
        } else {
            (v & READER_MASK) as int
        };

        // --- Quantities decoded from the ghost state ---
        let active_writer: bool = g.core_token.is_right();
        let active_upgrade_guard: bool = !active_writer && g.upreader_guard_token is None;
        // Missing fractions of the read-guard resource = outstanding read guards.
        let active_read_guards: int = if g.read_guard_token is Left {
            MAX_READER_U64 - g.read_guard_token.left().frac()
        } else {
            0
        };
        let pending_failed_upread_attempt: bool = g.upread_retract_token is None;
        // Missing retract tokens = failed read attempts not yet retracted.
        let failed_reader_attempts: int = V_MAX_READ_RETRACT_FRACS - g.read_retract_token.frac();

        // Reader-mode vs writer-mode shape of the ghost state.
        &&& if g.core_token.is_left() {
            g.read_guard_token is Left
        } else {
            &&& g.upreader_guard_token is None
            &&& g.read_guard_token is Right
        }
        // The upgrade bit is set iff an upread guard is held or a failed
        // upread attempt is pending retraction — and never both at once.
        &&& has_upgrade_bit <==> (active_upgrade_guard || pending_failed_upread_attempt)
        &&& !(active_upgrade_guard && pending_failed_upread_attempt)
        // The counter field accounts for real guards plus failed attempts.
        &&& total_reader_bits == active_read_guards + failed_reader_attempts
        &&& active_writer <==> has_writer_bit
        &&& 0 <= active_read_guards <= reader_bits <= total_reader_bits
        // Mutual exclusion: a writer excludes readers and the upread guard.
        &&& !(active_writer && (active_read_guards + if active_upgrade_guard { 1int } else { 0 }) > 0)
        // --- Well-formedness of each ghost resource ---
        &&& g.core_token.id() == v_id@.core_token_id
        &&& g.core_token.wf()
        &&& g.core_token.is_left() ==> {
            &&& !g.core_token.is_resource_owner()
            &&& g.core_token.frac() == 1
        }
        &&& g.core_token.is_right() ==> {
            let empty = g.core_token.resource_right();
            &&& empty.id() == v_id@.frac_id
            &&& g.core_token.frac() == 2
            &&& g.core_token.has_resource()
        }
        &&& g.read_retract_token.wf()
        &&& g.read_retract_token.id() == v_id@.read_retract_token_id
        &&& g.upread_retract_token is Some ==> {
            let token = g.upread_retract_token->Some_0;
            &&& token.wf()
            &&& token.id() == v_id@.upread_retract_token_id
        }
        &&& g.upreader_guard_token is Some ==> {
            let token = g.upreader_guard_token->Some_0;
            wf_upgradeable_guard_token(v_id@.core_token_id, v_id@.frac_id, val.id(), token)
        }
        &&& match g.read_guard_token {
            Sum::Left(token) => {
                &&& token.wf()
                &&& token.id() == v_id@.read_guard_token_id
                // If any fractions remain, they carry the read permission.
                &&& token.not_empty() ==> {
                    let resource = token.resource();
                    let read_half_cell_perm = resource.0;
                    let mode_knowledge = resource.1;
                    &&& mode_knowledge.id() == v_id@.core_token_id
                    &&& read_half_cell_perm.id() == v_id@.frac_id
                    &&& read_half_cell_perm.resource().id() == val.id()
                    &&& read_half_cell_perm.frac() == 1
                }
            },
            Sum::Right(empty) => {
                &&& empty.id() == v_id@.read_guard_token_id
            },
        }
    }
}
}
229
/// Increment applied to the reader-count field for each read attempt.
const READER: usize = 1;
/// Highest bit: a writer holds the lock.
const WRITER: usize = 1 << (usize::BITS - 1);
/// Second-highest bit: an upgradeable reader exists (or a failed upread
/// attempt is pending retraction).
const UPGRADEABLE_READER: usize = 1 << (usize::BITS - 2);
/// Third-highest bit: an upgradeable reader is in the middle of upgrading.
const BEING_UPGRADED: usize = 1 << (usize::BITS - 3);

/// This bit is reserved as an overflow sentinel.
/// For more details, see comments on the `MAX_READER` constant
/// in the [`super::rwlock`] module.
const MAX_READER: usize = 1 << (usize::BITS - 4);

/// Mask selecting the reader-count bits below the sentinel bit.
const READER_MASK: usize = usize::MAX >> 4;
/// Mask selecting the reader-count bits including the sentinel bit.
const MAX_READER_MASK: usize = usize::MAX >> 3;

/// Spec predicate: the (sentinel-inclusive) reader counter in `v` is not
/// saturated, so one more `fetch_add(READER)` cannot overflow into the
/// flag bits. Currently assumed (not proved) at the `fetch_add` sites.
pub closed spec fn no_max_reader_overflow(v: usize) -> bool {
    v & MAX_READER_MASK < MAX_READER_MASK
}
246
impl<T> RwMutex<T> {
    /// The id of the `PCell` holding the protected value.
    pub closed spec fn cell_id(self) -> cell::CellId {
        self.val.id()
    }

    /// The location of the core mode (reader/writer) sum-resource.
    pub closed spec fn core_token_id(self) -> Loc {
        self.v_id@.core_token_id
    }

    /// The location of the fractional cell-permission resource.
    pub closed spec fn frac_id(self) -> Loc {
        self.v_id@.frac_id
    }

    /// The location of the upread-retraction unique token.
    pub closed spec fn upread_retract_token_id(self) -> Loc {
        self.v_id@.upread_retract_token_id
    }

    /// The location of the read-guard fractional resource.
    pub closed spec fn read_guard_token_id(self) -> Loc {
        self.v_id@.read_guard_token_id
    }

    /// Type invariant: the atomic invariant `wf` holds for every `RwMutex`.
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool {
        self.wf()
    }

    /// Exposes the wait queue's own type invariant to callers.
    pub closed spec fn queue_inv(self) -> bool {
        self.queue.type_inv()
    }
}
277
/// Well-formedness of the token carried by an upgradeable reader guard:
/// it owns the "left" slot of the core resource at `core_token_id` and
/// carries a full fractional permission for the cell at `cell_id`.
closed spec fn wf_upgradeable_guard_token<T>(
    core_token_id: Loc,
    frac_id: Loc,
    cell_id: CellId,
    token: OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>,
) -> bool {
    let half_cell_perm = token.resource();
    &&& token.id() == core_token_id
    &&& half_cell_perm.id() == frac_id
    &&& half_cell_perm.resource().id() == cell_id
    &&& token.has_resource()
    &&& half_cell_perm.frac() == 1
    &&& token.wf()
}
292
impl<T> RwMutex<T> {
    /// Creates a new read-write mutex with an initial value.
    pub const fn new(val: T) -> Self {
        // Allocate the cell and obtain its full (tracked) permission.
        let (val, Tracked(perm)) = PCell::new(val);

        proof {
            lemma_consts_properties();
        }
        // Split the cell permission into two fractional halves: one stays
        // in the core token, the other backs the read-guard resource.
        let tracked mut frac_perm = Frac::<PointsTo<T>>::new(perm);
        let tracked read_half_cell_perm = frac_perm.split(1int);
        let ghost frac_id = frac_perm.id();
        // Start in the "left" (no-writer) state.
        let tracked mut core_token = SumResource::alloc_left(frac_perm);
        let tracked read_retract_token = TokenResource::<V_MAX_READ_RETRACT_FRACS>::alloc(());
        let tracked upread_retract_token = UniqueToken::alloc(());
        // Carve out the pieces the guards will later take: the upread
        // owner slot and one left-knowledge share for the read guards.
        let tracked upreader_guard_token = core_token.split_one_left_owner();
        let tracked left_token = core_token.split_one_left_knowledge();
        let tracked read_guard_token = FracResource::<ReadPerm<T>, MAX_READER_U64>::alloc(
            (read_half_cell_perm, left_token),
        );
        let ghost v_id = RwId {
            frac_id,
            core_token_id: core_token.id(),
            upread_retract_token_id: upread_retract_token.id(),
            read_retract_token_id: read_retract_token.id(),
            read_guard_token_id: read_guard_token.id(),
        };
        // All pieces present: no writer, no upreader, no readers.
        let tracked perms = RwPerms {
            core_token,
            read_retract_token,
            upread_retract_token: Some(upread_retract_token),
            upreader_guard_token: Some(upreader_guard_token),
            read_guard_token: Sum::Left(read_guard_token),
        };

        Self {
            // val: UnsafeCell::new(val),
            val,
            // Lock word starts at 0: all flag bits clear, zero readers.
            lock: AtomicUsize::new(Ghost((val, Ghost(v_id))), 0, Tracked(perms)),
            queue: WaitQueue::new(),
            v_id: Ghost(v_id),
        }
    }
}
336
impl<T /*: ?Sized*/> RwMutex<T> {
    /// Acquires a read mutex and sleep until it can be acquired.
    ///
    /// The calling thread will sleep until there are no writers or upgrading
    /// upreaders present. The implementation of [`WaitQueue`] guarantees the
    /// order in which other concurrent readers or writers waiting simultaneously
    /// will acquire the mutex.
    #[track_caller]
    pub fn read(&self) -> RwMutexReadGuard<'_, T> {
        self.queue.wait_until(|| self.try_read())
    }

    /// Acquires a write mutex and sleep until it can be acquired.
    ///
    /// The calling thread will sleep until there are no writers, upreaders,
    /// or readers present. The implementation of [`WaitQueue`] guarantees the
    /// order in which other concurrent readers or writers waiting simultaneously
    /// will acquire the mutex.
    #[track_caller]
    pub fn write(&self) -> RwMutexWriteGuard<'_, T> {
        self.queue.wait_until(|| self.try_write())
    }

    /// Acquires a upread mutex and sleep until it can be acquired.
    ///
    /// The calling thread will sleep until there are no writers or upreaders present.
    /// The implementation of [`WaitQueue`] guarantees the order in which other concurrent
    /// readers or writers waiting simultaneously will acquire the mutex.
    ///
    /// Upreader will not block new readers until it tries to upgrade. Upreader
    /// and reader do not differ before invoking the upgrade method. However,
    /// only one upreader can exist at any time to avoid deadlock in the
    /// upgrade method.
    #[track_caller]
    pub fn upread(&self) -> RwMutexUpgradeableGuard<'_, T> {
        self.queue.wait_until(|| self.try_upread())
    }

    /// Attempts to acquire a read mutex.
    ///
    /// This function will never sleep and will return immediately.
    pub fn try_read(&self) -> Option<RwMutexReadGuard<'_, T>> {
        proof_decl! {
            // On success: one fraction of the read-guard resource.
            let tracked mut read_token: Option<Frac<ReadPerm<T>, MAX_READER_U64>> = None;
            // On failure: a retract token accounting for our counter bump
            // until we undo it below.
            let tracked mut retract_read_token: Option<Token<V_MAX_READ_RETRACT_FRACS>> = None;
        }
        proof! {
            use_type_invariant(self);
            lemma_consts_properties();
        }

        // Optimistically bump the reader count, then check the flag bits
        // of the previous value to decide whether we actually got in.
        let lock = atomic_with_ghost!(
            self.lock => fetch_add(READER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                // NOTE(review): unverified assumption that the reader counter
                // cannot be saturated here — confirm this is justified (e.g.
                // by a bound on concurrent threads) or replace with a proof.
                assume(no_max_reader_overflow(prev_usize));
                lemma_consts_properties_value(prev_usize);
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                if prev_usize & (WRITER | BEING_UPGRADED | MAX_READER) == 0 {
                    // Success: take one fraction out for the new guard.
                    let tracked mut tmp = g.read_guard_token.tracked_take_left();
                    read_token = Some(tmp.split_one());
                    g.read_guard_token = Sum::Left(tmp);
                } else {
                    // Failure: record the pending retraction in ghost state.
                    retract_read_token = Some(g.read_retract_token.split_one());
                }
            }
        );

        if lock & (WRITER | BEING_UPGRADED | MAX_READER) == 0 {
            Some(RwMutexReadGuard {
                inner: self,
                v_token: Tracked(read_token.tracked_unwrap()),
            })
        } else {
            // Undo the speculative increment and return the retract token.
            atomic_with_ghost!(
                self.lock => fetch_sub(READER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    lemma_consts_properties_value(next_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    g.read_retract_token.combine(retract_read_token.tracked_unwrap());
                }
            );
            None
        }
    }

    /// Attempts to acquire a write mutex.
    ///
    /// This function will never sleep and will return immediately.
    pub fn try_write(&self) -> Option<RwMutexWriteGuard<'_, T>> {
        proof_decl! {
            // On success: the full cell permission and writer-mode knowledge.
            let tracked mut guard_perm: Option<PointsTo<T>> = None;
            let tracked mut guard_token: Option<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>> = None;
        }
        proof! {
            use_type_invariant(self);
            lemma_consts_properties();
        }

        // A writer may only enter from the fully-idle state (lock word 0).
        if atomic_with_ghost!(
            self.lock => compare_exchange(0, WRITER);
            update prev -> next;
            returning res;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                if res is Ok {
                    // Reclaim the read-guard resource (no readers exist at 0).
                    let tracked read_guard_token = g.read_guard_token.tracked_take_left();
                    let tracked (read_resource, read_empty) = read_guard_token.take_resource();
                    g.read_guard_token = Sum::Right(read_empty);
                    let tracked (read_half_cell_perm, left_token) = read_resource;
                    // Rejoin every left-mode share into the core token.
                    g.core_token.join_one_left_knowledge(left_token);
                    let tracked upreader_guard_token = g.upreader_guard_token.tracked_take();
                    g.core_token.join_one_left_owner(upreader_guard_token);
                    // Reassemble the full cell permission from both halves.
                    let tracked mut pointsto = g.core_token.take_resource_left();
                    pointsto.combine(read_half_cell_perm);
                    let tracked (pointsto, empty) = pointsto.take_resource();
                    guard_perm = Some(pointsto);
                    // Flip the core token to writer ("right") mode.
                    g.core_token.change_to_right(empty);
                    guard_token = Some(g.core_token.split_one_right_knowledge());
                } else {
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                }
            }
        ).is_ok() {
            Some(RwMutexWriteGuard {
                inner: self,
                v_perm: Tracked(guard_perm.tracked_unwrap()),
                v_token: Tracked(guard_token.tracked_unwrap()),
            })
        } else {
            None
        }
    }

    /// Attempts to acquire a upread mutex.
    ///
    /// This function will never sleep and will return immediately.
    pub fn try_upread(&self) -> Option<RwMutexUpgradeableGuard<'_, T>> {
        proof_decl! {
            // On success: ownership of the upread slot of the core token.
            let tracked mut upgrade_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>> = None;
            // If a writer held the lock: token for retracting our bit.
            let tracked mut retract_upgrade_token: Option<UniqueToken> = None;
        }
        proof! {
            use_type_invariant(self);
            lemma_consts_properties();
        }

        // Set the upgrade bit unconditionally, then inspect the previous
        // WRITER/UPGRADEABLE_READER bits to classify the outcome.
        let lock = atomic_with_ghost!(
            self.lock => fetch_or(UPGRADEABLE_READER);
            update prev -> next;
            ghost g => {
                lemma_consts_properties_value(prev);
                lemma_consts_properties_prev_next(prev, next);
                if prev & (WRITER | UPGRADEABLE_READER) == 0 {
                    // No writer, no other upreader: we own the slot.
                    upgrade_guard_token = Some(g.upreader_guard_token.tracked_take());
                } else if prev & (WRITER | UPGRADEABLE_READER) == WRITER {
                    // A writer holds the lock and we set the bit spuriously;
                    // take the unique token to account for the retraction.
                    retract_upgrade_token = Some(g.upread_retract_token.tracked_take());
                }
                // Else: the bit was already set by another upreader; we
                // changed nothing and owe no retraction.
            }
        ) & (WRITER | UPGRADEABLE_READER);

        if lock == 0 {
            return Some(RwMutexUpgradeableGuard {
                inner: self,
                v_token: Tracked(upgrade_guard_token.tracked_unwrap()),
            });
        } else if lock == WRITER {
            // Retract the spurious UPGRADEABLE_READER bit we set.
            atomic_with_ghost!(
                self.lock => fetch_sub(UPGRADEABLE_READER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    lemma_consts_properties_value(prev_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    if g.upread_retract_token is Some {
                        // Both tokens present would contradict uniqueness.
                        let tracked mut token = retract_upgrade_token.tracked_unwrap();
                        token.validate_with_other(g.upread_retract_token.tracked_borrow());
                    } else {
                        g.upread_retract_token = retract_upgrade_token;
                    }
                }
            );
        }
        None
    }

    /* /// Returns a mutable reference to the underlying data.
    ///
    /// This method is zero-cost: By holding a mutable reference to the lock, the compiler has
    /// already statically guaranteed that access to the data is exclusive.
    pub fn get_mut(&mut self) -> &mut T {
        self.val.get_mut()
    } */
}
538
539/* impl<T: /*: ?Sized +*/ fmt::Debug> fmt::Debug for RwMutex<T> {
540    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
541        fmt::Debug::fmt(&self.val, f)
542    }
543} */
544
/// Because more than one reader can obtain an immutable reference to the `T`,
/// `T` must be `Sync` to guarantee sharing safety.
#[verifier::external]
unsafe impl<T: /*: ?Sized +*/ Send> Send for RwMutex<T> {}
#[verifier::external]
unsafe impl<T: /*: ?Sized +*/ Send + Sync> Sync for RwMutex<T> {}

// The guards are explicitly !Send: they must be dropped on the thread that
// acquired them. Sharing a guard (&Guard) only exposes &T, so Sync on the
// guard requires only T: Sync.
impl<T /*: ?Sized*/> !Send for RwMutexWriteGuard<'_, T> {}
#[verifier::external]
unsafe impl<T: /*: ?Sized +*/ Sync> Sync for RwMutexWriteGuard<'_, T> {}

impl<T /*: ?Sized*/> !Send for RwMutexReadGuard<'_, T> {}
#[verifier::external]
unsafe impl<T: /*: ?Sized +*/ Sync> Sync for RwMutexReadGuard<'_, T> {}

impl<T /*: ?Sized*/> !Send for RwMutexUpgradeableGuard<'_, T> {}
#[verifier::external]
unsafe impl<T: /*: ?Sized +*/ Sync> Sync for RwMutexUpgradeableGuard<'_, T> {}
563
/// A guard that provides immutable data access.
#[verifier::reject_recursive_types(T)]
#[clippy::has_significant_drop]
#[must_use]
pub struct RwMutexReadGuard<'a, T /*: ?Sized*/> {
    inner: &'a RwMutex<T>,
    // One fraction of the read-guard resource; carries a fractional cell
    // permission plus knowledge that no writer is active.
    v_token: Tracked<Frac<ReadPerm<T>, MAX_READER_U64>>,
}
572
impl<'a, T> RwMutexReadGuard<'a, T> {
    /// Type invariant: the guard's token matches the ids of its mutex and
    /// carries exactly one fraction of the read-guard resource.
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool {
        let resource = self.v_token@.resource();
        let read_half_cell_perm = resource.0;
        let mode_knowledge = resource.1;
        &&& self.inner.core_token_id() == mode_knowledge.id()
        &&& self.inner.frac_id() == read_half_cell_perm.id()
        &&& self.inner.cell_id() == read_half_cell_perm.resource().id()
        &&& self.v_token@.id() == self.inner.read_guard_token_id()
        &&& read_half_cell_perm.frac() == 1
        &&& self.v_token@.frac() == 1
    }

    /// The protected value as seen through this guard's cell permission.
    pub closed spec fn value(self) -> T {
        *self.v_token@.resource().0.resource().value()
    }

    /// `guard@` is the protected value.
    pub open spec fn view(self) -> T {
        self.value()
    }
}
595
impl<T /*: ?Sized*/> Deref for RwMutexReadGuard<'_, T> {
    type Target = T;

    /// Borrows the protected value using the guard's cell permission.
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T {
        proof! {
            use_type_invariant(self);
        }
        // unsafe { &*self.inner.val.get() }
        self.inner.val.borrow(Tracked(self.v_token.borrow().borrow().0.borrow()))
    }
}
608
impl<T/* : ?Sized */> RwMutexReadGuard<'_, T> {
    /// Releases the read lock, returning the guard's fraction to the
    /// shared resource and decrementing the reader count.
    fn drop(self) {
        // When there are no readers, wake up a waiting writer.
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        let Tracked(token) = self.v_token;
        if atomic_with_ghost!(
            self.inner.lock => fetch_sub(READER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                // NOTE(review): unverified assumption that the counter is not
                // saturated here — confirm or replace with a proof (see the
                // matching `assume` in `try_read`).
                assume(no_max_reader_overflow(prev_usize));
                lemma_consts_properties_value(next_usize);
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                // Our left-knowledge certifies no writer holds the lock, so
                // the read-guard resource is in its Left state.
                g.core_token.validate_with_one_left_knowledge(&token.borrow().1);
                let tracked mut tmp = g.read_guard_token.tracked_take_left();
                tmp.combine(token);
                g.read_guard_token = Sum::Left(tmp);
            }
        ) == READER {
            // We were the last reader; a writer may now be able to proceed.
            self.inner.queue.wake_one();
        }
    }
}
637
/// A guard that provides mutable data access.
#[clippy::has_significant_drop]
#[must_use]
#[verifier::reject_recursive_types(T)]
pub struct RwMutexWriteGuard<'a, T /*: ?Sized*/> {
    inner: &'a RwMutex<T>,
    // The full permission to the protected cell (exclusive access).
    v_perm: Tracked<PointsTo<T>>,
    // Knowledge that the core resource is in writer ("right") mode.
    v_token: Tracked<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>>,
}
647
impl<'a, T> RwMutexWriteGuard<'a, T> {
    /// Type invariant: the permission and token belong to this guard's mutex.
    #[verifier::type_invariant]
    spec fn type_inv(self) -> bool {
        &&& self.inner.cell_id() == self.v_perm@.id()
        &&& self.inner.core_token_id() == self.v_token@.id()
    }

    /// The protected value as seen through the guard's full permission.
    pub closed spec fn value(self) -> T {
        *self.v_perm@.value()
    }

    /// `guard@` is the protected value.
    pub open spec fn view(self) -> T {
        self.value()
    }
}
663
impl<T /*: ?Sized*/> Deref for RwMutexWriteGuard<'_, T> {
    type Target = T;

    /// Borrows the protected value using the writer's full permission.
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T {
        proof! {
            use_type_invariant(self);
        }
        // unsafe { &*self.inner.val.get() }
        self.inner.val.borrow(Tracked(self.v_perm.borrow()))
    }
}
676
impl<'a, T /*: ?Sized*/> RwMutexWriteGuard<'a, T> {
    /// Atomically downgrades a write guard to an upgradeable reader guard.
    ///
    /// This method always succeeds because the lock is exclusively held by the writer.
    // Loops retrying `try_downgrade`; no decreases clause since termination
    // relies on the CAS eventually succeeding while we hold the writer bit.
    #[verifier::exec_allows_no_decreases_clause]
    pub fn downgrade(self) -> RwMutexUpgradeableGuard<'a, T> {
        let mut this = self;
        loop {
            this = match this.try_downgrade() {
                Ok(guard) => return guard,
                Err(e) => e,
            };
        }
    }

    /// This is not exposed as a public method to prevent intermediate lock states from affecting the
    /// downgrade process.
    fn try_downgrade(self) -> Result<RwMutexUpgradeableGuard<'a, T>, Self> {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        let Tracked(perm) = self.v_perm;
        let Tracked(token) = self.v_token;
        proof_decl! {
            // On success: the upread slot ownership for the new guard.
            let tracked mut upgrade_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>> = None;
            // On failure: the pieces needed to reconstruct the write guard.
            let tracked mut err_perm: Option<PointsTo<T>> = None;
            let tracked mut err_write_guard_token: Option<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>> = None;
        }
        let inner = self.inner;
        // Atomically swap the writer bit for the upgrade bit. Note this can
        // fail if e.g. a concurrent failed read attempt has bumped the counter.
        let res = atomic_with_ghost!(
            self.inner.lock => compare_exchange(WRITER, UPGRADEABLE_READER);
            update prev -> next;
            returning res;
            ghost g => {
                lemma_consts_properties_prev_next(prev, next);
                if res is Ok {
                    // Return the writer's permission and flip the core token
                    // back to reader ("left") mode.
                    g.core_token.validate_with_one_right_knowledge(&token);
                    g.core_token.join_one_right_knowledge(token);
                    let tracked empty = g.core_token.take_resource_right();
                    let tracked mut full = empty.put_resource(perm);
                    // Split the permission: one half stays in the core token,
                    // the other re-seeds the read-guard resource.
                    let tracked read_half_cell_perm = full.split(1int);
                    g.core_token.change_to_left(full);
                    upgrade_guard_token = Some(g.core_token.split_one_left_owner());
                    let tracked left_token = g.core_token.split_one_left_knowledge();
                    let tracked read_guard_empty = g.read_guard_token.tracked_take_right();
                    let tracked read_guard_token = FracResource::alloc_from_empty(
                        read_guard_empty,
                        (read_half_cell_perm, left_token),
                    );
                    g.read_guard_token = Sum::Left(read_guard_token);
                } else {
                    // Keep the write guard's pieces so we can hand them back.
                    err_perm = Some(perm);
                    err_write_guard_token = Some(token);
                }
            }
        );

        if res.is_ok() {
            // drop(self);
            // The CAS above wrote UPGRADEABLE_READER without the writer bit,
            // but the original code still clears WRITER here; with the bit
            // already clear, this fetch_and only serves the ghost bookkeeping.
            atomic_with_ghost! {
                self.inner.lock => fetch_and(!WRITER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    let tracked mut guard_token = upgrade_guard_token.tracked_unwrap();
                    g.core_token.validate_with_one_left_owner(&guard_token);
                    // Two left-owners cannot coexist: contradiction proof.
                    if g.upreader_guard_token is Some {
                        guard_token.validate_with_one_left_owner(
                            g.upreader_guard_token.tracked_borrow(),
                        );
                        assert(false);
                    }
                    upgrade_guard_token = Some(guard_token);
                    lemma_consts_properties_value(prev_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    lemma_consts_properties_value(next_usize);
                }
            };
            // Readers may now proceed alongside the upreader.
            self.inner.queue.wake_all();
            Ok(RwMutexUpgradeableGuard {
                inner,
                v_token: Tracked(upgrade_guard_token.tracked_unwrap()),
            })
        } else {
            // Reconstruct and return the untouched write guard.
            Err(RwMutexWriteGuard {
                inner,
                v_perm: Tracked(err_perm.tracked_unwrap()),
                v_token: Tracked(err_write_guard_token.tracked_unwrap()),
            })
        }
    }

    /// Releases the write lock, restoring the ghost state to reader mode.
    pub fn drop(self) {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        let Tracked(mut perm) = self.v_perm;
        let Tracked(token) = self.v_token;
        atomic_with_ghost! {
            self.inner.lock => fetch_and(!WRITER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                lemma_consts_properties_value(next_usize);
                // Give the cell permission back and flip the core token to
                // reader mode, re-creating the upread slot and the
                // read-guard resource exactly as `new` set them up.
                g.core_token.validate_with_one_right_knowledge(&token);
                g.core_token.join_one_right_knowledge(token);
                let tracked empty = g.core_token.take_resource_right();
                let tracked mut full = empty.put_resource(perm);
                let tracked read_half_cell_perm = full.split(1int);
                g.core_token.change_to_left(full);
                let tracked upreader_guard_token = g.core_token.split_one_left_owner();
                g.upreader_guard_token = Some(upreader_guard_token);
                let tracked left_token = g.core_token.split_one_left_knowledge();
                let tracked read_guard_empty = g.read_guard_token.tracked_take_right();
                let tracked read_guard_token = FracResource::alloc_from_empty(
                    read_guard_empty,
                    (read_half_cell_perm, left_token),
                );
                g.read_guard_token = Sum::Left(read_guard_token);
            }
        };
        // When the current writer releases, wake up all the sleeping threads.
        // All awakened threads may include readers and writers.
        // Thanks to the `wait_until` method, either all readers
        // continue to execute or one writer continues to execute.
        self.inner.queue.wake_all();
    }
}
812
/// A guard that provides immutable data access but can be atomically
/// upgraded to [`RwMutexWriteGuard`].
#[verifier::reject_recursive_types(T)]
pub struct RwMutexUpgradeableGuard<'a, T /*: ?Sized*/> {
    // Back-reference to the mutex this guard was taken from.
    inner: &'a RwMutex<T>,
    // Tracked ghost token witnessing that this guard holds the single
    // upgradeable-reader slot (a `OneLeftOwner` over the mutex's core sum
    // resource); it is validated against / returned to
    // `g.upreader_guard_token` on upgrade and drop.
    v_token: Tracked<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>>,
}
820
impl<'a, T /*: ?Sized*/> RwMutexUpgradeableGuard<'a, T> {
    /// Type invariant: the guard's tracked token is well-formed with respect
    /// to the mutex's ghost-state identifiers (core token, fraction, cell).
    #[verifier::type_invariant]
    pub closed spec fn type_inv(self) -> bool {
        wf_upgradeable_guard_token(
            self.inner.core_token_id(),
            self.inner.frac_id(),
            self.inner.cell_id(),
            self.v_token@,
        )
    }

    /// The value currently stored in the mutex, read off the guard's
    /// tracked token (its nested resource holds the cell permission).
    pub closed spec fn value(self) -> T {
        *self.v_token@.resource().resource().value()
    }

    /// Spec-level view of the guard: simply the guarded value.
    pub open spec fn view(self) -> T {
        self.value()
    }
}
840
impl<'a, T> RwMutexUpgradeableGuard<'a, T> {
    /// Upgrades this upread guard to a write guard atomically.
    ///
    /// After calling this method, subsequent readers will be blocked
    /// while previous readers remain unaffected.
    ///
    /// The calling thread will not sleep, but spin to wait for the existing
    /// reader to be released. There are two main reasons.
    /// - First, it needs to sleep in an extra waiting queue and needs extra wake-up logic and overhead.
    /// - Second, upgrading method usually requires a high response time (because the mutex is being used now).
    #[verifier::exec_allows_no_decreases_clause]
    pub fn upgrade(self) -> RwMutexWriteGuard<'a, T> {
        let mut this = self;
        proof! {
            use_type_invariant(&this);
            use_type_invariant(&this.inner);
            lemma_consts_properties();
        }
        // Set BEING_UPGRADED; per the doc above this is what blocks
        // subsequent readers while the upgrade is pending. The frame lemma
        // shows all other bits of the lock word are unchanged.
        atomic_with_ghost!(
            this.inner.lock => fetch_or(BEING_UPGRADED);
            update prev -> next;
            ghost g => {
                lemma_consts_properties_prev_next(prev, next);
            }
        );
        // Spin (no decreases clause) until the CAS in `try_upgrade`
        // succeeds, i.e. until all existing readers have left.
        loop {
            this = match this.try_upgrade() {
                Ok(guard) => return guard,
                Err(e) => e,
            };
        }
    }

    /// Attempts to upgrade this upread guard to a write guard atomically.
    ///
    /// This function will return immediately.
    ///
    /// This function is not exposed publicly because the `BEING_UPGRADED` bit
    /// is set only in [`Self::upgrade`].
    fn try_upgrade(self) -> Result<RwMutexWriteGuard<'a, T>, Self> {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        let Tracked(upread_guard_token) = self.v_token;
        // Ghost out-parameters populated inside the atomic blocks below,
        // then unwrapped on the corresponding success/failure paths.
        proof_decl! {
            let tracked mut write_perm: Option<PointsTo<T>> = None;
            let tracked mut err_upread_guard_token: Option<OneLeftOwner<HalfPerm<T>, NoPerm<T>, 3>> = None;
            let tracked mut retract_upgrade_token: Option<UniqueToken> = None;
            let tracked mut write_guard_token: Option<OneRightKnowledge<HalfPerm<T>, NoPerm<T>, 3>> = None;
        }

        // Succeeds only when the lock word is exactly
        // UPGRADEABLE_READER | BEING_UPGRADED, i.e. no reader count, no
        // writer; it then installs WRITER | UPGRADEABLE_READER.
        let res = atomic_with_ghost!(
            self.inner.lock => compare_exchange(UPGRADEABLE_READER | BEING_UPGRADED, WRITER | UPGRADEABLE_READER);
            update prev -> next;
            returning res;
            ghost g => {
                lemma_consts_properties_prev_next(prev, next);
                if res is Ok {
                    // Return our upgradeable-reader ownership to the core
                    // token (duplicate ownership with the stashed token would
                    // be contradictory, hence the validation).
                    g.core_token.validate_with_one_left_owner(&upread_guard_token);
                    if g.upreader_guard_token is Some {
                        upread_guard_token.validate_with_one_left_owner(g.upreader_guard_token.tracked_borrow());
                    }
                    g.core_token.join_one_left_owner(upread_guard_token);
                    // Reclaim the reader-side fractional resource and its
                    // knowledge token…
                    let tracked read_guard_token = g.read_guard_token.tracked_take_left();
                    let tracked (read_resource, read_empty) = read_guard_token.take_resource();
                    g.read_guard_token = Sum::Right(read_empty);
                    let tracked (read_half_cell_perm, left_token) = read_resource;
                    g.core_token.join_one_left_knowledge(left_token);
                    // …and recombine both halves into the full `PointsTo`
                    // permission for the new write guard.
                    let tracked mut pointsto = g.core_token.take_resource_left();
                    pointsto.combine(read_half_cell_perm);
                    let tracked (pointsto, empty) = pointsto.take_resource();
                    write_perm = Some(pointsto);
                    // Switch the core token to its `Right` (writer) state and
                    // mint the write guard's knowledge token.
                    g.core_token.change_to_right(empty);
                    write_guard_token = Some(g.core_token.split_one_right_knowledge());
                    retract_upgrade_token = Some(g.upread_retract_token.tracked_take());
                } else {
                    // CAS failed: keep our token so the guard can be rebuilt.
                    err_upread_guard_token = Some(upread_guard_token);
                }
            }
        );

        if res.is_ok() {
            let inner = self.inner;
            // Second phase: drop the UPGRADEABLE_READER bit that the CAS
            // left set, returning the retract token to the ghost state.
            atomic_with_ghost!(
                inner.lock => fetch_sub(UPGRADEABLE_READER);
                update prev -> next;
                ghost g => {
                    let prev_usize = prev as usize;
                    let next_usize = next as usize;
                    lemma_consts_properties_value(prev_usize);
                    lemma_consts_properties_prev_next(prev_usize, next_usize);
                    let tracked mut token = retract_upgrade_token.tracked_unwrap();
                    if g.upread_retract_token is Some {
                        token.validate_with_other(g.upread_retract_token.tracked_borrow());
                    }
                    g.upread_retract_token = Some(token);
                }
            );
            Ok(RwMutexWriteGuard {
                inner,
                v_perm: Tracked(write_perm.tracked_unwrap()),
                v_token: Tracked(write_guard_token.tracked_unwrap()),
            })
        } else {
            Err(RwMutexUpgradeableGuard {
                inner: self.inner,
                v_token: Tracked(err_upread_guard_token.tracked_unwrap()),
            })
        }
    }

    /// Releases this upgradeable-reader guard: clears the
    /// `UPGRADEABLE_READER` bit and returns the guard's token to the ghost
    /// state. Wakes waiters only if this guard was the sole holder
    /// (previous lock word exactly `UPGRADEABLE_READER`).
    pub fn drop(self) {
        proof! {
            use_type_invariant(&self);
            use_type_invariant(self.inner);
            lemma_consts_properties();
        }
        let Tracked(guard_token) = self.v_token;
        let res = atomic_with_ghost!(
            self.inner.lock => fetch_sub(UPGRADEABLE_READER);
            update prev -> next;
            ghost g => {
                let prev_usize = prev as usize;
                let next_usize = next as usize;
                lemma_consts_properties_value(prev_usize);
                lemma_consts_properties_prev_next(prev_usize, next_usize);
                g.core_token.validate_with_one_left_owner(&guard_token);
                if g.upreader_guard_token is Some {
                    // Holding the owner token while the ghost state also
                    // stashes one is contradictory — this branch is dead.
                    guard_token.validate_with_one_left_owner(g.upreader_guard_token.tracked_borrow());
                    assert(false);
                } else {
                    // Stash the token back for the next upgradeable reader.
                    g.upreader_guard_token = Some(guard_token);
                }
            }
        );
        if res == UPGRADEABLE_READER {
            self.inner.queue.wake_all();
        }
    }
}
983
impl<T /*: ?Sized*/> Deref for RwMutexUpgradeableGuard<'_, T> {
    type Target = T;

    /// Immutably borrows the guarded value; the `verus_spec` clause ties the
    /// returned reference to the guard's spec-level `view()`.
    #[verus_spec(returns self.view())]
    fn deref(&self) -> &T {
        proof! {
            use_type_invariant(self);
        }
        // Verified replacement for the raw-pointer deref below: the borrow
        // is justified by the cell permission nested inside `v_token`.
        // unsafe { &*self.inner.val.get() }
        self.inner.val.borrow(Tracked(self.v_token.borrow().tracked_borrow().borrow()))
    }
}
996
/// Establishes the basic bit-level facts about the lock-word constants:
/// their concrete values, the pairwise disjointness of the flag bits
/// (`WRITER`, `UPGRADEABLE_READER`, `BEING_UPGRADED`) and the reader-count
/// fields (`READER_MASK`, `MAX_READER_MASK`, `MAX_READER`), and how the
/// composite masks used by the CAS in `try_upgrade` decompose. Every fact
/// is discharged by a matching `compute_only` assertion in the body.
proof fn lemma_consts_properties()
    ensures
        0 & WRITER == 0,
        0 & UPGRADEABLE_READER == 0,
        0 & BEING_UPGRADED == 0,
        0 & READER_MASK == 0,
        0 & MAX_READER_MASK == 0,
        0 & MAX_READER == 0,
        0 & READER == 0,
        WRITER == 0x8000_0000_0000_0000,
        UPGRADEABLE_READER == 0x4000_0000_0000_0000,
        BEING_UPGRADED == 0x2000_0000_0000_0000,
        READER_MASK == 0x0FFF_FFFF_FFFF_FFFF,
        MAX_READER_MASK == 0x1FFF_FFFF_FFFF_FFFF,
        MAX_READER == 0x1000_0000_0000_0000,
        WRITER & WRITER == WRITER,
        WRITER & !WRITER == 0,
        WRITER & BEING_UPGRADED == 0,
        WRITER & READER_MASK == 0,
        WRITER & MAX_READER_MASK == 0,
        WRITER & MAX_READER == 0,
        WRITER & UPGRADEABLE_READER == 0,
        BEING_UPGRADED & WRITER == 0,
        BEING_UPGRADED & UPGRADEABLE_READER == 0,
        UPGRADEABLE_READER & BEING_UPGRADED == 0,
        UPGRADEABLE_READER & READER_MASK == 0,
        UPGRADEABLE_READER & MAX_READER_MASK == 0,
        UPGRADEABLE_READER & MAX_READER == 0,
        BEING_UPGRADED & READER_MASK == 0,
        BEING_UPGRADED & MAX_READER_MASK == 0,
        BEING_UPGRADED & MAX_READER == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & WRITER == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & UPGRADEABLE_READER == UPGRADEABLE_READER,
        (UPGRADEABLE_READER | BEING_UPGRADED) & BEING_UPGRADED == BEING_UPGRADED,
        (UPGRADEABLE_READER | BEING_UPGRADED) & READER_MASK == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER_MASK == 0,
        (UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER == 0,
        (WRITER | UPGRADEABLE_READER) & WRITER == WRITER,
        (WRITER | UPGRADEABLE_READER) & UPGRADEABLE_READER == UPGRADEABLE_READER,
        (WRITER | UPGRADEABLE_READER) & BEING_UPGRADED == 0,
        (WRITER | UPGRADEABLE_READER) & READER_MASK == 0,
        (WRITER | UPGRADEABLE_READER) & MAX_READER_MASK == 0,
        (WRITER | UPGRADEABLE_READER) & MAX_READER == 0,
{
    // Zero intersects nothing.
    assert(0 & WRITER == 0) by (compute_only);
    assert(0 & UPGRADEABLE_READER == 0) by (compute_only);
    assert(0 & BEING_UPGRADED == 0) by (compute_only);
    assert(0 & READER_MASK == 0) by (compute_only);
    assert(0 & MAX_READER == 0) by (compute_only);
    assert(0 & MAX_READER_MASK == 0) by (compute_only);
    assert(0 & READER == 0) by (compute_only);
    // Concrete values of the constants.
    assert(WRITER == 0x8000_0000_0000_0000) by (compute_only);
    assert(UPGRADEABLE_READER == 0x4000_0000_0000_0000) by (compute_only);
    assert(BEING_UPGRADED == 0x2000_0000_0000_0000) by (compute_only);
    assert(READER_MASK == 0x0FFF_FFFF_FFFF_FFFF) by (compute_only);
    assert(MAX_READER == 0x1000_0000_0000_0000) by (compute_only);
    assert(MAX_READER_MASK == 0x1FFF_FFFF_FFFF_FFFF) by (compute_only);
    // Pairwise disjointness of flag bits and reader fields.
    assert(WRITER & WRITER == WRITER) by (compute_only);
    assert(WRITER & !WRITER == 0) by (compute_only);
    assert(WRITER & BEING_UPGRADED == 0) by (compute_only);
    assert(WRITER & READER_MASK == 0) by (compute_only);
    assert(WRITER & MAX_READER_MASK == 0) by (compute_only);
    assert(WRITER & MAX_READER == 0) by (compute_only);
    assert(WRITER & UPGRADEABLE_READER == 0) by (compute_only);
    assert(BEING_UPGRADED & WRITER == 0) by (compute_only);
    assert(BEING_UPGRADED & UPGRADEABLE_READER == 0) by (compute_only);
    assert(UPGRADEABLE_READER & BEING_UPGRADED == 0) by (compute_only);
    assert(UPGRADEABLE_READER & READER_MASK == 0) by (compute_only);
    assert(UPGRADEABLE_READER & MAX_READER_MASK == 0) by (compute_only);
    assert(UPGRADEABLE_READER & MAX_READER == 0) by (compute_only);
    assert(BEING_UPGRADED & READER_MASK == 0) by (compute_only);
    assert(BEING_UPGRADED & MAX_READER_MASK == 0) by (compute_only);
    assert(BEING_UPGRADED & MAX_READER == 0) by (compute_only);
    // Decomposition of the CAS expected/new values used in `try_upgrade`.
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & WRITER == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & UPGRADEABLE_READER == UPGRADEABLE_READER)
        by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & BEING_UPGRADED == BEING_UPGRADED)
        by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & READER_MASK == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER_MASK == 0) by (compute_only);
    assert((UPGRADEABLE_READER | BEING_UPGRADED) & MAX_READER == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & WRITER == WRITER) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & UPGRADEABLE_READER == UPGRADEABLE_READER)
        by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & BEING_UPGRADED == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & READER_MASK == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & MAX_READER_MASK == 0) by (compute_only);
    assert((WRITER | UPGRADEABLE_READER) & MAX_READER == 0) by (compute_only);
}
1086
/// Facts about a single lock-word value `prev`: combined-mask tests
/// decompose into per-flag tests, the `READER_MASK`/`MAX_READER_MASK`
/// views of the reader count agree when `MAX_READER` is clear, and
/// `prev + READER` cannot overflow while `no_max_reader_overflow(prev)`
/// holds. Each implication in `ensures` is proved by a matching
/// `bit_vector` assertion guarded by the corresponding hypothesis.
proof fn lemma_consts_properties_value(prev: usize)
    ensures
        no_max_reader_overflow(prev) ==> prev + READER <= usize::MAX,
        prev & (WRITER | BEING_UPGRADED | MAX_READER) == 0 ==> {
            &&& prev & WRITER == 0
            &&& prev & BEING_UPGRADED == 0
            &&& prev & MAX_READER == 0
        },
        prev & (WRITER | UPGRADEABLE_READER) == 0 ==> {
            &&& prev & WRITER == 0
            &&& prev & UPGRADEABLE_READER == 0
        },
        prev & MAX_READER == 0 ==> prev & READER_MASK == prev & MAX_READER_MASK,
        prev & MAX_READER != 0 ==> prev & MAX_READER_MASK >= MAX_READER,
        prev & (WRITER | UPGRADEABLE_READER) == WRITER ==> {
            &&& prev & UPGRADEABLE_READER == 0
            &&& prev & WRITER == WRITER
        },
        prev & UPGRADEABLE_READER != 0 ==> prev >= UPGRADEABLE_READER,
        prev & UPGRADEABLE_READER == 0 ==> {
            ||| prev & (WRITER | UPGRADEABLE_READER) == 0
            ||| prev & (WRITER | UPGRADEABLE_READER) == WRITER
        },
{
    // No arithmetic overflow of `prev + READER` while the extended reader
    // count is below its mask (the `no_max_reader_overflow` hypothesis;
    // presumably it unfolds to the inequality below — the `requires`
    // clause matches it).
    if no_max_reader_overflow(prev) {
        assert(prev + READER <= usize::MAX) by (bit_vector)
            requires
                prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
    }
    // Combined-mask == 0 implies each constituent flag is 0.
    if prev & (WRITER | BEING_UPGRADED | MAX_READER) == 0 {
        assert(prev & WRITER == 0) by (bit_vector)
            requires
                prev & (WRITER | BEING_UPGRADED | MAX_READER) == 0,
        ;
        assert(prev & BEING_UPGRADED == 0) by (bit_vector)
            requires
                prev & (WRITER | BEING_UPGRADED | MAX_READER) == 0,
        ;
        assert(prev & MAX_READER == 0) by (bit_vector)
            requires
                prev & (WRITER | BEING_UPGRADED | MAX_READER) == 0,
        ;
    }
    if prev & (WRITER | UPGRADEABLE_READER) == 0 {
        assert(prev & WRITER == 0) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == 0,
        ;
        assert(prev & UPGRADEABLE_READER == 0) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == 0,
        ;
    }
    // With the MAX_READER bit clear, the two reader-count views coincide.
    if prev & MAX_READER == 0 {
        assert(prev & READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                prev & MAX_READER == 0,
        ;
    }
    if prev & MAX_READER != 0 {
        assert(prev & MAX_READER_MASK >= MAX_READER) by (bit_vector)
            requires
                prev & MAX_READER != 0,
        ;
    }
    if prev & (WRITER | UPGRADEABLE_READER) == WRITER {
        assert(prev & UPGRADEABLE_READER == 0 && prev & WRITER == WRITER) by (bit_vector)
            requires
                prev & (WRITER | UPGRADEABLE_READER) == WRITER,
        ;
    }
    if prev & UPGRADEABLE_READER != 0 {
        assert(prev >= UPGRADEABLE_READER) by (bit_vector)
            requires
                prev & UPGRADEABLE_READER != 0,
        ;
    }
    // With UPGRADEABLE_READER clear, the combined mask can only see WRITER.
    if prev & UPGRADEABLE_READER == 0 {
        assert(prev & (WRITER | UPGRADEABLE_READER) == 0 || prev & (WRITER | UPGRADEABLE_READER)
            == WRITER) by (bit_vector)
            requires
                prev & UPGRADEABLE_READER == 0,
        ;
    }
}
1173
/// Frame lemmas relating the lock word before (`prev`) and after (`next`)
/// each atomic read-modify-write used by the mutex: setting
/// `UPGRADEABLE_READER`/`BEING_UPGRADED` via `fetch_or`, releasing them via
/// `fetch_sub`, incrementing/decrementing the reader count by `READER`, and
/// clearing `WRITER` via `fetch_and(!WRITER)`. Each case states that the
/// update changes only its intended field and leaves every other flag/field
/// unchanged. All facts are discharged by `bit_vector` reasoning; one
/// assertion per conjunct of the corresponding `ensures` implication.
proof fn lemma_consts_properties_prev_next(prev: usize, next: usize)
    ensures
        prev & READER_MASK < MAX_READER,
        next == UPGRADEABLE_READER && prev == WRITER ==> {
            &&& next & WRITER == 0
            &&& next & UPGRADEABLE_READER == UPGRADEABLE_READER
            &&& next & READER_MASK == 0
            &&& next & MAX_READER_MASK == 0
            &&& next & MAX_READER == 0
            &&& next & BEING_UPGRADED == 0
        },
        next == prev | UPGRADEABLE_READER ==> {
            &&& next & UPGRADEABLE_READER != 0
            &&& next & WRITER == prev & WRITER
            &&& next & READER_MASK == prev & READER_MASK
            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
            &&& next & MAX_READER == prev & MAX_READER
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
        next == prev | BEING_UPGRADED ==> {
            &&& next & BEING_UPGRADED != 0
            &&& next & WRITER == prev & WRITER
            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
            &&& next & READER_MASK == prev & READER_MASK
            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
            &&& next & MAX_READER == prev & MAX_READER
        },
        next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0 ==> {
            &&& next & UPGRADEABLE_READER == 0
            &&& next & WRITER == prev & WRITER
            &&& next & READER_MASK == prev & READER_MASK
            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
            &&& next & MAX_READER == prev & MAX_READER
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
        next == prev - READER && prev & READER_MASK != 0 ==> {
            &&& next & READER_MASK == (prev & READER_MASK) - READER
            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER
            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
            &&& next & WRITER == prev & WRITER
            &&& next & MAX_READER == prev & MAX_READER
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
        next == prev - READER && prev & MAX_READER_MASK != 0 ==> {
            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER
            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
            &&& next & WRITER == prev & WRITER
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
        next == prev + READER && no_max_reader_overflow(prev) ==> {
            &&& next & READER_MASK == if (prev & READER_MASK) + READER == MAX_READER {
                0
            } else {
                (prev & READER_MASK) + READER
            }
            &&& next & MAX_READER_MASK == (prev & MAX_READER_MASK) + READER
            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
            &&& next & WRITER == prev & WRITER
            &&& next & MAX_READER == if (prev & READER_MASK) + READER == MAX_READER {
                MAX_READER
            } else {
                prev & MAX_READER
            }
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
        next == prev & !WRITER ==> {
            &&& next & WRITER == 0
            &&& next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER
            &&& next & READER_MASK == prev & READER_MASK
            &&& next & MAX_READER_MASK == prev & MAX_READER_MASK
            &&& next & MAX_READER == prev & MAX_READER
            &&& next & BEING_UPGRADED == prev & BEING_UPGRADED
        },
{
    // The masked reader count always fits strictly below MAX_READER.
    assert(prev & READER_MASK < MAX_READER) by (bit_vector);
    // Case: writer downgraded to upgradeable reader (word WRITER -> UPGRADEABLE_READER).
    if next == UPGRADEABLE_READER && prev == WRITER {
        assert(next & WRITER == 0) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
        assert(next & UPGRADEABLE_READER == UPGRADEABLE_READER) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
        assert(next & READER_MASK == 0) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
        assert(next & MAX_READER_MASK == 0) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
        assert(next & MAX_READER == 0) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
        assert(next & BEING_UPGRADED == 0) by (bit_vector)
            requires
                next == UPGRADEABLE_READER && prev == WRITER,
        ;
    }
    // Case: `fetch_or(UPGRADEABLE_READER)` — only that bit is (now) set.
    if next == prev | UPGRADEABLE_READER {
        assert(next & UPGRADEABLE_READER != 0) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev | UPGRADEABLE_READER,
        ;
    }
    // Case: `fetch_or(BEING_UPGRADED)` (used by `upgrade`).
    if next == prev | BEING_UPGRADED {
        assert(next & BEING_UPGRADED != 0) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev | BEING_UPGRADED,
        ;
    }
    // Case: `fetch_sub(UPGRADEABLE_READER)` with the bit known to be set
    // (subtraction then clears exactly that bit).
    if next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0 {
        assert(next & UPGRADEABLE_READER == 0) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev - UPGRADEABLE_READER && prev & UPGRADEABLE_READER != 0,
        ;
    }
    // Case: reader release (`fetch_sub(READER)`) with a nonzero low count.
    if next == prev - READER && prev & READER_MASK != 0 {
        assert(next & READER_MASK == (prev & READER_MASK) - READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev - READER && prev & READER_MASK != 0,
        ;
    }
    // Case: reader release, weaker hypothesis on the extended count
    // (covers the situation where only MAX_READER_MASK is known nonzero).
    if next == prev - READER && prev & MAX_READER_MASK != 0 {
        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) - READER) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev - READER && prev & MAX_READER_MASK != 0,
        ;
    }
    // Case: reader acquire (`+ READER`). NOTE(review): the guard here is the
    // inequality form — presumably the body of `no_max_reader_overflow`
    // from the ensures; the increment may carry into the MAX_READER bit,
    // which the two conditional conjuncts account for.
    if next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK {
        assert(next & READER_MASK == if prev & READER_MASK == MAX_READER - READER {
            0
        } else {
            (prev & READER_MASK) + READER
        }) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & MAX_READER_MASK == (prev & MAX_READER_MASK) + READER) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & WRITER == prev & WRITER) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & MAX_READER == if (prev & READER_MASK) + READER == MAX_READER {
            MAX_READER
        } else {
            prev & MAX_READER
        }) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev + READER && prev & MAX_READER_MASK < MAX_READER_MASK,
        ;
    }
    // Case: writer release (`fetch_and(!WRITER)`).
    if next == prev & !WRITER {
        assert(next & WRITER == 0) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & UPGRADEABLE_READER == prev & UPGRADEABLE_READER) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & READER_MASK == prev & READER_MASK) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & MAX_READER_MASK == prev & MAX_READER_MASK) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & MAX_READER == prev & MAX_READER) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
        assert(next & BEING_UPGRADED == prev & BEING_UPGRADED) by (bit_vector)
            requires
                next == prev & !WRITER,
        ;
    }
}
1458
1459} // verus!